summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2021-11-23 17:07:41 -0500
committerGitHub <noreply@github.com>2021-11-23 17:07:41 -0500
commit8aa0728a8be0fe56402b1b1a9007014d8bb39183 (patch)
tree0920cd7d5ff73bd530f0923ce1db15943ce1868c
parent378816d0439cf4cf2647952eec68a4e24ad6fa06 (diff)
parent32d3f3dc4f7ad5f7f4d16724c1ab3ced48d53916 (diff)
Merge pull request #3231 from Algo-devops-service/relbeta3.2.0v3.2.0-beta
go-algorand 3.2.0-beta
-rw-r--r--.circleci/config.yml81
-rw-r--r--.travis.yml186
-rw-r--r--Makefile2
-rw-r--r--agreement/abstractions.go18
-rw-r--r--agreement/agreementtest/keyManager.go9
-rw-r--r--agreement/agreementtest/simulate_test.go8
-rw-r--r--agreement/asyncVoteVerifier.go16
-rw-r--r--agreement/asyncVoteVerifier_test.go50
-rw-r--r--agreement/common_test.go15
-rw-r--r--agreement/cryptoVerifier.go9
-rw-r--r--agreement/cryptoVerifier_test.go27
-rw-r--r--agreement/demux_test.go4
-rw-r--r--agreement/fuzzer/fuzzer_test.go3
-rw-r--r--agreement/fuzzer/ledger_test.go9
-rw-r--r--agreement/keyManager_test.go74
-rw-r--r--agreement/msgp_gen.go1283
-rw-r--r--agreement/player_permutation_test.go3
-rw-r--r--agreement/proposal.go2
-rw-r--r--agreement/proposalStore_test.go17
-rw-r--r--agreement/proposal_test.go7
-rw-r--r--agreement/pseudonode.go115
-rw-r--r--agreement/pseudonode_test.go106
-rw-r--r--agreement/selector.go4
-rw-r--r--agreement/service_test.go18
-rw-r--r--buildnumber.dat2
-rw-r--r--catchup/fetcher_test.go12
-rw-r--r--catchup/peerSelector_test.go5
-rw-r--r--catchup/service.go37
-rw-r--r--catchup/service_test.go10
-rw-r--r--cmd/catchpointdump/file.go2
-rw-r--r--cmd/catchpointdump/net.go3
-rw-r--r--cmd/goal/account.go175
-rw-r--r--cmd/goal/application.go243
-rw-r--r--cmd/goal/clerk.go2
-rw-r--r--cmd/goal/multisig.go3
-rw-r--r--cmd/pingpong/runCmd.go2
-rw-r--r--cmd/tealdbg/cdtState.go6
-rw-r--r--cmd/tealdbg/local.go4
-rw-r--r--cmd/tealdbg/localLedger.go3
-rw-r--r--cmd/tealdbg/local_test.go100
-rw-r--r--cmd/tealdbg/main.go2
-rw-r--r--cmd/updater/versionCmd.go14
-rw-r--r--compactcert/abstractions.go4
-rw-r--r--compactcert/worker.go4
-rw-r--r--compactcert/worker_test.go7
-rw-r--r--config/config.go479
-rw-r--r--config/consensus.go11
-rw-r--r--config/defaultsGenerator/defaultsGenerator.go2
-rw-r--r--config/localTemplate.go500
-rw-r--r--config/local_defaults.go7
-rw-r--r--config/version.go2
-rw-r--r--crypto/batchverifier.go2
-rw-r--r--crypto/compactcert/builder.go4
-rw-r--r--crypto/compactcert/builder_test.go12
-rw-r--r--crypto/compactcert/common.go2
-rw-r--r--crypto/compactcert/msgp_gen.go184
-rw-r--r--crypto/compactcert/msgp_gen_test.go60
-rw-r--r--crypto/compactcert/structs.go46
-rw-r--r--crypto/curve25519.go15
-rw-r--r--crypto/msgp_gen.go46
-rw-r--r--crypto/multisig.go9
-rw-r--r--crypto/multisig_test.go2
-rw-r--r--crypto/vrf.go5
-rw-r--r--daemon/algod/api/algod.oas2.json324
-rw-r--r--daemon/algod/api/algod.oas3.yml395
-rw-r--r--daemon/algod/api/client/restClient.go25
-rw-r--r--daemon/algod/api/server/v2/dryrun.go3
-rw-r--r--daemon/algod/api/server/v2/errors.go1
-rw-r--r--daemon/algod/api/server/v2/generated/private/routes.go390
-rw-r--r--daemon/algod/api/server/v2/generated/private/types.go63
-rw-r--r--daemon/algod/api/server/v2/generated/routes.go339
-rw-r--r--daemon/algod/api/server/v2/generated/types.go47
-rw-r--r--daemon/algod/api/server/v2/handlers.go152
-rw-r--r--daemon/algod/api/server/v2/test/helpers.go19
-rw-r--r--data/abi/abi_encode.go665
-rw-r--r--data/abi/abi_encode_test.go876
-rw-r--r--data/abi/abi_json.go254
-rw-r--r--data/abi/abi_json_test.go123
-rw-r--r--data/abi/abi_type.go122
-rw-r--r--data/abi/abi_type_test.go164
-rw-r--r--data/abi/abi_value.go313
-rw-r--r--data/account/msgp_gen.go238
-rw-r--r--data/account/msgp_gen_test.go (renamed from data/pooldata/msgp_gen_test.go)22
-rw-r--r--data/account/participation.go44
-rw-r--r--data/account/participationRegistry.go953
-rw-r--r--data/account/participationRegistry_test.go769
-rw-r--r--data/accountManager.go96
-rw-r--r--data/basics/ccertpart.go50
-rw-r--r--data/basics/fields_test.go201
-rw-r--r--data/basics/msgp_gen.go166
-rw-r--r--data/basics/msgp_gen_test.go60
-rw-r--r--data/basics/userBalance.go57
-rw-r--r--data/basics/userBalance_test.go40
-rw-r--r--data/bookkeeping/block.go15
-rw-r--r--data/bookkeeping/msgp_gen.go922
-rw-r--r--data/bookkeeping/msgp_gen_test.go60
-rw-r--r--data/committee/committee.go2
-rw-r--r--data/committee/common_test.go2
-rw-r--r--data/committee/credential_test.go16
-rw-r--r--data/datatest/impls.go10
-rw-r--r--data/ledger.go4
-rw-r--r--data/ledger_test.go2
-rw-r--r--data/pooldata/msgp_gen.go91
-rw-r--r--data/pooldata/signedTxGroup.go63
-rw-r--r--data/pools/transactionPool.go235
-rw-r--r--data/pools/transactionPool_test.go44
-rw-r--r--data/transactions/application.go24
-rw-r--r--data/transactions/application_test.go8
-rw-r--r--data/transactions/logic/README.md5
-rw-r--r--data/transactions/logic/TEAL_opcodes.md17
-rw-r--r--data/transactions/logic/assembler.go38
-rw-r--r--data/transactions/logic/assembler_test.go33
-rw-r--r--data/transactions/logic/doc.go11
-rw-r--r--data/transactions/logic/eval.go173
-rw-r--r--data/transactions/logic/evalAppTxn_test.go64
-rw-r--r--data/transactions/logic/eval_test.go7
-rw-r--r--data/transactions/logic/fields.go9
-rw-r--r--data/transactions/logic/opcodes.go1
-rw-r--r--data/transactions/msgp_gen.go76
-rw-r--r--data/transactions/teal.go3
-rw-r--r--data/transactions/transaction.go5
-rw-r--r--data/transactions/verify/verifiedTxnCache.go26
-rw-r--r--data/txHandler.go254
-rw-r--r--data/txHandler_test.go172
-rw-r--r--go.mod5
-rw-r--r--go.sum34
-rw-r--r--installer/config.json.example5
-rw-r--r--ledger/README.md10
-rw-r--r--ledger/accountdb.go76
-rw-r--r--ledger/accountdb_test.go361
-rw-r--r--ledger/acctupdates.go1603
-rw-r--r--ledger/acctupdates_test.go568
-rw-r--r--ledger/applications_test.go426
-rw-r--r--ledger/apptxn_test.go324
-rw-r--r--ledger/archival_test.go22
-rw-r--r--ledger/blockqueue_test.go5
-rw-r--r--ledger/bulletin.go25
-rw-r--r--ledger/catchpointtracker.go901
-rw-r--r--ledger/catchpointtracker_test.go415
-rw-r--r--ledger/catchpointwriter.go2
-rw-r--r--ledger/catchpointwriter_test.go20
-rw-r--r--ledger/catchupaccessor.go10
-rw-r--r--ledger/catchupaccessor_test.go11
-rw-r--r--ledger/eval_test.go1934
-rw-r--r--ledger/evalbench_test.go440
-rw-r--r--ledger/evalindexer.go (renamed from ledger/evalIndexer.go)150
-rw-r--r--ledger/evalindexer_test.go (renamed from ledger/evalIndexer_test.go)129
-rw-r--r--ledger/internal/appcow.go (renamed from ledger/appcow.go)13
-rw-r--r--ledger/internal/appcow_test.go (renamed from ledger/appcow_test.go)43
-rw-r--r--ledger/internal/applications.go (renamed from ledger/applications.go)2
-rw-r--r--ledger/internal/applications_test.go353
-rw-r--r--ledger/internal/assetcow.go (renamed from ledger/assetcow.go)2
-rw-r--r--ledger/internal/compactcert.go (renamed from ledger/compactcert.go)2
-rw-r--r--ledger/internal/compactcert_test.go (renamed from ledger/compactcert_test.go)2
-rw-r--r--ledger/internal/cow.go (renamed from ledger/cow.go)2
-rw-r--r--ledger/internal/cow_test.go (renamed from ledger/cow_test.go)11
-rw-r--r--ledger/internal/eval.go (renamed from ledger/eval.go)420
-rw-r--r--ledger/internal/eval_blackbox_test.go1081
-rw-r--r--ledger/internal/eval_test.go1030
-rw-r--r--ledger/internal/evalindexer.go51
-rw-r--r--ledger/ledger.go176
-rw-r--r--ledger/ledger_perf_test.go18
-rw-r--r--ledger/ledger_test.go107
-rw-r--r--ledger/ledgercore/error.go4
-rw-r--r--ledger/ledgercore/misc.go51
-rw-r--r--ledger/ledgercore/onlineacct.go (renamed from agreement/fuzzer/keyManager_test.go)28
-rw-r--r--ledger/ledgercore/validatedBlock.go59
-rw-r--r--ledger/ledgercore/votersForRound.go164
-rw-r--r--ledger/metrics.go26
-rw-r--r--ledger/msgp_gen.go54
-rw-r--r--ledger/notifier.go26
-rw-r--r--ledger/onlinetopheap.go (renamed from ledger/onlineacct.go)23
-rw-r--r--ledger/onlinetopheap_test.go (renamed from ledger/onlineacct_test.go)11
-rw-r--r--ledger/perf_test.go52
-rw-r--r--ledger/testing/accountsTotals.go41
-rw-r--r--ledger/testing/initState.go111
-rw-r--r--ledger/testing/randomAccounts.go344
-rw-r--r--ledger/testing/testGenesis.go137
-rw-r--r--ledger/tracker.go552
-rw-r--r--ledger/trackerdb.go365
-rw-r--r--ledger/txtail.go26
-rw-r--r--ledger/txtail_test.go7
-rw-r--r--ledger/voters.go161
-rw-r--r--libgoal/libgoal.go36
-rw-r--r--libgoal/participation.go15
-rw-r--r--logging/telemetryspec/metric.go61
-rw-r--r--logging/testingLogger.go2
-rw-r--r--netdeploy/remote/deployedNetwork.go5
-rw-r--r--network/latencyTracker.go170
-rw-r--r--network/latencyTracker_test.go121
-rw-r--r--network/wsNetwork.go34
-rw-r--r--network/wsNetwork_test.go225
-rw-r--r--network/wsPeer.go150
-rw-r--r--node/assemble_test.go5
-rw-r--r--node/impls.go4
-rw-r--r--node/netprio.go10
-rw-r--r--node/node.go251
-rw-r--r--node/node_test.go48
-rw-r--r--node/txnSyncConn.go245
-rw-r--r--protocol/hash.go1
-rw-r--r--protocol/tags.go1
-rw-r--r--protocol/txntype.go12
-rw-r--r--rpcs/blockService_test.go7
-rw-r--r--rpcs/txService.go6
-rw-r--r--rpcs/txSyncer.go3
-rw-r--r--rpcs/txSyncer_test.go45
-rwxr-xr-xscripts/travis/codegen_verification.sh2
-rwxr-xr-xscripts/travis/deploy_packages.sh4
-rw-r--r--test/README.md5
-rw-r--r--test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp15
-rw-r--r--test/e2e-go/cli/goal/expect/goalExpectCommon.exp12
-rw-r--r--test/e2e-go/cli/goal/expect/goalNodeTest.exp25
-rw-r--r--test/e2e-go/features/devmode/devmode_test.go66
-rw-r--r--test/e2e-go/features/participation/accountParticipationTransitions_test.go126
-rw-r--r--test/e2e-go/features/participation/participationExpiration_test.go196
-rw-r--r--test/e2e-go/features/partitionRecovery/partitionRecovery_test.go12
-rw-r--r--test/e2e-go/features/transactions/accountv2_test.go45
-rw-r--r--test/e2e-go/features/transactions/messageRate_test.go248
-rw-r--r--test/e2e-go/features/transactions/onlineStatusChange_test.go34
-rw-r--r--test/e2e-go/features/transactions/sendReceive_test.go110
-rw-r--r--test/e2e-go/features/transactions/txnsync_test.go376
-rw-r--r--test/e2e-go/upgrades/rekey_support_test.go32
-rw-r--r--test/framework/fixtures/fixture.go3
-rw-r--r--test/framework/fixtures/libgoalFixture.go1
-rw-r--r--test/heapwatch/heapWatch.py3
-rwxr-xr-xtest/scripts/e2e_subs/app-group.py77
-rwxr-xr-xtest/scripts/e2e_subs/app-rekey.py1
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-abi-add.sh39
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-abi-arg.sh37
-rwxr-xr-xtest/scripts/e2e_subs/goal-partkey-information.sh45
-rwxr-xr-xtest/scripts/e2e_subs/rest-participation-key.sh61
-rwxr-xr-xtest/scripts/e2e_subs/rest.sh185
-rw-r--r--test/scripts/e2e_subs/tealprogs/app-abi-add-example.teal87
-rw-r--r--test/scripts/e2e_subs/tealprogs/app-abi-arg.teal73
-rw-r--r--test/testdata/configs/config-v18.json2
-rw-r--r--test/testdata/configs/config-v19.json97
-rw-r--r--test/testdata/deployednettemplates/hosttemplates/hosttemplates.json24
-rw-r--r--test/testdata/mainnetblocksbin25415596 -> 0 bytes
-rw-r--r--test/testdata/nettemplates/DevModeOneWallet.json22
-rw-r--r--test/testdata/nettemplates/OneNodeTwoRelays.json30
-rw-r--r--test/testdata/nettemplates/TenNodesDistributedMultiWallet.json2
-rw-r--r--test/testdata/nettemplates/ThreeNodesOneOnline.json39
-rw-r--r--test/testdata/nettemplates/TwoNodes50EachWithTwoRelays.json41
-rw-r--r--test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json36
-rw-r--r--test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json36
-rw-r--r--txnsync/bitmask.go250
-rw-r--r--txnsync/bitmask_test.go252
-rw-r--r--txnsync/bloomFilter.go239
-rw-r--r--txnsync/bloomFilter_test.go372
-rw-r--r--txnsync/emulatorCore_test.go267
-rw-r--r--txnsync/emulatorLogger_test.go151
-rw-r--r--txnsync/emulatorNode_test.go397
-rw-r--r--txnsync/emulatorTimer_test.go122
-rw-r--r--txnsync/emulator_test.go781
-rw-r--r--txnsync/encodedgroups_test.go132
-rw-r--r--txnsync/encodedgroupsmarshalers.go795
-rw-r--r--txnsync/encodedgroupstypes.go372
-rw-r--r--txnsync/encodedgroupsunmarshalers.go648
-rw-r--r--txnsync/exchange.go91
-rw-r--r--txnsync/incoming.go276
-rw-r--r--txnsync/incomingMsgQ.go372
-rw-r--r--txnsync/incomingMsgQ_test.go158
-rw-r--r--txnsync/incoming_test.go357
-rw-r--r--txnsync/interfaces.go116
-rw-r--r--txnsync/logger.go88
-rw-r--r--txnsync/mainloop.go445
-rw-r--r--txnsync/mainloop_test.go71
-rw-r--r--txnsync/metrics.go28
-rw-r--r--txnsync/msgbuffers.go76
-rw-r--r--txnsync/msgbuffers_test.go143
-rw-r--r--txnsync/msgorderingheap.go100
-rw-r--r--txnsync/msgorderingheap_test.go271
-rw-r--r--txnsync/msgp_gen.go35449
-rw-r--r--txnsync/msgp_gen_test.go1693
-rw-r--r--txnsync/outgoing.go348
-rw-r--r--txnsync/outgoing_test.go615
-rw-r--r--txnsync/peer.go852
-rw-r--r--txnsync/peer_test.go1011
-rw-r--r--txnsync/peerscheduler.go200
-rw-r--r--txnsync/peerscheduler_test.go283
-rw-r--r--txnsync/profiler.go210
-rw-r--r--txnsync/profiler_test.go269
-rw-r--r--txnsync/sent_filter.go82
-rw-r--r--txnsync/sent_filter_test.go57
-rw-r--r--txnsync/service.go78
-rw-r--r--txnsync/service_test.go171
-rw-r--r--txnsync/transactionCache.go374
-rw-r--r--txnsync/transactionCache_test.go333
-rw-r--r--txnsync/txngroups.go224
-rw-r--r--txnsync/txngroups_test.go530
-rw-r--r--util/bloom/bloom.go21
-rw-r--r--util/bloom/bloom_test.go28
-rw-r--r--util/bloom/generic.go27
-rw-r--r--util/bloom/xor.go277
-rw-r--r--util/bloom/xor_test.go322
-rw-r--r--util/compress/deflate.go174
-rw-r--r--util/compress/deflate_test.go123
-rw-r--r--util/compress/libdeflate/.cirrus.yml8
-rw-r--r--util/compress/libdeflate/.github/workflows/ci.yml123
-rw-r--r--util/compress/libdeflate/.gitignore21
-rw-r--r--util/compress/libdeflate/COPYING21
-rw-r--r--util/compress/libdeflate/Makefile372
-rw-r--r--util/compress/libdeflate/Makefile.msc65
-rw-r--r--util/compress/libdeflate/NEWS200
-rw-r--r--util/compress/libdeflate/README.md283
-rw-r--r--util/compress/libdeflate/common/common_defs.h334
-rw-r--r--util/compress/libdeflate/common/compiler_gcc.h201
-rw-r--r--util/compress/libdeflate/common/compiler_msc.h80
-rw-r--r--util/compress/libdeflate/lib/adler32.c130
-rw-r--r--util/compress/libdeflate/lib/adler32_vec_template.h124
-rw-r--r--util/compress/libdeflate/lib/arm/adler32_impl.h125
-rw-r--r--util/compress/libdeflate/lib/arm/cpu_features.c133
-rw-r--r--util/compress/libdeflate/lib/arm/cpu_features.h40
-rw-r--r--util/compress/libdeflate/lib/arm/crc32_impl.h247
-rw-r--r--util/compress/libdeflate/lib/arm/matchfinder_impl.h86
-rw-r--r--util/compress/libdeflate/lib/bt_matchfinder.h363
-rw-r--r--util/compress/libdeflate/lib/cpu_features_common.h88
-rw-r--r--util/compress/libdeflate/lib/crc32.c313
-rw-r--r--util/compress/libdeflate/lib/crc32_table.h526
-rw-r--r--util/compress/libdeflate/lib/crc32_vec_template.h61
-rw-r--r--util/compress/libdeflate/lib/decompress_template.h421
-rw-r--r--util/compress/libdeflate/lib/deflate_compress.c2854
-rw-r--r--util/compress/libdeflate/lib/deflate_compress.h13
-rw-r--r--util/compress/libdeflate/lib/deflate_constants.h66
-rw-r--r--util/compress/libdeflate/lib/deflate_decompress.c1000
-rw-r--r--util/compress/libdeflate/lib/gzip_compress.c95
-rw-r--r--util/compress/libdeflate/lib/gzip_constants.h45
-rw-r--r--util/compress/libdeflate/lib/gzip_decompress.c148
-rw-r--r--util/compress/libdeflate/lib/hc_matchfinder.h412
-rw-r--r--util/compress/libdeflate/lib/lib_common.h67
-rw-r--r--util/compress/libdeflate/lib/matchfinder_common.h176
-rw-r--r--util/compress/libdeflate/lib/unaligned.h228
-rw-r--r--util/compress/libdeflate/lib/utils.c142
-rw-r--r--util/compress/libdeflate/lib/x86/adler32_impl.h337
-rw-r--r--util/compress/libdeflate/lib/x86/cpu_features.c152
-rw-r--r--util/compress/libdeflate/lib/x86/cpu_features.h41
-rw-r--r--util/compress/libdeflate/lib/x86/crc32_impl.h92
-rw-r--r--util/compress/libdeflate/lib/x86/crc32_pclmul_template.h262
-rw-r--r--util/compress/libdeflate/lib/x86/decompress_impl.h31
-rw-r--r--util/compress/libdeflate/lib/x86/matchfinder_impl.h122
-rw-r--r--util/compress/libdeflate/lib/zlib_compress.c87
-rw-r--r--util/compress/libdeflate/lib/zlib_constants.h21
-rw-r--r--util/compress/libdeflate/lib/zlib_decompress.c108
-rw-r--r--util/compress/libdeflate/libdeflate.h366
-rw-r--r--util/compress/libdeflate/programs/benchmark.c696
-rw-r--r--util/compress/libdeflate/programs/checksum.c207
-rw-r--r--util/compress/libdeflate/programs/gzip.c658
-rw-r--r--util/compress/libdeflate/programs/prog_util.c496
-rw-r--r--util/compress/libdeflate/programs/prog_util.h179
-rw-r--r--util/compress/libdeflate/programs/test_checksums.c196
-rw-r--r--util/compress/libdeflate/programs/test_custom_malloc.c85
-rw-r--r--util/compress/libdeflate/programs/test_incomplete_codes.c385
-rw-r--r--util/compress/libdeflate/programs/test_litrunlen_overflow.c72
-rw-r--r--util/compress/libdeflate/programs/test_slow_decompression.c472
-rw-r--r--util/compress/libdeflate/programs/test_trailing_bytes.c152
-rw-r--r--util/compress/libdeflate/programs/test_util.c243
-rw-r--r--util/compress/libdeflate/programs/test_util.h67
-rw-r--r--util/compress/libdeflate/programs/tgetopt.c118
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/Makefile12
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/fuzz.c40
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/inputs/0bin500 -> 0 bytes
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/fuzz.c28
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/inputs/03
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/fuzz.c28
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/inputs/0bin187 -> 0 bytes
-rwxr-xr-xutil/compress/libdeflate/scripts/afl-fuzz/prepare_for_fuzz.sh14
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/fuzz.c28
-rw-r--r--util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/inputs/03
-rwxr-xr-xutil/compress/libdeflate/scripts/android_build.sh108
-rwxr-xr-xutil/compress/libdeflate/scripts/android_tests.sh69
-rwxr-xr-xutil/compress/libdeflate/scripts/checksum_benchmarks.sh167
-rwxr-xr-xutil/compress/libdeflate/scripts/detect.sh63
-rw-r--r--util/compress/libdeflate/scripts/exec_tests.sh34
-rw-r--r--util/compress/libdeflate/scripts/gen_crc32_multipliers.c108
-rw-r--r--util/compress/libdeflate/scripts/gen_crc32_table.c100
-rwxr-xr-xutil/compress/libdeflate/scripts/gzip_tests.sh490
-rwxr-xr-xutil/compress/libdeflate/scripts/make-windows-releases.sh20
-rwxr-xr-xutil/compress/libdeflate/scripts/msc_test.bat3
-rwxr-xr-xutil/compress/libdeflate/scripts/pgo_build.sh23
-rwxr-xr-xutil/compress/libdeflate/scripts/produce_gzip_benchmark_table.sh37
-rwxr-xr-xutil/compress/libdeflate/scripts/run_tests.sh329
-rw-r--r--util/db/dbutil.go2
-rw-r--r--util/db/initialize.go123
-rw-r--r--util/db/initialize_test.go246
-rw-r--r--util/s3/s3Helper.go11
-rw-r--r--util/timers/deadlineMonitor.go48
-rw-r--r--util/timers/interface.go21
-rw-r--r--util/timers/monotonic.go13
388 files changed, 18442 insertions, 80300 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8cf5a042a..27c616436 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -48,7 +48,7 @@ executors:
workflows:
version: 2
- build_pr:
+ "circleci_build_and_test":
jobs:
- codegen_verification
@@ -275,6 +275,8 @@ commands:
generic_test:
description: Run build tests from build workspace, for re-use by diferent architectures
parameters:
+ platform:
+ type: string
build_dir:
type: string
default: << pipeline.parameters.build_dir >>
@@ -335,6 +337,9 @@ commands:
key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
paths:
- tmp/go-cache
+ - upload_to_buildpulse:
+ platform: << parameters.platform >>
+ path: << parameters.result_path >>/<< parameters.result_subdir>>
upload_coverage:
description: Collect coverage reports and upload them
@@ -345,9 +350,45 @@ commands:
command: |
scripts/travis/upload_coverage.sh || true
+ upload_to_buildpulse:
+ description: Collect build reports and upload them
+ parameters:
+ platform:
+ type: string
+ path:
+ type: string
+ steps:
+ - run:
+ name: Send test results to BuildPulse
+ when: always
+ command: |
+ set -e
+ if ! ls << parameters.path >>/*/*.xml &> /dev/null; then exit 0; fi
+ sed -i"" -e 's/classname="/classname="<< parameters.platform >>-/' << parameters.path >>/*/*.xml
+ case "<< parameters.platform >>" in
+ arm64)
+ URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-arm64
+ SUM=53f94c29ad162c2b9ebb1f4a2f967f5262c0459ee4a0c34332977d8c89aafc18
+ ;;
+ amd64)
+ URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-amd64
+ SUM=4655e54d756580c0de0112cab488e6e08d0af75e9fc8caea2d63f9e13be8beb5
+ ;;
+ mac_amd64)
+ URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-darwin-amd64
+ SUM=2f9e20a6f683c80f35d04e36bc57ecfe605bb48fee5a1b8d8f7c45094028eea3
+ ;;
+ esac
+ curl -fsSL --retry 3 --retry-connrefused $URL > ./buildpulse-test-reporter
+ echo "$SUM *buildpulse-test-reporter" | shasum -a 256 -c --status
+ chmod +x ./buildpulse-test-reporter
+ ./buildpulse-test-reporter submit << parameters.path >> --account-id 23182699 --repository-id 191266671 || true
+
generic_integration:
description: Run integration tests from build workspace, for re-use by diferent architectures
parameters:
+ platform:
+ type: string
build_dir:
type: string
default: << pipeline.parameters.build_dir >>
@@ -401,6 +442,9 @@ commands:
root: << parameters.result_path >>
paths:
- << parameters.result_subdir >>
+ - upload_to_buildpulse:
+ platform: << parameters.platform >>
+ path: << parameters.result_path >>/<< parameters.result_subdir>>
tests_verification_command:
description: Check if all tests were run at least once and only once across all parallel runs
@@ -431,9 +475,14 @@ commands:
type: string
default: << pipeline.parameters.build_dir >>
steps:
+ - attach_workspace:
+ at: << parameters.build_dir >>
- run:
name: Upload binaries << parameters.platform >>
command: |
+ export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g')
+ export GOPATH="<< parameters.build_dir >>/go"
+ export NO_BUILD=true
export TRAVIS_BRANCH=${CIRCLE_BRANCH}
scripts/travis/deploy_packages.sh
- when:
@@ -473,13 +522,14 @@ jobs:
parameters:
platform:
type: string
- executor: << parameters.platform >>_large
+ executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 4
steps:
- prepare_build_dir
- prepare_go
- generic_test:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform >>_test
short_test_flag: "-short"
- upload_coverage
@@ -488,13 +538,14 @@ jobs:
parameters:
platform:
type: string
- executor: << parameters.platform >>_large
+ executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 4
steps:
- prepare_build_dir
- prepare_go
- generic_test:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform >>_test_nightly
no_output_timeout: 45m
- upload_coverage
@@ -508,13 +559,14 @@ jobs:
type: string
executor: << parameters.platform >>_large
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
+ parallelism: 2
environment:
E2E_TEST_FILTER: "GO"
steps:
- prepare_build_dir
- prepare_go
- generic_integration:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform >>_integration
short_test_flag: "-short"
@@ -531,6 +583,7 @@ jobs:
- prepare_build_dir
- prepare_go
- generic_integration:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform >>_integration_nightly
no_output_timeout: 45m
- slack/notify:
@@ -540,15 +593,16 @@ jobs:
parameters:
platform:
type: string
- executor: << parameters.platform >>_large
+ executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
+ parallelism: 2
environment:
E2E_TEST_FILTER: "EXPECT"
steps:
- prepare_build_dir
- prepare_go
- generic_integration:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform >>_e2e_expect
short_test_flag: "-short"
@@ -556,15 +610,16 @@ jobs:
parameters:
platform:
type: string
- executor: << parameters.platform >>_large
+ executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
+ parallelism: 2
environment:
E2E_TEST_FILTER: "EXPECT"
steps:
- prepare_build_dir
- prepare_go
- generic_integration:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform>>_e2e_expect_nightly
no_output_timeout: 45m
- slack/notify:
@@ -576,13 +631,13 @@ jobs:
type: string
executor: << parameters.platform >>_large
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
environment:
E2E_TEST_FILTER: "SCRIPTS"
steps:
- prepare_build_dir
- prepare_go
- generic_integration:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform >>_e2e_subs
short_test_flag: "-short"
@@ -592,13 +647,13 @@ jobs:
type: string
executor: << parameters.platform >>_large
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
environment:
E2E_TEST_FILTER: "SCRIPTS"
steps:
- prepare_build_dir
- prepare_go
- generic_integration:
+ platform: << parameters.platform >>
result_subdir: << parameters.platform >>_e2e_subs_nightly
no_output_timeout: 45m
- slack/notify:
@@ -646,10 +701,8 @@ jobs:
executor: << parameters.platform >>_medium
steps:
- prepare_build_dir
- - checkout
- prepare_go
- upload_binaries_command:
platform: << parameters.platform >>
- - slack/notify: &slack-fail-event
- event: fail
- template: basic_fail_1
+ - slack/notify:
+ <<: *slack-fail-event
diff --git a/.travis.yml b/.travis.yml
index 201be5dd2..fa0457051 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,4 @@
+name: "Travis Windows build"
os: linux
dist: bionic
language: minimal
@@ -6,135 +7,32 @@ language: minimal
if: tag IS blank
stages:
- - name: build_commit
- if: NOT (branch =~ /^hotfix\//) AND NOT (branch =~ /^rel\//) AND type != pull_request
- - name: build_pr
+ - name: build_windows_pr
if: type = pull_request
- - name: build_release
+ - name: build_windows_release
if: (branch =~ /^hotfix\// OR branch =~ /^rel\//) AND type != pull_request
jobs:
- allow_failures:
- - name: External ARM64 Integration Test
- - name: External ARM Build
- - name: Test Release Builds
include:
- - stage: build_commit
- os: linux
+ - stage: build_windows_pr
+ os: windows
+ name: Windows x64 PR Build
+ cache:
+ directories:
+ - $HOME/AppData/Local/Temp/chocolatey
+ - /C/tools/msys64
script:
- - scripts/travis/build_test.sh
+ - $mingw64 scripts/travis/build_test.sh
- - stage: build_pr
- os: linux
- name: Ubuntu AMD64 Build
+ - stage: build_windows_release
+ os: windows
+ name: Windows x64 Release Build
+ cache:
+ directories:
+ - $HOME/AppData/Local/Temp/chocolatey
+ - /C/tools/msys64
script:
- - scripts/travis/build_test.sh
- - # same stage, parallel job
- os: linux
- name: Ubuntu AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- name: External ARM64 Build
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/build_test.sh
- - # same stage, parallel job
- name: External ARM64 Integration Test
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Build
- script:
- - scripts/travis/build_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: windows
- name: Windows x64 Build
- cache:
- directories:
- - $HOME/AppData/Local/Temp/chocolatey
- - /C/tools/msys64
- script:
- - $mingw64 scripts/travis/build_test.sh
-
- - stage: build_release
- os: linux
- name: Ubuntu AMD64 Build
- script:
- - ./scripts/travis/build_test.sh
- - # same stage, parallel job
- os: linux
- name: Ubuntu AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- name: External ARM64 Build
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/build_test.sh
- - # same stage, parallel job
- name: External ARM64 Integration Test
- os: linux
- env:
- - BUILD_TYPE: "external_build"
- - TARGET_PLATFORM: "linux-arm64"
- addons:
- apt:
- packages:
- - awscli
- script:
- - scripts/travis/external_build.sh ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Build
- script:
- - scripts/travis/build_test.sh
- - # same stage, parallel job
- os: osx
- osx_image: xcode11
- name: MacOS AMD64 Integration Test
- script:
- - ./scripts/travis/integration_test.sh
- - # same stage, parallel job
- os: windows
- name: Windows x64 Build
- cache:
- directories:
- - $HOME/AppData/Local/Temp/chocolatey
- - /C/tools/msys64
- script:
- - $mingw64 scripts/travis/build_test.sh
+ - $mingw64 scripts/travis/build_test.sh
# Don't rebuild libsodium every time
cache:
@@ -144,38 +42,26 @@ cache:
before_install:
- |-
- case $TRAVIS_OS_NAME in
- linux)
- # Disable sometimes-broken sources.list in Travis base images
- sudo rm -vf /etc/apt/sources.list.d/*
- ;;
- windows)
- [[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
- choco uninstall -y mingw
- choco upgrade --no-progress -y msys2
- export msys2='cmd //C RefreshEnv.cmd '
- export msys2+='& set MSYS=winsymlinks:nativestrict '
- export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start'
- export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --"
- export msys2+=" -msys2 -c "\"\$@"\" --"
- $msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-toolchain unzip
- ## Install more MSYS2 packages from https://packages.msys2.org/base here
- taskkill //IM gpg-agent.exe //F # https://travis-ci.community/t/4967
- export PATH=/C/tools/msys64/mingw64/bin:$PATH
- export MAKE=mingw32-make # so that Autotools can find it
- ;;
- esac
- docker load -i $HOME/docker_cache/images.tar || true
+ [[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
+ choco uninstall -y mingw
+ choco upgrade --no-progress -y msys2
+ export msys2='cmd //C RefreshEnv.cmd '
+ export msys2+='& set MSYS=winsymlinks:nativestrict '
+ export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start'
+ export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --"
+ export msys2+=" -msys2 -c "\"\$@"\" --"
+ $msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-toolchain unzip
+ ## Install more MSYS2 packages from https://packages.msys2.org/base here
+ taskkill //IM gpg-agent.exe //F # https://travis-ci.community/t/4967
+ export PATH=/C/tools/msys64/mingw64/bin:$PATH
+ export MAKE=mingw32-make # so that Autotools can find it
+ docker load -i $HOME/docker_cache/images.tar || true
before_cache:
- |-
- case $TRAVIS_OS_NAME in
- windows)
- # https://unix.stackexchange.com/a/137322/107554
- $msys2 pacman --sync --clean --noconfirm
- ;;
- esac
- docker save -o $HOME/docker_cache/images.tar $(docker images -a -q)
+ # https://unix.stackexchange.com/a/137322/107554
+ $msys2 pacman --sync --clean --noconfirm
+ docker save -o $HOME/docker_cache/images.tar $(docker images -a -q)
addons:
apt:
diff --git a/Makefile b/Makefile
index 77a965887..b17010bfe 100644
--- a/Makefile
+++ b/Makefile
@@ -76,7 +76,7 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \
UNIT_TEST_SOURCES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && go list ./... | grep -v /go-algorand/test/ ))
ALGOD_API_PACKAGES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && cd daemon/algod/api; go list ./... ))
-MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/compactcert ./data/basics ./data/transactions ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./compactcert ./txnsync ./data/pooldata
+MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/compactcert ./data/basics ./data/transactions ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./compactcert ./data/account
default: build
diff --git a/agreement/abstractions.go b/agreement/abstractions.go
index 99bd45d98..390b32dfd 100644
--- a/agreement/abstractions.go
+++ b/agreement/abstractions.go
@@ -19,7 +19,6 @@ package agreement
import (
"context"
"errors"
- "time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -74,10 +73,7 @@ var ErrAssembleBlockRoundStale = errors.New("requested round for AssembleBlock i
// Round.
type BlockFactory interface {
// AssembleBlock produces a new ValidatedBlock which is suitable for proposal
- // at a given Round. The time argument specifies a target deadline by
- // which the block should be produced. Specifically, the deadline can
- // cause the factory to add fewer transactions to the block in question
- // than might otherwise be possible.
+ // at a given Round.
//
// AssembleBlock should produce a ValidatedBlock for which the corresponding
// BlockValidator validates (i.e. for which BlockValidator.Validate
@@ -88,7 +84,7 @@ type BlockFactory interface {
// produce a ValidatedBlock for the given round. If an insufficient number of
// nodes on the network can assemble entries, the agreement protocol may
// lose liveness.
- AssembleBlock(basics.Round, time.Time) (ValidatedBlock, error)
+ AssembleBlock(basics.Round) (ValidatedBlock, error)
}
// A Ledger represents the sequence of Entries agreed upon by the protocol.
@@ -128,14 +124,14 @@ type LedgerReader interface {
// protocol may lose liveness.
Seed(basics.Round) (committee.Seed, error)
- // Lookup returns the AccountData associated with some Address
- // at the conclusion of a given round.
+ // LookupAgreement returns the AccountData associated with some Address
+ // needed by agreement at the conclusion of a given round.
//
// This method returns an error if the given Round has not yet been
// confirmed. It may also return an error if the given Round is
// unavailable by the storage device. In that case, the agreement
// protocol may lose liveness.
- Lookup(basics.Round, basics.Address) (basics.AccountData, error)
+ LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error)
// Circulation returns the total amount of money in circulation at the
// conclusion of a given round.
@@ -229,6 +225,10 @@ type KeyManager interface {
// valid for the provided votingRound, and were available at
// keysRound.
VotingKeys(votingRound, keysRound basics.Round) []account.Participation
+
+ // Record indicates that the given participation action has been taken.
+ // The operation needs to be asynchronous to avoid impacting agreement.
+ Record(account basics.Address, round basics.Round, participationType account.ParticipationAction)
}
// MessageHandle is an ID referring to a specific message.
diff --git a/agreement/agreementtest/keyManager.go b/agreement/agreementtest/keyManager.go
index 384fba8cd..340c8f40e 100644
--- a/agreement/agreementtest/keyManager.go
+++ b/agreement/agreementtest/keyManager.go
@@ -21,7 +21,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
)
-// SimpleKeyManager provides a simple implementation of a KeyManager.
+// SimpleKeyManager provides a simple implementation of a KeyManager for unit tests.
type SimpleKeyManager []account.Participation
// VotingKeys implements KeyManager.VotingKeys.
@@ -37,7 +37,8 @@ func (m SimpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Part
// DeleteOldKeys implements KeyManager.DeleteOldKeys.
func (m SimpleKeyManager) DeleteOldKeys(r basics.Round) {
- // for _, acc := range m {
- // acc.DeleteOldKeys(r)
- // }
+}
+
+// Record implements KeyManager.Record.
+func (m SimpleKeyManager) Record(account basics.Address, round basics.Round, action account.ParticipationAction) {
}
diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go
index 086349b82..c2dcbe233 100644
--- a/agreement/agreementtest/simulate_test.go
+++ b/agreement/agreementtest/simulate_test.go
@@ -92,7 +92,7 @@ type testBlockFactory struct {
Owner int
}
-func (f testBlockFactory) AssembleBlock(r basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (f testBlockFactory) AssembleBlock(r basics.Round) (agreement.ValidatedBlock, error) {
return testValidatedBlock{Inside: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: r}}}, nil
}
@@ -203,7 +203,7 @@ func (l *testLedger) LookupDigest(r basics.Round) (crypto.Digest, error) {
return l.entries[r].Digest(), nil
}
-func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountData, error) {
+func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.OnlineAccountData, error) {
l.mu.Lock()
defer l.mu.Unlock()
@@ -211,7 +211,7 @@ func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountDat
err := fmt.Errorf("Lookup called on future round: %v > %v! (this is probably a bug)", r, l.nextRound)
panic(err)
}
- return l.state[a], nil
+ return l.state[a].OnlineAccountData(), nil
}
func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
@@ -226,7 +226,7 @@ func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
var sum basics.MicroAlgos
var overflowed bool
for _, rec := range l.state {
- sum, overflowed = basics.OAddA(sum, rec.VotingStake())
+ sum, overflowed = basics.OAddA(sum, rec.OnlineAccountData().VotingStake())
if overflowed {
panic("circulation computation overflowed")
}
diff --git a/agreement/asyncVoteVerifier.go b/agreement/asyncVoteVerifier.go
index 59bb90b09..072fb2f15 100644
--- a/agreement/asyncVoteVerifier.go
+++ b/agreement/asyncVoteVerifier.go
@@ -100,7 +100,7 @@ func (avv *AsyncVoteVerifier) executeVoteVerification(task interface{}) interfac
select {
case <-req.ctx.Done():
// request cancelled, return an error response on the channel
- return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req}
+ return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req, index: req.index}
default:
// request was not cancelled, so we verify it here and return the result on the channel
v, err := req.uv.verify(req.l)
@@ -119,7 +119,7 @@ func (avv *AsyncVoteVerifier) executeEqVoteVerification(task interface{}) interf
select {
case <-req.ctx.Done():
// request cancelled, return an error response on the channel
- return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req}
+ return &asyncVerifyVoteResponse{err: req.ctx.Err(), cancelled: true, req: &req, index: req.index}
default:
// request was not cancelled, so we verify it here and return the result on the channel
ev, err := req.uev.verify(req.l)
@@ -131,7 +131,7 @@ func (avv *AsyncVoteVerifier) executeEqVoteVerification(task interface{}) interf
}
}
-func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index int, message message, out chan<- asyncVerifyVoteResponse) {
+func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
@@ -140,16 +140,18 @@ func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader,
// if we're done while waiting for room in the requests channel, don't queue the request
req := asyncVerifyVoteRequest{ctx: verctx, l: l, uv: &uv, index: index, message: message, out: out}
avv.wg.Add(1)
- if avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeVoteVerification, req, avv.execpoolOut) != nil {
+ if err := avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeVoteVerification, req, avv.execpoolOut); err != nil {
// we want to call "wg.Done()" here to "fix" the accounting of the number of pending tasks.
// if we got a non-nil, it means that our context has expired, which means that we won't see this task
// getting to the verification function.
avv.wg.Done()
+ return err
}
}
+ return nil
}
-func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index int, message message, out chan<- asyncVerifyVoteResponse) {
+func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
@@ -158,13 +160,15 @@ func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReade
// if we're done while waiting for room in the requests channel, don't queue the request
req := asyncVerifyVoteRequest{ctx: verctx, l: l, uev: &uev, index: index, message: message, out: out}
avv.wg.Add(1)
- if avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeEqVoteVerification, req, avv.execpoolOut) != nil {
+ if err := avv.backlogExecPool.EnqueueBacklog(avv.ctx, avv.executeEqVoteVerification, req, avv.execpoolOut); err != nil {
// we want to call "wg.Done()" here to "fix" the accounting of the number of pending tasks.
// if we got a non-nil, it means that our context has expired, which means that we won't see this task
// getting to the verification function.
avv.wg.Done()
+ return err
}
}
+ return nil
}
// Quit tells the AsyncVoteVerifier to shutdown and waits until all workers terminate.
diff --git a/agreement/asyncVoteVerifier_test.go b/agreement/asyncVoteVerifier_test.go
new file mode 100644
index 000000000..6cfadedd8
--- /dev/null
+++ b/agreement/asyncVoteVerifier_test.go
@@ -0,0 +1,50 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+type expiredExecPool struct {
+ execpool.ExecutionPool
+}
+
+func (fp *expiredExecPool) EnqueueBacklog(enqueueCtx context.Context, t execpool.ExecFunc, arg interface{}, out chan interface{}) error {
+ // generate an error, to see if we correctly report that on the verifyVote() call.
+ return context.Canceled
+}
+
+// Test async vote verifier against a full execution pool.
+func TestVerificationAgainstFullExecutionPool(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ mainPool := execpool.MakePool(t)
+ defer mainPool.Shutdown()
+
+ voteVerifier := MakeAsyncVoteVerifier(&expiredExecPool{mainPool})
+ defer voteVerifier.Quit()
+ verifyErr := voteVerifier.verifyVote(context.Background(), nil, unauthenticatedVote{}, 0, message{}, make(chan<- asyncVerifyVoteResponse, 1))
+ require.Error(t, context.Canceled, verifyErr)
+ verifyEqVoteErr := voteVerifier.verifyEqVote(context.Background(), nil, unauthenticatedEquivocationVote{}, 0, message{}, make(chan<- asyncVerifyVoteResponse, 1))
+ require.Error(t, context.Canceled, verifyEqVoteErr)
+}
diff --git a/agreement/common_test.go b/agreement/common_test.go
index ff453c1f6..9ecf5b4b9 100644
--- a/agreement/common_test.go
+++ b/agreement/common_test.go
@@ -21,7 +21,6 @@ import (
"fmt"
"math/rand"
"testing"
- "time"
"github.com/algorand/go-deadlock"
"github.com/stretchr/testify/require"
@@ -180,7 +179,7 @@ type testBlockFactory struct {
Owner int
}
-func (f testBlockFactory) AssembleBlock(r basics.Round, deadline time.Time) (ValidatedBlock, error) {
+func (f testBlockFactory) AssembleBlock(r basics.Round) (ValidatedBlock, error) {
return testValidatedBlock{Inside: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: r}}}, nil
}
@@ -320,7 +319,7 @@ func (l *testLedger) LookupDigest(r basics.Round) (crypto.Digest, error) {
return l.entries[r].Digest(), nil
}
-func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountData, error) {
+func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.OnlineAccountData, error) {
l.mu.Lock()
defer l.mu.Unlock()
@@ -330,10 +329,10 @@ func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountDat
}
if l.maxNumBlocks != 0 && r+round(l.maxNumBlocks) < l.nextRound {
- return basics.AccountData{}, &LedgerDroppedRoundError{}
+ return basics.OnlineAccountData{}, &LedgerDroppedRoundError{}
}
- return l.state[a], nil
+ return l.state[a].OnlineAccountData(), nil
}
func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
@@ -348,7 +347,7 @@ func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
var sum basics.MicroAlgos
var overflowed bool
for _, rec := range l.state {
- sum, overflowed = basics.OAddA(sum, rec.VotingStake())
+ sum, overflowed = basics.OAddA(sum, rec.OnlineAccountData().VotingStake())
if overflowed {
panic("circulation computation overflowed")
}
@@ -422,7 +421,7 @@ type testAccountData struct {
}
func makeProposalsTesting(accs testAccountData, round basics.Round, period period, factory BlockFactory, ledger Ledger) (ps []proposal, vs []vote) {
- ve, err := factory.AssembleBlock(round, time.Now().Add(time.Minute))
+ ve, err := factory.AssembleBlock(round)
if err != nil {
logging.Base().Errorf("Could not generate a proposal for round %d: %v", round, err)
return nil, nil
@@ -534,7 +533,7 @@ func (v *voteMakerHelper) MakeRandomProposalValue() *proposalValue {
func (v *voteMakerHelper) MakeRandomProposalPayload(t *testing.T, r round) (*proposal, *proposalValue) {
f := testBlockFactory{Owner: 1}
- ve, err := f.AssembleBlock(r, time.Now().Add(time.Minute))
+ ve, err := f.AssembleBlock(r)
require.NoError(t, err)
var payload unauthenticatedProposal
diff --git a/agreement/cryptoVerifier.go b/agreement/cryptoVerifier.go
index 0c79496de..4f84ca3ba 100644
--- a/agreement/cryptoVerifier.go
+++ b/agreement/cryptoVerifier.go
@@ -205,7 +205,14 @@ func (c *poolCryptoVerifier) voteFillWorker(toBundleWait chan<- bundleFuture) {
}
uv := votereq.message.UnauthenticatedVote
- c.voteVerifier.verifyVote(votereq.ctx, c.ledger, uv, votereq.TaskIndex, votereq.message, c.votes.out)
+ err := c.voteVerifier.verifyVote(votereq.ctx, c.ledger, uv, votereq.TaskIndex, votereq.message, c.votes.out)
+ if err != nil && c.votes.out != nil {
+ select {
+ case c.votes.out <- asyncVerifyVoteResponse{index: votereq.TaskIndex, err: err, cancelled: true}:
+ default:
+ c.log.Infof("poolCryptoVerifier.voteFillWorker unable to write failed enqueue response to output channel")
+ }
+ }
case bundlereq, ok := <-bundlesin:
if !ok {
bundlesin = nil
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index bced9c9f7..78e6f6488 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-deadlock"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -35,6 +36,7 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
)
var _ = fmt.Printf
@@ -314,7 +316,7 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) {
pn := &asyncPseudonode{
factory: testBlockFactory{Owner: 0},
validator: testBlockValidator{},
- keys: simpleKeyManager(participations),
+ keys: makeRecordingKeyManager(participations),
ledger: ledger,
log: serviceLogger{logging.Base()},
}
@@ -385,3 +387,26 @@ func BenchmarkCryptoVerifierBundleVertification(b *testing.B) {
<-c
}
}
+
+// TestCryptoVerifierVerificationFailures tests to see that the cryptoVerifier.VerifyVote returns an error in the vote response
+// when being unable to enqueue a vote.
+func TestCryptoVerifierVerificationFailures(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ mainPool := execpool.MakePool(t)
+ defer mainPool.Shutdown()
+
+ voteVerifier := MakeAsyncVoteVerifier(&expiredExecPool{mainPool})
+ defer voteVerifier.Quit()
+
+ cryptoVerifier := makeCryptoVerifier(nil, nil, voteVerifier, logging.TestingLog(t))
+ defer cryptoVerifier.Quit()
+
+ cryptoVerifier.VerifyVote(context.Background(), cryptoVoteRequest{message: message{Tag: protocol.AgreementVoteTag}, Round: basics.Round(8), TaskIndex: 14})
+ // read the failed response from VerifiedVotes:
+ votesout := cryptoVerifier.VerifiedVotes()
+ voteResponse := <-votesout
+ require.Equal(t, context.Canceled, voteResponse.err)
+ require.True(t, voteResponse.cancelled)
+ require.Equal(t, 14, voteResponse.index)
+}
diff --git a/agreement/demux_test.go b/agreement/demux_test.go
index f099d79f8..e351b9a79 100644
--- a/agreement/demux_test.go
+++ b/agreement/demux_test.go
@@ -484,9 +484,9 @@ func (t *demuxTester) LookupDigest(basics.Round) (crypto.Digest, error) {
}
// implement Ledger
-func (t *demuxTester) Lookup(basics.Round, basics.Address) (basics.AccountData, error) {
+func (t *demuxTester) LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error) {
// we don't care about this function in this test.
- return basics.AccountData{}, nil
+ return basics.OnlineAccountData{}, nil
}
// implement Ledger
diff --git a/agreement/fuzzer/fuzzer_test.go b/agreement/fuzzer/fuzzer_test.go
index 8c526b13d..c6cc91be7 100644
--- a/agreement/fuzzer/fuzzer_test.go
+++ b/agreement/fuzzer/fuzzer_test.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/agreement/agreementtest"
"github.com/algorand/go-algorand/agreement/gossip"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -132,7 +133,7 @@ func (n *Fuzzer) initAgreementNode(nodeID int, filters ...NetworkFilterFactory)
Logger: logger,
Ledger: n.ledgers[nodeID],
Network: gossip.WrapNetwork(n.facades[nodeID], logger),
- KeyManager: simpleKeyManager(n.accounts[nodeID : nodeID+1]),
+ KeyManager: agreementtest.SimpleKeyManager(n.accounts[nodeID : nodeID+1]),
BlockValidator: n.blockValidator,
BlockFactory: testBlockFactory{Owner: nodeID},
Clock: n.clocks[nodeID],
diff --git a/agreement/fuzzer/ledger_test.go b/agreement/fuzzer/ledger_test.go
index 15a3fbebe..9866f53c4 100644
--- a/agreement/fuzzer/ledger_test.go
+++ b/agreement/fuzzer/ledger_test.go
@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"math/rand"
- "time"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -109,7 +108,7 @@ type testBlockFactory struct {
Owner int
}
-func (f testBlockFactory) AssembleBlock(r basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (f testBlockFactory) AssembleBlock(r basics.Round) (agreement.ValidatedBlock, error) {
return testValidatedBlock{Inside: bookkeeping.Block{BlockHeader: bookkeeping.BlockHeader{Round: r}}}, nil
}
@@ -226,7 +225,7 @@ func (l *testLedger) LookupDigest(r basics.Round) (crypto.Digest, error) {
return l.entries[r].Digest(), nil
}
-func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountData, error) {
+func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.OnlineAccountData, error) {
l.mu.Lock()
defer l.mu.Unlock()
@@ -234,7 +233,7 @@ func (l *testLedger) Lookup(r basics.Round, a basics.Address) (basics.AccountDat
err := fmt.Errorf("Lookup called on future round: %d >= %d! (this is probably a bug)", r, l.nextRound)
panic(err)
}
- return l.state[a], nil
+ return l.state[a].OnlineAccountData(), nil
}
func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
@@ -249,7 +248,7 @@ func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
var sum basics.MicroAlgos
var overflowed bool
for _, rec := range l.state {
- sum, overflowed = basics.OAddA(sum, rec.VotingStake())
+ sum, overflowed = basics.OAddA(sum, rec.OnlineAccountData().VotingStake())
if overflowed {
panic("circulation computation overflowed")
}
diff --git a/agreement/keyManager_test.go b/agreement/keyManager_test.go
new file mode 100644
index 000000000..f992e01f0
--- /dev/null
+++ b/agreement/keyManager_test.go
@@ -0,0 +1,74 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "testing"
+
+ "github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+func makeRecordingKeyManager(accounts []account.Participation) *recordingKeyManager {
+ return &recordingKeyManager{
+ keys: accounts,
+ recording: make(map[basics.Address]map[account.ParticipationAction]basics.Round),
+ }
+}
+
+// recordingKeyManager provides a simple implementation of a KeyManager for unit tests.
+type recordingKeyManager struct {
+ keys []account.Participation
+ recording map[basics.Address]map[account.ParticipationAction]basics.Round
+ mutex deadlock.Mutex
+}
+
+// VotingKeys implements KeyManager.VotingKeys.
+func (m *recordingKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
+ var km []account.Participation
+ for _, acc := range m.keys {
+ if acc.OverlapsInterval(votingRound, votingRound) {
+ km = append(km, acc)
+ }
+ }
+ return km
+}
+
+// DeleteOldKeys implements KeyManager.DeleteOldKeys.
+func (m *recordingKeyManager) DeleteOldKeys(r basics.Round) {
+}
+
+// Record implements KeyManager.Record.
+func (m *recordingKeyManager) Record(acct basics.Address, round basics.Round, action account.ParticipationAction) {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ if _, ok := m.recording[acct]; !ok {
+ m.recording[acct] = make(map[account.ParticipationAction]basics.Round)
+ }
+ m.recording[acct][action] = round
+}
+
+// ValidateVoteRound requires that the given address voted on a particular round.
+func (m *recordingKeyManager) ValidateVoteRound(t *testing.T, address basics.Address, round basics.Round) {
+ m.mutex.Lock()
+ require.Equal(t, round, m.recording[address][account.Vote])
+ require.Equal(t, round, m.recording[address][account.BlockProposal])
+ m.mutex.Unlock()
+}
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index 61db86db6..4eb67496e 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -6,6 +6,7 @@ import (
"sort"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/msgp/msgp"
@@ -1354,120 +1355,124 @@ func (z period) MsgIsZero() bool {
func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(27)
- var zb0003Mask uint32 /* 32 bits */
+ zb0004Len := uint32(28)
+ var zb0004Mask uint64 /* 34 bits */
if len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x10000
+ }
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000000
+ zb0004Len--
+ zb0004Mask |= 0x20000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000000
+ zb0004Len--
+ zb0004Mask |= 0x40000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000000
+ zb0004Len--
+ zb0004Mask |= 0x80000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x40000000
+ zb0004Len--
+ zb0004Mask |= 0x100000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
@@ -1487,132 +1492,144 @@ func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).unauthenticatedProposal.OriginalPeriod))
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.OriginalProposer.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove)
@@ -1630,214 +1647,214 @@ func (_ *proposal) CanMarshalMsg(z interface{}) bool {
func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0005)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -1851,44 +1868,73 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.SeedProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SeedProof")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0007)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalProposer")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -1899,11 +1945,11 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = proposal{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -2043,27 +2089,27 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(protocol.NumCompactCertTypes))
+ if zb0011 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0009 {
+ if zb0012 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0008)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
}
- for zb0008 > 0 {
+ for zb0011 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0008--
+ zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -2076,6 +2122,33 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0014 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
@@ -2090,13 +2163,13 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "oper":
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0015 uint64
+ zb0015, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0015)
}
case "oprop":
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
@@ -2132,13 +2205,17 @@ func (z *proposal) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).unauthenticatedProposal.Block.Payset.Msgsize() + 5 + (*z).unauthenticatedProposal.SeedProof.Msgsize() + 5 + msgp.Uint64Size + 6 + (*z).unauthenticatedProposal.OriginalProposer.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *proposal) MsgIsZero() bool {
- return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero())
+ return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -3017,124 +3094,128 @@ func (z step) MsgIsZero() bool {
func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(28)
- var zb0003Mask uint64 /* 33 bits */
+ zb0004Len := uint32(29)
+ var zb0004Mask uint64 /* 35 bits */
if len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).unauthenticatedProposal.OriginalPeriod == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x10000
}
if (*z).unauthenticatedProposal.OriginalProposer.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x20000
+ }
+ if len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).PriorVote.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).unauthenticatedProposal.SeedProof.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000000
+ zb0004Len--
+ zb0004Mask |= 0x20000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000000
+ zb0004Len--
+ zb0004Mask |= 0x40000000
}
if (*z).unauthenticatedProposal.Block.Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000000
+ zb0004Len--
+ zb0004Mask |= 0x80000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000000
+ zb0004Len--
+ zb0004Mask |= 0x100000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000000
+ zb0004Len--
+ zb0004Mask |= 0x200000000
}
if (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x100000000
+ zb0004Len--
+ zb0004Mask |= 0x400000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
@@ -3154,137 +3235,149 @@ func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).unauthenticatedProposal.Block.BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).unauthenticatedProposal.OriginalPeriod))
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.OriginalProposer.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "pv"
o = append(o, 0xa2, 0x70, 0x76)
o = (*z).PriorVote.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).unauthenticatedProposal.SeedProof.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).unauthenticatedProposal.Block.Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove)
@@ -3302,214 +3395,214 @@ func (_ *transmittedPayload) CanMarshalMsg(z interface{}) bool {
func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0005)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -3523,52 +3616,81 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.SeedProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SeedProof")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0007)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalProposer")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).PriorVote.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "PriorVote")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -3579,11 +3701,11 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = transmittedPayload{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -3723,27 +3845,27 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(protocol.NumCompactCertTypes))
+ if zb0011 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0009 {
+ if zb0012 {
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0008)
+ (*z).unauthenticatedProposal.Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
}
- for zb0008 > 0 {
+ for zb0011 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0008--
+ zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -3756,6 +3878,33 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).unauthenticatedProposal.Block.BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0014 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ }
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).unauthenticatedProposal.Block.Payset.UnmarshalMsg(bts)
if err != nil {
@@ -3770,13 +3919,13 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "oper":
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0015 uint64
+ zb0015, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0015)
}
case "oprop":
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
@@ -3818,13 +3967,17 @@ func (z *transmittedPayload) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).unauthenticatedProposal.Block.Payset.Msgsize() + 5 + (*z).unauthenticatedProposal.SeedProof.Msgsize() + 5 + msgp.Uint64Size + 6 + (*z).unauthenticatedProposal.OriginalProposer.Msgsize() + 3 + (*z).PriorVote.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *transmittedPayload) MsgIsZero() bool {
- return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero())
+ return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.CompactCert) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -4494,120 +4647,124 @@ func (z *unauthenticatedEquivocationVote) MsgIsZero() bool {
func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(27)
- var zb0003Mask uint32 /* 31 bits */
+ zb0004Len := uint32(28)
+ var zb0004Mask uint64 /* 33 bits */
if len((*z).Block.BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).Block.BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).Block.BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).Block.BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).Block.BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
}
if (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).OriginalPeriod == 0 {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).OriginalProposer.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x10000
+ }
+ if len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).Block.BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).Block.BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).Block.BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).SeedProof.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).Block.BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).Block.BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).Block.BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).Block.BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
if (*z).Block.Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x8000000
+ zb0004Len--
+ zb0004Mask |= 0x20000000
}
if (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000000
+ zb0004Len--
+ zb0004Mask |= 0x40000000
}
if (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000000
+ zb0004Len--
+ zb0004Mask |= 0x80000000
}
if (*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x40000000
+ zb0004Len--
+ zb0004Mask |= 0x100000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).Block.BlockHeader.CompactCert == nil {
@@ -4627,132 +4784,144 @@ func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).Block.BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).Block.BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).Block.BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "oper"
o = append(o, 0xa4, 0x6f, 0x70, 0x65, 0x72)
o = msgp.AppendUint64(o, uint64((*z).OriginalPeriod))
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "oprop"
o = append(o, 0xa5, 0x6f, 0x70, 0x72, 0x6f, 0x70)
o = (*z).OriginalProposer.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).Block.BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).Block.BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).Block.BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "sdpf"
o = append(o, 0xa4, 0x73, 0x64, 0x70, 0x66)
o = (*z).SeedProof.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).Block.BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).Block.BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).Block.BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).Block.BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).Block.Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x10000000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).Block.BlockHeader.UpgradeVote.UpgradeApprove)
@@ -4770,214 +4939,214 @@ func (_ *unauthenticatedProposal) CanMarshalMsg(z interface{}) bool {
func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).Block.BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).Block.BlockHeader.CompactCert = nil
} else if (*z).Block.BlockHeader.CompactCert == nil {
- (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0005)
+ (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -4991,44 +5160,73 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
(*z).Block.BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Block.Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).SeedProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SeedProof")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).OriginalPeriod = period(zb0007)
+ (*z).OriginalPeriod = period(zb0010)
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).OriginalProposer.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalProposer")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -5039,11 +5237,11 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = unauthenticatedProposal{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -5183,27 +5381,27 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
case "cc":
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(protocol.NumCompactCertTypes))
+ if zb0011 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0009 {
+ if zb0012 {
(*z).Block.BlockHeader.CompactCert = nil
} else if (*z).Block.BlockHeader.CompactCert == nil {
- (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0008)
+ (*z).Block.BlockHeader.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState, zb0011)
}
- for zb0008 > 0 {
+ for zb0011 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 bookkeeping.CompactCertState
- zb0008--
+ zb0011--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -5216,6 +5414,33 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
(*z).Block.BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0014 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ }
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).Block.Payset.UnmarshalMsg(bts)
if err != nil {
@@ -5230,13 +5455,13 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
case "oper":
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0015 uint64
+ zb0015, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).OriginalPeriod = period(zb0010)
+ (*z).OriginalPeriod = period(zb0015)
}
case "oprop":
bts, err = (*z).OriginalProposer.UnmarshalMsg(bts)
@@ -5272,13 +5497,17 @@ func (z *unauthenticatedProposal) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).Block.Payset.Msgsize() + 5 + (*z).SeedProof.Msgsize() + 5 + msgp.Uint64Size + 6 + (*z).OriginalProposer.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *unauthenticatedProposal) MsgIsZero() bool {
- return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.CompactCert) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero())
+ return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnRoot.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.CompactCert) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
diff --git a/agreement/player_permutation_test.go b/agreement/player_permutation_test.go
index 2b832c63b..960669483 100644
--- a/agreement/player_permutation_test.go
+++ b/agreement/player_permutation_test.go
@@ -19,7 +19,6 @@ package agreement
import (
"fmt"
"testing"
- "time"
"github.com/stretchr/testify/require"
@@ -32,7 +31,7 @@ import (
func makeRandomProposalPayload(r round) *proposal {
f := testBlockFactory{Owner: 1}
- ve, _ := f.AssembleBlock(r, time.Time{})
+ ve, _ := f.AssembleBlock(r)
var payload unauthenticatedProposal
payload.Block = ve.Block()
diff --git a/agreement/proposal.go b/agreement/proposal.go
index e823ce0ce..f5256decb 100644
--- a/agreement/proposal.go
+++ b/agreement/proposal.go
@@ -184,7 +184,7 @@ func verifyNewSeed(p unauthenticatedProposal, ledger LedgerReader) error {
}
balanceRound := balanceRound(rnd, cparams)
- proposerRecord, err := ledger.Lookup(balanceRound, value.OriginalProposer)
+ proposerRecord, err := ledger.LookupAgreement(balanceRound, value.OriginalProposer)
if err != nil {
return fmt.Errorf("failed to obtain balance record for address %v in round %d: %v", value.OriginalProposer, balanceRound, err)
}
diff --git a/agreement/proposalStore_test.go b/agreement/proposalStore_test.go
index 93f2d15b4..2333b0344 100644
--- a/agreement/proposalStore_test.go
+++ b/agreement/proposalStore_test.go
@@ -20,7 +20,6 @@ import (
"os"
"reflect"
"testing"
- "time"
"github.com/stretchr/testify/require"
@@ -65,7 +64,7 @@ func TestBlockAssemblerPipeline(t *testing.T) {
round := player.Round
period := player.Period
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
accountIndex := 0
@@ -133,7 +132,7 @@ func TestBlockAssemblerBind(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
@@ -201,7 +200,7 @@ func TestBlockAssemblerAuthenticator(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, _, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -267,7 +266,7 @@ func TestBlockAssemblerTrim(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, _, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -340,7 +339,7 @@ func TestProposalStoreT(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, proposalV, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -414,7 +413,7 @@ func TestProposalStoreUnderlying(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, proposalV, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -478,7 +477,7 @@ func TestProposalStoreHandle(t *testing.T) {
proposalVoteEventBatch, proposalPayloadEventBatch, _ := generateProposalEvents(t, player, accounts, factory, ledger)
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
_, proposalV0, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -662,7 +661,7 @@ func TestProposalStoreGetPinnedValue(t *testing.T) {
// create proposal Store
player, router, accounts, factory, ledger := testPlayerSetup()
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
// create a route handler for the proposal store
diff --git a/agreement/proposal_test.go b/agreement/proposal_test.go
index 49481ae84..24ddfbfd1 100644
--- a/agreement/proposal_test.go
+++ b/agreement/proposal_test.go
@@ -20,7 +20,6 @@ import (
"context"
"os"
"testing"
- "time"
"github.com/stretchr/testify/require"
@@ -47,7 +46,7 @@ func testSetup(periodCount uint64) (player, rootRouter, testAccountData, testBlo
}
func createProposalsTesting(accs testAccountData, round basics.Round, period period, factory BlockFactory, ledger Ledger) (ps []proposal, vs []vote) {
- ve, err := factory.AssembleBlock(round, time.Now().Add(time.Minute))
+ ve, err := factory.AssembleBlock(round)
if err != nil {
logging.Base().Errorf("Could not generate a proposal for round %d: %v", round, err)
return nil, nil
@@ -123,7 +122,7 @@ func TestProposalFunctions(t *testing.T) {
player, _, accs, factory, ledger := testSetup(0)
round := player.Round
period := player.Period
- ve, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ ve, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
validator := testBlockValidator{}
@@ -163,7 +162,7 @@ func TestProposalUnauthenticated(t *testing.T) {
round := player.Round
period := player.Period
- testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
+ testBlockFactory, err := factory.AssembleBlock(player.Round)
require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
validator := testBlockValidator{}
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index e2809fb13..2589028cb 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -18,11 +18,11 @@ package agreement
import (
"context"
+ "errors"
"fmt"
"sync"
"time"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
@@ -33,13 +33,14 @@ import (
// TODO put these in config
const (
- pseudonodeVerificationBacklog = 32
+ pseudonodeVerificationBacklog = 32
+ maxPseudonodeOutputWaitDuration = 2 * time.Second
)
var errPseudonodeBacklogFull = fmt.Errorf("pseudonode input channel is full")
-var errPseudonodeVerifierClosedChannel = fmt.Errorf("crypto verifier closed the output channel prematurely")
-var errPseudonodeNoVotes = fmt.Errorf("no valid participation keys to generate votes for given round")
-var errPseudonodeNoProposals = fmt.Errorf("no valid participation keys to generate proposals for given round")
+var errPseudonodeVerifierClosedChannel = errors.New("crypto verifier closed the output channel prematurely")
+var errPseudonodeNoVotes = errors.New("no valid participation keys to generate votes for given round")
+var errPseudonodeNoProposals = errors.New("no valid participation keys to generate proposals for given round")
// A pseudonode creates proposals and votes with a KeyManager which holds participation keys.
//
@@ -174,7 +175,7 @@ func (n asyncPseudonode) MakeProposals(ctx context.Context, r round, p period) (
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
- return nil, errPseudonodeBacklogFull
+ return nil, fmt.Errorf("unable to make proposal for (%d, %d): %w", r, p, errPseudonodeBacklogFull)
}
}
@@ -191,7 +192,7 @@ func (n asyncPseudonode) MakeVotes(ctx context.Context, r round, p period, s ste
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
- return nil, errPseudonodeBacklogFull
+ return nil, fmt.Errorf("unable to make vote for (%d, %d, %d): %w", r, p, s, errPseudonodeBacklogFull)
}
}
@@ -267,8 +268,7 @@ func (n asyncPseudonode) makePseudonodeVerifier(voteVerifier *AsyncVoteVerifier)
// makeProposals creates a slice of block proposals for the given round and period.
func (n asyncPseudonode) makeProposals(round basics.Round, period period, accounts []account.Participation) ([]proposal, []unauthenticatedVote) {
- deadline := time.Now().Add(config.ProposalAssemblyTime)
- ve, err := n.factory.AssembleBlock(round, deadline)
+ ve, err := n.factory.AssembleBlock(round)
if err != nil {
if err != ErrAssembleBlockRoundStale {
n.log.Errorf("pseudonode.makeProposals: could not generate a proposal for round %d: %v", round, err)
@@ -367,13 +367,20 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru
unverifiedVotes := t.node.makeVotes(t.round, t.period, t.step, t.prop, t.participation)
t.node.log.Infof("pseudonode: made %v votes", len(unverifiedVotes))
results := make(chan asyncVerifyVoteResponse, len(unverifiedVotes))
+ orderedResults := make([]asyncVerifyVoteResponse, len(unverifiedVotes))
+ asyncVerifyingVotes := len(unverifiedVotes)
for i, uv := range unverifiedVotes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ if err != nil {
+ orderedResults[i].err = err
+ t.node.log.Infof("pseudonode.makeVotes: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
+ asyncVerifyingVotes--
+ continue
+ }
}
- orderedResults := make([]asyncVerifyVoteResponse, len(unverifiedVotes))
- for i := 0; i < len(unverifiedVotes); i++ {
+ for i := 0; i < asyncVerifyingVotes; i++ {
resp := <-results
orderedResults[resp.index] = resp
}
@@ -440,15 +447,26 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru
}
t.node.monitor.dec(pseudonodeCoserviceType)
+ outputTimeout := time.After(maxPseudonodeOutputWaitDuration)
+
// push results into channel.
+verifiedVotesLoop:
for _, r := range verifiedResults {
- select {
- case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
- case <-quit:
- return
- case <-t.context.Done():
- // we done care about the output anymore; just exit.
- return
+ for {
+ select {
+ case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
+ t.node.keys.Record(r.v.R.Sender, r.v.R.Round, account.Vote)
+ continue verifiedVotesLoop
+ case <-quit:
+ return
+ case <-t.context.Done():
+ // we done care about the output anymore; just exit.
+ return
+ case <-outputTimeout:
+ // we've been waiting for too long for this vote to be written to the output.
+ t.node.log.Warnf("pseudonode.makeVotes: unable to write vote to output channel for round %d, period %d", t.round, t.period)
+ outputTimeout = nil
+ }
}
}
}
@@ -477,13 +495,20 @@ func (t pseudonodeProposalsTask) execute(verifier *AsyncVoteVerifier, quit chan
// For now, don't log at all, and revisit when the metric becomes more important.
results := make(chan asyncVerifyVoteResponse, len(votes))
+ cryptoOutputs := make([]asyncVerifyVoteResponse, len(votes))
+ asyncVerifyingVotes := len(votes)
for i, uv := range votes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ if err != nil {
+ cryptoOutputs[i].err = err
+ t.node.log.Infof("pseudonode.makeProposals: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
+ asyncVerifyingVotes--
+ continue
+ }
}
- cryptoOutputs := make([]asyncVerifyVoteResponse, len(votes))
- for i := 0; i < len(votes); i++ {
+ for i := 0; i < asyncVerifyingVotes; i++ {
resp := <-results
cryptoOutputs[resp.index] = resp
}
@@ -527,27 +552,45 @@ func (t pseudonodeProposalsTask) execute(verifier *AsyncVoteVerifier, quit chan
}
t.node.monitor.dec(pseudonodeCoserviceType)
+ outputTimeout := time.After(maxPseudonodeOutputWaitDuration)
// push results into channel.
+verifiedVotesLoop:
for _, r := range verifiedVotes {
- select {
- case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
- case <-quit:
- return
- case <-t.context.Done():
- // we done care about the output anymore; just exit.
- return
+ for {
+ select {
+ case t.out <- messageEvent{T: voteVerified, Input: r.message, Err: makeSerErr(r.err)}:
+ t.node.keys.Record(r.v.R.Sender, r.v.R.Round, account.BlockProposal)
+ continue verifiedVotesLoop
+ case <-quit:
+ return
+ case <-t.context.Done():
+ // we done care about the output anymore; just exit.
+ return
+ case <-outputTimeout:
+ // we've been waiting for too long for this vote to be written to the output.
+ t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal vote to output channel for round %d, period %d", t.round, t.period)
+ outputTimeout = nil
+ }
}
}
+verifiedPayloadsLoop:
for _, payload := range verifiedPayloads {
msg := message{Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: payload.u(), Proposal: payload}
- select {
- case t.out <- messageEvent{T: payloadVerified, Input: msg}:
- case <-quit:
- return
- case <-t.context.Done():
- // we done care about the output anymore; just exit.
- return
+ for {
+ select {
+ case t.out <- messageEvent{T: payloadVerified, Input: msg}:
+ continue verifiedPayloadsLoop
+ case <-quit:
+ return
+ case <-t.context.Done():
+ // we done care about the output anymore; just exit.
+ return
+ case <-outputTimeout:
+ // we've been waiting for too long for this vote to be written to the output.
+ t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal payload to output channel for round %d, period %d", t.round, t.period)
+ outputTimeout = nil
+ }
}
}
}
diff --git a/agreement/pseudonode_test.go b/agreement/pseudonode_test.go
index 72f1a427c..e65855556 100644
--- a/agreement/pseudonode_test.go
+++ b/agreement/pseudonode_test.go
@@ -19,7 +19,9 @@ package agreement
import (
"context"
"crypto/sha256"
+ "errors"
"fmt"
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -30,6 +32,7 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
)
// The serializedPseudonode is the trivial implementation for the pseudonode interface
@@ -126,7 +129,7 @@ func compareEventChannels(t *testing.T, ch1, ch2 <-chan externalEvent) bool {
}
}
default:
- assert.NoError(t, fmt.Errorf("Unexpected tag %v encountered", ev1.Input.Tag))
+ assert.NoError(t, fmt.Errorf("Unexpected tag '%v' encountered", ev1.Input.Tag))
}
}
return true
@@ -145,7 +148,7 @@ func TestPseudonode(t *testing.T) {
sLogger := serviceLogger{logging.NewLogger()}
sLogger.SetLevel(logging.Warn)
- keyManager := simpleKeyManager(accounts)
+ keyManager := makeRecordingKeyManager(accounts)
pb := makePseudonode(pseudonodeParams{
factory: testBlockFactory{Owner: 0},
validator: testBlockValidator{},
@@ -222,6 +225,8 @@ func TestPseudonode(t *testing.T) {
}
messageEvent, typeOk := ev.(messageEvent)
assert.True(t, true, typeOk)
+ // Verify votes are recorded - everyone is voting and proposing blocks.
+ keyManager.ValidateVoteRound(t, messageEvent.Input.Vote.R.Sender, startRound)
events[messageEvent.t()] = append(events[messageEvent.t()], messageEvent)
}
assert.Subset(t, []int{5, 6, 7, 8, 9, 10}, []int{len(events[voteVerified])})
@@ -390,6 +395,9 @@ func (k *KeyManagerProxy) VotingKeys(votingRound, balanceRound basics.Round) []a
return k.target(votingRound, balanceRound)
}
+func (k *KeyManagerProxy) Record(account basics.Address, round basics.Round, action account.ParticipationAction) {
+}
+
func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -403,7 +411,7 @@ func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) {
sLogger := serviceLogger{logging.NewLogger()}
sLogger.SetLevel(logging.Warn)
- keyManager := simpleKeyManager(accounts)
+ keyManager := makeRecordingKeyManager(accounts)
pb := makePseudonode(pseudonodeParams{
factory: testBlockFactory{Owner: 0},
validator: testBlockValidator{},
@@ -447,3 +455,95 @@ func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) {
pb.loadRoundParticipationKeys(basics.Round(rnd))
}
}
+
+type substrServiceLogger struct {
+ logging.Logger
+ looupStrings []string
+ instancesFound []int
+}
+
+func (ssl *substrServiceLogger) Infof(s string, args ...interface{}) {
+ for i, str := range ssl.looupStrings {
+ if strings.Contains(s, str) {
+ ssl.instancesFound[i]++
+ return
+ }
+ }
+}
+
+// TestPseudonodeFailedEnqueuedTasks test to see that in the case where we cannot enqueue the verification task to the backlog, we won't be waiting forever - instead,
+// we would generate a warning message and keep going.
+func TestPseudonodeFailedEnqueuedTasks(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+
+ // generate a nice, fixed hash.
+ rootSeed := sha256.Sum256([]byte(t.Name()))
+ accounts, balances := createTestAccountsAndBalances(t, 10, rootSeed[:])
+ ledger := makeTestLedger(balances)
+
+ subStrLogger := &substrServiceLogger{
+ Logger: logging.TestingLog(t),
+ looupStrings: []string{"pseudonode.makeVotes: failed to enqueue vote verification for", "pseudonode.makeProposals: failed to enqueue vote verification"},
+ instancesFound: []int{0, 0},
+ }
+ sLogger := serviceLogger{
+ Logger: subStrLogger,
+ }
+ sLogger.SetLevel(logging.Warn)
+
+ keyManager := makeRecordingKeyManager(accounts)
+
+ mainPool := execpool.MakePool(t)
+ defer mainPool.Shutdown()
+
+ voteVerifier := MakeAsyncVoteVerifier(&expiredExecPool{mainPool})
+ defer voteVerifier.Quit()
+
+ pb := makePseudonode(pseudonodeParams{
+ factory: testBlockFactory{Owner: 0},
+ validator: testBlockValidator{},
+ keys: keyManager,
+ ledger: ledger,
+ voteVerifier: voteVerifier,
+ log: sLogger,
+ monitor: nil,
+ })
+ defer pb.Quit()
+
+ startRound := ledger.NextRound()
+
+ channels := make([]<-chan externalEvent, 0)
+ var ch <-chan externalEvent
+ var err error
+ for i := 0; i < pseudonodeVerificationBacklog*2; i++ {
+ ch, err = pb.MakeProposals(context.Background(), startRound, period(i))
+ if err != nil {
+ require.Subset(t, []int{pseudonodeVerificationBacklog, pseudonodeVerificationBacklog + 1}, []int{i})
+ break
+ }
+ channels = append(channels, ch)
+ }
+ require.Error(t, err, "MakeProposals did not returned an error when being overflowed with requests")
+ require.True(t, errors.Is(err, errPseudonodeBacklogFull))
+
+ persist := make(chan error)
+ close(persist)
+ for i := 0; i < pseudonodeVerificationBacklog*2; i++ {
+ ch, err = pb.MakeVotes(context.Background(), startRound, period(i), step(i%5), makeProposalValue(period(i), accounts[0].Address()), persist)
+ if err != nil {
+ require.Subset(t, []int{pseudonodeVerificationBacklog, pseudonodeVerificationBacklog + 1}, []int{i})
+ break
+ }
+ channels = append(channels, ch)
+ }
+ require.Error(t, err, "MakeVotes did not returned an error when being overflowed with requests")
+
+ // drain output channels.
+ for _, ch := range channels {
+ drainChannel(ch)
+ }
+ require.Equal(t, 330, subStrLogger.instancesFound[0])
+ require.Equal(t, 330, subStrLogger.instancesFound[1])
+}
diff --git a/agreement/selector.go b/agreement/selector.go
index 623c4d23e..74bfdebdd 100644
--- a/agreement/selector.go
+++ b/agreement/selector.go
@@ -64,7 +64,7 @@ func membership(l LedgerReader, addr basics.Address, r basics.Round, p period, s
balanceRound := balanceRound(r, cparams)
seedRound := seedRound(r, cparams)
- record, err := l.Lookup(balanceRound, addr)
+ record, err := l.LookupAgreement(balanceRound, addr)
if err != nil {
err = fmt.Errorf("Service.initializeVote (r=%d): Failed to obtain balance record for address %v in round %d: %w", r, addr, balanceRound, err)
return
@@ -82,7 +82,7 @@ func membership(l LedgerReader, addr basics.Address, r basics.Round, p period, s
return
}
- m.Record = committee.BalanceRecord{AccountData: record, Addr: addr}
+ m.Record = committee.BalanceRecord{OnlineAccountData: record, Addr: addr}
m.Selector = selector{Seed: seed, Round: r, Period: p, Step: s}
m.TotalMoney = total
return m, nil
diff --git a/agreement/service_test.go b/agreement/service_test.go
index b0469237f..68db73a60 100644
--- a/agreement/service_test.go
+++ b/agreement/service_test.go
@@ -105,22 +105,6 @@ func (c *testingClock) fire(d time.Duration) {
close(c.TA[d])
}
-type simpleKeyManager []account.Participation
-
-func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
- var km []account.Participation
- for _, acc := range m {
- if acc.OverlapsInterval(votingRound, votingRound) {
- km = append(km, acc)
- }
- }
- return km
-}
-
-func (m simpleKeyManager) DeleteOldKeys(basics.Round) {
- // noop
-}
-
type testingNetwork struct {
validator BlockValidator
@@ -743,7 +727,7 @@ func setupAgreementWithValidator(t *testing.T, numNodes int, traceLevel traceLev
m.coserviceListener = am.coserviceListener(nodeID(i))
clocks[i] = makeTestingClock(m)
ledgers[i] = ledgerFactory(balances)
- keys := simpleKeyManager(accounts[i : i+1])
+ keys := makeRecordingKeyManager(accounts[i : i+1])
endpoint := baseNetwork.testingNetworkEndpoint(nodeID(i))
ilog := log.WithFields(logging.Fields{"Source": "service-" + strconv.Itoa(i)})
diff --git a/buildnumber.dat b/buildnumber.dat
index 00750edc0..573541ac9 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-3
+0
diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go
index 4d62fdde0..65ace1345 100644
--- a/catchup/fetcher_test.go
+++ b/catchup/fetcher_test.go
@@ -23,7 +23,6 @@ import (
"net/url"
"strings"
"testing"
- "time"
"github.com/gorilla/mux"
"github.com/stretchr/testify/require"
@@ -288,16 +287,7 @@ func (p *testUnicastPeer) Version() string {
return p.version
}
-func (p *testUnicastPeer) IsOutgoing() bool {
- return false
-}
-
-// GetConnectionLatency returns the connection latency between the local node and this peer.
-func (p *testUnicastPeer) GetConnectionLatency() time.Duration {
- return time.Duration(0)
-}
-
-func (p *testUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag, callback network.UnicastWebsocketMessageStateCallback) error {
+func (p *testUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error {
ps := p.gn.(*httpTestPeerSource)
var dispather network.MessageHandler
for _, v := range ps.dispatchHandlers {
diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go
index 02d759b20..a11e7db1e 100644
--- a/catchup/peerSelector_test.go
+++ b/catchup/peerSelector_test.go
@@ -50,7 +50,7 @@ type mockUnicastPeer struct {
func (d *mockUnicastPeer) GetAddress() string {
return d.address
}
-func (d *mockUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag, callback network.UnicastWebsocketMessageStateCallback) error {
+func (d *mockUnicastPeer) Unicast(ctx context.Context, data []byte, tag protocol.Tag) error {
return nil
}
func (d *mockUnicastPeer) Version() string {
@@ -62,9 +62,6 @@ func (d *mockUnicastPeer) Request(ctx context.Context, tag network.Tag, topics n
func (d *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, topics network.Topics) (e error) {
return nil
}
-func (d *mockUnicastPeer) IsOutgoing() bool {
- return false
-}
// GetConnectionLatency returns the connection latency between the local node and this peer.
func (d *mockUnicastPeer) GetConnectionLatency() time.Duration {
diff --git a/catchup/service.go b/catchup/service.go
index f8f92b9b6..27ce957ba 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -18,6 +18,7 @@ package catchup
import (
"context"
+ "errors"
"fmt"
"sync"
"sync/atomic"
@@ -28,7 +29,6 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
@@ -58,8 +58,8 @@ type Ledger interface {
LastRound() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
IsWritingCatchpointFile() bool
- Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error)
- AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error
+ Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error)
+ AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error
}
// Service represents the catchup service. Once started and until it is stopped, it ensures that the ledger is up to date with network.
@@ -156,8 +156,19 @@ func (s *Service) SynchronizingTime() time.Duration {
return time.Duration(timeInNS - startNS)
}
+// errLedgerAlreadyHasBlock is returned by innerFetch in case the local ledger already has the requested block.
+var errLedgerAlreadyHasBlock = errors.New("ledger already has block")
+
// function scope to make a bunch of defer statements better
func (s *Service) innerFetch(r basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) {
+ ledgerWaitCh := s.ledger.Wait(r)
+ select {
+ case <-ledgerWaitCh:
+ // if our ledger already have this block, no need to attempt to fetch it.
+ return nil, nil, time.Duration(0), errLedgerAlreadyHasBlock
+ default:
+ }
+
ctx, cf := context.WithCancel(s.ctx)
fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg)
defer cf()
@@ -166,11 +177,21 @@ func (s *Service) innerFetch(r basics.Round, peer network.Peer) (blk *bookkeepin
go func() {
select {
case <-stopWaitingForLedgerRound:
- case <-s.ledger.Wait(r):
+ case <-ledgerWaitCh:
cf()
}
}()
- return fetcher.fetchBlock(ctx, r, peer)
+ blk, cert, ddur, err = fetcher.fetchBlock(ctx, r, peer)
+ // check to see if we aborted due to ledger.
+ if err != nil {
+ select {
+ case <-ledgerWaitCh:
+ // yes, we aborted since the ledger received this round.
+ err = errLedgerAlreadyHasBlock
+ default:
+ }
+ }
+ return
}
// fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary.
@@ -219,6 +240,10 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool,
block, cert, blockDownloadDuration, err := s.innerFetch(r, peer)
if err != nil {
+ if err == errLedgerAlreadyHasBlock {
+ // ledger already has the block, no need to request this block from anyone.
+ return true
+ }
s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i)
peerSelector.rankPeer(psp, peerRankDownloadFailed)
// we've just failed to retrieve a block; wait until the previous block is fetched before trying again
@@ -307,7 +332,7 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool,
}
if s.cfg.CatchupVerifyTransactionSignatures() || s.cfg.CatchupVerifyApplyData() {
- var vb *ledger.ValidatedBlock
+ var vb *ledgercore.ValidatedBlock
vb, err = s.ledger.Validate(s.ctx, *block, s.blockValidationPool)
if err != nil {
if s.ctx.Err() != nil {
diff --git a/catchup/service_test.go b/catchup/service_test.go
index a0df457a1..4cb89338d 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -35,7 +35,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/committee"
- "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
@@ -658,11 +658,11 @@ func (m *mockedLedger) AddBlock(blk bookkeeping.Block, cert agreement.Certificat
return nil
}
-func (m *mockedLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error) {
+func (m *mockedLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
return nil, nil
}
-func (m *mockedLedger) AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error {
+func (m *mockedLedger) AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error {
return nil
}
@@ -722,6 +722,10 @@ func (m *mockedLedger) LookupDigest(basics.Round) (crypto.Digest, error) {
return crypto.Digest{}, errors.New("not needed for mockedLedger")
}
+func (m *mockedLedger) LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error) {
+ return basics.OnlineAccountData{}, errors.New("not needed for mockedLedger")
+}
+
func (m *mockedLedger) IsWritingCatchpointFile() bool {
return false
}
diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go
index 15eb857ce..a09485e83 100644
--- a/cmd/catchpointdump/file.go
+++ b/cmd/catchpointdump/file.go
@@ -63,7 +63,7 @@ var fileCmd = &cobra.Command{
if err != nil || len(tarFileBytes) == 0 {
reportErrorf("Unable to read '%s' : %v", tarFile, err)
}
- genesisInitState := ledger.InitState{}
+ genesisInitState := ledgercore.InitState{}
cfg := config.GetDefaultLocal()
l, err := ledger.OpenLedger(logging.Base(), "./ledger", false, genesisInitState, cfg)
if err != nil {
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 354d2c360..79fd986fa 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -30,6 +30,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
tools "github.com/algorand/go-algorand/tools/network"
@@ -268,7 +269,7 @@ func saveCatchpointTarFile(addr string, catchpointFileBytes []byte) (err error)
}
func makeFileDump(addr string, catchpointFileBytes []byte) error {
- genesisInitState := ledger.InitState{}
+ genesisInitState := ledgercore.InitState{}
deleteLedgerFiles := func() {
os.Remove("./ledger.block.sqlite")
os.Remove("./ledger.block.sqlite-shm")
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index d1bf2f6cf..7d20af38f 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -59,6 +59,7 @@ var (
partKeyOutDir string
partKeyFile string
partKeyDeleteInput bool
+ listpartkeyCompat bool
importDefault bool
mnemonic string
dumpOutFile string
@@ -165,6 +166,9 @@ func init() {
installParticipationKeyCmd.MarkFlagRequired("partkey")
installParticipationKeyCmd.Flags().BoolVar(&partKeyDeleteInput, "delete-input", false, "Acknowledge that installpartkey will delete the input key file")
+ // listpartkey flags
+ listParticipationKeysCmd.Flags().BoolVarP(&listpartkeyCompat, "compatibility", "c", false, "Print output in compatibility mode. This option will be removed in a future release, please use REST API for tooling.")
+
// import flags
importCmd.Flags().BoolVarP(&importDefault, "default", "f", false, "Set this account as the default one")
importCmd.Flags().StringVarP(&mnemonic, "mnemonic", "m", "", "Mnemonic to import (will prompt otherwise)")
@@ -933,7 +937,7 @@ var renewParticipationKeyCmd = &cobra.Command{
}
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
- parts, err := client.ListParticipationKeys()
+ parts, err := client.ListParticipationKeyFiles()
if err != nil {
reportErrorf(errorRequestFail, err)
}
@@ -991,7 +995,7 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
client := ensureAlgodClient(dataDir)
// Build list of accounts to renew from all accounts with part keys present
- parts, err := client.ListParticipationKeys()
+ parts, err := client.ListParticipationKeyFiles()
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
@@ -1051,12 +1055,73 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
return nil
}
+func maxRound(current uint64, next *uint64) uint64 {
+ if next != nil && *next > current {
+ return *next
+ }
+ return current
+}
+
+func uintToStr(number uint64) string {
+ return fmt.Sprintf("%d", number)
+}
+
+// legacyListParticipationKeysCommand prints key information in the same
+// format as earlier versions of goal. Some users are using this information
+// in scripts and need some extra time to migrate to the REST API.
+func legacyListParticipationKeysCommand() {
+ dataDir := ensureSingleDataDir()
+
+ client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+ parts, err := client.ListParticipationKeyFiles()
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+
+ var filenames []string
+ for fn := range parts {
+ filenames = append(filenames, fn)
+ }
+ sort.Strings(filenames)
+
+ rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
+ fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
+ for _, fn := range filenames {
+ onlineInfoStr := "unknown"
+ onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
+ if err == nil {
+ votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
+ vrfBytes := parts[fn].VRF.PK
+ if onlineAccountInfo.Participation != nil &&
+ (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
+ (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
+ (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
+ (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
+ (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
+ onlineInfoStr = "yes"
+ } else {
+ onlineInfoStr = "no"
+ }
+ }
+ // it's okay to proceed without algod info
+ first, last := parts[fn].ValidInterval()
+ fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
+ fmt.Sprintf("%d", first),
+ fmt.Sprintf("%d", last),
+ fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
+ }
+}
+
var listParticipationKeysCmd = &cobra.Command{
Use: "listpartkeys",
- Short: "List participation keys",
- Long: `List all participation keys tracked by algod, with additional information such as key validity period.`,
+ Short: "List participation keys summary",
+ Long: `List all participation keys tracked by algod along with summary of additional information. For detailed key information use 'partkeyinfo'.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
+ if listpartkeyCompat {
+ legacyListParticipationKeysCommand()
+ return
+ }
dataDir := ensureSingleDataDir()
client := ensureGoalClient(dataDir, libgoal.DynamicClient)
@@ -1065,37 +1130,53 @@ var listParticipationKeysCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- var filenames []string
- for fn := range parts {
- filenames = append(filenames, fn)
- }
- sort.Strings(filenames)
-
- rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
- fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
- for _, fn := range filenames {
+ // Squeezed this into 77 characters.
+ rowFormat := "%-10s %-11s %-15s %10s %11s %10s\n"
+ fmt.Printf(rowFormat, "Registered", "Account", "ParticipationID", "Last Used", "First round", "Last round")
+ for _, part := range parts {
onlineInfoStr := "unknown"
- onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
+ onlineAccountInfo, err := client.AccountInformation(part.Address)
if err == nil {
- votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
- vrfBytes := parts[fn].VRF.PK
+ votingBytes := part.Key.VoteParticipationKey
+ vrfBytes := part.Key.SelectionParticipationKey
if onlineAccountInfo.Participation != nil &&
(string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
(string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
- (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
- (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
- (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
+ (onlineAccountInfo.Participation.VoteFirst == part.Key.VoteFirstValid) &&
+ (onlineAccountInfo.Participation.VoteLast == part.Key.VoteLastValid) &&
+ (onlineAccountInfo.Participation.VoteKeyDilution == part.Key.VoteKeyDilution) {
onlineInfoStr = "yes"
} else {
onlineInfoStr = "no"
}
+
+ /*
+ // PKI TODO: We could avoid querying the account with something like this.
+ // One problem is that it doesn't account for multiple keys on the same
+ // account, so we'd still need to query the round.
+ if part.EffectiveFirstValid != nil && part.EffectiveLastValid < currentRound {
+ onlineInfoStr = "yes"
+ } else {
+ onlineInfoStr = "no"
+ }
+ */
+
+ // it's okay to proceed without algod info
+ lastUsed := maxRound(0, part.LastVote)
+ lastUsed = maxRound(lastUsed, part.LastBlockProposal)
+ lastUsed = maxRound(lastUsed, part.LastStateProof)
+ lastUsedString := "N/A"
+ if lastUsed != 0 {
+ lastUsedString = uintToStr(lastUsed)
+ }
+ fmt.Printf(rowFormat,
+ onlineInfoStr,
+ fmt.Sprintf("%s...%s", part.Address[:4], part.Address[len(part.Address)-4:]),
+ fmt.Sprintf("%s...", part.Id[:8]),
+ lastUsedString,
+ uintToStr(part.Key.VoteFirstValid),
+ uintToStr(part.Key.VoteLastValid))
}
- // it's okay to proceed without algod info
- first, last := parts[fn].ValidInterval()
- fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
- fmt.Sprintf("%d", first),
- fmt.Sprintf("%d", last),
- fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
}
},
}
@@ -1276,14 +1357,11 @@ var importRootKeysCmd = &cobra.Command{
},
}
-type partkeyInfo struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Address string `codec:"acct"`
- FirstValid basics.Round `codec:"first"`
- LastValid basics.Round `codec:"last"`
- VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
- SelectionID crypto.VRFVerifier `codec:"sel"`
- VoteKeyDilution uint64 `codec:"voteKD"`
+func strOrNA(value *uint64) string {
+ if value == nil {
+ return "N/A"
+ }
+ return uintToStr(*value)
}
var partkeyInfoCmd = &cobra.Command{
@@ -1295,7 +1373,7 @@ var partkeyInfoCmd = &cobra.Command{
onDataDirs(func(dataDir string) {
fmt.Printf("Dumping participation key info from %s...\n", dataDir)
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+ client := ensureAlgodClient(dataDir)
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
parts, err := client.ListParticipationKeys()
@@ -1303,18 +1381,23 @@ var partkeyInfoCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- for filename, part := range parts {
- fmt.Println("------------------------------------------------------------------")
- info := partkeyInfo{
- Address: part.Address().String(),
- FirstValid: part.FirstValid,
- LastValid: part.LastValid,
- VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
- SelectionID: part.VRFSecrets().PK,
- VoteKeyDilution: part.KeyDilution,
- }
- infoString := protocol.EncodeJSON(&info)
- fmt.Printf("File: %s\n%s\n", filename, string(infoString))
+ for _, part := range parts {
+ fmt.Println()
+ fmt.Printf("Participation ID: %s\n", part.Id)
+ fmt.Printf("Parent address: %s\n", part.Address)
+ fmt.Printf("Last vote round: %s\n", strOrNA(part.LastVote))
+ fmt.Printf("Last block proposal round: %s\n", strOrNA(part.LastBlockProposal))
+ // PKI TODO: enable with state proof support.
+ //fmt.Printf("Last state proof round: %s\n", strOrNA(part.LastStateProof))
+ fmt.Printf("Effective first round: %s\n", strOrNA(part.EffectiveFirstValid))
+ fmt.Printf("Effective last round: %s\n", strOrNA(part.EffectiveLastValid))
+ fmt.Printf("First round: %d\n", part.Key.VoteFirstValid)
+ fmt.Printf("Last round: %d\n", part.Key.VoteLastValid)
+ fmt.Printf("Key dilution: %d\n", part.Key.VoteKeyDilution)
+ fmt.Printf("Selection key: %s\n", base64.StdEncoding.EncodeToString(part.Key.SelectionParticipationKey))
+ fmt.Printf("Voting key: %s\n", base64.StdEncoding.EncodeToString(part.Key.VoteParticipationKey))
+ // PKI TODO: enable with state proof support.
+ //fmt.Printf("State proof key: %s\n", base64.StdEncoding.EncodeToString(part.StateProofKey))
}
})
},
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index da142e3e0..2ed7eed2d 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -17,6 +17,8 @@
package main
import (
+ "bytes"
+ "crypto/sha512"
"encoding/base32"
"encoding/base64"
"encoding/binary"
@@ -28,9 +30,11 @@ import (
"github.com/spf13/cobra"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/abi"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
)
@@ -41,6 +45,9 @@ var (
approvalProgFile string
clearProgFile string
+ method string
+ methodArgs []string
+
approvalProgRawFile string
clearProgRawFile string
@@ -79,9 +86,10 @@ func init() {
appCmd.AddCommand(clearAppCmd)
appCmd.AddCommand(readStateAppCmd)
appCmd.AddCommand(infoAppCmd)
+ appCmd.AddCommand(methodAppCmd)
appCmd.PersistentFlags().StringVarP(&walletName, "wallet", "w", "", "Set the wallet to be used for the selected operation")
- appCmd.PersistentFlags().StringSliceVar(&appArgs, "app-arg", nil, "Args to encode for application transactions (all will be encoded to a byte slice). For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.")
+ appCmd.PersistentFlags().StringArrayVar(&appArgs, "app-arg", nil, "Args to encode for application transactions (all will be encoded to a byte slice). For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.")
appCmd.PersistentFlags().StringSliceVar(&foreignApps, "foreign-app", nil, "Indexes of other apps whose global state is read in this transaction")
appCmd.PersistentFlags().StringSliceVar(&foreignAssets, "foreign-asset", nil, "Indexes of assets whose parameters are read in this transaction")
appCmd.PersistentFlags().StringSliceVar(&appStrAccounts, "app-account", nil, "Accounts that may be accessed from application logic")
@@ -108,6 +116,10 @@ func init() {
deleteAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to send delete transaction from")
readStateAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to fetch state from")
updateAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to send update transaction from")
+ methodAppCmd.Flags().StringVarP(&account, "from", "f", "", "Account to call method from")
+
+ methodAppCmd.Flags().StringVar(&method, "method", "", "Method to be called")
+ methodAppCmd.Flags().StringArrayVar(&methodArgs, "arg", nil, "Args to pass in for calling a method")
// Can't use PersistentFlags on the root because for some reason marking
// a root command as required with MarkPersistentFlagRequired isn't
@@ -120,6 +132,7 @@ func init() {
readStateAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
updateAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
infoAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
+ methodAppCmd.Flags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
// Add common transaction flags to all txn-generating app commands
addTxnFlags(createAppCmd)
@@ -129,6 +142,7 @@ func init() {
addTxnFlags(optInAppCmd)
addTxnFlags(closeOutAppCmd)
addTxnFlags(clearAppCmd)
+ addTxnFlags(methodAppCmd)
readStateAppCmd.Flags().BoolVar(&fetchLocal, "local", false, "Fetch account-specific state for this application. `--from` address is required when using this flag")
readStateAppCmd.Flags().BoolVar(&fetchGlobal, "global", false, "Fetch global state for this application.")
@@ -161,6 +175,11 @@ func init() {
readStateAppCmd.MarkFlagRequired("app-id")
infoAppCmd.MarkFlagRequired("app-id")
+
+ methodAppCmd.MarkFlagRequired("method") // nolint:errcheck // follow previous required flag format
+ methodAppCmd.MarkFlagRequired("app-id") // nolint:errcheck
+ methodAppCmd.MarkFlagRequired("from") // nolint:errcheck
+ methodAppCmd.Flags().MarkHidden("app-arg") // nolint:errcheck
}
type appCallArg struct {
@@ -229,6 +248,23 @@ func parseAppArg(arg appCallArg) (rawValue []byte, parseErr error) {
return
}
rawValue = data
+ case "abi":
+ typeAndValue := strings.SplitN(arg.Value, ":", 2)
+ if len(typeAndValue) != 2 {
+ parseErr = fmt.Errorf("Could not decode abi string (%s): should split abi-type and abi-value with colon", arg.Value)
+ return
+ }
+ abiType, err := abi.TypeOf(typeAndValue[0])
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi type string (%s): %v", typeAndValue[0], err)
+ return
+ }
+ value, err := abiType.UnmarshalFromJSON([]byte(typeAndValue[1]))
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi value string (%s):%v ", typeAndValue[1], err)
+ return
+ }
+ return abiType.Encode(value)
default:
parseErr = fmt.Errorf("Unknown encoding: %s", arg.Encoding)
}
@@ -266,6 +302,20 @@ func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint
return parseAppInputs(inputs)
}
+// filterEmptyStrings filters out empty string parsed in by StringArrayVar
+// this function is added to support abi argument parsing
+// since parsing of `appArg` diverted from `StringSliceVar` to `StringArrayVar`
+func filterEmptyStrings(strSlice []string) []string {
+ var newStrSlice []string
+
+ for _, str := range strSlice {
+ if len(str) > 0 {
+ newStrSlice = append(newStrSlice, str)
+ }
+ }
+ return newStrSlice
+}
+
func getAppInputs() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
if (appArgs != nil || appStrAccounts != nil || foreignApps != nil) && appInputFilename != "" {
reportErrorf("Cannot specify both command-line arguments/accounts and JSON input filename")
@@ -275,7 +325,11 @@ func getAppInputs() (args [][]byte, accounts []string, foreignApps []uint64, for
}
var encodedArgs []appCallArg
- for _, arg := range appArgs {
+
+ // we need to filter out empty strings from appArgs first, caused by change to `StringArrayVar`
+ newAppArgs := filterEmptyStrings(appArgs)
+
+ for _, arg := range newAppArgs {
encodingValue := strings.SplitN(arg, ":", 2)
if len(encodingValue) != 2 {
reportErrorf("all arguments should be of the form 'encoding:value'")
@@ -327,6 +381,12 @@ func mustParseOnCompletion(ocString string) (oc transactions.OnCompletion) {
}
}
+func getDataDirAndClient() (dataDir string, client libgoal.Client) {
+ dataDir = ensureSingleDataDir()
+ client = ensureFullClient(dataDir)
+ return
+}
+
func mustParseProgArgs() (approval []byte, clear []byte) {
// Ensure we don't have ambiguous or all empty args
if (approvalProgFile == "") == (approvalProgRawFile == "") {
@@ -357,9 +417,7 @@ var createAppCmd = &cobra.Command{
Long: `Issue a transaction that creates an application`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
-
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Construct schemas from args
localSchema := basics.StateSchema{
@@ -451,8 +509,7 @@ var updateAppCmd = &cobra.Command{
Long: `Issue a transaction that updates an application's ApprovalProgram and ClearStateProgram`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
approvalProg, clearProg := mustParseProgArgs()
@@ -523,8 +580,7 @@ var optInAppCmd = &cobra.Command{
Long: `Opt an account in to an application, allocating local state in your account`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -594,8 +650,7 @@ var closeOutAppCmd = &cobra.Command{
Long: `Close an account out of an application, removing local state from your account. The application must still exist. If it doesn't, use 'goal app clear'.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -665,8 +720,7 @@ var clearAppCmd = &cobra.Command{
Long: `Remove any local state from your account associated with an application. The application does not need to exist anymore.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -736,8 +790,7 @@ var callAppCmd = &cobra.Command{
Long: `Call an application, invoking application-specific functionality`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -807,8 +860,7 @@ var deleteAppCmd = &cobra.Command{
Long: `Delete an application, removing the global state and other application parameters from the creator's account`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ dataDir, client := getDataDirAndClient()
// Parse transaction parameters
appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
@@ -879,8 +931,7 @@ var readStateAppCmd = &cobra.Command{
Long: `Read global or local (account-specific) state for an application`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ _, client := getDataDirAndClient()
// Ensure exactly one of --local or --global is specified
if fetchLocal == fetchGlobal {
@@ -961,8 +1012,7 @@ var infoAppCmd = &cobra.Command{
Long: `Look up application information stored on the network, such as program hash.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- dataDir := ensureSingleDataDir()
- client := ensureFullClient(dataDir)
+ _, client := getDataDirAndClient()
meta, err := client.ApplicationInformation(appIdx)
if err != nil {
@@ -995,3 +1045,154 @@ var infoAppCmd = &cobra.Command{
}
},
}
+
+var methodAppCmd = &cobra.Command{
+ Use: "method",
+ Short: "Invoke a method",
+ Long: `Invoke a method in an App (stateful contract) with an application call transaction`,
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ dataDir, client := getDataDirAndClient()
+
+ // Parse transaction parameters
+ appArgsParsed, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ if len(appArgsParsed) > 0 {
+ reportErrorf("in goal app method: --arg and --app-arg are mutually exclusive, do not use --app-arg")
+ }
+
+ onCompletion := mustParseOnCompletion(createOnCompletion)
+
+ if appIdx == 0 {
+ reportErrorf("app id == 0, goal app create not supported in goal app method")
+ }
+
+ var approvalProg, clearProg []byte
+ if onCompletion == transactions.UpdateApplicationOC {
+ approvalProg, clearProg = mustParseProgArgs()
+ }
+
+ var applicationArgs [][]byte
+
+ // insert the method selector hash
+ hash := sha512.Sum512_256([]byte(method))
+ applicationArgs = append(applicationArgs, hash[0:4])
+
+ // parse down the ABI type from method signature
+ argTupleTypeStr, retTypeStr, err := abi.ParseMethodSignature(method)
+ if err != nil {
+ reportErrorf("cannot parse method signature: %v", err)
+ }
+ err = abi.ParseArgJSONtoByteSlice(argTupleTypeStr, methodArgs, &applicationArgs)
+ if err != nil {
+ reportErrorf("cannot parse arguments to ABI encoding: %v", err)
+ }
+
+ tx, err := client.MakeUnsignedApplicationCallTx(
+ appIdx, applicationArgs, appAccounts, foreignApps, foreignAssets,
+ onCompletion, approvalProg, clearProg, basics.StateSchema{}, basics.StateSchema{}, 0)
+
+ if err != nil {
+ reportErrorf("Cannot create application txn: %v", err)
+ }
+
+ // Fill in note and lease
+ tx.Note = parseNoteField(cmd)
+ tx.Lease = parseLease(cmd)
+
+ // Fill in rounds, fee, etc.
+ fv, lv, err := client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ if err != nil {
+ reportErrorf("Cannot determine last valid round: %s", err)
+ }
+
+ tx, err = client.FillUnsignedTxTemplate(account, fv, lv, fee, tx)
+ if err != nil {
+ reportErrorf("Cannot construct transaction: %s", err)
+ }
+ explicitFee := cmd.Flags().Changed("fee")
+ if explicitFee {
+ tx.Fee = basics.MicroAlgos{Raw: fee}
+ }
+
+ if outFilename != "" {
+ if dumpForDryrun {
+ err = writeDryrunReqToFile(client, tx, outFilename)
+ } else {
+ // Write transaction to file
+ err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
+ }
+
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ return
+ }
+
+ // Broadcast
+ wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
+ signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ if err != nil {
+ reportErrorf(errorSigningTX, err)
+ }
+
+ txid, err := client.BroadcastTransaction(signedTxn)
+ if err != nil {
+ reportErrorf(errorBroadcastingTX, err)
+ }
+
+ // Report tx details to user
+ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
+
+ if !noWaitAfterSend {
+ _, err := waitForCommit(client, txid, lv)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+
+ resp, err := client.PendingTransactionInformationV2(txid)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+
+ if retTypeStr == "void" {
+ return
+ }
+
+ // specify the return hash prefix
+ hashRet := sha512.Sum512_256([]byte("return"))
+ hashRetPrefix := hashRet[:4]
+
+ var abiEncodedRet []byte
+ foundRet := false
+ if resp.Logs != nil {
+ for i := len(*resp.Logs) - 1; i >= 0; i-- {
+ retLog := (*resp.Logs)[i]
+ if bytes.HasPrefix(retLog, hashRetPrefix) {
+ abiEncodedRet = retLog[4:]
+ foundRet = true
+ break
+ }
+ }
+ }
+
+ if !foundRet {
+ reportErrorf("cannot find return log for abi type %s", retTypeStr)
+ }
+
+ retType, err := abi.TypeOf(retTypeStr)
+ if err != nil {
+ reportErrorf("cannot cast %s to abi type: %v", retTypeStr, err)
+ }
+ decoded, err := retType.Decode(abiEncodedRet)
+ if err != nil {
+ reportErrorf("cannot decode return value %v: %v", abiEncodedRet, err)
+ }
+
+ decodedJSON, err := retType.MarshalToJSON(decoded)
+ if err != nil {
+ reportErrorf("cannot marshal returned bytes %v to JSON: %v", decoded, err)
+ }
+ fmt.Printf("method %s output: %s\n", method, string(decodedJSON))
+ }
+ },
+}
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 889fe7194..ed5392f5a 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -922,7 +922,7 @@ func assembleFile(fname string) (program []byte) {
}
ops, err := logic.AssembleString(string(text))
if err != nil {
- ops.ReportProblems(fname)
+ ops.ReportProblems(fname, os.Stderr)
reportErrorf("%s: %s", fname, err)
}
_, params := getProto(protoVersion)
diff --git a/cmd/goal/multisig.go b/cmd/goal/multisig.go
index 4bd72681e..387883582 100644
--- a/cmd/goal/multisig.go
+++ b/cmd/goal/multisig.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "os"
"github.com/spf13/cobra"
@@ -163,7 +164,7 @@ var signProgramCmd = &cobra.Command{
}
ops, err := logic.AssembleString(string(text))
if err != nil {
- ops.ReportProblems(programSource)
+ ops.ReportProblems(programSource, os.Stderr)
reportErrorf("%s: %s", programSource, err)
}
if outname == "" {
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index ed834f4ad..7d0136d10 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -242,7 +242,7 @@ var runCmd = &cobra.Command{
}
ops, err := logic.AssembleString(programStr)
if err != nil {
- ops.ReportProblems(teal)
+ ops.ReportProblems(teal, os.Stderr)
reportErrorf("Internal error, cannot assemble %v \n", programStr)
}
cfg.Program = ops.Program
diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go
index dca4a3ff9..77f4eb896 100644
--- a/cmd/tealdbg/cdtState.go
+++ b/cmd/tealdbg/cdtState.go
@@ -364,7 +364,11 @@ func prepareTxn(txn *transactions.Transaction, groupIndex int) []fieldDesc {
field == int(logic.Accounts) ||
field == int(logic.ApplicationArgs) ||
field == int(logic.Assets) ||
- field == int(logic.Applications) {
+ field == int(logic.Applications) ||
+ field == int(logic.CreatedApplicationID) ||
+ field == int(logic.CreatedAssetID) ||
+ field == int(logic.Logs) ||
+ field == int(logic.NumLogs) {
continue
}
var value string
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 24b2e1330..aa5fb3aaf 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -540,7 +540,7 @@ func (r *LocalRunner) RunAll() error {
ep := logic.EvalParams{
Proto: &r.proto,
Debugger: r.debugger,
- Txn: &r.txnGroup[groupIndex],
+ Txn: &r.txnGroup[run.groupIndex],
TxnGroup: r.txnGroup,
GroupIndex: run.groupIndex,
PastSideEffects: run.pastSideEffects,
@@ -588,7 +588,7 @@ func (r *LocalRunner) Run() (bool, error) {
}
ep := logic.EvalParams{
Proto: &r.proto,
- Txn: &r.txnGroup[groupIndex],
+ Txn: &r.txnGroup[run.groupIndex],
TxnGroup: r.txnGroup,
GroupIndex: run.groupIndex,
PastSideEffects: run.pastSideEffects,
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index 23e58e3f3..364186f55 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -32,6 +32,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/apply"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
@@ -280,7 +281,7 @@ func (l *localLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
return bookkeeping.BlockHeader{}, nil
}
-func (l *localLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledger.TxLease) error {
+func (l *localLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index ad7faa605..d78c402e8 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -1350,3 +1350,103 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
})
}
}
+
+func TestGroupTxnIdx(t *testing.T) {
+
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ ddrBlob := `{
+ "accounts": [
+ {
+ "address": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ",
+ "amount": 3999999999497000,
+ "amount-without-pending-rewards": 3999999999497000,
+ "created-apps": [
+ {
+ "id": 1,
+ "params": {
+ "approval-program": "BSABATEQIhJAABExEIEGEkAAByJAAAEAIkMiQ4EAQw==",
+ "clear-state-program": "BYEBQw==",
+ "creator": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ"
+ }
+ }
+ ],
+ "pending-rewards": 0,
+ "rewards": 0,
+ "round": 2,
+ "status": "Online"
+ },
+ {
+ "address": "WCS6TVPJRBSARHLN2326LRU5BYVJZUKI2VJ53CAWKYYHDE455ZGKANWMGM",
+ "amount": 500000,
+ "amount-without-pending-rewards": 500000,
+ "pending-rewards": 0,
+ "rewards": 0,
+ "round": 2,
+ "status": "Offline"
+ }
+ ],
+ "apps": [
+ {
+ "id": 1,
+ "params": {
+ "approval-program": "BSABATEQIhJAABExEIEGEkAAByJAAAEAIkMiQ4EAQw==",
+ "clear-state-program": "BYEBQw==",
+ "creator": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ"
+ }
+ }
+ ],
+ "latest-timestamp": 1634765269,
+ "protocol-version": "future",
+ "round": 2,
+ "sources": null,
+ "txns": [
+ {
+ "sig": "8Z/ECart3vFBSKp5sFuNRN4coliea4TE+xttZNn9E15DJ8GZ++kgtZKhG4Tiopv7r61Lqh8VBuyuTf9AC3uQBQ==",
+ "txn": {
+ "amt": 5000,
+ "fee": 1000,
+ "fv": 3,
+ "gen": "sandnet-v1",
+ "gh": "pjM5GFR9MpNkWIibcfqtu/a2OIZTBy/mSQc++sF1r0Q=",
+ "grp": "2ca4sSb5aGab0k065qIT3J3AcB5YWYezrRh6bLB0ve8=",
+ "lv": 1003,
+ "note": "V+GSPgDmLQo=",
+ "rcv": "WCS6TVPJRBSARHLN2326LRU5BYVJZUKI2VJ53CAWKYYHDE455ZGKANWMGM",
+ "snd": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ",
+ "type": "pay"
+ }
+ },
+ {
+ "sig": "4/gj+6rllN/Uc55kAJ0BOKTzoUJKJ7gExE3vp7cr5vC9XVStx0QNZq1DFXLhpTZnTQAl3zOrGzIxfS5HOpSyCg==",
+ "txn": {
+ "apid": 1,
+ "fee": 1000,
+ "fv": 3,
+ "gh": "pjM5GFR9MpNkWIibcfqtu/a2OIZTBy/mSQc++sF1r0Q=",
+ "grp": "2ca4sSb5aGab0k065qIT3J3AcB5YWYezrRh6bLB0ve8=",
+ "lv": 1003,
+ "note": "+fl8jkXqyFc=",
+ "snd": "FPVVJ7N42QRVP2OWBGZ3XPTQAZFQNBYHJGZ2CJFOATAQNWFA5NWB4MPWBQ",
+ "type": "appl"
+ }
+ }
+ ]
+ }`
+
+ ds := DebugParams{
+ Proto: string(protocol.ConsensusCurrentVersion),
+ DdrBlob: []byte(ddrBlob),
+ GroupIndex: 0,
+ RunMode: "application",
+ }
+
+ local := MakeLocalRunner(nil)
+ err := local.Setup(&ds)
+ a.NoError(err)
+
+ pass, err := local.Run()
+ a.NoError(err)
+ a.True(pass)
+}
diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go
index 6a1004c27..ea09dd1ed 100644
--- a/cmd/tealdbg/main.go
+++ b/cmd/tealdbg/main.go
@@ -240,7 +240,7 @@ func debugLocal(args []string) {
if len(txnFile) > 0 {
txnBlob, err = ioutil.ReadFile(txnFile)
if err != nil {
- log.Fatalf("Error txn reading %s: %s", balanceFile, err)
+ log.Fatalf("Error txn reading %s: %s", txnFile, err)
}
}
diff --git a/cmd/updater/versionCmd.go b/cmd/updater/versionCmd.go
index 4b700ef1e..5b8345bf8 100644
--- a/cmd/updater/versionCmd.go
+++ b/cmd/updater/versionCmd.go
@@ -28,20 +28,26 @@ import (
var (
destFile string
versionBucket string
+ packageName string
specificVersion uint64
semanticOutput bool
)
+// DefaultPackageName is the package we'll use by default.
+const DefaultPackageName = "node"
+
func init() {
versionCmd.AddCommand(checkCmd)
versionCmd.AddCommand(getCmd)
- checkCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
checkCmd.Flags().BoolVarP(&semanticOutput, "semantic", "s", false, "Human readable semantic version output.")
+ checkCmd.Flags().StringVarP(&packageName, "package", "p", DefaultPackageName, "Get version of specific package.")
+ checkCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
getCmd.Flags().StringVarP(&destFile, "outputFile", "o", "", "Path for downloaded file (required).")
- getCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
getCmd.Flags().Uint64VarP(&specificVersion, "version", "v", 0, "Specific version to download.")
+ getCmd.Flags().StringVarP(&packageName, "package", "p", DefaultPackageName, "Get version of specific package.")
+ getCmd.Flags().StringVarP(&versionBucket, "bucket", "b", "", "S3 bucket containing updates.")
getCmd.MarkFlagRequired("outputFile")
}
@@ -67,7 +73,7 @@ var checkCmd = &cobra.Command{
if err != nil {
exitErrorf("Error creating s3 session %s\n", err.Error())
} else {
- version, _, err := s3Session.GetLatestUpdateVersion(channel)
+ version, _, err := s3Session.GetPackageVersion(channel, packageName, 0)
if err != nil {
exitErrorf("Error getting latest version from s3 %s\n", err.Error())
}
@@ -102,7 +108,7 @@ var getCmd = &cobra.Command{
if err != nil {
exitErrorf("Error creating s3 session %s\n", err.Error())
} else {
- version, name, err := s3Session.GetUpdateVersion(channel, specificVersion)
+ version, name, err := s3Session.GetPackageVersion(channel, packageName, specificVersion)
if err != nil {
exitErrorf("Error getting latest version from s3 %s\n", err.Error())
}
diff --git a/compactcert/abstractions.go b/compactcert/abstractions.go
index 6a369dee1..ac02ec2c8 100644
--- a/compactcert/abstractions.go
+++ b/compactcert/abstractions.go
@@ -24,7 +24,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
)
@@ -41,7 +41,7 @@ type Ledger interface {
Wait(basics.Round) chan struct{}
GenesisHash() crypto.Digest
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
- CompactCertVoters(basics.Round) (*ledger.VotersForRound, error)
+ CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error)
}
// Network captures the aspects of the gossip network protocol that are
diff --git a/compactcert/worker.go b/compactcert/worker.go
index fb78a7300..d84f7848f 100644
--- a/compactcert/worker.go
+++ b/compactcert/worker.go
@@ -24,7 +24,7 @@ import (
"github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
@@ -36,7 +36,7 @@ import (
type builder struct {
*compactcert.Builder
- voters *ledger.VotersForRound
+ voters *ledgercore.VotersForRound
votersHdr bookkeeping.BlockHeader
}
diff --git a/compactcert/worker_test.go b/compactcert/worker_test.go
index c2236f6cd..adc784f0e 100644
--- a/compactcert/worker_test.go
+++ b/compactcert/worker_test.go
@@ -33,7 +33,6 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
@@ -122,8 +121,8 @@ func (s *testWorkerStubs) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, err
return hdr, nil
}
-func (s *testWorkerStubs) CompactCertVoters(r basics.Round) (*ledger.VotersForRound, error) {
- voters := &ledger.VotersForRound{
+func (s *testWorkerStubs) CompactCertVoters(r basics.Round) (*ledgercore.VotersForRound, error) {
+ voters := &ledgercore.VotersForRound{
Proto: config.Consensus[protocol.ConsensusFuture],
AddrToPos: make(map[basics.Address]uint64),
TotalWeight: basics.MicroAlgos{Raw: uint64(s.totalWeight)},
@@ -131,7 +130,7 @@ func (s *testWorkerStubs) CompactCertVoters(r basics.Round) (*ledger.VotersForRo
for i, k := range s.keysForVoters {
voters.AddrToPos[k.Parent] = uint64(i)
- voters.Participants = append(voters.Participants, compactcert.Participant{
+ voters.Participants = append(voters.Participants, basics.Participant{
PK: k.Voting.OneTimeSignatureVerifier,
Weight: 1,
KeyDilution: config.Consensus[protocol.ConsensusFuture].DefaultKeyDilution,
diff --git a/config/config.go b/config/config.go
index 781ab73a8..315d53289 100644
--- a/config/config.go
+++ b/config/config.go
@@ -23,8 +23,6 @@ import (
"os"
"os/user"
"path/filepath"
- "strings"
- "time"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/codecs"
@@ -48,399 +46,6 @@ const Mainnet protocol.NetworkID = "mainnet"
// GenesisJSONFile is the name of the genesis.json file
const GenesisJSONFile = "genesis.json"
-// Local holds the per-node-instance configuration settings for the protocol.
-// !!! WARNING !!!
-//
-// These versioned struct tags need to be maintained CAREFULLY and treated
-// like UNIVERSAL CONSTANTS - they should not be modified once committed.
-//
-// New fields may be added to the Local struct, along with a version tag
-// denoting a new version. When doing so, also update the
-// test/testdata/configs/config-v{n}.json and call "make generate" to regenerate the constants.
-//
-// !!! WARNING !!!
-type Local struct {
- // Version tracks the current version of the defaults so we can migrate old -> new
- // This is specifically important whenever we decide to change the default value
- // for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18"`
-
- // environmental (may be overridden)
- // When enabled, stores blocks indefinitally, otherwise, only the most recents blocks
- // are being kept around. ( the precise number of recent blocks depends on the consensus parameters )
- Archival bool `version[0]:"false"`
-
- // gossipNode.go
- // how many peers to propagate to?
- GossipFanout int `version[0]:"4"`
- NetAddress string `version[0]:""`
-
- // 1 * time.Minute = 60000000000 ns
- ReconnectTime time.Duration `version[0]:"60" version[1]:"60000000000"`
-
- // what we should tell people to connect to
- PublicAddress string `version[0]:""`
-
- MaxConnectionsPerIP int `version[3]:"30"`
-
- // 0 == disable
- PeerPingPeriodSeconds int `version[0]:"0" version[18]:"10"`
-
- // for https serving
- TLSCertFile string `version[0]:""`
- TLSKeyFile string `version[0]:""`
-
- // Logging
- BaseLoggerDebugLevel uint32 `version[0]:"1" version[1]:"4"`
- // if this is 0, do not produce agreement.cadaver
- CadaverSizeTarget uint64 `version[0]:"1073741824"`
-
- // IncomingConnectionsLimit specifies the max number of long-lived incoming
- // connections. 0 means no connections allowed. -1 is unbounded.
- // Estimating 5MB per incoming connection, 5MB*800 = 4GB
- IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800"`
-
- // BroadcastConnectionsLimit specifies the number of connections that
- // will receive broadcast (gossip) messages from this node. If the
- // node has more connections than this number, it will send broadcasts
- // to the top connections by priority (outgoing connections first, then
- // by money held by peers based on their participation key). 0 means
- // no outgoing messages (not even transaction broadcasting to outgoing
- // peers). -1 means unbounded (default).
- BroadcastConnectionsLimit int `version[4]:"-1"`
-
- // AnnounceParticipationKey specifies that this node should announce its
- // participation key (with the largest stake) to its gossip peers. This
- // allows peers to prioritize our connection, if necessary, in case of a
- // DoS attack. Disabling this means that the peers will not have any
- // additional information to allow them to prioritize our connection.
- AnnounceParticipationKey bool `version[4]:"true"`
-
- // PriorityPeers specifies peer IP addresses that should always get
- // outgoing broadcast messages from this node.
- PriorityPeers map[string]bool `version[4]:""`
-
- // To make sure the algod process does not run out of FDs, algod ensures
- // that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e.,
- // IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant
- // to leave room for short-lived FDs like DNS queries, SQLite files, etc.
- ReservedFDs uint64 `version[2]:"256"`
-
- // local server
- // API endpoint address
- EndpointAddress string `version[0]:"127.0.0.1:0"`
-
- // timeouts passed to the rest http.Server implementation
- RestReadTimeoutSeconds int `version[4]:"15"`
- RestWriteTimeoutSeconds int `version[4]:"120"`
-
- // SRV-based phonebook
- DNSBootstrapID string `version[0]:"<network>.algorand.network"`
-
- // Log file size limit in bytes
- LogSizeLimit uint64 `version[0]:"1073741824"`
-
- // text/template for creating log archive filename.
- // Available template vars:
- // Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}}
- // Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}}
- //
- // If the filename ends with .gz or .bz2 it will be compressed.
- //
- // default: "node.archive.log" (no rotation, clobbers previous archive)
- LogArchiveName string `version[4]:"node.archive.log"`
-
- // LogArchiveMaxAge will be parsed by time.ParseDuration().
- // Valid units are 's' seconds, 'm' minutes, 'h' hours
- LogArchiveMaxAge string `version[4]:""`
-
- // number of consecutive attempts to catchup after which we replace the peers we're connected to
- CatchupFailurePeerRefreshRate int `version[0]:"10"`
-
- // where should the node exporter listen for metrics
- NodeExporterListenAddress string `version[0]:":9100"`
-
- // enable metric reporting flag
- EnableMetricReporting bool `version[0]:"false"`
-
- // enable top accounts reporting flag
- EnableTopAccountsReporting bool `version[0]:"false"`
-
- // enable agreement reporting flag. Currently only prints additional period events.
- EnableAgreementReporting bool `version[3]:"false"`
-
- // enable agreement timing metrics flag
- EnableAgreementTimeMetrics bool `version[3]:"false"`
-
- // The path to the node exporter.
- NodeExporterPath string `version[0]:"./node_exporter"`
-
- // The fallback DNS resolver address that would be used if the system resolver would fail to retrieve SRV records
- FallbackDNSResolverAddress string `version[0]:""`
-
- // exponential increase factor of transaction pool's fee threshold, should always be 2 in production
- TxPoolExponentialIncreaseFactor uint64 `version[0]:"2"`
-
- SuggestedFeeBlockHistory int `version[0]:"3"`
-
- // TxPoolSize is the number of transactions that fit in the transaction pool
- TxPoolSize int `version[0]:"50000" version[5]:"15000"`
-
- // number of seconds allowed for syncing transactions
- TxSyncTimeoutSeconds int64 `version[0]:"30"`
-
- // number of seconds between transaction synchronizations
- TxSyncIntervalSeconds int64 `version[0]:"60"`
-
- // the number of incoming message hashes buckets.
- IncomingMessageFilterBucketCount int `version[0]:"5"`
-
- // the size of each incoming message hash bucket.
- IncomingMessageFilterBucketSize int `version[0]:"512"`
-
- // the number of outgoing message hashes buckets.
- OutgoingMessageFilterBucketCount int `version[0]:"3"`
-
- // the size of each outgoing message hash bucket.
- OutgoingMessageFilterBucketSize int `version[0]:"128"`
-
- // enable the filtering of outgoing messages
- EnableOutgoingNetworkMessageFiltering bool `version[0]:"true"`
-
- // enable the filtering of incoming messages
- EnableIncomingMessageFilter bool `version[0]:"false"`
-
- // control enabling / disabling deadlock detection.
- // negative (-1) to disable, positive (1) to enable, 0 for default.
- DeadlockDetection int `version[1]:"0"`
-
- // Prefer to run algod Hosted (under algoh)
- // Observed by `goal` for now.
- RunHosted bool `version[3]:"false"`
-
- // The maximal number of blocks that catchup will fetch in parallel.
- // If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used as to limit the catchup.
- // Setting this variable to 0 would disable the catchup
- CatchupParallelBlocks uint64 `version[3]:"50" version[5]:"16"`
-
- // Generate AssembleBlockMetrics telemetry event
- EnableAssembleStats bool `version[0]:""`
-
- // Generate ProcessBlockMetrics telemetry event
- EnableProcessBlockStats bool `version[0]:""`
-
- // SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee
- SuggestedFeeSlidingWindowSize uint32 `version[3]:"50"`
-
- // the max size the sync server would return
- TxSyncServeResponseSize int `version[3]:"1000000"`
-
- // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
- // Note -- Indexer cannot operate on non Archival nodes
- IsIndexerActive bool `version[3]:"false"`
-
- // UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when
- // determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the
- // proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
- // field can be used.
- UseXForwardedForAddressField string `version[0]:""`
-
- // ForceRelayMessages indicates whether the network library relay messages even in the case that no NetAddress was specified.
- ForceRelayMessages bool `version[0]:"false"`
-
- // ConnectionsRateLimitingWindowSeconds is being used in conjunction with ConnectionsRateLimitingCount;
- // see ConnectionsRateLimitingCount description for further information. Providing a zero value
- // in this variable disables the connection rate limiting.
- ConnectionsRateLimitingWindowSeconds uint `version[4]:"1"`
-
- // ConnectionsRateLimitingCount is being used along with ConnectionsRateLimitingWindowSeconds to determine if
- // a connection request should be accepted or not. The gossip network examine all the incoming requests in the past
- // ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceed the ConnectionsRateLimitingCount
- // value, the connection is refused.
- ConnectionsRateLimitingCount uint `version[4]:"60"`
-
- // EnableRequestLogger enabled the logging of the incoming requests to the telemetry server.
- EnableRequestLogger bool `version[4]:"false"`
-
- // PeerConnectionsUpdateInterval defines the interval at which the peer connections information is being sent to the
- // telemetry ( when enabled ). Defined in seconds.
- PeerConnectionsUpdateInterval int `version[5]:"3600"`
-
- // EnableProfiler enables the go pprof endpoints, should be false if
- // the algod api will be exposed to untrusted individuals
- EnableProfiler bool `version[0]:"false"`
-
- // TelemetryToLog records messages to node.log that are normally sent to remote event monitoring
- TelemetryToLog bool `version[5]:"true"`
-
- // DNSSecurityFlags instructs algod validating DNS responses.
- // Possible fla values
- // 0x00 - disabled
- // 0x01 (dnssecSRV) - validate SRV response
- // 0x02 (dnssecRelayAddr) - validate relays' names to addresses resolution
- // 0x04 (dnssecTelemetryAddr) - validate telemetry and metrics names to addresses resolution
- // ...
- DNSSecurityFlags uint32 `version[6]:"1"`
-
- // EnablePingHandler controls whether the gossip node would respond to ping messages with a pong message.
- EnablePingHandler bool `version[6]:"true"`
-
- // DisableOutgoingConnectionThrottling disables the connection throttling of the network library, which
- // allow the network library to continuesly disconnect relays based on their relative ( and absolute ) performance.
- DisableOutgoingConnectionThrottling bool `version[5]:"false"`
-
- // NetworkProtocolVersion overrides network protocol version ( if present )
- NetworkProtocolVersion string `version[6]:""`
-
- // CatchpointInterval sets the interval at which catchpoint are being generated. Setting this to 0 disables the catchpoint from being generated.
- // See CatchpointTracking for more details.
- CatchpointInterval uint64 `version[7]:"10000"`
-
- // CatchpointFileHistoryLength defines how many catchpoint files we want to store back.
- // 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
- CatchpointFileHistoryLength int `version[7]:"365"`
-
- // EnableLedgerService enables the ledger serving service. The functionality of this depends on NetAddress, which must also be provided.
- // This functionality is required for the catchpoint catchup.
- EnableLedgerService bool `version[7]:"false"`
-
- // EnableBlockService enables the block serving service. The functionality of this depends on NetAddress, which must also be provided.
- // This functionality is required for the catchup.
- EnableBlockService bool `version[7]:"false"`
-
- // EnableGossipBlockService enables the block serving service over the gossip network. The functionality of this depends on NetAddress, which must also be provided.
- // This functionality is required for the relays to perform catchup from nodes.
- EnableGossipBlockService bool `version[8]:"true"`
-
- // CatchupHTTPBlockFetchTimeoutSec controls how long the http query for fetching a block from a relay would take before giving up and trying another relay.
- CatchupHTTPBlockFetchTimeoutSec int `version[9]:"4"`
-
- // CatchupGossipBlockFetchTimeoutSec controls how long the gossip query for fetching a block from a relay would take before giving up and trying another relay.
- CatchupGossipBlockFetchTimeoutSec int `version[9]:"4"`
-
- // CatchupLedgerDownloadRetryAttempts controls the number of attempt the ledger fetching would be attempted before giving up catching up to the provided catchpoint.
- CatchupLedgerDownloadRetryAttempts int `version[9]:"50"`
-
- // CatchupLedgerDownloadRetryAttempts controls the number of attempt the block fetching would be attempted before giving up catching up to the provided catchpoint.
- CatchupBlockDownloadRetryAttempts int `version[9]:"1000"`
-
- // EnableDeveloperAPI enables teal/compile, teal/dryrun API endpoints.
- // This functionality is disabled by default.
- EnableDeveloperAPI bool `version[9]:"false"`
-
- // OptimizeAccountsDatabaseOnStartup controls whether the accounts database would be optimized
- // on algod startup.
- OptimizeAccountsDatabaseOnStartup bool `version[10]:"false"`
-
- // CatchpointTracking determines if catchpoints are going to be tracked. The value is interpreted as follows:
- // A value of -1 means "don't track catchpoints".
- // A value of 1 means "track catchpoints as long as CatchpointInterval is also set to a positive non-zero value". If CatchpointInterval <= 0, no catchpoint tracking would be performed.
- // A value of 0 means automatic, which is the default value. In this mode, a non archival node would not track the catchpoints, and an archival node would track the catchpoints as long as CatchpointInterval > 0.
- // Other values of CatchpointTracking would give a warning in the log file, and would behave as if the default value was provided.
- CatchpointTracking int64 `version[11]:"0"`
-
- // LedgerSynchronousMode defines the synchronous mode used by the ledger database. The supported options are:
- // 0 - SQLite continues without syncing as soon as it has handed data off to the operating system.
- // 1 - SQLite database engine will still sync at the most critical moments, but less often than in FULL mode.
- // 2 - SQLite database engine will use the xSync method of the VFS to ensure that all content is safely written to the disk surface prior to continuing. On Mac OS, the data is additionally syncronized via fullfsync.
- // 3 - In addition to what being done in 2, it provides additional durability if the commit is followed closely by a power loss.
- // for further information see the description of SynchronousMode in dbutil.go
- LedgerSynchronousMode int `version[12]:"2"`
-
- // AccountsRebuildSynchronousMode defines the synchronous mode used by the ledger database while the account database is being rebuilt. This is not a typical operational usecase,
- // and is expected to happen only on either startup ( after enabling the catchpoint interval, or on certain database upgrades ) or during fast catchup. The values specified here
- // and their meanings are identical to the ones in LedgerSynchronousMode.
- AccountsRebuildSynchronousMode int `version[12]:"1"`
-
- // MaxCatchpointDownloadDuration defines the maximum duration a client will be keeping the outgoing connection of a catchpoint download request open for processing before
- // shutting it down. Networks that have large catchpoint files, slow connection or slow storage could be a good reason to increase this value. Note that this is a client-side only
- // configuration value, and it's independent of the actual catchpoint file size.
- MaxCatchpointDownloadDuration time.Duration `version[13]:"7200000000000"`
-
- // MinCatchpointFileDownloadBytesPerSecond defines the minimal download speed that would be considered to be "acceptable" by the catchpoint file fetcher, measured in bytes per seconds. If the
- // provided stream speed drops below this threshold, the connection would be recycled. Note that this field is evaluated per catchpoint "chunk" and not on it's own. If this field is zero,
- // the default of 20480 would be used.
- MinCatchpointFileDownloadBytesPerSecond uint64 `version[13]:"20480"`
-
- // TraceServer is a host:port to report graph propagation trace info to.
- NetworkMessageTraceServer string `version[13]:""`
-
- // VerifiedTranscationsCacheSize defines the number of transactions that the verified transactions cache would hold before cycling the cache storage in a round-robin fashion.
- VerifiedTranscationsCacheSize int `version[14]:"30000"`
-
- // EnableCatchupFromArchiveServers controls which peers the catchup service would use in order to catchup.
- // When enabled, the catchup service would use the archive servers before falling back to the relays.
- // On networks that doesn't have archive servers, this becomes a no-op, as the catchup service would have no
- // archive server to pick from, and therefore automatically selects one of the relay nodes.
- EnableCatchupFromArchiveServers bool `version[15]:"false"`
-
- // DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit would apply for
- // connections that are originating from the local machine. Setting this to "true", allow to create large
- // local-machine networks that won't trip the incoming connection limit observed by relays.
- DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
-
- // BlockServiceCustomFallbackEndpoints is a comma delimited list of endpoints which the block service uses to
- // redirect the http requests to in case it does not have the round. If it is not specified, will check
- // EnableBlockServiceFallbackToArchiver.
- BlockServiceCustomFallbackEndpoints string `version[16]:""`
-
- // EnableBlockServiceFallbackToArchiver controls whether the block service redirects the http requests to
- // an archiver or return StatusNotFound (404) when in does not have the requested round, and
- // BlockServiceCustomFallbackEndpoints is empty.
- // The archiver is randomly selected, if none is available, will return StatusNotFound (404).
- EnableBlockServiceFallbackToArchiver bool `version[16]:"true"`
-
- // CatchupBlockValidateMode is a development and testing configuration used by the catchup service.
- // It can be used to omit certain validations to speed up the catchup process, or to apply extra validations which are redundant in normal operation.
- // This field is a bit-field with:
- // bit 0: (default 0) 0: verify the block certificate; 1: skip this validation
- // bit 1: (default 0) 0: verify payset committed hash in block header matches payset hash; 1: skip this validation
- // bit 2: (default 0) 0: don't verify the transaction signatures on the block are valid; 1: verify the transaction signatures on block
- // bit 3: (default 0) 0: don't verify that the hash of the recomputed payset matches the hash of the payset committed in the block header; 1: do perform the above verification
- // Note: not all permutations of the above bitset are currently functional. In particular, the ones that are functional are:
- // 0 : default behavior.
- // 3 : speed up catchup by skipping necessary validations
- // 12 : perform all validation methods (normal and additional). These extra tests helps to verify the integrity of the compiled executable against
- // previously used executabled, and would not provide any additional security guarantees.
- CatchupBlockValidateMode int `version[16]:"0"`
-
- // Generate AccountUpdates telemetry event
- EnableAccountUpdatesStats bool `version[16]:"false"`
-
- // Time interval in nanoseconds for generating accountUpdates telemetry event
- AccountUpdatesStatsInterval time.Duration `version[16]:"5000000000"`
-
- // ParticipationKeysRefreshInterval is the duration between two consecutive checks to see if new participation
- // keys have been placed on the genesis directory.
- ParticipationKeysRefreshInterval time.Duration `version[16]:"60000000000"`
-
- // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful
- // when we have a single-node private network, where there is no other nodes that need to be communicated with.
- // features like catchpoint catchup would be rendered completly non-operational, and many of the node inner
- // working would be completly dis-functional.
- DisableNetworking bool `version[16]:"false"`
-
- // ForceFetchTransactions allows to explicitly configure a node to retrieve all the transactions
- // into it's transaction pool, even if those would not be required as the node doesn't
- // participate in the consensus or used to relay transactions.
- ForceFetchTransactions bool `version[17]:"false"`
-
- // EnableVerbosedTransactionSyncLogging enables the transaction sync to write extensive
- // message exchange information to the log file. This option is disabled by default,
- // so that the log files would not grow too rapidly.
- EnableVerbosedTransactionSyncLogging bool `version[17]:"false"`
-
- // TransactionSyncDataExchangeRate overrides the auto-calculated data exchange rate between each
- // two peers. The unit of the data exchange rate is in bytes per second. Setting the value to
- // zero implies allowing the transaction sync to dynamically calculate the value.
- TransactionSyncDataExchangeRate uint64 `version[17]:"0"`
-
- // TransactionSyncSignificantMessageThreshold define the threshold used for a transaction sync
- // message before it can be used for calculating the data exchange rate. Setting this to zero
- // would use the default values. The threshold is defined in units of bytes.
- TransactionSyncSignificantMessageThreshold uint64 `version[17]:"0"`
-}
-
// Filenames of config files within the configdir (e.g. ~/.algorand)
// ConfigFilename is the name of the config.json file where we store per-algod-instance settings
@@ -460,6 +65,10 @@ const CrashFilename = "crash.sqlite"
// It is used to track in-progress compact certificates.
const CompactCertFilename = "compactcert.sqlite"
+// ParticipationRegistryFilename is the name of the participation registry database file.
+// It is used for tracking participation key metadata.
+const ParticipationRegistryFilename = "partregistry.sqlite"
+
// ConfigurableConsensusProtocolsFilename defines a set of consensus prototocols that
// are to be loaded from the data directory ( if present ), to override the
// built-in supported consensus protocols.
@@ -522,48 +131,6 @@ func loadConfig(reader io.Reader, config *Local) error {
return dec.Decode(config)
}
-// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
-func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) {
- dnsBootstrapString := cfg.DNSBootstrap(networkID)
- bootstrapArray = strings.Split(dnsBootstrapString, ";")
- // omit zero length entries from the result set.
- for i := len(bootstrapArray) - 1; i >= 0; i-- {
- if len(bootstrapArray[i]) == 0 {
- bootstrapArray = append(bootstrapArray[:i], bootstrapArray[i+1:]...)
- }
- }
- return
-}
-
-// DNSBootstrap returns the network-specific DNSBootstrap identifier
-func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
- // if user hasn't modified the default DNSBootstrapID in the configuration
- // file and we're targeting a devnet ( via genesis file ), we the
- // explicit devnet network bootstrap.
- if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID {
- if network == Devnet {
- return "devnet.algodev.network"
- } else if network == Betanet {
- return "betanet.algodev.network"
- }
- }
- return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
-}
-
-// SaveToDisk writes the Local settings into a root/ConfigFilename file
-func (cfg Local) SaveToDisk(root string) error {
- configpath := filepath.Join(root, ConfigFilename)
- filename := os.ExpandEnv(configpath)
- return cfg.SaveToFile(filename)
-}
-
-// SaveToFile saves the config to a specific filename, allowing overriding the default name
-func (cfg Local) SaveToFile(filename string) error {
- var alwaysInclude []string
- alwaysInclude = append(alwaysInclude, "Version")
- return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true)
-}
-
type phonebookBlackWhiteList struct {
Include []string
}
@@ -670,47 +237,9 @@ const (
dnssecTelemetryAddr
)
-// DNSSecuritySRVEnforced returns true if SRV response verification enforced
-func (cfg Local) DNSSecuritySRVEnforced() bool {
- return cfg.DNSSecurityFlags&dnssecSRV != 0
-}
-
-// DNSSecurityRelayAddrEnforced returns true if relay name to ip addr resolution enforced
-func (cfg Local) DNSSecurityRelayAddrEnforced() bool {
- return cfg.DNSSecurityFlags&dnssecRelayAddr != 0
-}
-
-// DNSSecurityTelemeryAddrEnforced returns true if relay name to ip addr resolution enforced
-func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool {
- return cfg.DNSSecurityFlags&dnssecTelemetryAddr != 0
-}
-
-// ProposalAssemblyTime is the max amount of time to spend on generating a proposal block. This should eventually have it's own configurable value.
-const ProposalAssemblyTime time.Duration = 250 * time.Millisecond
-
const (
catchupValidationModeCertificate = 1
catchupValidationModePaysetHash = 2
catchupValidationModeVerifyTransactionSignatures = 4
catchupValidationModeVerifyApplyData = 8
)
-
-// CatchupVerifyCertificate returns true if certificate verification is needed
-func (cfg Local) CatchupVerifyCertificate() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModeCertificate == 0
-}
-
-// CatchupVerifyPaysetHash returns true if payset hash verification is needed
-func (cfg Local) CatchupVerifyPaysetHash() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModePaysetHash == 0
-}
-
-// CatchupVerifyTransactionSignatures returns true if transactions signature verification is needed
-func (cfg Local) CatchupVerifyTransactionSignatures() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyTransactionSignatures != 0
-}
-
-// CatchupVerifyApplyData returns true if verifying the ApplyData of the payset needed
-func (cfg Local) CatchupVerifyApplyData() bool {
- return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyApplyData != 0
-}
diff --git a/config/consensus.go b/config/consensus.go
index 99cd1884d..f0ffa1a51 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -391,6 +391,10 @@ type ConsensusParams struct {
EnableKeyregCoherencyCheck bool
EnableExtraPagesOnAppUpdate bool
+
+ // MaxProposedExpiredOnlineAccounts is the maximum number of online accounts, which need
+ // to be taken offline, that would be proposed to be taken offline.
+ MaxProposedExpiredOnlineAccounts int
}
// PaysetCommitType enumerates possible ways for the block header to commit to
@@ -465,6 +469,10 @@ var MaxExtraAppProgramLen int
//supported supported by any of the consensus protocols. used for decoding purposes.
var MaxAvailableAppProgramLen int
+// MaxProposedExpiredOnlineAccounts is the maximum number of online accounts, which need
+// to be taken offline, that would be proposed to be taken offline.
+var MaxProposedExpiredOnlineAccounts int
+
func checkSetMax(value int, curMax *int) {
if value > *curMax {
*curMax = value
@@ -501,6 +509,7 @@ func checkSetAllocBounds(p ConsensusParams) {
// Its value is much larger than any possible reasonable MaxLogCalls value in future
checkSetMax(p.MaxAppProgramLen, &MaxLogCalls)
checkSetMax(p.MaxInnerTransactions, &MaxInnerTransactions)
+ checkSetMax(p.MaxProposedExpiredOnlineAccounts, &MaxProposedExpiredOnlineAccounts)
}
// SaveConfigurableConsensus saves the configurable protocols file to the provided data directory.
@@ -1045,6 +1054,8 @@ func initConsensusProtocols() {
// Enable TEAL 6 / AVM 1.1
vFuture.LogicSigVersion = 6
+ vFuture.MaxProposedExpiredOnlineAccounts = 32
+
Consensus[protocol.ConsensusFuture] = vFuture
}
diff --git a/config/defaultsGenerator/defaultsGenerator.go b/config/defaultsGenerator/defaultsGenerator.go
index df5f4e14f..70a8e7b13 100644
--- a/config/defaultsGenerator/defaultsGenerator.go
+++ b/config/defaultsGenerator/defaultsGenerator.go
@@ -39,7 +39,7 @@ var jsonExampleFileName = flag.String("j", "", "Name of the json example file")
var autoGenHeader = `
// This file was auto generated by ./config/defaultsGenerator/defaultsGenerator.go, and SHOULD NOT BE MODIFIED in any way
-// If you want to make changes to this file, make the corresponding changes to Local in config.go and run "go generate".
+// If you want to make changes to this file, make the corresponding changes to Local in localTemplate.go and run "go generate".
`
// printExit prints the given formatted string ( i.e. just like fmt.Printf ), with the defaultGenerator executable program name
diff --git a/config/localTemplate.go b/config/localTemplate.go
new file mode 100644
index 000000000..20a141e8c
--- /dev/null
+++ b/config/localTemplate.go
@@ -0,0 +1,500 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/codecs"
+)
+
+// Local holds the per-node-instance configuration settings for the protocol.
+// !!! WARNING !!!
+//
+// These versioned struct tags need to be maintained CAREFULLY and treated
+// like UNIVERSAL CONSTANTS - they should not be modified once committed.
+//
+// New fields may be added to the Local struct, along with a version tag
+// denoting a new version. When doing so, also update the
+// test/testdata/configs/config-v{n}.json and call "make generate" to regenerate the constants.
+//
+// !!! WARNING !!!
+type Local struct {
+ // Version tracks the current version of the defaults so we can migrate old -> new
+ // This is specifically important whenever we decide to change the default value
+ // for an existing parameter. This field tag must be updated any time we add a new version.
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19"`
+
+ // environmental (may be overridden)
+ // When enabled, stores blocks indefinitally, otherwise, only the most recents blocks
+ // are being kept around. ( the precise number of recent blocks depends on the consensus parameters )
+ Archival bool `version[0]:"false"`
+
+ // gossipNode.go
+ // how many peers to propagate to?
+ GossipFanout int `version[0]:"4"`
+ NetAddress string `version[0]:""`
+
+ // 1 * time.Minute = 60000000000 ns
+ ReconnectTime time.Duration `version[0]:"60" version[1]:"60000000000"`
+
+ // what we should tell people to connect to
+ PublicAddress string `version[0]:""`
+
+ MaxConnectionsPerIP int `version[3]:"30"`
+
+ // 0 == disable
+ PeerPingPeriodSeconds int `version[0]:"0"`
+
+ // for https serving
+ TLSCertFile string `version[0]:""`
+ TLSKeyFile string `version[0]:""`
+
+ // Logging
+ BaseLoggerDebugLevel uint32 `version[0]:"1" version[1]:"4"`
+ // if this is 0, do not produce agreement.cadaver
+ CadaverSizeTarget uint64 `version[0]:"1073741824"`
+
+ // IncomingConnectionsLimit specifies the max number of long-lived incoming
+ // connections. 0 means no connections allowed. -1 is unbounded.
+ // Estimating 5MB per incoming connection, 5MB*800 = 4GB
+ IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800"`
+
+ // BroadcastConnectionsLimit specifies the number of connections that
+ // will receive broadcast (gossip) messages from this node. If the
+ // node has more connections than this number, it will send broadcasts
+ // to the top connections by priority (outgoing connections first, then
+ // by money held by peers based on their participation key). 0 means
+ // no outgoing messages (not even transaction broadcasting to outgoing
+ // peers). -1 means unbounded (default).
+ BroadcastConnectionsLimit int `version[4]:"-1"`
+
+ // AnnounceParticipationKey specifies that this node should announce its
+ // participation key (with the largest stake) to its gossip peers. This
+ // allows peers to prioritize our connection, if necessary, in case of a
+ // DoS attack. Disabling this means that the peers will not have any
+ // additional information to allow them to prioritize our connection.
+ AnnounceParticipationKey bool `version[4]:"true"`
+
+ // PriorityPeers specifies peer IP addresses that should always get
+ // outgoing broadcast messages from this node.
+ PriorityPeers map[string]bool `version[4]:""`
+
+ // To make sure the algod process does not run out of FDs, algod ensures
+ // that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e.,
+ // IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant
+ // to leave room for short-lived FDs like DNS queries, SQLite files, etc.
+ ReservedFDs uint64 `version[2]:"256"`
+
+ // local server
+ // API endpoint address
+ EndpointAddress string `version[0]:"127.0.0.1:0"`
+
+ // timeouts passed to the rest http.Server implementation
+ RestReadTimeoutSeconds int `version[4]:"15"`
+ RestWriteTimeoutSeconds int `version[4]:"120"`
+
+ // SRV-based phonebook
+ DNSBootstrapID string `version[0]:"<network>.algorand.network"`
+
+ // Log file size limit in bytes
+ LogSizeLimit uint64 `version[0]:"1073741824"`
+
+ // text/template for creating log archive filename.
+ // Available template vars:
+ // Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}}
+ // Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}}
+ //
+ // If the filename ends with .gz or .bz2 it will be compressed.
+ //
+ // default: "node.archive.log" (no rotation, clobbers previous archive)
+ LogArchiveName string `version[4]:"node.archive.log"`
+
+ // LogArchiveMaxAge will be parsed by time.ParseDuration().
+ // Valid units are 's' seconds, 'm' minutes, 'h' hours
+ LogArchiveMaxAge string `version[4]:""`
+
+ // number of consecutive attempts to catchup after which we replace the peers we're connected to
+ CatchupFailurePeerRefreshRate int `version[0]:"10"`
+
+ // where should the node exporter listen for metrics
+ NodeExporterListenAddress string `version[0]:":9100"`
+
+ // enable metric reporting flag
+ EnableMetricReporting bool `version[0]:"false"`
+
+ // enable top accounts reporting flag
+ EnableTopAccountsReporting bool `version[0]:"false"`
+
+ // enable agreement reporting flag. Currently only prints additional period events.
+ EnableAgreementReporting bool `version[3]:"false"`
+
+ // enable agreement timing metrics flag
+ EnableAgreementTimeMetrics bool `version[3]:"false"`
+
+ // The path to the node exporter.
+ NodeExporterPath string `version[0]:"./node_exporter"`
+
+ // The fallback DNS resolver address that would be used if the system resolver would fail to retrieve SRV records
+ FallbackDNSResolverAddress string `version[0]:""`
+
+ // exponential increase factor of transaction pool's fee threshold, should always be 2 in production
+ TxPoolExponentialIncreaseFactor uint64 `version[0]:"2"`
+
+ SuggestedFeeBlockHistory int `version[0]:"3"`
+
+ // TxPoolSize is the number of transactions that fit in the transaction pool
+ TxPoolSize int `version[0]:"50000" version[5]:"15000"`
+
+ // number of seconds allowed for syncing transactions
+ TxSyncTimeoutSeconds int64 `version[0]:"30"`
+
+ // number of seconds between transaction synchronizations
+ TxSyncIntervalSeconds int64 `version[0]:"60"`
+
+ // the number of incoming message hashes buckets.
+ IncomingMessageFilterBucketCount int `version[0]:"5"`
+
+ // the size of each incoming message hash bucket.
+ IncomingMessageFilterBucketSize int `version[0]:"512"`
+
+ // the number of outgoing message hashes buckets.
+ OutgoingMessageFilterBucketCount int `version[0]:"3"`
+
+ // the size of each outgoing message hash bucket.
+ OutgoingMessageFilterBucketSize int `version[0]:"128"`
+
+ // enable the filtering of outgoing messages
+ EnableOutgoingNetworkMessageFiltering bool `version[0]:"true"`
+
+ // enable the filtering of incoming messages
+ EnableIncomingMessageFilter bool `version[0]:"false"`
+
+ // control enabling / disabling deadlock detection.
+ // negative (-1) to disable, positive (1) to enable, 0 for default.
+ DeadlockDetection int `version[1]:"0"`
+
+ // Prefer to run algod Hosted (under algoh)
+ // Observed by `goal` for now.
+ RunHosted bool `version[3]:"false"`
+
+ // The maximal number of blocks that catchup will fetch in parallel.
+ // If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used as to limit the catchup.
+ // Setting this variable to 0 would disable the catchup
+ CatchupParallelBlocks uint64 `version[3]:"50" version[5]:"16"`
+
+ // Generate AssembleBlockMetrics telemetry event
+ EnableAssembleStats bool `version[0]:""`
+
+ // Generate ProcessBlockMetrics telemetry event
+ EnableProcessBlockStats bool `version[0]:""`
+
+ // SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee
+ SuggestedFeeSlidingWindowSize uint32 `version[3]:"50"`
+
+ // the max size the sync server would return
+ TxSyncServeResponseSize int `version[3]:"1000000"`
+
+ // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
+ // Note -- Indexer cannot operate on non Archival nodes
+ IsIndexerActive bool `version[3]:"false"`
+
+ // UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when
+ // determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the
+ // proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
+ // field can be used.
+ UseXForwardedForAddressField string `version[0]:""`
+
+ // ForceRelayMessages indicates whether the network library relay messages even in the case that no NetAddress was specified.
+ ForceRelayMessages bool `version[0]:"false"`
+
+ // ConnectionsRateLimitingWindowSeconds is being used in conjunction with ConnectionsRateLimitingCount;
+ // see ConnectionsRateLimitingCount description for further information. Providing a zero value
+ // in this variable disables the connection rate limiting.
+ ConnectionsRateLimitingWindowSeconds uint `version[4]:"1"`
+
+ // ConnectionsRateLimitingCount is being used along with ConnectionsRateLimitingWindowSeconds to determine if
+ // a connection request should be accepted or not. The gossip network examine all the incoming requests in the past
+ // ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceed the ConnectionsRateLimitingCount
+ // value, the connection is refused.
+ ConnectionsRateLimitingCount uint `version[4]:"60"`
+
+ // EnableRequestLogger enabled the logging of the incoming requests to the telemetry server.
+ EnableRequestLogger bool `version[4]:"false"`
+
+ // PeerConnectionsUpdateInterval defines the interval at which the peer connections information is being sent to the
+ // telemetry ( when enabled ). Defined in seconds.
+ PeerConnectionsUpdateInterval int `version[5]:"3600"`
+
+ // EnableProfiler enables the go pprof endpoints, should be false if
+ // the algod api will be exposed to untrusted individuals
+ EnableProfiler bool `version[0]:"false"`
+
+ // TelemetryToLog records messages to node.log that are normally sent to remote event monitoring
+ TelemetryToLog bool `version[5]:"true"`
+
+ // DNSSecurityFlags instructs algod validating DNS responses.
+ // Possible fla values
+ // 0x00 - disabled
+ // 0x01 (dnssecSRV) - validate SRV response
+ // 0x02 (dnssecRelayAddr) - validate relays' names to addresses resolution
+ // 0x04 (dnssecTelemetryAddr) - validate telemetry and metrics names to addresses resolution
+ // ...
+ DNSSecurityFlags uint32 `version[6]:"1"`
+
+ // EnablePingHandler controls whether the gossip node would respond to ping messages with a pong message.
+ EnablePingHandler bool `version[6]:"true"`
+
+ // DisableOutgoingConnectionThrottling disables the connection throttling of the network library, which
+ // allow the network library to continuesly disconnect relays based on their relative ( and absolute ) performance.
+ DisableOutgoingConnectionThrottling bool `version[5]:"false"`
+
+ // NetworkProtocolVersion overrides network protocol version ( if present )
+ NetworkProtocolVersion string `version[6]:""`
+
+ // CatchpointInterval sets the interval at which catchpoint are being generated. Setting this to 0 disables the catchpoint from being generated.
+ // See CatchpointTracking for more details.
+ CatchpointInterval uint64 `version[7]:"10000"`
+
+ // CatchpointFileHistoryLength defines how many catchpoint files we want to store back.
+ // 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
+ CatchpointFileHistoryLength int `version[7]:"365"`
+
+ // EnableLedgerService enables the ledger serving service. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for the catchpoint catchup.
+ EnableLedgerService bool `version[7]:"false"`
+
+ // EnableBlockService enables the block serving service. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for the catchup.
+ EnableBlockService bool `version[7]:"false"`
+
+ // EnableGossipBlockService enables the block serving service over the gossip network. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for the relays to perform catchup from nodes.
+ EnableGossipBlockService bool `version[8]:"true"`
+
+ // CatchupHTTPBlockFetchTimeoutSec controls how long the http query for fetching a block from a relay would take before giving up and trying another relay.
+ CatchupHTTPBlockFetchTimeoutSec int `version[9]:"4"`
+
+ // CatchupGossipBlockFetchTimeoutSec controls how long the gossip query for fetching a block from a relay would take before giving up and trying another relay.
+ CatchupGossipBlockFetchTimeoutSec int `version[9]:"4"`
+
+ // CatchupLedgerDownloadRetryAttempts controls the number of attempt the ledger fetching would be attempted before giving up catching up to the provided catchpoint.
+ CatchupLedgerDownloadRetryAttempts int `version[9]:"50"`
+
+ // CatchupLedgerDownloadRetryAttempts controls the number of attempt the block fetching would be attempted before giving up catching up to the provided catchpoint.
+ CatchupBlockDownloadRetryAttempts int `version[9]:"1000"`
+
+ // EnableDeveloperAPI enables teal/compile, teal/dryrun API endpoints.
+ // This functionality is disabled by default.
+ EnableDeveloperAPI bool `version[9]:"false"`
+
+ // OptimizeAccountsDatabaseOnStartup controls whether the accounts database would be optimized
+ // on algod startup.
+ OptimizeAccountsDatabaseOnStartup bool `version[10]:"false"`
+
+ // CatchpointTracking determines if catchpoints are going to be tracked. The value is interpreted as follows:
+ // A value of -1 means "don't track catchpoints".
+ // A value of 1 means "track catchpoints as long as CatchpointInterval is also set to a positive non-zero value". If CatchpointInterval <= 0, no catchpoint tracking would be performed.
+ // A value of 0 means automatic, which is the default value. In this mode, a non archival node would not track the catchpoints, and an archival node would track the catchpoints as long as CatchpointInterval > 0.
+ // Other values of CatchpointTracking would give a warning in the log file, and would behave as if the default value was provided.
+ CatchpointTracking int64 `version[11]:"0"`
+
+ // LedgerSynchronousMode defines the synchronous mode used by the ledger database. The supported options are:
+ // 0 - SQLite continues without syncing as soon as it has handed data off to the operating system.
+ // 1 - SQLite database engine will still sync at the most critical moments, but less often than in FULL mode.
+ // 2 - SQLite database engine will use the xSync method of the VFS to ensure that all content is safely written to the disk surface prior to continuing. On Mac OS, the data is additionally syncronized via fullfsync.
+ // 3 - In addition to what being done in 2, it provides additional durability if the commit is followed closely by a power loss.
+ // for further information see the description of SynchronousMode in dbutil.go
+ LedgerSynchronousMode int `version[12]:"2"`
+
+ // AccountsRebuildSynchronousMode defines the synchronous mode used by the ledger database while the account database is being rebuilt. This is not a typical operational usecase,
+ // and is expected to happen only on either startup ( after enabling the catchpoint interval, or on certain database upgrades ) or during fast catchup. The values specified here
+ // and their meanings are identical to the ones in LedgerSynchronousMode.
+ AccountsRebuildSynchronousMode int `version[12]:"1"`
+
+ // MaxCatchpointDownloadDuration defines the maximum duration a client will be keeping the outgoing connection of a catchpoint download request open for processing before
+ // shutting it down. Networks that have large catchpoint files, slow connection or slow storage could be a good reason to increase this value. Note that this is a client-side only
+ // configuration value, and it's independent of the actual catchpoint file size.
+ MaxCatchpointDownloadDuration time.Duration `version[13]:"7200000000000"`
+
+ // MinCatchpointFileDownloadBytesPerSecond defines the minimal download speed that would be considered to be "acceptable" by the catchpoint file fetcher, measured in bytes per seconds. If the
+ // provided stream speed drops below this threshold, the connection would be recycled. Note that this field is evaluated per catchpoint "chunk" and not on it's own. If this field is zero,
+ // the default of 20480 would be used.
+ MinCatchpointFileDownloadBytesPerSecond uint64 `version[13]:"20480"`
+
+ // TraceServer is a host:port to report graph propagation trace info to.
+ NetworkMessageTraceServer string `version[13]:""`
+
+ // VerifiedTranscationsCacheSize defines the number of transactions that the verified transactions cache would hold before cycling the cache storage in a round-robin fashion.
+ VerifiedTranscationsCacheSize int `version[14]:"30000"`
+
+ // EnableCatchupFromArchiveServers controls which peers the catchup service would use in order to catchup.
+ // When enabled, the catchup service would use the archive servers before falling back to the relays.
+ // On networks that doesn't have archive servers, this becomes a no-op, as the catchup service would have no
+ // archive server to pick from, and therefore automatically selects one of the relay nodes.
+ EnableCatchupFromArchiveServers bool `version[15]:"false"`
+
+ // DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit would apply for
+ // connections that are originating from the local machine. Setting this to "true", allow to create large
+ // local-machine networks that won't trip the incoming connection limit observed by relays.
+ DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
+
+ // BlockServiceCustomFallbackEndpoints is a comma delimited list of endpoints which the block service uses to
+ // redirect the http requests to in case it does not have the round. If it is not specified, will check
+ // EnableBlockServiceFallbackToArchiver.
+ BlockServiceCustomFallbackEndpoints string `version[16]:""`
+
+ // EnableBlockServiceFallbackToArchiver controls whether the block service redirects the http requests to
+ // an archiver or return StatusNotFound (404) when in does not have the requested round, and
+ // BlockServiceCustomFallbackEndpoints is empty.
+ // The archiver is randomly selected, if none is available, will return StatusNotFound (404).
+ EnableBlockServiceFallbackToArchiver bool `version[16]:"true"`
+
+ // CatchupBlockValidateMode is a development and testing configuration used by the catchup service.
+ // It can be used to omit certain validations to speed up the catchup process, or to apply extra validations which are redundant in normal operation.
+ // This field is a bit-field with:
+ // bit 0: (default 0) 0: verify the block certificate; 1: skip this validation
+ // bit 1: (default 0) 0: verify payset committed hash in block header matches payset hash; 1: skip this validation
+ // bit 2: (default 0) 0: don't verify the transaction signatures on the block are valid; 1: verify the transaction signatures on block
+ // bit 3: (default 0) 0: don't verify that the hash of the recomputed payset matches the hash of the payset committed in the block header; 1: do perform the above verification
+ // Note: not all permutations of the above bitset are currently functional. In particular, the ones that are functional are:
+ // 0 : default behavior.
+ // 3 : speed up catchup by skipping necessary validations
+ // 12 : perform all validation methods (normal and additional). These extra tests helps to verify the integrity of the compiled executable against
+ // previously used executabled, and would not provide any additional security guarantees.
+ CatchupBlockValidateMode int `version[16]:"0"`
+
+ // Generate AccountUpdates telemetry event
+ EnableAccountUpdatesStats bool `version[16]:"false"`
+
+ // Time interval in nanoseconds for generating accountUpdates telemetry event
+ AccountUpdatesStatsInterval time.Duration `version[16]:"5000000000"`
+
+ // ParticipationKeysRefreshInterval is the duration between two consecutive checks to see if new participation
+ // keys have been placed on the genesis directory.
+ ParticipationKeysRefreshInterval time.Duration `version[16]:"60000000000"`
+
+ // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful
+ // when we have a single-node private network, where there is no other nodes that need to be communicated with.
+ // features like catchpoint catchup would be rendered completly non-operational, and many of the node inner
+ // working would be completly dis-functional.
+ DisableNetworking bool `version[16]:"false"`
+
+ // ForceFetchTransactions allows to explicitly configure a node to retrieve all the transactions
+ // into it's transaction pool, even if those would not be required as the node doesn't
+ // participate in the consensus or used to relay transactions.
+ ForceFetchTransactions bool `version[17]:"false"`
+
+ // EnableVerbosedTransactionSyncLogging enables the transaction sync to write extensive
+ // message exchange information to the log file. This option is disabled by default,
+ // so that the log files would not grow too rapidly.
+ EnableVerbosedTransactionSyncLogging bool `version[17]:"false"`
+
+ // TransactionSyncDataExchangeRate overrides the auto-calculated data exchange rate between each
+ // two peers. The unit of the data exchange rate is in bytes per second. Setting the value to
+ // zero implies allowing the transaction sync to dynamically calculate the value.
+ TransactionSyncDataExchangeRate uint64 `version[17]:"0"`
+
+ // TransactionSyncSignificantMessageThreshold define the threshold used for a transaction sync
+ // message before it can be used for calculating the data exchange rate. Setting this to zero
+ // would use the default values. The threshold is defined in units of bytes.
+ TransactionSyncSignificantMessageThreshold uint64 `version[17]:"0"`
+
+ // ProposalAssemblyTime is the max amount of time to spend on generating a proposal block.
+ ProposalAssemblyTime time.Duration `version[19]:"250000000"`
+}
+
+// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
+func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) {
+ dnsBootstrapString := cfg.DNSBootstrap(networkID)
+ bootstrapArray = strings.Split(dnsBootstrapString, ";")
+ // omit zero length entries from the result set.
+ for i := len(bootstrapArray) - 1; i >= 0; i-- {
+ if len(bootstrapArray[i]) == 0 {
+ bootstrapArray = append(bootstrapArray[:i], bootstrapArray[i+1:]...)
+ }
+ }
+ return
+}
+
+// DNSBootstrap returns the network-specific DNSBootstrap identifier
+func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
+ // if user hasn't modified the default DNSBootstrapID in the configuration
+ // file and we're targeting a devnet ( via genesis file ), we the
+ // explicit devnet network bootstrap.
+ if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID {
+ if network == Devnet {
+ return "devnet.algodev.network"
+ } else if network == Betanet {
+ return "betanet.algodev.network"
+ }
+ }
+ return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
+}
+
+// SaveToDisk writes the Local settings into a root/ConfigFilename file
+func (cfg Local) SaveToDisk(root string) error {
+ configpath := filepath.Join(root, ConfigFilename)
+ filename := os.ExpandEnv(configpath)
+ return cfg.SaveToFile(filename)
+}
+
+// SaveToFile saves the config to a specific filename, allowing overriding the default name
+func (cfg Local) SaveToFile(filename string) error {
+ var alwaysInclude []string
+ alwaysInclude = append(alwaysInclude, "Version")
+ return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true)
+}
+
+// DNSSecuritySRVEnforced returns true if SRV response verification enforced
+func (cfg Local) DNSSecuritySRVEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecSRV != 0
+}
+
+// DNSSecurityRelayAddrEnforced returns true if relay name to ip addr resolution enforced
+func (cfg Local) DNSSecurityRelayAddrEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecRelayAddr != 0
+}
+
+// DNSSecurityTelemeryAddrEnforced returns true if relay name to ip addr resolution enforced
+func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecTelemetryAddr != 0
+}
+
+// CatchupVerifyCertificate returns true if certificate verification is needed
+func (cfg Local) CatchupVerifyCertificate() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeCertificate == 0
+}
+
+// CatchupVerifyPaysetHash returns true if payset hash verification is needed
+func (cfg Local) CatchupVerifyPaysetHash() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModePaysetHash == 0
+}
+
+// CatchupVerifyTransactionSignatures returns true if transactions signature verification is needed
+func (cfg Local) CatchupVerifyTransactionSignatures() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyTransactionSignatures != 0
+}
+
+// CatchupVerifyApplyData returns true if verifying the ApplyData of the payset needed
+func (cfg Local) CatchupVerifyApplyData() bool {
+ return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyApplyData != 0
+}
diff --git a/config/local_defaults.go b/config/local_defaults.go
index de8090339..ae1aa7043 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -15,12 +15,12 @@
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
// This file was auto generated by ./config/defaultsGenerator/defaultsGenerator.go, and SHOULD NOT BE MODIFIED in any way
-// If you want to make changes to this file, make the corresponding changes to Local in config.go and run "go generate".
+// If you want to make changes to this file, make the corresponding changes to Local in localTemplate.go and run "go generate".
package config
var defaultLocal = Local{
- Version: 18,
+ Version: 19,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AnnounceParticipationKey: true,
@@ -92,8 +92,9 @@ var defaultLocal = Local{
OutgoingMessageFilterBucketSize: 128,
ParticipationKeysRefreshInterval: 60000000000,
PeerConnectionsUpdateInterval: 3600,
- PeerPingPeriodSeconds: 10,
+ PeerPingPeriodSeconds: 0,
PriorityPeers: map[string]bool{},
+ ProposalAssemblyTime: 250000000,
PublicAddress: "",
ReconnectTime: 60000000000,
ReservedFDs: 256,
diff --git a/config/version.go b/config/version.go
index 38e6661f7..61d4ff58a 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 1
+const VersionMinor = 2
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
index 71df901d7..c7f84fa45 100644
--- a/crypto/batchverifier.go
+++ b/crypto/batchverifier.go
@@ -29,7 +29,7 @@ const minBatchVerifierAlloc = 16
// Batch verifications errors
var (
- ErrBatchVerificationFailed = errors.New("At least on signature didn't pass verification")
+ ErrBatchVerificationFailed = errors.New("At least one signature didn't pass verification")
ErrZeroTranscationsInBatch = errors.New("Could not validate empty signature set")
)
diff --git a/crypto/compactcert/builder.go b/crypto/compactcert/builder.go
index f830a40ff..5966ca9a7 100644
--- a/crypto/compactcert/builder.go
+++ b/crypto/compactcert/builder.go
@@ -45,7 +45,7 @@ type Builder struct {
sigs []sigslot // Indexed by pos in participants
sigsHasValidL bool // The L values in sigs are consistent with weights
signedWeight uint64 // Total weight of signatures so far
- participants []Participant
+ participants []basics.Participant
parttree *merklearray.Tree
// Cached cert, if Build() was called and no subsequent
@@ -57,7 +57,7 @@ type Builder struct {
// to be signed, as well as other security parameters, are specified in
// param. The participants that will sign the message are in part and
// parttree.
-func MkBuilder(param Params, part []Participant, parttree *merklearray.Tree) (*Builder, error) {
+func MkBuilder(param Params, part []basics.Participant, parttree *merklearray.Tree) (*Builder, error) {
npart := len(part)
b := &Builder{
diff --git a/crypto/compactcert/builder_test.go b/crypto/compactcert/builder_test.go
index 0bab55da7..13738b5c3 100644
--- a/crypto/compactcert/builder_test.go
+++ b/crypto/compactcert/builder_test.go
@@ -36,7 +36,7 @@ func (m TestMessage) ToBeHashed() (protocol.HashID, []byte) {
}
type PartCommit struct {
- participants []Participant
+ participants []basics.Participant
}
func (pc PartCommit) Length() uint64 {
@@ -78,10 +78,10 @@ func TestBuildVerify(t *testing.T) {
// Share the key; we allow the same vote key to appear in multiple accounts..
key := crypto.GenerateOneTimeSignatureSecrets(0, 1)
- var parts []Participant
+ var parts []basics.Participant
var sigs []crypto.OneTimeSignature
for i := 0; i < npartHi; i++ {
- part := Participant{
+ part := basics.Participant{
PK: key.OneTimeSignatureVerifier,
Weight: uint64(totalWeight / 2 / npartHi),
KeyDilution: 10000,
@@ -91,7 +91,7 @@ func TestBuildVerify(t *testing.T) {
}
for i := 0; i < npartLo; i++ {
- part := Participant{
+ part := basics.Participant{
PK: key.OneTimeSignatureVerifier,
Weight: uint64(totalWeight / 2 / npartLo),
KeyDilution: 10000,
@@ -165,12 +165,12 @@ func BenchmarkBuildVerify(b *testing.B) {
SecKQ: 128,
}
- var parts []Participant
+ var parts []basics.Participant
var partkeys []*crypto.OneTimeSignatureSecrets
var sigs []crypto.OneTimeSignature
for i := 0; i < npart; i++ {
key := crypto.GenerateOneTimeSignatureSecrets(0, 1)
- part := Participant{
+ part := basics.Participant{
PK: key.OneTimeSignatureVerifier,
Weight: uint64(totalWeight / npart),
KeyDilution: 10000,
diff --git a/crypto/compactcert/common.go b/crypto/compactcert/common.go
index 53b469bfd..d2370d779 100644
--- a/crypto/compactcert/common.go
+++ b/crypto/compactcert/common.go
@@ -113,5 +113,5 @@ func numReveals(signedWeight uint64, provenWeight uint64, secKQ uint64, bound ui
}
func (p Params) numReveals(signedWeight uint64) (uint64, error) {
- return numReveals(signedWeight, p.ProvenWeight, p.SecKQ, MaxReveals)
+ return numReveals(signedWeight, p.ProvenWeight, p.SecKQ, maxReveals)
}
diff --git a/crypto/compactcert/msgp_gen.go b/crypto/compactcert/msgp_gen.go
index 60d36184f..2f2301653 100644
--- a/crypto/compactcert/msgp_gen.go
+++ b/crypto/compactcert/msgp_gen.go
@@ -26,14 +26,6 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// Participant
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
// Reveal
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -189,8 +181,8 @@ func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "SigProofs")
return
}
- if zb0007 > MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(MaxProofDigests))
+ if zb0007 > maxProofDigests {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(maxProofDigests))
err = msgp.WrapError(err, "struct-from-array", "SigProofs")
return
}
@@ -218,8 +210,8 @@ func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "PartProofs")
return
}
- if zb0009 > MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0009), uint64(MaxProofDigests))
+ if zb0009 > maxProofDigests {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(maxProofDigests))
err = msgp.WrapError(err, "struct-from-array", "PartProofs")
return
}
@@ -247,8 +239,8 @@ func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "Reveals")
return
}
- if zb0011 > MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(MaxReveals))
+ if zb0011 > maxReveals {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(maxReveals))
err = msgp.WrapError(err, "struct-from-array", "Reveals")
return
}
@@ -317,8 +309,8 @@ func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "SigProofs")
return
}
- if zb0013 > MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(MaxProofDigests))
+ if zb0013 > maxProofDigests {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(maxProofDigests))
err = msgp.WrapError(err, "SigProofs")
return
}
@@ -344,8 +336,8 @@ func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "PartProofs")
return
}
- if zb0015 > MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(MaxProofDigests))
+ if zb0015 > maxProofDigests {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(maxProofDigests))
err = msgp.WrapError(err, "PartProofs")
return
}
@@ -371,8 +363,8 @@ func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Reveals")
return
}
- if zb0017 > MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(MaxReveals))
+ if zb0017 > maxReveals {
+ err = msgp.ErrOverflow(uint64(zb0017), uint64(maxReveals))
err = msgp.WrapError(err, "Reveals")
return
}
@@ -663,158 +655,6 @@ func (z *CompactOneTimeSignature) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z *Participant) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(3)
- var zb0001Mask uint8 /* 4 bits */
- if (*z).KeyDilution == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).PK.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if (*z).Weight == 0 {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "d"
- o = append(o, 0xa1, 0x64)
- o = msgp.AppendUint64(o, (*z).KeyDilution)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "p"
- o = append(o, 0xa1, 0x70)
- o = (*z).PK.MarshalMsg(o)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "w"
- o = append(o, 0xa1, 0x77)
- o = msgp.AppendUint64(o, (*z).Weight)
- }
- }
- return
-}
-
-func (_ *Participant) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*Participant)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *Participant) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).PK.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PK")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Weight")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KeyDilution")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = Participant{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "p":
- bts, err = (*z).PK.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PK")
- return
- }
- case "w":
- (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Weight")
- return
- }
- case "d":
- (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "KeyDilution")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *Participant) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*Participant)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *Participant) Msgsize() (s int) {
- s = 1 + 2 + (*z).PK.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *Participant) MsgIsZero() bool {
- return ((*z).PK.MsgIsZero()) && ((*z).Weight == 0) && ((*z).KeyDilution == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
func (z *Reveal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
diff --git a/crypto/compactcert/msgp_gen_test.go b/crypto/compactcert/msgp_gen_test.go
index 5b292879e..23ebb5a3f 100644
--- a/crypto/compactcert/msgp_gen_test.go
+++ b/crypto/compactcert/msgp_gen_test.go
@@ -132,66 +132,6 @@ func BenchmarkUnmarshalCompactOneTimeSignature(b *testing.B) {
}
}
-func TestMarshalUnmarshalParticipant(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := Participant{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingParticipant(t *testing.T) {
- protocol.RunEncodingTest(t, &Participant{})
-}
-
-func BenchmarkMarshalMsgParticipant(b *testing.B) {
- v := Participant{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgParticipant(b *testing.B) {
- v := Participant{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalParticipant(b *testing.B) {
- v := Participant{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
func TestMarshalUnmarshalReveal(t *testing.T) {
partitiontest.PartitionTest(t)
v := Reveal{}
diff --git a/crypto/compactcert/structs.go b/crypto/compactcert/structs.go
index c0e333868..1e02e4eaf 100644
--- a/crypto/compactcert/structs.go
+++ b/crypto/compactcert/structs.go
@@ -30,34 +30,6 @@ type Params struct {
SecKQ uint64 // Security parameter (k+q) from analysis document
}
-// A Participant corresponds to an account whose AccountData.Status
-// is Online, and for which the expected sigRound satisfies
-// AccountData.VoteFirstValid <= sigRound <= AccountData.VoteLastValid.
-//
-// In the Algorand ledger, it is possible for multiple accounts to have
-// the same PK. Thus, the PK is not necessarily unique among Participants.
-// However, each account will produce a unique Participant struct, to avoid
-// potential DoS attacks where one account claims to have the same VoteID PK
-// as another account.
-type Participant struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- // PK is AccountData.VoteID.
- PK crypto.OneTimeSignatureVerifier `codec:"p"`
-
- // Weight is AccountData.MicroAlgos.
- Weight uint64 `codec:"w"`
-
- // KeyDilution is AccountData.KeyDilution() with the protocol for sigRound
- // as expected by the Builder.
- KeyDilution uint64 `codec:"d"`
-}
-
-// ToBeHashed implements the crypto.Hashable interface.
-func (p Participant) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.CompactCertPart, protocol.Encode(&p)
-}
-
// CompactOneTimeSignature is crypto.OneTimeSignature with omitempty
type CompactOneTimeSignature struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
@@ -87,15 +59,13 @@ func (ssc sigslotCommit) ToBeHashed() (protocol.HashID, []byte) {
type Reveal struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- SigSlot sigslotCommit `codec:"s"`
- Part Participant `codec:"p"`
+ SigSlot sigslotCommit `codec:"s"`
+ Part basics.Participant `codec:"p"`
}
-// MaxReveals is a bound on allocation and on numReveals to limit log computation
-const MaxReveals = 1024
-
-// MaxProofDigests is a bound on allocation on number of proofs
-const MaxProofDigests = 20 * MaxReveals
+// maxReveals is a bound on allocation and on numReveals to limit log computation
+const maxReveals = 1024
+const maxProofDigests = 20 * maxReveals
// Cert represents a compact certificate.
type Cert struct {
@@ -103,13 +73,13 @@ type Cert struct {
SigCommit crypto.Digest `codec:"c"`
SignedWeight uint64 `codec:"w"`
- SigProofs []crypto.Digest `codec:"S,allocbound=MaxProofDigests"`
- PartProofs []crypto.Digest `codec:"P,allocbound=MaxProofDigests"`
+ SigProofs []crypto.Digest `codec:"S,allocbound=maxProofDigests"`
+ PartProofs []crypto.Digest `codec:"P,allocbound=maxProofDigests"`
// Reveals is a sparse map from the position being revealed
// to the corresponding elements from the sigs and participants
// arrays.
- Reveals map[uint64]Reveal `codec:"r,allocbound=MaxReveals"`
+ Reveals map[uint64]Reveal `codec:"r,allocbound=maxReveals"`
}
// SortUint64 implements sorting by uint64 keys for
diff --git a/crypto/curve25519.go b/crypto/curve25519.go
index eede5d337..e0bf278de 100644
--- a/crypto/curve25519.go
+++ b/crypto/curve25519.go
@@ -69,19 +69,10 @@ func init() {
// A Seed holds the entropy needed to generate cryptographic keys.
type Seed ed25519Seed
-// PublicKeyByteLength is the length, in bytes, of a public key
-const PublicKeyByteLength = 32
-
-// PrivateKeyByteLength is the length, in bytes, of a private key
-const PrivateKeyByteLength = 64
-
-// SignatureByteLength is the length, in bytes, of a signature
-const SignatureByteLength = 64
-
/* Classical signatures */
-type ed25519Signature [SignatureByteLength]byte
-type ed25519PublicKey [PublicKeyByteLength]byte
-type ed25519PrivateKey [PrivateKeyByteLength]byte
+type ed25519Signature [64]byte
+type ed25519PublicKey [32]byte
+type ed25519PrivateKey [64]byte
type ed25519Seed [32]byte
// MasterDerivationKey is used to derive ed25519 keys for use in wallets
diff --git a/crypto/msgp_gen.go b/crypto/msgp_gen.go
index 4690dbfc4..84fe8622e 100644
--- a/crypto/msgp_gen.go
+++ b/crypto/msgp_gen.go
@@ -379,8 +379,8 @@ func (z *MultisigSig) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "Subsigs")
return
}
- if zb0004 > MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(MaxMultisig))
+ if zb0004 > maxMultisig {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(maxMultisig))
err = msgp.WrapError(err, "struct-from-array", "Subsigs")
return
}
@@ -442,8 +442,8 @@ func (z *MultisigSig) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Subsigs")
return
}
- if zb0006 > MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(MaxMultisig))
+ if zb0006 > maxMultisig {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(maxMultisig))
err = msgp.WrapError(err, "Subsigs")
return
}
@@ -613,7 +613,7 @@ func (_ *MultisigSubsig) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MultisigSubsig) Msgsize() (s int) {
- s = 1 + 3 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 2 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize))
+ s = 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 2 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -792,7 +792,7 @@ func (_ *OneTimeSignature) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *OneTimeSignature) Msgsize() (s int) {
- s = 1 + 2 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize)) + 2 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize))
+ s = 1 + 2 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 2 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -1114,7 +1114,7 @@ func (_ *OneTimeSignatureSecrets) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *OneTimeSignatureSecrets) Msgsize() (s int) {
- s = 1 + 25 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 6 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize
+ s = 1 + 25 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 6 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize
for zb0002 := range (*z).OneTimeSignatureSecretsPersistent.Batches {
s += (*z).OneTimeSignatureSecretsPersistent.Batches[zb0002].Msgsize()
}
@@ -1122,7 +1122,7 @@ func (z *OneTimeSignatureSecrets) Msgsize() (s int) {
for zb0003 := range (*z).OneTimeSignatureSecretsPersistent.Offsets {
s += (*z).OneTimeSignatureSecretsPersistent.Offsets[zb0003].Msgsize()
}
- s += 7 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 10 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize))
+ s += 7 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 10 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -1444,7 +1444,7 @@ func (_ *OneTimeSignatureSecretsPersistent) CanUnmarshalMsg(z interface{}) bool
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *OneTimeSignatureSecretsPersistent) Msgsize() (s int) {
- s = 1 + 25 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 6 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize
+ s = 1 + 25 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 6 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize
for zb0002 := range (*z).Batches {
s += (*z).Batches[zb0002].Msgsize()
}
@@ -1452,7 +1452,7 @@ func (z *OneTimeSignatureSecretsPersistent) Msgsize() (s int) {
for zb0003 := range (*z).Offsets {
s += (*z).Offsets[zb0003].Msgsize()
}
- s += 7 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 10 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize))
+ s += 7 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 10 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -1563,7 +1563,7 @@ func (_ *OneTimeSignatureSubkeyBatchID) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *OneTimeSignatureSubkeyBatchID) Msgsize() (s int) {
- s = 1 + 3 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 6 + msgp.Uint64Size
+ s = 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 6 + msgp.Uint64Size
return
}
@@ -1691,7 +1691,7 @@ func (_ *OneTimeSignatureSubkeyOffsetID) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *OneTimeSignatureSubkeyOffsetID) Msgsize() (s int) {
- s = 1 + 3 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 6 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ s = 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 6 + msgp.Uint64Size + 4 + msgp.Uint64Size
return
}
@@ -1730,7 +1730,7 @@ func (_ *OneTimeSignatureVerifier) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *OneTimeSignatureVerifier) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (32 * (msgp.ByteSize))
return
}
@@ -1769,7 +1769,7 @@ func (_ *PrivateKey) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *PrivateKey) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (PrivateKeyByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -1808,7 +1808,7 @@ func (_ *PublicKey) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *PublicKey) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (32 * (msgp.ByteSize))
return
}
@@ -1886,7 +1886,7 @@ func (_ *Signature) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Signature) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -1997,7 +1997,7 @@ func (_ *SignatureSecrets) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *SignatureSecrets) Msgsize() (s int) {
- s = 1 + 18 + (*z).SignatureVerifier.Msgsize() + 3 + msgp.ArrayHeaderSize + (PrivateKeyByteLength * (msgp.ByteSize))
+ s = 1 + 18 + (*z).SignatureVerifier.Msgsize() + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -2108,7 +2108,7 @@ func (_ *VRFSecrets) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *VRFSecrets) Msgsize() (s int) {
- s = 1 + 3 + msgp.ArrayHeaderSize + (VrfPubkeyByteLength * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
+ s = 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -2264,7 +2264,7 @@ func (_ *VrfPubkey) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *VrfPubkey) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (VrfPubkeyByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (32 * (msgp.ByteSize))
return
}
@@ -2303,7 +2303,7 @@ func (_ *ed25519PrivateKey) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ed25519PrivateKey) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (PrivateKeyByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -2342,7 +2342,7 @@ func (_ *ed25519PublicKey) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ed25519PublicKey) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (32 * (msgp.ByteSize))
return
}
@@ -2420,7 +2420,7 @@ func (_ *ed25519Signature) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ed25519Signature) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize))
+ s = msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
@@ -2565,7 +2565,7 @@ func (_ *ephemeralSubkey) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ephemeralSubkey) Msgsize() (s int) {
- s = 1 + 3 + msgp.ArrayHeaderSize + (PublicKeyByteLength * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (PrivateKeyByteLength * (msgp.ByteSize)) + 6 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize)) + 5 + msgp.ArrayHeaderSize + (SignatureByteLength * (msgp.ByteSize))
+ s = 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 6 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 5 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
diff --git a/crypto/multisig.go b/crypto/multisig.go
index 886d2cb62..6c9a1ca66 100644
--- a/crypto/multisig.go
+++ b/crypto/multisig.go
@@ -38,7 +38,7 @@ type MultisigSig struct {
Version uint8 `codec:"v"`
Threshold uint8 `codec:"thr"`
- Subsigs []MultisigSubsig `codec:"subsig,allocbound=MaxMultisig"`
+ Subsigs []MultisigSubsig `codec:"subsig,allocbound=maxMultisig"`
}
// MultisigPreimageFromPKs makes an empty MultisigSig for a given preimage. It should be renamed.
@@ -76,9 +76,7 @@ func (msig MultisigSig) Preimage() (version, threshold uint8, pks []PublicKey) {
}
const multiSigString = "MultisigAddr"
-
-// MaxMultisig is a bound on allocation and on the number of subsigs
-const MaxMultisig = 255
+const maxMultisig = 255
// MultisigAddrGen identifes the exact group, version,
// and devices (Public keys) that it requires to sign
@@ -258,9 +256,8 @@ func MultisigBatchVerify(msg Hashable, addr Digest, sig MultisigSig, batchVerifi
}
// check that we don't have too many multisig subsigs
- if len(sig.Subsigs) > MaxMultisig {
+ if len(sig.Subsigs) > maxMultisig {
err = errInvalidNumberOfSignature
-
return
}
diff --git a/crypto/multisig_test.go b/crypto/multisig_test.go
index 492c3cfe8..8636331bf 100644
--- a/crypto/multisig_test.go
+++ b/crypto/multisig_test.go
@@ -297,7 +297,7 @@ func TestMoreThanMaxSigsInMultisig(t *testing.T) {
var s Seed
var secrets []*SecretKey
var pks []PublicKey
- multiSigLen := MaxMultisig + 1
+ multiSigLen := maxMultisig + 1
txid := TestingHashable{[]byte("test: txid 1000")}
version := uint8(1)
threshold := uint8(1)
diff --git a/crypto/vrf.go b/crypto/vrf.go
index 36a6e8270..88e314485 100644
--- a/crypto/vrf.go
+++ b/crypto/vrf.go
@@ -45,9 +45,6 @@ type VRFVerifier = VrfPubkey
// VRFProof is a deprecated name for VrfProof
type VRFProof = VrfProof
-// VrfPubkeyByteLength is the size, in bytes, of a VRF public key.
-const VrfPubkeyByteLength = 32
-
// VRFSecrets is a wrapper for a VRF keypair. Use *VrfPrivkey instead
type VRFSecrets struct {
_struct struct{} `codec:""`
@@ -71,7 +68,7 @@ type (
// Specifically, we use a 64-byte ed25519 private key (the latter 32-bytes are the precomputed public key)
VrfPrivkey [64]byte
// A VrfPubkey is a public key that can be used to verify VRF proofs.
- VrfPubkey [VrfPubkeyByteLength]byte
+ VrfPubkey [32]byte
// A VrfProof for a message can be generated with a secret key and verified against a public key, like a signature.
// Proofs are malleable, however, for a given message and public key, the VRF output that can be computed from a proof is unique.
VrfProof [80]byte
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 46e15396f..57b719d5a 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -462,59 +462,224 @@
}
}
},
- "/v2/register-participation-keys/{address}": {
+ "/v2/participation": {
+
+ "get": {
+
+ "tags": [
+ "private"
+ ],
+
+ "description": "Return a list of participation keys",
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Return a list of participation keys",
+ "operationId": "GetParticipationKeys",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "$ref": "#/responses/ParticipationKeysResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Application Not Found",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+
+
"post": {
- "description": "Generate (or renew) and register participation keys on the node for a given account address.",
+
"tags": [
"private"
],
- "operationId": "RegisterParticipationKeys",
+
+ "consumes": [
+ "application/msgpack"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Add a participation key to the node",
+ "operationId": "AddParticipationKey",
"parameters": [
{
- "type": "string",
- "description": "The `account-id` to update, or `all` to update all accounts.",
- "name": "address",
- "in": "path",
- "required": true
+ "description": "The participation key to add to the node",
+ "name": "participationkey",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "binary"
+ }
+ }
+ ],
+
+ "responses": {
+ "200": {
+ "$ref": "#/responses/PostParticipationResponse"
},
- {
- "type": "integer",
- "default": 1000,
- "description": "The fee to use when submitting key registration transactions. Defaults to the suggested fee.",
- "name": "fee",
- "in": "query"
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
},
- {
- "type": "integer",
- "description": "value to use for two-level participation key.",
- "name": "key-dilution",
- "in": "query"
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
},
- {
- "type": "integer",
- "description": "The last round for which the generated participation keys will be valid.",
- "name": "round-last-valid",
- "in": "query"
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
},
- {
- "type": "boolean",
- "description": "Don't wait for transaction to commit before returning response.",
- "name": "no-wait",
- "in": "query"
+ "503": {
+ "description": "Service Temporarily Unavailable",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+
+ }
+
+ },
+ "/v2/participation/{participation-id}": {
+ "delete": {
+
+ "tags": [
+ "private"
+ ],
+
+ "description": "Delete a given participation key by id",
+ "produces": [
+ "application/json"
+ ],
+
+ "schemes": [
+ "http"
+ ],
+ "summary": "Delete a given participation key by id",
+ "operationId": "DeleteParticipationKeyByID",
+ "responses": {
+ "200": {
+ "$ref": "#/responses/DeleteParticipationIdResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
}
+ }
+
+ },
+
+ "get": {
+
+ "tags": [
+ "private"
+ ],
+
+ "description": "Given a participation id, return information about that participation key",
+ "produces": [
+ "application/json"
],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get participation key info by id",
+ "operationId": "GetParticipationKeyByID",
"responses": {
"200": {
"description": "OK",
- "$ref": "#/responses/PostTransactionsResponse"
+ "$ref": "#/responses/ParticipationKeyResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Application Not Found",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
}
}
},
"parameters": [
{
"type": "string",
- "description": "Account address where keys will be registered.",
- "name": "address",
+ "name": "participation-id",
"in": "path",
"required": true
}
@@ -1496,6 +1661,52 @@
}
}
},
+ "ParticipationKey": {
+ "description": "Represents a participation key used by the node.",
+ "type": "object",
+ "required": [
+ "id",
+ "key",
+ "address"
+ ],
+ "properties": {
+ "id": {
+ "description": "The key's ParticipationID.",
+ "type": "string"
+ },
+ "address": {
+ "description": "Address the key was generated for.",
+ "type": "string",
+ "x-algorand-format": "Address"
+ },
+ "effective-first-valid": {
+ "description": "When registered, this is the first round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "effective-last-valid": {
+ "description": "When registered, this is the last round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "last-vote": {
+ "description": "Round when this key was last used to vote.",
+ "type": "integer"
+ },
+ "last-block-proposal": {
+ "description": "Round when this key was last used to propose a block.",
+ "type": "integer"
+ },
+ "last-state-proof": {
+ "description": "Round when this key was last used to generate a state proof.",
+ "type": "integer"
+ },
+ "key": {
+ "description": "Key information stored on the account.",
+ "$ref": "#/definitions/AccountParticipation"
+ }
+ }
+ },
"TealKeyValueStore": {
"description": "Represents a key-value store for use in an application.",
"type": "array",
@@ -1529,7 +1740,7 @@
],
"properties": {
"type": {
- "description": "\\[tt\\] value type.",
+ "description": "\\[tt\\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**",
"type": "integer"
},
"bytes": {
@@ -1657,7 +1868,7 @@
"$ref": "#/definitions/ApplicationStateSchema"
},
"global-state-schema": {
- "description": "[\\lsch\\] global schema",
+ "description": "[\\gsch\\] global schema",
"$ref": "#/definitions/ApplicationStateSchema"
},
"global-state": {
@@ -2366,6 +2577,51 @@
}
}
},
+
+ "ParticipationKeysResponse": {
+ "description": "A list of participation keys",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ParticipationKey"
+ }
+ }
+ },
+ "ParticipationKeyResponse": {
+ "description": "A detailed description of a participation id",
+ "schema": {
+ "type": "object",
+ "required": [
+ "participationKey"
+ ],
+ "properties": {
+ "participationKey": {
+ "description": "Detailed description of a participation key",
+ "type": "string"
+ }
+ }
+ }
+ },
+ "DeleteParticipationIdResponse" : {
+ "description": "Participation key got deleted by ID"
+ },
+ "PostParticipationResponse" : {
+ "description": "Participation ID of the submission",
+ "schema": {
+ "type": "object",
+ "required": [
+ "partId"
+ ],
+ "properties": {
+ "partId": {
+ "description": "encoding of the participation id.",
+ "type": "string"
+ }
+ }
+ }
+
+ },
+
"PostTransactionsResponse": {
"description": "Transaction ID of the submission.",
"schema": {
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 423b23180..a8a4d9ea8 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -341,6 +341,10 @@
},
"description": "Teal compile Result"
},
+ "DeleteParticipationIdResponse": {
+ "content": {},
+ "description": "Participation key got deleted by ID"
+ },
"DryrunResponse": {
"content": {
"application/json": {
@@ -453,6 +457,38 @@
}
}
},
+ "ParticipationKeyResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "participationKey": {
+ "description": "Detailed description of a participation key",
+ "type": "string"
+ }
+ },
+ "required": [
+ "participationKey"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "A detailed description of a participation id"
+ },
+ "ParticipationKeysResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "items": {
+ "$ref": "#/components/schemas/ParticipationKey"
+ },
+ "type": "array"
+ }
+ }
+ },
+ "description": "A list of participation keys"
+ },
"PendingTransactionsResponse": {
"content": {
"application/json": {
@@ -483,6 +519,25 @@
},
"description": "A potentially truncated list of transactions currently in the node's transaction pool. You can compute whether or not the list is truncated if the number of elements in the **top-transactions** array is fewer than **total-transactions**."
},
+ "PostParticipationResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "partId": {
+ "description": "encoding of the participation id.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "partId"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Participation ID of the submission"
+ },
"PostTransactionsResponse": {
"content": {
"application/json": {
@@ -1270,6 +1325,51 @@
],
"type": "object"
},
+ "ParticipationKey": {
+ "description": "Represents a participation key used by the node.",
+ "properties": {
+ "address": {
+ "description": "Address the key was generated for.",
+ "type": "string",
+ "x-algorand-format": "Address"
+ },
+ "effective-first-valid": {
+ "description": "When registered, this is the first round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "effective-last-valid": {
+ "description": "When registered, this is the last round it may be used.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ },
+ "id": {
+ "description": "The key's ParticipationID.",
+ "type": "string"
+ },
+ "key": {
+ "$ref": "#/components/schemas/AccountParticipation"
+ },
+ "last-block-proposal": {
+ "description": "Round when this key was last used to propose a block.",
+ "type": "integer"
+ },
+ "last-state-proof": {
+ "description": "Round when this key was last used to generate a state proof.",
+ "type": "integer"
+ },
+ "last-vote": {
+ "description": "Round when this key was last used to vote.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "address",
+ "id",
+ "key"
+ ],
+ "type": "object"
+ },
"PendingTransactionResponse": {
"description": "Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details.",
"properties": {
@@ -1386,7 +1486,7 @@
"type": "string"
},
"type": {
- "description": "\\[tt\\] value type.",
+ "description": "\\[tt\\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**",
"type": "integer"
},
"uint": {
@@ -2440,51 +2540,229 @@
"summary": "Get the current supply reported by the ledger."
}
},
- "/v2/register-participation-keys/{address}": {
+ "/v2/participation": {
+ "get": {
+ "description": "Return a list of participation keys",
+ "operationId": "GetParticipationKeys",
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "items": {
+ "$ref": "#/components/schemas/ParticipationKey"
+ },
+ "type": "array"
+ }
+ }
+ },
+ "description": "A list of participation keys"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Application Not Found"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Return a list of participation keys",
+ "tags": [
+ "private"
+ ]
+ },
"post": {
- "description": "Generate (or renew) and register participation keys on the node for a given account address.",
- "operationId": "RegisterParticipationKeys",
+ "operationId": "AddParticipationKey",
+ "requestBody": {
+ "content": {
+ "application/msgpack": {
+ "schema": {
+ "format": "binary",
+ "type": "string"
+ }
+ }
+ },
+ "description": "The participation key to add to the node",
+ "required": true
+ },
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "partId": {
+ "description": "encoding of the participation id.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "partId"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Participation ID of the submission"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "503": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Service Temporarily Unavailable"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Add a participation key to the node",
+ "tags": [
+ "private"
+ ],
+ "x-codegen-request-body-name": "participationkey"
+ }
+ },
+ "/v2/participation/{participation-id}": {
+ "delete": {
+ "description": "Delete a given participation key by id",
+ "operationId": "DeleteParticipationKeyByID",
"parameters": [
{
- "description": "The `account-id` to update, or `all` to update all accounts.",
"in": "path",
- "name": "address",
+ "name": "participation-id",
"required": true,
"schema": {
"type": "string"
}
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {},
+ "description": "Participation key got deleted by ID"
},
- {
- "description": "The fee to use when submitting key registration transactions. Defaults to the suggested fee.",
- "in": "query",
- "name": "fee",
- "schema": {
- "default": 1000,
- "type": "integer"
- }
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
},
- {
- "description": "value to use for two-level participation key.",
- "in": "query",
- "name": "key-dilution",
- "schema": {
- "type": "integer"
- }
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
},
- {
- "description": "The last round for which the generated participation keys will be valid.",
- "in": "query",
- "name": "round-last-valid",
- "schema": {
- "type": "integer"
- }
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
},
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Delete a given participation key by id",
+ "tags": [
+ "private"
+ ]
+ },
+ "get": {
+ "description": "Given a participation id, return information about that participation key",
+ "operationId": "GetParticipationKeyByID",
+ "parameters": [
{
- "description": "Don't wait for transaction to commit before returning response.",
- "in": "query",
- "name": "no-wait",
+ "in": "path",
+ "name": "participation-id",
+ "required": true,
"schema": {
- "type": "boolean"
+ "type": "string"
}
}
],
@@ -2494,21 +2772,66 @@
"application/json": {
"schema": {
"properties": {
- "txId": {
- "description": "encoding of the transaction hash.",
+ "participationKey": {
+ "description": "Detailed description of a participation key",
"type": "string"
}
},
"required": [
- "txId"
+ "participationKey"
],
"type": "object"
}
}
},
- "description": "Transaction ID of the submission."
+ "description": "A detailed description of a participation id"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Application Not Found"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
}
},
+ "summary": "Get participation key info by id",
"tags": [
"private"
]
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index 626bec0b7..f57c0d0be 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -59,9 +59,10 @@ const (
// rawRequestPaths is a set of paths where the body should not be urlencoded
var rawRequestPaths = map[string]bool{
- "/v1/transactions": true,
- "/v2/teal/dryrun": true,
- "/v2/teal/compile": true,
+ "/v1/transactions": true,
+ "/v2/teal/dryrun": true,
+ "/v2/teal/compile": true,
+ "/v2/participation": true,
}
// unauthorizedRequestError is generated when we receive 401 error from the server. This error includes the inner error
@@ -604,3 +605,21 @@ func (client RestClient) Proof(txid string, round uint64) (response generatedV2.
err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/transactions/%s/proof", round, txid), nil)
return
}
+
+// PostParticipationKey sends a key file to the node.
+func (client RestClient) PostParticipationKey(file []byte) (response generatedV2.PostParticipationResponse, err error) {
+ err = client.post(&response, "/v2/participation", file)
+ return
+}
+
+// GetParticipationKeys gets all of the participation keys
+func (client RestClient) GetParticipationKeys() (response generatedV2.ParticipationKeysResponse, err error) {
+ err = client.get(&response, "/v2/participation", nil)
+ return
+}
+
+// GetParticipationKeyByID gets a single participation key
+func (client RestClient) GetParticipationKeyByID(participationID string) (response generatedV2.ParticipationKeyResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/participation/%s", participationID), nil)
+ return
+}
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index 3677c4b28..04279ce67 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/apply"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/protocol"
@@ -256,7 +257,7 @@ func (dl *dryrunLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
return bookkeeping.BlockHeader{}, nil
}
-func (dl *dryrunLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledger.TxLease) error {
+func (dl *dryrunLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go
index bb1f8bbc1..3123e5d7c 100644
--- a/daemon/algod/api/server/v2/errors.go
+++ b/daemon/algod/api/server/v2/errors.go
@@ -38,4 +38,5 @@ var (
errFailedToAbortCatchup = "failed to abort catchup : %v"
errFailedToStartCatchup = "failed to start catchup : %v"
errOperationNotAvailableDuringCatchup = "operation not available during catchup"
+ errRESTPayloadZeroLength = "payload was of zero length"
)
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index 3e6a3591a..a061b310c 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -23,9 +23,18 @@ type ServerInterface interface {
// Starts a catchpoint catchup.
// (POST /v2/catchup/{catchpoint})
StartCatchup(ctx echo.Context, catchpoint string) error
-
- // (POST /v2/register-participation-keys/{address})
- RegisterParticipationKeys(ctx echo.Context, address string, params RegisterParticipationKeysParams) error
+ // Return a list of participation keys
+ // (GET /v2/participation)
+ GetParticipationKeys(ctx echo.Context) error
+ // Add a participation key to the node
+ // (POST /v2/participation)
+ AddParticipationKey(ctx echo.Context) error
+ // Delete a given participation key by id
+ // (DELETE /v2/participation/{participation-id})
+ DeleteParticipationKeyByID(ctx echo.Context, participationId string) error
+ // Get participation key info by id
+ // (GET /v2/participation/{participation-id})
+ GetParticipationKeyByID(ctx echo.Context, participationId string) error
// (POST /v2/shutdown)
ShutdownNode(ctx echo.Context, params ShutdownNodeParams) error
@@ -96,15 +105,11 @@ func (w *ServerInterfaceWrapper) StartCatchup(ctx echo.Context) error {
return err
}
-// RegisterParticipationKeys converts echo context to params.
-func (w *ServerInterfaceWrapper) RegisterParticipationKeys(ctx echo.Context) error {
+// GetParticipationKeys converts echo context to params.
+func (w *ServerInterfaceWrapper) GetParticipationKeys(ctx echo.Context) error {
validQueryParams := map[string]bool{
- "pretty": true,
- "fee": true,
- "key-dilution": true,
- "round-last-valid": true,
- "no-wait": true,
+ "pretty": true,
}
// Check for unknown query parameters.
@@ -115,60 +120,94 @@ func (w *ServerInterfaceWrapper) RegisterParticipationKeys(ctx echo.Context) err
}
var err error
- // ------------- Path parameter "address" -------------
- var address string
-
- err = runtime.BindStyledParameter("simple", false, "address", ctx.Param("address"), &address)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
- }
ctx.Set("api_key.Scopes", []string{""})
- // Parameter object where we will unmarshal all parameters from the context
- var params RegisterParticipationKeysParams
- // ------------- Optional query parameter "fee" -------------
- if paramValue := ctx.QueryParam("fee"); paramValue != "" {
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetParticipationKeys(ctx)
+ return err
+}
+// AddParticipationKey converts echo context to params.
+func (w *ServerInterfaceWrapper) AddParticipationKey(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
}
- err = runtime.BindQueryParameter("form", true, false, "fee", ctx.QueryParams(), &params.Fee)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter fee: %s", err))
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
}
- // ------------- Optional query parameter "key-dilution" -------------
- if paramValue := ctx.QueryParam("key-dilution"); paramValue != "" {
+ var err error
- }
+ ctx.Set("api_key.Scopes", []string{""})
- err = runtime.BindQueryParameter("form", true, false, "key-dilution", ctx.QueryParams(), &params.KeyDilution)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter key-dilution: %s", err))
- }
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.AddParticipationKey(ctx)
+ return err
+}
+
+// DeleteParticipationKeyByID converts echo context to params.
+func (w *ServerInterfaceWrapper) DeleteParticipationKeyByID(ctx echo.Context) error {
- // ------------- Optional query parameter "round-last-valid" -------------
- if paramValue := ctx.QueryParam("round-last-valid"); paramValue != "" {
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
}
- err = runtime.BindQueryParameter("form", true, false, "round-last-valid", ctx.QueryParams(), &params.RoundLastValid)
+ var err error
+ // ------------- Path parameter "participation-id" -------------
+ var participationId string
+
+ err = runtime.BindStyledParameter("simple", false, "participation-id", ctx.Param("participation-id"), &participationId)
if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round-last-valid: %s", err))
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
}
- // ------------- Optional query parameter "no-wait" -------------
- if paramValue := ctx.QueryParam("no-wait"); paramValue != "" {
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.DeleteParticipationKeyByID(ctx, participationId)
+ return err
+}
+
+// GetParticipationKeyByID converts echo context to params.
+func (w *ServerInterfaceWrapper) GetParticipationKeyByID(ctx echo.Context) error {
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
}
- err = runtime.BindQueryParameter("form", true, false, "no-wait", ctx.QueryParams(), &params.NoWait)
+ var err error
+ // ------------- Path parameter "participation-id" -------------
+ var participationId string
+
+ err = runtime.BindStyledParameter("simple", false, "participation-id", ctx.Param("participation-id"), &participationId)
if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter no-wait: %s", err))
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter participation-id: %s", err))
}
+ ctx.Set("api_key.Scopes", []string{""})
+
// Invoke the callback with all the unmarshalled arguments
- err = w.Handler.RegisterParticipationKeys(ctx, address, params)
+ err = w.Handler.GetParticipationKeyByID(ctx, participationId)
return err
}
@@ -227,7 +266,10 @@ func RegisterHandlers(router interface {
router.DELETE("/v2/catchup/:catchpoint", wrapper.AbortCatchup, m...)
router.POST("/v2/catchup/:catchpoint", wrapper.StartCatchup, m...)
- router.POST("/v2/register-participation-keys/:address", wrapper.RegisterParticipationKeys, m...)
+ router.GET("/v2/participation", wrapper.GetParticipationKeys, m...)
+ router.POST("/v2/participation", wrapper.AddParticipationKey, m...)
+ router.DELETE("/v2/participation/:participation-id", wrapper.DeleteParticipationKeyByID, m...)
+ router.GET("/v2/participation/:participation-id", wrapper.GetParticipationKeyByID, m...)
router.POST("/v2/shutdown", wrapper.ShutdownNode, m...)
}
@@ -235,136 +277,142 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XPbtrLov4LROTP5eKJk56On8UznPDdOW7+maSZ2++49cW4LkSsJNQmwAGhJzfX/",
- "fgcLgARJUJI/Tnoz5/yUWAQWi93FYnexWHwcpaIoBQeu1ejo46ikkhagQeJfNE1FxXXCMvNXBiqVrNRM",
- "8NGR/0aUlowvRuMRM7+WVC9H4xGnBTRtTP/xSMLvFZOQjY60rGA8UukSCmoA601pWteQ1slCJA7EsQVx",
- "ejK63vKBZpkEpfpY/sjzDWE8zasMiJaUK5qaT4qsmF4SvWSKuM6EcSI4EDEnetlqTOYM8kxN/CR/r0Bu",
- "glm6wYendN2gmEiRQx/Pl6KYMQ4eK6iRqhlCtCAZzLHRkmpiRjC4+oZaEAVUpksyF3IHqhaJEF/gVTE6",
- "ej9SwDOQyK0U2BX+dy4B/oBEU7kAPfowjk1urkEmmhWRqZ066ktQVa4VwbY4xwW7Ak5Mrwn5oVKazIBQ",
- "Tt5985I8ffr0hZlIQbWGzAnZ4Kya0cM52e6jo1FGNfjPfVmj+UJIyrOkbv/um5c4/pmb4L6tqFIQXyzH",
- "5gs5PRmagO8YESHGNSyQDy3pNz0ii6L5eQZzIWFPntjG98qUcPw/lSsp1emyFIzrCF8IfiX2c1SHBd23",
- "6bAagVb70lBKGqDvD5IXHz4ejg8Prv/y/jj5h/vz+dPrPaf/soa7gwLRhmklJfB0kywkUFwtS8r79Hjn",
- "5EEtRZVnZEmvkPm0QFXv+hLT16rOK5pXRk5YKsVxvhCKUCdGGcxplWviByYVz42aMtCctBOmSCnFFcsg",
- "Gxvtu1qydElSqiwIbEdWLM+NDFYKsiFZi89uy2K6Dkli8LoVPXBC/3uJ0cxrByVgjdogSXOhINFix/bk",
- "dxzKMxJuKM1epW62WZHzJRAc3Hywmy3SjhuZzvMN0cjXjFBFKPFb05iwOdmIiqyQOTm7xP5uNoZqBTFE",
- "Q+a09lGzeIfI1yNGhHgzIXKgHInn112fZHzOFpUERVZL0Eu350lQpeAKiJj9Bqk2bP9/Zz++IUKSH0Ap",
- "uoC3NL0kwFORDfPYDRrbwX9TwjC8UIuSppfx7TpnBYug/ANds6IqCK+KGUjDL78/aEEk6EryIYQsxB1y",
- "VtB1f9BzWfEUmdsM2zLUjCgxVeZ0MyGnc1LQ9VcHY4eOIjTPSQk8Y3xB9JoPGmlm7N3oJVJUPNvDhtGG",
- "YcGuqUpI2ZxBRmooWzBxw+zCh/Gb4dNYVgE6HsggOvUoO9DhsI7IjFm65gsp6QICkZmQn5zmwq9aXAKv",
- "FRyZbfBTKeGKiUrVnQZwxKG3m9dcaEhKCXMWkbEzRw6jPWwbp14LZ+CkgmvKOGRG8yLSQoPVRIM4BQNu",
- "d2b6W/SMKvji2dAG3nzdk/tz0eX6Vo7vxW1slNglGdkXzVe3YONmU6v/Hs5fOLZii8T+3GMkW5ybrWTO",
- "ctxmfjP882SoFCqBFiH8xqPYglNdSTi64I/NXyQhZ5ryjMrM/FLYn36ocs3O2ML8lNufXosFS8/YYoCY",
- "Na5Rbwq7FfYfAy+ujvU66jS8FuKyKsMJpS2vdLYhpydDTLYwbyqYx7UrG3oV52vvady0h17XjBxAcpB2",
- "JTUNL2EjwWBL0zn+s56jPNG5/MP8U5Z5jKZGgN1Gi0EBFyx4534zP5klD9YnMFBYSg1Rp7h9Hn0MEPqr",
- "hPnoaPSXaRMpmdqvaurgmhGvx6PjBs79j9T0tPPrODLNZ8K45Q42HVuf8P7xMVCjmKCh2sHh61ykl7fC",
- "oZSiBKmZ5ePMwOmvFARPlkAzkCSjmk4ap8raWQPyjh2/w37oJYGMbHE/4n9oTsxnswqp9uabMV2ZMkac",
- "CAJNmbH47D5iRzIN0BIVpLBGHjHG2Y2wfNkMbhV0rVHfO7J86EKLcOeVtSsJ9vCTMFNvvMbjmZC3k5eO",
- "IHDS+MKEGqi19Wtm3uYsNq3KxNEnYk/bBh1ATfixr1ZDCnXBx2jVosKZpv8EKigD9T6o0AZ031QQRcly",
- "uIf1uqRq2Z+EMXCePiFn3x0/P3zyy5PnX5gdupRiIWlBZhsNijx0+wpRepPDo/7MUMFXuY5D/+KZ96Da",
- "cHdSCBGuYe+zos7BaAZLMWLjBQa7E7mRFb8HEoKUQkZsXhQdLVKRJ1cgFROR8MVb14K4FkYPWbu787vF",
- "lqyoImZsdMcqnoGcxChv/Czc0jUUatdGYUGfr3lDGweQSkk3PQ7Y+UZm58bdhydt4nvrXpESZKLXnGQw",
- "qxbhHkXmUhSEkgw7okJ8IzI401RX6h60QAOsQcYwIkSBzkSlCSVcZGZBm8Zx/TAQy8QgCsZ+dKhy9NLu",
- "PzMw1nFKq8VSE2NWihhrm44JTS1TEtwr1IDrV/vstpUdzsbJcgk025AZACdi5vwr5/nhJCmGZbQ/cXHa",
- "qUGr9glaeJVSpKAUZIk7XtqJmm9nuay30AkRR4TrUYgSZE7lLZHVQtN8B6LYJoZubU44p7SP9X7Db2Ng",
- "d/CQjVQaH9NKgbFdzOrOQcMQCfekyRVIdM7+qfzzg9yWfVU5cHTiduBzVpjlSzjlQkEqeKaiwHKqdLJr",
- "2ZpGLTPBzCBYKbGVioAHAgSvqdLWRWc8Q5PRqhscB/vgEMMID+4oBvLPfjPpw06NnuSqUvXOoqqyFFJD",
- "FpsDh/WWsd7Auh5LzAPY9falBakU7II8RKUAviOWnYklENUuRlTHsPqTw3C82Qc2UVK2kGgIsQ2RM98q",
- "oG4YPh5AxPgXdU8UHKY6klPHrMcjpUVZmvWnk4rX/YbIdGZbH+ufmrZ94aK60euZADO69jg5zFeWsvbg",
- "YEmNbYeQSUEvzd6ElpqNJfRxNosxUYynkGyTfLMsz0yrcAnsWKQDRrI7mgxG6yyOjvxGhW5QCHZwYWjC",
- "Axb7WxsBPw/i5vdgtUSgGkmjnKDp5uNqZnMIm8CapjrfGJWrl7AhK5BAVDUrmNb2SKNt1GhRJiGAqBO1",
- "ZUTnxtrosTdJ9/GrzxBUML2+cToe2S10O37nnU20RQ63eZdC5JPd0tcjRhSDfYzgY1IKw3XmTtD8MUvO",
- "lO4h6TZUjGHUC/mBapEZZ0D+U1QkpRyNgUpDrZ2ExCWPW4EZwSjTekxmd92GQpBDAdbGwS+PH3cn/vix",
- "4zlTZA4rf+xsGnbJ8fgxWuxvhdJ3XgEd0VyfRpQMupZGY0VShYwDOdnpZiLcvbzLAPTpiR8QF5MyGsVO",
- "XAoxv4fZsmwdO2zIYB2bqeMcGowPjHW1UaAn0Y2wNAhGzhtBXubojYp5RyJJAUZU1JKVBmRzNrLR0Mqr",
- "+K+Hfz96f5z8gyZ/HCQv/s/0w8dn148e9358cv3VV//d/unp9VeP/v7XmPGgNJvFIxffUbU0mDrNsean",
- "3MYe50Jak3PjdjIx/9R4d0TMMNNTPpjSPkL3NsYQxgm1zEaZM4ZKvrmHTcYCIhJKCQpVQmjgK/tVzMO0",
- "Cid5aqM0FH0f2Xb9ZcBCeOf3156UCp4zDkkhOGyimYSMww/4MdbbqqWBzrhBDPXt2h8t/DtotcfZh5l3",
- "pS9yO1BDb+skj3tgfhduJzwSJpSgewd5SShJc4bOn+BKyyrVF5yieRmIayS06o3mYYfjpW8S93AiDogD",
- "dcGpMjSsjc5o2GwOEXfyGwDvd6hqsQClO8bNHOCCu1aMk4ozjWMVhl+JZVgJEuObE9uyoBsypzn6R3+A",
- "FGRW6fZ2j+feShv3xcZqzDBEzC841SQH48r9wPj5GsH542UvMxz0SsjLmgpxnb8ADoqpJK5Iv7VfUZ+6",
- "6S+dbsUkRPvZ65tPvQF43GOnsg7z0xNnCp+eoL3TRGl6uH8y171gPIkK2fkSSME4Jvd0ZIs8NFabF6BH",
- "TbzHcf2C6zU3gnRFc5ZRfTtx6Kq43lq0q6MjNS1GdDwxP9cPsSO0hUhKml7iCcpowfSymk1SUUy9CzBd",
- "iNodmGYUCsHxWzalJZuqEtLp1eEOc+wO+opE1NX1eOS0jrr3s1oHODah7ph1DMT/rQV58O2rczJ1nFIP",
- "bIqGBR2crUe8NndDoBXkNpO3KcY2R+WCX/ATmDPOzPejC55RTaczqliqppUC+TXNKU9hshDkiDiQJ1TT",
- "C95T8YO3ADCB0mFTVrOcpeQy3IqbpWkzO/sQLi7eGwG5uPjQi5j2N043VHSN2gGSFdNLUenEpa4lElZU",
- "ZhHUVZ26hJBt4um2UcfEwbYS6VLjHPy4qqZlqZJcpDRPlKYa4tMvy9xMPxBDRbATnrgTpYX0StBoRosN",
- "8veNcDFjSVc+77FSoMivBS3fM64/kOSiOjh4CuS4LF8bmGcGj1+drjEyuSmh5d/vmSvRAIv59jhxa1DB",
- "WkualHQBKjp9DbRE7uNGXWBYOs8JdgtpUp83IqhmAp4ewwyweNw4GwQnd2Z7+TsI8SngJ2QhtjHaqQkW",
- "3pZfBtR3IjdCdmt2BTCiXKr0MjFrOzorZUTcc6ZOTV4YnewjuIotuFkELot7BiRdQnoJGSaUQlHqzbjV",
- "3R8SuB3Oqw6mbOK1TfrA7EAMhcyAVGVGnQ1A+aabpqVAa5+b9g4uYXMumuTCm+RlXY9HqU2FTozMDC1U",
- "lNRgMzLCGi5bB6PLfHfgZDClZUkWuZi51V2LxVEtF77P8EK2O+Q9LOKYUNRk2CLvJZURQljhHyDBLSZq",
- "4N1J9GPTK6nULGWlnf9+WWhvW30MkF2bS3Q7EfPurtFT6lElZhsnM6riGwiYL4YfZg11z+P8SDaqiDOY",
- "ELy85wR3lqMtUh8F2pVNJRpdftr2NtIQanEpAcmbXd2j0aZIaD4sqfIXEPCehl8we220Q4cW9aGTkSJ/",
- "6oT+XmM5MTNuDld0iP7DWbOnwVFScBmjzon1iq27GMZ1frS9F+lzZ33CrM+SHY1vlPE6Hrnshhg7BEcr",
- "I4McFnbitrEXFIfaAxUwyODx43yeMw4kiZ1KUaVEyuwNkkaXuzHAGKGPCbEBHrI3hJgYB2hjtBwBkzci",
- "XJt8cRMkOTAMr1MPG+Pswd+wO9rcXFB15u1OM7SvO5pFNG4SyC0b+1Go8SiqkoY8hFYrYpvMoOdSxUTU",
- "qKZ+XKYf/VGQA27HSUuzJpexaJ2xKgDF8Mx3C9wG8pDNzSb/KDg0kbBgSkPjN5vV6gNBnzZ2cSU0JHMm",
- "lU7QZY9OzzT6RqEx+I1pGlc/LVIRe8ONZXHtg8NewibJWF7Fue3G/f7EDPum9p9UNbuEDW4yQNMlmeGN",
- "TLMLtYY3bbYMbU9mt074tZ3wa3pv891PlkxTM7AUQnfG+EykqqNPti2miADGhKPPtUGSblEv6PucQK5j",
- "ibeBT4ZerVGYNjN8MGrQW0yZh73N/AqwGNa8FlJ0LoGhu3UWDE/iKM8I08GFxn6W4MAaoGXJsnXHh7dQ",
- "B47t0IC/gaFuLf7IUdSoBraDAoG/HktEkeBjDpalwZ5pr6bycG6TvShjrK+QIIFCCIdiyhdW6BPKiDbe",
- "/t1Fq3Og+few+dm0xemMrseju7n8MVo7iDto/bZmb5TOGMu2LmArgndDktOylOKK5okLjAyJphRXTjSx",
- "uY+jfGJVF3e/z18dv37r0De+Zw5U2lDZ1llhu/KzmZXxiIUcWCD+4raxVr3vbA2xgPn1bZgwmLJagrsk",
- "G9hyRos54bLLqwmUBUvRBVfm8SO1naESF9OzU9wS24OyDu01HrGN7LWjefSKsty7oh7bgeMvnFwTT72x",
- "VggB3DkqGAR3k3tVN73VHV8djXTt0EnhWFuu8Rb2proigncTi4wJiR4uimpBN0aCbHC6r5x4VSRm+SUq",
- "Z2k8bMFnyggHtzFf05hg4wFj1ECs2MARAq9YAMs0U3uclnWQDMaIEhNDSltoNxOuxFDF2e8VEJYB1+aT",
- "xFXZWahmXfoyFf3t1NgO/bEcYFuyogF/FxvDgBqyLhCJ7QZGGGHuoXtSO5x+onVo3PwQBAZvcFAVjtjb",
- "ErccMjn5cNJsT/uX7UhxWBGor/+MYNjb47vLEfmwxdIiOjBGtLzQ4G5xPLxTmN432COaLQHRDTeDsS0+",
- "kisRAVPxFeW2WojpZ2noeiuwMQPTayUkpt0riJ7SM5XMpfgD4p7s3DAqkvvoSInmIvaeRNKZu0q0jso0",
- "daA8fUM8BkV7yJILPpL2QeLACkcpD0LneI/VB7got2JtK5u0jq/jiyNMOZla+M3icDj30nRyuprR2CVf",
- "Y1AZnI6bQ5pWKE4L4jt7LrioYSN7wXlP3ZbZXPUSZJOg3L8XdUvj6PMS+QxSVtA8biVlSP32zZyMLZgt",
- "D1MpCOqPOEC2rpaVIlfDxR6DNaQ5nZODcVDhyHEjY1dMsVkO2OLQtphRhbtWHW6tu5jpAddLhc2f7NF8",
- "WfFMQqaXyhJWCVIbsOjK1bHvGegVACcH2O7wBXmIUX/FruCRoaKzRUZHhy8wLcX+cRDb7FwdqG16JUPF",
- "8v+dYonLMR57WBhmk3JQJ9F7E7Z437AK27KabNd91hK2dFpv91oqKKcLiJ/mFjtwsn2Rmxg07NCFZ7by",
- "lNJSbAjT8fFBU6OfBlLTjPqzaJBUFAXThVlAWhAlCiNPTXERO6gHZ8tYuQv/Hi//EY9YSus2QNdh/rQB",
- "YruXx2aNB2FvaAFtso4JtdeLctZc4HQKcUJO/SVFrIBQFz6wtDFjmamjSWdYiBe9GdfoRFV6nnxJ0iWV",
- "NDXqbzKEbjL74lmk6kP7oje/GeKfnO4SFMirOOnlgNh7a8L1JQ+54ElhNEr2qEkFDVZl9Lq20DSPJ7V4",
- "jd7NadoOel8D1EBJBsWtaokbDTT1nQSPbwF4R1Gs53MjebzxzD65ZFYyLh60Mhz66d1rZ2UUQsaurDfL",
- "3VkcErRkcIX5NXEmGZh35IXM9+LCXbD/c09ZGg+gNsv8Wo45Al9XLM9+blLbO4VzJOXpMnrGMTMdf2kq",
- "fdVTtus4ekN6STmHPArO7pm/+L01svv/JvYdp2B8z7bdgjh2up3JNYi30fRI+QENeZnOzQAhVdu5vnVy",
- "WL4QGcFxmuu4jZT1a/wExUF+r0DpWNVR/GDzKjGWZfwCW5uCAM/Qqp6Qb22l3iWQ1g1NtGZZUeX2th9k",
- "C5AuyFqVuaDZmBg456+OXxM7qu1jKyra2hgLNObas+jEMIK7+/ulOvlSWfE0zP3hbM8LM7NWGi/vKk2L",
- "MpZhb1qc+waYxh/GddHMC6kzISfWwlbefrODGHmYM1kYy7SGZnU8yoT5j9Y0XaLp2tImwyK/f1EXL5Uq",
- "KG5Y14mrr9/jujN4u7outqzLmAjjX6yYsgVa4QraSf31DRfnOvkk//b0ZMW5lZSojt52A+s2ZPfI2cN7",
- "H/qNYtYh/A0NFyUqmcJNa9ycYa/oHeJuwZxeVUN7m7CuKuYLb6eUC85SvMEblIStUXbFXvc5F9njsnM3",
- "LOWXuFuhkcUVLdNTpwc5Kg4W7vGK0BGuH5gNvhqmWumwf2qsKrqkmixAK6fZIBv7UkwuXsK4AldOAev+",
- "BnpSyNZZE2rI6PFlUoe5byhGmOI7YAB/Y769ce4RpuVdMo6GkCObywC0EQ2sRamN9cQ0WQhQbj7tK7nq",
- "vekzwWupGaw/THztSoRhj2rMtO25ZB/UsT+ldKeCpu1L05bgsUzzcyud2A56XJZu0OiN2prDsWJSgwSO",
- "nDYlPtwfELeGH0LbIm5b0wtwPzWCBld4OAkl7sM9wajrcnUK7F3RvLIShS2ITeuJXgNjPILGa8ahqawa",
- "2SDS6JaAjMH1OtBPpZJqawLupdPOgeZ4IhlTaEq7EO1dQXUYjCTBOfoxhtnYlBQbUBx1g8Zwo3xTF3Q1",
- "0h0YEy+xkrQjZL9AGFpVzojKMHGzUzIspjiM4vbF9tobQH8Z9G0i211LalfOTXaioQsvqYjZm6/WkFb2",
- "wF3Y2hC0LEmKN0iD/SIa0WTKOE/FLI/kvp3UH4M6fJhkO9vgv7GKHcMkcSfiN87J8sff2PHGBmsbUs/c",
- "NMKUKLa4JZub/vfK51ws2oh82oDC1jUeikxsdb8yajO8A9mrBWMVa31FEdOQhC/Sik5TfbmmvSZRkUed",
- "0qbe5nanfLhy5hhV/0Ay4rvm9j21u4s9YxhKSUwHM2ipdunxmpLmqnt/YdpylzEINp/Bltm0T1ZE4ytD",
- "OQw2hcF87vXezy7qWZkIeytBfXJMH6HvfeYdKSlzB2jNiu1T1uXo9rOm98neaxjcnYTLfEUgsZn0KykN",
- "C/gJaMpyVdeDrF83CM5bjT3XrceycjdTMHW4dk39HRVQ/jefZW9Hsa9mNFXPMBCwojLzLaI7m980k4EM",
- "kG5OpU1dZXGk5/XIrDk+7acVRq5N4nF5mgvF+CIZyqpon1jW4b4HysZl0YfAElWI1xykq3ao/aMkiRb+",
- "uHUbHttI4Wpi34YIarCqjkVu8G7Tu+byFtaKoPZJGhdzDidIJBTUYCeDK1bDY24j9kv73efR+VoBncoc",
- "EbheXpOdd6T8wTlTPSKGUj8nTuXuzs+7jUnBOLfFZFXsvhU3pAydzVKKrEptrD9cGOBNr72vDG5RJVFD",
- "IO3PsqfTc7xA+zrIdr6EzdTq1XRJeXOTub2sbU1ZO4fgbk6H2/dqbcX3tHxhJ7C4Fzz/TGNpPCqFyJMB",
- "7/K0f22suwYuWXoJGTF7hz9yGqjlRh6iU1OHD1fLja+iWpbAIXs0IcSYW0WpNz6S2K5K0hmcP9Dbxl/j",
- "qFllb3I6O25yweOnpfaRpzvqNw9mu1azrx7ecSgLZPtAes0HVBtdRSob7vtAQCS21zFQAqGyWMSslFte",
- "p9lrffdtuYjoh4nQO4zoy5bhZ+/dd+J5QsI9G4BBIOOGBmA/xXvf6eE8UKtVCvrz3JsBLdoO0H4fwjfe",
- "S5+4w06Hnu3jdMSvL5vu6PVYgvgL9v3V9cl8ltZbAm7cGNd/HjrDsecUA8eFHZpWLM92Mbd1+NsUsMLj",
- "zV/cMfmfUkLrF5t+3F9urprQTaIlXSYgYSJzbQ0eDBUc6+5xouu6Rc5vccNIK8n0Bm8qeK+I/RK9Afot",
- "cPeignugps73dOmG9m00l32wqFs3z1l9K+wTE4XZrzF+prEU66s1Lcoc3Lr46sHsb/D0y2fZwdPDv82+",
- "PHh+kMKz5y8ODuiLZ/TwxdNDePLl82cHcDj/4sXsSfbk2ZPZsyfPvnj+In367HD27IsXf3vg35KyiDbv",
- "NP0H1plLjt+eJucG2YYmtGTfw8ZWljJi7GtW0RRXovEr8tGR/+n/+hU2SUURPH/rfh25VJTRUutSHU2n",
- "q9VqEnaZLtDPSrSo0uXUj9OvfPv2tD4mt+nNyFF7AmpEAZnqROEYv717dXZOjt+eThqBGR2NDiYHk0Ms",
- "DVkCpyUbHY2e4k+4epbI96kTttHRx+vxaLoEmuul+6MALVnqP6kVXSxATlzxLvPT1ZOpP2WbfnQ+5vW2",
- "b+2cahcaCDoEVV6mH1uOehbCxRoo048+3zz4ZOv/Tz+irzX4exuNj3rNsuupr/Lqerg62tOPTWH7a7s6",
- "coidv9h0BhrUwR8bXxjf+1H2V7MgfBYlU+13EGrunmaGq6bXy7rIf/is+ft/0UeAP3TeRHtycPAv9rrT",
- "sxvOeKs924pSRyrrfU0z4jN8cOzDTzf2Kcc79UahEauwr8ej559y9qfciDzNCbYMct/7rP+JX3Kx4r6l",
- "2V2roqBy45exaikF/3QH6nC6UOjdSHZFNYw+oPscO+IaUC74jNaNlQu+DfZv5fKplMvn8Wjakxsu8M9/",
- "xv9Wp5+bOj2z6m5/depMOZtEOrXFwhsLz9en6RdtaVuzQzrZuTrkIZ7mclg9cudPFmykAFCd9CcyGxPx",
- "xWT9hYngnKats985oK1aU9/DRu1S4OdLIL868AnLfsXLbpgCMiZCkl9pnge/YVFQb7ZP4vq+KQqz81Xk",
- "ZoHG0JoD+Kt3mFnv3lgxG9kl+PJBlgat04h+ZmVTenwOgy/j2wrNoQZzInh4cHAQS8nu4uziNxZjjNOv",
- "RJLDFeR9Vg8h0akitO0d6cGXtvrFn0K/OyJ1+M7TDJp6UIPParcrGt0EuxPBH2iyosydmgWRdfv0WsG0",
- "f3Hepmq7i0H1HhF/pTwxIGO4NLeR77p5f35vplxvUXZqWelMrPiw4sJaCjR3lxHxemAdbtCCeAC1ppoQ",
- "/4RwvvFv4BOKSeOi0k08yHT25yudJ7Xq0rULxnEAXOU4ir11S4MzbvfUVV8JnjnM3tiXwTp6L/pCt8Ux",
- "vu5ji/6ustQ3NLbyyheSbP09NSJvzFX78mGCFOqHNDTQfOrShTu/2qS+4Mf280+RX6d1IYvox26gJvbV",
- "xVF8oyZCGkYckVN1rPH9B0NwvBvomNgE0I6mUzz9XQqlpyOjcNrBtfDjh5rGHz3nPa2vP1z/TwAAAP//",
- "+Zih2CWOAAA=",
+ "H4sIAAAAAAAC/+x9/3PbNrL4v4LR3UyafETJSZxe45nOfdwkbf2appnY7b13cV4LkSsJNQmwAGhJzfP/",
+ "/gYLgARJUJK/nPsy158Si8Bisdhd7C4Wi4+jVBSl4MC1Gh19HJVU0gI0SPyLpqmouE5YZv7KQKWSlZoJ",
+ "Pjry34jSkvHFaDxi5teS6uVoPOK0gKaN6T8eSfitYhKy0ZGWFYxHKl1CQQ1gvSlN6xrSOlmIxIE4tiBO",
+ "Xo6utnygWSZBqT6WP/B8QxhP8yoDoiXliqbmkyIrppdEL5kirjNhnAgORMyJXrYakzmDPFMTP8nfKpCb",
+ "YJZu8OEpXTUoJlLk0MfzhShmjIPHCmqk6gUhWpAM5thoSTUxIxhcfUMtiAIq0yWZC7kDVYtEiC/wqhgd",
+ "vR8p4BlIXK0U2CX+dy4BfodEU7kAPfowjk1urkEmmhWRqZ046ktQVa4VwbY4xwW7BE5Mrwn5vlKazIBQ",
+ "Tt59/YI8ffr0uZlIQbWGzDHZ4Kya0cM52e6jo1FGNfjPfV6j+UJIyrOkbv/u6xc4/qmb4L6tqFIQF5Zj",
+ "84WcvByagO8YYSHGNSxwHVrcb3pEhKL5eQZzIWHPNbGN73RRwvH/0FVJqU6XpWBcR9aF4FdiP0d1WNB9",
+ "mw6rEWi1Lw2lpAH6/iB5/uHj4/Hjg6u/vD9O/un+fPb0as/pv6jh7qBAtGFaSQk83SQLCRSlZUl5nx7v",
+ "HD+opajyjCzpJS4+LVDVu77E9LWq85LmleETlkpxnC+EItSxUQZzWuWa+IFJxXOjpgw0x+2EKVJKccky",
+ "yMZG+66WLF2SlCoLAtuRFctzw4OVgmyI1+Kz2yJMVyFJDF43ogdO6P8uMZp57aAErFEbJGkuFCRa7Nie",
+ "/I5DeUbCDaXZq9T1NitytgSCg5sPdrNF2nHD03m+IRrXNSNUEUr81jQmbE42oiIrXJycXWB/NxtDtYIY",
+ "ouHitPZRI7xD5OsRI0K8mRA5UI7E83LXJxmfs0UlQZHVEvTS7XkSVCm4AiJmv0KqzbL/x+kPb4iQ5HtQ",
+ "ii7gLU0vCPBUZMNr7AaN7eC/KmEWvFCLkqYX8e06ZwWLoPw9XbOiKgivihlIs15+f9CCSNCV5EMIWYg7",
+ "+Kyg6/6gZ7LiKS5uM2zLUDOsxFSZ082EnMxJQddfHowdOorQPCcl8IzxBdFrPmikmbF3o5dIUfFsDxtG",
+ "mwULdk1VQsrmDDJSQ9mCiRtmFz6MXw+fxrIK0PFABtGpR9mBDod1hGeM6JovpKQLCFhmQn50mgu/anEB",
+ "vFZwZLbBT6WESyYqVXcawBGH3m5ec6EhKSXMWYTHTh05jPawbZx6LZyBkwquKeOQGc2LSAsNVhMN4hQM",
+ "uN2Z6W/RM6rg88OhDbz5uufqz0V31beu+F6rjY0SK5KRfdF8dQIbN5ta/fdw/sKxFVsk9ufeQrLFmdlK",
+ "5izHbeZXs36eDJVCJdAihN94FFtwqisJR+f8kfmLJORUU55RmZlfCvvT91Wu2SlbmJ9y+9NrsWDpKVsM",
+ "ELPGNepNYbfC/mPgxdWxXkedhtdCXFRlOKG05ZXONuTk5dAiW5jXZczj2pUNvYqztfc0rttDr+uFHEBy",
+ "kHYlNQ0vYCPBYEvTOf6zniM/0bn83fxTlnmMpoaB3UaLQQEXLHjnfjM/GZEH6xMYKCylhqhT3D6PPgYI",
+ "/VXCfHQ0+su0iZRM7Vc1dXDNiFfj0XED5+5Hanra+XUcmeYzYdyuDjYdW5/w7vExUKOYoKHaweGrXKQX",
+ "N8KhlKIEqZldx5mB05cUBE+WQDOQJKOaThqnytpZA/yOHb/FfuglgYxscT/gf2hOzGcjhVR7882YrkwZ",
+ "I04EgabMWHx2H7EjmQZoiQpSWCOPGOPsWli+aAa3CrrWqO8dWT50oUVW55W1Kwn28JMwU2+8xuOZkDfj",
+ "lw4jcNL4woQaqLX1a2beXllsWpWJo0/EnrYNOoCa8GNfrYYU6oKP0apFhVNN/wVUUAbqXVChDeiuqSCK",
+ "kuVwB/K6pGrZn4QxcJ4+IaffHj97/OTnJ88+Nzt0KcVC0oLMNhoU+cztK0TpTQ4P+zNDBV/lOg7980Pv",
+ "QbXh7qQQIlzD3keizsBoBksxYuMFBruXkIOGt1RqlrISqXWShRRtQ2k1JBewIQuhSYZAMrvTI1S5kRW/",
+ "g4UBKYWMWNLIkFqkIk8uQSomIkGRt64FcS2MdrPWfOd3iy1ZUUXM2OjkVTwDOYmtp/He0FDQUKhd248F",
+ "fbbmDcUdQCol3fTW1c43Mjs37j4r3Sa+9xkUKUEmes1JBrNqEe58ZC5FQSjJsCOq2Tcig1NNdaXuQLc0",
+ "wBpkzEKEKNCZqDShhIvMqAnTOK51BiKkGJrBiJIOFZle2l1tBsbmTmm1WGpijFURW9qmY0JTuygJ7kBq",
+ "wKGsIwG2lR3ORt9yCTTbkBkAJ2LmvDbnT+IkKQZ7tD/HcTqvQav2NFp4lVKkoBRkiTu02omab2dXWW+h",
+ "EyKOCNejECXInMobIquFpvkORLFNDN3aSHGubh/r/YbftoDdwcNlpNJ4rpYLjEVkpNuouSES7kmTS5Do",
+ "8v1L188PctPlq8qBAxm3r5+xwogv4ZQLBangmYoCy6nSyS6xNY1axoeZQSApMUlFwANhh9dUaev4M56h",
+ "IWrVDY6DfXCIYYQHdxQD+Se/mfRhp0ZPclWpemdRVVkKqSGLzYHDestYb2BdjyXmAex6+9KCVAp2QR6i",
+ "UgDfEcvOxBKIahd5qiNj/clhkN/sA5soKVtINITYhsipbxVQNwxKDyBivJa6JzIOUx3OqSPh45HSoiyN",
+ "/Omk4nW/ITKd2tbH+sembZ+5qG70eibAjK49Tg7zlaWsPY5YUmMxImRS0AuzN6H9ZyMUfZyNMCaK8RSS",
+ "bZxvxPLUtApFYIeQDpje7sAzGK0jHB3+jTLdIBPsWIWhCQ/4AS2j9DvY3IHdWXZA9mn9EjRlOWQk+BnV",
+ "NSm7NvJOa7432j5m3jHJ9kSBZaMIlW5m2e1l9XaHipi9kenkTOEO1aOfQvTt4clZcORyB6ZpBKpRJ5QT",
+ "RNSHZI0FEDaBNU11vjH7ql7ChqxAAlHVrGBa29OwNjtpUSYhgKj/vWVEFwGxBw9+BfYJyZwiqGB6/aUY",
+ "j6ydtB2/s46l1CKHs9BKIfLJbhXTI0YUg/1EoBRm1Zk7fPUndJ6TWkg6qwnDX7W2fqBaZMYZkP8SFUkp",
+ "R4uv0lBvQUKiXsf93oxgdsx6TGZNq4ZCkEMB1pDFL48edSf+6JFbc6bIHFY+Y8E07JLj0SN0y94KpVvC",
+ "dUeq7iSymWBgwuxMzmjs6pTJXmrtJNtrJdtxhZOXflCUKaUc45rp31oBdCRzvc/cQx5ZUrXcPXeEu1dc",
+ "JgAdm7dddynE/A5my7J17Jgug3Vspo5x0Sl6YDyIjQI9iRp7pUEwclIP8iLHiIuYdwSSFGAkRS1ZaUA2",
+ "p4obDa2MpP/+7O9H74+Tf9Lk94Pk+f+bfvh4ePXwUe/HJ1dffvk/7Z+eXn358O9/jRnISrNZPOb3LVVL",
+ "g6lTnGt+wm3Ufi6kdas2zloT8/vGu8NiZjE95YMp7SVusQVhnFC72MhzxhjPN3ewx1pAREIpQaFGDJ1Y",
+ "Zb+KeZiQ5DhPbZSGoh8Hsl1/HrCC33kbsselgueMQ1IIHrPofsCv3+PHWG+rlQc64/441LdrY7fw76DV",
+ "HmefxbwtfXG1AzX0tk6PuoPF78LthADDVCwMYUBeEkrSnGGAQ3ClZZXqc07RhQrYNXIo4R3DYaf6hW8S",
+ "9+IjTrYDdc6pMjSsHatoaHgOkZDJ1wDet1bVYgFKd2y7OcA5d60YJxVnGscqzHoldsFKkHgyMLEtC7oh",
+ "c5pjDOB3kILMKt22djBjRGnjott4pBmGiPk5p5rkQJUm3zN+tkZwPjHD8wwHvRLyoqZCXOcvgINiKokr",
+ "0m/sV9SnbvpLp1sxfdd+9vrmvjcAj3ssn8FhfvLSeQInL9HcayKRPdzvLTxVMJ5EmexsCaRgHNPiOrxF",
+ "PjNGq2egh01M0636OddrbhjpkuYso/pm7NBVcT1ZtNLR4ZrWQnSiDX6uH2KHzwuRlDS9wLPH0YLpZTWb",
+ "pKKYeg9ouhC1NzTNKBSC47dsSks2VSWk08vHO8yxW+grElFXV+OR0zrqzrMcHODYhLpj1nE+/7cW5ME3",
+ "r87I1K2UemCTmyzoICsl4rS6uzWtgxwzeZucb7O7zvk5fwlzxpn5fnTOM6rpdEYVS9W0UiC/ojnlKUwW",
+ "ghwRB/Il1fSc91T84P0ZTD122JTVLGdpPPAyHtmc6D6E8/P3hkHOzz/0TgX6G6cbKiqjdoBkxfRSVDpx",
+ "SZ+JhBWVWQR1VSf9IWSbsr1t1DFxsC1HuqRSBz+uqmlZqiQXKc0TpamG+PTLMjfTD9hQEeyEuSpEaSG9",
+ "EjSa0WKD6/tGuHMRSVc+Y7hSoMgvBS3fM64/kOS8Ojh4CuS4LF8bmKcGj1+crjE8uSmhFd7YM8uoARYL",
+ "beDErUEFay1pUtIFqOj0NdASVx836gIDaXlOsFtIk/qkHkE1E/D0GF4Ai8e186hwcqe2l7+9E58CfsIl",
+ "xDZGOzUB8ZuulwH1rcgNk914uQIY0VWq9DIxsh2dlTIs7lemTupfGJ3sTykUW3AjBO7+wwxIuoT0AjJM",
+ "xYai1Jtxq7s/CHM7nFcdTNkrCzZdCvNqMRI0A1KVGXU2AOWbboKjAq19Vuc7uIDNmWjScq+T0Xg1HqX2",
+ "EkFieGZIUJFTg83IMGsotg5Gd/HdoarBlJYlWeRi5qS7Zoujmi98n2FBtjvkHQhxjClqMmzh95LKCCEs",
+ "8w+Q4AYTNfBuxfqx6bXCaXvmb7aiZAhk1+YS3U7EvLtr9JR6VInZxsmMqvgGAuaLWQ8jQ90zZz+SDari",
+ "DCYEr706xp3laIvUx91WsqlsRR7tPb4h1OJcApI3u7pHo02R0HxYUuWv7uANJy8we220Qwdz9cGq4SJ/",
+ "sor+XmM5MTNuDpd0iP7D+eYnwXFpcI2pzib3iq0rDOP6ZoG9Ueyzzn2quc8vH42vlSs+HrkMnthyCI5W",
+ "RgY5LOzEbWPPKA61BypYIIPHD/N5zjiQJHbySpUSKbN3rxpd7sYAY4Q+IsQGeMjeEGJsHKCNhwUImLwR",
+ "oWzyxXWQ5MDwdIF62HjMEPwNu6PNzdVuZ97uNEP7uqMRonFz9cIuYz8KNR5FVdKQh9AO79smM+i5VDEW",
+ "NaqpH5fpR38U5IDbcdLSrMlFLFpnrApANjz13QK3gXzG5maTfxicGUlYMKWh8ZuNtPpA0P3GLi6FhmTO",
+ "pNIJuuzR6ZlGXys0Br82TePqp3Omo2wMIK59cNgL2CQZy6v4artxv3tphn1T+0+qml3ABjcZoOmSzPAu",
+ "c/Skd8vQNvtg64Rf2wm/pnc23/14yTQ1A0shdGeMT4SrOvpkmzBFGDDGHP1VGyTpFvWCvs9LyHUsZT3w",
+ "ydCrNQrT3qkYjBr0hCnzsLeZXwEWw5rXQorOJTB0t86C4Ukc5RlhOrgK3M+EHZABWpYsW3d8eAt14NgO",
+ "DfhrGOrW4o8cRY1qYDsoEPjrsWQrCT7mYJc02DPtpW4ezm2yF2WM9RUSJFAI4VBM+ZIkfUIZ1sZ787to",
+ "dQY0/w42P5m2OJ3R1Xh0O5c/RmsHcQet39bLG6UzxrKtC9iK4F2T5LQspbikeeICI0OsKcWlY01s7uMo",
+ "96zq4u732avj128d+sb3zIFKGyrbOitsV34yszIesZADAuJLHhhr1fvO1hALFr++RxYGU1ZLcNfLA1vO",
+ "aDHHXFa8mkBZIIouuDKPH6ntDJW4mJ6d4pbYHpR1aK/xiG1krx3No5eU5d4V9dgOHH/h5Jp46rW1Qgjg",
+ "1lHBILib3Km66Ul3XDoa7tqhk8KxtlyAL2yNB0UE7+ZVGRMSPVxk1YJuDAfZ4HRfOfGqSIz4JSpnaTxs",
+ "wWfKMAe3MV/TmGDjAWPUQKzYwBECr1gAyzRTe5yWdZAMxogSE0NKW2g3E644V8XZbxUQlgHX5pNEqewI",
+ "qpFLX+Clv50a26E/lgNsi7004G9jYxhQQ9YFIrHdwAgjzJFcXe9w+onWoXHzQxAYvMZBVThib0vccsjk",
+ "+MNxsz3tX7YjxWEtrb7+M4xh6y7sLuTlwxZLi+jAGNHCXIO7xfHwTmF6X2OPaLYERDfcDMa2bE+uRARM",
+ "xVeU2zo7pp+loeutwMYMTK+VkHi1REH0lJ6pZC7F7xD3ZOdmoSKpn46UaC5i70kkZb+rROuoTFNBzdM3",
+ "xGOQtYcsueAjaR8kDkg4cnkQOscb4D7ARblla1sTqHV8HReOMOVkauE3wuFw7qXp5HQ1o7Hr8cagMjgd",
+ "N4c0rVCcFsR39qvgooYN7wXnPXVbZu9jlCCb/Oz+3b8bGkefFstnkLKC5nErKUPqt2+fZWzBbGGlSkFQ",
+ "uccBshXpLBe56kf2GKwhzcmcHIyD2mBuNTJ2yRSb5YAtHtsWM6pw16rDrXUXMz3geqmw+ZM9mi8rnknI",
+ "9FJZwipBagMWXbk69j0DvQLg5ADbPX5OPsOov2KX8NBQ0dkio6PHzzEtxf5xENvsXAW1bXolQ8XyD6dY",
+ "4nyMxx4WhtmkHNRJ9G6QLXs5rMK2SJPtuo8sYUun9XbLUkE5XUD8NLfYgZPti6uJQcMOXXhma7YpLcWG",
+ "MB0fHzQ1+mkgNc2oP4sGSUVRMF0YAdKCKFEYfmrK8thBPThbAM6VyvB4+Y94xFJatwG6DvP9BojtXh6b",
+ "NR6EvaEFtMk6JtReoctZc0nZKcQJOfEXcbF2SF0yxNLGjGWmjiadWUIskcC4Rieq0vPkC5IuqaSpUX+T",
+ "IXST2eeHkXop7RIJ/HqI3zvdJSiQl3HSywG299aE60s+44InhdEo2cMmFTSQymhJAqFpHk9q8Rq9m9O0",
+ "HfS+BqiBkgyyW9ViNxpo6lsxHt8C8JasWM/nWvx47ZndO2dWMs4etDIr9OO7187KKISMlWVoxN1ZHBK0",
+ "ZHCJ+TXxRTIwb7kWMt9rFW6D/R97ytJ4ALVZ5mU55gh8VbE8+6lJbe+UnJKUp8voGcfMdPy5qZFXT9nK",
+ "cbQKwJJyDnkUnN0zf/Z7a2T3/1XsO07B+J5tu6Wk7HQ7k2sQb6PpkfIDGvIynZsBQqq2c33r5LB8ITKC",
+ "4zRXzhsu61fHCgrg/FaB0rF6vfjB5lViLMv4Bbb+CgGeoVU9Id/YGtdLIK0LqmjNsqLK7WVHyBYgXZC1",
+ "KnNBszExcM5eHb8mdlTbx9YitfVfFmjMtWfRiWEE9Sn2S3XyRebiaZj7w9meF2ZmrTReUFeaFmUsw960",
+ "OPMNMI0/jOuimRdSZ0JeWgtbefvNDmL4Yc5kYSzTGprV8cgT5j9a03SJpmtLmwyz/P6FizxXqqAsaF1h",
+ "sS4xgXJn8Ha1i2zpojERxr9YMWVLG8MltJP66xsuznXySf7t6cmKc8spUR297QbWTcjukbOH9z70G8Ws",
+ "Q/hrGi5KVDKF69ZxOsVe0SvU3aJQvXqg9jZhXY/Pl6xPKRecpXiBOSimXKPsyiTvcy6yx13vbljKi7iT",
+ "0IhwRUtR1elBjoqDxam8InSE6wdmg69mUS132D811uNdUk0WoJXTbJCNfREzFy9hXIErGYIVswM9KWTr",
+ "rAk1ZPT4MqnD3NdkI0zxHTCAvzbf3jj3CNPyLhhHQ8iRzWUA2ogGVnHVxnpimiwEKDef9pVc9d70meC1",
+ "1AzWHya+6ivCsEc1Ztr2XLIP6tifUrpTQdP2hWlL8Fim+bmVTmwHPS5LN2j0Rm29wrGCaYMEjpw2JT7c",
+ "HxC3hh9C28JuW9MLcD81jAaXeDgJJe7DPcaoa891SlNe0ryyHIUtiE3riV4DYzyCxmvGoalJHNkg0uiW",
+ "gAuD8jrQT6WSamsC7qXTzoDmeCIZU2hKuxDtbUF1FhhJgnP0YwwvY1M2b0Bx1A0aw43yTV0K2XB3YEy8",
+ "wBrsjpD9InhoVTkjKsPEzU5ZvJjiMIrbl6lsbwB9MejbRLa7ltRKznV2oqELL6mI2Zuv1pBW9sBd2NIY",
+ "tCxJijdIg/0iGtFkyjhPxSyPFcGpPwYVLDHJdrbBf2MFS4ZJ4k7Er52T5Y+/seO1DdY2pJ65aZgpUWxx",
+ "w2Vu+t/pOudi0UbkfgMKW2U8ZJmYdL8yanO4xuixV6z1FUVMQxK+vDE6TfXlmrZMoiKPOqVNpdrtTvlw",
+ "zdkxqv6BZMR3ze17ancXe8YwlJKYDmbQUu3S4zUlzVX3vmDaQrExCDafwRaotY+9ROMrQzkMNoXBfO71",
+ "3s8u6lmZCHsrQX1yTB+h73zmHSkpcwdojcT2KetydPtZ0/tk7zUL3J2Ey3xFILGZvN1ZMKzFIb3M5yD3",
+ "3VYmmux/+bU5kMczEyxkuwDuKtm2cxr3zqyazyHV7HJHpvk/jMXaZDGPvU1ri4oHieesztTxbwJd09Ru",
+ "ENqWCL4Vn+CG/a3RGcozvYDNA0Xa9ZRfRuXPMepNrn0hBbD6QGJYRKhY9N864S4gy1TNGUgFf9pmu0NT",
+ "+GWwKGad7hWr87PXWJ4lCXV2Vl1EZ6gOp4hZ8XuNZbrukXjVZG9jSsZQMnq/Stzw7mXrAqq6oHH96E+Q",
+ "TGGctW6xpZW7dob3Auq4k7+ABsr/5q/Q2FHsY1JN2U6M8q2ozHyLqNnqLeJkIL2rmzBt89JZHOl5PTJr",
+ "ciP6OcORO9GYC5PmQjG+SIZSptrpCHUs/4Gyhy4YIMDye4jXHKQr16v9W12JFj6XYhse20jhnoq4CRHU",
+ "YMksi9zgxcV3zc1MLARD7Utt7kApnCCRUFCDnQzuTw6PuY3YL+x3nyTrC4F0yu5E4Hp+TXZegPRZMUz1",
+ "iBhy/Zy43XJ38u1N/AXGua2GrmKXKbkhZRhJKqXIqtRu0KFggPer9r4PvEWVRK38tD/LnsGW4+3418FV",
+ "hgvYTK3RlC4pb8oUtMXaFkW3cwgu3nVW+05dqbjBmi/sBBZ3gucf6QmNR6UQeTIQOjrp3wntysAFSy8g",
+ "I2bv8OfJA3UqyWcYsajPBlbLjS8DXpbAIXs4IcT4UkWpN/6YoF1yqDM4f6C3jb/GUbPKXtN2TtrknMdT",
+ "Iezbh7fUbx7Mdq1mHwO+5VAWyPaB9JoPqDa6ilRt3ffdnEjgvltJs2Eqi0XMSrnhXbm95LvvqEVYP7zl",
+ "sMP/uWh5dbaoRidYLyTcsXcXRCmv6d3172/sOz2cB2q1SkF/nnsvQIu2A7Tfh/BNaKJP3OGIgp7tE1GI",
+ "1yYw3TGkYQmC1TMIokp+efwLkTB3D7E+eoQDPHo0dk1/edL+bLyvR4+iknlvwYzW8zxu3BjH/DR0uGsP",
+ "MAfyCDrrUbE828UYrayQprId5j387PJn/pDaej9bF7kvqq7M2HXCqN1FQMJE5toaPBgqyPfYI9XDdYsk",
+ "duBmk1aS6Q1eYfIeFfs5ejX8mzoI4958qxPBXR6yfW7UpSU1IZvmhchvhH21qTB7PQbWNZaofrWmRZmD",
+ "E5QvH8z+Bk+/OMwOnj7+2+yLg2cHKRw+e35wQJ8f0sfPnz6GJ188OzyAx/PPn8+eZE8On8wOnxx+/ux5",
+ "+vTw8ezw8+d/e+CfZ7SINk8f/icWoEyO354kZwbZhia0ZFja/QrN6bnwxexoipJofJJ8dOR/+v9ewiap",
+ "KIIX5d2vI5ejNlpqXaqj6XS1Wk3CLtMF+miJFlW6nPpx+hXB357U+TP23gOuqE2NMKyAi+pY4Ri/vXt1",
+ "ekaO355MGoYZHY0OJgeTx1gztgROSzY6Gj3Fn1B6lrjuU8dso6OPV+PRdAk010v3RwFastR/Uiu6WICc",
+ "uKp+5qfLJ1N//D796PzTq23f2pctXFgh6BCUf5p+bDn5WQgXiyNNP/qLKMEn+/jN9CP6aYO/t9H4qNcs",
+ "u5r6sJDr4R6RmH5sXnW5stKRQyykY/OcaPAIzNj40fiEnrK/GoHw6dVMtR8Bqlf3JDOranq9qF+4CW7R",
+ "H73/N31X/0PnmdEnBwf/Zg8mHl5zxltt4dbxVaTk5lc0Iz71D8d+fH9jn3CMjBuFRqzCvhqPnt3n7E+4",
+ "YXmaE2wZXIrpL/2P/IKLFfctze5aFQWVGy/GqqUU/LtVqMPpQqFnJNkl1TD6gK537Ox7QLngy5TXVi74",
+ "3OafyuW+lMun8Q7pk2sK+Kc/4z/V6aemTk+tuttfnTpTzmaXT+0rAo2F16teuYBomjsmnNNtTzt1New3",
+ "oHsvVY1uqWL+sEer/r3l5PDg8P4wCCOcb4QmX+NB1CcqrfsJzjYbqOMTZVmPva3iB6W/EtlmC4UKtShd",
+ "LmjEIpkxblDu7yv9yvq9N6QuYEPs4awPwrtHG9uW0NUtpf+Tfe7qz132D5TbZwdP72/4U5CXLAVyBkUp",
+ "JJUs35AfeX175uZOVJZFk83a4tbTI8b2T0UGC+CJUxLJTGQbXyWmBfACbIC2ZxZMP7ZLPdpg02AQyL5E",
+ "Xz8e0Ud6tiEY1W2rtsgD9t/B5qvNycu+fxbxwLoobvXDuvI/4Prc6Mn8P4X9U9uk92bY2D4dtZd9qKK7",
+ "94z91c3Y5Waqoy/O7rSq/1AR+fNZXv8s758uwp8uwk20zzcQkXtUEFv0jtum1bLSmVjZKgLRoCkWE6S5",
+ "q8aD9XHqYzUtiAfQZKuTH9z1jHxDSikuWWY0o2YFGC1VKxnT2ecgdd5Nr99uWTCOA2AhfRzFlp2iQR6o",
+ "e8980g/QOszeWNMmpt1+qwA9FqfeHI6jcStC51YkUuTp1iqtH1C72rZW/iWF1t/TFWU6mQvp0sCRQv2j",
+ "Ow00n7r7sp1f7a224Mf288+RX6d1Jcfox+6BZOyrOy/0jZpMgPBkHVeqPlN//8EQHIvjuEVsDoqPplPM",
+ "kFwKpaejq/HHziFy+PFDTeOP9cbmaH314ep/AwAA//+cgLi/YKAAAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index e4c80509b..e8ba9221e 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -340,6 +340,34 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// ParticipationKey defines model for ParticipationKey.
+type ParticipationKey struct {
+
+ // Address the key was generated for.
+ Address string `json:"address"`
+
+ // When registered, this is the first round it may be used.
+ EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"`
+
+ // When registered, this is the last round it may be used.
+ EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"`
+
+ // The key's ParticipationID.
+ Id string `json:"id"`
+
+ // AccountParticipation describes the parameters used by this account in consensus protocol.
+ Key AccountParticipation `json:"key"`
+
+ // Round when this key was last used to propose a block.
+ LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"`
+
+ // Round when this key was last used to generate a state proof.
+ LastStateProof *uint64 `json:"last-state-proof,omitempty"`
+
+ // Round when this key was last used to vote.
+ LastVote *uint64 `json:"last-vote,omitempty"`
+}
+
// PendingTransactionResponse defines model for PendingTransactionResponse.
type PendingTransactionResponse struct {
@@ -406,7 +434,7 @@ type TealValue struct {
// \[tb\] bytes value.
Bytes string `json:"bytes"`
- // \[tt\] value type.
+ // \[tt\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**
Type uint64 `json:"type"`
// \[ui\] uint value.
@@ -588,6 +616,16 @@ type NodeStatusResponse struct {
TimeSinceLastRound uint64 `json:"time-since-last-round"`
}
+// ParticipationKeyResponse defines model for ParticipationKeyResponse.
+type ParticipationKeyResponse struct {
+
+ // Detailed description of a participation key
+ ParticipationKey string `json:"participationKey"`
+}
+
+// ParticipationKeysResponse defines model for ParticipationKeysResponse.
+type ParticipationKeysResponse []ParticipationKey
+
// PendingTransactionsResponse defines model for PendingTransactionsResponse.
type PendingTransactionsResponse struct {
@@ -598,6 +636,13 @@ type PendingTransactionsResponse struct {
TotalTransactions uint64 `json:"total-transactions"`
}
+// PostParticipationResponse defines model for PostParticipationResponse.
+type PostParticipationResponse struct {
+
+ // encoding of the participation id.
+ PartId string `json:"partId"`
+}
+
// PostTransactionsResponse defines model for PostTransactionsResponse.
type PostTransactionsResponse struct {
@@ -661,22 +706,6 @@ type TransactionParametersResponse struct {
// VersionsResponse defines model for VersionsResponse.
type VersionsResponse Version
-// RegisterParticipationKeysParams defines parameters for RegisterParticipationKeys.
-type RegisterParticipationKeysParams struct {
-
- // The fee to use when submitting key registration transactions. Defaults to the suggested fee.
- Fee *uint64 `json:"fee,omitempty"`
-
- // value to use for two-level participation key.
- KeyDilution *uint64 `json:"key-dilution,omitempty"`
-
- // The last round for which the generated participation keys will be valid.
- RoundLastValid *uint64 `json:"round-last-valid,omitempty"`
-
- // Don't wait for transaction to commit before returning response.
- NoWait *bool `json:"no-wait,omitempty"`
-}
-
// ShutdownNodeParams defines parameters for ShutdownNode.
type ShutdownNodeParams struct {
Timeout *uint64 `json:"timeout,omitempty"`
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index dbecc6dc9..80a74df41 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -616,172 +616,179 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PbuJLoX8HVblUeK0rOa86Jq6b2euLMjO9JMqnYM3vuxrmzENmScEwCPABoS5Pr",
- "/76FBkCCJCjJj7xm/SmxiEej0Wj0C90fR6koSsGBazXa/zgqqaQFaJD4F01TUXGdsMz8lYFKJSs1E3y0",
- "778RpSXji9F4xMyvJdXL0XjEaQFNG9N/PJLwz4pJyEb7WlYwHql0CQU1A+t1aVrXI62ShUjcEAd2iKPD",
- "0eWGDzTLJCjVh/IXnq8J42leZUC0pFzR1HxS5ILpJdFLpojrTBgnggMRc6KXrcZkziDP1MQv8p8VyHWw",
- "Sjf58JIuGxATKXLow/lCFDPGwUMFNVD1hhAtSAZzbLSkmpgZDKy+oRZEAZXpksyF3AKqBSKEF3hVjPbf",
- "jxTwDCTuVgrsHP87lwB/QKKpXIAefRjHFjfXIBPNisjSjhz2Jagq14pgW1zjgp0DJ6bXhLyulCYzIJST",
- "dz++IE+ePHluFlJQrSFzRDa4qmb2cE22+2h/lFEN/nOf1mi+EJLyLKnbv/vxBc5/7Ba4ayuqFMQPy4H5",
- "Qo4OhxbgO0ZIiHENC9yHFvWbHpFD0fw8g7mQsOOe2Ma3uinh/F90V1Kq02UpGNeRfSH4ldjPUR4WdN/E",
- "w2oAWu1LgylpBn2/lzz/8PHR+NHe5b+8P0j+0/357Mnljst/UY+7BQPRhmklJfB0nSwkUDwtS8r7+Hjn",
- "6EEtRZVnZEnPcfNpgaze9SWmr2Wd5zSvDJ2wVIqDfCEUoY6MMpjTKtfET0wqnhs2ZUZz1E6YIqUU5yyD",
- "bGy478WSpUuSUmWHwHbkguW5ocFKQTZEa/HVbThMlyFKDFzXwgcu6OtFRrOuLZiAFXKDJM2FgkSLLdeT",
- "v3Eoz0h4oTR3lbraZUVOlkBwcvPBXraIO25oOs/XROO+ZoQqQom/msaEzclaVOQCNydnZ9jfrcZgrSAG",
- "abg5rXvUHN4h9PWQEUHeTIgcKEfk+XPXRxmfs0UlQZGLJeilu/MkqFJwBUTM/gGpNtv+f45/eUOEJK9B",
- "KbqAtzQ9I8BTkQ3vsZs0doP/Qwmz4YValDQ9i1/XOStYBOTXdMWKqiC8KmYgzX75+0ELIkFXkg8BZEfc",
- "QmcFXfUnPZEVT3Fzm2lbgpohJabKnK4n5GhOCrr6fm/swFGE5jkpgWeML4he8UEhzcy9HbxEiopnO8gw",
- "2mxYcGuqElI2Z5CRepQNkLhptsHD+NXgaSSrABw/yCA49SxbwOGwitCMObrmCynpAgKSmZBfHefCr1qc",
- "Aa8ZHJmt8VMp4ZyJStWdBmDEqTeL11xoSEoJcxahsWOHDsM9bBvHXgsn4KSCa8o4ZIbzItBCg+VEgzAF",
- "E25WZvpX9Iwq+O7p0AXefN1x9+eiu+sbd3yn3cZGiT2SkXvRfHUHNi42tfrvoPyFcyu2SOzPvY1kixNz",
- "lcxZjtfMP8z+eTRUCplACxH+4lFswamuJOyf8ofmL5KQY015RmVmfinsT6+rXLNjtjA/5fanV2LB0mO2",
- "GEBmDWtUm8Juhf3HjBdnx3oVVRpeCXFWleGC0pZWOluTo8OhTbZjXpUwD2pVNtQqTlZe07hqD72qN3IA",
- "yEHcldQ0PIO1BAMtTef4z2qO9ETn8g/zT1nmMZwaAnYXLRoFnLHgnfvN/GSOPFidwIzCUmqQOsXrc/9j",
- "ANC/SpiP9kf/Mm0sJVP7VU3duGbGy/HooBnn9mdqetr1dRSZ5jNh3O4ONh1bnfD24TGjRiFBQbUDww+5",
- "SM+uBUMpRQlSM7uPMzNO/6Tg8GQJNANJMqrppFGqrJw1QO/Y8Wfsh1oSyMgV9wv+h+bEfDankGovvhnR",
- "lSkjxInA0JQZic/eI3Ym0wAlUUEKK+QRI5xdCcoXzeSWQdcc9b1Dy4fuaJHdeWnlSoI9/CLM0hut8WAm",
- "5PXopUMInDS6MKFm1Fr6NStv7yw2rcrE4SciT9sGnYEa82OfrYYY6g4fw1ULC8eafgIsKDPqbWChPdBt",
- "Y0EUJcvhFs7rkqplfxFGwHnymBz/fPDs0ePfHz/7ztzQpRQLSQsyW2tQ5L67V4jS6xwe9FeGDL7KdXz0",
- "7556Dao97lYMIcD12LucqBMwnMFijFh7gYHuUK5lxW8BhSClkBGZF0lHi1TkyTlIxUTEfPHWtSCuheFD",
- "Vu7u/G6hJRdUETM3qmMVz0BOYpg3ehZe6RoKte2isEOfrHiDGzcglZKueztg1xtZnZt3lz1pI99L94qU",
- "IBO94iSDWbUI7ygyl6IglGTYERniG5HBsaa6UrfABZrBGmDMRoQg0JmoNKGEi8wcaNM4zh8GbJloREHb",
- "jw5Zjl7a+2cGRjpOabVYamLEShHb2qZjQlO7KQneFWpA9at1dtvKTmftZLkEmq3JDIATMXP6ldP8cJEU",
- "zTLae1wcd2rAqnWCFlylFCkoBVni3EtbQfPt7C7rDXhCwBHgehaiBJlTeU1gtdA03wIotomBW4sTTint",
- "Q73b9Js2sDt5uI1UGh3TUoGRXczpzkHDEAp3xMk5SFTOPun++Umuu31VOeA6cTfwCSvM8SWccqEgFTxT",
- "0cFyqnSy7diaRi0xwawgOCmxk4oDDxgIXlGlrYrOeIYio2U3OA/2wSmGAR68UczIv/nLpD92avgkV5Wq",
- "bxZVlaWQGrLYGjisNsz1Blb1XGIejF1fX1qQSsG2kYewFIzvkGVXYhFEtbMR1Tas/uLQHG/ugXUUlS0g",
- "GkRsAuTYtwqwG5qPBwAx+kXdEwmHqQ7l1Dbr8UhpUZbm/Omk4nW/ITQd29YH+tembZ+4qG74eibAzK49",
- "TA7yC4tZ6zhYUiPb4cikoGfmbkJJzdoS+jCbw5goxlNINlG+OZbHplV4BLYc0gEh2bkmg9k6h6NDv1Gi",
- "GySCLbswtOABif2ttYCfBHbzW5BaIqMaSqOcoOjm7WrmcgibwIqmOl8blquXsCYXIIGoalYwra1Loy3U",
- "aFEm4QBRJWrDjE6NtdZjL5Luolcf41DB8vrC6Xhkr9DN8J10LtEWOtzlXQqRT7ZTXw8ZUQh2EYIPSCnM",
- "rjPnQfNulpwp3QPSXahow6gP8j3VQjOugPxfUZGUchQGKg01dxISjzxeBWYGw0zrOZm9dRsMQQ4FWBkH",
- "vzx82F34w4duz5kic7jwbmfTsIuOhw9RYn8rlL7xCeiQ5uoowmRQtTQcKxIqZBTIyVY1E8fdSbsMhj46",
- "9BPiYVKGo9iFSyHmt7Balq1izoYMVrGVup1DgfGeka7WCvQkehGWBsCIvxHkWY7aqJh3KJIUYEhFLVlp",
- "hmx8I2sNrbiK/3f/3/ffHyT/SZM/9pLn/zb98PHp5YOHvR8fX37//f9v//Tk8vsH//6vMeFBaTaLWy5+",
- "pmppIHWcY8WPuLU9zoW0Iufa3WRi/rnh7pCY2UyP+WBJuxDd29iGME6o3WykOSOo5OtbuGTsQERCKUEh",
- "SwgFfGW/inkYVuEoT62VhqKvI9uuvw9ICO/8/dqjUsFzxiEpBId1NJKQcXiNH2O9LVsa6IwXxFDfrvzR",
- "gr8DVnueXTbzpvjF3Q7Y0Ns6yOMWNr87bsc8EgaUoHoHeUkoSXOGyp/gSssq1aecongZkGvEtOqF5mGF",
- "44VvEtdwIgqIG+qUU2VwWAudUbPZHCLq5I8AXu9Q1WIBSneEmznAKXetGCcVZxrnKsx+JXbDSpBo35zY",
- "lgVdkznNUT/6A6Qgs0q3r3v0eytt1BdrqzHTEDE/5VSTHIwq95rxkxUO593LnmY46Ashz2osxHn+Ajgo",
- "ppI4I/3JfkV+6pa/dLwVgxDtZ89vPvcF4GGPeWUd5EeHThQ+OkR5p7HS9GD/bKp7wXgSJbKTJZCCcQzu",
- "6dAWuW+kNk9ADxp7j9v1U65X3BDSOc1ZRvX1yKHL4npn0Z6ODtW0NqKjifm1foi50BYiKWl6hh6U0YLp",
- "ZTWbpKKYehVguhC1OjDNKBSC47dsSks2VSWk0/NHW8SxG/ArEmFXl+OR4zrq1n21buDYgrpz1jYQ/7cW",
- "5N5PL0/I1O2UumdDNOzQgW89orW5FwItI7dZvA0xtjEqp/yUH8KccWa+75/yjGo6nVHFUjWtFMgfaE55",
- "CpOFIPvEDXlINT3lPRY/+AoAAygdNGU1y1lKzsKruDmaNrKzP8Lp6XtDIKenH3oW0/7F6aaKnlE7QXLB",
- "9FJUOnGha4mECyqzCOiqDl3CkW3g6aZZx8SNbSnShca58eOsmpalSnKR0jxRmmqIL78sc7P8gAwVwU7o",
- "cSdKC+mZoOGMFhrc3zfC2YwlvfBxj5UCRf6roOV7xvUHkpxWe3tPgByU5Ssz5rGB478crzE0uS6hpd/v",
- "GCvRDBbT7XHhVqCClZY0KekCVHT5GmiJu48XdYFm6Twn2C3ESe1vxKGaBXh8DG+AhePK0SC4uGPby79B",
- "iC8BP+EWYhvDnRpj4XX3ywz1s8gNkV17u4IxortU6WViznZ0VcqQuN+ZOjR5YXiyt+AqtuDmELgo7hmQ",
- "dAnpGWQYUApFqdfjVnfvJHA3nGcdTNnAaxv0gdGBaAqZAanKjDoZgPJ1N0xLgdY+Nu0dnMH6RDTBhVeJ",
- "y7ocj1IbCp0Ymhk6qEipwWVkiDU8tm6M7uY7h5OBlJYlWeRi5k53TRb7NV34PsMH2d6Qt3CIY0RRo2ED",
- "vZdURhBhiX8ABddYqBnvRqQfW15JpWYpK+36d4tCe9vqYwbZdrlErxMx794aPaYeZWK2cTKjKn6BgPli",
- "9sOcoa4/zs9krYq4ggnBx3uOcGc5yiK1K9CebCpR6PLLtq+RhkCLUwlI3tzqHow2RkLxYUmVf4CA7zT8",
- "gdnpoh1yWtROJ0NF3uuE+l4jOTEzbw7ndAj/w1GzR4ErKXiMUcfEesbWPQzjOj7avov0sbM+YNZHyY7G",
- "V4p4HY9cdENsOwRHKSODHBZ24baxJxQH2j0VbJCB45f5PGccSBLzSlGlRMrsC5KGl7s5wAihDwmxBh6y",
- "8wgxMg7ARms5DkzeiPBs8sVVgOTA0LxO/dhoZw/+hu3W5uaBqhNvt4qhfd7RHKJxE0But7FvhRqPoixp",
- "SENotSK2yQx6KlWMRA1r6ttl+tYfBTngdZy0OGtyFrPWGakCkAyPfbdAbSD32dxc8g8Cp4mEBVMaGr3Z",
- "nFZvCPq8totzoSGZM6l0gip7dHmm0Y8KhcEfTdM4+2mhitgXbiyLcx+c9gzWScbyKr7bbt6/HZpp39T6",
- "k6pmZ7DGSwZouiQzfJFpbqHW9KbNhqmtZ3bjgl/ZBb+it7be3WjJNDUTSyF0Z45vhKo6/GTTYYoQYIw4",
- "+rs2iNIN7AV1n0PIdSzwNtDJUKs1DNNGhg9aDXqHKfNjbxK/AiiGOa8dKbqWQNDduAqGnjjKM8J08KCx",
- "HyU4cAZoWbJs1dHh7agDbjsU4K8gqFuJP+KKGtWDbcFAoK/HAlEkeJuD3dLgzrRPU3m4tslOmDHSV4iQ",
- "gCGEUzHlEyv0EWVIG1//bsPVCdD8b7D+zbTF5Ywux6ObqfwxXLsRt+D6bb29UTyjLduqgC0L3hVRTstS",
- "inOaJ84wMkSaUpw70sTm3o7ymVldXP0+eXnw6q0D3+ieOVBpTWUbV4Xtym9mVUYjFnLggPiH20Za9bqz",
- "FcSCza9fw4TGlIsluEeygSxnuJgjLnu8GkNZcBSdcWUed6ltNZU4m55d4gbbHpS1aa/RiK1lr23No+eU",
- "5V4V9dAOuL9wcY099cpcIRzgxlbBwLib3Cq76Z3u+OloqGsLTwrn2vCMt7Av1RURvBtYZERI1HCRVAu6",
- "NhRkjdN95sSrIjHHL1E5S+NmCz5Thji4tfmaxgQbDwijZsSKDbgQeMWCsUwztYO3rANkMEcUmWhS2oC7",
- "mXAphirO/lkBYRlwbT5JPJWdg2rOpU9T0b9OjezQn8sNbFNWNMPfRMYwQw1JFwjEZgEjtDD3wD2sFU6/",
- "0No0bn4IDINXcFSFM/auxA1OJkcfjpqtt3/ZthSHGYH6/M8Qhn09vj0dkTdbLC2gA3NE0wsN3hYHwzeF",
- "6X2FO6K5EhDc8DIY2+QjuRKRYSp+QbnNFmL6WRy63gqszcD0uhASw+4VRL30TCVzKf6AuCY7NxsViX10",
- "qERxEXtPIuHMXSZaW2WaPFAevyEcg6Q9JMkFH0nbkThwwpHKA9M5vmP1Bi7KLVnbzCYt93X8cIQhJ1M7",
- "fnM4HMy9MJ2cXsxo7JGvEagMTAeNk6ZlitOC+M5+F5zVsKG9wN9Tt2U2Vr0E2QQo999FXVM4+rZIPoOU",
- "FTSPS0kZYr/9MidjC2bTw1QKgvwjbiCbV8tSkcvhYt1gDWqO5mRvHGQ4cruRsXOm2CwHbPHItphRhbdW",
- "bW6tu5jlAddLhc0f79B8WfFMQqaXyiJWCVILsKjK1bbvGegLAE72sN2j5+Q+Wv0VO4cHBotOFhntP3qO",
- "YSn2j73YZefyQG3iKxkylv9wjCVOx+j2sGOYS8qNOom+m7DJ+4ZZ2IbTZLvucpawpeN6289SQTldQNyb",
- "W2yByfbF3USjYQcvPLOZp5SWYk2Yjs8Pmhr+NBCaZtifBYOkoiiYLswB0oIoURh6apKL2En9cDaNlXvw",
- "7+HyH9HFUlq1AboK8+c1ENu7PLZqdIS9oQW00Tom1D4vylnzgNMxxAk58o8UMQNCnfjA4sbMZZaOIp3Z",
- "QnzozbhGJarS8+SvJF1SSVPD/iZD4Caz755Gsj60H3rzqwH+2fEuQYE8j6NeDpC9lyZcX3KfC54UhqNk",
- "D5pQ0OBURp9rC03zeFCL5+jdmKbNQ+8qgJpRkkFyq1rkRgNOfSPC4xsGvCEp1uu5Ej1eeWWfnTIrGScP",
- "Wpkd+vXdKydlFELGnqw3x91JHBK0ZHCO8TXxTTJj3nAvZL7TLtwE+i/rZWk0gFos82c5pgj8ULE8+60J",
- "be8kzpGUp8uoj2NmOv7eZPqql2zPcfSF9JJyDnl0OHtn/u7v1sjt/w+x6zwF4zu27SbEscvtLK4BvA2m",
- "B8pPaNDLdG4mCLHajvWtg8PyhcgIztM8x22orJ/jJ0gO8s8KlI5lHcUPNq4SbVlGL7C5KQjwDKXqCfnJ",
- "ZupdAmm90ERplhVVbl/7QbYA6YysVZkLmo2JGefk5cErYme1fWxGRZsbY4HCXHsVHRtG8HZ/t1Annyor",
- "Hoa5+zib48LMqpXGx7tK06KMRdibFie+AYbxh3ZdFPNC7EzIoZWwlZff7CSGHuZMFkYyrUezPB5pwvxH",
- "a5ouUXRtcZNhkt89qYunShUkN6zzxNXP7/HcGbhdXheb1mVMhNEvLpiyCVrhHNpB/fULF6c6+SD/9vJk",
- "xbmllCiP3vQC6zpo98BZ5703/UYh6yD+ioKLEpVM4ao5bo6xV/QNcTdhTi+roX1NWGcV84m3U8oFZym+",
- "4A1SwtYgu2Svu/hFdnjs3DVL+SPuTmjkcEXT9NThQQ6Lg4l7PCN0iOsbZoOvZlMtddg/NWYVXVJNFqCV",
- "42yQjX0qJmcvYVyBS6eAeX8DPilky9eEHDLqvkxqM/cVyQhDfAcE4B/NtzdOPcKwvDPGURByaHMRgNai",
- "gbkotZGemCYLAcqtp/0kV703fSb4LDWD1YeJz12JY1hXjVm29Uv2hzrwXkrnFTRtX5i2BN0yzc+tcGI7",
- "6UFZukmjL2rrHY4lkxpEcMTblHhzf4DcevxwtA3ktjG8AO9TQ2hwjs5JKPEe7hFGnZerk2DvnOaVpShs",
- "QWxYT/QZGOMRMF4xDk1m1cgFkUavBNwYPK8D/VQqqbYi4E487QRojh7JGENT2plobzpUZ4MRJbhGP8fw",
- "NjYpxQYYR92gEdwoX9cJXQ11B8LEC8wk7RDZTxCGUpUTojIM3OykDIsxDsO4fbK99gXQPwZ9mch215La",
- "k3OVm2jowUsqYvLmyxWklXW4C5sbgpYlSfEFaXBfRC2aTBnlqZjlkdi3w/pjkIcPg2xna/w3lrFjGCXO",
- "I37lmCzv/saOVxZY2yP1xE1DTIlii2tuc9P/Vvc5F4s2IJ/XoLDxjIckEzvdLw3bDN9A9nLBWMZaP1HE",
- "MCThk7Si0lQ/rmmfSWTkUaW0ybe5WSkfzpw5RtY/EIz4rnl9T+3tYn0MQyGJ6WAELdUuPF5T0jx17x9M",
- "m+4yNoKNZ7BpNm3Jiqh9ZSiGwYYwmM+93rvJRT0pE8feiFAfHNMH6G8+8o6UlDkHWnNi+5h1Mbr9qOld",
- "oveaDe4uwkW+4iCxlfQzKQ0T+CFoynJV54OsqxsE/lYjz3XzsVy4lykYOlyrpv6NCij/m4+yt7PYqhlN",
- "1jM0BFxQmfkW0ZvNX5rJQARIN6bShq6yONDzembWuE/7YYWRZ5PoLk9zoRhfJENRFW2PZW3uu6esXRZ1",
- "CExRhXDNQbpsh9oXJUm08O7WTXBsQoXLiX0dJKjBrDoWuMG3Te+ax1uYK4LakjTO5hwukEgoqIFOBk+s",
- "hufchOwX9ruPo/O5AjqZOSLjenpNtr6R8o5zpnpIDKl+ThzL3R6fdx2RgnFuk8mq2HsrblAZKpulFFmV",
- "Wlt/eDDAi147PxncwEqigkDaX2WPp+f4gPZVEO18Buup5avpkvLmJXP7WNucsnYNwduczm7fqrQVv9Py",
- "hV3A4lbg/JLC0nhUCpEnA9rlUf/ZWPcMnLH0DDJi7g7vchrI5Ubuo1JTmw8vlmufRbUsgUP2YEKIEbeK",
- "Uq+9JbGdlaQzOb+nN82/wlmzyr7kdHLc5JTHvaW2yNMN+ZsfZjNXs1UPbziVHWTzRHrFB1gbvYhkNty1",
- "QEDEttcRUAKislDEpJRrPqfZ6Xz3ZbkI6YeB0FuE6LOW4Gff3XfseULCLQuAgSHjigJgP8R71+XhOpCr",
- "VQr669x5A1q4HcD9LohvtJc+coeVDj3bRemIP1823VHrsQjxD+z7p+uz6SytWgJu3tiu/zbkw7F+igF3",
- "YQenFcuzbZvbcv42CazQvfm7c5N/kRRav9vw4/5xc9mErmIt6W4CIiay1tbkwVSBW3cHj67rFvHf4oWR",
- "VpLpNb5U8FoR+z36AvQn4K6igitQU8d7unBDWxvNRR8s6tZNOaufhC0xUZj7Gu1nGlOxvlzRoszBnYvv",
- "783+Ak/++jTbe/LoL7O/7j3bS+Hps+d7e/T5U/ro+ZNH8Pivz57uwaP5d89nj7PHTx/Pnj5++t2z5+mT",
- "p49mT797/pd7vpaUBbSp0/R3zDOXHLw9Sk4MsA1OaMn+BmubWcqQsc9ZRVM8iUavyEf7/qf/7U/YJBVF",
- "UP7W/TpyoSijpdal2p9OLy4uJmGX6QL1rESLKl1O/Tz9zLdvj2o3uQ1vxh21HlBDCripjhQO8Nu7l8cn",
- "5ODt0aQhmNH+aG+yN3mEqSFL4LRko/3RE/wJT88S933qiG20//FyPJougeZ66f4oQEuW+k/qgi4WICcu",
- "eZf56fzx1HvZph+djnlpRl3E3nBYh3/g5e3ntBpbiQttt75EYpA2QblsCmMys68ViBMBeYZ+WKu2GdZW",
- "I+soC4ptB1Wdxq1a4e+/ofKXsezSseRgsYLm9Xve4YJ2Qc1fX+f32V8vI+E+HzpFyh7v7X2CwmTj1ige",
- "L9escPb0FkFs23lvDGh3uB5XeE1zQzdQF60d4YIefbMLOuL4ct6wLWLZ8uV49Owb3qEjbg4OzQm2DALm",
- "+6zwV37GxQX3Lc2VXBUFlWu8cIOUXaFodTnIcttPVZzFdZgPQ5DnPEiX1LL4zNaezsZE1YUZSsmEERyw",
- "xHMGqQSK17yQGJXTZEx32j3YShSvD/6ONt/XB38n35Oh8rfB9FarbjPxn0BHMvr/sG5KOG7k6F+KTY6/",
- "2orB386dd9Or5q4uxDdbF2IHpn23u3dVP77Zqh/ftki6qp8ZUsIFTzimjzsHEpi17mTUr1pGfbb35Jtd",
- "zTHIc5YCOYGiFJJKlq/Jr7yOy76ZCF7znIoHkfIb+U/PRdVI0YH4HqSynX5sRSNk240nrbCEbEyYbiTD",
- "eBHtIMune5MzbhL6UJ7ZeFof4KbGPrENWuusT9Xux7iX9mYSE9IDV8sP66PDXeTy1pqCfBsx2byFr6uV",
- "5v+kFotrFzj/lDdAD44faEb8w51PzJt3Y6ZP955+PgjCXXgjNPkRgzU+MUv/pHaCOFkFzAbTRU8/+tQc",
- "OzAYl/amzVq6VfFjTMWc0LF7i+sK89QeesNPLCO0mYf6XMPMsCu/6GfmiXGKJhvJ18IjbLrsCF120XvH",
- "F+74wo34QpegGo5gKyRPP2I0WsgOekcSS8P9iRwlQZ5yKQqfKFOQOeh0aesddX3ZEbbin4cN85RNSVRu",
- "zF863nXcov4jclyL89dico8dI3Gw48/WfXo5HqUgI8T3i49VN5/ZHOOp6qd/PlcQPpivK4DXL+ddfhGm",
- "iCFQLYiLSCdmF68E5Ytm8r5vHdFyPWvSHYJvguAeU3vpEhnY4+UW8a0bPoLbkiTkDYpDeMD9y7c/o9nj",
- "U97In3pBbwQHAiumsH6BpcU7d2MtLtSFcOvw47DG2YDo0HY6ftQrll1O61K5Q0LFW1fRdaNQ0dzUjAdl",
- "7EPzCi1LoFJd+5Le7g476cx4dBgm3Bd1qBOhTcHcCCgGL1f0JP7bLm7EP6+37q6q811V5+tVdf6sKnMT",
- "kGNZlfcTyQ7X+KL6tP4i+vQbwRO8bYFrL/m10PLldGt8RNCqfOVTxXBh60kLiUJCyAfUZKfrFQZdCS2m",
- "giGdw2TsLtuU6nRZldOP+B8MBr1swi5tXqSpNbNtum9t/ezRrQZQ3NU8/wZqnn95E96NxNHOaiWUdRAa",
- "euuR/pvT4msN9QvwtCOTXXO1rHQmLoI45qam2+BJsi1u9SS9ERnYcdux/P08fxSDG1z8c/8A1Twi/j7L",
- "Y7NpZ5/KMeUeF6a0Wiy1zfEaTSBdd0xoagk/serAthfLtpV/mXcOhOYSaLYmMwBOxMwsutlXXGSnKp3j",
- "hPGHtw1cpRQpKAVZEiZ32wRaHVWO9kC9AU8IOAJcz0KUIHMqrwmsZQmbAe1mNa3Bra0+7tT3od5t+k0b",
- "2J083EYqoSm0rgVG1eTgiu5GULgjTlBUZZ94//wk192+qsT8YZGn4/brCSvwmRunXChIBc9UdDAsHbbt",
- "2GLV/GAtCmzKbH9SPmd1flvrbOhFmBn5t/o9WG/spsZhndnPSlqQRZMmw2rDXG9gVc8l5pH6iS6h+7aR",
- "h7AUjF/n+gtSTOjAImGGiyzuguU5+mbjckcLiAYRmwA59q0C7IZq/wAgTDWIrt+NtyknSLautChLc/50",
- "UvG63xCajm3rA/1r07ZPXC4QHPl6JkCFYraD/MJi1qbxXFJFHBykoGdOQl+4eOw+zOYwJorx1FXjG8q/",
- "wAo4Nq3CI7DlkHaFvPD4t85Z53B06DdKdINEsGUXhhYcEyu/CiHwqlpe137wCc2ebbE6EK8asdL+Pb2g",
- "TCdzIe2NmWChiIgHtT37f1CmXXkSpwNr4cyWrtSEZShunCCJrQqDWV39YJ8ugRWRqCsz1Y9C7uSwbWyr",
- "WhCzMFJxzfxzO6wz72XMr8/7eSc930nPd9LznfR8Jz3fSc930vOd9PyppecvE4FJksTzaf+8Jva4hoy+",
- "SQn/G3q/8jkfnDRCfy3yo5JgRHRzjjdGZmig+dSljkcXejRRsg3xDtPQp2Y6xkmZU6xBt9L+oTGWnwsK",
- "0fj8xzYHkuE1psGTx+T454Nnjx7//vjZd4b7LG0tnLDtfV8YSul1Dg9cBFud4MSHsgGnmGgZI9mo135S",
- "H+Vgpfk5y4Eog6yX2PwQziE3orz1dRKjjPTVoxOg+QuHHMuVQOkfRLbuEI5Z/xRR0SaZxmHOOJWRZOh9",
- "QukhWQssiOCy+/c0qMtbjZmIxwn0N2zbXg3UAYuS9yZ62RoX4OrYuLF38ZGZPfXoJC6R+hdl2QQhcmTW",
- "sKevJpK+m6XXHRxsa6QKd/6+1ah3j/jowcNjO/ZZTAkWJbYUt0pMowXwxLGFZCaytS8Y7OoytLisTZg/",
- "zGRtNnpw5T7cMbivHhg2ixhd6ZapJ1qwKCju1aRY/TKM06Zq38g3r08d7UpSN46Z7A7X5xpB0MV9IclC",
- "iqp8YEvT8jWqxEVJ+dqbwYysiKWoMOc0xnnfLqeuE6X2+OzulZRCfQUf7Xd/t2jB9KqujFJm6yjFsxh2",
- "q/1sx3hTy2Jb1jufwzNSd2egyk5/E/0uu0DH2vRX2ozGkeoXnVoXd4+r/kdcCW+lOGdGcY5y2H4UVsMQ",
- "JltvBhmwLLwaOqk2/N3Q5qfv6MVJqyLJbjx1lTjB88ZS6RJQIKultEheEnNfSkGzlCp8P+IKlH1iiVWv",
- "jiJ2BwQT80v1I33NBT7ZKljiuDvJk+1IbzchJoBRNpHml5Uum2jTA/dcp4WNO1PAn8UU8IM/fIpQzKvd",
- "OZxB0cAd2BS90Cse5VJT9BIOR7wFB+KtbXmrvrve8G0XXuPCdC4IyEtCSZozdFAIrrSsUn3KKZpAO0nH",
- "O+49b9gdFqVe+CZxK3zESO6GOuUUC03XhtGoSDWHWAk9AC+xqWqxAKU7nHgOcMpdK8abotaYwz2xcZ/m",
- "ujYcfWJbFnRN5lj4SpA/QAoyM1pEmLMEDYpKszx3/kQzDRHzU041ycEw/dfMCHRmOG9zqn3krlilx8JA",
- "bQqbUXagPv1P9is+WnDL93YjNG/Zzz4aevxl8j4nLBuE/OjQ5RM7OsQUMY0nsQf7Z3MvFYwnUSIzN77z",
- "yHdpi9w3Mp4noAeNT9Lt+ik3wrQWBBk91dcjh64boHcW7enoUE1rIzreAr/WD7G3rAuRGJURi2GNFkwv",
- "qxlmXvZvXKcLUb93nWYUCsHxWzalJZuqEtLp+aMt8sEN+BWJsKu7m/vPY8QP6cCclnrjsahQd+8H7uVb",
- "SN/6deds3RqidJch9S5D6l0OzbsMqXe7e5ch9S5/6F3+0P+p+UMnGyVEl3Nja0a/1ktjrNFPm0qrNQMP",
- "m7Vy//XdkkxPCDnBOpbU3AFwDpLmJKXKCkbcRsoVbLHURFVpCpDtn/KkBUkqCjfx/ea/Vs09rfb2ngDZ",
- "e9DtY+0WAeft90VRFT/ZMsvfk9PR6ag3koRCnIPLBBbW9bO9tg77v+pxf+mVCEUrDBpXfCVCoqr5nKXM",
- "ojwXRhlYiE58Hxf4BaQBziaaIEzbpKuIT4yLdNE57fKDbaG7f79fofDNQYdc7pKafPpqN5tqot6UB24c",
- "u8cQ71jG52AZX5xp/Inyr92lWvvKFhQ6Ulu5VG8gSdUV42LF5J2M1FRkDCsc4g1X1zZ8/8HwcQXy3F9+",
- "TcG+/ekUs50vhdLTkbma2sX8wo/mfqALO4K7XErJzjFT4ofL/w4AAP//80cbi5XqAAA=",
+ "H4sIAAAAAAAC/+y9e3fbOJIo/lXw0+45eawoOa+eic/psz93nO72nSSdE7tn526c2w2RJQljEuAAoC11",
+ "rr/7PSgAJEiCkvzIq9d/JRbxKBQKhUI9P45SUZSCA9dqtP9xVFJJC9Ag8S+apqLiOmGZ+SsDlUpWaib4",
+ "aN9/I0pLxhej8YiZX0uql6PxiNMCmjam/3gk4V8Vk5CN9rWsYDxS6RIKagbW69K0rkdaJQuRuCEO7BBH",
+ "h6PLDR9olklQqg/lLzxfE8bTvMqAaEm5oqn5pMgF00uil0wR15kwTgQHIuZEL1uNyZxBnqmJX+S/KpDr",
+ "YJVu8uElXTYgJlLk0IfzhShmjIOHCmqg6g0hWpAM5thoSTUxMxhYfUMtiAIq0yWZC7kFVAtECC/wqhjt",
+ "vx8p4BlI3K0U2Dn+dy4B/oBEU7kAPfowji1urkEmmhWRpR057EtQVa4Vwba4xgU7B05Mrwl5XSlNZkAo",
+ "J+9+fEGePHny3CykoFpD5ohscFXN7OGabPfR/iijGvznPq3RfCEk5VlSt3/34wuc/9gtcNdWVCmIH5YD",
+ "84UcHQ4twHeMkBDjGha4Dy3qNz0ih6L5eQZzIWHHPbGNb3VTwvm/6K6kVKfLUjCuI/tC8Cuxn6M8LOi+",
+ "iYfVALTalwZT0gz6fi95/uHjo/Gjvct/e3+Q/Lf789mTyx2X/6IedwsGog3TSkrg6TpZSKB4WpaU9/Hx",
+ "ztGDWooqz8iSnuPm0wJZvetLTF/LOs9pXhk6YakUB/lCKEIdGWUwp1WuiZ+YVDw3bMqM5qidMEVKKc5Z",
+ "BtnYcN+LJUuXJKXKDoHtyAXLc0ODlYJsiNbiq9twmC5DlBi4roUPXNDXi4xmXVswASvkBkmaCwWJFluu",
+ "J3/jUJ6R8EJp7ip1tcuKnCyB4OTmg71sEXfc0HSer4nGfc0IVYQSfzWNCZuTtajIBW5Ozs6wv1uNwVpB",
+ "DNJwc1r3qDm8Q+jrISOCvJkQOVCOyPPnro8yPmeLSoIiF0vQS3fnSVCl4AqImP0TUm22/X8d//KGCEle",
+ "g1J0AW9pekaApyIb3mM3aewG/6cSZsMLtShpeha/rnNWsAjIr+mKFVVBeFXMQJr98veDFkSCriQfAsiO",
+ "uIXOCrrqT3oiK57i5jbTtgQ1Q0pMlTldT8jRnBR09f3e2IGjCM1zUgLPGF8QveKDQpqZezt4iRQVz3aQ",
+ "YbTZsODWVCWkbM4gI/UoGyBx02yDh/GrwdNIVgE4fpBBcOpZtoDDYRWhGXN0zRdS0gUEJDMhvzrOhV+1",
+ "OANeMzgyW+OnUsI5E5WqOw3AiFNvFq+50JCUEuYsQmPHDh2Ge9g2jr0WTsBJBdeUccgM50WghQbLiQZh",
+ "Cibc/JjpX9EzquC7p0MXePN1x92fi+6ub9zxnXYbGyX2SEbuRfPVHdi42NTqv8PjL5xbsUVif+5tJFuc",
+ "mKtkznK8Zv5p9s+joVLIBFqI8BePYgtOdSVh/5Q/NH+RhBxryjMqM/NLYX96XeWaHbOF+Sm3P70SC5Ye",
+ "s8UAMmtYo68p7FbYf8x4cXasV9FHwyshzqoyXFDaepXO1uTocGiT7ZhXJcyD+ikbvipOVv6lcdUeelVv",
+ "5ACQg7grqWl4BmsJBlqazvGf1Rzpic7lH+afssxjODUE7C5aVAo4ZcE795v5yRx5sG8CMwpLqUHqFK/P",
+ "/Y8BQP8uYT7aH/3btNGUTO1XNXXjmhkvx6ODZpzbn6npadfXecg0nwnjdnew6di+CW8fHjNqFBIUVDsw",
+ "/JCL9OxaMJRSlCA1s/s4M+P0TwoOT5ZAM5Ako5pOmkeVlbMG6B07/oz98JUEMnLF/YL/oTkxn80ppNqL",
+ "b0Z0ZcoIcSJQNGVG4rP3iJ3JNEBJVJDCCnnECGdXgvJFM7ll0DVHfe/Q8qE7WmR3Xlq5kmAPvwiz9ObV",
+ "eDAT8nr00iEETpq3MKFm1Fr6NStv7yw2rcrE4SciT9sGnYEa9WOfrYYY6g4fw1ULC8eafgIsKDPqbWCh",
+ "PdBtY0EUJcvhFs7rkqplfxFGwHnymBz/fPDs0ePfHj/7ztzQpRQLSQsyW2tQ5L67V4jS6xwe9FeGDL7K",
+ "dXz07576F1R73K0YQoDrsXc5USdgOIPFGLH6AgPdIeSg4S2VmqWsRGwdZSFG26O0GpIzWJOF0CTDQTJ7",
+ "0+Ooci0rfgsbA1IKGZGkkSC1SEWenINUTESUIm9dC+JaGO5mpfnO7xZackEVMXPjI6/iGchJbD/N6w0F",
+ "BQ2F2nb92KFPVrzBuBuQSknXvX21642szs27y063ke/fDIqUIBO94iSDWbUIbz4yl6IglGTYEdnsG5HB",
+ "saa6UrfAW5rBGmDMRoQg0JmoNKGEi8ywCdM4znUGNKSomkGNkg4ZmV7aW20GRuZOabVYamKEVRHb2qZj",
+ "QlO7KQneQGrgQVlrAmwrO53VvuUSaLYmMwBOxMy92tx7EhdJUdmjvR3H8bwGrPql0YKrlCIFpSBLnNFq",
+ "K2i+nd1lvQFPCDgCXM9ClCBzKq8JrBaa5lsAxTYxcGshxT11+1DvNv2mDexOHm4jleblaqnASETmdBs2",
+ "N4TCHXFyDhKffJ90//wk192+qhwwyLh7/YQV5vgSTrlQkAqeqehgOVU62XZsTaOW8GFWEJyU2EnFgQfU",
+ "Dq+o0vbhz3iGgqhlNzgP9sEphgEevFHMyH/3l0l/7NTwSa4qVd8sqipLITVksTVwWG2Y6w2s6rnEPBi7",
+ "vr60IJWCbSMPYSkY3yHLrsQiiGqneao1Y/3FoZLf3APrKCpbQDSI2ATIsW8VYDdUSg8AYl4tdU8kHKY6",
+ "lFNrwscjpUVZmvOnk4rX/YbQdGxbH+hfm7Z94qK64euZADO79jA5yC8sZq05YkmNxIgjk4KembsJ5T+r",
+ "oejDbA5johhPIdlE+eZYHptW4RHYckgHRG9n8Axm6xyODv1GiW6QCLbswtCCB94BLaH0b7C+Bbmz7AzZ",
+ "x/UhaMpyyEjwM7JrUnZl5K3SfG+2XcS8A5LtCALLRhEsXU+y20nq7U4VEXsjy8mZwhuqhz+F4FvjyUlg",
+ "crkF0TQyqmEnlBME1KtkjQQQNoEVTXW+NveqXsKaXIAEoqpZwbS21rA2OWlRJuEA0ff3hhmdBsQaHvwO",
+ "7KKSOcahguX1t2I8snLSZvhOOpJSCx1OQiuFyCfbWUwPGVEIdjsCpTC7zpzx1VvoPCW1gHRSE6q/am59",
+ "T7XQjCsg/1tUJKUcJb5KQ30FCYl8He97M4O5Mes5mRWtGgxBDgVYQRa/PHzYXfjDh27PmSJzuPAeC6Zh",
+ "Fx0PH+Kz7K1QunW4bonVHUUuE1RMmJvJCY1dnjLZia0dZTvtZFuvcHToJ8UzpZQjXLP8GzOAzslc7bL2",
+ "kEaWVC23rx3H3UkvEwwdW7fddynE/BZWy7JVzEyXwSq2Uke4+Ci6Z14QawV6EhX2SgNgxFIP8ixHjYuY",
+ "dw4kKcCcFLVkpRmysSquNbQ8kv7P/f/cf3+Q/DdN/thLnv/H9MPHp5cPHvZ+fHz5/ff/t/3Tk8vvH/zn",
+ "v8cEZKXZLK7z+5mqpYHUMc4VP+JWaz8X0j6r1k5aE/PPDXeHxMxmeswHS9rpuMU2hHFC7WYjzRlhPF/f",
+ "wh1rByISSgkKOWL4iFX2q5iHDkmO8tRaaSj6eiDb9bcBKfidlyF7VCp4zjgkheAxie4X/PoaP8Z6W648",
+ "0Bnvx6G+XRm7BX8HrPY8u2zmTfGLux2wobe1e9QtbH533I4KMHTFQhUG5CWhJM0ZKjgEV1pWqT7lFJ9Q",
+ "AblGjBL+YTj8qH7hm8Rf8ZFHthvqlFNlcFg/rKKq4TlEVCY/Avi3taoWC1C6I9vNAU65a8U4qTjTOFdh",
+ "9iuxG1aCRMvAxLYs6JrMaY46gD9ACjKrdFvaQY8Rpc0T3eojzTREzE851SQHqjR5zfjJCofzjhmeZjjo",
+ "CyHPaizEef4COCimkjgj/cl+RX7qlr90vBXdd+1nz28+9wXgYY/5MzjIjw7dS+DoEMW9RhPZg/2zqacK",
+ "xpMokZ0sgRSMo1tch7bIfSO0egJ60Og03a6fcr3ihpDOac4yqq9HDl0W1zuL9nR0qKa1ER1tg1/rh5jx",
+ "eSGSkqZnaHscLZheVrNJKoqpfwFNF6J+DU0zCoXg+C2b0pJNVQnp9PzRFnHsBvyKRNjV5XjkuI66dS8H",
+ "N3BsQd05az2f/1sLcu+nlydk6nZK3bPOTXbowCsl8mh1sTUtQ45ZvHXOt95dp/yUH8KccWa+75/yjGo6",
+ "nVHFUjWtFMgfaE55CpOFIPvEDXlINT3lPRY/GD+DrscOmrKa5SyNK17GI+sT3R/h9PS9IZDT0w89q0D/",
+ "4nRTRc+onSC5YHopKp04p89EwgWVWQR0VTv94cjWZXvTrGPixrYU6ZxK3fhxVk3LUiW5SGmeKE01xJdf",
+ "lrlZfkCGimAn9FUhSgvpmaDhjBYa3N83wtlFJL3wHsOVAkV+L2j5nnH9gSSn1d7eEyAHZfnKjHls4Pjd",
+ "8RpDk+sSWuqNHb2MmsFiqg1cuBWoYKUlTUq6ABVdvgZa4u7jRV2gIi3PCXYLcVJb6nGoZgEeH8MbYOG4",
+ "sh8VLu7Y9vLRO/El4CfcQmxjuFOjEL/ufpmhfha5IbJrb1cwRnSXKr1MzNmOrkoZEvc7Uzv1LwxP9lYK",
+ "xRbcHAIX/zADki4hPYMMXbGhKPV63OruDWHuhvOsgykbsmDdpdCvFjVBMyBVmVEnA1C+7jo4KtDae3W+",
+ "gzNYn4jGLfcqHo2X41FqgwgSQzNDBxUpNbiMDLGGx9aN0d18Z1Q1kNKyJItczNzprsliv6YL32f4INsb",
+ "8hYOcYwoajRsoPeSyggiLPEPoOAaCzXj3Yj0Y8trqdN29N9saclwkG2XS/Q6EfPurdFj6lEmZhsnM6ri",
+ "FwiYL2Y/zBnq2pz9TFapiiuYEAx7dYQ7y1EWqc3d9mRT2dI82ji+IdDiVAKSN7e6B6ONkVB8WFLlQ3cw",
+ "wskfmJ0u2iHDXG1YNVTkLav43mskJ2bmzeGcDuF/2N/8KDCXBmFMtTe5Z2zdwzCuIwtsRLH3Oveu5t6/",
+ "fDS+kq/4eOQ8eGLbIThKGRnksLALt409oTjQ7qlggwwcv8znOeNAkpjllSolUmZjrxpe7uYAI4Q+JMQq",
+ "eMjOI8TIOAAbjQU4MHkjwrPJF1cBkgND6wL1Y6OZIfgbtmubm9BuJ95uFUP7vKM5ROMm9MJuY18LNR5F",
+ "WdLQC6Gt3rdNZtB7UsVI1LCmvl6mr/1RkANex0mLsyZnMW2dkSoAyfDYdwueDeQ+m5tL/kFgM5KwYEpD",
+ "8242p9Urgj6v7uJcaEjmTCqd4JM9ujzT6EeFwuCPpmmc/XRsOsrqAOLcB6c9g3WSsbyK77ab92+HZto3",
+ "9ftJVbMzWOMlAzRdkhnGMkctvRumtt4HGxf8yi74Fb219e5GS6apmVgKoTtzfCNU1eEnmw5ThABjxNHf",
+ "tUGUbmAv+PY5hFzHXNaDNxm+ag3DtDEVg1qD3mHK/NibxK8AimHOa0eKriUQdDeugqEljvKMMB2EAvc9",
+ "YQfOAC1Llq06b3g76oDZDgX4KwjqVuKPmKJG9WBbMBC812POVhK8zsFuaXBn2qBuHq5tshNmjPQVIiRg",
+ "COFUTPmUJH1EGdLGuPltuDoBmv8N1n83bXE5o8vx6GZP/hiu3YhbcP223t4onlGXbZ+ALQ3eFVFOy1KK",
+ "c5onTjEyRJpSnDvSxOZej/KZWV38+X3y8uDVWwe+eXvmQKVVlW1cFbYrv5lVmRexkAMHxKc8MNKqfztb",
+ "QSzY/DqOLFSmXCzBhZcHspzhYo647PFqFGXBUXTKlXncpLZVVeJ0enaJG3R7UNaqveZFbDV7bW0ePacs",
+ "909RD+2A+QsX1+hTr8wVwgFurBUMlLvJrbKb3umOn46GurbwpHCuDQHwhc3xoIjgXb8qI0LiCxdJtaBr",
+ "Q0FWOd1nTrwqEnP8EpWzNK624DNliINbna9pTLDxgDBqRqzYgAmBVywYyzRTO1jLOkAGc0SRiSqlDbib",
+ "CZecq+LsXxUQlgHX5pPEU9k5qOZc+gQv/evUyA79udzANtlLM/xNZAwz1JB0gUBsFjBCDXPEV9c/OP1C",
+ "a9W4+SFQDF7BUBXO2LsSNxiZHH04arbW/mVbUxzm0urzP0MYNu/C9kReXm2xtIAOzBFNzDV4WxwM3xSm",
+ "9xXuiOZKQHDDy2Bs0/bkSkSGqfgF5TbPjulnceh6K7A6A9PrQkgMLVEQtdIzlcyl+APiL9m52aiI66dD",
+ "JYqL2HsScdnvMtFaK9NkUPP4DeEYJO0hSS74SNqGxIETjlQeqM4xAtwruCi3ZG1zArXM1/HDEbqcTO34",
+ "zeFwMPfcdHJ6MaOx8HgjUBmYDhojTUsVpwXxnf0uOK1hQ3uBvaduy2w8Rgmy8c/ux/5dUzj6tkg+g5QV",
+ "NI9LSRlivx19lrEFs4mVKgVB5h43kM1IZ6nIZT+yZrAGNUdzsjcOcoO53cjYOVNslgO2eGRbzKjCW6tW",
+ "t9ZdzPKA66XC5o93aL6seCYh00tlEasEqQVYfMrVuu8Z6AsATvaw3aPn5D5q/RU7hwcGi04WGe0/eo5u",
+ "KfaPvdhl5zKobeIrGTKW/3KMJU7HaPawY5hLyo06icYG2bSXwyxsw2myXXc5S9jScb3tZ6mgnC4gbs0t",
+ "tsBk++JuotKwgxee2ZxtSkuxJkzH5wdNDX8acE0z7M+CQVJRFEwX5gBpQZQoDD01aXnspH44mwDOpcrw",
+ "cPmPaGIp7bMBug/mz6sgtnd5bNVoCHtDC2ijdUyoDaHLWROk7BjihBz5QFzMHVKnDLG4MXOZpaNIZ7YQ",
+ "UyQwrvERVel58leSLqmkqWF/kyFwk9l3TyP5UtopEvjVAP/seJegQJ7HUS8HyN5LE64vuc8FTwrDUbIH",
+ "jStocCqjKQmEpnncqcVz9K5P0+ahdxVAzSjJILlVLXKjAae+EeHxDQPekBTr9VyJHq+8ss9OmZWMkwet",
+ "zA79+u6VkzIKIWNpGZrj7iQOCVoyOEf/mvgmmTFvuBcy32kXbgL9l7WyNC+AWizzZzn2EPihYnn298a1",
+ "vZNySlKeLqM2jpnp+FuTI69esj3H0SwAS8o55NHh7J35m79bI7f/P8Wu8xSM79i2m0rKLrezuAbwNpge",
+ "KD+hQS/TuZkgxGrb17d2DssXIiM4TxNy3lBZPztWkADnXxUoHcvXix+sXyXqssy7wOZfIcAzlKon5Ceb",
+ "43oJpBWgitIsK6rcBjtCtgDplKxVmQuajYkZ5+TlwStiZ7V9bC5Sm/9lgcJcexUdHUaQn2I3VyefZC7u",
+ "hrn7OJv9wsyqlcYAdaVpUcY87E2LE98A3fhDvS6KeSF2JuTQStjKy292EkMPcyYLI5nWo1kejzRh/qM1",
+ "TZcoura4yTDJ7564yFOlCtKC1hkW6xQTeO4M3C53kU1dNCbCvC8umLKpjeEc2k79dYSLezp5J//28mTF",
+ "uaWUKI/eFIF1HbR74Kzx3qt+o5B1EH9FwUWJSqZw1TxOx9grGkLdTQrVywdqownrfHw+ZX1KueAsxQDm",
+ "IJlyDbJLk7yLXWSHWO+uWsofcXdCI4crmoqqdg9yWBxMTuUZoUNcXzEbfDWbaqnD/qkxH++SarIArRxn",
+ "g2zsk5g5fQnjClzKEMyYHfBJIVu2JuSQUfNlUqu5r0hG6OI7IAD/aL69cc8jdMs7YxwFIYc25wFoNRqY",
+ "xVUb6YlpshCg3HraIbnqvekzwbDUDFYfJj7rK45hTTVm2dYu2R/qwFspnVXQtH1h2hI0yzQ/t9yJ7aQH",
+ "ZekmjUbU1jscS5g2iOCItSnx6v4AufX44WgbyG2jewHep4bQ4ByNk1DiPdwjjDr3XCc15TnNK0tR2IJY",
+ "t55oGBjjETBeMQ5NTuLIBZFGrwTcGDyvA/1UKqm2IuBOPO0EaI4WyRhDU9qpaG86VGeDESW4Rj/H8DY2",
+ "afMGGEfdoBHcKF/XqZANdQfCxAvMwe4Q2U+Ch1KVE6IydNzspMWLMQ7DuH2ayvYF0D8GfZnIdteS2pNz",
+ "lZtoKOAlFTF58+UK0soa3IVNjUHLkqQYQRrcF1GNJlPm8VTM8lgSnPpjkMESnWxna/w3lrBkGCXOIn5l",
+ "nyxv/saOVxZY2yP1xE1DTIlii2tuc9P/Vvc5F4s2IJ9XobDxjIckEzvdLw3bHM4xeuAZax2iiG5Iwqc3",
+ "xkdTHVzTPpPIyKOP0iZT7eZH+XDO2TGy/gFnxHdN9D21t4u1MQy5JKaDHrRUO/d4TUkT6t4/mDZRbGwE",
+ "689gE9TaYi9R/cqQD4N1YTCfe713k4t6UiaOvRGh3jmmD9DfvOcdKSlzBrTmxPYx63x0+17Tu3jvNRvc",
+ "XYTzfMVBYit5uzVhWItCep7Pge+7zUw02T34tTHIo80EE9kugLtMtm2fxp09q+ZzSDU73+Jp/l9GYm28",
+ "mMdeprVJxQPHc1Z76viaQFcUtRuANjmCb4QniLC/MThDfqZnsL6nSDuf8mH0/DlCvU7YF2IAsw8khkSE",
+ "imn/7SPcKWSZqikDseCtbbY7NIlfBpNi1u5esTw/O83lSZJQJ2fVSXSG8nCKmBS/01ym6w6OV433Nrpk",
+ "DDmj97PEDd9eNi+gqhMa10V/AmcK81jrJlu6cGFnGBdQ6518ABoo/5sPobGz2GJSTdpO1PJdUJn5FlGx",
+ "1UvEyYB7V9dh2vqlszjQ83pm1vhG9H2GIzHR6AuT5kIxvkiGXKba7gi1Lv+eskYXVBBg+j2Eaw7SpevV",
+ "vlZXooX3pdgExyZUuFIR10GCGkyZZYEbDFx810RmYiIYaiu1OYNSuEAioaAGOhnETw7PuQnZL+x37yTr",
+ "E4F00u5ExvX0mmwNgPReMUz1kBhS/Zy423K78+113guMc5sNXcWCKblBZahJKqXIqtRe0OHBAP+u2jke",
+ "eAMriUr5aX+VPYEtx+j4V0Eowxmsp1ZoSpeUN2kK2sfaJkW3awgC7zq7fatPqbjAmi/sAha3AueXfAmN",
+ "R6UQeTKgOjrqx4R2z8AZS88gI+bu8PbkgTyV5D5qLGrbwMVy7dOAlyVwyB5MCDFvqaLUa28maKcc6kzO",
+ "7+lN869w1qyyYdrukTY55XFXCFv78Ib8zQ+zmavZYsA3nMoOsnkiveIDrI1eRLK27lo3J6K472bSbIjK",
+ "QhGTUq4ZK7fT+e4/1CKkH0Y5bHn/nLVedTapRkdZLyTc8usu0FJe8XXXj9/YdXm4DuRqlYL+OnfegBZu",
+ "B3C/C+Ib1UQfucMaBT3bRaMQz01guqNKwyIEs2cQBJX8/uh3ImHuCrE+fIgTPHw4dk1/f9z+bF5fDx9G",
+ "T+ZnU2a0yvO4eWMU8/ch4641YA74EXT2o2J5to0wWl4hTWY79Hv4zfnPfJHcer/ZJ3L/qLo0Y1dRo3Y3",
+ "ARETWWtr8mCqwN9jB1cP1y3i2IGXTVpJptcYwuRfVOy3aGj4T7USxtV8qx3BnR+yLTfq3JIalU1TIfIn",
+ "Yas2FeauR8W6xhTVL1e0KHNwB+X7e7O/wJO/Ps32njz6y+yve8/2Unj67PneHn3+lD56/uQRPP7rs6d7",
+ "8Gj+3fPZ4+zx08ezp4+ffvfsefrk6aPZ0++e/+WeL89oAW1KH/4DE1AmB2+PkhMDbIMTWjJM7X6J4vRc",
+ "+GR2NMWTaN4k+Wjf//T/+xM2SUURVJR3v46cj9poqXWp9qfTi4uLSdhlusA3WqJFlS6nfp5+RvC3R7X/",
+ "jI17wB21rhGGFHBTHSkc4Ld3L49PyMHbo0lDMKP90d5kb/IIc8aWwGnJRvujJ/gTnp4l7vvUEdto/+Pl",
+ "eDRdAs310v1RgJYs9Z/UBV0sQE5cVj/z0/njqTe/Tz+69+mlGXURC+6ynkCB+0c/2Z3TdaFRx1cdDvKp",
+ "KJdmZUxmNoyJOPGRZ+igYZ98hrXVyDrKmgweR0GhRBeJZUPT999/QxWlY1n3Y1kDI4ViG1XRcI3YoIy+",
+ "L53/7K+XET/AD526n4/39j5Brc9xaxSPl2sWDX16iyC2DUA3BrQ7XI8rvKa5oRuo68CPcEGPvtkFHXHU",
+ "fxu2RSxbvhyPnn3DO3TEzcGhOcGWQSRNnxX+ys+4uOC+pbmSq6Kgco0XbpDLLxStLgdZbjuGzWlrh/kw",
+ "BPUfgjxqLW3RbO3pbExUXZWolEwYwWFsXgEZpBIoXvNCorteU0nCaQbAlmF6ffAP1Be/PvgH+Z4MVZQP",
+ "prcv8jYT/wl0pNLJD+umKvJGjv6l2OT4qy3C/+3ceTe9au7q5Xyz9XJ2YNp3u3tXDembrYb0bYukqzr+",
+ "mBIueMIxr+Q5kECtdSejftUy6rO9J9/sao5BnrMUyAkUpZBUsnxNfuV1wMbNRPCa51Q8CKHZyH965q1G",
+ "ig7E9yDH9fRjy5Mh2648abk0ZGPCdCMZtrwdgpy8dfpfF6w3bjJ9UZ5ZR3vv+arGPuMVauusPdbux7iX",
+ "D2sSE9IDM80P66PDXeTy1pqCRDwx2byFr40ieu/S+qQaizDgK3KvxffmU98APTh+oBnxEX2fmDfvxkyf",
+ "7j39fBCEu/BGaPIjOnp8Ypb+SfUEcbIKmA3mkZ9+9Dl7dmAwLh9Wm7U476GNTMWc0LEL0ncVu2rrvuEn",
+ "lhHalGR9rmFm2JVf9FN2xThFk6boa+ERNo9+hC676L3jC3d84UZ8oUtQDUdAH1k1/YiebCE76B1JrBn5",
+ "JzKUBAUMpCh8Bl1B5qDTpfUd7tqyI2zFx40O85RN2ZVuzF861nXcon52CVyLs9di1p8dvXiw48/WfHo5",
+ "HqUgI8T3iw9iMZ/ZHH2x6phgn0QMM2kwn1ejTqnhEg8xRQyBakFcqAoxu3glKF80k/dt64iW62mT7hB8",
+ "EwT3mNpLl+HEHi+3iG9d8RHcliQhb1AcwgPuQ2L/jGqPT3kjf+oFvREcCKyYwsImlhbvzI21uFBXyK5d",
+ "l8PihwOiQ9vo+FGvWHY5rWNrhoSKt67U80ahormpWZPpvq1eoWUJVKprX9LbzWEnnRmPDsNKHK1QoDoI",
+ "KAKKwcsVLYn/sYsZ8c9rrbsr935X7v165d4/65O5ccixrMrbiWSHa3zR97T+Iu/pN4IneNsC117ya6Hl",
+ "y72tMQChVRLP55DiwhaaFxKFhJAPqMlO1ysMmhJaTAVdOofJ2F22KdXpsiqnH/E/6Ax62bhd2oRpU6tm",
+ "23Tf2sL6o1t1oLhpsf5+Om3b9bdNqbiiPFxgNbykEDzmumxr5b3Gj9FQGDTKDnRG8/hQ324SxBb8HbDa",
+ "8+zC6m6K38nXocK7kTjaWa2EsnZCQ2s90n9zWrqVSGM/Tz+2y3ZZbbhrqZaVzsRF0Lcp/zh4tmyLWz1b",
+ "b0QGdty2d38/JShFdwfnEd0/UjXXiEd7efw27WzgHVMuVDGl1WKpbTroaK75umNCU3sUbDi/2hb/bFv5",
+ "OL9zIDSXQLM1mQFwImZm0e08Et0Clo43xsN4G7hKKVJQCrIkzAO5CbTazxw1hHoDnhBwBLiehShB5lRe",
+ "E1jLJDYD2k2AXINb64EcH+hDvdv0mzawO3m4jVSal4elAixxIIoyB1efO4LCHXGCwiv7xPvnJ7nu9lUl",
+ "phqMBKLbryeswKA5TrlQkAqeqeF0EduOLSaICNaiwGbX9yclmsHNDDxwtb6iSrtMl62o2iDNiJliQ36L",
+ "oRgxM/Lf6wix3thNOdQ6CaiVvSCL5leH1Ya53sCqnkvMI6VWXe2HbSMPYSkYv04LGiSs0IGOwgwXWdwF",
+ "y3O01sYlkRYQDSI2AXLsWwXYDRUBA4Aw1SC6jkJvU05Ql0FpUZbm/Omk4nW/ITQd29YH+tembZ+4nGs4",
+ "8vVMgAoFbwf5hcWszfi7pIo4OEhBz5zMvnAe2n2YzWFMFOOpy7IzlM2BFXBsWoVHYMsh7Yp94fFvnbPO",
+ "4ejQb5ToBolgyy4MLTgmaH4VYuFV331djcInVIS2Be1AvGoETfv39IIyncyFdBmMsKZMxKbaSexEmXaV",
+ "jNyrWAunyHRVaSxDceME+a5V6N7qSo375AusiPhhmal+FHInE26jbdWCmIWRimvmA/DMeatlzK/PHnon",
+ "Pd9Jz3fS8530fCc930nPd9LznfT8qaXnL+OTSZLE82kfcBMLtyGjb1LC/4YiWj5nCEoj9NciPz4SjIhu",
+ "zvFGXw0NNJ+6KhNoVI/mVLdO32HFitRMxzgpc4rlKlfahx5jpcqgZpVPlW4zKhleYxo8eUyOfz549ujx",
+ "b4+ffWe4z9KWzQrb3vfJfpVe5/DA+bTVKU+8cxtwijnZ0beN+tdP6v0erDQ/ZzkQZZD1EpsfwjnkRpS3",
+ "1k9iHiP959EJ0PyFQ47lSqD0DyJbdwjHrH+KqGiTTGNCZ5zKSN2EPqH0kKwF1k5xhUB6L6jLW/WiiHsO",
+ "9Dds214NlAyMkvcmetnqKeBKXrmxd7GamT316CSu5sIXZdkEIXJk1rCnr8a3vpvz1x0cbGukCnf+vlU/",
+ "eI/46MHDYzv2OVEJ1i+3FLdKTKMF8MSxhWQmsrWvLe5KuLS4rK2tMcxkbeEKcJWB3DG4rx4YNosYXemW",
+ "qida2yyoA9gkbP0yjNNWddjIN69PHe2iczf2ouwO1+cagRvGfSHJQoqqfGCrWPM1PomLkvK1V4MZWRGr",
+ "1mEGa/T8vl1OXadd7fHZ3Yuuhe8VDOPv/m7RgslaXcW1zJZci+dE7BYG247xpuzNtjx4PiNopETXQEGu",
+ "/ib6XXauj7Xqr7T5kSOFcjplce7Crf5HXAlvpThn5uEc5bB9v6yGIUy23gwyYFl4NXSSb/i7oc1P39GL",
+ "k1bxot146ipxgueNpdIloEBWS2mRTCXmvpSCZilVGFHiahl+YolVr44iegcEEzNO9X1/zQU+2SpY4rg7",
+ "yZNt3283IaaEUTa15peVLhv/0wMXwNPCxp0q4M+iCvjBHz5FKGbp7hzOoL7oDmyKXugVj3KpKVoJhz3e",
+ "ggPx1ra8Vdtdb/i2Ca8xYToTBOQloSTNGRooBFdaVqk+5RRVoJ0U5h3znlfsDotSL3yTuBY+oiR3Q51y",
+ "ijXpa8VoVKSaQ6zaJoCX2FS1WIDSHU48BzjlrhXjTf17zAifWE9Qc10bjj6xLQu6JnOskSfIHyAFmZlX",
+ "RJjFBBWKSrM8d/ZEMw0R81NONcnBMP3XzAh0Zjivc6pt5K6urcfCQKULm2M2iWshfrJfMYzBLd/rjVC9",
+ "ZT83xX2+SCboJFYsyUF+dOgyjB0dYtKYxpLYg/2zmZcKxpMokZkb31nku7RF7hsZzxPQg8Ym6Xb9lBth",
+ "WguCjJ7q65FD1wzQO4v2dHSoprURHWuBX+uHWHTrQiTmyYh180YLppfVDHMx+6jX6ULUEbDTjEIhOH7L",
+ "prRkU1VCOj1/tEU+uAG/IhF2dXdz/3mU+CEdmNNSbzyWKOru/cC9fAsJXb/uLK5bXZTucqbe5Uy9y6p5",
+ "lzP1bnfvcqbeZRS9yyj6PzWj6GSjhOiycGzN8deKPc7Q9bOp21oz8LBZKxtg3yzJ9ISQE6yKSc0dAOcg",
+ "aU5Sqqxg5MrcFmyx1ERVaQqQ7Z/ypAVJKgo38f3mv/aZe1rt7T0Bsveg28fqLQLO2++Loip+shXZvyen",
+ "o9NRbyQJhTgHlxssrBJoe20d9v+rx/2lV3AUtTCoXPF1DYmq5nOWMovyXJjHwEJ0/Pu4wC8gDXA29QRh",
+ "2qZhRXyiX6TzzmkXM2wL3f37/QqlcA465HKX5uTT17/ZVGH1pjxw49g9hnjHMj4Hy/jiTONPlJHtLvna",
+ "V7ag0JDayq56A0mqriEXK03vZKSmRmNY8xBvuLra4fsPho8rkOf+8mtK+O1Pp5j/fCmUno7M1dQu7xd+",
+ "NPcDXdgR3OVSSnaOuRM/XP6/AAAA//9Gwo6X+vEAAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index 2ee2e91cd..6e10c1f78 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -340,6 +340,34 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// ParticipationKey defines model for ParticipationKey.
+type ParticipationKey struct {
+
+ // Address the key was generated for.
+ Address string `json:"address"`
+
+ // When registered, this is the first round it may be used.
+ EffectiveFirstValid *uint64 `json:"effective-first-valid,omitempty"`
+
+ // When registered, this is the last round it may be used.
+ EffectiveLastValid *uint64 `json:"effective-last-valid,omitempty"`
+
+ // The key's ParticipationID.
+ Id string `json:"id"`
+
+ // AccountParticipation describes the parameters used by this account in consensus protocol.
+ Key AccountParticipation `json:"key"`
+
+ // Round when this key was last used to propose a block.
+ LastBlockProposal *uint64 `json:"last-block-proposal,omitempty"`
+
+ // Round when this key was last used to generate a state proof.
+ LastStateProof *uint64 `json:"last-state-proof,omitempty"`
+
+ // Round when this key was last used to vote.
+ LastVote *uint64 `json:"last-vote,omitempty"`
+}
+
// PendingTransactionResponse defines model for PendingTransactionResponse.
type PendingTransactionResponse struct {
@@ -406,7 +434,7 @@ type TealValue struct {
// \[tb\] bytes value.
Bytes string `json:"bytes"`
- // \[tt\] value type.
+ // \[tt\] value type. Value `1` refers to **bytes**, value `2` refers to **uint**
Type uint64 `json:"type"`
// \[ui\] uint value.
@@ -588,6 +616,16 @@ type NodeStatusResponse struct {
TimeSinceLastRound uint64 `json:"time-since-last-round"`
}
+// ParticipationKeyResponse defines model for ParticipationKeyResponse.
+type ParticipationKeyResponse struct {
+
+ // Detailed description of a participation key
+ ParticipationKey string `json:"participationKey"`
+}
+
+// ParticipationKeysResponse defines model for ParticipationKeysResponse.
+type ParticipationKeysResponse []ParticipationKey
+
// PendingTransactionsResponse defines model for PendingTransactionsResponse.
type PendingTransactionsResponse struct {
@@ -598,6 +636,13 @@ type PendingTransactionsResponse struct {
TotalTransactions uint64 `json:"total-transactions"`
}
+// PostParticipationResponse defines model for PostParticipationResponse.
+type PostParticipationResponse struct {
+
+ // encoding of the participation id.
+ PartId string `json:"partId"`
+}
+
// PostTransactionsResponse defines model for PostTransactionsResponse.
type PostTransactionsResponse struct {
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 7db6031c4..da47ce350 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -24,6 +24,7 @@ import (
"io"
"math"
"net/http"
+ "strings"
"time"
"github.com/labstack/echo/v4"
@@ -33,6 +34,7 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/private"
"github.com/algorand/go-algorand/data"
+ "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -67,13 +69,147 @@ type NodeInterface interface {
StartCatchup(catchpoint string) error
AbortCatchup(catchpoint string) error
Config() config.Local
+ InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error)
+ ListParticipationKeys() ([]account.ParticipationRecord, error)
+ GetParticipationKey(account.ParticipationID) (account.ParticipationRecord, error)
+ RemoveParticipationKey(account.ParticipationID) error
}
-// RegisterParticipationKeys registers participation keys.
-// (POST /v2/register-participation-keys/{address})
-func (v2 *Handlers) RegisterParticipationKeys(ctx echo.Context, address string, params private.RegisterParticipationKeysParams) error {
- // TODO: register participation keys endpoint
- return ctx.String(http.StatusNotImplemented, "Endpoint not implemented.")
+func roundToPtrOrNil(value basics.Round) *uint64 {
+ if value == 0 {
+ return nil
+ }
+ result := uint64(value)
+ return &result
+}
+
+func convertParticipationRecord(record account.ParticipationRecord) generated.ParticipationKey {
+ participationKey := generated.ParticipationKey{
+ Id: record.ParticipationID.String(),
+ Address: record.Account.String(),
+ Key: generated.AccountParticipation{
+ VoteFirstValid: uint64(record.FirstValid),
+ VoteLastValid: uint64(record.LastValid),
+ VoteKeyDilution: record.KeyDilution,
+ },
+ }
+
+ // These are pointers but should always be present.
+ if record.Voting != nil {
+ participationKey.Key.VoteParticipationKey = record.Voting.OneTimeSignatureVerifier[:]
+ }
+ if record.VRF != nil {
+ participationKey.Key.SelectionParticipationKey = record.VRF.PK[:]
+ }
+
+ // Optional fields.
+ if record.EffectiveLast != 0 && record.EffectiveFirst == 0 {
+ // Special case for first valid on round 0
+ zero := uint64(0)
+ participationKey.EffectiveFirstValid = &zero
+ } else {
+ participationKey.EffectiveFirstValid = roundToPtrOrNil(record.EffectiveFirst)
+ }
+ participationKey.EffectiveLastValid = roundToPtrOrNil(record.EffectiveLast)
+ participationKey.LastVote = roundToPtrOrNil(record.LastVote)
+ participationKey.LastBlockProposal = roundToPtrOrNil(record.LastBlockProposal)
+ participationKey.LastVote = roundToPtrOrNil(record.LastVote)
+ participationKey.LastStateProof = roundToPtrOrNil(record.LastStateProof)
+
+ return participationKey
+}
+
+// GetParticipationKeys Return a list of participation keys
+// (GET /v2/participation)
+func (v2 *Handlers) GetParticipationKeys(ctx echo.Context) error {
+ partKeys, err := v2.Node.ListParticipationKeys()
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ var response []generated.ParticipationKey
+
+ for _, participationRecord := range partKeys {
+ response = append(response, convertParticipationRecord(participationRecord))
+ }
+
+ return ctx.JSON(http.StatusOK, response)
+}
+
+// AddParticipationKey Add a participation key to the node
+// (POST /v2/participation)
+func (v2 *Handlers) AddParticipationKey(ctx echo.Context) error {
+
+ buf := new(bytes.Buffer)
+ _, err := buf.ReadFrom(ctx.Request().Body)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ partKeyBinary := buf.Bytes()
+
+ if len(partKeyBinary) == 0 {
+ err := fmt.Errorf(errRESTPayloadZeroLength)
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ partID, err := v2.Node.InstallParticipationKey(partKeyBinary)
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ response := generated.PostParticipationResponse{PartId: partID.String()}
+ return ctx.JSON(http.StatusOK, response)
+
+}
+
+// DeleteParticipationKeyByID Delete a given participation key by id
+// (DELETE /v2/participation/{participation-id})
+func (v2 *Handlers) DeleteParticipationKeyByID(ctx echo.Context, participationID string) error {
+
+ decodedParticipationID, err := account.ParseParticipationID(participationID)
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ err = v2.Node.RemoveParticipationKey(decodedParticipationID)
+
+ if err != nil {
+ if errors.Is(err, account.ErrParticipationIDNotFound) {
+ return notFound(ctx, account.ErrParticipationIDNotFound, "participation id not found", v2.Log)
+ }
+
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+
+ return ctx.NoContent(http.StatusOK)
+}
+
+// GetParticipationKeyByID Get participation key info by id
+// (GET /v2/participation/{participation-id})
+func (v2 *Handlers) GetParticipationKeyByID(ctx echo.Context, participationID string) error {
+
+ decodedParticipationID, err := account.ParseParticipationID(participationID)
+
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ participationRecord, err := v2.Node.GetParticipationKey(decodedParticipationID)
+
+ if err != nil {
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+
+ if participationRecord.IsZero() {
+ return notFound(ctx, account.ErrParticipationIDNotFound, account.ErrParticipationIDNotFound.Error(), v2.Log)
+ }
+
+ response := convertParticipationRecord(participationRecord)
+
+ return ctx.JSON(http.StatusOK, response)
}
// ShutdownNode shuts down the node.
@@ -715,7 +851,7 @@ func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error {
}
lastRound := ledger.Latest()
- record, err := ledger.Lookup(lastRound, creator)
+ record, _, err := ledger.LookupWithoutRewards(lastRound, creator)
if err != nil {
return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
}
@@ -761,7 +897,9 @@ func (v2 *Handlers) TealCompile(ctx echo.Context) error {
source := buf.String()
ops, err := logic.AssembleString(source)
if err != nil {
- return badRequest(ctx, err, err.Error(), v2.Log)
+ sb := strings.Builder{}
+ ops.ReportProblems("", &sb)
+ return badRequest(ctx, err, sb.String(), v2.Log)
}
pd := logic.HashProgram(ops.Program)
addr := basics.Address(pd)
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index b248e40d9..31e6053a6 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -21,7 +21,6 @@ import (
"math/rand"
"strconv"
"testing"
- "time"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -87,6 +86,22 @@ type mockNode struct {
err error
}
+func (m mockNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) {
+ panic("implement me")
+}
+
+func (m mockNode) ListParticipationKeys() ([]account.ParticipationRecord, error) {
+ panic("implement me")
+}
+
+func (m mockNode) GetParticipationKey(id account.ParticipationID) (account.ParticipationRecord, error) {
+ panic("implement me")
+}
+
+func (m mockNode) RemoveParticipationKey(id account.ParticipationID) error {
+ panic("implement me")
+}
+
func makeMockNode(ledger *data.Ledger, genesisID string, nodeError error) mockNode {
return mockNode{
ledger: ledger,
@@ -171,7 +186,7 @@ func (m mockNode) GetTransactionByID(txid transactions.Txid, rnd basics.Round) (
return node.TxnWithStatus{}, fmt.Errorf("get transaction by id not implemented")
}
-func (m mockNode) AssembleBlock(round basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (m mockNode) AssembleBlock(round basics.Round) (agreement.ValidatedBlock, error) {
return nil, fmt.Errorf("assemble block not implemented")
}
diff --git a/data/abi/abi_encode.go b/data/abi/abi_encode.go
index db4750bd0..fa5dbd57c 100644
--- a/data/abi/abi_encode.go
+++ b/data/abi/abi_encode.go
@@ -20,214 +20,242 @@ import (
"encoding/binary"
"fmt"
"math/big"
+ "reflect"
+ "strings"
)
-// arrayToTuple casts an array-like ABI Value into an ABI Value of Tuple type.
-// This is used in both ABI Encoding and Decoding.
-func (v Value) arrayToTuple() (Value, error) {
+// typeCastToTuple cast an array-like ABI type into an ABI tuple type.
+func (t Type) typeCastToTuple(tupLen ...int) (Type, error) {
var childT []Type
- var valueArr []Value
- switch v.ABIType.abiTypeID {
+ switch t.abiTypeID {
case String:
- strValue, err := v.GetString()
- if err != nil {
- return Value{}, err
+ if len(tupLen) != 1 {
+ return Type{}, fmt.Errorf("string type conversion to tuple need 1 length argument")
}
- strByte := []byte(strValue)
-
- childT = make([]Type, len(strByte))
- valueArr = make([]Value, len(strByte))
-
- for i := 0; i < len(strByte); i++ {
- childT[i] = MakeByteType()
- valueArr[i] = MakeByte(strByte[i])
+ childT = make([]Type, tupLen[0])
+ for i := 0; i < tupLen[0]; i++ {
+ childT[i] = byteType
}
case Address:
- addr, err := v.GetAddress()
- if err != nil {
- return Value{}, err
- }
-
childT = make([]Type, addressByteSize)
- valueArr = make([]Value, addressByteSize)
-
for i := 0; i < addressByteSize; i++ {
- childT[i] = MakeByteType()
- valueArr[i] = MakeByte(addr[i])
+ childT[i] = byteType
}
case ArrayStatic:
- childT = make([]Type, v.ABIType.staticLength)
- for i := 0; i < int(v.ABIType.staticLength); i++ {
- childT[i] = v.ABIType.childTypes[0]
+ childT = make([]Type, t.staticLength)
+ for i := 0; i < int(t.staticLength); i++ {
+ childT[i] = t.childTypes[0]
}
- valueArr = v.value.([]Value)
case ArrayDynamic:
- arrayElems := v.value.([]Value)
- childT = make([]Type, len(arrayElems))
- for i := 0; i < len(arrayElems); i++ {
- childT[i] = v.ABIType.childTypes[0]
+ if len(tupLen) != 1 {
+ return Type{}, fmt.Errorf("dynamic array type conversion to tuple need 1 length argument")
+ }
+ childT = make([]Type, tupLen[0])
+ for i := 0; i < tupLen[0]; i++ {
+ childT[i] = t.childTypes[0]
}
- valueArr = arrayElems
default:
- return Value{}, fmt.Errorf("value type not supported to conversion to tuple")
+ return Type{}, fmt.Errorf("type cannot support conversion to tuple")
}
- castedTupleType, err := MakeTupleType(childT)
+ tuple, err := MakeTupleType(childT)
if err != nil {
- return Value{}, err
+ return Type{}, err
}
-
- return Value{
- ABIType: castedTupleType,
- value: valueArr,
- }, nil
+ return tuple, nil
}
-// Encode method serialize the ABI value into a byte string of ABI encoding rule.
-func (v Value) Encode() ([]byte, error) {
- switch v.ABIType.abiTypeID {
- case Uint:
- bigIntValue, err := v.GetUint()
- if err != nil {
- return []byte{}, err
- }
- // NOTE: ugly work-round for golang 1.14. if upgraded to 1.15, should use `fillbytes`
- bigIntBytes := bigIntValue.Bytes()
- buffer := make([]byte, v.ABIType.bitSize/8-uint16(len(bigIntBytes)))
- buffer = append(buffer, bigIntBytes...)
- return buffer, nil
- case Ufixed:
- ufixedValue, err := v.GetUfixed()
- if err != nil {
- return []byte{}, err
- }
- // NOTE: ugly work-round for golang 1.14. if upgraded to 1.15, should use `fillbytes`
- encodeBuffer := ufixedValue.Bytes()
- buffer := make([]byte, v.ABIType.bitSize/8-uint16(len(encodeBuffer)))
- buffer = append(buffer, encodeBuffer...)
- return buffer, nil
+// Encode is an ABI type method to encode go values into bytes following ABI encoding rules
+func (t Type) Encode(value interface{}) ([]byte, error) {
+ switch t.abiTypeID {
+ case Uint, Ufixed:
+ return encodeInt(value, t.bitSize)
case Bool:
- boolValue, err := v.GetBool()
- if err != nil {
- return []byte{}, err
+ boolValue, ok := value.(bool)
+ if !ok {
+ return nil, fmt.Errorf("cannot cast value to bool in bool encoding")
}
if boolValue {
return []byte{0x80}, nil
}
return []byte{0x00}, nil
case Byte:
- bytesValue, err := v.GetByte()
- if err != nil {
- return []byte{}, nil
+ byteValue, ok := value.(byte)
+ if !ok {
+ return nil, fmt.Errorf("cannot cast value to byte in byte encoding")
}
- return []byte{bytesValue}, nil
+ return []byte{byteValue}, nil
case ArrayStatic, Address:
- convertedTuple, err := v.arrayToTuple()
+ castedType, err := t.typeCastToTuple()
+ if err != nil {
+ return nil, err
+ }
+ return castedType.Encode(value)
+ case ArrayDynamic:
+ dynamicArray, err := inferToSlice(value)
if err != nil {
- return []byte{}, err
+ return nil, err
}
- return tupleEncoding(convertedTuple)
- case ArrayDynamic, String:
- convertedTuple, err := v.arrayToTuple()
+ castedType, err := t.typeCastToTuple(len(dynamicArray))
if err != nil {
- return []byte{}, err
+ return nil, err
}
- length := len(convertedTuple.ABIType.childTypes)
lengthEncode := make([]byte, lengthEncodeByteSize)
- binary.BigEndian.PutUint16(lengthEncode, uint16(length))
-
- encoded, err := tupleEncoding(convertedTuple)
+ binary.BigEndian.PutUint16(lengthEncode, uint16(len(dynamicArray)))
+ encoded, err := castedType.Encode(value)
+ if err != nil {
+ return nil, err
+ }
+ encoded = append(lengthEncode, encoded...)
+ return encoded, nil
+ case String:
+ stringValue, okString := value.(string)
+ if !okString {
+ return nil, fmt.Errorf("cannot cast value to string or array dynamic in encoding")
+ }
+ byteValue := []byte(stringValue)
+ castedType, err := t.typeCastToTuple(len(byteValue))
if err != nil {
- return []byte{}, err
+ return nil, err
}
- return append(lengthEncode, encoded...), nil
+ lengthEncode := make([]byte, lengthEncodeByteSize)
+ binary.BigEndian.PutUint16(lengthEncode, uint16(len(byteValue)))
+ encoded, err := castedType.Encode(byteValue)
+ if err != nil {
+ return nil, err
+ }
+ encoded = append(lengthEncode, encoded...)
+ return encoded, nil
case Tuple:
- return tupleEncoding(v)
+ return encodeTuple(value, t.childTypes)
default:
- return []byte{}, fmt.Errorf("Encoding: unknown type error (bruh why you are here)")
+ return nil, fmt.Errorf("cannot infer type for encoding")
}
}
-// compressMultipleBool compress consecutive bool values into a byte in ABI tuple/array value.
-func compressMultipleBool(valueList []Value) (uint8, error) {
- var res uint8 = 0
- if len(valueList) > 8 {
- return 0, fmt.Errorf("value list passed in should be no greater than length 8")
+// encodeInt encodes int-alike golang values to bytes, following ABI encoding rules
+func encodeInt(intValue interface{}, bitSize uint16) ([]byte, error) {
+ var bigInt *big.Int
+
+ switch intValue := intValue.(type) {
+ case int8:
+ bigInt = big.NewInt(int64(intValue))
+ case uint8:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int16:
+ bigInt = big.NewInt(int64(intValue))
+ case uint16:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int32:
+ bigInt = big.NewInt(int64(intValue))
+ case uint32:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int64:
+ bigInt = big.NewInt(intValue)
+ case uint64:
+ bigInt = new(big.Int).SetUint64(intValue)
+ case uint:
+ bigInt = new(big.Int).SetUint64(uint64(intValue))
+ case int:
+ bigInt = big.NewInt(int64(intValue))
+ case *big.Int:
+ bigInt = intValue
+ default:
+ return nil, fmt.Errorf("cannot infer go type for uint encode")
}
- for i := 0; i < len(valueList); i++ {
- if valueList[i].ABIType.abiTypeID != Bool {
- return 0, fmt.Errorf("bool type not matching in compressMultipleBool")
- }
- boolVal, err := valueList[i].GetBool()
- if err != nil {
- return 0, err
- }
- if boolVal {
- res |= 1 << uint(7-i)
+
+ if bigInt.Sign() < 0 {
+ return nil, fmt.Errorf("passed in numeric value should be non negative")
+ }
+
+ bytes := bigInt.Bytes()
+ if len(bytes) > int(bitSize/8) {
+ return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", len(bytes)*8, bitSize)
+ }
+
+ zeroPadding := make([]byte, bitSize/8-uint16(len(bytes)))
+ buffer := append(zeroPadding, bytes...)
+ return buffer, nil
+}
+
+// inferToSlice infers an interface element to a slice of interface{}, returns error if it cannot infer successfully
+func inferToSlice(value interface{}) ([]interface{}, error) {
+ reflectVal := reflect.ValueOf(value)
+ if reflectVal.Kind() != reflect.Slice && reflectVal.Kind() != reflect.Array {
+ return nil, fmt.Errorf("cannot infer an interface value as a slice of interface element")
+ }
+ if reflectVal.IsNil() {
+ if reflectVal.Kind() == reflect.Slice {
+ return nil, nil
}
+ return nil, fmt.Errorf("cannot infer nil value for array kind interface")
}
- return res, nil
+ values := make([]interface{}, reflectVal.Len())
+ for i := 0; i < reflectVal.Len(); i++ {
+ values[i] = reflectVal.Index(i).Interface()
+ }
+ return values, nil
}
-// tupleEncoding encodes an ABI value of tuple type into an ABI encoded byte string.
-func tupleEncoding(v Value) ([]byte, error) {
- if v.ABIType.abiTypeID != Tuple {
- return []byte{}, fmt.Errorf("type not supported in tupleEncoding")
+// encodeTuple encodes slice-of-interface of golang values to bytes, following ABI encoding rules
+func encodeTuple(value interface{}, childT []Type) ([]byte, error) {
+ if len(childT) >= (1 << 16) {
+ return nil, fmt.Errorf("abi child type number exceeds uint16 maximum")
}
- if len(v.ABIType.childTypes) >= (1 << 16) {
- return []byte{}, fmt.Errorf("value abi type exceed 2^16")
+ values, err := inferToSlice(value)
+ if err != nil {
+ return nil, err
}
- tupleElems := v.value.([]Value)
- if len(tupleElems) != len(v.ABIType.childTypes) {
- return []byte{}, fmt.Errorf("tuple abi child type number unmatch with tuple argument number")
+ if len(values) != len(childT) {
+ return nil, fmt.Errorf("cannot encode abi tuple: value slice length != child type number")
}
// for each tuple element value, it has a head/tail component
// we create slots for head/tail bytes now, store them and concat them later
- heads := make([][]byte, len(v.ABIType.childTypes))
- tails := make([][]byte, len(v.ABIType.childTypes))
+ heads := make([][]byte, len(childT))
+ tails := make([][]byte, len(childT))
isDynamicIndex := make(map[int]bool)
- for i := 0; i < len(v.ABIType.childTypes); i++ {
- if tupleElems[i].ABIType.IsDynamic() {
+ for i := 0; i < len(childT); i++ {
+ if childT[i].IsDynamic() {
// if it is a dynamic value, the head component is not pre-determined
// we store an empty placeholder first, since we will need it in byte length calculation
headsPlaceholder := []byte{0x00, 0x00}
heads[i] = headsPlaceholder
// we keep track that the index points to a dynamic value
isDynamicIndex[i] = true
- tailEncoding, err := tupleElems[i].Encode()
+ tailEncoding, err := childT[i].Encode(values[i])
if err != nil {
- return []byte{}, err
+ return nil, err
}
tails[i] = tailEncoding
+ isDynamicIndex[i] = true
+ } else if childT[i].abiTypeID == Bool {
+ // search previous bool
+ before := findBoolLR(childT, i, -1)
+ // search after bool
+ after := findBoolLR(childT, i, 1)
+ // append to heads and tails
+ if before%8 != 0 {
+ return nil, fmt.Errorf("cannot encode abi tuple: expected before has number of bool mod 8 == 0")
+ }
+ if after > 7 {
+ after = 7
+ }
+ compressed, err := compressBools(values[i : i+after+1])
+ if err != nil {
+ return nil, err
+ }
+ heads[i] = []byte{compressed}
+ i += after
+ isDynamicIndex[i] = false
} else {
- if tupleElems[i].ABIType.abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(v.ABIType.childTypes, i, -1)
- // search after bool
- after := findBoolLR(v.ABIType.childTypes, i, 1)
- // append to heads and tails
- if before%8 != 0 {
- return []byte{}, fmt.Errorf("expected before has number of bool mod 8 = 0")
- }
- if after > 7 {
- after = 7
- }
- compressed, err := compressMultipleBool(tupleElems[i : i+after+1])
- if err != nil {
- return []byte{}, err
- }
- heads[i] = []byte{compressed}
- i += after
- } else {
- encodeTi, err := tupleElems[i].Encode()
- if err != nil {
- return []byte{}, err
- }
- heads[i] = encodeTi
+ encodeTi, err := childT[i].Encode(values[i])
+ if err != nil {
+ return nil, err
}
+ heads[i] = encodeTi
isDynamicIndex[i] = false
}
}
@@ -249,7 +277,7 @@ func tupleEncoding(v Value) ([]byte, error) {
// calculate where the index of dynamic value encoding byte start
headValue := headLength + tailCurrLength
if headValue >= (1 << 16) {
- return []byte{}, fmt.Errorf("encoding error: byte length exceed 2^16")
+ return nil, fmt.Errorf("cannot encode abi tuple: encode length exceeds uint16 maximum")
}
binary.BigEndian.PutUint16(heads[i], uint16(headValue))
}
@@ -268,203 +296,254 @@ func tupleEncoding(v Value) ([]byte, error) {
return encoded, nil
}
-// Decode takes an ABI encoded byte string and a target ABI type,
-// and decodes the bytes into an ABI Value.
-func Decode(valueByte []byte, valueType Type) (Value, error) {
- switch valueType.abiTypeID {
- case Uint:
- if len(valueByte) != int(valueType.bitSize)/8 {
- return Value{},
- fmt.Errorf("uint%d decode: expected byte length %d, but got byte length %d",
- valueType.bitSize, valueType.bitSize/8, len(valueByte))
- }
- uintValue := new(big.Int).SetBytes(valueByte)
- return MakeUint(uintValue, valueType.bitSize)
- case Ufixed:
- if len(valueByte) != int(valueType.bitSize)/8 {
- return Value{},
- fmt.Errorf("ufixed%dx%d decode: expected length %d, got byte length %d",
- valueType.bitSize, valueType.precision, valueType.bitSize/8, len(valueByte))
- }
- ufixedNumerator := new(big.Int).SetBytes(valueByte)
- return MakeUfixed(ufixedNumerator, valueType.bitSize, valueType.precision)
+// compressBools takes a slice of interface{} (which can be casted to bools) length <= 8
+// and compress the bool values into a uint8 integer
+func compressBools(boolSlice []interface{}) (uint8, error) {
+ var res uint8 = 0
+ if len(boolSlice) > 8 {
+ return 0, fmt.Errorf("compressBools: cannot have slice length > 8")
+ }
+ for i := 0; i < len(boolSlice); i++ {
+ temp, ok := boolSlice[i].(bool)
+ if !ok {
+ return 0, fmt.Errorf("compressBools: cannot cast slice element to bool")
+ }
+ if temp {
+ res |= 1 << uint(7-i)
+ }
+ }
+ return res, nil
+}
+
+// decodeUint decodes byte slice into golang int/big.Int
+func decodeUint(encoded []byte, bitSize uint16) (interface{}, error) {
+ if len(encoded) != int(bitSize)/8 {
+ return nil,
+ fmt.Errorf("uint/ufixed decode: expected byte length %d, but got byte length %d", bitSize/8, len(encoded))
+ }
+ switch bitSize / 8 {
+ case 1:
+ return encoded[0], nil
+ case 2:
+ return uint16(new(big.Int).SetBytes(encoded).Uint64()), nil
+ case 3, 4:
+ return uint32(new(big.Int).SetBytes(encoded).Uint64()), nil
+ case 5, 6, 7, 8:
+ return new(big.Int).SetBytes(encoded).Uint64(), nil
+ default:
+ return new(big.Int).SetBytes(encoded), nil
+ }
+}
+
+// Decode is an ABI type method to decode bytes to go values from ABI encoding rules
+func (t Type) Decode(encoded []byte) (interface{}, error) {
+ switch t.abiTypeID {
+ case Uint, Ufixed:
+ return decodeUint(encoded, t.bitSize)
case Bool:
- if len(valueByte) != 1 {
- return Value{}, fmt.Errorf("boolean byte should be length 1 byte")
- }
- var boolValue bool
- if valueByte[0] == 0x00 {
- boolValue = false
- } else if valueByte[0] == 0x80 {
- boolValue = true
- } else {
- return Value{}, fmt.Errorf("sinble boolean encoded byte should be of form 0x80 or 0x00")
+ if len(encoded) != 1 {
+ return nil, fmt.Errorf("boolean byte should be length 1 byte")
+ }
+ if encoded[0] == 0x00 {
+ return false, nil
+ } else if encoded[0] == 0x80 {
+ return true, nil
}
- return MakeBool(boolValue), nil
+ return nil, fmt.Errorf("single boolean encoded byte should be of form 0x80 or 0x00")
case Byte:
- if len(valueByte) != 1 {
- return Value{}, fmt.Errorf("byte should be length 1")
+ if len(encoded) != 1 {
+ return nil, fmt.Errorf("byte should be length 1")
}
- return MakeByte(valueByte[0]), nil
+ return encoded[0], nil
case ArrayStatic:
- childT := make([]Type, valueType.staticLength)
- for i := 0; i < int(valueType.staticLength); i++ {
- childT[i] = valueType.childTypes[0]
- }
- converted, err := MakeTupleType(childT)
+ castedType, err := t.typeCastToTuple()
if err != nil {
- return Value{}, err
+ return nil, err
}
- tupleDecoded, err := tupleDecoding(valueByte, converted)
- if err != nil {
- return Value{}, err
- }
- tupleDecoded.ABIType = valueType
- return tupleDecoded, nil
+ return castedType.Decode(encoded)
case Address:
- if len(valueByte) != addressByteSize {
- return Value{}, fmt.Errorf("address should be length 32")
+ if len(encoded) != addressByteSize {
+ return nil, fmt.Errorf("address should be length 32")
}
- var byteAssign [addressByteSize]byte
- copy(byteAssign[:], valueByte)
- return MakeAddress(byteAssign), nil
+ return encoded, nil
case ArrayDynamic:
- if len(valueByte) < lengthEncodeByteSize {
- return Value{}, fmt.Errorf("dynamic array format corrupted")
- }
- dynamicLen := binary.BigEndian.Uint16(valueByte[:lengthEncodeByteSize])
- childT := make([]Type, dynamicLen)
- for i := 0; i < int(dynamicLen); i++ {
- childT[i] = valueType.childTypes[0]
- }
- converted, err := MakeTupleType(childT)
- if err != nil {
- return Value{}, err
+ if len(encoded) < lengthEncodeByteSize {
+ return nil, fmt.Errorf("dynamic array format corrupted")
}
- tupleDecoded, err := tupleDecoding(valueByte[lengthEncodeByteSize:], converted)
+ dynamicLen := binary.BigEndian.Uint16(encoded[:lengthEncodeByteSize])
+ castedType, err := t.typeCastToTuple(int(dynamicLen))
if err != nil {
- return Value{}, err
+ return nil, err
}
- tupleDecoded.ABIType = valueType
- return tupleDecoded, nil
+ return castedType.Decode(encoded[lengthEncodeByteSize:])
case String:
- if len(valueByte) < lengthEncodeByteSize {
- return Value{}, fmt.Errorf("string format corrupted")
+ if len(encoded) < lengthEncodeByteSize {
+ return nil, fmt.Errorf("string format corrupted")
}
- stringLenBytes := valueByte[:lengthEncodeByteSize]
+ stringLenBytes := encoded[:lengthEncodeByteSize]
byteLen := binary.BigEndian.Uint16(stringLenBytes)
- if len(valueByte[lengthEncodeByteSize:]) != int(byteLen) {
- return Value{}, fmt.Errorf("string representation in byte: length not matching")
+ if len(encoded[lengthEncodeByteSize:]) != int(byteLen) {
+ return nil, fmt.Errorf("string representation in byte: length not matching")
}
- return MakeString(string(valueByte[lengthEncodeByteSize:])), nil
+ return string(encoded[lengthEncodeByteSize:]), nil
case Tuple:
- return tupleDecoding(valueByte, valueType)
+ return decodeTuple(encoded, t.childTypes)
default:
- return Value{}, fmt.Errorf("decode: unknown type error")
+ return nil, fmt.Errorf("cannot infer type for decoding")
}
}
-// tupleDecoding takes a byte string and an ABI tuple type,
-// and decodes the bytes into an ABI tuple value.
-func tupleDecoding(valueBytes []byte, valueType Type) (Value, error) {
- dynamicSegments := make([]segment, 0)
- valuePartition := make([][]byte, 0)
+// decodeTuple decodes byte slice with ABI type slice, outputting a slice of golang interface values
+// following ABI encoding rules
+func decodeTuple(encoded []byte, childT []Type) ([]interface{}, error) {
+ dynamicSegments := make([]int, 0, len(childT)+1)
+ valuePartition := make([][]byte, 0, len(childT))
iterIndex := 0
- for i := 0; i < len(valueType.childTypes); i++ {
- if valueType.childTypes[i].IsDynamic() {
- if len(valueBytes[iterIndex:]) < lengthEncodeByteSize {
- return Value{}, fmt.Errorf("ill formed tuple dynamic typed value encoding")
- }
- dynamicIndex := binary.BigEndian.Uint16(valueBytes[iterIndex : iterIndex+lengthEncodeByteSize])
- if len(dynamicSegments) > 0 {
- dynamicSegments[len(dynamicSegments)-1].right = int(dynamicIndex)
+ for i := 0; i < len(childT); i++ {
+ if childT[i].IsDynamic() {
+ if len(encoded[iterIndex:]) < lengthEncodeByteSize {
+ return nil, fmt.Errorf("ill formed tuple dynamic typed value encoding")
}
- // we know where encoded bytes for dynamic value start, but we do not know where it ends
- // unless we see the start of the next encoded bytes for dynamic value
- dynamicSegments = append(dynamicSegments, segment{
- left: int(dynamicIndex),
- right: -1,
- })
+ dynamicIndex := binary.BigEndian.Uint16(encoded[iterIndex : iterIndex+lengthEncodeByteSize])
+ dynamicSegments = append(dynamicSegments, int(dynamicIndex))
valuePartition = append(valuePartition, nil)
iterIndex += lengthEncodeByteSize
- } else {
- // if bool ...
- if valueType.childTypes[i].abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(valueType.childTypes, i, -1)
- // search after bool
- after := findBoolLR(valueType.childTypes, i, 1)
- if before%8 == 0 {
- if after > 7 {
- after = 7
- }
- // parse bool in a byte to multiple byte strings
- for boolIndex := uint(0); boolIndex <= uint(after); boolIndex++ {
- boolMask := 0x80 >> boolIndex
- if valueBytes[iterIndex]&byte(boolMask) > 0 {
- valuePartition = append(valuePartition, []byte{0x80})
- } else {
- valuePartition = append(valuePartition, []byte{0x00})
- }
+ } else if childT[i].abiTypeID == Bool {
+ // search previous bool
+ before := findBoolLR(childT, i, -1)
+ // search after bool
+ after := findBoolLR(childT, i, 1)
+ if before%8 == 0 {
+ if after > 7 {
+ after = 7
+ }
+ // parse bool in a byte to multiple byte strings
+ for boolIndex := uint(0); boolIndex <= uint(after); boolIndex++ {
+ boolMask := 0x80 >> boolIndex
+ if encoded[iterIndex]&byte(boolMask) > 0 {
+ valuePartition = append(valuePartition, []byte{0x80})
+ } else {
+ valuePartition = append(valuePartition, []byte{0x00})
}
- i += after
- iterIndex++
- } else {
- return Value{}, fmt.Errorf("expected before bool number mod 8 == 0")
}
+ i += after
+ iterIndex++
} else {
- // not bool ...
- currLen, err := valueType.childTypes[i].ByteLen()
- if err != nil {
- return Value{}, err
- }
- valuePartition = append(valuePartition, valueBytes[iterIndex:iterIndex+currLen])
- iterIndex += currLen
+ return nil, fmt.Errorf("expected before bool number mod 8 == 0")
}
+ } else {
+ // not bool ...
+ currLen, err := childT[i].ByteLen()
+ if err != nil {
+ return nil, err
+ }
+ valuePartition = append(valuePartition, encoded[iterIndex:iterIndex+currLen])
+ iterIndex += currLen
}
- if i != len(valueType.childTypes)-1 && iterIndex >= len(valueBytes) {
- return Value{}, fmt.Errorf("input byte not enough to decode")
+ if i != len(childT)-1 && iterIndex >= len(encoded) {
+ return nil, fmt.Errorf("input byte not enough to decode")
}
}
+
if len(dynamicSegments) > 0 {
- dynamicSegments[len(dynamicSegments)-1].right = len(valueBytes)
- iterIndex = len(valueBytes)
+ dynamicSegments = append(dynamicSegments, len(encoded))
+ iterIndex = len(encoded)
}
- if iterIndex < len(valueBytes) {
- return Value{}, fmt.Errorf("input byte not fully consumed")
+ if iterIndex < len(encoded) {
+ return nil, fmt.Errorf("input byte not fully consumed")
}
-
- // check segment indices are valid
- // if the dynamic segment are not consecutive and well-ordered, we return error
- for index, seg := range dynamicSegments {
- if seg.left > seg.right {
- return Value{}, fmt.Errorf("dynamic segment should display a [l, r] space with l <= r")
- }
- if index != len(dynamicSegments)-1 && seg.right != dynamicSegments[index+1].left {
- return Value{}, fmt.Errorf("dynamic segment should be consecutive")
+ for i := 0; i < len(dynamicSegments)-1; i++ {
+ if dynamicSegments[i] > dynamicSegments[i+1] {
+ return nil, fmt.Errorf("dynamic segment should display a [l, r] space with l <= r")
}
}
segIndex := 0
- for i := 0; i < len(valueType.childTypes); i++ {
- if valueType.childTypes[i].IsDynamic() {
- valuePartition[i] = valueBytes[dynamicSegments[segIndex].left:dynamicSegments[segIndex].right]
+ for i := 0; i < len(childT); i++ {
+ if childT[i].IsDynamic() {
+ valuePartition[i] = encoded[dynamicSegments[segIndex]:dynamicSegments[segIndex+1]]
segIndex++
}
}
- // decode each tuple element bytes
- values := make([]Value, 0)
- for i := 0; i < len(valueType.childTypes); i++ {
- valueTi, err := Decode(valuePartition[i], valueType.childTypes[i])
+ values := make([]interface{}, len(childT))
+ for i := 0; i < len(childT); i++ {
+ var err error
+ values[i], err = childT[i].Decode(valuePartition[i])
if err != nil {
- return Value{}, err
+ return nil, err
+ }
+ }
+ return values, nil
+}
+
+// ParseArgJSONtoByteSlice convert input method arguments to ABI encoded bytes
+// it converts funcArgTypes into a tuple type and apply changes over input argument string (in JSON format)
+// if there are greater or equal to 15 inputs, then we compact the tailing inputs into one tuple
+func ParseArgJSONtoByteSlice(funcArgTypes string, jsonArgs []string, applicationArgs *[][]byte) error {
+ abiTupleT, err := TypeOf(funcArgTypes)
+ if err != nil {
+ return err
+ }
+ if len(abiTupleT.childTypes) != len(jsonArgs) {
+ return fmt.Errorf("input argument number %d != method argument number %d", len(jsonArgs), len(abiTupleT.childTypes))
+ }
+
+ // change the input args to be 1 - 14 + 15 (compacting everything together)
+ if len(jsonArgs) > 14 {
+ compactedType, err := MakeTupleType(abiTupleT.childTypes[14:])
+ if err != nil {
+ return err
+ }
+ abiTupleT.childTypes = abiTupleT.childTypes[:14]
+ abiTupleT.childTypes = append(abiTupleT.childTypes, compactedType)
+ abiTupleT.staticLength = 15
+
+ remainingJSON := "[" + strings.Join(jsonArgs[14:], ",") + "]"
+ jsonArgs = jsonArgs[:14]
+ jsonArgs = append(jsonArgs, remainingJSON)
+ }
+
+ // parse JSON value to ABI encoded bytes
+ for i := 0; i < len(jsonArgs); i++ {
+ interfaceVal, err := abiTupleT.childTypes[i].UnmarshalFromJSON([]byte(jsonArgs[i]))
+ if err != nil {
+ return err
+ }
+ abiEncoded, err := abiTupleT.childTypes[i].Encode(interfaceVal)
+ if err != nil {
+ return err
+ }
+ *applicationArgs = append(*applicationArgs, abiEncoded)
+ }
+ return nil
+}
+
+// ParseMethodSignature parses a method of format `method(...argTypes...)retType`
+// into `(...argTypes)` and `retType`
+func ParseMethodSignature(methodSig string) (string, string, error) {
+ var stack []int
+
+ for index, chr := range methodSig {
+ if chr == '(' {
+ stack = append(stack, index)
+ } else if chr == ')' {
+ if len(stack) == 0 {
+ break
+ }
+ leftParenIndex := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+ if len(stack) == 0 {
+ returnType := methodSig[index+1:]
+ if _, err := TypeOf(returnType); err != nil {
+ if returnType != "void" {
+ return "", "", fmt.Errorf("cannot infer return type: %s", returnType)
+ }
+ }
+ return methodSig[leftParenIndex : index+1], methodSig[index+1:], nil
+ }
}
- values = append(values, valueTi)
}
- return Value{
- ABIType: valueType,
- value: values,
- }, nil
+ return "", "", fmt.Errorf("unpaired parentheses: %s", methodSig)
}
diff --git a/data/abi/abi_encode_test.go b/data/abi/abi_encode_test.go
index 22d37c553..c585564c6 100644
--- a/data/abi/abi_encode_test.go
+++ b/data/abi/abi_encode_test.go
@@ -34,6 +34,9 @@ func TestEncodeValid(t *testing.T) {
// randomly pick 1000 valid uint values and check if encoded value match with expected
for intSize := 8; intSize <= 512; intSize += 8 {
upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(intSize))
+ uintType, err := makeUintType(intSize)
+ require.NoError(t, err, "make uint type fail")
+
for i := 0; i < 1000; i++ {
randomInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
@@ -42,12 +45,10 @@ func TestEncodeValid(t *testing.T) {
expected := make([]byte, intSize/8-len(randomIntByte))
expected = append(expected, randomIntByte...)
- uintValue, err := MakeUint(randomInt, uint16(intSize))
- require.NoError(t, err, "makeUint Fail")
- uintBytesActual, err := uintValue.Encode()
+ uintEncode, err := uintType.Encode(randomInt)
+ require.NoError(t, err, "encoding from uint type fail")
- require.NoError(t, err, "uint encode fail")
- require.Equal(t, expected, uintBytesActual, "encode uint not match with expected")
+ require.Equal(t, expected, uintEncode, "encode uint not match with expected")
}
// 2^[bitSize] - 1 test
// check if uint<bitSize> can contain max uint value (2^bitSize - 1)
@@ -55,9 +56,7 @@ func TestEncodeValid(t *testing.T) {
upperLimit,
big.NewInt(1).Neg(big.NewInt(1)),
)
- valueLargest, err := MakeUint(largest, uint16(intSize))
- require.NoError(t, err, "make largest uint fail")
- encoded, err := valueLargest.Encode()
+ encoded, err := uintType.Encode(largest)
require.NoError(t, err, "largest uint encode error")
require.Equal(t, largest.Bytes(), encoded, "encode uint largest do not match with expected")
}
@@ -72,14 +71,14 @@ func TestEncodeValid(t *testing.T) {
big.NewInt(1).Neg(big.NewInt(1)),
)
for precision := 1; precision <= 160; precision++ {
+ typeUfixed, err := makeUfixedType(size, precision)
+ require.NoError(t, err, "make ufixed type fail")
+
for i := 0; i < 10; i++ {
randomInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- valueUfixed, err := MakeUfixed(randomInt, uint16(size), uint16(precision))
- require.NoError(t, err, "makeUfixed Fail")
-
- encodedUfixed, err := valueUfixed.Encode()
+ encodedUfixed, err := typeUfixed.Encode(randomInt)
require.NoError(t, err, "ufixed encode fail")
randomBytes := randomInt.Bytes()
@@ -88,9 +87,7 @@ func TestEncodeValid(t *testing.T) {
require.Equal(t, buffer, encodedUfixed, "encode ufixed not match with expected")
}
// (2^[bitSize] - 1) / (10^[precision]) test
- ufixedLargestValue, err := MakeUfixed(largest, uint16(size), uint16(precision))
- require.NoError(t, err, "make largest ufixed fail")
- ufixedLargestEncode, err := ufixedLargestValue.Encode()
+ ufixedLargestEncode, err := typeUfixed.Encode(largest)
require.NoError(t, err, "largest ufixed encode error")
require.Equal(t, largest.Bytes(), ufixedLargestEncode,
"encode ufixed largest do not match with expected")
@@ -108,19 +105,14 @@ func TestEncodeValid(t *testing.T) {
addrBytesExpected := make([]byte, 32-len(rand256Bytes))
addrBytesExpected = append(addrBytesExpected, rand256Bytes...)
- var addrBytes [32]byte
- copy(addrBytes[:], addrBytesExpected[:32])
-
- addressValue := MakeAddress(addrBytes)
- addrBytesActual, err := addressValue.Encode()
+ addrBytesActual, err := addressType.Encode(addrBytesExpected)
require.NoError(t, err, "address encode fail")
require.Equal(t, addrBytesExpected, addrBytesActual, "encode addr not match with expected")
}
// encoding test for bool values
for i := 0; i < 2; i++ {
- boolValue := MakeBool(i == 1)
- boolEncode, err := boolValue.Encode()
+ boolEncode, err := boolType.Encode(i == 1)
require.NoError(t, err, "bool encode fail")
expected := []byte{0x00}
if i == 1 {
@@ -131,8 +123,7 @@ func TestEncodeValid(t *testing.T) {
// encoding test for byte values
for i := 0; i < (1 << 8); i++ {
- byteValue := MakeByte(byte(i))
- byteEncode, err := byteValue.Encode()
+ byteEncode, err := byteType.Encode(byte(i))
require.NoError(t, err, "byte encode fail")
expected := []byte{byte(i)}
require.Equal(t, expected, byteEncode, "encode byte not match with expected")
@@ -146,14 +137,13 @@ func TestEncodeValid(t *testing.T) {
for i := 0; i < 10; i++ {
// generate utf8 strings from `gobberish` at some length
utf8Str := gobberish.GenerateString(length)
- strValue := MakeString(utf8Str)
// since string is just type alias of `byte[]`, we need to store number of bytes in encoding
utf8ByteLen := len([]byte(utf8Str))
lengthBytes := make([]byte, 2)
binary.BigEndian.PutUint16(lengthBytes, uint16(utf8ByteLen))
expected := append(lengthBytes, []byte(utf8Str)...)
- strEncode, err := strValue.Encode()
+ strEncode, err := stringType.Encode(utf8Str)
require.NoError(t, err, "string encode fail")
require.Equal(t, expected, strEncode, "encode string not match with expected")
}
@@ -162,54 +152,39 @@ func TestEncodeValid(t *testing.T) {
// encoding test for static bool array, the expected behavior of encoding is to
// compress multiple bool into a single byte.
// input: {T, F, F, T, T}, encode expected: {0b10011000}
+ staticBoolArrType := makeStaticArrayType(boolType, 5)
t.Run("static bool array encoding", func(t *testing.T) {
inputBase := []bool{true, false, false, true, true}
- arrayElems := make([]Value, len(inputBase))
- for index, bVal := range inputBase {
- arrayElems[index] = MakeBool(bVal)
- }
expected := []byte{
0b10011000,
}
- boolArr, err := MakeStaticArray(arrayElems)
- require.NoError(t, err, "make static array should not return error")
- boolArrEncode, err := boolArr.Encode()
+ boolArrEncode, err := staticBoolArrType.Encode(inputBase)
require.NoError(t, err, "static bool array encoding should not return error")
require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
})
// encoding test for static bool array
// input: {F, F, F, T, T, F, T, F, T, F, T}, encode expected: {0b00011010, 0b10100000}
+ staticBoolArrType = makeStaticArrayType(boolType, 11)
t.Run("static bool array encoding", func(t *testing.T) {
inputBase := []bool{false, false, false, true, true, false, true, false, true, false, true}
- arrayElems := make([]Value, len(inputBase))
- for index, bVal := range inputBase {
- arrayElems[index] = MakeBool(bVal)
- }
expected := []byte{
0b00011010, 0b10100000,
}
- boolArr, err := MakeStaticArray(arrayElems)
- require.NoError(t, err, "make static array should not return error")
- boolArrEncode, err := boolArr.Encode()
+ boolArrEncode, err := staticBoolArrType.Encode(inputBase)
require.NoError(t, err, "static bool array encoding should not return error")
require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
})
// encoding test for dynamic bool array
// input: {F, T, F, T, F, T, F, T, F, T}, encode expected: {0b01010101, 0b01000000}
+ dynamicBoolArrayType := makeDynamicArrayType(boolType)
t.Run("dynamic bool array encoding", func(t *testing.T) {
inputBase := []bool{false, true, false, true, false, true, false, true, false, true}
- arrayElems := make([]Value, len(inputBase))
- for index, bVal := range inputBase {
- arrayElems[index] = MakeBool(bVal)
- }
expected := []byte{
0x00, 0x0A, 0b01010101, 0b01000000,
}
- boolArr, err := MakeDynamicArray(arrayElems, MakeBoolType())
- require.NoError(t, err, "make dynamic array should not return error")
- boolArrEncode, err := boolArr.Encode()
+ boolArrEncode, err := dynamicBoolArrayType.Encode(inputBase)
require.NoError(t, err, "dynamic bool array encoding should not return error")
require.Equal(t, expected, boolArrEncode, "dynamic bool array encode not match expected")
})
@@ -227,29 +202,18 @@ func TestEncodeValid(t *testing.T) {
0x00, 0x03 (second string byte length 3)
byte('D'), byte('E'), byte('F') (second string encoded bytes)
*/
+ tupleType, err := TypeOf("(string,bool,bool,bool,bool,string)")
+ require.NoError(t, err, "type from string for dynamic tuple type should not return error")
t.Run("dynamic tuple encoding", func(t *testing.T) {
inputBase := []interface{}{
"ABC", true, false, true, false, "DEF",
}
- tupleElems := make([]Value, len(inputBase))
- // make tuple element values
- for index, bVal := range inputBase {
- temp, ok := bVal.(string)
- if ok {
- tupleElems[index] = MakeString(temp)
- } else {
- temp := bVal.(bool)
- tupleElems[index] = MakeBool(temp)
- }
- }
expected := []byte{
0x00, 0x05, 0b10100000, 0x00, 0x0A,
0x00, 0x03, byte('A'), byte('B'), byte('C'),
0x00, 0x03, byte('D'), byte('E'), byte('F'),
}
- stringTuple, err := MakeTuple(tupleElems)
- require.NoError(t, err, "make string tuple should not return error")
- stringTupleEncode, err := stringTuple.Encode()
+ stringTupleEncode, err := tupleType.Encode(inputBase)
require.NoError(t, err, "string tuple encoding should not return error")
require.Equal(t, expected, stringTupleEncode, "string tuple encoding not match expected")
})
@@ -262,21 +226,17 @@ func TestEncodeValid(t *testing.T) {
0b11000000 (first static bool array)
0b11000000 (second static bool array)
*/
+ tupleType, err = TypeOf("(bool[2],bool[2])")
+ require.NoError(t, err, "type from string for tuple type should not return error")
t.Run("static bool array tuple encoding", func(t *testing.T) {
- boolArr := []bool{true, true}
- boolValArr := make([]Value, 2)
- for i := 0; i < 2; i++ {
- boolValArr[i] = MakeBool(boolArr[i])
- }
- boolArrVal, err := MakeStaticArray(boolValArr)
- require.NoError(t, err, "make bool static array should not return error")
- tupleVal, err := MakeTuple([]Value{boolArrVal, boolArrVal})
- require.NoError(t, err, "make tuple value should not return error")
expected := []byte{
0b11000000,
0b11000000,
}
- actual, err := tupleVal.Encode()
+ actual, err := tupleType.Encode([]interface{}{
+ []bool{true, true},
+ []bool{true, true},
+ })
require.NoError(t, err, "encode tuple value should not return error")
require.Equal(t, expected, actual, "encode static bool tuple should be equal")
})
@@ -291,24 +251,18 @@ func TestEncodeValid(t *testing.T) {
0x00, 0x02 (dynamic bool array length 2)
0b11000000 (second static bool array)
*/
+ tupleType, err = TypeOf("(bool[2],bool[])")
+ require.NoError(t, err, "type from string for tuple type should not return error")
t.Run("static/dynamic bool array tuple encoding", func(t *testing.T) {
- boolArr := []bool{true, true}
- boolValArr := make([]Value, 2)
- for i := 0; i < 2; i++ {
- boolValArr[i] = MakeBool(boolArr[i])
- }
- boolArrStaticVal, err := MakeStaticArray(boolValArr)
- require.NoError(t, err, "make static bool array should not return error")
- boolArrDynamicVal, err := MakeDynamicArray(boolValArr, MakeBoolType())
- require.NoError(t, err, "make dynamic bool array should not return error")
- tupleVal, err := MakeTuple([]Value{boolArrStaticVal, boolArrDynamicVal})
- require.NoError(t, err, "make tuple for static/dynamic bool array should not return error")
expected := []byte{
0b11000000,
0x00, 0x03,
0x00, 0x02, 0b11000000,
}
- actual, err := tupleVal.Encode()
+ actual, err := tupleType.Encode([]interface{}{
+ []bool{true, true},
+ []bool{true, true},
+ })
require.NoError(t, err, "tuple value encoding should not return error")
require.Equal(t, expected, actual, "encode static/dynamic bool array tuple should not return error")
})
@@ -323,27 +277,27 @@ func TestEncodeValid(t *testing.T) {
0x00, 0x00 (first dynamic bool array length 0)
0x00, 0x00 (second dynamic bool array length 0)
*/
+ tupleType, err = TypeOf("(bool[],bool[])")
+ require.NoError(t, err, "type from string for tuple type should not return error")
t.Run("empty dynamic array tuple encoding", func(t *testing.T) {
- emptyDynamicArray, err := MakeDynamicArray([]Value{}, MakeBoolType())
- require.NoError(t, err, "make empty dynamic array should not return error")
- tupleVal, err := MakeTuple([]Value{emptyDynamicArray, emptyDynamicArray})
- require.NoError(t, err, "make empty dynamic array tuple should not return error")
expected := []byte{
0x00, 0x04, 0x00, 0x06,
0x00, 0x00, 0x00, 0x00,
}
- actual, err := tupleVal.Encode()
+ actual, err := tupleType.Encode([]interface{}{
+ []bool{}, []bool{},
+ })
require.NoError(t, err, "encode empty dynamic array tuple should not return error")
require.Equal(t, expected, actual, "encode empty dynamic array tuple does not match with expected")
})
// encoding test for empty tuple
// input: (), expected encoding: ""
+ tupleType, err = TypeOf("()")
+ require.NoError(t, err, "type from string for tuple type should not return error")
t.Run("empty tuple encoding", func(t *testing.T) {
- emptyTuple, err := MakeTuple([]Value{})
- require.NoError(t, err, "make empty tuple should not return error")
expected := make([]byte, 0)
- actual, err := emptyTuple.Encode()
+ actual, err := tupleType.Encode([]interface{}{})
require.NoError(t, err, "encode empty tuple should not return error")
require.Equal(t, expected, actual, "empty tuple encode should not return error")
})
@@ -356,17 +310,29 @@ func TestDecodeValid(t *testing.T) {
// generate bytes from random uint values and decode bytes with additional type information
for intSize := 8; intSize <= 512; intSize += 8 {
upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(intSize))
+ uintType, err := makeUintType(intSize)
+ require.NoError(t, err, "make uint type failure")
for i := 0; i < 1000; i++ {
- randomInt, err := rand.Int(rand.Reader, upperLimit)
+ randBig, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- expected, err := MakeUint(randomInt, uint16(intSize))
- require.NoError(t, err, "makeUint Fail")
- encodedUint, err := expected.Encode()
+
+ var expected interface{}
+ if intSize <= 64 && intSize > 32 {
+ expected = randBig.Uint64()
+ } else if intSize <= 32 && intSize > 16 {
+ expected = uint32(randBig.Uint64())
+ } else if intSize == 16 {
+ expected = uint16(randBig.Uint64())
+ } else if intSize == 8 {
+ expected = uint8(randBig.Uint64())
+ } else {
+ expected = randBig
+ }
+
+ encodedUint, err := uintType.Encode(expected)
require.NoError(t, err, "uint encode fail")
- // attempt to decode from given bytes: encodedUint
- uintType, err := MakeUintType(uint16(intSize))
- require.NoError(t, err, "uint type make fail")
- actual, err := Decode(encodedUint, uintType)
+
+ actual, err := uintType.Decode(encodedUint)
require.NoError(t, err, "decoding uint should not return error")
require.Equal(t, expected, actual, "decode uint fail to match expected value")
}
@@ -378,22 +344,32 @@ func TestDecodeValid(t *testing.T) {
for size := 8; size <= 512; size += 8 {
upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(size))
for precision := 1; precision <= 160; precision++ {
+ ufixedType, err := makeUfixedType(size, precision)
+ require.NoError(t, err, "make ufixed type failure")
for i := 0; i < 10; i++ {
- randomInt, err := rand.Int(rand.Reader, upperLimit)
+ randBig, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- valueUfixed, err := MakeUfixed(randomInt, uint16(size), uint16(precision))
- require.NoError(t, err, "makeUfixed Fail")
-
- encodedUfixed, err := valueUfixed.Encode()
+ var expected interface{}
+ if size <= 64 && size > 32 {
+ expected = randBig.Uint64()
+ } else if size <= 32 && size > 16 {
+ expected = uint32(randBig.Uint64())
+ } else if size == 16 {
+ expected = uint16(randBig.Uint64())
+ } else if size == 8 {
+ expected = uint8(randBig.Uint64())
+ } else {
+ expected = randBig
+ }
+
+ encodedUfixed, err := ufixedType.Encode(expected)
require.NoError(t, err, "ufixed encode fail")
+ require.NoError(t, err, "cast big integer to expected value should not return error")
- ufixedType, err := MakeUfixedType(uint16(size), uint16(precision))
- require.NoError(t, err, "ufixed type make fail")
-
- decodedUfixed, err := Decode(encodedUfixed, ufixedType)
+ actual, err := ufixedType.Decode(encodedUfixed)
require.NoError(t, err, "decoding ufixed should not return error")
- require.Equal(t, valueUfixed, decodedUfixed, "decode ufixed fail to match expected value")
+ require.Equal(t, expected, actual, "decode ufixed fail to match expected value")
}
}
}
@@ -407,39 +383,30 @@ func TestDecodeValid(t *testing.T) {
require.NoError(t, err, "cryptographic random int init fail")
addressBytes := randomAddrInt.Bytes()
- address := make([]byte, 32-len(addressBytes))
- address = append(address, addressBytes...)
-
- var addrBytes [32]byte
- copy(addrBytes[:], address[:32])
+ expected := make([]byte, 32-len(addressBytes))
+ expected = append(expected, addressBytes...)
- addressValue := MakeAddress(addrBytes)
- addrEncode, err := addressValue.Encode()
- require.NoError(t, err, "address encode fail")
-
- addressDecoded, err := Decode(addrEncode, MakeAddressType())
+ actual, err := addressType.Decode(expected)
require.NoError(t, err, "decoding address should not return error")
- require.Equal(t, addressValue, addressDecoded, "decode addr not match with expected")
+ require.Equal(t, expected, actual, "decode addr not match with expected")
}
// bool value decoding test
for i := 0; i < 2; i++ {
- boolValue := MakeBool(i == 1)
- boolEncode, err := boolValue.Encode()
+ boolEncode, err := boolType.Encode(i == 1)
require.NoError(t, err, "bool encode fail")
- boolDecode, err := Decode(boolEncode, MakeBoolType())
+ actual, err := boolType.Decode(boolEncode)
require.NoError(t, err, "decoding bool should not return error")
- require.Equal(t, boolValue, boolDecode, "decode bool not match with expected")
+ require.Equal(t, i == 1, actual, "decode bool not match with expected")
}
// byte value decoding test, iterating through 256 valid byte value
for i := 0; i < (1 << 8); i++ {
- byteValue := MakeByte(byte(i))
- byteEncode, err := byteValue.Encode()
+ byteEncode, err := byteType.Encode(byte(i))
require.NoError(t, err, "byte encode fail")
- byteDecode, err := Decode(byteEncode, MakeByteType())
+ actual, err := byteType.Decode(byteEncode)
require.NoError(t, err, "decoding byte should not return error")
- require.Equal(t, byteValue, byteDecode, "decode byte not match with expected")
+ require.Equal(t, byte(i), actual, "decode byte not match with expected")
}
// string value decoding test, test from utf string length 1 to 100
@@ -447,13 +414,12 @@ func TestDecodeValid(t *testing.T) {
// decode the encoded expected value and check if they match
for length := 1; length <= 100; length++ {
for i := 0; i < 10; i++ {
- utf8Str := gobberish.GenerateString(length)
- strValue := MakeString(utf8Str)
- strEncode, err := strValue.Encode()
+ expected := gobberish.GenerateString(length)
+ strEncode, err := stringType.Encode(expected)
require.NoError(t, err, "string encode fail")
- strDecode, err := Decode(strEncode, MakeStringType())
+ actual, err := stringType.Decode(strEncode)
require.NoError(t, err, "decoding string should not return error")
- require.Equal(t, strValue, strDecode, "encode string not match with expected")
+ require.Equal(t, expected, actual, "encode string not match with expected")
}
}
@@ -461,17 +427,10 @@ func TestDecodeValid(t *testing.T) {
// expected value: bool[5]: {T, F, F, T, T}
// input: 0b10011000
t.Run("static bool array decode", func(t *testing.T) {
- inputBase := []bool{true, false, false, true, true}
- arrayElems := make([]Value, len(inputBase))
- for index, bVal := range inputBase {
- arrayElems[index] = MakeBool(bVal)
- }
- expected, err := MakeStaticArray(arrayElems)
- require.NoError(t, err, "make expected value should not return error")
- actual, err := Decode(
- []byte{0b10011000},
- MakeStaticArrayType(MakeBoolType(), uint16(len(inputBase))),
- )
+ staticBoolArrT, err := TypeOf("bool[5]")
+ require.NoError(t, err, "make static bool array type failure")
+ expected := []interface{}{true, false, false, true, true}
+ actual, err := staticBoolArrT.Decode([]byte{0b10011000})
require.NoError(t, err, "decoding static bool array should not return error")
require.Equal(t, expected, actual, "static bool array decode do not match expected")
})
@@ -480,19 +439,10 @@ func TestDecodeValid(t *testing.T) {
// expected value: bool[11]: F, F, F, T, T, F, T, F, T, F, T
// input: 0b00011010, 0b10100000
t.Run("static bool array decode", func(t *testing.T) {
- inputBase := []bool{false, false, false, true, true, false, true, false, true, false, true}
- arrayElems := make([]Value, len(inputBase))
- for index, bVal := range inputBase {
- arrayElems[index] = MakeBool(bVal)
- }
- expected, err := MakeStaticArray(arrayElems)
- require.NoError(t, err, "make expected value should not return error")
- actual, err := Decode(
- []byte{
- 0b00011010, 0b10100000,
- },
- MakeStaticArrayType(MakeBoolType(), uint16(len(inputBase))),
- )
+ staticBoolArrT, err := TypeOf("bool[11]")
+ require.NoError(t, err, "make static bool array type failure")
+ expected := []interface{}{false, false, false, true, true, false, true, false, true, false, true}
+ actual, err := staticBoolArrT.Decode([]byte{0b00011010, 0b10100000})
require.NoError(t, err, "decoding static bool array should not return error")
require.Equal(t, expected, actual, "static bool array decode do not match expected")
})
@@ -510,20 +460,19 @@ func TestDecodeValid(t *testing.T) {
0, 0, 0, 0, 0, 0, 0, 8 (encoding for uint64 8)
*/
t.Run("static uint array decode", func(t *testing.T) {
- inputUint := []uint64{1, 2, 3, 4, 5, 6, 7, 8}
- arrayElems := make([]Value, len(inputUint))
- for index, uintVal := range inputUint {
- arrayElems[index] = MakeUint64(uintVal)
- }
- uintT, err := MakeUintType(64)
- require.NoError(t, err, "make uint64 type should not return error")
- expected, err := MakeStaticArray(arrayElems)
- require.NoError(t, err, "make uint64 static array should not return error")
- arrayEncoded, err := expected.Encode()
+ staticUintArrT, err := TypeOf("uint64[8]")
+ require.NoError(t, err, "make static uint array type failure")
+ expected := []interface{}{
+ uint64(1), uint64(2),
+ uint64(3), uint64(4),
+ uint64(5), uint64(6),
+ uint64(7), uint64(8),
+ }
+ arrayEncoded, err := staticUintArrT.Encode(expected)
require.NoError(t, err, "uint64 static array encode should not return error")
- arrayDecoded, err := Decode(arrayEncoded, MakeStaticArrayType(uintT, uint16(len(inputUint))))
+ actual, err := staticUintArrT.Decode(arrayEncoded)
require.NoError(t, err, "uint64 static array decode should not return error")
- require.Equal(t, expected, arrayDecoded, "uint64 static array decode do not match with expected value")
+ require.Equal(t, expected, actual, "uint64 static array decode do not match with expected value")
})
// decoding test for dynamic bool array
@@ -533,17 +482,13 @@ func TestDecodeValid(t *testing.T) {
0b01010101, 0b01000000 (dynamic bool array encoding)
*/
t.Run("dynamic bool array decode", func(t *testing.T) {
- inputBool := []bool{false, true, false, true, false, true, false, true, false, true}
- arrayElems := make([]Value, len(inputBool))
- for index, bVal := range inputBool {
- arrayElems[index] = MakeBool(bVal)
- }
- expected, err := MakeDynamicArray(arrayElems, MakeBoolType())
- require.NoError(t, err, "make expected value should not return error")
+ dynamicBoolArrT, err := TypeOf("bool[]")
+ require.NoError(t, err, "make dynamic bool array type failure")
+ expected := []interface{}{false, true, false, true, false, true, false, true, false, true}
inputEncoded := []byte{
0x00, 0x0A, 0b01010101, 0b01000000,
}
- actual, err := Decode(inputEncoded, MakeDynamicArrayType(MakeBoolType()))
+ actual, err := dynamicBoolArrT.Decode(inputEncoded)
require.NoError(t, err, "decode dynamic array should not return error")
require.Equal(t, expected, actual, "decode dynamic array do not match expected")
})
@@ -562,38 +507,17 @@ func TestDecodeValid(t *testing.T) {
byte('D'), byte('E'), byte('F') (second string encoded bytes)
*/
t.Run("dynamic tuple decoding", func(t *testing.T) {
+ tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
+ require.NoError(t, err, "make tuple type failure")
inputEncode := []byte{
0x00, 0x05, 0b10100000, 0x00, 0x0A,
0x00, 0x03, byte('A'), byte('B'), byte('C'),
0x00, 0x03, byte('D'), byte('E'), byte('F'),
}
- expectedBase := []interface{}{
+ expected := []interface{}{
"ABC", true, false, true, false, "DEF",
}
- tupleElems := make([]Value, len(expectedBase))
- for index, bVal := range expectedBase {
- temp, ok := bVal.(string)
- if ok {
- tupleElems[index] = MakeString(temp)
- } else {
- temp := bVal.(bool)
- tupleElems[index] = MakeBool(temp)
- }
- }
- expected, err := MakeTuple(tupleElems)
- require.NoError(t, err, "make expected value should not return error")
- actual, err := Decode(
- inputEncode,
- Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- MakeStringType(),
- MakeBoolType(), MakeBoolType(), MakeBoolType(), MakeBoolType(),
- MakeStringType(),
- },
- staticLength: 6,
- },
- )
+ actual, err := tupleT.Decode(inputEncode)
require.NoError(t, err, "decoding dynamic tuple should not return error")
require.Equal(t, expected, actual, "dynamic tuple not match with expected")
})
@@ -607,37 +531,19 @@ func TestDecodeValid(t *testing.T) {
0b11000000 (second static bool array)
*/
t.Run("static bool array tuple decoding", func(t *testing.T) {
- boolArr := []bool{true, true}
- boolValArr := make([]Value, 2)
- for i := 0; i < 2; i++ {
- boolValArr[i] = MakeBool(boolArr[i])
+ tupleT, err := TypeOf("(bool[2],bool[2])")
+ require.NoError(t, err, "make tuple type failure")
+ expected := []interface{}{
+ []interface{}{true, true},
+ []interface{}{true, true},
}
- boolArrVal, err := MakeStaticArray(boolValArr)
- require.NoError(t, err, "make bool static array should not return error")
- tupleVal, err := MakeTuple([]Value{boolArrVal, boolArrVal})
- require.NoError(t, err, "make tuple value should not return error")
encodedInput := []byte{
0b11000000,
0b11000000,
}
- decoded, err := Decode(encodedInput, Type{
- abiTypeID: Tuple,
- staticLength: 2,
- childTypes: []Type{
- {
- abiTypeID: ArrayStatic,
- staticLength: 2,
- childTypes: []Type{MakeBoolType()},
- },
- {
- abiTypeID: ArrayStatic,
- staticLength: 2,
- childTypes: []Type{MakeBoolType()},
- },
- },
- })
+ actual, err := tupleT.Decode(encodedInput)
require.NoError(t, err, "decode tuple value should not return error")
- require.Equal(t, tupleVal, decoded, "decoded tuple value do not match with expected")
+ require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
})
// decoding test for tuple with static and dynamic bool array
@@ -651,39 +557,20 @@ func TestDecodeValid(t *testing.T) {
0b11000000 (second static bool array)
*/
t.Run("static/dynamic bool array tuple decoding", func(t *testing.T) {
- boolArr := []bool{true, true}
- boolValArr := make([]Value, 2)
- for i := 0; i < 2; i++ {
- boolValArr[i] = MakeBool(boolArr[i])
+ tupleT, err := TypeOf("(bool[2],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ expected := []interface{}{
+ []interface{}{true, true},
+ []interface{}{true, true},
}
- boolArrStaticVal, err := MakeStaticArray(boolValArr)
- require.NoError(t, err, "make static bool array should not return error")
- boolArrDynamicVal, err := MakeDynamicArray(boolValArr, MakeBoolType())
- require.NoError(t, err, "make dynamic bool array should not return error")
- tupleVal, err := MakeTuple([]Value{boolArrStaticVal, boolArrDynamicVal})
- require.NoError(t, err, "make tuple for static/dynamic bool array should not return error")
encodedInput := []byte{
0b11000000,
0x00, 0x03,
0x00, 0x02, 0b11000000,
}
- decoded, err := Decode(encodedInput, Type{
- abiTypeID: Tuple,
- staticLength: 2,
- childTypes: []Type{
- {
- abiTypeID: ArrayStatic,
- staticLength: 2,
- childTypes: []Type{MakeBoolType()},
- },
- {
- abiTypeID: ArrayDynamic,
- childTypes: []Type{MakeBoolType()},
- },
- },
- })
+ actual, err := tupleT.Decode(encodedInput)
require.NoError(t, err, "decode tuple for static/dynamic bool array should not return error")
- require.Equal(t, tupleVal, decoded, "decoded tuple value do not match with expected")
+ require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
})
// decoding test for tuple with all dynamic bool array
@@ -697,46 +584,29 @@ func TestDecodeValid(t *testing.T) {
0x00, 0x00 (second dynamic bool array length 0)
*/
t.Run("empty dynamic array tuple decoding", func(t *testing.T) {
- emptyDynamicArray, err := MakeDynamicArray([]Value{}, MakeBoolType())
- require.NoError(t, err, "make empty dynamic array should not return error")
- tupleVal, err := MakeTuple([]Value{emptyDynamicArray, emptyDynamicArray})
- require.NoError(t, err, "make empty dynamic array tuple should not return error")
+ tupleT, err := TypeOf("(bool[],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ expected := []interface{}{
+ []interface{}{}, []interface{}{},
+ }
encodedInput := []byte{
0x00, 0x04, 0x00, 0x06,
0x00, 0x00, 0x00, 0x00,
}
- decoded, err := Decode(encodedInput, Type{
- abiTypeID: Tuple,
- staticLength: 2,
- childTypes: []Type{
- {
- abiTypeID: ArrayDynamic,
- childTypes: []Type{MakeBoolType()},
- },
- {
- abiTypeID: ArrayDynamic,
- childTypes: []Type{MakeBoolType()},
- },
- },
- })
+ actual, err := tupleT.Decode(encodedInput)
require.NoError(t, err, "decode tuple for empty dynamic array should not return error")
- require.Equal(t, tupleVal, decoded, "decoded tuple value do not match with expected")
+ require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
})
// decoding test for empty tuple
// expected value: ()
// byte input: ""
t.Run("empty tuple decoding", func(t *testing.T) {
- emptyTuple, err := MakeTuple([]Value{})
- require.NoError(t, err, "make empty tuple should not return error")
- encodedInput := make([]byte, 0)
- decoded, err := Decode(encodedInput, Type{
- abiTypeID: Tuple,
- staticLength: 0,
- childTypes: []Type{},
- })
+ tupleT, err := TypeOf("()")
+ require.NoError(t, err, "make empty tuple type should not return error")
+ actual, err := tupleT.Decode([]byte{})
require.NoError(t, err, "decode empty tuple should not return error")
- require.Equal(t, emptyTuple, decoded, "empty tuple encode should not return error")
+ require.Equal(t, []interface{}{}, actual, "empty tuple encode should not return error")
})
}
@@ -748,8 +618,8 @@ func TestDecodeInvalid(t *testing.T) {
// should throw error
t.Run("corrupted static bool array decode", func(t *testing.T) {
inputBase := []byte{0b11111111}
- arrayType := MakeStaticArrayType(MakeBoolType(), 9)
- _, err := Decode(inputBase, arrayType)
+ arrayType := makeStaticArrayType(boolType, 9)
+ _, err := arrayType.Decode(inputBase)
require.Error(t, err, "decoding corrupted static bool array should return error")
})
@@ -759,8 +629,8 @@ func TestDecodeInvalid(t *testing.T) {
// should throw error
t.Run("corrupted static bool array decode", func(t *testing.T) {
inputBase := []byte{0b01001011, 0b00000000}
- arrayType := MakeStaticArrayType(MakeBoolType(), 8)
- _, err := Decode(inputBase, arrayType)
+ arrayType := makeStaticArrayType(boolType, 8)
+ _, err := arrayType.Decode(inputBase)
require.Error(t, err, "decoding corrupted static bool array should return error")
})
@@ -778,10 +648,9 @@ func TestDecodeInvalid(t *testing.T) {
0, 0, 0, 0, 0, 0, 0, 5,
0, 0, 0, 0, 0, 0, 0, 6,
}
- uintT, err := MakeUintType(64)
- require.NoError(t, err, "make uint64 type should not return error")
- uintTArray := MakeStaticArrayType(uintT, 8)
- _, err = Decode(inputBase, uintTArray)
+ uintTArray, err := TypeOf("uint64[8]")
+ require.NoError(t, err, "make uint64 static array type should not return error")
+ _, err = uintTArray.Decode(inputBase)
require.Error(t, err, "corrupted uint64 static array decode should return error")
})
@@ -800,10 +669,9 @@ func TestDecodeInvalid(t *testing.T) {
0, 0, 0, 0, 0, 0, 0, 6,
0, 0, 0, 0, 0, 0, 0, 7,
}
- uintT, err := MakeUintType(64)
- require.NoError(t, err, "make uint64 type should not return error")
- uintTArray := MakeStaticArrayType(uintT, 7)
- _, err = Decode(inputBase, uintTArray)
+ uintTArray, err := TypeOf("uint64[7]")
+ require.NoError(t, err, "make uint64 static array type should not return error")
+ _, err = uintTArray.Decode(inputBase)
require.Error(t, err, "corrupted uint64 static array decode should return error")
})
@@ -815,8 +683,8 @@ func TestDecodeInvalid(t *testing.T) {
inputBase := []byte{
0x00, 0x0A, 0b10101010,
}
- dynamicT := MakeDynamicArrayType(MakeBoolType())
- _, err := Decode(inputBase, dynamicT)
+ dynamicT := makeDynamicArrayType(boolType)
+ _, err := dynamicT.Decode(inputBase)
require.Error(t, err, "decode corrupted dynamic array should return error")
})
@@ -828,8 +696,8 @@ func TestDecodeInvalid(t *testing.T) {
inputBase := []byte{
0x00, 0x07, 0b10101010, 0b00000000,
}
- dynamicT := MakeDynamicArrayType(MakeBoolType())
- _, err := Decode(inputBase, dynamicT)
+ dynamicT := makeDynamicArrayType(boolType)
+ _, err := dynamicT.Decode(inputBase)
require.Error(t, err, "decode corrupted dynamic array should return error")
})
@@ -855,30 +723,9 @@ func TestDecodeInvalid(t *testing.T) {
0x00, 0x03, byte('A'), byte('B'), byte('C'),
0x00, 0x03, byte('D'), byte('E'), byte('F'),
}
- expectedBase := []interface{}{
- "ABC", true, false, true, false, "DEF",
- }
- tupleElems := make([]Value, len(expectedBase))
- for index, bVal := range expectedBase {
- temp, ok := bVal.(string)
- if ok {
- tupleElems[index] = MakeString(temp)
- } else {
- temp := bVal.(bool)
- tupleElems[index] = MakeBool(temp)
- }
- }
- _, err := Decode(
- inputEncode,
- Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- MakeStringType(),
- MakeBoolType(), MakeBoolType(), MakeBoolType(), MakeBoolType(),
- MakeStringType(),
- },
- },
- )
+ tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(inputEncode)
require.Error(t, err, "corrupted decoding dynamic tuple should return error")
})
@@ -896,35 +743,20 @@ func TestDecodeInvalid(t *testing.T) {
<- corrupted byte, 1 byte missing
*/
t.Run("corrupted static bool array tuple decoding", func(t *testing.T) {
- expectedType := Type{
- abiTypeID: Tuple,
- staticLength: 2,
- childTypes: []Type{
- {
- abiTypeID: ArrayStatic,
- staticLength: 2,
- childTypes: []Type{MakeBoolType()},
- },
- {
- abiTypeID: ArrayStatic,
- staticLength: 2,
- childTypes: []Type{MakeBoolType()},
- },
- },
- }
-
+ expectedType, err := TypeOf("(bool[2],bool[2])")
+ require.NoError(t, err, "make tuple type failure")
encodedInput0 := []byte{
0b11000000,
0b11000000,
0b00000000,
}
- _, err := Decode(encodedInput0, expectedType)
+ _, err = expectedType.Decode(encodedInput0)
require.Error(t, err, "decode corrupted tuple value should return error")
encodedInput1 := []byte{
0b11000000,
}
- _, err = Decode(encodedInput1, expectedType)
+ _, err = expectedType.Decode(encodedInput1)
require.Error(t, err, "decode corrupted tuple value should return error")
})
@@ -944,21 +776,9 @@ func TestDecodeInvalid(t *testing.T) {
0x03,
0x00, 0x02, 0b11000000,
}
- _, err := Decode(encodedInput, Type{
- abiTypeID: Tuple,
- staticLength: 2,
- childTypes: []Type{
- {
- abiTypeID: ArrayStatic,
- staticLength: 2,
- childTypes: []Type{MakeBoolType()},
- },
- {
- abiTypeID: ArrayDynamic,
- childTypes: []Type{MakeBoolType()},
- },
- },
- })
+ tupleT, err := TypeOf("(bool[2],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(encodedInput)
require.Error(t, err, "decode corrupted tuple for static/dynamic bool array should return error")
})
@@ -981,20 +801,9 @@ func TestDecodeInvalid(t *testing.T) {
0x00, 0x04, 0x00, 0x07,
0x00, 0x00, 0x00, 0x00,
}
- _, err := Decode(encodedInput, Type{
- abiTypeID: Tuple,
- staticLength: 2,
- childTypes: []Type{
- {
- abiTypeID: ArrayDynamic,
- childTypes: []Type{MakeBoolType()},
- },
- {
- abiTypeID: ArrayDynamic,
- childTypes: []Type{MakeBoolType()},
- },
- },
- })
+ tupleT, err := TypeOf("(bool[],bool[])")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(encodedInput)
require.Error(t, err, "decode corrupted tuple for empty dynamic array should return error")
})
@@ -1004,200 +813,191 @@ func TestDecodeInvalid(t *testing.T) {
// should return error
t.Run("corrupted empty tuple decoding", func(t *testing.T) {
encodedInput := []byte{0xFF}
- _, err := Decode(encodedInput, Type{
- abiTypeID: Tuple,
- staticLength: 0,
- childTypes: []Type{},
- })
+ tupleT, err := TypeOf("()")
+ require.NoError(t, err, "make tuple type failure")
+ _, err = tupleT.Decode(encodedInput)
require.Error(t, err, "decode corrupted empty tuple should return error")
})
}
-func generateStaticArray(t *testing.T, testValuePool *[][]Value) {
- // int
- for intIndex := 0; intIndex < len((*testValuePool)[Uint]); intIndex += 200 {
- staticArrayList := make([]Value, 20)
- for i := 0; i < 20; i++ {
- staticArrayList[i] = (*testValuePool)[Uint][intIndex+i]
- }
- staticArray, err := MakeStaticArray(staticArrayList)
- require.NoError(t, err, "make static array for uint should not return error")
- (*testValuePool)[ArrayStatic] = append((*testValuePool)[ArrayStatic], staticArray)
+type testUnit struct {
+ serializedType string
+ value interface{}
+}
+
+func categorySelfRoundTripTest(t *testing.T, category []testUnit) {
+ for _, testObj := range category {
+ abiType, err := TypeOf(testObj.serializedType)
+ require.NoError(t, err, "failure to deserialize type")
+ encodedValue, err := abiType.Encode(testObj.value)
+ require.NoError(t, err, "failure to encode value")
+ actual, err := abiType.Decode(encodedValue)
+ require.NoError(t, err, "failure to decode value")
+ require.Equal(t, testObj.value, actual, "decoded value not equal to expected")
+ jsonEncodedValue, err := abiType.MarshalToJSON(testObj.value)
+ require.NoError(t, err, "failure to encode value to JSON type")
+ jsonActual, err := abiType.UnmarshalFromJSON(jsonEncodedValue)
+ require.NoError(t, err, "failure to decode JSON value back")
+ require.Equal(t, testObj.value, jsonActual, "decode JSON value not equal to expected")
}
- // byte
- byteArrayList := make([]Value, 20)
- for byteIndex := 0; byteIndex < 20; byteIndex++ {
- byteArrayList[byteIndex] = (*testValuePool)[Byte][byteIndex]
+}
+
+func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
+ (*pool)[Uint] = make([]testUnit, 200*64)
+ (*pool)[Ufixed] = make([]testUnit, 160*64)
+
+ uintIndex := 0
+ ufixedIndex := 0
+
+ for bitSize := 8; bitSize <= 512; bitSize += 8 {
+ max := new(big.Int).Lsh(big.NewInt(1), uint(bitSize))
+
+ uintT, err := makeUintType(bitSize)
+ require.NoError(t, err, "make uint type failure")
+ uintTstr := uintT.String()
+
+ for j := 0; j < 200; j++ {
+ randVal, err := rand.Int(rand.Reader, max)
+ require.NoError(t, err, "generate random uint, should be no error")
+
+ narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
+ require.NoError(t, err, "cast random uint to nearest primitive failure")
+
+ (*pool)[Uint][uintIndex] = testUnit{serializedType: uintTstr, value: narrowest}
+ uintIndex++
+ }
+
+ for precision := 1; precision <= 160; precision++ {
+ randVal, err := rand.Int(rand.Reader, max)
+ require.NoError(t, err, "generate random ufixed, should be no error")
+
+ narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
+ require.NoError(t, err, "cast random uint to nearest primitive failure")
+
+ ufixedT, err := makeUfixedType(bitSize, precision)
+ require.NoError(t, err, "make ufixed type failure")
+ ufixedTstr := ufixedT.String()
+ (*pool)[Ufixed][ufixedIndex] = testUnit{serializedType: ufixedTstr, value: narrowest}
+ ufixedIndex++
+ }
}
- byteStaticArray, err := MakeStaticArray(byteArrayList)
- require.NoError(t, err, "make static array for byte should not return error")
- (*testValuePool)[ArrayStatic] = append((*testValuePool)[ArrayStatic], byteStaticArray)
- // address
- addressArrayList := make([]Value, 20)
- for addrIndex := 0; addrIndex < 20; addrIndex++ {
- addressArrayList[addrIndex] = (*testValuePool)[Address][addrIndex]
+ categorySelfRoundTripTest(t, (*pool)[Uint])
+ categorySelfRoundTripTest(t, (*pool)[Ufixed])
+
+ (*pool)[Byte] = make([]testUnit, 1<<8)
+ for i := 0; i < (1 << 8); i++ {
+ (*pool)[Byte][i] = testUnit{serializedType: byteType.String(), value: byte(i)}
}
- addressStaticArray, err := MakeStaticArray(addressArrayList)
- require.NoError(t, err, "make static array for address should not return error")
- (*testValuePool)[ArrayStatic] = append((*testValuePool)[ArrayStatic], addressStaticArray)
- // string
- stringArrayList := make([]Value, 20)
- for strIndex := 0; strIndex < 20; strIndex++ {
- stringArrayList[strIndex] = (*testValuePool)[String][strIndex]
+ categorySelfRoundTripTest(t, (*pool)[Byte])
+
+ (*pool)[Bool] = make([]testUnit, 2)
+ (*pool)[Bool][0] = testUnit{serializedType: boolType.String(), value: false}
+ (*pool)[Bool][1] = testUnit{serializedType: boolType.String(), value: true}
+ categorySelfRoundTripTest(t, (*pool)[Bool])
+
+ maxAddress := new(big.Int).Lsh(big.NewInt(1), 256)
+ (*pool)[Address] = make([]testUnit, 300)
+ for i := 0; i < 300; i++ {
+ randAddrVal, err := rand.Int(rand.Reader, maxAddress)
+ require.NoError(t, err, "generate random value for address, should be no error")
+ addrBytes := randAddrVal.Bytes()
+ remainBytes := make([]byte, 32-len(addrBytes))
+ addrBytes = append(remainBytes, addrBytes...)
+ (*pool)[Address][i] = testUnit{serializedType: addressType.String(), value: addrBytes}
}
- stringStaticArray, err := MakeStaticArray(stringArrayList)
- require.NoError(t, err, "make static array for string should not return error")
- (*testValuePool)[ArrayStatic] = append((*testValuePool)[ArrayStatic], stringStaticArray)
- // bool
- boolArrayList := make([]Value, 20)
- for boolIndex := 0; boolIndex < 20; boolIndex++ {
- valBig, err := rand.Int(rand.Reader, big.NewInt(2))
- require.NoError(t, err, "generate random bool index should not return error")
- valIndex := valBig.Int64()
- boolArrayList[boolIndex] = (*testValuePool)[Bool][valIndex]
+ categorySelfRoundTripTest(t, (*pool)[Address])
+
+ (*pool)[String] = make([]testUnit, 400)
+ stringIndex := 0
+ for length := 1; length <= 100; length++ {
+ for i := 0; i < 4; i++ {
+ (*pool)[String][stringIndex] = testUnit{
+ serializedType: stringType.String(),
+ value: gobberish.GenerateString(length),
+ }
+ stringIndex++
+ }
}
- boolStaticArray, err := MakeStaticArray(boolArrayList)
- require.NoError(t, err, "make static array for bool should not return error")
- (*testValuePool)[ArrayStatic] = append((*testValuePool)[ArrayStatic], boolStaticArray)
+ categorySelfRoundTripTest(t, (*pool)[String])
}
-func generateDynamicArray(t *testing.T, testValuePool *[][]Value) {
- // int
- for intIndex := 0; intIndex < len((*testValuePool)[Uint]); intIndex += 200 {
- dynamicArrayList := make([]Value, 20)
- for i := 0; i < 20; i++ {
- dynamicArrayList[i] = (*testValuePool)[Uint][intIndex+i]
+func takeSomeFromCategoryAndGenerateArray(
+ t *testing.T, abiT BaseType, srtIndex int, takeNum uint16, pool *map[BaseType][]testUnit) {
+
+ tempArray := make([]interface{}, takeNum)
+ for i := 0; i < int(takeNum); i++ {
+ index := srtIndex + i
+ if index >= len((*pool)[abiT]) {
+ index = srtIndex
}
- dynamicArray, err := MakeDynamicArray(dynamicArrayList, dynamicArrayList[0].ABIType)
- require.NoError(t, err, "make static array for uint should not return error")
- (*testValuePool)[ArrayDynamic] = append((*testValuePool)[ArrayDynamic], dynamicArray)
- }
- // byte
- byteArrayList := make([]Value, 20)
- for byteIndex := 0; byteIndex < 20; byteIndex++ {
- byteArrayList[byteIndex] = (*testValuePool)[Byte][byteIndex]
- }
- byteDynamicArray, err := MakeDynamicArray(byteArrayList, byteArrayList[0].ABIType)
- require.NoError(t, err, "make dynamic array for byte should not return error")
- (*testValuePool)[ArrayDynamic] = append((*testValuePool)[ArrayDynamic], byteDynamicArray)
- // address
- addressArrayList := make([]Value, 20)
- for addrIndex := 0; addrIndex < 20; addrIndex++ {
- addressArrayList[addrIndex] = (*testValuePool)[Address][addrIndex]
- }
- addressDynamicArray, err := MakeDynamicArray(addressArrayList, MakeAddressType())
- require.NoError(t, err, "make dynamic array for address should not return error")
- (*testValuePool)[ArrayDynamic] = append((*testValuePool)[ArrayDynamic], addressDynamicArray)
- // string
- stringArrayList := make([]Value, 20)
- for strIndex := 0; strIndex < 20; strIndex++ {
- stringArrayList[strIndex] = (*testValuePool)[String][strIndex]
+ tempArray[i] = (*pool)[abiT][index].value
}
- stringDynamicArray, err := MakeDynamicArray(stringArrayList, MakeStringType())
- require.NoError(t, err, "make dynamic array for string should not return error")
- (*testValuePool)[ArrayDynamic] = append((*testValuePool)[ArrayDynamic], stringDynamicArray)
- // bool
- boolArrayList := make([]Value, 20)
- for boolIndex := 0; boolIndex < 20; boolIndex++ {
- valBig, err := rand.Int(rand.Reader, big.NewInt(2))
- require.NoError(t, err, "generate random bool index should not return error")
- valIndex := valBig.Int64()
- boolArrayList[boolIndex] = (*testValuePool)[Bool][valIndex]
+ tempT, err := TypeOf((*pool)[abiT][srtIndex].serializedType)
+ require.NoError(t, err, "type in test uint cannot be deserialized")
+ (*pool)[ArrayStatic] = append((*pool)[ArrayStatic], testUnit{
+ serializedType: makeStaticArrayType(tempT, takeNum).String(),
+ value: tempArray,
+ })
+ (*pool)[ArrayDynamic] = append((*pool)[ArrayDynamic], testUnit{
+ serializedType: makeDynamicArrayType(tempT).String(),
+ value: tempArray,
+ })
+}
+
+func addArrayRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
+ for intIndex := 0; intIndex < len((*pool)[Uint]); intIndex += 200 {
+ takeSomeFromCategoryAndGenerateArray(t, Uint, intIndex, 20, pool)
}
- boolDynamicArray, err := MakeDynamicArray(boolArrayList, MakeBoolType())
- require.NoError(t, err, "make dynamic array for bool should not return error")
- (*testValuePool)[ArrayDynamic] = append((*testValuePool)[ArrayDynamic], boolDynamicArray)
+ takeSomeFromCategoryAndGenerateArray(t, Byte, 0, 20, pool)
+ takeSomeFromCategoryAndGenerateArray(t, Address, 0, 20, pool)
+ takeSomeFromCategoryAndGenerateArray(t, String, 0, 20, pool)
+ takeSomeFromCategoryAndGenerateArray(t, Bool, 0, 20, pool)
+
+ categorySelfRoundTripTest(t, (*pool)[ArrayStatic])
+ categorySelfRoundTripTest(t, (*pool)[ArrayDynamic])
}
-func generateTuples(t *testing.T, testValuePool *[][]Value, slotRange int) {
+func addTupleRandomValues(t *testing.T, slotRange BaseType, pool *map[BaseType][]testUnit) {
for i := 0; i < 100; i++ {
- tupleLenBig, err := rand.Int(rand.Reader, big.NewInt(2))
+ tupleLenBig, err := rand.Int(rand.Reader, big.NewInt(20))
require.NoError(t, err, "generate random tuple length should not return error")
- tupleLen := 1 + tupleLenBig.Int64()
- tupleValList := make([]Value, tupleLen)
- for tupleElemIndex := 0; tupleElemIndex < int(tupleLen); tupleElemIndex++ {
- tupleTypeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(slotRange)))
+ tupleLen := tupleLenBig.Int64() + 1
+ testUnits := make([]testUnit, tupleLen)
+ for index := 0; index < int(tupleLen); index++ {
+ tupleTypeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(slotRange)+1))
require.NoError(t, err, "generate random tuple element type index should not return error")
- tupleTypeIndex := tupleTypeIndexBig.Int64()
- tupleElemChoiceRange := len((*testValuePool)[tupleTypeIndex])
+ tupleTypeIndex := BaseType(tupleTypeIndexBig.Int64())
+ tupleElemChoiceRange := len((*pool)[tupleTypeIndex])
tupleElemRangeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(tupleElemChoiceRange)))
require.NoError(t, err, "generate random tuple element index in test pool should not return error")
tupleElemRangeIndex := tupleElemRangeIndexBig.Int64()
- tupleElem := (*testValuePool)[tupleTypeIndex][tupleElemRangeIndex]
- tupleValList[tupleElemIndex] = tupleElem
- }
- tupleVal, err := MakeTuple(tupleValList)
- require.NoError(t, err, "make tuple should not return error")
- (*testValuePool)[Tuple] = append((*testValuePool)[Tuple], tupleVal)
+ tupleElem := (*pool)[tupleTypeIndex][tupleElemRangeIndex]
+ testUnits[index] = tupleElem
+ }
+ elemValues := make([]interface{}, tupleLen)
+ elemTypes := make([]Type, tupleLen)
+ for index := 0; index < int(tupleLen); index++ {
+ elemValues[index] = testUnits[index].value
+ abiT, err := TypeOf(testUnits[index].serializedType)
+ require.NoError(t, err, "deserialize type failure for tuple elements")
+ elemTypes[index] = abiT
+ }
+ tupleT, err := MakeTupleType(elemTypes)
+ require.NoError(t, err, "make tuple type failure")
+ (*pool)[Tuple] = append((*pool)[Tuple], testUnit{
+ serializedType: tupleT.String(),
+ value: elemValues,
+ })
}
}
-// round-trip test for random tuple elements
-// first we generate base type elements to each slot of testValuePool
-// then we generate static/dynamic array based on the pre-generated random values
-// we generate base tuples based on base-type elements/static arrays/dynamic arrays
-// we also generate cascaded tuples (tuples with tuple elements)
-func TestEncodeDecodeRandomTuple(t *testing.T) {
+func TestRandomABIEncodeDecodeRoundTrip(t *testing.T) {
partitiontest.PartitionTest(t)
- // test pool for 9 distinct types
- testValuePool := make([][]Value, 9)
- for i := 8; i <= 512; i += 8 {
- max := big.NewInt(1).Lsh(big.NewInt(1), uint(i))
- for j := 0; j < 200; j++ {
- randVal, err := rand.Int(rand.Reader, max)
- require.NoError(t, err, "generate largest number bound, should be no error")
- uintTemp, err := MakeUint(randVal, uint16(i))
- require.NoError(t, err, "generate random ABI uint should not return error")
- testValuePool[Uint] = append(testValuePool[Uint], uintTemp)
- }
- for j := 1; j < 160; j++ {
- randVal, err := rand.Int(rand.Reader, max)
- require.NoError(t, err, "generate largest number bound, should be no error")
- ufixedTemp, err := MakeUfixed(randVal, uint16(i), uint16(j))
- require.NoError(t, err, "generate random ABI ufixed should not return error")
- testValuePool[Ufixed] = append(testValuePool[Ufixed], ufixedTemp)
- }
- }
- for i := 0; i < (1 << 8); i++ {
- testValuePool[Byte] = append(testValuePool[Byte], MakeByte(byte(i)))
- }
- for i := 0; i < 2; i++ {
- testValuePool[Bool] = append(testValuePool[Bool], MakeBool(i == 1))
- }
- for i := 0; i < 500; i++ {
- max := big.NewInt(1).Lsh(big.NewInt(1), 256)
- randVal, err := rand.Int(rand.Reader, max)
- require.NoError(t, err, "generate largest number bound, should be no error")
- addrBytes := randVal.Bytes()
- remainBytes := make([]byte, 32-len(addrBytes))
- addrBytes = append(remainBytes, addrBytes...)
- var addrBytesToMake [32]byte
- copy(addrBytesToMake[:], addrBytes)
- testValuePool[Address] = append(testValuePool[Address], MakeAddress(addrBytesToMake))
- }
- for i := 1; i <= 100; i++ {
- for j := 0; j < 4; j++ {
- abiString := MakeString(gobberish.GenerateString(i))
- testValuePool[String] = append(testValuePool[String], abiString)
- }
- }
- // Array static
- generateStaticArray(t, &testValuePool)
- // Array dynamic
- generateDynamicArray(t, &testValuePool)
- // tuple generation
- generateTuples(t, &testValuePool, 8)
- // generate cascaded tuples
- generateTuples(t, &testValuePool, 9)
- // test tuple encode-decode round-trip
- for _, tuple := range testValuePool[Tuple] {
- t.Run("random tuple encode-decode test", func(t *testing.T) {
- encoded, err := tuple.Encode()
- require.NoError(t, err, "encode tuple should not have error")
- decoded, err := Decode(encoded, tuple.ABIType)
- require.NoError(t, err, "decode tuple should not have error")
- require.Equal(t, tuple, decoded, "encoded-decoded tuple should match with expected")
- })
- }
+ testValuePool := make(map[BaseType][]testUnit)
+ addPrimitiveRandomValues(t, &testValuePool)
+ addArrayRandomValues(t, &testValuePool)
+ addTupleRandomValues(t, String, &testValuePool)
+ addTupleRandomValues(t, Tuple, &testValuePool)
+ categorySelfRoundTripTest(t, testValuePool[Tuple])
}
diff --git a/data/abi/abi_json.go b/data/abi/abi_json.go
new file mode 100644
index 000000000..482419e6b
--- /dev/null
+++ b/data/abi/abi_json.go
@@ -0,0 +1,254 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/algorand/go-algorand/data/basics"
+ "math/big"
+)
+
+func castBigIntToNearestPrimitive(num *big.Int, bitSize uint16) (interface{}, error) {
+ if num.BitLen() > int(bitSize) {
+ return nil, fmt.Errorf("cast big int to nearest primitive failure: %v >= 2^%d", num, bitSize)
+ } else if num.Sign() < 0 {
+ return nil, fmt.Errorf("cannot cast big int to near primitive: %v < 0", num)
+ }
+
+ switch bitSize / 8 {
+ case 1:
+ return uint8(num.Uint64()), nil
+ case 2:
+ return uint16(num.Uint64()), nil
+ case 3, 4:
+ return uint32(num.Uint64()), nil
+ case 5, 6, 7, 8:
+ return num.Uint64(), nil
+ default:
+ return num, nil
+ }
+}
+
+// MarshalToJSON convert golang value to JSON format from ABI type
+func (t Type) MarshalToJSON(value interface{}) ([]byte, error) {
+ switch t.abiTypeID {
+ case Uint:
+ bytesUint, err := encodeInt(value, t.bitSize)
+ if err != nil {
+ return nil, err
+ }
+ return new(big.Int).SetBytes(bytesUint).MarshalJSON()
+ case Ufixed:
+ denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
+ encodedUint, err := encodeInt(value, t.bitSize)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(new(big.Rat).SetFrac(new(big.Int).SetBytes(encodedUint), denom).FloatString(int(t.precision))), nil
+ case Bool:
+ boolValue, ok := value.(bool)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer to bool for marshal to JSON")
+ }
+ return json.Marshal(boolValue)
+ case Byte:
+ byteValue, ok := value.(byte)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer to byte for marshal to JSON")
+ }
+ return json.Marshal(byteValue)
+ case Address:
+ var addressInternal basics.Address
+ switch valueCasted := value.(type) {
+ case []byte:
+ copy(addressInternal[:], valueCasted[:])
+ return json.Marshal(addressInternal.String())
+ case [addressByteSize]byte:
+ addressInternal = valueCasted
+ return json.Marshal(addressInternal.String())
+ default:
+ return nil, fmt.Errorf("cannot infer to byte slice/array for marshal to JSON")
+ }
+ case ArrayStatic, ArrayDynamic:
+ values, err := inferToSlice(value)
+ if err != nil {
+ return nil, err
+ }
+ if t.abiTypeID == ArrayStatic && int(t.staticLength) != len(values) {
+ return nil, fmt.Errorf("length of slice %d != type specific length %d", len(values), t.staticLength)
+ }
+ if t.childTypes[0].abiTypeID == Byte {
+ byteArr := make([]byte, len(values))
+ for i := 0; i < len(values); i++ {
+ tempByte, ok := values[i].(byte)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer byte element from slice")
+ }
+ byteArr[i] = tempByte
+ }
+ return json.Marshal(byteArr)
+ }
+ rawMsgSlice := make([]json.RawMessage, len(values))
+ for i := 0; i < len(values); i++ {
+ rawMsgSlice[i], err = t.childTypes[0].MarshalToJSON(values[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return json.Marshal(rawMsgSlice)
+ case String:
+ stringVal, ok := value.(string)
+ if !ok {
+ return nil, fmt.Errorf("cannot infer to string for marshal to JSON")
+ }
+ return json.Marshal(stringVal)
+ case Tuple:
+ values, err := inferToSlice(value)
+ if err != nil {
+ return nil, err
+ }
+ if len(values) != int(t.staticLength) {
+ return nil, fmt.Errorf("tuple element number != value slice length")
+ }
+ rawMsgSlice := make([]json.RawMessage, len(values))
+ for i := 0; i < len(values); i++ {
+ rawMsgSlice[i], err = t.childTypes[i].MarshalToJSON(values[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return json.Marshal(rawMsgSlice)
+ default:
+ return nil, fmt.Errorf("cannot infer ABI type for marshalling value to JSON")
+ }
+}
+
+// UnmarshalFromJSON convert bytes to golang value following ABI type and encoding rules
+func (t Type) UnmarshalFromJSON(jsonEncoded []byte) (interface{}, error) {
+ switch t.abiTypeID {
+ case Uint:
+ num := new(big.Int)
+ if err := num.UnmarshalJSON(jsonEncoded); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to uint: %v", string(jsonEncoded), err)
+ }
+ return castBigIntToNearestPrimitive(num, t.bitSize)
+ case Ufixed:
+ floatTemp := new(big.Rat)
+ if err := floatTemp.UnmarshalText(jsonEncoded); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: %v", string(jsonEncoded), err)
+ }
+ denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
+ denomRat := new(big.Rat).SetInt(denom)
+ numeratorRat := new(big.Rat).Mul(denomRat, floatTemp)
+ if !numeratorRat.IsInt() {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: precision out of range", string(jsonEncoded))
+ }
+ return castBigIntToNearestPrimitive(numeratorRat.Num(), t.bitSize)
+ case Bool:
+ var elem bool
+ if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bool: %v", string(jsonEncoded), err)
+ }
+ return elem, nil
+ case Byte:
+ var elem byte
+ if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded to byte: %v", err)
+ }
+ return elem, nil
+ case Address:
+ var addrStr string
+ if err := json.Unmarshal(jsonEncoded, &addrStr); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded to string: %v", err)
+ }
+ addr, err := basics.UnmarshalChecksumAddress(addrStr)
+ if err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to address: %v", string(jsonEncoded), err)
+ }
+ return addr[:], nil
+ case ArrayStatic, ArrayDynamic:
+ if t.childTypes[0].abiTypeID == Byte && bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
+ var byteArr []byte
+ err := json.Unmarshal(jsonEncoded, &byteArr)
+ if err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bytes: %v", string(jsonEncoded), err)
+ }
+ if t.abiTypeID == ArrayStatic && len(byteArr) != int(t.staticLength) {
+ return nil, fmt.Errorf("length of slice %d != type specific length %d", len(byteArr), t.staticLength)
+ }
+ outInterface := make([]interface{}, len(byteArr))
+ for i := 0; i < len(byteArr); i++ {
+ outInterface[i] = byteArr[i]
+ }
+ return outInterface, nil
+ }
+ var elems []json.RawMessage
+ if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array: %v", string(jsonEncoded), err)
+ }
+ if t.abiTypeID == ArrayStatic && len(elems) != int(t.staticLength) {
+ return nil, fmt.Errorf("JSON array element number != ABI array elem number")
+ }
+ values := make([]interface{}, len(elems))
+ for i := 0; i < len(elems); i++ {
+ tempValue, err := t.childTypes[0].UnmarshalFromJSON(elems[i])
+ if err != nil {
+ return nil, err
+ }
+ values[i] = tempValue
+ }
+ return values, nil
+ case String:
+ stringEncoded := string(jsonEncoded)
+ if bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
+ var stringVar string
+ if err := json.Unmarshal(jsonEncoded, &stringVar); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
+ }
+ return stringVar, nil
+ } else if bytes.HasPrefix(jsonEncoded, []byte{'['}) {
+ var elems []byte
+ if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
+ }
+ return string(elems), nil
+ } else {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string", stringEncoded)
+ }
+ case Tuple:
+ var elems []json.RawMessage
+ if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
+ return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array for tuple: %v", string(jsonEncoded), err)
+ }
+ if len(elems) != int(t.staticLength) {
+ return nil, fmt.Errorf("JSON array element number != ABI tuple elem number")
+ }
+ values := make([]interface{}, len(elems))
+ for i := 0; i < len(elems); i++ {
+ tempValue, err := t.childTypes[i].UnmarshalFromJSON(elems[i])
+ if err != nil {
+ return nil, err
+ }
+ values[i] = tempValue
+ }
+ return values, nil
+ default:
+ return nil, fmt.Errorf("cannot cast JSON encoded %s to ABI encoding stuff", string(jsonEncoded))
+ }
+}
diff --git a/data/abi/abi_json_test.go b/data/abi/abi_json_test.go
new file mode 100644
index 000000000..d65e3c10a
--- /dev/null
+++ b/data/abi/abi_json_test.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestJSONtoInterfaceValid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var testCases = []struct {
+ input string
+ typeStr string
+ expected interface{}
+ }{
+ {
+ input: `[true, [0, 1, 2], 17]`,
+ typeStr: `(bool,byte[],uint64)`,
+ expected: []interface{}{
+ true,
+ []interface{}{byte(0), byte(1), byte(2)},
+ uint64(17),
+ },
+ },
+ {
+ input: `[true, "AAEC", 17]`,
+ typeStr: `(bool,byte[],uint64)`,
+ expected: []interface{}{
+ true,
+ []interface{}{byte(0), byte(1), byte(2)},
+ uint64(17),
+ },
+ },
+ {
+ input: `"AQEEBQEE"`,
+ typeStr: `byte[6]`,
+ expected: []interface{}{byte(1), byte(1), byte(4), byte(5), byte(1), byte(4)},
+ },
+ {
+ input: `[[0, [true, false], "utf-8"], [18446744073709551615, [false, true], "pistachio"]]`,
+ typeStr: `(uint64,bool[2],string)[]`,
+ expected: []interface{}{
+ []interface{}{uint64(0), []interface{}{true, false}, "utf-8"},
+ []interface{}{^uint64(0), []interface{}{false, true}, "pistachio"},
+ },
+ },
+ {
+ input: `[]`,
+ typeStr: `(uint64,bool[2],string)[]`,
+ expected: []interface{}{},
+ },
+ {
+ input: "[]",
+ typeStr: "()",
+ expected: []interface{}{},
+ },
+ {
+ input: "[65, 66, 67]",
+ typeStr: "string",
+ expected: "ABC",
+ },
+ {
+ input: "[]",
+ typeStr: "string",
+ expected: "",
+ },
+ {
+ input: "123.456",
+ typeStr: "ufixed64x3",
+ expected: uint64(123456),
+ },
+ {
+ input: `"optin"`,
+ typeStr: "string",
+ expected: "optin",
+ },
+ {
+ input: `"AAEC"`,
+ typeStr: "byte[3]",
+ expected: []interface{}{byte(0), byte(1), byte(2)},
+ },
+ {
+ input: `["uwu",["AAEC",12.34]]`,
+ typeStr: "(string,(byte[3],ufixed64x3))",
+ expected: []interface{}{"uwu", []interface{}{[]interface{}{byte(0), byte(1), byte(2)}, uint64(12340)}},
+ },
+ {
+ input: `[399,"should pass",[true,false,false,true]]`,
+ typeStr: "(uint64,string,bool[])",
+ expected: []interface{}{uint64(399), "should pass", []interface{}{true, false, false, true}},
+ },
+ }
+
+ for _, testCase := range testCases {
+ abiT, err := TypeOf(testCase.typeStr)
+ require.NoError(t, err, "fail to construct ABI type (%s): %v", testCase.typeStr, err)
+ res, err := abiT.UnmarshalFromJSON([]byte(testCase.input))
+ require.NoError(t, err, "fail to unmarshal JSON to interface: (%s): %v", testCase.input, err)
+ require.Equal(t, testCase.expected, res, "%v not matching with expected value %v", res, testCase.expected)
+ resEncoded, err := abiT.Encode(res)
+ require.NoError(t, err, "fail to encode %v to ABI bytes: %v", res, err)
+ resDecoded, err := abiT.Decode(resEncoded)
+ require.NoError(t, err, "fail to decode ABI bytes of %v: %v", res, err)
+ require.Equal(t, res, resDecoded, "ABI encode-decode round trip: %v not match with expected %v", resDecoded, res)
+ }
+}
diff --git a/data/abi/abi_type.go b/data/abi/abi_type.go
index 65e3dfc6a..eb93f9eea 100644
--- a/data/abi/abi_type.go
+++ b/data/abi/abi_type.go
@@ -84,15 +84,15 @@ type Type struct {
func (t Type) String() string {
switch t.abiTypeID {
case Uint:
- return "uint" + strconv.Itoa(int(t.bitSize))
+ return fmt.Sprintf("uint%d", t.bitSize)
case Byte:
return "byte"
case Ufixed:
- return "ufixed" + strconv.Itoa(int(t.bitSize)) + "x" + strconv.Itoa(int(t.precision))
+ return fmt.Sprintf("ufixed%dx%d", t.bitSize, t.precision)
case Bool:
return "bool"
case ArrayStatic:
- return t.childTypes[0].String() + "[" + strconv.Itoa(int(t.staticLength)) + "]"
+ return fmt.Sprintf("%s[%d]", t.childTypes[0].String(), t.staticLength)
case Address:
return "address"
case ArrayDynamic:
@@ -110,31 +110,19 @@ func (t Type) String() string {
}
}
-var staticArrayRegexp *regexp.Regexp = nil
-var ufixedRegexp *regexp.Regexp = nil
+var staticArrayRegexp = regexp.MustCompile(`^([a-z\d\[\](),]+)\[([1-9][\d]*)]$`)
+var ufixedRegexp = regexp.MustCompile(`^ufixed([1-9][\d]*)x([1-9][\d]*)$`)
-func init() {
- var err error
- // Note that we allow only decimal static array length
- staticArrayRegexp, err = regexp.Compile(`^([a-z\d\[\](),]+)\[([1-9][\d]*)]$`)
- if err != nil {
- panic(err.Error())
- }
- ufixedRegexp, err = regexp.Compile(`^ufixed([1-9][\d]*)x([1-9][\d]*)$`)
- if err != nil {
- panic(err.Error())
- }
-}
-
-// TypeFromString de-serialize ABI type from a string following ABI encoding.
-func TypeFromString(str string) (Type, error) {
+// TypeOf parses an ABI type string.
+// For example: `TypeOf("(uint64,byte[])")`
+func TypeOf(str string) (Type, error) {
switch {
case strings.HasSuffix(str, "[]"):
- arrayArgType, err := TypeFromString(str[:len(str)-2])
+ arrayArgType, err := TypeOf(str[:len(str)-2])
if err != nil {
return Type{}, err
}
- return MakeDynamicArrayType(arrayArgType), nil
+ return makeDynamicArrayType(arrayArgType), nil
case strings.HasSuffix(str, "]"):
stringMatches := staticArrayRegexp.FindStringSubmatch(str)
// match the string itself, array element type, then array length
@@ -149,19 +137,19 @@ func TypeFromString(str string) (Type, error) {
return Type{}, err
}
// parse the array element type
- arrayType, err := TypeFromString(stringMatches[1])
+ arrayType, err := TypeOf(stringMatches[1])
if err != nil {
return Type{}, err
}
- return MakeStaticArrayType(arrayType, uint16(arrayLength)), nil
+ return makeStaticArrayType(arrayType, uint16(arrayLength)), nil
case strings.HasPrefix(str, "uint"):
typeSize, err := strconv.ParseUint(str[4:], 10, 16)
if err != nil {
return Type{}, fmt.Errorf("ill formed uint type: %s", str)
}
- return MakeUintType(uint16(typeSize))
+ return makeUintType(int(typeSize))
case str == "byte":
- return MakeByteType(), nil
+ return byteType, nil
case strings.HasPrefix(str, "ufixed"):
stringMatches := ufixedRegexp.FindStringSubmatch(str)
// match string itself, then type-bitSize, and type-precision
@@ -177,13 +165,13 @@ func TypeFromString(str string) (Type, error) {
if err != nil {
return Type{}, err
}
- return MakeUfixedType(uint16(ufixedSize), uint16(ufixedPrecision))
+ return makeUfixedType(int(ufixedSize), int(ufixedPrecision))
case str == "bool":
- return MakeBoolType(), nil
+ return boolType, nil
case str == "address":
- return MakeAddressType(), nil
+ return addressType, nil
case str == "string":
- return MakeStringType(), nil
+ return stringType, nil
case len(str) >= 2 && str[0] == '(' && str[len(str)-1] == ')':
tupleContent, err := parseTupleContent(str[1 : len(str)-1])
if err != nil {
@@ -191,7 +179,7 @@ func TypeFromString(str string) (Type, error) {
}
tupleTypes := make([]Type, len(tupleContent))
for i := 0; i < len(tupleContent); i++ {
- ti, err := TypeFromString(tupleContent[i])
+ ti, err := TypeOf(tupleContent[i])
if err != nil {
return Type{}, err
}
@@ -284,29 +272,36 @@ func parseTupleContent(str string) ([]string, error) {
return tupleStrSegs, nil
}
-// MakeUintType makes `Uint` ABI type by taking a type bitSize argument.
+// makeUintType makes `Uint` ABI type by taking a type bitSize argument.
// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
-func MakeUintType(typeSize uint16) (Type, error) {
+func makeUintType(typeSize int) (Type, error) {
if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
return Type{}, fmt.Errorf("unsupported uint type bitSize: %d", typeSize)
}
return Type{
abiTypeID: Uint,
- bitSize: typeSize,
+ bitSize: uint16(typeSize),
}, nil
}
-// MakeByteType makes `Byte` ABI type.
-func MakeByteType() Type {
- return Type{
- abiTypeID: Byte,
- }
-}
+var (
+ // byteType is ABI type constant for byte
+ byteType = Type{abiTypeID: Byte}
-// MakeUfixedType makes `UFixed` ABI type by taking type bitSize and type precision as arguments.
+ // boolType is ABI type constant for bool
+ boolType = Type{abiTypeID: Bool}
+
+ // addressType is ABI type constant for address
+ addressType = Type{abiTypeID: Address}
+
+ // stringType is ABI type constant for string
+ stringType = Type{abiTypeID: String}
+)
+
+// makeUfixedType makes `UFixed` ABI type by taking type bitSize and type precision as arguments.
// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
// The range of type precision is [1, 160].
-func MakeUfixedType(typeSize uint16, typePrecision uint16) (Type, error) {
+func makeUfixedType(typeSize int, typePrecision int) (Type, error) {
if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
return Type{}, fmt.Errorf("unsupported ufixed type bitSize: %d", typeSize)
}
@@ -315,21 +310,14 @@ func MakeUfixedType(typeSize uint16, typePrecision uint16) (Type, error) {
}
return Type{
abiTypeID: Ufixed,
- bitSize: typeSize,
- precision: typePrecision,
+ bitSize: uint16(typeSize),
+ precision: uint16(typePrecision),
}, nil
}
-// MakeBoolType makes `Bool` ABI type.
-func MakeBoolType() Type {
- return Type{
- abiTypeID: Bool,
- }
-}
-
-// MakeStaticArrayType makes static length array ABI type by taking
+// makeStaticArrayType makes static length array ABI type by taking
// array element type and array length as arguments.
-func MakeStaticArrayType(argumentType Type, arrayLength uint16) Type {
+func makeStaticArrayType(argumentType Type, arrayLength uint16) Type {
return Type{
abiTypeID: ArrayStatic,
childTypes: []Type{argumentType},
@@ -337,28 +325,14 @@ func MakeStaticArrayType(argumentType Type, arrayLength uint16) Type {
}
}
-// MakeAddressType makes `Address` ABI type.
-func MakeAddressType() Type {
- return Type{
- abiTypeID: Address,
- }
-}
-
-// MakeDynamicArrayType makes dynamic length array by taking array element type as argument.
-func MakeDynamicArrayType(argumentType Type) Type {
+// makeDynamicArrayType makes dynamic length array by taking array element type as argument.
+func makeDynamicArrayType(argumentType Type) Type {
return Type{
abiTypeID: ArrayDynamic,
childTypes: []Type{argumentType},
}
}
-// MakeStringType makes `String` ABI type.
-func MakeStringType() Type {
- return Type{
- abiTypeID: String,
- }
-}
-
// MakeTupleType makes tuple ABI type by taking an array of tuple element types as argument.
func MakeTupleType(argumentTypes []Type) (Type, error) {
if len(argumentTypes) >= math.MaxUint16 {
@@ -451,10 +425,7 @@ func (t Type) ByteLen() (int, error) {
return singleBoolSize, nil
case ArrayStatic:
if t.childTypes[0].abiTypeID == Bool {
- byteLen := int(t.staticLength) / 8
- if t.staticLength%8 != 0 {
- byteLen++
- }
+ byteLen := int(t.staticLength+7) / 8
return byteLen, nil
}
elemByteLen, err := t.childTypes[0].ByteLen()
@@ -472,10 +443,7 @@ func (t Type) ByteLen() (int, error) {
i += after
// get number of bool
boolNum := after + 1
- size += boolNum / 8
- if boolNum%8 != 0 {
- size++
- }
+ size += (boolNum + 7) / 8
} else {
childByteSize, err := t.childTypes[i].ByteLen()
if err != nil {
diff --git a/data/abi/abi_type_test.go b/data/abi/abi_type_test.go
index 136ecb8cf..f96dfaf06 100644
--- a/data/abi/abi_type_test.go
+++ b/data/abi/abi_type_test.go
@@ -32,21 +32,21 @@ func TestMakeTypeValid(t *testing.T) {
partitiontest.PartitionTest(t)
// uint
for i := 8; i <= 512; i += 8 {
- uintType, err := MakeUintType(uint16(i))
+ uintType, err := makeUintType(i)
require.NoError(t, err, "make uint type in valid space should not return error")
expected := "uint" + strconv.Itoa(i)
actual := uintType.String()
- require.Equal(t, expected, actual, "MakeUintType: expected %s, actual %s", expected, actual)
+ require.Equal(t, expected, actual, "makeUintType: expected %s, actual %s", expected, actual)
}
// ufixed
for i := 8; i <= 512; i += 8 {
for j := 1; j <= 160; j++ {
- ufixedType, err := MakeUfixedType(uint16(i), uint16(j))
+ ufixedType, err := makeUfixedType(i, j)
require.NoError(t, err, "make ufixed type in valid space should not return error")
expected := "ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j)
actual := ufixedType.String()
require.Equal(t, expected, actual,
- "TypeFromString ufixed error: expected %s, actual %s", expected, actual)
+ "TypeOf ufixed error: expected %s, actual %s", expected, actual)
}
}
// bool/strings/address/byte + dynamic/static array + tuple
@@ -55,13 +55,13 @@ func TestMakeTypeValid(t *testing.T) {
testType string
expected string
}{
- {input: MakeBoolType(), testType: "bool", expected: "bool"},
- {input: MakeStringType(), testType: "string", expected: "string"},
- {input: MakeAddressType(), testType: "address", expected: "address"},
- {input: MakeByteType(), testType: "byte", expected: "byte"},
+ {input: boolType, testType: "bool", expected: "bool"},
+ {input: stringType, testType: "string", expected: "string"},
+ {input: addressType, testType: "address", expected: "address"},
+ {input: byteType, testType: "byte", expected: "byte"},
// dynamic array
{
- input: MakeDynamicArrayType(
+ input: makeDynamicArrayType(
Type{
abiTypeID: Uint,
bitSize: uint16(32),
@@ -71,16 +71,16 @@ func TestMakeTypeValid(t *testing.T) {
expected: "uint32[]",
},
{
- input: MakeDynamicArrayType(
- MakeDynamicArrayType(
- MakeByteType(),
+ input: makeDynamicArrayType(
+ makeDynamicArrayType(
+ byteType,
),
),
testType: "dynamic array",
expected: "byte[][]",
},
{
- input: MakeStaticArrayType(
+ input: makeStaticArrayType(
Type{
abiTypeID: Ufixed,
bitSize: uint16(128),
@@ -92,9 +92,9 @@ func TestMakeTypeValid(t *testing.T) {
expected: "ufixed128x10[100]",
},
{
- input: MakeStaticArrayType(
- MakeStaticArrayType(
- MakeBoolType(),
+ input: makeStaticArrayType(
+ makeStaticArrayType(
+ boolType,
uint16(128),
),
uint16(256),
@@ -114,10 +114,10 @@ func TestMakeTypeValid(t *testing.T) {
{
abiTypeID: Tuple,
childTypes: []Type{
- MakeAddressType(),
- MakeByteType(),
- MakeStaticArrayType(MakeBoolType(), uint16(10)),
- MakeDynamicArrayType(
+ addressType,
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
+ makeDynamicArrayType(
Type{
abiTypeID: Ufixed,
bitSize: uint16(256),
@@ -127,7 +127,7 @@ func TestMakeTypeValid(t *testing.T) {
},
staticLength: 4,
},
- MakeDynamicArrayType(MakeByteType()),
+ makeDynamicArrayType(byteType),
},
staticLength: 3,
},
@@ -153,8 +153,8 @@ func TestMakeTypeInvalid(t *testing.T) {
randInput = rand.Uint32() % (1 << 16)
}
// note: if a var mod 8 = 0 (or not) in uint32, then it should mod 8 = 0 (or not) in uint16.
- _, err := MakeUintType(uint16(randInput))
- require.Error(t, err, "MakeUintType: should throw error on bitSize input %d", uint16(randInput))
+ _, err := makeUintType(int(randInput))
+ require.Error(t, err, "makeUintType: should throw error on bitSize input %d", uint16(randInput))
}
// ufixed
for i := 0; i <= 10000; i++ {
@@ -166,8 +166,8 @@ func TestMakeTypeInvalid(t *testing.T) {
for randPrecision >= 1 && randPrecision <= 160 {
randPrecision = rand.Uint32()
}
- _, err := MakeUfixedType(uint16(randSize), uint16(randPrecision))
- require.Error(t, err, "MakeUfixedType: should throw error on bitSize %d, precision %d", randSize, randPrecision)
+ _, err := makeUfixedType(int(randSize), int(randPrecision))
+ require.Error(t, err, "makeUfixedType: should throw error on bitSize %d, precision %d", randSize, randPrecision)
}
}
@@ -175,22 +175,22 @@ func TestTypeFromStringValid(t *testing.T) {
partitiontest.PartitionTest(t)
// uint
for i := 8; i <= 512; i += 8 {
- expected, err := MakeUintType(uint16(i))
+ expected, err := makeUintType(i)
require.NoError(t, err, "make uint type in valid space should not return error")
- actual, err := TypeFromString(expected.String())
- require.NoError(t, err, "TypeFromString: uint parsing error: %s", expected.String())
+ actual, err := TypeOf(expected.String())
+ require.NoError(t, err, "TypeOf: uint parsing error: %s", expected.String())
require.Equal(t, expected, actual,
- "TypeFromString: expected %s, actual %s", expected.String(), actual.String())
+ "TypeOf: expected %s, actual %s", expected.String(), actual.String())
}
// ufixed
for i := 8; i <= 512; i += 8 {
for j := 1; j <= 160; j++ {
- expected, err := MakeUfixedType(uint16(i), uint16(j))
+ expected, err := makeUfixedType(i, j)
require.NoError(t, err, "make ufixed type in valid space should not return error")
- actual, err := TypeFromString("ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j))
- require.NoError(t, err, "TypeFromString ufixed parsing error: %s", expected.String())
+ actual, err := TypeOf("ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j))
+ require.NoError(t, err, "TypeOf ufixed parsing error: %s", expected.String())
require.Equal(t, expected, actual,
- "TypeFromString ufixed: expected %s, actual %s", expected.String(), actual.String())
+ "TypeOf ufixed: expected %s, actual %s", expected.String(), actual.String())
}
}
var testcases = []struct {
@@ -198,19 +198,19 @@ func TestTypeFromStringValid(t *testing.T) {
testType string
expected Type
}{
- {input: MakeBoolType().String(), testType: "bool", expected: MakeBoolType()},
- {input: MakeStringType().String(), testType: "string", expected: MakeStringType()},
- {input: MakeAddressType().String(), testType: "address", expected: MakeAddressType()},
- {input: MakeByteType().String(), testType: "byte", expected: MakeByteType()},
+ {input: boolType.String(), testType: "bool", expected: boolType},
+ {input: stringType.String(), testType: "string", expected: stringType},
+ {input: addressType.String(), testType: "address", expected: addressType},
+ {input: byteType.String(), testType: "byte", expected: byteType},
{
input: "uint256[]",
testType: "dynamic array",
- expected: MakeDynamicArrayType(Type{abiTypeID: Uint, bitSize: 256}),
+ expected: makeDynamicArrayType(Type{abiTypeID: Uint, bitSize: 256}),
},
{
input: "ufixed256x64[]",
testType: "dynamic array",
- expected: MakeDynamicArrayType(
+ expected: makeDynamicArrayType(
Type{
abiTypeID: Ufixed,
bitSize: 256,
@@ -221,11 +221,11 @@ func TestTypeFromStringValid(t *testing.T) {
{
input: "byte[][][][]",
testType: "dynamic array",
- expected: MakeDynamicArrayType(
- MakeDynamicArrayType(
- MakeDynamicArrayType(
- MakeDynamicArrayType(
- MakeByteType(),
+ expected: makeDynamicArrayType(
+ makeDynamicArrayType(
+ makeDynamicArrayType(
+ makeDynamicArrayType(
+ byteType,
),
),
),
@@ -235,16 +235,16 @@ func TestTypeFromStringValid(t *testing.T) {
{
input: "address[100]",
testType: "static array",
- expected: MakeStaticArrayType(
- MakeAddressType(),
+ expected: makeStaticArrayType(
+ addressType,
uint16(100),
),
},
{
input: "uint64[][200]",
testType: "static array",
- expected: MakeStaticArrayType(
- MakeDynamicArrayType(
+ expected: makeStaticArrayType(
+ makeDynamicArrayType(
Type{abiTypeID: Uint, bitSize: uint16(64)},
),
uint16(200),
@@ -273,10 +273,10 @@ func TestTypeFromStringValid(t *testing.T) {
{
abiTypeID: Tuple,
childTypes: []Type{
- MakeAddressType(),
- MakeByteType(),
- MakeStaticArrayType(MakeBoolType(), uint16(10)),
- MakeDynamicArrayType(
+ addressType,
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
+ makeDynamicArrayType(
Type{
abiTypeID: Ufixed,
bitSize: uint16(256),
@@ -286,7 +286,7 @@ func TestTypeFromStringValid(t *testing.T) {
},
staticLength: 4,
},
- MakeDynamicArrayType(MakeByteType()),
+ makeDynamicArrayType(byteType),
},
staticLength: 3,
},
@@ -304,13 +304,13 @@ func TestTypeFromStringValid(t *testing.T) {
{
abiTypeID: Tuple,
childTypes: []Type{
- MakeAddressType(),
- MakeByteType(),
- MakeStaticArrayType(MakeBoolType(), uint16(10)),
+ addressType,
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
{
abiTypeID: Tuple,
childTypes: []Type{
- MakeDynamicArrayType(
+ makeDynamicArrayType(
Type{
abiTypeID: Ufixed,
bitSize: uint16(256),
@@ -346,13 +346,13 @@ func TestTypeFromStringValid(t *testing.T) {
{
abiTypeID: Tuple,
childTypes: []Type{
- MakeAddressType(),
+ addressType,
{
abiTypeID: Tuple,
childTypes: []Type{
- MakeByteType(),
- MakeStaticArrayType(MakeBoolType(), uint16(10)),
- MakeDynamicArrayType(
+ byteType,
+ makeStaticArrayType(boolType, uint16(10)),
+ makeDynamicArrayType(
Type{
abiTypeID: Ufixed,
bitSize: uint16(256),
@@ -371,9 +371,9 @@ func TestTypeFromStringValid(t *testing.T) {
},
}
for _, testcase := range testcases {
- t.Run(fmt.Sprintf("TypeFromString test %s", testcase.testType), func(t *testing.T) {
- actual, err := TypeFromString(testcase.input)
- require.NoError(t, err, "TypeFromString %s parsing error", testcase.testType)
+ t.Run(fmt.Sprintf("TypeOf test %s", testcase.testType), func(t *testing.T) {
+ actual, err := TypeOf(testcase.input)
+ require.NoError(t, err, "TypeOf %s parsing error", testcase.testType)
require.Equal(t, testcase.expected, actual, "TestFromString %s: expected %s, actual %s",
testcase.testType, testcase.expected.String(), actual.String())
})
@@ -388,8 +388,8 @@ func TestTypeFromStringInvalid(t *testing.T) {
randSize = rand.Uint64()
}
errorInput := "uint" + strconv.FormatUint(randSize, 10)
- _, err := TypeFromString(errorInput)
- require.Error(t, err, "MakeUintType: should throw error on bitSize input %d", randSize)
+ _, err := TypeOf(errorInput)
+ require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
}
for i := 0; i <= 10000; i++ {
randSize := rand.Uint64()
@@ -401,8 +401,8 @@ func TestTypeFromStringInvalid(t *testing.T) {
randPrecision = rand.Uint64()
}
errorInput := "ufixed" + strconv.FormatUint(randSize, 10) + "x" + strconv.FormatUint(randPrecision, 10)
- _, err := TypeFromString(errorInput)
- require.Error(t, err, "MakeUintType: should throw error on bitSize input %d", randSize)
+ _, err := TypeOf(errorInput)
+ require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
}
var testcases = []string{
// uint
@@ -442,8 +442,8 @@ func TestTypeFromStringInvalid(t *testing.T) {
"((byte),,(byte))",
}
for _, testcase := range testcases {
- t.Run(fmt.Sprintf("TypeFromString dynamic array test %s", testcase), func(t *testing.T) {
- _, err := TypeFromString(testcase)
+ t.Run(fmt.Sprintf("TypeOf dynamic array test %s", testcase), func(t *testing.T) {
+ _, err := TypeOf(testcase)
require.Error(t, err, "%s should throw error", testcase)
})
}
@@ -474,27 +474,27 @@ func TestTypeMISC(t *testing.T) {
rand.Seed(time.Now().Unix())
var testpool = []Type{
- MakeBoolType(),
- MakeAddressType(),
- MakeStringType(),
- MakeByteType(),
+ boolType,
+ addressType,
+ stringType,
+ byteType,
}
for i := 8; i <= 512; i += 8 {
- uintT, err := MakeUintType(uint16(i))
+ uintT, err := makeUintType(i)
require.NoError(t, err, "make uint type error")
testpool = append(testpool, uintT)
}
for i := 8; i <= 512; i += 8 {
for j := 1; j <= 160; j++ {
- ufixedT, err := MakeUfixedType(uint16(i), uint16(j))
+ ufixedT, err := makeUfixedType(i, j)
require.NoError(t, err, "make ufixed type error: bitSize %d, precision %d", i, j)
testpool = append(testpool, ufixedT)
}
}
for _, testcase := range testpool {
- testpool = append(testpool, MakeDynamicArrayType(testcase))
- testpool = append(testpool, MakeStaticArrayType(testcase, 10))
- testpool = append(testpool, MakeStaticArrayType(testcase, 20))
+ testpool = append(testpool, makeDynamicArrayType(testcase))
+ testpool = append(testpool, makeStaticArrayType(testcase, 10))
+ testpool = append(testpool, makeStaticArrayType(testcase, 20))
}
for _, testcase := range testpool {
@@ -545,13 +545,13 @@ func TestTypeMISC(t *testing.T) {
isDynamicCount++
}
- addressByteLen, err := MakeAddressType().ByteLen()
+ addressByteLen, err := addressType.ByteLen()
require.NoError(t, err, "address type bytelen should not return error")
require.Equal(t, 32, addressByteLen, "address type bytelen should be 32")
- byteByteLen, err := MakeByteType().ByteLen()
+ byteByteLen, err := byteType.ByteLen()
require.NoError(t, err, "byte type bytelen should not return error")
require.Equal(t, 1, byteByteLen, "byte type bytelen should be 1")
- boolByteLen, err := MakeBoolType().ByteLen()
+ boolByteLen, err := boolType.ByteLen()
require.NoError(t, err, "bool type bytelen should be 1")
require.Equal(t, 1, boolByteLen, "bool type bytelen should be 1")
diff --git a/data/abi/abi_value.go b/data/abi/abi_value.go
deleted file mode 100644
index 9f72ba755..000000000
--- a/data/abi/abi_value.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "fmt"
- "math"
- "math/big"
-)
-
-// Value struct is the ABI Value, holding ABI Type information and the ABI value representation.
-type Value struct {
- ABIType Type
- value interface{}
-}
-
-// MakeUint8 takes a go `uint8` and gives an ABI Value of ABI type `uint8`.
-func MakeUint8(value uint8) Value {
- bigInt := big.NewInt(int64(value))
- res, _ := MakeUint(bigInt, 8)
- return res
-}
-
-// MakeUint16 takes a go `uint16` and gives an ABI Value of ABI type `uint16`.
-func MakeUint16(value uint16) Value {
- bigInt := big.NewInt(int64(value))
- res, _ := MakeUint(bigInt, 16)
- return res
-}
-
-// MakeUint32 takes a go `uint32` and gives an ABI Value of ABI type `uint32`.
-func MakeUint32(value uint32) Value {
- bigInt := big.NewInt(int64(value))
- res, _ := MakeUint(bigInt, 32)
- return res
-}
-
-// MakeUint64 takes a go `uint64` and gives an ABI Value of ABI type `uint64`.
-func MakeUint64(value uint64) Value {
- bigInt := new(big.Int).SetUint64(value)
- res, _ := MakeUint(bigInt, 64)
- return res
-}
-
-// MakeUint takes a big integer representation and a type bitSize,
-// and returns an ABI Value of ABI Uint<bitSize> type.
-func MakeUint(value *big.Int, size uint16) (Value, error) {
- typeUint, err := MakeUintType(size)
- if err != nil {
- return Value{}, err
- }
- upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(size))
- if value.Cmp(upperLimit) >= 0 {
- return Value{}, fmt.Errorf("passed value larger than uint bitSize %d", size)
- }
- return Value{
- ABIType: typeUint,
- value: value,
- }, nil
-}
-
-// MakeUfixed takes a big integer representation, a type bitSize, and a type precision,
-// and returns an ABI Value of ABI UFixed<bitSize>x<precision>
-func MakeUfixed(value *big.Int, size uint16, precision uint16) (Value, error) {
- ufixedValueType, err := MakeUfixedType(size, precision)
- if err != nil {
- return Value{}, err
- }
- uintVal, err := MakeUint(value, size)
- if err != nil {
- return Value{}, err
- }
- uintVal.ABIType = ufixedValueType
- return uintVal, nil
-}
-
-// MakeString takes a string and returns an ABI String type Value.
-func MakeString(value string) Value {
- return Value{
- ABIType: MakeStringType(),
- value: value,
- }
-}
-
-// MakeByte takes a byte and returns an ABI Byte type value.
-func MakeByte(value byte) Value {
- return Value{
- ABIType: MakeByteType(),
- value: value,
- }
-}
-
-// MakeAddress takes an [32]byte array and returns an ABI Address type value.
-func MakeAddress(value [32]byte) Value {
- return Value{
- ABIType: MakeAddressType(),
- value: value,
- }
-}
-
-// MakeDynamicArray takes an array of ABI value (can be empty) and element type,
-// returns an ABI dynamic length array value.
-func MakeDynamicArray(values []Value, elemType Type) (Value, error) {
- if len(values) >= math.MaxUint16 {
- return Value{}, fmt.Errorf("dynamic array make error: pass in array length larger than maximum of uint16")
- }
- for i := 0; i < len(values); i++ {
- if !values[i].ABIType.Equal(elemType) {
- return Value{}, fmt.Errorf("type mismatch: %s and %s",
- values[i].ABIType.String(), elemType.String())
- }
- }
- return Value{
- ABIType: MakeDynamicArrayType(elemType),
- value: values,
- }, nil
-}
-
-// MakeStaticArray takes an array of ABI value and returns an ABI static length array value.
-func MakeStaticArray(values []Value) (Value, error) {
- if len(values) >= math.MaxUint16 {
- return Value{}, fmt.Errorf("static array make error: pass in array length larger than maximum of uint16")
- } else if len(values) == 0 {
- return Value{}, fmt.Errorf("static array make error: 0 array element passed in")
- }
- for i := 0; i < len(values); i++ {
- if !values[i].ABIType.Equal(values[0].ABIType) {
- return Value{}, fmt.Errorf("type mismatch: %s and %s",
- values[i].ABIType.String(), values[0].ABIType.String())
- }
- }
- return Value{
- ABIType: MakeStaticArrayType(values[0].ABIType, uint16(len(values))),
- value: values,
- }, nil
-}
-
-// MakeTuple takes an array of ABI values and returns an ABI tuple value.
-func MakeTuple(values []Value) (Value, error) {
- if len(values) >= math.MaxUint16 {
- return Value{}, fmt.Errorf("tuple make error: pass in tuple length larger than maximum of uint16")
- }
- tupleType := make([]Type, len(values))
- for i := 0; i < len(values); i++ {
- tupleType[i] = values[i].ABIType
- }
-
- castedTupleType, err := MakeTupleType(tupleType)
- if err != nil {
- return Value{}, err
- }
-
- return Value{
- ABIType: castedTupleType,
- value: values,
- }, nil
-}
-
-// MakeBool takes a boolean value and returns an ABI bool value.
-func MakeBool(value bool) Value {
- return Value{
- ABIType: MakeBoolType(),
- value: value,
- }
-}
-
-func checkUintValid(t Type, bitSize uint16) bool {
- return t.abiTypeID == Uint && t.bitSize <= bitSize
-}
-
-// GetUint8 tries to retreve an uint8 from an ABI Value.
-func (v Value) GetUint8() (uint8, error) {
- if !checkUintValid(v.ABIType, 8) {
- return 0, fmt.Errorf("value type mismatch or bitSize too large")
- }
- bigIntForm, err := v.GetUint()
- if err != nil {
- return 0, err
- }
- return uint8(bigIntForm.Uint64()), nil
-}
-
-// GetUint16 tries to retrieve an uint16 from an ABI Value.
-func (v Value) GetUint16() (uint16, error) {
- if !checkUintValid(v.ABIType, 16) {
- return 0, fmt.Errorf("value type mismatch or bitSize too large")
- }
- bigIntForm, err := v.GetUint()
- if err != nil {
- return 0, err
- }
- return uint16(bigIntForm.Uint64()), nil
-}
-
-// GetUint32 tries to retrieve an uint32 from an ABI Value.
-func (v Value) GetUint32() (uint32, error) {
- if !checkUintValid(v.ABIType, 32) {
- return 0, fmt.Errorf("value type mismatch or bitSize too large")
- }
- bigIntForm, err := v.GetUint()
- if err != nil {
- return 0, err
- }
- return uint32(bigIntForm.Uint64()), nil
-}
-
-// GetUint64 tries to retrieve an uint64 from an ABI Value.
-func (v Value) GetUint64() (uint64, error) {
- if !checkUintValid(v.ABIType, 64) {
- return 0, fmt.Errorf("value type mismatch or bitSize too large")
- }
- bigIntForm, err := v.GetUint()
- if err != nil {
- return 0, err
- }
- return bigIntForm.Uint64(), nil
-}
-
-// GetUint tries to retrieve an big uint from an ABI Value.
-func (v Value) GetUint() (*big.Int, error) {
- if v.ABIType.abiTypeID != Uint {
- return nil, fmt.Errorf("value type mismatch")
- }
- bigIntForm := v.value.(*big.Int)
- sizeThreshold := new(big.Int).Lsh(big.NewInt(1), uint(v.ABIType.bitSize))
- if sizeThreshold.Cmp(bigIntForm) <= 0 {
- return nil, fmt.Errorf("value exceeds uint bitSize scope")
- }
- return bigIntForm, nil
-}
-
-// GetUfixed tries to retrieve an big integer number from an ABI Value.
-func (v Value) GetUfixed() (*big.Int, error) {
- if v.ABIType.abiTypeID != Ufixed {
- return nil, fmt.Errorf("value type mismatch, should be ufixed")
- }
- bigIntForm := v.value.(*big.Int)
- sizeThreshold := new(big.Int).Lsh(big.NewInt(1), uint(v.ABIType.bitSize))
- if sizeThreshold.Cmp(bigIntForm) <= 0 {
- return nil, fmt.Errorf("value exceeds ufixed bitSize scope")
- }
- return bigIntForm, nil
-}
-
-// GetString tries to retrieve a string from ABI Value.
-func (v Value) GetString() (string, error) {
- if v.ABIType.abiTypeID != String {
- return "", fmt.Errorf("value type mismatch, should be ufixed")
- }
- stringForm := v.value.(string)
- return stringForm, nil
-}
-
-// GetByte tries to retrieve a byte from ABI Value.
-func (v Value) GetByte() (byte, error) {
- if v.ABIType.abiTypeID != Byte {
- return byte(0), fmt.Errorf("value type mismatch, should be bytes")
- }
- bytesForm := v.value.(byte)
- return bytesForm, nil
-}
-
-// GetAddress tries to retrieve a [32]byte array from ABI Value.
-func (v Value) GetAddress() ([32]byte, error) {
- if v.ABIType.abiTypeID != Address {
- return [32]byte{}, fmt.Errorf("value type mismatch, should be address")
- }
- addressForm := v.value.([32]byte)
- return addressForm, nil
-}
-
-// GetValueByIndex retrieve value element by the index passed in
-func (v Value) GetValueByIndex(index uint16) (Value, error) {
- switch v.ABIType.abiTypeID {
- case ArrayDynamic:
- elements := v.value.([]Value)
- if len(elements) <= int(index) {
- return Value{}, fmt.Errorf("cannot get element: index out of scope")
- }
- return elements[index], nil
- case ArrayStatic, Tuple:
- elements := v.value.([]Value)
- if v.ABIType.staticLength <= index {
- return Value{}, fmt.Errorf("cannot get element: index out of scope")
- }
- return elements[index], nil
- default:
- return Value{}, fmt.Errorf("cannot get value by index for non array-like type")
- }
-}
-
-// GetBool tries to retrieve a boolean value from the ABI Value.
-func (v Value) GetBool() (bool, error) {
- if v.ABIType.abiTypeID != Bool {
- return false, fmt.Errorf("value type mismatch, should be bool")
- }
- boolForm := v.value.(bool)
- return boolForm, nil
-}
diff --git a/data/account/msgp_gen.go b/data/account/msgp_gen.go
new file mode 100644
index 000000000..8f6a96fd7
--- /dev/null
+++ b/data/account/msgp_gen.go
@@ -0,0 +1,238 @@
+package account
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "github.com/algorand/msgp/msgp"
+)
+
+// The following msgp objects are implemented in this file:
+// ParticipationKeyIdentity
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+
+// MarshalMsg implements msgp.Marshaler
+func (z *ParticipationKeyIdentity) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(6)
+ var zb0001Mask uint8 /* 7 bits */
+ if (*z).Parent.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).FirstValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).KeyDilution == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).LastValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if (*z).VoteID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x20
+ }
+ if (*z).VRFSK.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x40
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "addr"
+ o = append(o, 0xa4, 0x61, 0x64, 0x64, 0x72)
+ o = (*z).Parent.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "fv"
+ o = append(o, 0xa2, 0x66, 0x76)
+ o = (*z).FirstValid.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "kd"
+ o = append(o, 0xa2, 0x6b, 0x64)
+ o = msgp.AppendUint64(o, (*z).KeyDilution)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "lv"
+ o = append(o, 0xa2, 0x6c, 0x76)
+ o = (*z).LastValid.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x20) == 0 { // if not empty
+ // string "vote-id"
+ o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x2d, 0x69, 0x64)
+ o = (*z).VoteID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x40) == 0 { // if not empty
+ // string "vrfsk"
+ o = append(o, 0xa5, 0x76, 0x72, 0x66, 0x73, 0x6b)
+ o = (*z).VRFSK.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *ParticipationKeyIdentity) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationKeyIdentity)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ParticipationKeyIdentity) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Parent.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Parent")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VRFSK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VRFSK")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).FirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "FirstValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).LastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KeyDilution")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = ParticipationKeyIdentity{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "addr":
+ bts, err = (*z).Parent.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Parent")
+ return
+ }
+ case "vrfsk":
+ bts, err = (*z).VRFSK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VRFSK")
+ return
+ }
+ case "vote-id":
+ bts, err = (*z).VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteID")
+ return
+ }
+ case "fv":
+ bts, err = (*z).FirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "FirstValid")
+ return
+ }
+ case "lv":
+ bts, err = (*z).LastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastValid")
+ return
+ }
+ case "kd":
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KeyDilution")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ParticipationKeyIdentity) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationKeyIdentity)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ParticipationKeyIdentity) Msgsize() (s int) {
+ s = 1 + 5 + (*z).Parent.Msgsize() + 6 + (*z).VRFSK.Msgsize() + 8 + (*z).VoteID.Msgsize() + 3 + (*z).FirstValid.Msgsize() + 3 + (*z).LastValid.Msgsize() + 3 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ParticipationKeyIdentity) MsgIsZero() bool {
+ return ((*z).Parent.MsgIsZero()) && ((*z).VRFSK.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).FirstValid.MsgIsZero()) && ((*z).LastValid.MsgIsZero()) && ((*z).KeyDilution == 0)
+}
diff --git a/data/pooldata/msgp_gen_test.go b/data/account/msgp_gen_test.go
index 68ed008d0..a8927e790 100644
--- a/data/pooldata/msgp_gen_test.go
+++ b/data/account/msgp_gen_test.go
@@ -1,6 +1,6 @@
// +build !skip_msgp_testing
-package pooldata
+package account
// Code generated by github.com/algorand/msgp DO NOT EDIT.
@@ -12,9 +12,9 @@ import (
"github.com/algorand/msgp/msgp"
)
-func TestMarshalUnmarshalSignedTxnSlice(t *testing.T) {
+func TestMarshalUnmarshalParticipationKeyIdentity(t *testing.T) {
partitiontest.PartitionTest(t)
- v := SignedTxnSlice{}
+ v := ParticipationKeyIdentity{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -33,12 +33,12 @@ func TestMarshalUnmarshalSignedTxnSlice(t *testing.T) {
}
}
-func TestRandomizedEncodingSignedTxnSlice(t *testing.T) {
- protocol.RunEncodingTest(t, &SignedTxnSlice{})
+func TestRandomizedEncodingParticipationKeyIdentity(t *testing.T) {
+ protocol.RunEncodingTest(t, &ParticipationKeyIdentity{})
}
-func BenchmarkMarshalMsgSignedTxnSlice(b *testing.B) {
- v := SignedTxnSlice{}
+func BenchmarkMarshalMsgParticipationKeyIdentity(b *testing.B) {
+ v := ParticipationKeyIdentity{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -46,8 +46,8 @@ func BenchmarkMarshalMsgSignedTxnSlice(b *testing.B) {
}
}
-func BenchmarkAppendMsgSignedTxnSlice(b *testing.B) {
- v := SignedTxnSlice{}
+func BenchmarkAppendMsgParticipationKeyIdentity(b *testing.B) {
+ v := ParticipationKeyIdentity{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -58,8 +58,8 @@ func BenchmarkAppendMsgSignedTxnSlice(b *testing.B) {
}
}
-func BenchmarkUnmarshalSignedTxnSlice(b *testing.B) {
- v := SignedTxnSlice{}
+func BenchmarkUnmarshalParticipationKeyIdentity(b *testing.B) {
+ v := ParticipationKeyIdentity{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
diff --git a/data/account/participation.go b/data/account/participation.go
index 269163c99..474f6ca5e 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -41,6 +41,7 @@ import (
// For correctness, all Roots should have no more than one Participation
// globally active at any time. If this condition is violated, the Root may
// equivocate. (Algorand tolerates a limited fraction of misbehaving accounts.)
+//msgp:ignore Participation
type Participation struct {
Parent basics.Address
@@ -56,9 +57,50 @@ type Participation struct {
KeyDilution uint64
}
+// ParticipationKeyIdentity is for msgpack encoding the participation data.
+type ParticipationKeyIdentity struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Parent basics.Address `codec:"addr"`
+ VRFSK crypto.VrfPrivkey `codec:"vrfsk"`
+ VoteID crypto.OneTimeSignatureVerifier `codec:"vote-id"`
+ FirstValid basics.Round `codec:"fv"`
+ LastValid basics.Round `codec:"lv"`
+ KeyDilution uint64 `codec:"kd"`
+}
+
+// ToBeHashed implements the Hashable interface.
+func (id *ParticipationKeyIdentity) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.ParticipationKeys, protocol.Encode(id)
+}
+
+// ID creates a ParticipationID hash from the identity file.
+func (id ParticipationKeyIdentity) ID() ParticipationID {
+ return ParticipationID(crypto.HashObj(&id))
+}
+
+// ID computes a ParticipationID.
+func (part Participation) ID() ParticipationID {
+ idData := ParticipationKeyIdentity{
+ Parent: part.Parent,
+ FirstValid: part.FirstValid,
+ LastValid: part.LastValid,
+ KeyDilution: part.KeyDilution,
+ }
+ if part.VRF != nil {
+ copy(idData.VRFSK[:], part.VRF.SK[:])
+ }
+ if part.Voting != nil {
+ copy(idData.VoteID[:], part.Voting.OneTimeSignatureVerifier[:])
+ }
+
+ return idData.ID()
+}
+
// PersistedParticipation encapsulates the static state of the participation
// for a single address at any given moment, while providing the ability
// to handle persistence and deletion of secrets.
+//msgp:ignore PersistedParticipation
type PersistedParticipation struct {
Participation
@@ -164,7 +206,7 @@ func (part PersistedParticipation) PersistNewParent() error {
// FillDBWithParticipationKeys initializes the passed database with participation keys
func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firstValid, lastValid basics.Round, keyDilution uint64) (part PersistedParticipation, err error) {
if lastValid < firstValid {
- err = fmt.Errorf("FillDBWithParticipationKeys: lastValid %d is after firstValid %d", lastValid, firstValid)
+ err = fmt.Errorf("FillDBWithParticipationKeys: firstValid %d is after lastValid %d", firstValid, lastValid)
return
}
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
new file mode 100644
index 000000000..213e2be50
--- /dev/null
+++ b/data/account/participationRegistry.go
@@ -0,0 +1,953 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package account
+
+import (
+ "context"
+ "database/sql"
+ "encoding/base32"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+const defaultTimeout = 5 * time.Second
+
+// ParticipationID identifies a particular set of participation keys.
+//msgp:ignore ParticipationID
+type ParticipationID crypto.Digest
+
+// IsZero returns true if the ParticipationID is all zero bytes.
+func (pid ParticipationID) IsZero() bool {
+ return (crypto.Digest(pid)).IsZero()
+}
+
+// String prints a b32 version of this ID.
+func (pid ParticipationID) String() string {
+ return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(pid[:])
+}
+
+// ParseParticipationID takes a string and returns a ParticipationID object
+func ParseParticipationID(str string) (d ParticipationID, err error) {
+ decoded, err := base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(str)
+ if err != nil {
+ return d, err
+ }
+ if len(decoded) != len(d) {
+ return d, fmt.Errorf(`attempted to decode a string which was not a participation id: "%s"`, str)
+ }
+ copy(d[:], decoded[:])
+ return d, err
+}
+
+// ParticipationRecord contains all metadata relating to a set of participation keys.
+type ParticipationRecord struct {
+ ParticipationID ParticipationID
+
+ Account basics.Address
+ FirstValid basics.Round
+ LastValid basics.Round
+ KeyDilution uint64
+
+ LastVote basics.Round
+ LastBlockProposal basics.Round
+ LastStateProof basics.Round
+ EffectiveFirst basics.Round
+ EffectiveLast basics.Round
+
+ VRF *crypto.VRFSecrets
+ Voting *crypto.OneTimeSignatureSecrets
+}
+
+var zeroParticipationRecord = ParticipationRecord{}
+
+// IsZero returns true if the object contains zero values.
+func (r ParticipationRecord) IsZero() bool {
+ return r == zeroParticipationRecord
+}
+
+// Duplicate creates a copy of the current object. This is required once secrets are stored.
+func (r ParticipationRecord) Duplicate() ParticipationRecord {
+ var vrf crypto.VRFSecrets
+ if r.VRF != nil {
+ copy(vrf.SK[:], r.VRF.SK[:])
+ copy(vrf.PK[:], r.VRF.PK[:])
+ }
+
+ var voting crypto.OneTimeSignatureSecrets
+ if r.Voting != nil {
+ voting = r.Voting.Snapshot()
+ }
+ return ParticipationRecord{
+ ParticipationID: r.ParticipationID,
+ Account: r.Account,
+ FirstValid: r.FirstValid,
+ LastValid: r.LastValid,
+ KeyDilution: r.KeyDilution,
+ LastVote: r.LastVote,
+ LastBlockProposal: r.LastBlockProposal,
+ LastStateProof: r.LastStateProof,
+ EffectiveFirst: r.EffectiveFirst,
+ EffectiveLast: r.EffectiveLast,
+ VRF: &vrf,
+ Voting: &voting,
+ }
+}
+
+// ParticipationAction is used when recording participation actions.
+//msgp:ignore ParticipationAction
+type ParticipationAction int
+
+// ParticipationAction types
+const (
+ Vote ParticipationAction = iota
+ BlockProposal
+ StateProof
+)
+
+// ErrParticipationIDNotFound is used when attempting to update a set of keys which do not exist.
+var ErrParticipationIDNotFound = errors.New("the participation ID was not found")
+
+// ErrInvalidRegisterRange is used when attempting to register a participation key on a round that is out of range.
+var ErrInvalidRegisterRange = errors.New("key would not be active within range")
+
+// ErrUnknownParticipationAction is used when record is given something other than the known actions.
+var ErrUnknownParticipationAction = errors.New("unknown participation action")
+
+// ErrAlreadyInserted is used when inserting a key which already exists in the registry.
+var ErrAlreadyInserted = errors.New("these participation keys are already inserted")
+
+// ErrActiveKeyNotFound is used when attempting to update an account with no active key
+var ErrActiveKeyNotFound = errors.New("no active participation key found for account")
+
+// ErrMultipleValidKeys is used when recording a result but multiple valid keys were found. This should not be possible.
+var ErrMultipleValidKeys = errors.New("multiple valid keys found while recording key usage")
+
+// ErrMultipleKeysForID this should never happen. Multiple keys with the same participationID
+var ErrMultipleKeysForID = errors.New("multiple valid keys found for the same participationID")
+
+// ErrNoKeyForID there may be cases where a key is deleted and used at the same time, so this error should be handled.
+var ErrNoKeyForID = errors.New("no valid key found for the participationID")
+
+// ParticipationRegistry contain all functions for interacting with the Participation Registry.
+type ParticipationRegistry interface {
+ // Insert adds a record to storage and computes the ParticipationID
+ Insert(record Participation) (ParticipationID, error)
+
+ // Delete removes a record from storage.
+ Delete(id ParticipationID) error
+
+ // DeleteExpired removes all records from storage which are expired on the given round.
+ DeleteExpired(round basics.Round) error
+
+ // Get a participation record.
+ Get(id ParticipationID) ParticipationRecord
+
+ // GetAll of the participation records.
+ GetAll() []ParticipationRecord
+
+ // Register updates the EffectiveFirst and EffectiveLast fields. If there are multiple records for the account
+ // then it is possible for multiple records to be updated.
+ Register(id ParticipationID, on basics.Round) error
+
+ // Record sets the Last* field for the active ParticipationID for the given account.
+ Record(account basics.Address, round basics.Round, participationType ParticipationAction) error
+
+ // Flush ensures that all changes have been written to the underlying data store.
+ Flush(timeout time.Duration) error
+
+ // Close any resources used to implement the interface.
+ Close()
+}
+
+// MakeParticipationRegistry creates a db.Accessor backed ParticipationRegistry.
+func MakeParticipationRegistry(accessor db.Pair, log logging.Logger) (ParticipationRegistry, error) {
+ return makeParticipationRegistry(accessor, log)
+}
+
+// makeParticipationRegistry creates a db.Accessor backed ParticipationRegistry.
+func makeParticipationRegistry(accessor db.Pair, log logging.Logger) (*participationDB, error) {
+ if log == nil {
+ return nil, errors.New("invalid logger provided")
+ }
+
+ migrations := []db.Migration{
+ dbSchemaUpgrade0,
+ }
+
+ err := db.Initialize(accessor.Wdb, migrations)
+ if err != nil {
+ accessor.Close()
+ return nil, fmt.Errorf("unable to initialize participation registry database: %w", err)
+ }
+
+ registry := &participationDB{
+ log: log,
+ store: accessor,
+ writeQueue: make(chan partDBWriteRecord, 10),
+ writeQueueDone: make(chan struct{}),
+ flushTimeout: defaultTimeout,
+ }
+ go registry.writeThread()
+
+ err = registry.initializeCache()
+ if err != nil {
+ registry.Close()
+ return nil, fmt.Errorf("unable to initialize participation registry cache: %w", err)
+ }
+
+ return registry, nil
+}
+
+// Queries
+const (
+ createKeysets = `CREATE TABLE Keysets (
+ pk INTEGER PRIMARY KEY NOT NULL,
+
+ participationID BLOB NOT NULL,
+ account BLOB NOT NULL,
+
+ firstValidRound INTEGER NOT NULL,
+ lastValidRound INTEGER NOT NULL,
+ keyDilution INTEGER NOT NULL,
+
+ vrf BLOB, --* msgpack encoding of ParticipationAccount.vrf
+ stateProof BLOB --* msgpack encoding of ParticipationAccount.BlockProof
+ )`
+
+ createRolling = `CREATE TABLE Rolling (
+ pk INTEGER PRIMARY KEY NOT NULL,
+
+ lastVoteRound INTEGER,
+ lastBlockProposalRound INTEGER,
+ lastStateProofRound INTEGER,
+ effectiveFirstRound INTEGER,
+ effectiveLastRound INTEGER,
+
+ voting BLOB --* msgpack encoding of ParticipationAccount.voting
+ )`
+
+ createStateProof = `CREATE TABLE StateProofKeys (
+ pk INTEGER NOT NULL, --* join with keyset to find key for a particular participation id
+ round INTEGER NOT NULL, --* committed round for this key
+ key BLOB NOT NULL, --* msgpack encoding of ParticipationAccount.BlockProof.SignatureAlgorithm
+ PRIMARY KEY (pk, round)
+ )`
+ insertKeysetQuery = `INSERT INTO Keysets (participationID, account, firstValidRound, lastValidRound, keyDilution, vrf) VALUES (?, ?, ?, ?, ?, ?)`
+ insertRollingQuery = `INSERT INTO Rolling (pk, voting) VALUES (?, ?)`
+
+ // SELECT pk FROM Keysets WHERE participationID = ?
+ selectPK = `SELECT pk FROM Keysets WHERE participationID = ? LIMIT 1`
+ selectLastPK = `SELECT pk FROM Keysets ORDER BY pk DESC LIMIT 1`
+ selectRecords = `SELECT
+ k.participationID, k.account, k.firstValidRound,
+ k.lastValidRound, k.keyDilution, k.vrf,
+ r.lastVoteRound, r.lastBlockProposalRound, r.lastStateProofRound,
+ r.effectiveFirstRound, r.effectiveLastRound, r.voting
+ FROM Keysets k
+ INNER JOIN Rolling r
+ ON k.pk = r.pk`
+ deleteKeysets = `DELETE FROM Keysets WHERE pk=?`
+ deleteRolling = `DELETE FROM Rolling WHERE pk=?`
+ updateRollingFieldsSQL = `UPDATE Rolling
+ SET lastVoteRound=?,
+ lastBlockProposalRound=?,
+ lastStateProofRound=?,
+ effectiveFirstRound=?,
+ effectiveLastRound=?
+ WHERE pk IN (SELECT pk FROM Keysets WHERE participationID=?)`
+)
+
+// dbSchemaUpgrade0 initialize the tables.
+func dbSchemaUpgrade0(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ // Keysets is for the immutable data.
+ _, err := tx.Exec(createKeysets)
+ if err != nil {
+ return err
+ }
+
+ // Rolling may change over time.
+ _, err = tx.Exec(createRolling)
+ if err != nil {
+ return err
+ }
+
+ // For performance reasons, state proofs are in a separate table.
+ _, err = tx.Exec(createStateProof)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// participationDB provides a concrete implementation of the ParticipationRegistry interface.
+type participationDB struct {
+ cache map[ParticipationID]ParticipationRecord
+
+ // dirty marked on Record(), cleared on Register(), Delete(), Flush()
+ dirty map[ParticipationID]struct{}
+
+ log logging.Logger
+ store db.Pair
+ mutex deadlock.RWMutex
+
+ writeQueue chan partDBWriteRecord
+ writeQueueDone chan struct{}
+
+ flushTimeout time.Duration
+}
+
+type updatingParticipationRecord struct {
+ ParticipationRecord
+
+ required bool
+}
+
+// partDBWriteRecord event object sent to the writeThread to facilitate async
+// database writes. Only one set of event fields should be set at a time.
+type partDBWriteRecord struct {
+ insertID ParticipationID
+ insert Participation
+
+ registerUpdated map[ParticipationID]updatingParticipationRecord
+
+ delete ParticipationID
+
+ flushResultChannel chan error
+}
+
+func (db *participationDB) initializeCache() error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ records, err := db.getAllFromDB()
+ if err != nil {
+ return err
+ }
+
+ cache := make(map[ParticipationID]ParticipationRecord)
+ for _, record := range records {
+ // Check if it already exists
+ if _, ok := cache[record.ParticipationID]; ok {
+ return ErrMultipleKeysForID
+ }
+ cache[record.ParticipationID] = record
+ }
+
+ db.cache = cache
+ db.dirty = make(map[ParticipationID]struct{})
+ return nil
+}
+
+func (db *participationDB) writeThread() {
+ defer close(db.writeQueueDone)
+ var err error
+ var lastErr error
+ for {
+ var wr partDBWriteRecord
+ var chanOk bool
+
+ // blocking read until next activity or close
+ wr, chanOk = <-db.writeQueue
+ if !chanOk {
+ return // chan closed
+ }
+
+ if len(wr.registerUpdated) != 0 {
+ err = db.registerInner(wr.registerUpdated)
+ } else if !wr.insertID.IsZero() {
+ err = db.insertInner(wr.insert, wr.insertID)
+ } else if !wr.delete.IsZero() {
+ err = db.deleteInner(wr.delete)
+ } else if wr.flushResultChannel != nil {
+ err = db.flushInner()
+ }
+ if err != nil {
+ lastErr = err
+ }
+
+ if wr.flushResultChannel != nil {
+ wr.flushResultChannel <- lastErr
+ lastErr = nil
+ }
+ }
+}
+
+// verifyExecWithOneRowEffected checks for a successful Exec and also verifies exactly 1 row was affected
+func verifyExecWithOneRowEffected(err error, result sql.Result, operationName string) error {
+ if err != nil {
+ return fmt.Errorf("unable to execute %s: %w", operationName, err)
+ }
+ rows, err := result.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("unable to get %s rows affected: %w", operationName, err)
+ }
+ if rows != 1 {
+ return fmt.Errorf("unexpected number of %s rows affected, expected 1 found %d", operationName, rows)
+ }
+ return nil
+}
+
+func (db *participationDB) insertInner(record Participation, id ParticipationID) (err error) {
+
+ var rawVRF []byte
+ var rawVoting []byte
+
+ if record.VRF != nil {
+ rawVRF = protocol.Encode(record.VRF)
+ }
+ if record.Voting != nil {
+ voting := record.Voting.Snapshot()
+ rawVoting = protocol.Encode(&voting)
+ }
+
+ err = db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ result, err := tx.Exec(
+ insertKeysetQuery,
+ id[:],
+ record.Parent[:],
+ record.FirstValid,
+ record.LastValid,
+ record.KeyDilution,
+ rawVRF)
+ if err := verifyExecWithOneRowEffected(err, result, "insert keyset"); err != nil {
+ return err
+ }
+ pk, err := result.LastInsertId()
+ if err != nil {
+ return fmt.Errorf("unable to get pk from keyset: %w", err)
+ }
+
+ // Create Rolling entry
+ result, err = tx.Exec(insertRollingQuery, pk, rawVoting)
+ if err := verifyExecWithOneRowEffected(err, result, "insert rolling"); err != nil {
+ return err
+ }
+
+ return nil
+ })
+ return err
+}
+
+func (db *participationDB) registerInner(updated map[ParticipationID]updatingParticipationRecord) error {
+ var cacheDeletes []ParticipationID
+ err := db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ // Disable active key if there is one
+ for id, record := range updated {
+ err := updateRollingFields(ctx, tx, record.ParticipationRecord)
+ // Repair the case when no keys were updated
+ if err == ErrNoKeyForID {
+ db.log.Warn("participationDB unable to update key in cache. Removing from cache.")
+ cacheDeletes = append(cacheDeletes, id)
+ if !record.required {
+ err = nil
+ }
+ }
+ if err != nil {
+ return fmt.Errorf("unable to disable old key when registering %s: %w", id, err)
+ }
+ }
+ return nil
+ })
+
+ // Update cache
+ if err == nil && len(cacheDeletes) != 0 {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+ for _, id := range cacheDeletes {
+ delete(db.cache, id)
+ delete(db.dirty, id)
+ }
+ }
+ return err
+}
+
+func (db *participationDB) deleteInner(id ParticipationID) error {
+ err := db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ // Fetch primary key
+ var pk int
+ row := tx.QueryRow(selectPK, id[:])
+ err := row.Scan(&pk)
+ if err == sql.ErrNoRows {
+ // nothing to do.
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+
+ // Delete rows
+ result, err := tx.Exec(deleteKeysets, pk)
+ if err := verifyExecWithOneRowEffected(err, result, "delete keyset"); err != nil {
+ return err
+ }
+
+ result, err = tx.Exec(deleteRolling, pk)
+ if err := verifyExecWithOneRowEffected(err, result, "delete rolling"); err != nil {
+ return err
+ }
+
+ return nil
+ })
+ return err
+}
+
+func (db *participationDB) flushInner() error {
+ var dirty map[ParticipationID]struct{}
+ db.mutex.Lock()
+ if len(db.dirty) != 0 {
+ dirty = db.dirty
+ db.dirty = make(map[ParticipationID]struct{})
+ } else {
+ dirty = nil
+ }
+
+ var needsUpdate []ParticipationRecord
+ // Verify that the dirty flag has not desynchronized from the cache.
+ for id := range dirty {
+ if rec, ok := db.cache[id]; !ok {
+ db.log.Warnf("participationDB fixing dirty flag de-synchronization for %s", id)
+ delete(db.cache, id)
+ } else {
+ needsUpdate = append(needsUpdate, rec)
+ }
+ }
+ db.mutex.Unlock()
+
+ if dirty == nil {
+ return nil
+ }
+
+ err := db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var errorStr strings.Builder
+ for _, record := range needsUpdate {
+ err := updateRollingFields(ctx, tx, record)
+ // This should only be updating key usage so ignoring missing keys is not a problem.
+ if err != nil && err != ErrNoKeyForID {
+ if errorStr.Len() > 0 {
+ errorStr.WriteString(", ")
+ }
+ errorStr.WriteString(err.Error())
+ }
+ }
+ if errorStr.Len() > 0 {
+ return errors.New(errorStr.String())
+ }
+ return nil
+ })
+
+ if err != nil {
+ // put back what we didn't finish with
+ db.mutex.Lock()
+ for id, v := range dirty {
+ db.dirty[id] = v
+ }
+ db.mutex.Unlock()
+ }
+
+ return err
+}
+
+func (db *participationDB) Insert(record Participation) (id ParticipationID, err error) {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ id = record.ID()
+ if _, ok := db.cache[id]; ok {
+ return id, ErrAlreadyInserted
+ }
+
+ db.writeQueue <- partDBWriteRecord{
+ insertID: id,
+ insert: record,
+ }
+
+ // Make some copies.
+ var vrf *crypto.VRFSecrets
+ if record.VRF != nil {
+ vrf = new(crypto.VRFSecrets)
+ copy(vrf.SK[:], record.VRF.SK[:])
+ copy(vrf.PK[:], record.VRF.PK[:])
+ }
+
+ var voting *crypto.OneTimeSignatureSecrets
+ if record.Voting != nil {
+ voting = new(crypto.OneTimeSignatureSecrets)
+ *voting = record.Voting.Snapshot()
+ }
+
+ // update cache.
+ db.cache[id] = ParticipationRecord{
+ ParticipationID: id,
+ Account: record.Address(),
+ FirstValid: record.FirstValid,
+ LastValid: record.LastValid,
+ KeyDilution: record.KeyDilution,
+ LastVote: 0,
+ LastBlockProposal: 0,
+ LastStateProof: 0,
+ EffectiveFirst: 0,
+ EffectiveLast: 0,
+ Voting: voting,
+ VRF: vrf,
+ }
+
+ return
+}
+
+func (db *participationDB) Delete(id ParticipationID) error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ // NoOp if key does not exist.
+ if _, ok := db.cache[id]; !ok {
+ return nil
+ }
+ delete(db.dirty, id)
+ delete(db.cache, id)
+ // do the db part async
+ db.writeQueue <- partDBWriteRecord{
+ delete: id,
+ }
+ return nil
+}
+
+func (db *participationDB) DeleteExpired(round basics.Round) error {
+ // This could be optimized to delete everything with one query.
+ for _, v := range db.GetAll() {
+ if v.LastValid < round {
+ err := db.Delete(v.ParticipationID)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// scanRecords is a helper to manage scanning participation records.
+func scanRecords(rows *sql.Rows) ([]ParticipationRecord, error) {
+ results := make([]ParticipationRecord, 0)
+ for rows.Next() {
+ var record ParticipationRecord
+ var rawParticipation []byte
+ var rawAccount []byte
+ var rawVRF []byte
+ var rawVoting []byte
+
+ var lastVote sql.NullInt64
+ var lastBlockProposal sql.NullInt64
+ var lastCompactCertificate sql.NullInt64
+ var effectiveFirst sql.NullInt64
+ var effectiveLast sql.NullInt64
+
+ err := rows.Scan(
+ &rawParticipation,
+ &rawAccount,
+ &record.FirstValid,
+ &record.LastValid,
+ &record.KeyDilution,
+ &rawVRF,
+ &lastVote,
+ &lastBlockProposal,
+ &lastCompactCertificate,
+ &effectiveFirst,
+ &effectiveLast,
+ &rawVoting,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ copy(record.ParticipationID[:], rawParticipation)
+ copy(record.Account[:], rawAccount)
+
+ if len(rawVRF) > 0 {
+ record.VRF = &crypto.VRFSecrets{}
+ err = protocol.Decode(rawVRF, record.VRF)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode VRF: %w", err)
+ }
+ }
+
+ if len(rawVoting) > 0 {
+ record.Voting = &crypto.OneTimeSignatureSecrets{}
+ err = protocol.Decode(rawVoting, record.Voting)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode Voting: %w", err)
+ }
+ }
+
+ // Check optional values.
+ if lastVote.Valid {
+ record.LastVote = basics.Round(lastVote.Int64)
+ }
+
+ if lastBlockProposal.Valid {
+ record.LastBlockProposal = basics.Round(lastBlockProposal.Int64)
+ }
+
+ if lastCompactCertificate.Valid {
+ record.LastStateProof = basics.Round(lastCompactCertificate.Int64)
+ }
+
+ if effectiveFirst.Valid {
+ record.EffectiveFirst = basics.Round(effectiveFirst.Int64)
+ }
+
+ if effectiveLast.Valid {
+ record.EffectiveLast = basics.Round(effectiveLast.Int64)
+ }
+
+ results = append(results, record)
+ }
+
+ return results, nil
+}
+
+func (db *participationDB) getAllFromDB() (records []ParticipationRecord, err error) {
+ err = db.store.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ rows, err := tx.Query(selectRecords)
+ if err != nil {
+ return fmt.Errorf("unable to query records: %w", err)
+ }
+
+ records, err = scanRecords(rows)
+ if err != nil {
+ records = nil
+ return fmt.Errorf("problem scanning records: %w", err)
+ }
+
+ return nil
+ })
+
+ return
+}
+
+func (db *participationDB) Get(id ParticipationID) ParticipationRecord {
+ db.mutex.RLock()
+ defer db.mutex.RUnlock()
+
+ record, ok := db.cache[id]
+ if !ok {
+ return ParticipationRecord{}
+ }
+ return record.Duplicate()
+}
+
+func (db *participationDB) GetAll() []ParticipationRecord {
+ db.mutex.RLock()
+ defer db.mutex.RUnlock()
+
+ results := make([]ParticipationRecord, 0, len(db.cache))
+ for _, record := range db.cache {
+ results = append(results, record.Duplicate())
+ }
+ return results
+}
+
+// updateRollingFields sets all of the rolling fields according to the record object.
+func updateRollingFields(ctx context.Context, tx *sql.Tx, record ParticipationRecord) error {
+ result, err := tx.ExecContext(ctx, updateRollingFieldsSQL,
+ record.LastVote,
+ record.LastBlockProposal,
+ record.LastStateProof,
+ record.EffectiveFirst,
+ record.EffectiveLast,
+ record.ParticipationID[:])
+ if err != nil {
+ return err
+ }
+
+ numRows, err := result.RowsAffected()
+ if err != nil {
+ return err
+ }
+
+ if numRows > 1 {
+ return ErrMultipleKeysForID
+ }
+
+ if numRows < 1 {
+ return ErrNoKeyForID
+ }
+
+ return nil
+}
+
+func recordActive(record ParticipationRecord, on basics.Round) bool {
+ return record.EffectiveLast != 0 && record.EffectiveFirst <= on && on <= record.EffectiveLast
+}
+
+// PKI TODO: Register needs a bit more work to make sure EffectiveFirst and
+// EffectiveLast are set at the right time. Specifically, the node
+// doesn't call Register until the key becomes active and is about
+// to be used, so effective first/last is updated just-in-time. It
+// would be better to update them when the KeyRegistration occurs.
+func (db *participationDB) Register(id ParticipationID, on basics.Round) error {
+ // Lookup recordToRegister for first/last valid and account.
+ recordToRegister := db.Get(id)
+ if recordToRegister.IsZero() {
+ return ErrParticipationIDNotFound
+ }
+
+ // No-op If the record is already active
+ if recordActive(recordToRegister, on) {
+ return nil
+ }
+
+ // round out of valid range.
+ if on < recordToRegister.FirstValid || on > recordToRegister.LastValid {
+ return ErrInvalidRegisterRange
+ }
+
+ var toUpdate []ParticipationRecord
+ db.mutex.Lock()
+ for _, record := range db.cache {
+ if record.Account == recordToRegister.Account && record.ParticipationID != id && recordActive(record, on) {
+ toUpdate = append(toUpdate, record)
+ }
+ }
+ db.mutex.Unlock()
+
+ updated := make(map[ParticipationID]updatingParticipationRecord)
+
+ // Disable active key if there is one
+ for _, record := range toUpdate {
+ record.EffectiveLast = on - 1
+ updated[record.ParticipationID] = updatingParticipationRecord{
+ record.Duplicate(),
+ false,
+ }
+ }
+ // Mark registered.
+ recordToRegister.EffectiveFirst = on
+ recordToRegister.EffectiveLast = recordToRegister.LastValid
+ updated[recordToRegister.ParticipationID] = updatingParticipationRecord{
+ recordToRegister,
+ true,
+ }
+
+ if len(updated) != 0 {
+ db.writeQueue <- partDBWriteRecord{
+ registerUpdated: updated,
+ }
+ db.mutex.Lock()
+ for id, record := range updated {
+ delete(db.dirty, id)
+ db.cache[id] = record.ParticipationRecord
+ }
+ db.mutex.Unlock()
+ }
+
+ db.log.Infof("Registered key (%s) for account (%s) first valid (%d) last valid (%d)\n",
+ id, recordToRegister.Account, recordToRegister.FirstValid, recordToRegister.LastValid)
+ return nil
+}
+
+func (db *participationDB) Record(account basics.Address, round basics.Round, participationAction ParticipationAction) error {
+ db.mutex.Lock()
+ defer db.mutex.Unlock()
+
+ matches := make([]ParticipationRecord, 0, 1)
+
+ // At most one id should be updated, exit with error if a second is found.
+ for _, record := range db.cache {
+ if record.Account == account && recordActive(record, round) {
+ if len(matches) != 0 {
+ // This probably means there is a bug in the key participation registry Register implementation.
+ return ErrMultipleValidKeys
+ }
+ matches = append(matches, record)
+ }
+ }
+
+ if len(matches) == 0 {
+ // This indicates the participation registry is not synchronized with agreement.
+ return ErrActiveKeyNotFound
+ }
+
+ record := matches[0]
+ // Good case, one key found.
+ switch participationAction {
+ case Vote:
+ record.LastVote = round
+ case BlockProposal:
+ record.LastBlockProposal = round
+ case StateProof:
+ record.LastStateProof = round
+ default:
+ return ErrUnknownParticipationAction
+ }
+
+ db.dirty[record.ParticipationID] = struct{}{}
+ db.cache[record.ParticipationID] = record
+ return nil
+}
+
+// Flush waits until all enqueued asynchronous IO has completed.
+// Waiting for all asynchronous IO to complete includes actions from other threads.
+// Flush waits for the participation registry to be idle.
+// Flush returns the latest error generated by async IO, if any.
+func (db *participationDB) Flush(timeout time.Duration) error {
+ resultCh := make(chan error, 1)
+ timeoutCh := time.After(timeout)
+ writeRecord := partDBWriteRecord{
+ flushResultChannel: resultCh,
+ }
+
+ select {
+ case db.writeQueue <- writeRecord:
+ case <-timeoutCh:
+ return fmt.Errorf("timeout while requesting flush, check results manually")
+ }
+
+ select {
+ case err := <-resultCh:
+ return err
+ case <-timeoutCh:
+ return fmt.Errorf("timeout while flushing changes, check results manually")
+ }
+}
+
+// Close attempts to flush with db.flushTimeout, then waits for the write queue for another db.flushTimeout.
+func (db *participationDB) Close() {
+ if err := db.Flush(db.flushTimeout); err != nil {
+ db.log.Warnf("participationDB unhandled error during Close/Flush: %w", err)
+ }
+
+ db.store.Close()
+ close(db.writeQueue)
+
+ // Wait for write queue to close.
+ select {
+ case <-db.writeQueueDone:
+ return
+ case <-time.After(db.flushTimeout):
+ db.log.Warnf("Close(): timeout while waiting for WriteQueue to finish.")
+ }
+}
diff --git a/data/account/participationRegistry_test.go b/data/account/participationRegistry_test.go
new file mode 100644
index 000000000..d000f16cb
--- /dev/null
+++ b/data/account/participationRegistry_test.go
@@ -0,0 +1,769 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package account
+
+import (
+ "context"
+ "database/sql"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+func getRegistry(t *testing.T) *participationDB {
+ rootDB, err := db.OpenPair(t.Name(), true)
+ require.NoError(t, err)
+
+ registry, err := makeParticipationRegistry(rootDB, logging.TestingLog(t))
+ require.NoError(t, err)
+ require.NotNil(t, registry)
+
+ return registry
+}
+
+func assertParticipation(t *testing.T, p Participation, pr ParticipationRecord) {
+ require.Equal(t, p.FirstValid, pr.FirstValid)
+ require.Equal(t, p.LastValid, pr.LastValid)
+ require.Equal(t, p.KeyDilution, pr.KeyDilution)
+ require.Equal(t, p.Parent, pr.Account)
+}
+
+func makeTestParticipation(addrID int, first, last basics.Round, dilution uint64) Participation {
+ p := Participation{
+ FirstValid: first,
+ LastValid: last,
+ KeyDilution: dilution,
+ Voting: &crypto.OneTimeSignatureSecrets{},
+ VRF: &crypto.VRFSecrets{},
+ }
+ binary.LittleEndian.PutUint32(p.Parent[:], uint32(addrID))
+ return p
+}
+
+func registryCloseTest(t *testing.T, registry *participationDB) {
+ start := time.Now()
+ registry.Close()
+ duration := time.Since(start)
+ assert.Less(t, uint64(duration), uint64(defaultTimeout))
+}
+
+// Insert participation records and make sure they can be fetched.
+func TestParticipation_InsertGet(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+ p2 := makeTestParticipation(2, 4, 5, 6)
+
+ insertAndVerify := func(part Participation) {
+ id, err := registry.Insert(part)
+ a.NoError(err)
+ a.Equal(part.ID(), id)
+
+ record := registry.Get(part.ID())
+ a.False(record.IsZero())
+ assertParticipation(t, part, record)
+ }
+
+ // Verify inserting some records.
+ insertAndVerify(p)
+ insertAndVerify(p2)
+
+ // Data should be available immediately
+ results := registry.GetAll()
+ a.Len(results, 2)
+ for _, record := range results {
+ if record.Account == p.Parent {
+ assertParticipation(t, p, record)
+ } else if record.Account == p2.Parent {
+ assertParticipation(t, p2, record)
+ } else {
+ a.Fail("unexpected account")
+ }
+ }
+
+ // Check that Flush works, re-initialize cache and verify GetAll.
+ a.NoError(registry.Flush(defaultTimeout))
+ a.NoError(registry.initializeCache())
+ results = registry.GetAll()
+ a.Len(results, 2)
+ for _, record := range results {
+ if record.Account == p.Parent {
+ assertParticipation(t, p, record)
+ } else if record.Account == p2.Parent {
+ assertParticipation(t, p2, record)
+ } else {
+ a.Fail("unexpected account")
+ }
+ }
+}
+
+// Make sure a record can be deleted by id.
+func TestParticipation_Delete(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+ p2 := makeTestParticipation(2, 4, 5, 6)
+
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ id, err = registry.Insert(p2)
+ a.NoError(err)
+ a.Equal(p2.ID(), id)
+
+ err = registry.Delete(p.ID())
+ a.NoError(err)
+
+ results := registry.GetAll()
+ a.Len(results, 1)
+ assertParticipation(t, p2, results[0])
+
+ // Check that result was persisted.
+ a.NoError(registry.Flush(defaultTimeout))
+ a.NoError(registry.initializeCache())
+ results = registry.GetAll()
+ a.Len(results, 1)
+ assertParticipation(t, p2, results[0])
+}
+
+func TestParticipation_DeleteExpired(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ for i := 10; i < 20; i++ {
+ p := makeTestParticipation(i, 1, basics.Round(i), 1)
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+ }
+
+ err := registry.DeleteExpired(15)
+ a.NoError(err)
+
+ a.Len(registry.GetAll(), 5, "The first 5 should be deleted.")
+
+ // Check persisting. Verify by re-initializing the cache.
+ a.NoError(registry.Flush(defaultTimeout))
+ a.NoError(registry.initializeCache())
+ a.Len(registry.GetAll(), 5, "The first 5 should be deleted.")
+}
+
+// Make sure the register function properly sets effective first/last for all effected records.
+func TestParticipation_Register(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ // Overlapping keys.
+ p := makeTestParticipation(1, 250000, 3000000, 1)
+ p2 := makeTestParticipation(1, 200000, 4000000, 2)
+
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ id, err = registry.Insert(p2)
+ a.NoError(err)
+ a.Equal(p2.ID(), id)
+
+ verifyEffectiveRound := func(id ParticipationID, first, last int) {
+ record := registry.Get(id)
+ a.False(record.IsZero())
+ require.Equal(t, first, int(record.EffectiveFirst))
+ require.Equal(t, last, int(record.EffectiveLast))
+ }
+
+ // Register the first key.
+ err = registry.Register(p.ID(), 500000)
+ a.NoError(err)
+ verifyEffectiveRound(p.ID(), 500000, int(p.LastValid))
+
+ // Register second key.
+ err = registry.Register(p2.ID(), 2500000)
+ a.NoError(err)
+ verifyEffectiveRound(p.ID(), 500000, 2499999)
+ verifyEffectiveRound(p2.ID(), 2500000, int(p2.LastValid))
+}
+
+// Test error when registering a non-existing participation ID.
+func TestParticipation_RegisterInvalidID(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(0, 250000, 3000000, 1)
+
+ err := registry.Register(p.ID(), 10000000)
+ a.EqualError(err, ErrParticipationIDNotFound.Error())
+}
+
+// Test error attempting to register a key with an invalid range.
+func TestParticipation_RegisterInvalidRange(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(0, 250000, 3000000, 1)
+
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id)
+
+ // Register the first key.
+ err = registry.Register(p.ID(), 1000000000)
+ a.EqualError(err, ErrInvalidRegisterRange.Error())
+}
+
+// Test the recording function.
+func TestParticipation_Record(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ // Setup p
+ p := makeTestParticipation(1, 0, 3000000, 1)
+ // Setup some other keys to make sure they are not updated.
+ p2 := makeTestParticipation(2, 0, 3000000, 1)
+ p3 := makeTestParticipation(3, 0, 3000000, 1)
+
+ // Install and register all of the keys
+ for _, part := range []Participation{p, p2, p3} {
+ id, err := registry.Insert(part)
+ a.NoError(err)
+ a.Equal(part.ID(), id)
+ err = registry.Register(part.ID(), 0)
+ a.NoError(err)
+ }
+
+ a.NotNil(registry.GetAll())
+
+ a.NoError(registry.Record(p.Parent, 1000, Vote))
+ a.NoError(registry.Record(p.Parent, 2000, BlockProposal))
+ a.NoError(registry.Record(p.Parent, 3000, StateProof))
+
+ // Verify that one and only one key was updated.
+ test := func(registry ParticipationRegistry) {
+ records := registry.GetAll()
+ a.Len(records, 3)
+ for _, record := range records {
+ if record.ParticipationID == p.ID() {
+ require.Equal(t, 1000, int(record.LastVote))
+ require.Equal(t, 2000, int(record.LastBlockProposal))
+ require.Equal(t, 3000, int(record.LastStateProof))
+ } else {
+ require.Equal(t, 0, int(record.LastVote))
+ require.Equal(t, 0, int(record.LastBlockProposal))
+ require.Equal(t, 0, int(record.LastStateProof))
+ }
+ }
+ }
+
+ test(registry)
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Len(registry.dirty, 0)
+
+ // Re-initialize
+ a.NoError(registry.initializeCache())
+ test(registry)
+}
+
+// Test that attempting to record an invalid action generates an error.
+func TestParticipation_RecordInvalidActionAndOutOfRange(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 0, 3000000, 1)
+ id, err := registry.Insert(p)
+ a.NoError(err)
+ err = registry.Register(id, 0)
+ a.NoError(err)
+
+ err = registry.Record(p.Parent, 0, ParticipationAction(9000))
+ a.EqualError(err, ErrUnknownParticipationAction.Error())
+
+ err = registry.Record(p.Parent, 3000000, ParticipationAction(9000))
+ a.EqualError(err, ErrUnknownParticipationAction.Error())
+
+ err = registry.Record(p.Parent, 3000001, ParticipationAction(9000))
+ a.EqualError(err, ErrActiveKeyNotFound.Error())
+}
+
+func TestParticipation_RecordNoKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ err := registry.Record(basics.Address{}, 0, Vote)
+ a.EqualError(err, ErrActiveKeyNotFound.Error())
+}
+
+// Test that an error is generated if the record function updates multiple records.
+// This would only happen if the DB was in an inconsistent state.
+func TestParticipation_RecordMultipleUpdates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ // We'll test that recording at this round fails because both keys are active
+ testRound := basics.Round(5000)
+
+ p := makeTestParticipation(1, 0, 3000000, 1)
+ p2 := makeTestParticipation(1, 1, 3000000, 1)
+
+ _, err := registry.Insert(p)
+ a.NoError(err)
+ _, err = registry.Insert(p2)
+ a.NoError(err)
+ err = registry.Register(p.ID(), p.FirstValid)
+ a.NoError(err)
+
+ // Force the DB to have 2 active keys for one account by tampering with the private cache variable
+ recordCopy := registry.cache[p2.ID()]
+ recordCopy.EffectiveFirst = p2.FirstValid
+ recordCopy.EffectiveLast = p2.LastValid
+ registry.cache[p2.ID()] = recordCopy
+ registry.dirty[p2.ID()] = struct{}{}
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Len(registry.dirty, 0)
+ a.NoError(registry.initializeCache())
+
+ // Verify bad state - both records are valid until round 3 million
+ a.NotEqual(p.ID(), p2.ID())
+ recordTest := make([]ParticipationRecord, 0)
+
+ recordP := registry.Get(p.ID())
+ a.False(recordP.IsZero())
+ recordTest = append(recordTest, recordP)
+
+ recordP2 := registry.Get(p2.ID())
+ a.False(recordP2.IsZero())
+ recordTest = append(recordTest, recordP2)
+
+ // Make sure both accounts are active for the test round
+ for _, record := range recordTest {
+ a.True(recordActive(record, testRound), "both records should be active")
+ }
+
+ err = registry.Record(p.Parent, testRound, Vote)
+ a.EqualError(err, ErrMultipleValidKeys.Error())
+}
+
+func TestParticipation_MultipleInsertError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+
+ _, err := registry.Insert(p)
+ a.NoError(err)
+ _, err = registry.Insert(p)
+ a.Error(err, ErrAlreadyInserted.Error())
+}
+
+// This is a contrived test on every level. To workaround errors we setup the
+// DB and cache in ways that are impossible with public methods.
+//
+// Basically multiple records with the same ParticipationID are a big no-no and
+// it should be detected as quickly as possible.
+func TestParticipation_RecordMultipleUpdates_DB(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+
+ p := makeTestParticipation(1, 1, 2000000, 3)
+ id := p.ID()
+
+ // Insert the same record twice
+ // Pretty much copied from the Insert function without error checking.
+ err := registry.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ for i := 0; i < 2; i++ {
+ record := p
+ _, err := tx.Exec(
+ insertKeysetQuery,
+ id[:],
+ record.Parent[:],
+ record.FirstValid,
+ record.LastValid,
+ record.KeyDilution,
+ nil)
+ if err != nil {
+ return fmt.Errorf("unable to insert keyset: %w", err)
+ }
+
+ // Fetch primary key
+ var pk int
+ row := tx.QueryRow(selectLastPK, id[:])
+ err = row.Scan(&pk)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+
+ // Create Rolling entry
+ _, err = tx.Exec(`INSERT INTO Rolling (pk, effectiveFirstRound, effectiveLastRound) VALUES (?, ?, ?)`, pk, 1, 200000)
+ if err != nil {
+ return fmt.Errorf("unable insert rolling: %w", err)
+ }
+
+ var num int
+ row = tx.QueryRow(`SELECT COUNT(*) FROM Keysets WHERE participationID=?`, id[:])
+ err = row.Scan(&num)
+ if err != nil {
+ return fmt.Errorf("unable to scan pk: %w", err)
+ }
+ }
+
+ return nil
+ })
+
+ a.NoError(err)
+
+ // Now that the DB has multiple records for one participation ID, check that all the methods notice.
+
+ // Initializing the cache
+ err = registry.initializeCache()
+ a.EqualError(err, ErrMultipleKeysForID.Error())
+
+ // Registering the ID - No error because it is already registered so we don't try to re-register.
+ registry.cache[id] = ParticipationRecord{
+ ParticipationID: id,
+ Account: p.Parent,
+ FirstValid: p.FirstValid,
+ LastValid: p.LastValid,
+ KeyDilution: p.KeyDilution,
+ EffectiveFirst: p.FirstValid,
+ EffectiveLast: p.LastValid,
+ }
+ err = registry.Register(id, 1)
+ a.NoError(err)
+
+ // Clear the first/last so that the no-op registration can't be detected
+ record := registry.cache[id]
+ record.EffectiveFirst = 0
+ record.EffectiveLast = 0
+ registry.cache[id] = record
+
+ err = registry.Register(id, 1)
+ a.NoError(err)
+ err = registry.Flush(defaultTimeout)
+ a.Error(err)
+ a.Contains(err.Error(), "unable to disable old key")
+ a.EqualError(errors.Unwrap(err), ErrMultipleKeysForID.Error())
+
+ // Flushing changes detects that multiple records are updated
+ registry.dirty[id] = struct{}{}
+ err = registry.Flush(defaultTimeout)
+ a.EqualError(err, ErrMultipleKeysForID.Error())
+ a.Len(registry.dirty, 1)
+
+ err = registry.Flush(defaultTimeout)
+ a.EqualError(err, ErrMultipleKeysForID.Error())
+
+ // Make sure the error message is logged when closing the registry.
+ var logOutput strings.Builder
+ registry.log.SetOutput(&logOutput)
+ registry.Close()
+ a.Contains(logOutput.String(), "participationDB unhandled error during Close/Flush")
+ a.Contains(logOutput.String(), ErrMultipleKeysForID.Error())
+}
+
+func TestParticipation_NoKeyToUpdate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ registry.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ record := ParticipationRecord{
+ ParticipationID: ParticipationID{},
+ Account: basics.Address{},
+ FirstValid: 1,
+ LastValid: 2,
+ KeyDilution: 3,
+ EffectiveFirst: 4,
+ EffectiveLast: 5,
+ }
+ err := updateRollingFields(ctx, tx, record)
+ a.EqualError(err, ErrNoKeyForID.Error())
+ return nil
+ })
+}
+
+// TestParticipion_Blobs adds some secrets to the registry and makes sure the same ones are returned.
+func TestParticipion_Blobs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ access, err := db.MakeAccessor("writetest_root", false, true)
+ if err != nil {
+ panic(err)
+ }
+ root, err := GenerateRoot(access)
+ access.Close()
+ a.NoError(err)
+
+ access, err = db.MakeAccessor("writetest", false, true)
+ if err != nil {
+ panic(err)
+ }
+ part, err := FillDBWithParticipationKeys(access, root.Address(), 0, 101, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ access.Close()
+ a.NoError(err)
+
+ check := func(id ParticipationID) {
+ record := registry.Get(id)
+ a.NotEqual(ParticipationRecord{}, record)
+ a.Equal(id, record.ParticipationID)
+ a.Equal(part.VRF, record.VRF)
+ a.Equal(part.Voting.Snapshot(), record.Voting.Snapshot())
+ }
+
+ id, err := registry.Insert(part.Participation)
+ a.NoError(err)
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Equal(id, part.ID())
+ // check the initial caching
+ check(id)
+
+ // check the re-initialized object
+ a.NoError(registry.initializeCache())
+ check(id)
+}
+
+// TestParticipion_EmptyBlobs makes sure empty blobs are set to nil
+func TestParticipion_EmptyBlobs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ access, err := db.MakeAccessor("writetest_root", false, true)
+ if err != nil {
+ panic(err)
+ }
+ root, err := GenerateRoot(access)
+ access.Close()
+ a.NoError(err)
+
+ access, err = db.MakeAccessor("writetest", false, true)
+ if err != nil {
+ panic(err)
+ }
+ part, err := FillDBWithParticipationKeys(access, root.Address(), 0, 101, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ access.Close()
+ a.NoError(err)
+ part.VRF = nil
+ part.Voting = nil
+
+ check := func(id ParticipationID) {
+ record := registry.Get(id)
+ a.NotEqual(ParticipationRecord{}, record)
+ a.Equal(id, record.ParticipationID)
+ a.True(record.VRF.MsgIsZero())
+ a.True(record.Voting.MsgIsZero())
+ }
+
+ id, err := registry.Insert(part.Participation)
+ a.NoError(err)
+ a.NoError(registry.Flush(defaultTimeout))
+ a.Equal(id, part.ID())
+ // check the initial caching
+ check(id)
+
+ // check the re-initialized object
+ a.NoError(registry.initializeCache())
+ check(id)
+}
+
+func TestRegisterUpdatedEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ p := makeTestParticipation(1, 1, 2, 3)
+ p2 := makeTestParticipation(2, 4, 5, 6)
+
+ id1, err := registry.Insert(p)
+ a.NoError(err)
+ a.Equal(p.ID(), id1)
+
+ id2, err := registry.Insert(p2)
+ a.NoError(err)
+ a.Equal(p2.ID(), id2)
+
+ record1 := registry.Get(id1)
+ a.False(record1.IsZero())
+ record2 := registry.Get(id2)
+ a.False(record2.IsZero())
+
+ // Delete the second one to make sure it can't be updated.
+ a.NoError(registry.Delete(id2))
+ a.NoError(registry.Flush(defaultTimeout))
+
+ // Ignore optional error
+ updates := make(map[ParticipationID]updatingParticipationRecord)
+ updates[id1] = updatingParticipationRecord{
+ ParticipationRecord: record1,
+ required: true,
+ }
+ updates[id2] = updatingParticipationRecord{
+ ParticipationRecord: record2,
+ required: false,
+ }
+
+ registry.writeQueue <- partDBWriteRecord{
+ registerUpdated: updates,
+ }
+
+ a.NoError(registry.Flush(defaultTimeout))
+
+ // This time, make it required and we should have an error
+ updates[id2] = updatingParticipationRecord{
+ ParticipationRecord: record2,
+ required: true,
+ }
+
+ registry.writeQueue <- partDBWriteRecord{
+ registerUpdated: updates,
+ }
+
+ err = registry.Flush(defaultTimeout)
+ a.Contains(err.Error(), "unable to disable old key when registering")
+ a.Contains(err.Error(), ErrNoKeyForID.Error())
+}
+
+// TestFlushDeadlock reproduced a deadlock when calling Flush repeatedly. This test reproduced the deadlock and
+// verifies the fix.
+func TestFlushDeadlock(t *testing.T) {
+ var wg sync.WaitGroup
+
+ partitiontest.PartitionTest(t)
+ registry := getRegistry(t)
+ defer registryCloseTest(t, registry)
+
+ spam := func() {
+ defer wg.Done()
+ timeout := time.After(time.Second)
+ for {
+ select {
+ case <-timeout:
+ return
+ default:
+ // If there is a deadlock, this timeout will trigger.
+ assert.NoError(t, registry.Flush(2*time.Second))
+ }
+ }
+ }
+
+ // Start spammers.
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go spam()
+ }
+
+ wg.Wait()
+}
+
+func benchmarkKeyRegistration(numKeys int, b *testing.B) {
+ // setup
+ rootDB, err := db.OpenPair(b.Name(), true)
+ if err != nil {
+ b.Fail()
+ }
+ registry, err := makeParticipationRegistry(rootDB, logging.TestingLog(b))
+ if err != nil {
+ b.Fail()
+ }
+
+ // Insert records so that we can t
+ b.Run(fmt.Sprintf("KeyInsert_%d", numKeys), func(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ for key := 0; key < numKeys; key++ {
+ p := makeTestParticipation(key, basics.Round(0), basics.Round(1000000), 3)
+ registry.Insert(p)
+ }
+ }
+ })
+
+ // The first call to Register updates the DB.
+ b.Run(fmt.Sprintf("KeyRegistered_%d", numKeys), func(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ for key := 0; key < numKeys; key++ {
+ p := makeTestParticipation(key, basics.Round(0), basics.Round(1000000), 3)
+
+ // Unfortunately we need to repeatedly clear out the registration fields to ensure the
+ // db update runs each time this is called.
+ record := registry.cache[p.ID()]
+ record.EffectiveFirst = 0
+ record.EffectiveLast = 0
+ registry.cache[p.ID()] = record
+ registry.Register(p.ID(), 50)
+ }
+ }
+ })
+
+ // The keys should now be updated, so Register is a no-op.
+ b.Run(fmt.Sprintf("NoOp_%d", numKeys), func(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ for key := 0; key < numKeys; key++ {
+ p := makeTestParticipation(key, basics.Round(0), basics.Round(1000000), 3)
+ registry.Register(p.ID(), 50)
+ }
+ }
+ })
+}
+
+func BenchmarkKeyRegistration1(b *testing.B) { benchmarkKeyRegistration(1, b) }
+func BenchmarkKeyRegistration5(b *testing.B) { benchmarkKeyRegistration(5, b) }
+func BenchmarkKeyRegistration10(b *testing.B) { benchmarkKeyRegistration(10, b) }
+func BenchmarkKeyRegistration50(b *testing.B) { benchmarkKeyRegistration(50, b) }
diff --git a/data/accountManager.go b/data/accountManager.go
index 79a57287b..b615a211f 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -18,11 +18,11 @@ package data
import (
"fmt"
+ "time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -31,37 +31,27 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-// A ParticipationKeyIdentity defines the parameters that makes a pariticpation key unique.
-type ParticipationKeyIdentity struct {
- basics.Address // the address this participation key is used to vote for.
-
- // FirstValid and LastValid are inclusive.
- FirstValid basics.Round
- LastValid basics.Round
-
- VoteID crypto.OneTimeSignatureVerifier
- SelectionID crypto.VrfPubkey
-}
-
// AccountManager loads and manages accounts for the node
type AccountManager struct {
mu deadlock.Mutex
- partKeys map[ParticipationKeyIdentity]account.PersistedParticipation
+ partKeys map[account.ParticipationKeyIdentity]account.PersistedParticipation
// Map to keep track of accounts for which we've sent
// AccountRegistered telemetry events
registeredAccounts map[string]bool
- log logging.Logger
+ registry account.ParticipationRegistry
+ log logging.Logger
}
// MakeAccountManager creates a new AccountManager with a custom logger
-func MakeAccountManager(log logging.Logger) *AccountManager {
+func MakeAccountManager(log logging.Logger, registry account.ParticipationRegistry) *AccountManager {
manager := &AccountManager{}
manager.log = log
- manager.partKeys = make(map[ParticipationKeyIdentity]account.PersistedParticipation)
+ manager.partKeys = make(map[account.ParticipationKeyIdentity]account.PersistedParticipation)
manager.registeredAccounts = make(map[string]bool)
+ manager.registry = registry
return manager
}
@@ -77,6 +67,31 @@ func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participati
}
}
return out
+
+ // PKI TODO: source keys from the registry.
+ // This kinda works, but voting keys are not updated.
+ /*
+ for _, record := range manager.registry.GetAll() {
+ part := account.Participation{
+ Parent: record.Account,
+ VRF: record.VRF,
+ Voting: record.Voting,
+ FirstValid: record.FirstValid,
+ LastValid: record.LastValid,
+ KeyDilution: record.KeyDilution,
+ }
+
+ if part.OverlapsInterval(rnd, rnd) {
+ out = append(out, part)
+
+ id := part.ID()
+ if !bytes.Equal(id[:], record.ParticipationID[:]) {
+ manager.log.Warnf("Participation IDs do not equal while fetching keys... %s != %s\n", id, record.ParticipationID)
+ }
+ }
+ }
+ return out
+ */
}
// HasLiveKeys returns true if we have any Participation
@@ -97,18 +112,28 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool {
// The return value indicates if the key has been added (true) or
// if this is a duplicate key (false).
func (manager *AccountManager) AddParticipation(participation account.PersistedParticipation) bool {
+ // Tell the ParticipationRegistry about the Participation. Duplicate entries
+ // are ignored.
+ pid, err := manager.registry.Insert(participation.Participation)
+ if err != nil && err != account.ErrAlreadyInserted {
+ manager.log.Warnf("Failed to insert participation key.")
+ }
+ manager.log.Infof("Inserted key (%s) for account (%s) first valid (%d) last valid (%d)\n",
+ pid, participation.Parent, participation.FirstValid, participation.LastValid)
+
manager.mu.Lock()
defer manager.mu.Unlock()
address := participation.Address()
first, last := participation.ValidInterval()
- partkeyID := ParticipationKeyIdentity{
- Address: address,
+ partkeyID := account.ParticipationKeyIdentity{
+ Parent: address,
FirstValid: first,
LastValid: last,
+ VRFSK: participation.VRF.SK,
VoteID: participation.Voting.OneTimeSignatureVerifier,
- SelectionID: participation.VRF.PK,
+ KeyDilution: participation.KeyDilution,
}
// Check if we already have participation keys for this address in this interval
@@ -177,11 +202,40 @@ func (manager *AccountManager) DeleteOldKeys(latestHdr bookkeeping.BlockHeader,
}
}()
- // wait all all disk flushes, and report errors as they appear.
+ // wait for all disk flushes, and report errors as they appear.
for errString, errCh := range pendingItems {
err := <-errCh
if err != nil {
logging.Base().Warnf("%s: %v", errString, err)
}
}
+
+ // PKI TODO: This needs to update the partkeys also, see the 'DeleteOldKeys' function above, it's part
+ // is part of PersistedParticipation, but just calls 'part.Voting.DeleteBeforeFineGrained'
+ // Delete expired records from participation registry.
+ if err := manager.registry.DeleteExpired(latestHdr.Round); err != nil {
+ manager.log.Warnf("error while deleting expired records from participation registry: %w", err)
+ }
+}
+
+// Registry fetches the ParticipationRegistry.
+func (manager *AccountManager) Registry() account.ParticipationRegistry {
+ return manager.registry
+}
+
+// FlushRegistry tells the underlying participation registry to flush it's change cache to the DB.
+func (manager *AccountManager) FlushRegistry(timeout time.Duration) {
+ err := manager.registry.Flush(timeout)
+ if err != nil {
+ manager.log.Warnf("error while flushing the registry: %w", err)
+ }
+}
+
+// Record asynchronously records a participation key usage event.
+func (manager *AccountManager) Record(account basics.Address, round basics.Round, participationType account.ParticipationAction) {
+ // This function updates a cache in the ParticipationRegistry, we must call Flush to persist the changes.
+ err := manager.registry.Record(account, round, participationType)
+ if err != nil {
+ manager.log.Warnf("node.Record: Account %v not able to record participation (%d) on round %d: %w", account, participationType, round, err)
+ }
}
diff --git a/data/basics/ccertpart.go b/data/basics/ccertpart.go
new file mode 100644
index 000000000..097cb6c27
--- /dev/null
+++ b/data/basics/ccertpart.go
@@ -0,0 +1,50 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package basics
+
+import (
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// A Participant corresponds to an account whose AccountData.Status
+// is Online, and for which the expected sigRound satisfies
+// AccountData.VoteFirstValid <= sigRound <= AccountData.VoteLastValid.
+//
+// In the Algorand ledger, it is possible for multiple accounts to have
+// the same PK. Thus, the PK is not necessarily unique among Participants.
+// However, each account will produce a unique Participant struct, to avoid
+// potential DoS attacks where one account claims to have the same VoteID PK
+// as another account.
+type Participant struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ // PK is AccountData.VoteID.
+ PK crypto.OneTimeSignatureVerifier `codec:"p"`
+
+ // Weight is AccountData.MicroAlgos.
+ Weight uint64 `codec:"w"`
+
+ // KeyDilution is AccountData.KeyDilution() with the protocol for sigRound
+ // as expected by the Builder.
+ KeyDilution uint64 `codec:"d"`
+}
+
+// ToBeHashed implements the crypto.Hashable interface.
+func (p Participant) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.CompactCertPart, protocol.Encode(&p)
+}
diff --git a/data/basics/fields_test.go b/data/basics/fields_test.go
new file mode 100644
index 000000000..8027fa29a
--- /dev/null
+++ b/data/basics/fields_test.go
@@ -0,0 +1,201 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package basics_test
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+type typePath []string
+
+func (p typePath) addMapKey() typePath {
+ return append(p, "map_key")
+}
+
+func (p typePath) addValue() typePath {
+ return append(p, "value")
+}
+
+func (p typePath) addField(fieldName string) typePath {
+ return append(p, "field "+fieldName)
+}
+
+func (p typePath) validatePathFrom(t reflect.Type) error {
+ if len(p) == 0 {
+ // path is empty, so it's vacuously valid
+ return nil
+ }
+
+ value := p[0]
+ switch {
+ case value == "map_key":
+ return p[1:].validatePathFrom(t.Key())
+ case value == "value":
+ return p[1:].validatePathFrom(t.Elem())
+ case strings.HasPrefix(value, "field "):
+ fieldName := value[len("field "):]
+ fieldType, ok := t.FieldByName(fieldName)
+ if !ok {
+ return fmt.Errorf("Type '%s' does not have the field '%s'", t.Name(), fieldName)
+ }
+ return p[1:].validatePathFrom(fieldType.Type)
+ default:
+ return fmt.Errorf("Unexpected item in path: %s", value)
+ }
+}
+
+func (p typePath) Equals(other typePath) bool {
+ if len(p) != len(other) {
+ return false
+ }
+ for i := range p {
+ if p[i] != other[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (p typePath) String() string {
+ return strings.Join(p, "->")
+}
+
+func checkReferencedTypes(seen map[reflect.Type]bool, path typePath, typeStack []reflect.Type, check func(path typePath, stack []reflect.Type) bool) {
+ currentType := typeStack[len(typeStack)-1]
+
+ if _, seenType := seen[currentType]; seenType {
+ return
+ }
+
+ if !check(path, typeStack) {
+ // if currentType is not ok, don't visit its children
+ return
+ }
+
+ // add currentType to seen set, to avoid infinite recursion if currentType references itself
+ seen[currentType] = true
+
+ // after currentType's children are visited, "forget" the type, so we can examine it again if needed
+ // if this didn't happen, only 1 error per invalid type would get reported
+ defer delete(seen, currentType)
+
+ switch currentType.Kind() {
+ case reflect.Map:
+ newPath := path.addMapKey()
+ newStack := append(typeStack, currentType.Key())
+ checkReferencedTypes(seen, newPath, newStack, check)
+ fallthrough
+ case reflect.Array, reflect.Slice, reflect.Ptr:
+ newPath := path.addValue()
+ newStack := append(typeStack, currentType.Elem())
+ checkReferencedTypes(seen, newPath, newStack, check)
+ case reflect.Struct:
+ for i := 0; i < currentType.NumField(); i++ {
+ field := currentType.Field(i)
+ newPath := path.addField(field.Name)
+ newStack := append(typeStack, field.Type)
+ checkReferencedTypes(seen, newPath, newStack, check)
+ }
+ }
+}
+
+func makeTypeCheckFunction(t *testing.T, exceptions []typePath, startType reflect.Type) func(path typePath, stack []reflect.Type) bool {
+ for _, exception := range exceptions {
+ err := exception.validatePathFrom(startType)
+ require.NoError(t, err)
+ }
+
+ return func(path typePath, stack []reflect.Type) bool {
+ currentType := stack[len(stack)-1]
+
+ for _, exception := range exceptions {
+ if path.Equals(exception) {
+ t.Logf("Skipping exception for path: %s", path.String())
+ return true
+ }
+ }
+
+ switch currentType.Kind() {
+ case reflect.String:
+ t.Errorf("Invalid string type referenced from %s. Use []byte instead. Full path: %s", startType.Name(), path.String())
+ return false
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.UnsafePointer:
+ // raise an error if one of these strange types is referenced too
+ t.Errorf("Invalid type %s referenced from %s. Full path: %s", currentType.Name(), startType.Name(), path.String())
+ return false
+ default:
+ return true
+ }
+ }
+}
+
+func TestBlockFields(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ typeToCheck := reflect.TypeOf(bookkeeping.Block{})
+
+ // These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string.
+ exceptions := []typePath{
+ typePath{}.addField("BlockHeader").addField("GenesisID"),
+ typePath{}.addField("BlockHeader").addField("UpgradeState").addField("CurrentProtocol"),
+ typePath{}.addField("BlockHeader").addField("UpgradeState").addField("NextProtocol"),
+ typePath{}.addField("BlockHeader").addField("UpgradeVote").addField("UpgradePropose"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("Type"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("Header").addField("GenesisID"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("UnitName"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("AssetName"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("URL"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("GlobalDelta").addMapKey(),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("GlobalDelta").addValue().addField("Bytes"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("LocalDeltas").addValue().addMapKey(),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("LocalDeltas").addValue().addValue().addField("Bytes"),
+ typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("Logs").addValue(),
+ }
+
+ seen := make(map[reflect.Type]bool)
+
+ checkReferencedTypes(seen, nil, []reflect.Type{typeToCheck}, makeTypeCheckFunction(t, exceptions, typeToCheck))
+}
+
+func TestAccountDataFields(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ typeToCheck := reflect.TypeOf(basics.AccountData{})
+
+ // These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string.
+ exceptions := []typePath{
+ typePath{}.addField("AssetParams").addValue().addField("UnitName"),
+ typePath{}.addField("AssetParams").addValue().addField("AssetName"),
+ typePath{}.addField("AssetParams").addValue().addField("URL"),
+ typePath{}.addField("AppLocalStates").addValue().addField("KeyValue").addMapKey(),
+ typePath{}.addField("AppLocalStates").addValue().addField("KeyValue").addValue().addField("Bytes"),
+ typePath{}.addField("AppParams").addValue().addField("GlobalState").addMapKey(),
+ typePath{}.addField("AppParams").addValue().addField("GlobalState").addValue().addField("Bytes"),
+ }
+
+ seen := make(map[reflect.Type]bool)
+
+ checkReferencedTypes(seen, nil, []reflect.Type{typeToCheck}, makeTypeCheckFunction(t, exceptions, typeToCheck))
+}
diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go
index 73a759331..f76a37db5 100644
--- a/data/basics/msgp_gen.go
+++ b/data/basics/msgp_gen.go
@@ -107,6 +107,14 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
+// Participant
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// Round
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -2533,7 +2541,7 @@ func (z *AssetParams) MarshalMsg(b []byte) (o []byte) {
// omitempty: check for empty values
zb0002Len := uint32(11)
var zb0002Mask uint16 /* 12 bits */
- if (*z).MetadataHash == ([MetadataHashLength]byte{}) {
+ if (*z).MetadataHash == ([32]byte{}) {
zb0002Len--
zb0002Mask |= 0x2
}
@@ -2854,13 +2862,13 @@ func (_ *AssetParams) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *AssetParams) Msgsize() (s int) {
- s = 1 + 2 + msgp.Uint64Size + 3 + msgp.Uint32Size + 3 + msgp.BoolSize + 3 + msgp.StringPrefixSize + len((*z).UnitName) + 3 + msgp.StringPrefixSize + len((*z).AssetName) + 3 + msgp.StringPrefixSize + len((*z).URL) + 3 + msgp.ArrayHeaderSize + (MetadataHashLength * (msgp.ByteSize)) + 2 + (*z).Manager.Msgsize() + 2 + (*z).Reserve.Msgsize() + 2 + (*z).Freeze.Msgsize() + 2 + (*z).Clawback.Msgsize()
+ s = 1 + 2 + msgp.Uint64Size + 3 + msgp.Uint32Size + 3 + msgp.BoolSize + 3 + msgp.StringPrefixSize + len((*z).UnitName) + 3 + msgp.StringPrefixSize + len((*z).AssetName) + 3 + msgp.StringPrefixSize + len((*z).URL) + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 2 + (*z).Manager.Msgsize() + 2 + (*z).Reserve.Msgsize() + 2 + (*z).Freeze.Msgsize() + 2 + (*z).Clawback.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *AssetParams) MsgIsZero() bool {
- return ((*z).Total == 0) && ((*z).Decimals == 0) && ((*z).DefaultFrozen == false) && ((*z).UnitName == "") && ((*z).AssetName == "") && ((*z).URL == "") && ((*z).MetadataHash == ([MetadataHashLength]byte{})) && ((*z).Manager.MsgIsZero()) && ((*z).Reserve.MsgIsZero()) && ((*z).Freeze.MsgIsZero()) && ((*z).Clawback.MsgIsZero())
+ return ((*z).Total == 0) && ((*z).Decimals == 0) && ((*z).DefaultFrozen == false) && ((*z).UnitName == "") && ((*z).AssetName == "") && ((*z).URL == "") && ((*z).MetadataHash == ([32]byte{})) && ((*z).Manager.MsgIsZero()) && ((*z).Reserve.MsgIsZero()) && ((*z).Freeze.MsgIsZero()) && ((*z).Clawback.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -4118,6 +4126,158 @@ func (z DeltaAction) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *Participant) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(3)
+ var zb0001Mask uint8 /* 4 bits */
+ if (*z).KeyDilution == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).PK.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).Weight == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "d"
+ o = append(o, 0xa1, 0x64)
+ o = msgp.AppendUint64(o, (*z).KeyDilution)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "p"
+ o = append(o, 0xa1, 0x70)
+ o = (*z).PK.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "w"
+ o = append(o, 0xa1, 0x77)
+ o = msgp.AppendUint64(o, (*z).Weight)
+ }
+ }
+ return
+}
+
+func (_ *Participant) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Participant)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Participant) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).PK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PK")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Weight")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KeyDilution")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = Participant{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "p":
+ bts, err = (*z).PK.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PK")
+ return
+ }
+ case "w":
+ (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Weight")
+ return
+ }
+ case "d":
+ (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KeyDilution")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *Participant) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Participant)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Participant) Msgsize() (s int) {
+ s = 1 + 2 + (*z).PK.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *Participant) MsgIsZero() bool {
+ return ((*z).PK.MsgIsZero()) && ((*z).Weight == 0) && ((*z).KeyDilution == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z Round) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint64(o, uint64(z))
diff --git a/data/basics/msgp_gen_test.go b/data/basics/msgp_gen_test.go
index 5ac65240f..8756c3c4e 100644
--- a/data/basics/msgp_gen_test.go
+++ b/data/basics/msgp_gen_test.go
@@ -372,6 +372,66 @@ func BenchmarkUnmarshalBalanceRecord(b *testing.B) {
}
}
+func TestMarshalUnmarshalParticipant(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := Participant{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingParticipant(t *testing.T) {
+ protocol.RunEncodingTest(t, &Participant{})
+}
+
+func BenchmarkMarshalMsgParticipant(b *testing.B) {
+ v := Participant{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgParticipant(b *testing.B) {
+ v := Participant{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalParticipant(b *testing.B) {
+ v := Participant{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalStateDelta(t *testing.T) {
partitiontest.PartitionTest(t)
v := StateDelta{}
diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go
index 7f4390cee..a619438d3 100644
--- a/data/basics/userBalance.go
+++ b/data/basics/userBalance.go
@@ -96,6 +96,19 @@ func UnmarshalStatus(value string) (s Status, err error) {
return
}
+// OnlineAccountData contains the voting information for a single account.
+//msgp:ignore OnlineAccountData
+type OnlineAccountData struct {
+ MicroAlgosWithRewards MicroAlgos
+
+ VoteID crypto.OneTimeSignatureVerifier
+ SelectionID crypto.VRFVerifier
+
+ VoteFirstValid Round
+ VoteLastValid Round
+ VoteKeyDilution uint64
+}
+
// AccountData contains the data associated with a given address.
//
// This includes the account balance, cryptographic public keys,
@@ -310,9 +323,6 @@ const (
// AppCreatable is the CreatableType corresponds to apps
AppCreatable CreatableType = 1
-
- // MetadataHashLength is the number of bytes of the MetadataHash
- MetadataHashLength int = 32
)
// CreatableLocator stores both the creator, whose balance record contains
@@ -364,7 +374,7 @@ type AssetParams struct {
// MetadataHash specifies a commitment to some unspecified asset
// metadata. The format of this metadata is up to the application.
- MetadataHash [MetadataHashLength]byte `codec:"am"`
+ MetadataHash [32]byte `codec:"am"`
// Manager specifies an account that is allowed to change the
// non-zero addresses in this AssetParams.
@@ -400,6 +410,16 @@ func MakeAccountData(status Status, algos MicroAlgos) AccountData {
return AccountData{Status: status, MicroAlgos: algos}
}
+// ClearOnlineState resets the account's fields to indicate that the account is an offline account
+func (u *AccountData) ClearOnlineState() {
+ u.Status = Offline
+ u.VoteFirstValid = Round(0)
+ u.VoteLastValid = Round(0)
+ u.VoteKeyDilution = 0
+ u.VoteID = crypto.OneTimeSignatureVerifier{}
+ u.SelectionID = crypto.VRFVerifier{}
+}
+
// Money returns the amount of MicroAlgos associated with the user's account
func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (money MicroAlgos, rewards MicroAlgos) {
e := u.WithUpdatedRewards(proto, rewardsLevel)
@@ -469,20 +489,35 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) (res MicroAlgos)
return res
}
-// VotingStake returns the amount of MicroAlgos associated with the user's account
-// for the purpose of participating in the Algorand protocol. It assumes the
-// caller has already updated rewards appropriately using WithUpdatedRewards().
-func (u AccountData) VotingStake() MicroAlgos {
+// OnlineAccountData returns subset of AccountData as OnlineAccountData data structure.
+// Account is expected to be Online otherwise its is cleared out
+func (u AccountData) OnlineAccountData() OnlineAccountData {
if u.Status != Online {
- return MicroAlgos{Raw: 0}
+ // if the account is not Online and agreement requests it for some reason, clear it out
+ return OnlineAccountData{}
+ }
+
+ return OnlineAccountData{
+ MicroAlgosWithRewards: u.MicroAlgos,
+
+ VoteID: u.VoteID,
+ SelectionID: u.SelectionID,
+ VoteFirstValid: u.VoteFirstValid,
+ VoteLastValid: u.VoteLastValid,
+ VoteKeyDilution: u.VoteKeyDilution,
}
+}
- return u.MicroAlgos
+// VotingStake returns the amount of MicroAlgos associated with the user's account
+// for the purpose of participating in the Algorand protocol. It assumes the
+// caller has already updated rewards appropriately using WithUpdatedRewards().
+func (u OnlineAccountData) VotingStake() MicroAlgos {
+ return u.MicroAlgosWithRewards
}
// KeyDilution returns the key dilution for this account,
// returning the default key dilution if not explicitly specified.
-func (u AccountData) KeyDilution(proto config.ConsensusParams) uint64 {
+func (u OnlineAccountData) KeyDilution(proto config.ConsensusParams) uint64 {
if u.VoteKeyDilution != 0 {
return u.VoteKeyDilution
}
diff --git a/data/basics/userBalance_test.go b/data/basics/userBalance_test.go
index 1670fe58d..04a770d92 100644
--- a/data/basics/userBalance_test.go
+++ b/data/basics/userBalance_test.go
@@ -105,16 +105,11 @@ func makeString(len int) string {
return s
}
-func TestEncodedAccountDataSize(t *testing.T) {
- partitiontest.PartitionTest(t)
-
+func getSampleAccountData() AccountData {
oneTimeSecrets := crypto.GenerateOneTimeSignatureSecrets(0, 1)
vrfSecrets := crypto.GenerateVRFSecrets()
- maxStateSchema := StateSchema{
- NumUint: 0x1234123412341234,
- NumByteSlice: 0x1234123412341234,
- }
- ad := AccountData{
+
+ return AccountData{
Status: NotParticipating,
MicroAlgos: MicroAlgos{},
RewardsBase: 0x1234123412341234,
@@ -128,9 +123,19 @@ func TestEncodedAccountDataSize(t *testing.T) {
Assets: make(map[AssetIndex]AssetHolding),
AppLocalStates: make(map[AppIndex]AppLocalState),
AppParams: make(map[AppIndex]AppParams),
- TotalAppSchema: maxStateSchema,
AuthAddr: Address(crypto.Hash([]byte{1, 2, 3, 4})),
}
+}
+
+func TestEncodedAccountDataSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ maxStateSchema := StateSchema{
+ NumUint: 0x1234123412341234,
+ NumByteSlice: 0x1234123412341234,
+ }
+ ad := getSampleAccountData()
+ ad.TotalAppSchema = maxStateSchema
// TODO after applications enabled: change back to protocol.ConsensusCurrentVersion
currentConsensusParams := config.Consensus[protocol.ConsensusFuture]
@@ -253,3 +258,20 @@ func TestAppIndexHashing(t *testing.T) {
i = AppIndex(77)
require.Equal(t, "PCYUFPA2ZTOYWTP43MX2MOX2OWAIAXUDNC2WFCXAGMRUZ3DYD6BWFDL5YM", i.Address().String())
}
+
+func TestOnlineAccountData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ad := getSampleAccountData()
+ ad.MicroAlgos.Raw = 1000000
+ ad.Status = Offline
+
+ oad := ad.OnlineAccountData()
+ require.Empty(t, oad)
+
+ ad.Status = Online
+ oad = ad.OnlineAccountData()
+ require.Equal(t, ad.MicroAlgos, oad.MicroAlgosWithRewards)
+ require.Equal(t, ad.VoteID, oad.VoteID)
+ require.Equal(t, ad.SelectionID, oad.SelectionID)
+}
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index a60a9362c..bb9ae321b 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -125,6 +125,21 @@ type (
// for multiple types of certs.
//msgp:sort protocol.CompactCertType protocol.SortCompactCertType
CompactCert map[protocol.CompactCertType]CompactCertState `codec:"cc,allocbound=protocol.NumCompactCertTypes"`
+
+ // ParticipationUpdates contains the information needed to mark
+ // certain accounts offline because their participation keys expired
+ ParticipationUpdates
+ }
+
+ // ParticipationUpdates represents participation account data that
+ // needs to be checked/acted on by the network
+ ParticipationUpdates struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ // ExpiredParticipationAccounts contains a list of online accounts
+ // that needs to be converted to offline since their
+ // participation key expired.
+ ExpiredParticipationAccounts []basics.Address `codec:"partupdrmv,allocbound=config.MaxProposedExpiredOnlineAccounts"`
}
// RewardsState represents the global parameters controlling the rate
diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go
index 8b88b8fbe..e7a810e3d 100644
--- a/data/bookkeeping/msgp_gen.go
+++ b/data/bookkeeping/msgp_gen.go
@@ -5,7 +5,9 @@ package bookkeeping
import (
"sort"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/msgp/msgp"
)
@@ -59,6 +61,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// ParticipationUpdates
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// RewardsState
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -80,108 +90,112 @@ import (
func (z *Block) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(24)
- var zb0003Mask uint32 /* 27 bits */
+ zb0004Len := uint32(25)
+ var zb0004Mask uint32 /* 29 bits */
if len((*z).BlockHeader.CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x8
+ zb0004Len--
+ zb0004Mask |= 0x10
}
if (*z).BlockHeader.RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).BlockHeader.RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).BlockHeader.RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).BlockHeader.GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).BlockHeader.GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
+ }
+ if len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).BlockHeader.Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x10000
}
if (*z).BlockHeader.RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).BlockHeader.Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).BlockHeader.Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).BlockHeader.TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).BlockHeader.TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).BlockHeader.TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).Payset.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
if (*z).BlockHeader.UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x4000000
+ zb0004Len--
+ zb0004Mask |= 0x10000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x8) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x10) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).BlockHeader.CompactCert == nil {
@@ -201,117 +215,129 @@ func (z *Block) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).BlockHeader.RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).BlockHeader.GenesisID)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).BlockHeader.GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).BlockHeader.UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).BlockHeader.Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).BlockHeader.Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).BlockHeader.RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).BlockHeader.Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).BlockHeader.TxnCounter)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).BlockHeader.TimeStamp)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).BlockHeader.TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "txns"
o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73)
o = (*z).Payset.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).BlockHeader.UpgradeVote.UpgradeApprove)
@@ -329,214 +355,214 @@ func (_ *Block) CanMarshalMsg(z interface{}) bool {
func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).BlockHeader.UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).BlockHeader.TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).BlockHeader.CompactCert = nil
} else if (*z).BlockHeader.CompactCert == nil {
- (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0005)
+ (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -550,16 +576,45 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).BlockHeader.CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Payset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Payset")
return
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -570,11 +625,11 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = Block{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -714,27 +769,27 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0007 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumCompactCertTypes))
+ if zb0010 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 {
+ if zb0011 {
(*z).BlockHeader.CompactCert = nil
} else if (*z).BlockHeader.CompactCert == nil {
- (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0007)
+ (*z).BlockHeader.CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0010)
}
- for zb0007 > 0 {
+ for zb0010 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0007--
+ zb0010--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -747,6 +802,33 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).BlockHeader.CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0012 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0012 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0012]
+ } else {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0012)
+ }
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
case "txns":
bts, err = (*z).Payset.UnmarshalMsg(bts)
if err != nil {
@@ -781,13 +863,17 @@ func (z *Block) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
s += 5 + (*z).Payset.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *Block) MsgIsZero() bool {
- return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnRoot.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.CompactCert) == 0) && ((*z).Payset.MsgIsZero())
+ return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnRoot.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.CompactCert) == 0) && (len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Payset.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -822,104 +908,108 @@ func (z *BlockHash) MsgIsZero() bool {
func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(23)
- var zb0003Mask uint32 /* 26 bits */
+ zb0004Len := uint32(24)
+ var zb0004Mask uint32 /* 28 bits */
if len((*z).CompactCert) == 0 {
- zb0003Len--
- zb0003Mask |= 0x8
+ zb0004Len--
+ zb0004Mask |= 0x10
}
if (*z).RewardsState.RewardsLevel == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
+ zb0004Len--
+ zb0004Mask |= 0x20
}
if (*z).RewardsState.FeeSink.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20
+ zb0004Len--
+ zb0004Mask |= 0x40
}
if (*z).RewardsState.RewardsResidue == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
+ zb0004Len--
+ zb0004Mask |= 0x80
}
if (*z).GenesisID == "" {
- zb0003Len--
- zb0003Mask |= 0x80
+ zb0004Len--
+ zb0004Mask |= 0x100
}
if (*z).GenesisHash.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x100
+ zb0004Len--
+ zb0004Mask |= 0x200
}
if (*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x200
+ zb0004Len--
+ zb0004Mask |= 0x400
}
if (*z).UpgradeState.NextProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400
+ zb0004Len--
+ zb0004Mask |= 0x800
}
if (*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800
+ zb0004Len--
+ zb0004Mask |= 0x1000
}
if (*z).UpgradeState.NextProtocolApprovals == 0 {
- zb0003Len--
- zb0003Mask |= 0x1000
+ zb0004Len--
+ zb0004Mask |= 0x2000
+ }
+ if len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x4000
}
if (*z).Branch.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2000
+ zb0004Len--
+ zb0004Mask |= 0x8000
}
if (*z).UpgradeState.CurrentProtocol.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4000
+ zb0004Len--
+ zb0004Mask |= 0x10000
}
if (*z).RewardsState.RewardsRate == 0 {
- zb0003Len--
- zb0003Mask |= 0x8000
+ zb0004Len--
+ zb0004Mask |= 0x20000
}
if (*z).Round.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x10000
+ zb0004Len--
+ zb0004Mask |= 0x40000
}
if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x20000
+ zb0004Len--
+ zb0004Mask |= 0x80000
}
if (*z).RewardsState.RewardsPool.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x40000
+ zb0004Len--
+ zb0004Mask |= 0x100000
}
if (*z).Seed.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x80000
+ zb0004Len--
+ zb0004Mask |= 0x200000
}
if (*z).TxnCounter == 0 {
- zb0003Len--
- zb0003Mask |= 0x100000
+ zb0004Len--
+ zb0004Mask |= 0x400000
}
if (*z).TimeStamp == 0 {
- zb0003Len--
- zb0003Mask |= 0x200000
+ zb0004Len--
+ zb0004Mask |= 0x800000
}
if (*z).TxnRoot.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x400000
+ zb0004Len--
+ zb0004Mask |= 0x1000000
}
if (*z).UpgradeVote.UpgradeDelay.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x800000
+ zb0004Len--
+ zb0004Mask |= 0x2000000
}
if (*z).UpgradeVote.UpgradePropose.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x1000000
+ zb0004Len--
+ zb0004Mask |= 0x4000000
}
if (*z).UpgradeVote.UpgradeApprove == false {
- zb0003Len--
- zb0003Mask |= 0x2000000
+ zb0004Len--
+ zb0004Mask |= 0x8000000
}
- // variable map header, size zb0003Len
- o = msgp.AppendMapHeader(o, zb0003Len)
- if zb0003Len != 0 {
- if (zb0003Mask & 0x8) == 0 { // if not empty
+ // variable map header, size zb0004Len
+ o = msgp.AppendMapHeader(o, zb0004Len)
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x10) == 0 { // if not empty
// string "cc"
o = append(o, 0xa2, 0x63, 0x63)
if (*z).CompactCert == nil {
@@ -939,112 +1029,124 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0003Mask & 0x10) == 0 { // if not empty
+ if (zb0004Mask & 0x20) == 0 { // if not empty
// string "earn"
o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsLevel)
}
- if (zb0003Mask & 0x20) == 0 { // if not empty
+ if (zb0004Mask & 0x40) == 0 { // if not empty
// string "fees"
o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73)
o = (*z).RewardsState.FeeSink.MarshalMsg(o)
}
- if (zb0003Mask & 0x40) == 0 { // if not empty
+ if (zb0004Mask & 0x80) == 0 { // if not empty
// string "frac"
o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsResidue)
}
- if (zb0003Mask & 0x80) == 0 { // if not empty
+ if (zb0004Mask & 0x100) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).GenesisID)
}
- if (zb0003Mask & 0x100) == 0 { // if not empty
+ if (zb0004Mask & 0x200) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).GenesisHash.MarshalMsg(o)
}
- if (zb0003Mask & 0x200) == 0 { // if not empty
+ if (zb0004Mask & 0x400) == 0 { // if not empty
// string "nextbefore"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
o = (*z).UpgradeState.NextProtocolVoteBefore.MarshalMsg(o)
}
- if (zb0003Mask & 0x400) == 0 { // if not empty
+ if (zb0004Mask & 0x800) == 0 { // if not empty
// string "nextproto"
o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).UpgradeState.NextProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x800) == 0 { // if not empty
+ if (zb0004Mask & 0x1000) == 0 { // if not empty
// string "nextswitch"
o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68)
o = (*z).UpgradeState.NextProtocolSwitchOn.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000) == 0 { // if not empty
// string "nextyes"
o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73)
o = msgp.AppendUint64(o, (*z).UpgradeState.NextProtocolApprovals)
}
- if (zb0003Mask & 0x2000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).ParticipationUpdates.ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ParticipationUpdates.ExpiredParticipationAccounts)))
+ }
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ o = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x8000) == 0 { // if not empty
// string "prev"
o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76)
o = (*z).Branch.MarshalMsg(o)
}
- if (zb0003Mask & 0x4000) == 0 { // if not empty
+ if (zb0004Mask & 0x10000) == 0 { // if not empty
// string "proto"
o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f)
o = (*z).UpgradeState.CurrentProtocol.MarshalMsg(o)
}
- if (zb0003Mask & 0x8000) == 0 { // if not empty
+ if (zb0004Mask & 0x20000) == 0 { // if not empty
// string "rate"
o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65)
o = msgp.AppendUint64(o, (*z).RewardsState.RewardsRate)
}
- if (zb0003Mask & 0x10000) == 0 { // if not empty
+ if (zb0004Mask & 0x40000) == 0 { // if not empty
// string "rnd"
o = append(o, 0xa3, 0x72, 0x6e, 0x64)
o = (*z).Round.MarshalMsg(o)
}
- if (zb0003Mask & 0x20000) == 0 { // if not empty
+ if (zb0004Mask & 0x80000) == 0 { // if not empty
// string "rwcalr"
o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72)
o = (*z).RewardsState.RewardsRecalculationRound.MarshalMsg(o)
}
- if (zb0003Mask & 0x40000) == 0 { // if not empty
+ if (zb0004Mask & 0x100000) == 0 { // if not empty
// string "rwd"
o = append(o, 0xa3, 0x72, 0x77, 0x64)
o = (*z).RewardsState.RewardsPool.MarshalMsg(o)
}
- if (zb0003Mask & 0x80000) == 0 { // if not empty
+ if (zb0004Mask & 0x200000) == 0 { // if not empty
// string "seed"
o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64)
o = (*z).Seed.MarshalMsg(o)
}
- if (zb0003Mask & 0x100000) == 0 { // if not empty
+ if (zb0004Mask & 0x400000) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = msgp.AppendUint64(o, (*z).TxnCounter)
}
- if (zb0003Mask & 0x200000) == 0 { // if not empty
+ if (zb0004Mask & 0x800000) == 0 { // if not empty
// string "ts"
o = append(o, 0xa2, 0x74, 0x73)
o = msgp.AppendInt64(o, (*z).TimeStamp)
}
- if (zb0003Mask & 0x400000) == 0 { // if not empty
+ if (zb0004Mask & 0x1000000) == 0 { // if not empty
// string "txn"
o = append(o, 0xa3, 0x74, 0x78, 0x6e)
o = (*z).TxnRoot.MarshalMsg(o)
}
- if (zb0003Mask & 0x800000) == 0 { // if not empty
+ if (zb0004Mask & 0x2000000) == 0 { // if not empty
// string "upgradedelay"
o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79)
o = (*z).UpgradeVote.UpgradeDelay.MarshalMsg(o)
}
- if (zb0003Mask & 0x1000000) == 0 { // if not empty
+ if (zb0004Mask & 0x4000000) == 0 { // if not empty
// string "upgradeprop"
o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70)
o = (*z).UpgradeVote.UpgradePropose.MarshalMsg(o)
}
- if (zb0003Mask & 0x2000000) == 0 { // if not empty
+ if (zb0004Mask & 0x8000000) == 0 { // if not empty
// string "upgradeyes"
o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73)
o = msgp.AppendBool(o, (*z).UpgradeVote.UpgradeApprove)
@@ -1062,214 +1164,214 @@ func (_ *BlockHeader) CanMarshalMsg(z interface{}) bool {
func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Round.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Branch.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Branch")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).Seed.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Seed")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).TxnRoot.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnRoot")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).TimeStamp, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TimeStamp")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).RewardsState.FeeSink.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FeeSink")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).RewardsState.RewardsPool.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsPool")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).RewardsState.RewardsLevel, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsLevel")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).RewardsState.RewardsRate, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRate")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).RewardsState.RewardsResidue, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsResidue")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).RewardsState.RewardsRecalculationRound.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RewardsRecalculationRound")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.CurrentProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CurrentProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.NextProtocol.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocol")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).UpgradeState.NextProtocolApprovals, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolApprovals")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.NextProtocolVoteBefore.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolVoteBefore")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeState.NextProtocolSwitchOn.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "NextProtocolSwitchOn")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeVote.UpgradePropose.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradePropose")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
bts, err = (*z).UpgradeVote.UpgradeDelay.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeDelay")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).UpgradeVote.UpgradeApprove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UpgradeApprove")
return
}
}
- if zb0003 > 0 {
- zb0003--
+ if zb0004 > 0 {
+ zb0004--
(*z).TxnCounter, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "TxnCounter")
return
}
}
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0005 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(protocol.NumCompactCertTypes))
+ if zb0006 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
return
}
- if zb0006 {
+ if zb0007 {
(*z).CompactCert = nil
} else if (*z).CompactCert == nil {
- (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0005)
+ (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0006)
}
- for zb0005 > 0 {
+ for zb0006 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0005--
+ zb0006--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CompactCert")
@@ -1283,8 +1385,37 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).CompactCert[zb0001] = zb0002
}
}
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
+ if zb0004 > 0 {
+ zb0004--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0008 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0009 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ }
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -1295,11 +1426,11 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0004 {
+ if zb0005 {
(*z) = BlockHeader{}
}
- for zb0003 > 0 {
- zb0003--
+ for zb0004 > 0 {
+ zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -1439,27 +1570,27 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "cc":
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0007 > protocol.NumCompactCertTypes {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumCompactCertTypes))
+ if zb0010 > protocol.NumCompactCertTypes {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumCompactCertTypes))
err = msgp.WrapError(err, "CompactCert")
return
}
- if zb0008 {
+ if zb0011 {
(*z).CompactCert = nil
} else if (*z).CompactCert == nil {
- (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0007)
+ (*z).CompactCert = make(map[protocol.CompactCertType]CompactCertState, zb0010)
}
- for zb0007 > 0 {
+ for zb0010 > 0 {
var zb0001 protocol.CompactCertType
var zb0002 CompactCertState
- zb0007--
+ zb0010--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CompactCert")
@@ -1472,6 +1603,33 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).CompactCert[zb0001] = zb0002
}
+ case "partupdrmv":
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0012 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0013 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = nil
+ } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0012 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0012]
+ } else {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0012)
+ }
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ bts, err = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0003)
+ return
+ }
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -1500,12 +1658,16 @@ func (z *BlockHeader) Msgsize() (s int) {
s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
}
}
+ s += 11 + msgp.ArrayHeaderSize
+ for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
+ s += (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].Msgsize()
+ }
return
}
// MsgIsZero returns whether this is a zero value
func (z *BlockHeader) MsgIsZero() bool {
- return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnRoot.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).CompactCert) == 0)
+ return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnRoot.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).CompactCert) == 0) && (len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -2132,6 +2294,164 @@ func (z *GenesisAllocation) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *ParticipationUpdates) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0002Len := uint32(1)
+ var zb0002Mask uint8 /* 2 bits */
+ if len((*z).ExpiredParticipationAccounts) == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x2
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if zb0002Len != 0 {
+ if (zb0002Mask & 0x2) == 0 { // if not empty
+ // string "partupdrmv"
+ o = append(o, 0xaa, 0x70, 0x61, 0x72, 0x74, 0x75, 0x70, 0x64, 0x72, 0x6d, 0x76)
+ if (*z).ExpiredParticipationAccounts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ExpiredParticipationAccounts)))
+ }
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ o = (*z).ExpiredParticipationAccounts[zb0001].MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *ParticipationUpdates) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationUpdates)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ParticipationUpdates) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0004 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0005 {
+ (*z).ExpiredParticipationAccounts = nil
+ } else if (*z).ExpiredParticipationAccounts != nil && cap((*z).ExpiredParticipationAccounts) >= zb0004 {
+ (*z).ExpiredParticipationAccounts = ((*z).ExpiredParticipationAccounts)[:zb0004]
+ } else {
+ (*z).ExpiredParticipationAccounts = make([]basics.Address, zb0004)
+ }
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ bts, err = (*z).ExpiredParticipationAccounts[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts", zb0001)
+ return
+ }
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = ParticipationUpdates{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "partupdrmv":
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0006 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxProposedExpiredOnlineAccounts))
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts")
+ return
+ }
+ if zb0007 {
+ (*z).ExpiredParticipationAccounts = nil
+ } else if (*z).ExpiredParticipationAccounts != nil && cap((*z).ExpiredParticipationAccounts) >= zb0006 {
+ (*z).ExpiredParticipationAccounts = ((*z).ExpiredParticipationAccounts)[:zb0006]
+ } else {
+ (*z).ExpiredParticipationAccounts = make([]basics.Address, zb0006)
+ }
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ bts, err = (*z).ExpiredParticipationAccounts[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredParticipationAccounts", zb0001)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ParticipationUpdates) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ParticipationUpdates)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ParticipationUpdates) Msgsize() (s int) {
+ s = 1 + 11 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).ExpiredParticipationAccounts {
+ s += (*z).ExpiredParticipationAccounts[zb0001].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ParticipationUpdates) MsgIsZero() bool {
+ return (len((*z).ExpiredParticipationAccounts) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *RewardsState) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
diff --git a/data/bookkeeping/msgp_gen_test.go b/data/bookkeeping/msgp_gen_test.go
index 44ff62e2d..8bad96593 100644
--- a/data/bookkeeping/msgp_gen_test.go
+++ b/data/bookkeeping/msgp_gen_test.go
@@ -312,6 +312,66 @@ func BenchmarkUnmarshalGenesisAllocation(b *testing.B) {
}
}
+func TestMarshalUnmarshalParticipationUpdates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := ParticipationUpdates{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingParticipationUpdates(t *testing.T) {
+ protocol.RunEncodingTest(t, &ParticipationUpdates{})
+}
+
+func BenchmarkMarshalMsgParticipationUpdates(b *testing.B) {
+ v := ParticipationUpdates{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgParticipationUpdates(b *testing.B) {
+ v := ParticipationUpdates{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalParticipationUpdates(b *testing.B) {
+ v := ParticipationUpdates{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalRewardsState(t *testing.T) {
partitiontest.PartitionTest(t)
v := RewardsState{}
diff --git a/data/committee/committee.go b/data/committee/committee.go
index 2bb5b0bcb..71409b56a 100644
--- a/data/committee/committee.go
+++ b/data/committee/committee.go
@@ -41,7 +41,7 @@ type Selector interface {
// This struct is used to decouple LedgerReader.AccountData from basics.BalanceRecord.
//msgp:ignore BalanceRecord
type BalanceRecord struct {
- basics.AccountData
+ basics.OnlineAccountData
Addr basics.Address
}
diff --git a/data/committee/common_test.go b/data/committee/common_test.go
index 0f1ec6b72..05fba36bd 100644
--- a/data/committee/common_test.go
+++ b/data/committee/common_test.go
@@ -124,7 +124,7 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward
if !ok {
return false, BalanceRecord{}, Seed{}, basics.MicroAlgos{Raw: 0}
}
- return true, BalanceRecord{Addr: addr, AccountData: data}, seed, total
+ return true, BalanceRecord{Addr: addr, OnlineAccountData: data.OnlineAccountData()}, seed, total
}
selParamsList := func(addrs []basics.Address) (ok bool, records []BalanceRecord, seed Seed, total basics.MicroAlgos) {
diff --git a/data/committee/credential_test.go b/data/committee/credential_test.go
index 2132a8d1f..2160d72e5 100644
--- a/data/committee/credential_test.go
+++ b/data/committee/credential_test.go
@@ -100,7 +100,7 @@ func TestRichAccountSelected(t *testing.T) {
}
TotalMoney := basics.MicroAlgos{Raw: 1 << 50}
- record.MicroAlgos.Raw = TotalMoney.Raw / 2
+ record.MicroAlgosWithRewards.Raw = TotalMoney.Raw / 2
sel := AgreementSelector{
Seed: selectionSeed,
Round: round,
@@ -163,7 +163,7 @@ func TestPoorAccountSelectedLeaders(t *testing.T) {
Step: Propose,
}
- record.MicroAlgos.Raw = uint64(1000 / len(addresses))
+ record.MicroAlgosWithRewards.Raw = uint64(1000 / len(addresses))
m := Membership{
Record: record,
Selector: sel,
@@ -209,7 +209,7 @@ func TestPoorAccountSelectedCommittee(t *testing.T) {
Step: step,
}
- record.MicroAlgos.Raw = uint64(2000 / len(addresses))
+ record.MicroAlgosWithRewards.Raw = uint64(2000 / len(addresses))
m := Membership{
Record: record,
Selector: sel,
@@ -247,7 +247,7 @@ func TestNoMoneyAccountNotSelected(t *testing.T) {
Step: Propose,
}
- record.MicroAlgos.Raw = 0
+ record.MicroAlgosWithRewards.Raw = 0
m := Membership{
Record: record,
Selector: sel,
@@ -272,7 +272,7 @@ func TestLeadersSelected(t *testing.T) {
t.Errorf("can't read selection params")
}
- record.MicroAlgos.Raw = 50000
+ record.MicroAlgosWithRewards.Raw = 50000
totalMoney := basics.MicroAlgos{Raw: 100000}
sel := AgreementSelector{
@@ -304,7 +304,7 @@ func TestCommitteeSelected(t *testing.T) {
t.Errorf("can't read selection params")
}
- record.MicroAlgos.Raw = 50000
+ record.MicroAlgosWithRewards.Raw = 50000
totalMoney := basics.MicroAlgos{Raw: 100000}
sel := AgreementSelector{
@@ -341,7 +341,7 @@ func TestAccountNotSelected(t *testing.T) {
Period: period,
Step: Propose,
}
- record.MicroAlgos.Raw = 0
+ record.MicroAlgosWithRewards.Raw = 0
m := Membership{
Record: record,
Selector: sel,
@@ -384,7 +384,7 @@ func BenchmarkSortition(b *testing.B) {
Step: step,
}
- record.MicroAlgos.Raw = uint64(money[i])
+ record.MicroAlgosWithRewards.Raw = uint64(money[i])
m := Membership{
Record: record,
Selector: sel,
diff --git a/data/datatest/impls.go b/data/datatest/impls.go
index 10e47fed7..30fe35ba9 100644
--- a/data/datatest/impls.go
+++ b/data/datatest/impls.go
@@ -19,7 +19,6 @@ package datatest
import (
"context"
"fmt"
- "time"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -54,7 +53,7 @@ type entryFactoryImpl struct {
}
// AssembleBlock implements Ledger.AssembleBlock.
-func (i entryFactoryImpl) AssembleBlock(round basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (i entryFactoryImpl) AssembleBlock(round basics.Round) (agreement.ValidatedBlock, error) {
prev, err := i.l.BlockHdr(round - 1)
if err != nil {
return nil, fmt.Errorf("could not make proposals: could not read block from ledger at round %v: %v", round, err)
@@ -101,9 +100,10 @@ func (i ledgerImpl) LookupDigest(r basics.Round) (crypto.Digest, error) {
return crypto.Digest(blockhdr.Hash()), nil
}
-// Lookup implements Ledger.Lookup.
-func (i ledgerImpl) Lookup(r basics.Round, addr basics.Address) (basics.AccountData, error) {
- return i.l.Lookup(r, addr)
+// Lookup implements Ledger.LookupAgreement.
+func (i ledgerImpl) LookupAgreement(r basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
+ a, err := i.l.LookupAgreement(r, addr)
+ return a, err
}
// Circulation implements Ledger.Circulation.
diff --git a/data/ledger.go b/data/ledger.go
index d652d4c69..0767d2948 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -101,7 +101,7 @@ func LoadLedger(
l := &Ledger{
log: log,
}
- genesisInitState := ledger.InitState{
+ genesisInitState := ledgercore.InitState{
Block: genBlock,
Accounts: genesisBal.Balances,
GenesisHash: genesisHash,
@@ -316,7 +316,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er
// EnsureValidatedBlock ensures that the block, and associated certificate c, are
// written to the ledger, or that some other block for the same round is
// written to the ledger.
-func (l *Ledger) EnsureValidatedBlock(vb *ledger.ValidatedBlock, c agreement.Certificate) {
+func (l *Ledger) EnsureValidatedBlock(vb *ledgercore.ValidatedBlock, c agreement.Certificate) {
round := vb.Block().Round()
for l.LastRound() < round {
diff --git a/data/ledger_test.go b/data/ledger_test.go
index d2683620d..c49598d3c 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -37,7 +37,7 @@ import (
var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
-func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState ledger.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
+func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState ledgercore.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
var poolSecret, sinkSecret *crypto.SignatureSecrets
var seed crypto.Seed
diff --git a/data/pooldata/msgp_gen.go b/data/pooldata/msgp_gen.go
deleted file mode 100644
index 02a11cfa2..000000000
--- a/data/pooldata/msgp_gen.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package pooldata
-
-// Code generated by github.com/algorand/msgp DO NOT EDIT.
-
-import (
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/msgp/msgp"
-)
-
-// The following msgp objects are implemented in this file:
-// SignedTxnSlice
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-
-// MarshalMsg implements msgp.Marshaler
-func (z SignedTxnSlice) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- if z == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len(z)))
- }
- for za0001 := range z {
- o = z[za0001].MarshalMsg(o)
- }
- return
-}
-
-func (_ SignedTxnSlice) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(SignedTxnSlice)
- if !ok {
- _, ok = (z).(*SignedTxnSlice)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *SignedTxnSlice) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > config.MaxTxGroupSize {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(config.MaxTxGroupSize))
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = nil
- } else if (*z) != nil && cap((*z)) >= zb0002 {
- (*z) = (*z)[:zb0002]
- } else {
- (*z) = make(SignedTxnSlice, zb0002)
- }
- for zb0001 := range *z {
- bts, err = (*z)[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, zb0001)
- return
- }
- }
- o = bts
- return
-}
-
-func (_ *SignedTxnSlice) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*SignedTxnSlice)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z SignedTxnSlice) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize
- for za0001 := range z {
- s += z[za0001].Msgsize()
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z SignedTxnSlice) MsgIsZero() bool {
- return len(z) == 0
-}
diff --git a/data/pooldata/signedTxGroup.go b/data/pooldata/signedTxGroup.go
deleted file mode 100644
index fe4e1e24f..000000000
--- a/data/pooldata/signedTxGroup.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package pooldata
-
-import (
- "math"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
-)
-
-// SignedTxGroup used as the in-memory representation of a signed transaction group.
-// unlike the plain array of signed transactions, this includes transaction origination and counter
-// used by the transaction pool and the transaction sync
-//msgp:ignore SignedTxGroup
-type SignedTxGroup struct {
- // Transactions contains the signed transactions that are included in this transaction group.
- Transactions SignedTxnSlice
- // LocallyOriginated specify whether the trancation group was inroduced via the REST API or
- // by the transaction sync.
- LocallyOriginated bool
- // GroupCounter is a monotonic increasing counter, that provides an identify for each transaction group.
- // The transaction sync is using it as a way to scan the transactions group list more efficiently, as it
- // can continue scanning the list from the place where it last stopped.
- // GroupCounter is local, assigned when the group is first seen by the local transaction pool.
- GroupCounter uint64
- // GroupTransactionID is the hash of the entire transaction group.
- GroupTransactionID transactions.Txid
- // EncodedLength is the length, in bytes, of the messagepack encoding of all the transaction
- // within this transaction group.
- EncodedLength int
-}
-
-// SignedTxnSlice is a slice of SignedTxn(s), allowing us to
-// easily define the ID() function.
-//msgp:allocbound SignedTxnSlice config.MaxTxGroupSize
-type SignedTxnSlice []transactions.SignedTxn
-
-// ID calculate the hash of the signed transaction group.
-func (s SignedTxnSlice) ID() transactions.Txid {
- enc := s.MarshalMsg(append(protocol.GetEncodingBuf(), []byte(protocol.TxGroup)...))
- defer protocol.PutEncodingBuf(enc)
- return transactions.Txid(crypto.Hash(enc))
-}
-
-// InvalidSignedTxGroupCounter is used to represent an invalid GroupCounter value. It's being used to indicate
-// the absence of an entry within a []SignedTxGroup with a particular GroupCounter value.
-const InvalidSignedTxGroupCounter = uint64(math.MaxUint64)
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index 9477a380d..4295e82ff 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -28,7 +28,6 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -54,11 +53,6 @@ type TransactionPool struct {
// with atomic operations which require 64 bit alignment on arm.
feePerByte uint64
- // latestMeasuredDataExchangeRate is the average data exchange rate, as measured by the transaction sync.
- // we use the latestMeasuredDataExchangeRate in order to determine the desired proposal size, so that it
- // won't create undesired network bottlenecks.
- latestMeasuredDataExchangeRate uint64
-
// const
logProcessBlockStats bool
logAssembleStats bool
@@ -69,7 +63,7 @@ type TransactionPool struct {
mu deadlock.Mutex
cond sync.Cond
expiredTxCount map[basics.Round]int
- pendingBlockEvaluator *ledger.BlockEvaluator
+ pendingBlockEvaluator BlockEvaluator
numPendingWholeBlocks basics.Round
feeThresholdMultiplier uint64
statusCache *statusCache
@@ -81,31 +75,34 @@ type TransactionPool struct {
assemblyRound basics.Round
assemblyResults poolAsmResults
- // pendingMu protects pendingTxGroups, pendingTxids, pendingCounter and pendingLatestLocal
- pendingMu deadlock.RWMutex
- // pendingTxGroups is a slice of the pending transaction groups.
- pendingTxGroups []pooldata.SignedTxGroup
- // pendingTxids is a map of the pending *transaction ids* included in the pendingTxGroups array.
- pendingTxids map[transactions.Txid]transactions.SignedTxn
- // pendingCounter is a monotomic counter, indicating the next pending transaction group counter value.
- pendingCounter uint64
- // pendingLatestLocal is the value of the last transaction group counter which is associated with a transaction that was
- // locally originated ( i.e. posted to this node via the REST API )
- pendingLatestLocal uint64
+ // pendingMu protects pendingTxGroups and pendingTxids
+ pendingMu deadlock.RWMutex
+ pendingTxGroups [][]transactions.SignedTxn
+ pendingTxids map[transactions.Txid]transactions.SignedTxn
// Calls to remember() add transactions to rememberedTxGroups and
// rememberedTxids. Calling rememberCommit() adds them to the
// pendingTxGroups and pendingTxids. This allows us to batch the
// changes in OnNewBlock() without preventing a concurrent call
- // to PendingTxGroups().
- rememberedTxGroups []pooldata.SignedTxGroup
+ // to PendingTxGroups() or Verified().
+ rememberedTxGroups [][]transactions.SignedTxn
rememberedTxids map[transactions.Txid]transactions.SignedTxn
- // rememberedLatestLocal is the value of the last transaction group counter which is associated with a transaction that was
- // locally originated ( i.e. posted to this node via the REST API ). This variable is used when OnNewBlock is called and
- // we filter out the pending transaction through the evaluator.
- rememberedLatestLocal uint64
log logging.Logger
+
+ // proposalAssemblyTime is the ProposalAssemblyTime configured for this node.
+ proposalAssemblyTime time.Duration
+}
+
+// BlockEvaluator defines the block evaluator interface exposed by the ledger package.
+type BlockEvaluator interface {
+ TestTransactionGroup(txgroup []transactions.SignedTxn) error
+ Round() basics.Round
+ PaySetSize() int
+ TransactionGroup(txads []transactions.SignedTxnWithAD) error
+ Transaction(txn transactions.SignedTxn, ad transactions.ApplyData) error
+ GenerateBlock() (*ledgercore.ValidatedBlock, error)
+ ResetTxnBytes()
}
// MakeTransactionPool makes a transaction pool.
@@ -123,6 +120,7 @@ func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local, log logging.Lo
logAssembleStats: cfg.EnableAssembleStats,
expFeeFactor: cfg.TxPoolExponentialIncreaseFactor,
txPoolMaxSize: cfg.TxPoolSize,
+ proposalAssemblyTime: cfg.ProposalAssemblyTime,
log: log,
}
pool.cond.L = &pool.mu
@@ -137,7 +135,7 @@ type poolAsmResults struct {
// the ok variable indicates whether the assembly for the block roundStartedEvaluating was complete ( i.e. ok == true ) or
// whether it's still in-progress.
ok bool
- blk *ledger.ValidatedBlock
+ blk *ledgercore.ValidatedBlock
stats telemetryspec.AssembleBlockMetrics
err error
// roundStartedEvaluating is the round which we were attempted to evaluate last. It's a good measure for
@@ -164,14 +162,6 @@ const (
// duration it would take to execute the GenerateBlock() function
generateBlockBaseDuration = 2 * time.Millisecond
generateBlockTransactionDuration = 2155 * time.Nanosecond
-
- // minMaxTxnBytesPerBlock is the minimal maximum block size that the evaluator would be asked to create, in case
- // the local node doesn't have sufficient bandwidth to support higher throughputs.
- // for example: a node that has a very low bandwidth of 10KB/s. If we will follow the block size calculations, we
- // would get to an unrealistic block size of 20KB. This could be due to a temporary network bandwidth fluctuations
- // or other measuring issue. In order to ensure we have some more realistic block sizes to
- // work with, we clamp the block size to the range of [minMaxTxnBytesPerBlock .. proto.MaxTxnBytesPerBlock].
- minMaxTxnBytesPerBlock = 100 * 1024
)
// ErrStaleBlockAssemblyRequest returned by AssembleBlock when requested block number is older than the current transaction pool round
@@ -180,9 +170,11 @@ var ErrStaleBlockAssemblyRequest = fmt.Errorf("AssembleBlock: requested block as
// Reset resets the content of the transaction pool
func (pool *TransactionPool) Reset() {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+ defer pool.cond.Broadcast()
pool.pendingTxids = make(map[transactions.Txid]transactions.SignedTxn)
pool.pendingTxGroups = nil
- pool.pendingLatestLocal = pooldata.InvalidSignedTxGroupCounter
pool.rememberedTxids = make(map[transactions.Txid]transactions.SignedTxn)
pool.rememberedTxGroups = nil
pool.expiredTxCount = make(map[basics.Round]int)
@@ -216,15 +208,14 @@ func (pool *TransactionPool) PendingTxIDs() []transactions.Txid {
}
// PendingTxGroups returns a list of transaction groups that should be proposed
-// in the next block, in order. As the second return value, it returns the transaction
-// group counter of the latest local generated transaction group.
-func (pool *TransactionPool) PendingTxGroups() ([]pooldata.SignedTxGroup, uint64) {
+// in the next block, in order.
+func (pool *TransactionPool) PendingTxGroups() [][]transactions.SignedTxn {
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
// note that this operation is safe for the sole reason that arrays in go are immutable.
// if the underlaying array need to be expanded, the actual underlaying array would need
// to be reallocated.
- return pool.pendingTxGroups, pool.pendingLatestLocal
+ return pool.pendingTxGroups
}
// pendingTxIDsCount returns the number of pending transaction ids that are still waiting
@@ -248,26 +239,8 @@ func (pool *TransactionPool) rememberCommit(flush bool) {
if flush {
pool.pendingTxGroups = pool.rememberedTxGroups
pool.pendingTxids = pool.rememberedTxids
- pool.pendingLatestLocal = pool.rememberedLatestLocal
pool.ledger.VerifiedTransactionCache().UpdatePinned(pool.pendingTxids)
} else {
- // update the GroupCounter on all the transaction groups we're going to add.
- // this would ensure that each transaction group has a unique monotonic GroupCounter
- encodingBuf := protocol.GetEncodingBuf()
- for i, txGroup := range pool.rememberedTxGroups {
- pool.pendingCounter++
- txGroup.GroupCounter = pool.pendingCounter
- txGroup.EncodedLength = 0
- for _, txn := range txGroup.Transactions {
- encodingBuf = encodingBuf[:0]
- txGroup.EncodedLength += len(txn.MarshalMsg(encodingBuf))
- }
- pool.rememberedTxGroups[i] = txGroup
- if txGroup.LocallyOriginated {
- pool.pendingLatestLocal = txGroup.GroupCounter
- }
- }
- protocol.PutEncodingBuf(encodingBuf)
pool.pendingTxGroups = append(pool.pendingTxGroups, pool.rememberedTxGroups...)
for txid, txn := range pool.rememberedTxids {
@@ -275,15 +248,8 @@ func (pool *TransactionPool) rememberCommit(flush bool) {
}
}
- pool.resetRememberedTransactionGroups()
-}
-
-// resetRememberedTransactionGroups clears the remembered transaction groups.
-// The caller is assumed to be holding pool.mu.
-func (pool *TransactionPool) resetRememberedTransactionGroups() {
pool.rememberedTxGroups = nil
pool.rememberedTxids = make(map[transactions.Txid]transactions.SignedTxn)
- pool.rememberedLatestLocal = pooldata.InvalidSignedTxGroupCounter
}
// PendingCount returns the number of transactions currently pending in the pool.
@@ -298,7 +264,7 @@ func (pool *TransactionPool) PendingCount() int {
func (pool *TransactionPool) pendingCountNoLock() int {
var count int
for _, txgroup := range pool.pendingTxGroups {
- count += len(txgroup.Transactions)
+ count += len(txgroup)
}
return count
}
@@ -362,12 +328,12 @@ func (pool *TransactionPool) computeFeePerByte() uint64 {
// checkSufficientFee take a set of signed transactions and verifies that each transaction has
// sufficient fee to get into the transaction pool
-func (pool *TransactionPool) checkSufficientFee(txgroup pooldata.SignedTxGroup) error {
+func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error {
// Special case: the compact cert transaction, if issued from the
// special compact-cert-sender address, in a singleton group, pays
// no fee.
- if len(txgroup.Transactions) == 1 {
- t := txgroup.Transactions[0].Txn
+ if len(txgroup) == 1 {
+ t := txgroup[0].Txn
if t.Type == protocol.CompactCertTx && t.Sender == transactions.CompactCertSender && t.Fee.IsZero() {
return nil
}
@@ -376,7 +342,7 @@ func (pool *TransactionPool) checkSufficientFee(txgroup pooldata.SignedTxGroup)
// get the current fee per byte
feePerByte := pool.computeFeePerByte()
- for _, t := range txgroup.Transactions {
+ for _, t := range txgroup {
feeThreshold := feePerByte * uint64(t.GetEncodedLength())
if t.Txn.Fee.Raw < feeThreshold {
return fmt.Errorf("fee %d below threshold %d (%d per byte * %d bytes)",
@@ -410,7 +376,7 @@ type poolIngestParams struct {
}
// remember attempts to add a transaction group to the pool.
-func (pool *TransactionPool) remember(txgroup pooldata.SignedTxGroup) error {
+func (pool *TransactionPool) remember(txgroup []transactions.SignedTxn) error {
params := poolIngestParams{
recomputing: false,
}
@@ -419,7 +385,7 @@ func (pool *TransactionPool) remember(txgroup pooldata.SignedTxGroup) error {
// add tries to add the transaction group to the pool, bypassing the fee
// priority checks.
-func (pool *TransactionPool) add(txgroup pooldata.SignedTxGroup, stats *telemetryspec.AssembleBlockMetrics) error {
+func (pool *TransactionPool) add(txgroup []transactions.SignedTxn, stats *telemetryspec.AssembleBlockMetrics) error {
params := poolIngestParams{
recomputing: true,
stats: stats,
@@ -432,7 +398,7 @@ func (pool *TransactionPool) add(txgroup pooldata.SignedTxGroup, stats *telemetr
//
// ingest assumes that pool.mu is locked. It might release the lock
// while it waits for OnNewBlock() to be called.
-func (pool *TransactionPool) ingest(txgroup pooldata.SignedTxGroup, params poolIngestParams) error {
+func (pool *TransactionPool) ingest(txgroup []transactions.SignedTxn, params poolIngestParams) error {
if pool.pendingBlockEvaluator == nil {
return fmt.Errorf("TransactionPool.ingest: no pending block evaluator")
}
@@ -454,10 +420,6 @@ func (pool *TransactionPool) ingest(txgroup pooldata.SignedTxGroup, params poolI
if err != nil {
return err
}
-
- // since this is the first time the transaction was added to the transaction pool, it would
- // be a good time now to figure the group's ID.
- txgroup.GroupTransactionID = txgroup.Transactions.ID()
}
err := pool.addToPendingBlockEvaluator(txgroup, params.recomputing, params.stats)
@@ -466,19 +428,22 @@ func (pool *TransactionPool) ingest(txgroup pooldata.SignedTxGroup, params poolI
}
pool.rememberedTxGroups = append(pool.rememberedTxGroups, txgroup)
- for _, t := range txgroup.Transactions {
+ for _, t := range txgroup {
pool.rememberedTxids[t.ID()] = t
}
-
return nil
}
+// RememberOne stores the provided transaction.
+// Precondition: Only RememberOne() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
+func (pool *TransactionPool) RememberOne(t transactions.SignedTxn) error {
+ return pool.Remember([]transactions.SignedTxn{t})
+}
+
// Remember stores the provided transaction group.
// Precondition: Only Remember() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
-// The function is called by the transaction handler ( i.e. txsync or gossip ) or by the node when
-// transaction is coming from a REST API call.
-func (pool *TransactionPool) Remember(txgroup pooldata.SignedTxGroup) error {
- if err := pool.checkPendingQueueSize(len(txgroup.Transactions)); err != nil {
+func (pool *TransactionPool) Remember(txgroup []transactions.SignedTxn) error {
+ if err := pool.checkPendingQueueSize(len(txgroup)); err != nil {
return err
}
@@ -494,34 +459,6 @@ func (pool *TransactionPool) Remember(txgroup pooldata.SignedTxGroup) error {
return nil
}
-// RememberArray stores the provided transaction group.
-// Precondition: Only RememberArray() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
-// The function is called by the transaction handler ( i.e. txsync )
-func (pool *TransactionPool) RememberArray(txgroups []pooldata.SignedTxGroup) error {
- totalSize := 0
- for _, txGroup := range txgroups {
- totalSize += len(txGroup.Transactions)
- }
- if err := pool.checkPendingQueueSize(totalSize); err != nil {
- return err
- }
-
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- for _, txGroup := range txgroups {
- err := pool.remember(txGroup)
- if err != nil {
- // we need to explicitly clear the remembered transaction groups here, since we might have added the first one successfully and then failing on the second one.
- pool.resetRememberedTransactionGroups()
- return fmt.Errorf("TransactionPool.RememberArray: %w", err)
- }
- }
-
- pool.rememberCommit(false)
- return nil
-}
-
// Lookup returns the error associated with a transaction that used
// to be in the pool. If no status information is available (e.g., because
// it was too long ago, or the transaction committed successfully), then
@@ -625,9 +562,9 @@ func (pool *TransactionPool) isAssemblyTimedOut() bool {
return time.Now().After(pool.assemblyDeadline.Add(-generateBlockDuration))
}
-func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup pooldata.SignedTxGroup, recomputing bool, stats *telemetryspec.AssembleBlockMetrics) error {
+func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactions.SignedTxn, recomputing bool, stats *telemetryspec.AssembleBlockMetrics) error {
r := pool.pendingBlockEvaluator.Round() + pool.numPendingWholeBlocks
- for _, tx := range txgroup.Transactions {
+ for _, tx := range txgroup {
if tx.Txn.LastValid < r {
return transactions.TxnDeadError{
Round: r,
@@ -637,7 +574,7 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup pooldata.Sig
}
}
- txgroupad := transactions.WrapSignedTxnsWithAD(txgroup.Transactions)
+ txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
transactionGroupStartsTime := time.Time{}
if recomputing {
@@ -660,10 +597,10 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup pooldata.Sig
stats.StopReason = telemetryspec.AssembleBlockAbandon
pool.assemblyResults.stats = *stats
pool.assemblyCond.Broadcast()
- } else if err == ledger.ErrNoSpace || pool.isAssemblyTimedOut() {
+ } else if err == ledgercore.ErrNoSpace || pool.isAssemblyTimedOut() {
pool.assemblyResults.ok = true
pool.assemblyResults.assemblyCompletedOrAbandoned = true
- if err == ledger.ErrNoSpace {
+ if err == ledgercore.ErrNoSpace {
stats.StopReason = telemetryspec.AssembleBlockFull
} else {
stats.StopReason = telemetryspec.AssembleBlockTimeout
@@ -690,9 +627,9 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup pooldata.Sig
return err
}
-func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup pooldata.SignedTxGroup, recomputing bool, stats *telemetryspec.AssembleBlockMetrics) error {
+func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup []transactions.SignedTxn, recomputing bool, stats *telemetryspec.AssembleBlockMetrics) error {
err := pool.addToPendingBlockEvaluatorOnce(txgroup, recomputing, stats)
- if err == ledger.ErrNoSpace {
+ if err == ledgercore.ErrNoSpace {
pool.numPendingWholeBlocks++
pool.pendingBlockEvaluator.ResetTxnBytes()
err = pool.addToPendingBlockEvaluatorOnce(txgroup, recomputing, stats)
@@ -747,8 +684,12 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
if hint < 0 || int(knownCommitted) < 0 {
hint = 0
}
- pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, hint, pool.calculateMaxTxnBytesPerBlock(next.BlockHeader.CurrentProtocol))
+ pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, hint, 0)
if err != nil {
+ // The pendingBlockEvaluator is an interface, and in case of an evaluator error
+ // we want to remove the interface itself rather then keeping an interface
+ // to a nil.
+ pool.pendingBlockEvaluator = nil
var nonSeqBlockEval ledgercore.ErrNonSequentialBlockEval
if errors.As(err, &nonSeqBlockEval) {
if nonSeqBlockEval.EvaluatorRound <= nonSeqBlockEval.LatestRound {
@@ -768,17 +709,17 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
// Feed the transactions in order
for _, txgroup := range txgroups {
- if len(txgroup.Transactions) == 0 {
+ if len(txgroup) == 0 {
asmStats.InvalidCount++
continue
}
- if _, alreadyCommitted := committedTxIds[txgroup.Transactions[0].ID()]; alreadyCommitted {
+ if _, alreadyCommitted := committedTxIds[txgroup[0].ID()]; alreadyCommitted {
asmStats.EarlyCommittedCount++
continue
}
err := pool.add(txgroup, &asmStats)
if err != nil {
- for _, tx := range txgroup.Transactions {
+ for _, tx := range txgroup {
pool.statusCache.put(tx, err.Error())
}
@@ -798,8 +739,6 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
stats.RemovedInvalidCount++
pool.log.Warnf("Cannot re-add pending transaction to pool: %v", err)
}
- } else if txgroup.LocallyOriginated {
- pool.rememberedLatestLocal = txgroup.GroupCounter
}
}
@@ -809,7 +748,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
// assembly. We want to figure out how long have we spent before trying to evaluate the first transaction.
// ( ideally it's near zero. The goal here is to see if we get to a near time-out situation before processing the
// first transaction group )
- asmStats.TransactionsLoopStartTime = int64(firstTxnGrpTime.Sub(pool.assemblyDeadline.Add(-config.ProposalAssemblyTime)))
+ asmStats.TransactionsLoopStartTime = int64(firstTxnGrpTime.Sub(pool.assemblyDeadline.Add(-pool.proposalAssemblyTime)))
}
if !pool.assemblyResults.ok && pool.assemblyRound <= pool.pendingBlockEvaluator.Round() {
@@ -834,7 +773,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
// AssembleBlock assembles a block for a given round, trying not to
// take longer than deadline to finish.
-func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Time) (assembled *ledger.ValidatedBlock, err error) {
+func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Time) (assembled *ledgercore.ValidatedBlock, err error) {
var stats telemetryspec.AssembleBlockMetrics
if pool.logAssembleStats {
@@ -975,7 +914,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim
// assembleEmptyBlock construct a new block for the given round. Internally it's using the ledger database calls, so callers
// need to be aware that it might take a while before it would return.
-func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *ledger.ValidatedBlock, err error) {
+func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *ledgercore.ValidatedBlock, err error) {
prevRound := round - 1
prev, err := pool.ledger.BlockHdr(prevRound)
if err != nil {
@@ -983,7 +922,7 @@ func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *
return nil, err
}
next := bookkeeping.MakeBlock(prev)
- blockEval, err := pool.ledger.StartEvaluator(next.BlockHeader, 0, pool.calculateMaxTxnBytesPerBlock(next.BlockHeader.CurrentProtocol))
+ blockEval, err := pool.ledger.StartEvaluator(next.BlockHeader, 0, 0)
if err != nil {
var nonSeqBlockEval ledgercore.ErrNonSequentialBlockEval
if errors.As(err, &nonSeqBlockEval) {
@@ -999,48 +938,8 @@ func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *
return blockEval.GenerateBlock()
}
-// SetDataExchangeRate updates the data exchange rate this node is expected to have.
-func (pool *TransactionPool) SetDataExchangeRate(dataExchangeRate uint64) {
- atomic.StoreUint64(&pool.latestMeasuredDataExchangeRate, dataExchangeRate)
-}
-
-// calculateMaxTxnBytesPerBlock computes the optimal block size for the current node, based
-// on it's effective network capabilities. This number is bound by the protocol MaxTxnBytesPerBlock.
-func (pool *TransactionPool) calculateMaxTxnBytesPerBlock(consensusVersion protocol.ConsensusVersion) int {
- // get the latest data exchange rate we received from the transaction sync.
- dataExchangeRate := atomic.LoadUint64(&pool.latestMeasuredDataExchangeRate)
-
- // if we never received an update from the transaction sync connector about the data exchange rate,
- // just let the evaluator use the consensus's default value.
- if dataExchangeRate == 0 {
- return 0
- }
-
- // get the consensus parameters for the given consensus version.
- proto, ok := config.Consensus[consensusVersion]
- if !ok {
- // if we can't figure out the consensus version, just return 0.
- return 0
- }
-
- // calculate the amount of data we can send in half of the agreement period.
- halfMaxBlockSize := int(time.Duration(dataExchangeRate)*proto.AgreementFilterTimeoutPeriod0/time.Second) / 2
-
- // if the amount of data is too high, bound it by the consensus parameters.
- if halfMaxBlockSize > proto.MaxTxnBytesPerBlock {
- return proto.MaxTxnBytesPerBlock
- }
-
- // if the amount of data is too low, use the low transaction bytes threshold.
- if halfMaxBlockSize < minMaxTxnBytesPerBlock {
- return minMaxTxnBytesPerBlock
- }
-
- return halfMaxBlockSize
-}
-
// AssembleDevModeBlock assemble a new block from the existing transaction pool. The pending evaluator is being
-func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledger.ValidatedBlock, err error) {
+func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledgercore.ValidatedBlock, err error) {
pool.mu.Lock()
defer pool.mu.Unlock()
@@ -1049,6 +948,6 @@ func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledger.Validated
// The above was already pregenerating the entire block,
// so there won't be any waiting on this call.
- assembled, err = pool.AssembleBlock(pool.pendingBlockEvaluator.Round(), time.Now().Add(config.ProposalAssemblyTime))
+ assembled, err = pool.AssembleBlock(pool.pendingBlockEvaluator.Round(), time.Now().Add(pool.proposalAssemblyTime))
return
}
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index f9ddb4b00..4e65333b6 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -29,7 +29,6 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger"
@@ -56,15 +55,6 @@ type TestingT interface {
var minBalance = config.Consensus[protocol.ConsensusCurrentVersion].MinBalance
-// RememberOne stores the provided transaction.
-// Precondition: Only RememberOne() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
-func (pool *TransactionPool) RememberOne(t transactions.SignedTxn) error {
- txgroup := pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{t},
- }
- return pool.Remember(txgroup)
-}
-
func mockLedger(t TestingT, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) *ledger.Ledger {
var hash crypto.Digest
crypto.RandBytes(hash[:])
@@ -95,9 +85,9 @@ func mockLedger(t TestingT, initAccounts map[basics.Address]basics.AccountData,
fn := fmt.Sprintf("/tmp/%s.%d.sqlite3", t.Name(), crypto.RandUint64())
const inMem = true
- genesisInitState := ledger.InitState{Block: initBlock, Accounts: initAccounts, GenesisHash: hash}
+ genesisInitState := ledgercore.InitState{Block: initBlock, Accounts: initAccounts, GenesisHash: hash}
cfg := config.GetDefaultLocal()
- cfg.Archival = false
+ cfg.Archival = true
l, err := ledger.OpenLedger(logging.Base(), fn, true, genesisInitState, cfg)
require.NoError(t, err)
return l
@@ -111,7 +101,7 @@ func makeMockLedgerFuture(t TestingT, initAccounts map[basics.Address]basics.Acc
return mockLedger(t, initAccounts, protocol.ConsensusFuture)
}
-func newBlockEvaluator(t TestingT, l *ledger.Ledger) *ledger.BlockEvaluator {
+func newBlockEvaluator(t TestingT, l *ledger.Ledger) BlockEvaluator {
latest := l.Latest()
prev, err := l.BlockHdr(latest)
require.NoError(t, err)
@@ -568,7 +558,7 @@ func TestRememberForget(t *testing.T) {
}
}
- pending, _ := transactionPool.PendingTxGroups()
+ pending := transactionPool.PendingTxGroups()
numberOfTxns := numOfAccounts*numOfAccounts - numOfAccounts
require.Len(t, pending, numberOfTxns)
@@ -579,7 +569,7 @@ func TestRememberForget(t *testing.T) {
require.NoError(t, err)
transactionPool.OnNewBlock(blk.Block(), ledgercore.StateDelta{})
- pending, _ = transactionPool.PendingTxGroups()
+ pending = transactionPool.PendingTxGroups()
require.Len(t, pending, 0)
}
@@ -644,7 +634,7 @@ func TestCleanUp(t *testing.T) {
transactionPool.OnNewBlock(blk.Block(), ledgercore.StateDelta{})
}
- pending, _ := transactionPool.PendingTxGroups()
+ pending := transactionPool.PendingTxGroups()
require.Zero(t, len(pending))
require.Zero(t, transactionPool.NumExpired(4))
require.Equal(t, issuedTransactions, transactionPool.NumExpired(5))
@@ -718,7 +708,7 @@ func TestFixOverflowOnNewBlock(t *testing.T) {
}
}
}
- pending, _ := transactionPool.PendingTxGroups()
+ pending := transactionPool.PendingTxGroups()
require.Len(t, pending, savedTransactions)
secret := keypair()
@@ -754,7 +744,7 @@ func TestFixOverflowOnNewBlock(t *testing.T) {
transactionPool.OnNewBlock(block.Block(), ledgercore.StateDelta{})
- pending, _ = transactionPool.PendingTxGroups()
+ pending = transactionPool.PendingTxGroups()
// only one transaction is missing
require.Len(t, pending, savedTransactions-1)
}
@@ -862,15 +852,7 @@ func TestRemove(t *testing.T) {
}
signedTx := tx.Sign(secrets[0])
require.NoError(t, transactionPool.RememberOne(signedTx))
- pendingTxGroups, _ := transactionPool.PendingTxGroups()
- require.Equal(t, []pooldata.SignedTxGroup{
- {
- Transactions: []transactions.SignedTxn{signedTx},
- GroupCounter: 1,
- GroupTransactionID: (pooldata.SignedTxnSlice{signedTx}).ID(),
- EncodedLength: len(signedTx.MarshalMsg([]byte{})),
- },
- }, pendingTxGroups)
+ require.Equal(t, transactionPool.PendingTxGroups(), [][]transactions.SignedTxn{{signedTx}})
}
func TestLogicSigOK(t *testing.T) {
@@ -1185,7 +1167,7 @@ func BenchmarkTransactionPoolSteadyState(b *testing.B) {
for len(ledgerTxnQueue) > 0 {
stx := ledgerTxnQueue[0]
err := eval.Transaction(stx, transactions.ApplyData{})
- if err == ledger.ErrNoSpace {
+ if err == ledgercore.ErrNoSpace {
break
}
require.NoError(b, err)
@@ -1256,7 +1238,7 @@ func TestTxPoolSizeLimits(t *testing.T) {
}
for groupSize := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxGroupSize; groupSize > 0; groupSize-- {
- var txgroup pooldata.SignedTxGroup
+ var txgroup []transactions.SignedTxn
// fill the transaction group with groupSize transactions.
for i := 0; i < groupSize; i++ {
tx := transactions.Transaction{
@@ -1275,7 +1257,7 @@ func TestTxPoolSizeLimits(t *testing.T) {
},
}
signedTx := tx.Sign(secrets[0])
- txgroup.Transactions = append(txgroup.Transactions, signedTx)
+ txgroup = append(txgroup, signedTx)
uniqueTxID++
}
@@ -1285,7 +1267,7 @@ func TestTxPoolSizeLimits(t *testing.T) {
if groupSize > 1 {
// add a single transaction and ensure we succeed
// consume the transaction of allowed limit
- require.NoError(t, transactionPool.RememberOne(txgroup.Transactions[0]))
+ require.NoError(t, transactionPool.RememberOne(txgroup[0]))
}
}
}
diff --git a/data/transactions/application.go b/data/transactions/application.go
index 1faef7998..4588ed22f 100644
--- a/data/transactions/application.go
+++ b/data/transactions/application.go
@@ -23,29 +23,29 @@ import (
)
const (
- // EncodedMaxApplicationArgs sets the allocation bound for the maximum
+ // encodedMaxApplicationArgs sets the allocation bound for the maximum
// number of ApplicationArgs that a transaction decoded off of the wire
// can contain. Its value is verified against consensus parameters in
// TestEncodedAppTxnAllocationBounds
- EncodedMaxApplicationArgs = 32
+ encodedMaxApplicationArgs = 32
- // EncodedMaxAccounts sets the allocation bound for the maximum number
+ // encodedMaxAccounts sets the allocation bound for the maximum number
// of Accounts that a transaction decoded off of the wire can contain.
// Its value is verified against consensus parameters in
// TestEncodedAppTxnAllocationBounds
- EncodedMaxAccounts = 32
+ encodedMaxAccounts = 32
- // EncodedMaxForeignApps sets the allocation bound for the maximum
+ // encodedMaxForeignApps sets the allocation bound for the maximum
// number of ForeignApps that a transaction decoded off of the wire can
// contain. Its value is verified against consensus parameters in
// TestEncodedAppTxnAllocationBounds
- EncodedMaxForeignApps = 32
+ encodedMaxForeignApps = 32
- // EncodedMaxForeignAssets sets the allocation bound for the maximum
+ // encodedMaxForeignAssets sets the allocation bound for the maximum
// number of ForeignAssets that a transaction decoded off of the wire
// can contain. Its value is verified against consensus parameters in
// TestEncodedAppTxnAllocationBounds
- EncodedMaxForeignAssets = 32
+ encodedMaxForeignAssets = 32
)
// OnCompletion is an enum representing some layer 1 side effect that an
@@ -100,7 +100,7 @@ type ApplicationCallTxnFields struct {
// ApplicationArgs are arguments accessible to the executing
// ApprovalProgram or ClearStateProgram.
- ApplicationArgs [][]byte `codec:"apaa,allocbound=EncodedMaxApplicationArgs"`
+ ApplicationArgs [][]byte `codec:"apaa,allocbound=encodedMaxApplicationArgs"`
// Accounts are accounts whose balance records are accessible
// by the executing ApprovalProgram or ClearStateProgram. To
@@ -108,17 +108,17 @@ type ApplicationCallTxnFields struct {
// the sender, that account's address must be listed here (and
// since v4, the ForeignApp or ForeignAsset must also include
// the app or asset id).
- Accounts []basics.Address `codec:"apat,allocbound=EncodedMaxAccounts"`
+ Accounts []basics.Address `codec:"apat,allocbound=encodedMaxAccounts"`
// ForeignApps are application IDs for applications besides
// this one whose GlobalState (or Local, since v4) may be read
// by the executing ApprovalProgram or ClearStateProgram.
- ForeignApps []basics.AppIndex `codec:"apfa,allocbound=EncodedMaxForeignApps"`
+ ForeignApps []basics.AppIndex `codec:"apfa,allocbound=encodedMaxForeignApps"`
// ForeignAssets are asset IDs for assets whose AssetParams
// (and since v4, Holdings) may be read by the executing
// ApprovalProgram or ClearStateProgram.
- ForeignAssets []basics.AssetIndex `codec:"apas,allocbound=EncodedMaxForeignAssets"`
+ ForeignAssets []basics.AssetIndex `codec:"apas,allocbound=encodedMaxForeignAssets"`
// LocalStateSchema specifies the maximum number of each type that may
// appear in the local key/value store of users who opt in to this
diff --git a/data/transactions/application_test.go b/data/transactions/application_test.go
index 28a48e4c4..5d50d7126 100644
--- a/data/transactions/application_test.go
+++ b/data/transactions/application_test.go
@@ -103,16 +103,16 @@ func TestEncodedAppTxnAllocationBounds(t *testing.T) {
// ensure that all the supported protocols have value limits less or
// equal to their corresponding codec allocbounds
for protoVer, proto := range config.Consensus {
- if proto.MaxAppArgs > EncodedMaxApplicationArgs {
+ if proto.MaxAppArgs > encodedMaxApplicationArgs {
require.Failf(t, "proto.MaxAppArgs > encodedMaxApplicationArgs", "protocol version = %s", protoVer)
}
- if proto.MaxAppTxnAccounts > EncodedMaxAccounts {
+ if proto.MaxAppTxnAccounts > encodedMaxAccounts {
require.Failf(t, "proto.MaxAppTxnAccounts > encodedMaxAccounts", "protocol version = %s", protoVer)
}
- if proto.MaxAppTxnForeignApps > EncodedMaxForeignApps {
+ if proto.MaxAppTxnForeignApps > encodedMaxForeignApps {
require.Failf(t, "proto.MaxAppTxnForeignApps > encodedMaxForeignApps", "protocol version = %s", protoVer)
}
- if proto.MaxAppTxnForeignAssets > EncodedMaxForeignAssets {
+ if proto.MaxAppTxnForeignAssets > encodedMaxForeignAssets {
require.Failf(t, "proto.MaxAppTxnForeignAssets > encodedMaxForeignAssets", "protocol version = %s", protoVer)
}
}
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 4ed8cacd7..6bdc19457 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -466,9 +466,10 @@ transaction types, are rejected by `itxn_submit`.
| Op | Description |
| --- | --- |
-| `itxn_begin` | begin preparation of a new inner transaction |
+| `itxn_begin` | begin preparation of a new inner transaction in a new transaction group |
+| `itxn_next` | begin preparation of a new inner transaction in the same transaction group |
| `itxn_field f` | set field F of the current inner transaction to X |
-| `itxn_submit` | execute the current inner transaction. Fail if 16 inner transactions have already been executed, or if the transaction itself fails. |
+| `itxn_submit` | execute the current inner transaction group. Fail if executing this group would exceed 16 total inner transactions, or if any transaction in the group fails. |
| `itxn f` | push field F of the last inner transaction to stack |
| `itxna f i` | push Ith value of the array field F of the last inner transaction to stack |
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 9a28d3c7e..99f020af2 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -896,7 +896,7 @@ params: Txn.Accounts offset (or, since v4, an account address that appears in Tx
- LogicSigVersion >= 2
- Mode: Application
-params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
+params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
## app_global_get
@@ -918,7 +918,7 @@ params: state key. Return: value. The value is zero (of type uint64) if the key
- LogicSigVersion >= 2
- Mode: Application
-params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
+params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
## app_local_put
@@ -1296,7 +1296,7 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- Opcode: 0xb1
- Pops: _None_
- Pushes: _None_
-- begin preparation of a new inner transaction
+- begin preparation of a new inner transaction in a new transaction group
- LogicSigVersion >= 5
- Mode: Application
@@ -1318,7 +1318,7 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- Opcode: 0xb3
- Pops: _None_
- Pushes: _None_
-- execute the current inner transaction. Fail if 16 inner transactions have already been executed, or if the transaction itself fails.
+- execute the current inner transaction group. Fail if executing this group would exceed 16 total inner transactions, or if any transaction in the group fails.
- LogicSigVersion >= 5
- Mode: Application
@@ -1342,6 +1342,15 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- LogicSigVersion >= 5
- Mode: Application
+## itxn_next
+
+- Opcode: 0xb6
+- Pops: _None_
+- Pushes: _None_
+- begin preparation of a new inner transaction in the same transaction group
+- LogicSigVersion >= 6
+- Mode: Application
+
## txnas f
- Opcode: 0xc0 {uint8 transaction field index}
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 58e597ecd..5b4d72b38 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -27,7 +27,6 @@ import (
"errors"
"fmt"
"io"
- "os"
"sort"
"strconv"
"strings"
@@ -472,13 +471,16 @@ func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one argument", spec.Name)
+ if len(args) == 0 {
+ return ops.errorf("%s operation needs byte literal argument", spec.Name)
}
- val, _, err := parseBinaryArgs(args)
+ val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
}
+ if len(args) != consumed {
+ return ops.errorf("%s operation with extraneous argument", spec.Name)
+ }
ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
vlen := binary.PutUvarint(scratch[:], uint64(len(val)))
@@ -636,12 +638,15 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte "this is a string\n"
func assembleByte(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
- return ops.error("byte operation needs byte literal argument")
+ return ops.errorf("%s operation needs byte literal argument", spec.Name)
}
- val, _, err := parseBinaryArgs(args)
+ val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
}
+ if len(args) != consumed {
+ return ops.errorf("%s operation with extraneous argument", spec.Name)
+ }
ops.ByteLiteral(val)
return nil
}
@@ -1290,8 +1295,8 @@ func typeDig(ops *OpStream, args []string) (StackTypes, StackTypes) {
idx := len(ops.typeStack) - depth
if idx >= 0 {
returns[len(returns)-1] = ops.typeStack[idx]
- for i := idx + 1; i < len(ops.typeStack); i++ {
- returns[i-idx-1] = ops.typeStack[i]
+ for i := idx; i < len(ops.typeStack); i++ {
+ returns[i-idx] = ops.typeStack[i]
}
}
return anys, returns
@@ -1588,6 +1593,7 @@ func (ops *OpStream) assemble(fin io.Reader) error {
for scanner.Scan() {
ops.sourceLine++
line := scanner.Text()
+ line = strings.TrimSpace(line)
if len(line) == 0 {
ops.trace("%d: 0 line\n", ops.sourceLine)
continue
@@ -2102,19 +2108,27 @@ func (ops *OpStream) warnf(format string, a ...interface{}) error {
return ops.warn(fmt.Errorf(format, a...))
}
-// ReportProblems issues accumulated warnings and errors to stderr.
-func (ops *OpStream) ReportProblems(fname string) {
+// ReportProblems issues accumulated warnings and outputs errors to an io.Writer.
+func (ops *OpStream) ReportProblems(fname string, writer io.Writer) {
for i, e := range ops.Errors {
if i > 9 {
break
}
- fmt.Fprintf(os.Stderr, "%s: %s\n", fname, e)
+ if fname == "" {
+ fmt.Fprintf(writer, "%s\n", e)
+ } else {
+ fmt.Fprintf(writer, "%s: %s\n", fname, e)
+ }
}
for i, w := range ops.Warnings {
if i > 9 {
break
}
- fmt.Fprintf(os.Stderr, "%s: %s\n", fname, w)
+ if fname == "" {
+ fmt.Fprintf(writer, "%s\n", w)
+ } else {
+ fmt.Fprintf(writer, "%s: %s\n", fname, w)
+ }
}
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 2fd6e993d..1c78f9d40 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -343,6 +343,7 @@ itxna Logs 3
`
const v6Nonsense = v5Nonsense + `
+itxn_next
`
var nonsense = map[uint64]string{
@@ -360,7 +361,7 @@ var compiled = map[uint64]string{
3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
4: "042004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d8164",
5: "052004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03",
- 6: "062004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03",
+ 6: "062004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03b6",
}
func pseudoOp(opcode string) bool {
@@ -474,6 +475,9 @@ func testProg(t testing.TB, source string, ver uint64, expected ...expect) *OpSt
require.NoError(t, err)
require.Equal(t, ops.Program, ops2.Program)
} else {
+ if err == nil {
+ t.Log(program)
+ }
require.Error(t, err)
errors := ops.Errors
for _, exp := range expected {
@@ -506,6 +510,7 @@ func testProg(t testing.TB, source string, ver uint64, expected ...expect) *OpSt
}
func testLine(t *testing.T, line string, ver uint64, expected string) {
+ t.Helper()
// By embedding the source line between two other lines, the
// test for the correct line number in the error is more
// meaningful.
@@ -516,6 +521,7 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
}
testProg(t, source, ver, expect{2, expected})
}
+
func TestAssembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -660,6 +666,7 @@ func TestAssembleBytes(t *testing.T) {
variations := []string{
"byte b32 MFRGGZDFMY",
"byte base32 MFRGGZDFMY",
+ "byte base32 MFRGGZDFMY",
"byte base32(MFRGGZDFMY)",
"byte b32(MFRGGZDFMY)",
"byte b32 MFRGGZDFMY======",
@@ -678,6 +685,11 @@ func TestAssembleBytes(t *testing.T) {
expectedDefaultConsts := "0126010661626364656628"
expectedOptimizedConsts := "018006616263646566"
+ bad := [][]string{
+ {"byte", "...operation needs byte literal argument"},
+ {`byte "john" "doe"`, "...operation with extraneous argument"},
+ }
+
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
expected := expectedDefaultConsts
@@ -689,8 +701,19 @@ func TestAssembleBytes(t *testing.T) {
ops := testProg(t, vi, v)
s := hex.EncodeToString(ops.Program)
require.Equal(t, mutateProgVersion(v, expected), s)
+ // pushbytes should take the same input
+ if v >= 3 {
+ testProg(t, strings.Replace(vi, "byte", "pushbytes", 1), v)
+ }
}
+ for _, b := range bad {
+ testProg(t, b[0], v, expect{1, b[1]})
+ // pushbytes should produce the same errors
+ if v >= 3 {
+ testProg(t, strings.Replace(b[0], "byte", "pushbytes", 1), v, expect{1, b[1]})
+ }
+ }
})
}
}
@@ -1447,7 +1470,7 @@ func TestConstantArgs(t *testing.T) {
}
for v := uint64(3); v <= AssemblerMaxVersion; v++ {
testProg(t, "pushint", v, expect{1, "pushint needs one argument"})
- testProg(t, "pushbytes", v, expect{1, "pushbytes needs one argument"})
+ testProg(t, "pushbytes", v, expect{1, "pushbytes operation needs byte literal argument"})
}
}
@@ -2059,6 +2082,9 @@ func TestPragmas(t *testing.T) {
testProg(t, "#pragma version", assemblerNoVersion,
expect{1, "no version value"})
+
+ ops = testProg(t, " #pragma version 5 ", assemblerNoVersion)
+ require.Equal(t, uint64(5), ops.Version)
}
func TestAssemblePragmaVersion(t *testing.T) {
@@ -2206,7 +2232,8 @@ func TestDigAsm(t *testing.T) {
// Confirm that digging something out does not ruin our knowledge about the types in the middle
testProg(t, "int 1; byte 0x1234; byte 0x1234; dig 2; dig 3; +; pop; +", AssemblerMaxVersion,
- expect{6, "+ arg 1..."})
+ expect{8, "+ arg 1..."})
+ testProg(t, "int 3; pushbytes \"123456\"; int 1; dig 2; substring3", AssemblerMaxVersion)
}
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 0c9c6f8fc..5468d808c 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -169,9 +169,10 @@ var opDocByName = map[string]string{
"b~": "X with all bits inverted",
"log": "write bytes to log state of the current application",
- "itxn_begin": "begin preparation of a new inner transaction",
+ "itxn_begin": "begin preparation of a new inner transaction in a new transaction group",
+ "itxn_next": "begin preparation of a new inner transaction in the same transaction group",
"itxn_field": "set field F of the current inner transaction to X",
- "itxn_submit": "execute the current inner transaction. Fail if 16 inner transactions have already been executed, or if the transaction itself fails.",
+ "itxn_submit": "execute the current inner transaction group. Fail if executing this group would exceed 16 total inner transactions, or if any transaction in the group fails.",
}
// OpDoc returns a description of the op
@@ -269,8 +270,8 @@ var opDocExtras = map[string]string{
"min_balance": "params: Before v4, Txn.Accounts offset. Since v4, Txn.Accounts offset or an account address that appears in Txn.Accounts or is Txn.Sender). Return: value.",
"app_opted_in": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.",
"app_local_get": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
- "app_local_get_ex": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
- "app_global_get_ex": "params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "app_local_get_ex": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "app_global_get_ex": "params: Txn.ForeignApps offset (or, since v4, an application id that appears in Txn.ForeignApps or is the CurrentApplicationID), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
"app_global_get": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
"app_local_put": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), state key, value.",
"app_local_del": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)",
@@ -300,7 +301,7 @@ var OpGroups = map[string][]string{
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gaid", "gaids"},
"Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "cover", "uncover", "swap", "select", "assert", "callsub", "retsub"},
"State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "log"},
- "Inner Transactions": {"itxn_begin", "itxn_field", "itxn_submit", "itxn", "itxna"},
+ "Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna"},
}
// OpCost indicates the cost of an operation over the range of
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index 0dc7eb769..9a2530f36 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -349,8 +349,8 @@ type EvalContext struct {
version uint64
scratch scratchSpace
- subtxn *transactions.SignedTxn // place to build for itxn_submit
- // The transactions Performed() and their effects
+ subtxns []transactions.SignedTxn // place to build for itxn_submit
+ // Previous transactions Performed() and their effects
InnerTxns []transactions.SignedTxnWithAD
cost int // cost incurred so far
@@ -3676,33 +3676,68 @@ func authorizedSender(cx *EvalContext, addr basics.Address) bool {
return appAddr == authorizer
}
-func opTxBegin(cx *EvalContext) {
- if cx.subtxn != nil {
- cx.err = errors.New("itxn_begin without itxn_submit")
- return
- }
- // Start fresh
- cx.subtxn = &transactions.SignedTxn{}
- // Fill in defaults.
+// addInnerTxn appends a fresh SignedTxn to subtxns, populated with reasonable
+// defaults.
+func addInnerTxn(cx *EvalContext) error {
addr, err := cx.getApplicationAddress()
if err != nil {
- cx.err = err
- return
+ return err
}
- fee := cx.Proto.MinTxnFee
- if cx.FeeCredit != nil {
- // Use credit to shrink the fee, but don't change FeeCredit
- // here, because they might never itxn_submit, or they might
- // change the fee. Do it in itxn_submit.
- fee = basics.SubSaturate(fee, *cx.FeeCredit)
+ // For compatibility with v5, in which failures only occurred in the submit,
+ // we only fail here if we are OVER the MaxInnerTransactions limit. Thus
+ // this allows construction of one more Inner than is actually allowed, and
+ // will fail in submit. (But we do want the check here, so this can't become
+ // unbounded.) The MaxTxGroupSize check can be, and is, precise.
+ if len(cx.InnerTxns)+len(cx.subtxns) > cx.Proto.MaxInnerTransactions ||
+ len(cx.subtxns) >= cx.Proto.MaxTxGroupSize {
+ return errors.New("attempt to create too many inner transactions")
+ }
+
+ stxn := transactions.SignedTxn{}
+
+ groupFee := basics.MulSaturate(cx.Proto.MinTxnFee, uint64(len(cx.subtxns)+1))
+ groupPaid := uint64(0)
+ for _, ptxn := range cx.subtxns {
+ groupPaid = basics.AddSaturate(groupPaid, ptxn.Txn.Fee.Raw)
+ }
+
+ fee := uint64(0)
+ if groupPaid < groupFee {
+ fee = groupFee - groupPaid
+
+ if cx.FeeCredit != nil {
+ // Use credit to shrink the default populated fee, but don't change
+ // FeeCredit here, because they might never itxn_submit, or they
+ // might change the fee. Do it in itxn_submit.
+ fee = basics.SubSaturate(fee, *cx.FeeCredit)
+ }
}
- cx.subtxn.Txn.Header = transactions.Header{
- Sender: addr, // Default, to simplify usage
+
+ stxn.Txn.Header = transactions.Header{
+ Sender: addr,
Fee: basics.MicroAlgos{Raw: fee},
FirstValid: cx.Txn.Txn.FirstValid,
LastValid: cx.Txn.Txn.LastValid,
}
+ cx.subtxns = append(cx.subtxns, stxn)
+ return nil
+}
+
+func opTxBegin(cx *EvalContext) {
+ if len(cx.subtxns) > 0 {
+ cx.err = errors.New("itxn_begin without itxn_submit")
+ return
+ }
+ cx.err = addInnerTxn(cx)
+}
+
+func opTxNext(cx *EvalContext) {
+ if len(cx.subtxns) == 0 {
+ cx.err = errors.New("itxn_next without itxn_begin")
+ return
+ }
+ cx.err = addInnerTxn(cx)
}
// availableAccount is used instead of accountReference for more recent opcodes
@@ -3884,7 +3919,8 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs txnFieldSpec, txn *tr
}
func opTxField(cx *EvalContext) {
- if cx.subtxn == nil {
+ itx := len(cx.subtxns) - 1
+ if itx < 0 {
cx.err = errors.New("itxn_field without itxn_begin")
return
}
@@ -3896,7 +3932,7 @@ func opTxField(cx *EvalContext) {
return
}
sv := cx.stack[last]
- cx.err = cx.stackIntoTxnField(sv, fs, &cx.subtxn.Txn)
+ cx.err = cx.stackIntoTxnField(sv, fs, &cx.subtxns[itx].Txn)
cx.stack = cx.stack[:last] // pop
}
@@ -3906,64 +3942,69 @@ func opTxSubmit(cx *EvalContext) {
return
}
- if cx.subtxn == nil {
- cx.err = errors.New("itxn_submit without itxn_begin")
+ // Should never trigger, since itxn_next checks these too.
+ if len(cx.InnerTxns)+len(cx.subtxns) > cx.Proto.MaxInnerTransactions ||
+ len(cx.subtxns) > cx.Proto.MaxTxGroupSize {
+ cx.err = errors.New("too many inner transactions")
return
}
- if len(cx.InnerTxns) >= cx.Proto.MaxInnerTransactions {
- cx.err = errors.New("itxn_submit with MaxInnerTransactions")
+ if len(cx.subtxns) == 0 {
+ cx.err = errors.New("itxn_submit without itxn_begin")
return
}
- // The goal is to follow the same invariants used by the
- // transaction pool. Namely that any transaction that makes it
- // to Perform (which is equivalent to eval.applyTransaction)
- // is authorized, and WellFormed.
- if !authorizedSender(cx, cx.subtxn.Txn.Sender) {
- cx.err = fmt.Errorf("unauthorized")
- return
+ // Check fees across the group first. Allows fee pooling in inner groups.
+ groupFee := basics.MulSaturate(cx.Proto.MinTxnFee, uint64(len(cx.subtxns)))
+ groupPaid := uint64(0)
+ for _, ptxn := range cx.subtxns {
+ groupPaid = basics.AddSaturate(groupPaid, ptxn.Txn.Fee.Raw)
}
-
- // Recall that WellFormed does not care about individual
- // transaction fees because of fee pooling. So we check below.
- cx.err = cx.subtxn.Txn.WellFormed(*cx.Specials, *cx.Proto)
- if cx.err != nil {
- return
- }
-
- paid := cx.subtxn.Txn.Fee.Raw
- if paid >= cx.Proto.MinTxnFee {
- // Over paying - accumulate into FeeCredit
- overpaid := paid - cx.Proto.MinTxnFee
+ if groupPaid < groupFee {
+ // See if the FeeCredit is enough to cover the shortfall
+ shortfall := groupFee - groupPaid
+ if cx.FeeCredit == nil || *cx.FeeCredit < shortfall {
+ cx.err = fmt.Errorf("fee too small %#v", cx.subtxns)
+ return
+ }
+ *cx.FeeCredit -= shortfall
+ } else {
+ overpay := groupPaid - groupFee
if cx.FeeCredit == nil {
cx.FeeCredit = new(uint64)
}
- *cx.FeeCredit = basics.AddSaturate(*cx.FeeCredit, overpaid)
- } else {
- underpaid := cx.Proto.MinTxnFee - paid
- // Try to pay with FeeCredit, else fail.
- if cx.FeeCredit != nil && *cx.FeeCredit >= underpaid {
- *cx.FeeCredit -= underpaid
- } else {
- // We allow changing the fee. One pattern might be for an
- // app to unilaterally set its Fee to 0. The idea would be
- // that other transactions were supposed to overpay.
- cx.err = fmt.Errorf("fee too small")
+ *cx.FeeCredit = basics.AddSaturate(*cx.FeeCredit, overpay)
+ }
+
+ for itx := range cx.subtxns {
+ // The goal is to follow the same invariants used by the
+ // transaction pool. Namely that any transaction that makes it
+ // to Perform (which is equivalent to eval.applyTransaction)
+ // is authorized, and WellFormed.
+ if !authorizedSender(cx, cx.subtxns[itx].Txn.Sender) {
+ cx.err = fmt.Errorf("unauthorized")
return
}
- }
- ad, err := cx.Ledger.Perform(&cx.subtxn.Txn, *cx.Specials)
- if err != nil {
- cx.err = err
- return
+ // Recall that WellFormed does not care about individual
+ // transaction fees because of fee pooling. So we check below.
+ cx.err = cx.subtxns[itx].Txn.WellFormed(*cx.Specials, *cx.Proto)
+ if cx.err != nil {
+ return
+ }
+
+ ad, err := cx.Ledger.Perform(&cx.subtxns[itx].Txn, *cx.Specials)
+ if err != nil {
+ cx.err = err
+ return
+ }
+
+ cx.InnerTxns = append(cx.InnerTxns, transactions.SignedTxnWithAD{
+ SignedTxn: cx.subtxns[itx],
+ ApplyData: ad,
+ })
}
- cx.InnerTxns = append(cx.InnerTxns, transactions.SignedTxnWithAD{
- SignedTxn: *cx.subtxn,
- ApplyData: ad,
- })
- cx.subtxn = nil
+ cx.subtxns = nil
}
// PcDetails return PC and disassembled instructions at PC up to 2 opcodes back
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index 60e8d83fb..b24e10fc2 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -415,7 +415,7 @@ func TestNumInner(t *testing.T) {
testApp(t, pay+pay+pay+";int 1", ep)
testApp(t, pay+pay+pay+pay+";int 1", ep)
// In the sample proto, MaxInnerTransactions = 4
- testApp(t, pay+pay+pay+pay+pay+";int 1", ep, "itxn_submit with MaxInnerTransactions")
+ testApp(t, pay+pay+pay+pay+pay+";int 1", ep, "too many inner transactions")
}
func TestAssetCreate(t *testing.T) {
@@ -518,3 +518,65 @@ func TestFieldSetting(t *testing.T) {
testApp(t, "itxn_begin; int 13; bzero; itxn_field ConfigAssetName; int 1", ep,
"value is too long")
}
+
+func TestInnerGroup(t *testing.T) {
+ ep, ledger := makeSampleEnv()
+ ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
+ // Need both fees and both payments
+ ledger.NewAccount(ledger.ApplicationID().Address(), 999+2*defaultEvalProto().MinTxnFee)
+ pay := `
+int pay; itxn_field TypeEnum;
+int 500; itxn_field Amount;
+txn Sender; itxn_field Receiver;
+`
+ testApp(t, "itxn_begin"+pay+"itxn_next"+pay+"itxn_submit; int 1", ep,
+ "insufficient balance")
+
+ // NewAccount overwrites the existing balance
+ ledger.NewAccount(ledger.ApplicationID().Address(), 1000+2*defaultEvalProto().MinTxnFee)
+ testApp(t, "itxn_begin"+pay+"itxn_next"+pay+"itxn_submit; int 1", ep)
+}
+
+func TestInnerFeePooling(t *testing.T) {
+ ep, ledger := makeSampleEnv()
+ ledger.NewApp(ep.Txn.Txn.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(ledger.ApplicationID().Address(), 50_000)
+ pay := `
+int pay; itxn_field TypeEnum;
+int 500; itxn_field Amount;
+txn Sender; itxn_field Receiver;
+`
+ // Force the first fee to 3, but the second will default to 2*fee-3 = 2002-3
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 3; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "itxn_submit; itxn Fee; int 1999; ==", ep)
+
+ // Same first, but force the second too low
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 3; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "int 1998; itxn_field Fee;"+
+ "itxn_submit; int 1", ep, "fee too small")
+
+ // Overpay in first itxn, the second will default to less
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 2000; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "itxn_submit; itxn Fee; int 2; ==", ep)
+
+ // Same first, but force the second too low
+ testApp(t, "itxn_begin"+
+ pay+
+ "int 2000; itxn_field Fee;"+
+ "itxn_next"+
+ pay+
+ "int 1; itxn_field Fee;"+
+ "itxn_submit; itxn Fee; int 1", ep, "fee too small")
+}
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index cdb6c1d72..74b9f931c 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -64,6 +64,7 @@ func defaultEvalProtoWithVersion(version uint64) config.ConsensusParams {
SchemaBytesMinBalance: 1005,
MaxInnerTransactions: 4,
+ MaxTxGroupSize: 8,
// With the addition of itxn_field, itxn_submit, which rely on
// machinery outside logic package for validity checking, we
@@ -75,6 +76,12 @@ func defaultEvalProtoWithVersion(version uint64) config.ConsensusParams {
MaxAssetDecimals: 4,
SupportRekeying: true,
MaxTxnNoteBytes: 500,
+ EnableFeePooling: true,
+
+ // Chosen to be different from one another and from normal proto
+ MaxAppTxnAccounts: 3,
+ MaxAppTxnForeignApps: 5,
+ MaxAppTxnForeignAssets: 6,
}
}
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index 0a6cb9c96..68ae0f12f 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -639,7 +639,7 @@ func init() {
txnFieldSpecByField = make(map[TxnField]txnFieldSpec, len(TxnFieldNames))
for i, s := range txnFieldSpecs {
if int(s.field) != i {
- panic("txnFieldTypePairs disjoint with TxnField enum")
+ panic("txnFieldSpecs disjoint with TxnField enum")
}
TxnFieldTypes[i] = s.ftype
txnFieldSpecByField[s.field] = s
@@ -655,8 +655,11 @@ func init() {
}
GlobalFieldTypes = make([]StackType, len(GlobalFieldNames))
globalFieldSpecByField = make(map[GlobalField]globalFieldSpec, len(GlobalFieldNames))
- for _, s := range globalFieldSpecs {
- GlobalFieldTypes[int(s.field)] = s.ftype
+ for i, s := range globalFieldSpecs {
+ if int(s.field) != i {
+ panic("globalFieldSpecs disjoint with GlobalField enum")
+ }
+ GlobalFieldTypes[i] = s.ftype
globalFieldSpecByField[s.field] = s
}
globalFieldSpecByName = make(gfNameSpecMap, len(GlobalFieldNames))
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 83e608b93..604db789a 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -317,6 +317,7 @@ var OpSpecs = []OpSpec{
{0xb3, "itxn_submit", opTxSubmit, asmDefault, disDefault, nil, nil, 5, runModeApplication, opDefault},
{0xb4, "itxn", opItxn, asmItxn, disTxn, nil, oneAny, 5, runModeApplication, immediates("f")},
{0xb5, "itxna", opItxna, asmItxna, disTxna, nil, oneAny, 5, runModeApplication, immediates("f", "i")},
+ {0xb6, "itxn_next", opTxNext, asmDefault, disDefault, nil, nil, 6, runModeApplication, opDefault},
// Dynamic indexing
{0xc0, "txnas", opTxnas, assembleTxnas, disTxn, oneInt, oneAny, 5, modeAny, immediates("f")},
diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go
index 218c86fa2..009de07b5 100644
--- a/data/transactions/msgp_gen.go
+++ b/data/transactions/msgp_gen.go
@@ -360,8 +360,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0008 > EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(EncodedMaxApplicationArgs))
+ if zb0008 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
@@ -389,8 +389,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0010 > EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(EncodedMaxAccounts))
+ if zb0010 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
@@ -418,8 +418,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0012 > EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(EncodedMaxForeignApps))
+ if zb0012 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
@@ -447,8 +447,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0014 > EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(EncodedMaxForeignAssets))
+ if zb0014 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
@@ -574,8 +574,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0019 > EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(EncodedMaxApplicationArgs))
+ if zb0019 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0019), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
@@ -601,8 +601,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0021 > EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(EncodedMaxAccounts))
+ if zb0021 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
@@ -628,8 +628,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0023 > EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(EncodedMaxForeignApps))
+ if zb0023 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0023), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
@@ -655,8 +655,8 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0025 > EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(EncodedMaxForeignAssets))
+ if zb0025 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0025), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
@@ -2056,7 +2056,7 @@ func (z *Header) MarshalMsg(b []byte) (o []byte) {
zb0002Len--
zb0002Mask |= 0x40
}
- if (*z).Lease == ([LeaseByteLength]byte{}) {
+ if (*z).Lease == ([32]byte{}) {
zb0002Len--
zb0002Mask |= 0x80
}
@@ -2350,13 +2350,13 @@ func (_ *Header) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Header) Msgsize() (s int) {
- s = 1 + 4 + (*z).Sender.Msgsize() + 4 + (*z).Fee.Msgsize() + 3 + (*z).FirstValid.Msgsize() + 3 + (*z).LastValid.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).Note) + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 4 + (*z).Group.Msgsize() + 3 + msgp.ArrayHeaderSize + (LeaseByteLength * (msgp.ByteSize)) + 6 + (*z).RekeyTo.Msgsize()
+ s = 1 + 4 + (*z).Sender.Msgsize() + 4 + (*z).Fee.Msgsize() + 3 + (*z).FirstValid.Msgsize() + 3 + (*z).LastValid.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).Note) + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 4 + (*z).Group.Msgsize() + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 6 + (*z).RekeyTo.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *Header) MsgIsZero() bool {
- return ((*z).Sender.MsgIsZero()) && ((*z).Fee.MsgIsZero()) && ((*z).FirstValid.MsgIsZero()) && ((*z).LastValid.MsgIsZero()) && (len((*z).Note) == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).Group.MsgIsZero()) && ((*z).Lease == ([LeaseByteLength]byte{})) && ((*z).RekeyTo.MsgIsZero())
+ return ((*z).Sender.MsgIsZero()) && ((*z).Fee.MsgIsZero()) && ((*z).FirstValid.MsgIsZero()) && ((*z).LastValid.MsgIsZero()) && (len((*z).Note) == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).Group.MsgIsZero()) && ((*z).Lease == ([32]byte{})) && ((*z).RekeyTo.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
@@ -4302,7 +4302,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
zb0006Len--
zb0006Mask |= 0x8000000000
}
- if (*z).Header.Lease == ([LeaseByteLength]byte{}) {
+ if (*z).Header.Lease == ([32]byte{}) {
zb0006Len--
zb0006Mask |= 0x10000000000
}
@@ -4906,8 +4906,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0010 > EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(EncodedMaxApplicationArgs))
+ if zb0010 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
@@ -4935,8 +4935,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0012 > EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(EncodedMaxAccounts))
+ if zb0012 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
@@ -4964,8 +4964,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0014 > EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(EncodedMaxForeignApps))
+ if zb0014 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
@@ -4993,8 +4993,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0016 > EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(EncodedMaxForeignAssets))
+ if zb0016 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0016), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
@@ -5334,8 +5334,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0022 > EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(EncodedMaxApplicationArgs))
+ if zb0022 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
@@ -5361,8 +5361,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0024 > EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(EncodedMaxAccounts))
+ if zb0024 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
@@ -5388,8 +5388,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0026 > EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(EncodedMaxForeignApps))
+ if zb0026 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0026), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
@@ -5415,8 +5415,8 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0028 > EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(EncodedMaxForeignAssets))
+ if zb0028 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0028), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
@@ -5522,7 +5522,7 @@ func (_ *Transaction) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Transaction) Msgsize() (s int) {
- s = 3 + 5 + (*z).Type.Msgsize() + 4 + (*z).Header.Sender.Msgsize() + 4 + (*z).Header.Fee.Msgsize() + 3 + (*z).Header.FirstValid.Msgsize() + 3 + (*z).Header.LastValid.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).Header.Note) + 4 + msgp.StringPrefixSize + len((*z).Header.GenesisID) + 3 + (*z).Header.GenesisHash.Msgsize() + 4 + (*z).Header.Group.Msgsize() + 3 + msgp.ArrayHeaderSize + (LeaseByteLength * (msgp.ByteSize)) + 6 + (*z).Header.RekeyTo.Msgsize() + 8 + (*z).KeyregTxnFields.VotePK.Msgsize() + 7 + (*z).KeyregTxnFields.SelectionPK.Msgsize() + 8 + (*z).KeyregTxnFields.VoteFirst.Msgsize() + 8 + (*z).KeyregTxnFields.VoteLast.Msgsize() + 7 + msgp.Uint64Size + 8 + msgp.BoolSize + 4 + (*z).PaymentTxnFields.Receiver.Msgsize() + 4 + (*z).PaymentTxnFields.Amount.Msgsize() + 6 + (*z).PaymentTxnFields.CloseRemainderTo.Msgsize() + 5 + (*z).AssetConfigTxnFields.ConfigAsset.Msgsize() + 5 + (*z).AssetConfigTxnFields.AssetParams.Msgsize() + 5 + (*z).AssetTransferTxnFields.XferAsset.Msgsize() + 5 + msgp.Uint64Size + 5 + (*z).AssetTransferTxnFields.AssetSender.Msgsize() + 5 + (*z).AssetTransferTxnFields.AssetReceiver.Msgsize() + 7 + (*z).AssetTransferTxnFields.AssetCloseTo.Msgsize() + 5 + (*z).AssetFreezeTxnFields.FreezeAccount.Msgsize() + 5 + (*z).AssetFreezeTxnFields.FreezeAsset.Msgsize() + 5 + msgp.BoolSize + 5 + (*z).ApplicationCallTxnFields.ApplicationID.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.ArrayHeaderSize
+ s = 3 + 5 + (*z).Type.Msgsize() + 4 + (*z).Header.Sender.Msgsize() + 4 + (*z).Header.Fee.Msgsize() + 3 + (*z).Header.FirstValid.Msgsize() + 3 + (*z).Header.LastValid.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).Header.Note) + 4 + msgp.StringPrefixSize + len((*z).Header.GenesisID) + 3 + (*z).Header.GenesisHash.Msgsize() + 4 + (*z).Header.Group.Msgsize() + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 6 + (*z).Header.RekeyTo.Msgsize() + 8 + (*z).KeyregTxnFields.VotePK.Msgsize() + 7 + (*z).KeyregTxnFields.SelectionPK.Msgsize() + 8 + (*z).KeyregTxnFields.VoteFirst.Msgsize() + 8 + (*z).KeyregTxnFields.VoteLast.Msgsize() + 7 + msgp.Uint64Size + 8 + msgp.BoolSize + 4 + (*z).PaymentTxnFields.Receiver.Msgsize() + 4 + (*z).PaymentTxnFields.Amount.Msgsize() + 6 + (*z).PaymentTxnFields.CloseRemainderTo.Msgsize() + 5 + (*z).AssetConfigTxnFields.ConfigAsset.Msgsize() + 5 + (*z).AssetConfigTxnFields.AssetParams.Msgsize() + 5 + (*z).AssetTransferTxnFields.XferAsset.Msgsize() + 5 + msgp.Uint64Size + 5 + (*z).AssetTransferTxnFields.AssetSender.Msgsize() + 5 + (*z).AssetTransferTxnFields.AssetReceiver.Msgsize() + 7 + (*z).AssetTransferTxnFields.AssetCloseTo.Msgsize() + 5 + (*z).AssetFreezeTxnFields.FreezeAccount.Msgsize() + 5 + (*z).AssetFreezeTxnFields.FreezeAsset.Msgsize() + 5 + msgp.BoolSize + 5 + (*z).ApplicationCallTxnFields.ApplicationID.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.ArrayHeaderSize
for zb0002 := range (*z).ApplicationCallTxnFields.ApplicationArgs {
s += msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
}
@@ -5544,7 +5544,7 @@ func (z *Transaction) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *Transaction) MsgIsZero() bool {
- return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([LeaseByteLength]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).CompactCertTxnFields.CertRound.MsgIsZero()) && ((*z).CompactCertTxnFields.CertType.MsgIsZero()) && ((*z).CompactCertTxnFields.Cert.MsgIsZero())
+ return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).CompactCertTxnFields.CertRound.MsgIsZero()) && ((*z).CompactCertTxnFields.CertType.MsgIsZero()) && ((*z).CompactCertTxnFields.Cert.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/transactions/teal.go b/data/transactions/teal.go
index e2f6718b7..0826290b9 100644
--- a/data/transactions/teal.go
+++ b/data/transactions/teal.go
@@ -37,9 +37,6 @@ type EvalDelta struct {
Logs []string `codec:"lg,allocbound=config.MaxLogCalls"`
- // Intentionally, temporarily wrong - need to decide how to
- // allocbound properly when structure is recursive. Even a bound
- // of 2 would allow arbitrarily large object if deep.
InnerTxns []SignedTxnWithAD `codec:"itx,allocbound=config.MaxInnerTransactions"`
}
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index baf40a312..fb356bb04 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -29,9 +29,6 @@ import (
// Txid is a hash used to uniquely identify individual transactions
type Txid crypto.Digest
-// LeaseByteLength is the byte length of a lease
-const LeaseByteLength = int(32)
-
// String converts txid to a pretty-printable string
func (txid Txid) String() string {
return fmt.Sprintf("%v", crypto.Digest(txid))
@@ -72,7 +69,7 @@ type Header struct {
// lease identified by the (Sender, Lease) pair of the transaction until
// the LastValid round passes. While this transaction possesses the
// lease, no other transaction specifying this lease can be confirmed.
- Lease [LeaseByteLength]byte `codec:"lx"`
+ Lease [32]byte `codec:"lx"`
// RekeyTo, if nonzero, sets the sender's AuthAddr to the given address
// If the RekeyTo address is the sender's actual address, the AuthAddr is set to zero
diff --git a/data/transactions/verify/verifiedTxnCache.go b/data/transactions/verify/verifiedTxnCache.go
index 2e228c4fc..0c9bbe2c0 100644
--- a/data/transactions/verify/verifiedTxnCache.go
+++ b/data/transactions/verify/verifiedTxnCache.go
@@ -21,7 +21,6 @@ import (
"github.com/algorand/go-deadlock"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/protocol"
@@ -68,8 +67,6 @@ type VerifiedTransactionCache interface {
UpdatePinned(pinnedTxns map[transactions.Txid]transactions.SignedTxn) error
// Pin function would mark the given transaction group as pinned.
Pin(txgroup []transactions.SignedTxn) error
- // PinGroups function would mark the given transaction groups as pinned.
- PinGroups(txgroups []pooldata.SignedTxGroup) error
}
// verifiedTransactionCache provides an implementation of the VerifiedTransactionCache interface
@@ -209,25 +206,6 @@ func (v *verifiedTransactionCache) UpdatePinned(pinnedTxns map[transactions.Txid
func (v *verifiedTransactionCache) Pin(txgroup []transactions.SignedTxn) (err error) {
v.bucketsLock.Lock()
defer v.bucketsLock.Unlock()
- return v.pin(txgroup)
-}
-
-// PinGroups function would mark the given transaction groups as pinned.
-func (v *verifiedTransactionCache) PinGroups(txgroups []pooldata.SignedTxGroup) error {
- v.bucketsLock.Lock()
- defer v.bucketsLock.Unlock()
- var outError error
- for _, txgroup := range txgroups {
- err := v.pin(txgroup.Transactions)
- if err != nil {
- outError = err
- }
- }
- return outError
-}
-
-// Pin sets a given transaction group as pinned items, after they have already been verified.
-func (v *verifiedTransactionCache) pin(txgroup []transactions.SignedTxn) (err error) {
transactionMissing := false
if len(v.pinned)+len(txgroup) > maxPinnedEntries {
// reaching this number likely means that we have an issue not removing entries from the pinned map.
@@ -305,10 +283,6 @@ func (v *mockedCache) UpdatePinned(pinnedTxns map[transactions.Txid]transactions
return nil
}
-func (v *mockedCache) PinGroups(txgroups []pooldata.SignedTxGroup) error {
- return nil
-}
-
func (v *mockedCache) Pin(txgroup []transactions.SignedTxn) (err error) {
return nil
}
diff --git a/data/txHandler.go b/data/txHandler.go
index 0ecef433c..fa8bf250b 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -25,11 +25,9 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/verify"
- "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
@@ -177,7 +175,7 @@ func (handler *TxHandler) postprocessCheckedTxn(wi *txBacklogMsg) {
verifiedTxGroup := wi.unverifiedTxGroup
// save the transaction, if it has high enough fee and not already in the cache
- err := handler.txPool.Remember(pooldata.SignedTxGroup{Transactions: verifiedTxGroup})
+ err := handler.txPool.Remember(verifiedTxGroup)
if err != nil {
logging.Base().Debugf("could not remember tx: %v", err)
return
@@ -265,6 +263,12 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
// Note that this also checks the consistency of the transaction's group hash,
// which is required for safe transaction signature caching behavior.
func (handler *TxHandler) checkAlreadyCommitted(tx *txBacklogMsg) (processingDone bool) {
+ txids := make([]transactions.Txid, len(tx.unverifiedTxGroup))
+ for i := range tx.unverifiedTxGroup {
+ txids[i] = tx.unverifiedTxGroup[i].ID()
+ }
+ logging.Base().Debugf("got a tx group with IDs %v", txids)
+
// do a quick test to check that this transaction could potentially be committed, to reject dup pending transactions
err := handler.txPool.Test(tx.unverifiedTxGroup)
if err != nil {
@@ -274,12 +278,12 @@ func (handler *TxHandler) checkAlreadyCommitted(tx *txBacklogMsg) (processingDon
return false
}
-func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.SignedTxn) (disconnect bool) {
+func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.SignedTxn) (outmsg network.OutgoingMessage, processingDone bool) {
tx := &txBacklogMsg{
unverifiedTxGroup: unverifiedTxGroup,
}
if handler.checkAlreadyCommitted(tx) {
- return false
+ return network.OutgoingMessage{}, true
}
// build the transaction verification context
@@ -287,7 +291,7 @@ func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.Signed
latestHdr, err := handler.ledger.BlockHdr(latest)
if err != nil {
logging.Base().Warnf("Could not get header for previous block %v: %v", latest, err)
- return false
+ return network.OutgoingMessage{}, true
}
unverifiedTxnGroups := bookkeeping.SignedTxnsToGroups(unverifiedTxGroup)
@@ -295,7 +299,7 @@ func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.Signed
if err != nil {
// transaction is invalid
logging.Base().Warnf("One or more transactions were malformed: %v", err)
- return true
+ return network.OutgoingMessage{Action: network.Disconnect}, true
}
// at this point, we've verified the transaction group,
@@ -303,10 +307,10 @@ func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.Signed
verifiedTxGroup := unverifiedTxGroup
// save the transaction, if it has high enough fee and not already in the cache
- err = handler.txPool.Remember(pooldata.SignedTxGroup{Transactions: verifiedTxGroup})
+ err = handler.txPool.Remember(verifiedTxGroup)
if err != nil {
logging.Base().Debugf("could not remember tx: %v", err)
- return false
+ return network.OutgoingMessage{}, true
}
// if we remembered without any error ( i.e. txpool wasn't full ), then we should pin these transactions.
@@ -315,98 +319,7 @@ func (handler *TxHandler) processDecoded(unverifiedTxGroup []transactions.Signed
logging.Base().Warnf("unable to pin transaction: %v", err)
}
- return false
-}
-
-// filterAlreadyCommitted scan the list of signed transaction groups, and filter out the ones that have already been included,
-// or that should not be added to the transaction pool.
-// the resulting slice is using the *same* underlying array as the input slice, and the caller must ensure that this would not
-// cause issue on the caller side. The hasError describe whether any of the removed transacation groups was
-// removed for a reason *other* than being duplicate ( for instance, malformed transaction )
-func (handler *TxHandler) filterAlreadyCommitted(unverifiedTxGroups []pooldata.SignedTxGroup) (filteredGroups []pooldata.SignedTxGroup, hasError bool) {
- remainedTxnsGroupOffset := 0
- for idx, utxng := range unverifiedTxGroups {
- err := handler.txPool.Test(utxng.Transactions)
- switch err.(type) {
- case nil:
- // no error was generated.
- if remainedTxnsGroupOffset != idx {
- unverifiedTxGroups[remainedTxnsGroupOffset] = utxng
- }
- remainedTxnsGroupOffset++
- case *ledgercore.TransactionInLedgerError:
- // this is a duplicate transaction group.
- default:
- // some non-duplicate error was reported on this group.
- hasError = true
- }
- }
- return unverifiedTxGroups[:remainedTxnsGroupOffset], hasError
-}
-
-// processDecodedArray receives a slice of transaction groups and attempt to add them to the transaction pool.
-// The processDecodedArray returns whether the node should be disconnecting from the source of these transactions ( in case a malicious transaction is found )
-// as well as whether all the provided transactions were included in the transaction pool or committed.
-func (handler *TxHandler) processDecodedArray(unverifiedTxGroups []pooldata.SignedTxGroup) (disconnect, allTransactionIncluded bool) {
- var hasError bool
- unverifiedTxGroups, hasError = handler.filterAlreadyCommitted(unverifiedTxGroups)
-
- if len(unverifiedTxGroups) == 0 {
- return false, !hasError
- }
-
- // build the transaction verification context
- latest := handler.ledger.Latest()
- latestHdr, err := handler.ledger.BlockHdr(latest)
- if err != nil {
- // being unable to retrieve the last's block header is not something a working node is expected to expirience ( ever ).
- logging.Base().Errorf("Could not get header for previous block %d: %v", latest, err)
- // returning a disconnect=true, would not fix the problem for the local node, but would force the remote node to pick a different
- // relay, which ( hopefully ! ) would not have the same issue as this one.
- return true, false
- }
-
- unverifiedTxnGroups := make([][]transactions.SignedTxn, len(unverifiedTxGroups))
- for i, unverifiedGroup := range unverifiedTxGroups {
- unverifiedTxnGroups[i] = unverifiedGroup.Transactions
- }
-
- err = verify.PaysetGroups(context.Background(), unverifiedTxnGroups, latestHdr, handler.txVerificationPool, handler.ledger.VerifiedTransactionCache())
- if err != nil {
- // transaction is invalid
- logging.Base().Warnf("One or more transactions were malformed: %v", err)
- return true, false
- }
-
- // at this point, we've verified the transaction group,
- // so we can safely treat the transaction as a verified transaction.
- verifiedTxGroups := unverifiedTxGroups
-
- // before calling RememberArray we should reallocate the individual remaining
- // signed transactions - these transactions were allocated in bulk by the
- // transaction sync. By re-allocating the backing storage, we would allow the
- // original backing storage ( which includes transactions that won't go into the
- // transaction pool ) to be garbge collected.
- for i, group := range verifiedTxGroups {
- copiedTransactions := make(pooldata.SignedTxnSlice, len(group.Transactions))
- copy(copiedTransactions, group.Transactions)
- verifiedTxGroups[i].Transactions = copiedTransactions
- }
-
- // save the transaction, if it has high enough fee and not already in the cache
- err = handler.txPool.RememberArray(verifiedTxGroups)
- if err != nil {
- logging.Base().Debugf("could not remember tx: %v", err)
- return false, false
- }
-
- // if we remembered without any error ( i.e. txpool wasn't full ), then we should pin these transactions.
- err = handler.ledger.VerifiedTransactionCache().PinGroups(verifiedTxGroups)
- if err != nil {
- logging.Base().Warnf("unable to pin transaction: %v", err)
- }
-
- return false, !hasError
+ return network.OutgoingMessage{}, false
}
// SolicitedTxHandler handles messages received through channels other than the gossip network.
@@ -415,146 +328,19 @@ type SolicitedTxHandler interface {
Handle(txgroup []transactions.SignedTxn) error
}
-type solicitedTxHandler struct {
- txHandler *TxHandler
-}
-
// SolicitedTxHandler converts a transaction handler to a SolicitedTxHandler
func (handler *TxHandler) SolicitedTxHandler() SolicitedTxHandler {
return &solicitedTxHandler{txHandler: handler}
}
+type solicitedTxHandler struct {
+ txHandler *TxHandler
+}
+
func (handler *solicitedTxHandler) Handle(txgroup []transactions.SignedTxn) error {
- disconnect := handler.txHandler.processDecoded(txgroup)
- if disconnect {
+ outmsg, _ := handler.txHandler.processDecoded(txgroup)
+ if outmsg.Action == network.Disconnect {
return fmt.Errorf("invalid transaction")
}
return nil
}
-
-// SolicitedAsyncTxHandler handles slices of transaction groups received from the transaction sync.
-// It provides a non-blocking queueing for the processing of these transaction groups, which allows
-// the single-threaded transaction sync to keep processing other messages.
-type SolicitedAsyncTxHandler interface {
- // HandleTransactionGroups enqueues the given slice of transaction groups that came from the given network peer with
- // the given message sequence number. The provided acknowledgement channel provides a feedback for the transaction sync
- // that the entire transaction group slice was added ( or already included ) within the transaction pool. The method
- // return true if it's able to enqueue the processing task, or false if it's unable to enqueue the processing task.
- HandleTransactionGroups(networkPeer interface{}, ackCh chan uint64, messageSeq uint64, groups []pooldata.SignedTxGroup) bool
- Start()
- Stop()
-}
-
-type solicitedAsyncTxHandler struct {
- txHandler *TxHandler
- backlogGroups chan *txGroups
- stopped sync.WaitGroup
- stopCtxFunc context.CancelFunc
- // skipNextBacklogWarning is used to avoid repeated backlog full warning messages.
- skipNextBacklogWarning bool
-}
-
-type txGroups struct {
- // the network package opaque network peer
- networkPeer interface{}
- // the feedback channel, in case we've successfully added the transaction groups to the transaction pool.
- ackCh chan uint64
- // the message sequence number, which would be written back to the feedback channel
- messageSeq uint64
- // the transactions groups slice
- txGroups []pooldata.SignedTxGroup
-}
-
-// SolicitedAsyncTxHandler converts a transaction handler to a SolicitedTxHandler
-func (handler *TxHandler) SolicitedAsyncTxHandler() SolicitedAsyncTxHandler {
- return &solicitedAsyncTxHandler{
- txHandler: handler,
- backlogGroups: make(chan *txGroups, txBacklogSize),
- skipNextBacklogWarning: false,
- }
-}
-
-// HandleTransactionGroups implements the solicitedAsyncTxHandler.HandleTransactionGroups interface.
-// It enqueues the given slice of transaction groups that came from the given network peer with
-// the given message sequence number. The provided acknowledgement channel provides a feedback for the transaction sync
-// that the entire transaction group slice was added ( or already included ) within the transaction pool. The method
-// return true if it's able to enqueue the processing task, or false if it's unable to enqueue the processing task.
-func (handler *solicitedAsyncTxHandler) HandleTransactionGroups(networkPeer interface{}, ackCh chan uint64, messageSeq uint64, groups []pooldata.SignedTxGroup) (enqueued bool) {
- select {
- case handler.backlogGroups <- &txGroups{networkPeer: networkPeer, txGroups: groups, ackCh: ackCh, messageSeq: messageSeq}:
- // reset the skipNextBacklogWarning once the number of pending items on the backlogGroups channels goes to
- // less than half of it's capacity.
- if handler.skipNextBacklogWarning && (len(handler.backlogGroups)*2 < cap(handler.backlogGroups)) {
- handler.skipNextBacklogWarning = false
- }
- enqueued = true
- default:
- if !handler.skipNextBacklogWarning {
- logging.Base().Warnf("solicitedAsyncTxHandler exhusted groups backlog")
- handler.skipNextBacklogWarning = true
- }
- // if we failed here we want to increase the corresponding metric. It might suggest that we
- // want to increase the queue size.
- transactionMessagesDroppedFromBacklog.Inc(nil)
- }
- return
-}
-
-func (handler *solicitedAsyncTxHandler) Start() {
- if handler.stopCtxFunc == nil {
- handler.txHandler.Start()
- var ctx context.Context
- ctx, handler.stopCtxFunc = context.WithCancel(context.Background())
- handler.stopped.Add(1)
- go handler.loop(ctx)
- }
-}
-
-func (handler *solicitedAsyncTxHandler) Stop() {
- if handler.stopCtxFunc != nil {
- handler.stopCtxFunc()
- handler.stopped.Wait()
- handler.stopCtxFunc = nil
- handler.txHandler.Stop()
- }
-}
-
-func (handler *solicitedAsyncTxHandler) loop(ctx context.Context) {
- defer handler.stopped.Done()
- var groups *txGroups
- for {
- select {
- case <-ctx.Done():
- return
- case groups = <-handler.backlogGroups:
- }
- disconnect, allTransactionsIncluded := handler.txHandler.processDecodedArray(groups.txGroups)
- if disconnect {
- handler.txHandler.net.Disconnect(groups.networkPeer)
- handler.txHandler.net.RequestConnectOutgoing(false, make(chan struct{}))
- transactionMessagesDroppedFromPool.Inc(nil)
- } else if allTransactionsIncluded {
- for _, txnGroup := range groups.txGroups {
- // We reencode here instead of using rawmsg.Data to avoid broadcasting non-canonical encodings
- err := handler.txHandler.net.Relay(ctx, protocol.TxnTag, reencode(txnGroup.Transactions), false, groups.networkPeer)
- if err != nil {
- logging.Base().Infof("solicitedAsyncTxHandler was unable to relay transaction message : %v", err)
- break
- }
- }
- select {
- case groups.ackCh <- groups.messageSeq:
- // all good, write was successful.
- default:
- // unable to write since channel was full - log this:
- logging.Base().Warnf("solicitedAsyncTxHandler was unable to ack transaction groups inclusion since the acknowledgement channel was full")
- }
- // we've processed this message, so increase the counter.
- transactionMessagesHandled.Inc(nil)
- } else {
- transactionMessagesDroppedFromPool.Inc(nil)
- }
- // clear out the groups; that would allow the GC to collect the group's memory allocations while we wait for the next task.
- *groups = txGroups{}
- }
-}
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index ad3bbc833..a53d83163 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -29,18 +29,19 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
)
-func makeTestingTransactionPoolAndLedger(tb testing.TB, N int) (*pools.TransactionPool, *Ledger, []*crypto.SignatureSecrets, []basics.Address) {
+func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
+ b.StopTimer()
+ b.ResetTimer()
+ const numRounds = 10
const numUsers = 100
- log := logging.TestingLog(tb)
+ log := logging.TestingLog(b)
secrets := make([]*crypto.SignatureSecrets, numUsers)
addresses := make([]basics.Address, numUsers)
@@ -61,25 +62,20 @@ func makeTestingTransactionPoolAndLedger(tb testing.TB, N int) (*pools.Transacti
MicroAlgos: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinBalance},
}
- require.Equal(tb, len(genesis), numUsers+1)
+ require.Equal(b, len(genesis), numUsers+1)
genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
- ledgerName := fmt.Sprintf("%s-mem-%d", tb.Name(), N)
+ ledgerName := fmt.Sprintf("%s-mem-%d", b.Name(), b.N)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg)
- require.NoError(tb, err)
+ require.NoError(b, err)
+
+ l := ledger
cfg.TxPoolSize = 20000
cfg.EnableProcessBlockStats = false
- tp := pools.MakeTransactionPool(ledger.Ledger, cfg, logging.Base())
- return tp, ledger, secrets, addresses
-}
-
-func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
- const numUsers = 100
- tp, l, secrets, addresses := makeTestingTransactionPoolAndLedger(b, b.N)
- defer l.Close()
+ tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base())
signedTransactions := make([]transactions.SignedTxn, 0, b.N)
for i := 0; i < b.N/numUsers; i++ {
for u := 0; u < numUsers; u++ {
@@ -104,17 +100,19 @@ func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
}
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
txHandler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
- b.ResetTimer()
+ b.StartTimer()
for _, signedTxn := range signedTransactions {
txHandler.processDecoded([]transactions.SignedTxn{signedTxn})
}
}
func BenchmarkTimeAfter(b *testing.B) {
+ b.StopTimer()
+ b.ResetTimer()
deadline := time.Now().Add(5 * time.Second)
after := 0
before := 0
- b.ResetTimer()
+ b.StartTimer()
for i := 0; i < b.N; i++ {
if time.Now().After(deadline) {
after++
@@ -123,143 +121,3 @@ func BenchmarkTimeAfter(b *testing.B) {
}
}
}
-func TestFilterAlreadyCommitted(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- const numUsers = 100
- tp, l, secrets, addresses := makeTestingTransactionPoolAndLedger(t, 1)
- defer l.Close()
- signedTransactions := make([]transactions.SignedTxn, 0, 100)
-
- for u := 0; u < numUsers; u++ {
- // generate transactions
- tx := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addresses[u],
- Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
- FirstValid: 0,
- LastValid: basics.Round(proto.MaxTxnLife),
- GenesisHash: l.GenesisHash(),
- Note: make([]byte, 2),
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addresses[(u+1)%numUsers],
- Amount: basics.MicroAlgos{Raw: mockBalancesMinBalance + (rand.Uint64() % 10000)},
- },
- }
- signedTx := tx.Sign(secrets[u])
- signedTransactions = append(signedTransactions, signedTx)
- }
-
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- txHandler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
-
- // add the first 10 transactions to the pool.
- for i := 0; i < 10; i++ {
- tp.Remember(pooldata.SignedTxGroup{Transactions: []transactions.SignedTxn{signedTransactions[i]}})
- }
-
- allNew := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[10:11],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- }
- allNewRef := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[10:11],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- }
- allNewTransactions, allNewNonDupFilteredGroups := txHandler.filterAlreadyCommitted(allNew)
- require.Equal(t, allNewRef, allNewTransactions)
- require.False(t, allNewNonDupFilteredGroups)
-
- firstTxDup := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{signedTransactions[1]},
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- }
- firstTxExpectedOutput := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- }
- firstTxDupTransactions, firstTxDupNonDupFilteredGroups := txHandler.filterAlreadyCommitted(firstTxDup)
- require.Equal(t, firstTxExpectedOutput, firstTxDupTransactions)
- require.False(t, firstTxDupNonDupFilteredGroups)
-
- lastTxDup := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{signedTransactions[1]},
- },
- }
- lastTxExpectedOutput := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- }
- lastTxDupTransactions, lastTxDupNonDupFilteredGroups := txHandler.filterAlreadyCommitted(lastTxDup)
- require.Equal(t, lastTxExpectedOutput, lastTxDupTransactions)
- require.False(t, lastTxDupNonDupFilteredGroups)
-
- midTxDup := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[10:11],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{signedTransactions[1]},
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[13:14],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[14:15],
- },
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{signedTransactions[2]},
- },
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{signedTransactions[3]},
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[15:16],
- },
- }
- midTxDupExpectedOutput := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[10:11],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[11:12],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[13:14],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[14:15],
- },
- pooldata.SignedTxGroup{
- Transactions: signedTransactions[15:16],
- },
- }
- midTxDupTransactions, midTxDupNonDupFilteredGroups := txHandler.filterAlreadyCommitted(midTxDup)
- require.Equal(t, midTxDupExpectedOutput, midTxDupTransactions)
- require.False(t, midTxDupNonDupFilteredGroups)
-
- return
-}
diff --git a/go.mod b/go.mod
index eb84f8d10..00a9719d6 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64
github.com/algorand/msgp v1.1.48
github.com/algorand/oapi-codegen v1.3.5-algorand5
- github.com/algorand/websocket v1.4.3
+ github.com/algorand/websocket v1.4.4
github.com/algorand/xorfilter v0.2.0
github.com/aws/aws-sdk-go v1.16.5
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e
@@ -26,7 +26,6 @@ require (
github.com/gopherjs/gopherwasm v1.0.1 // indirect
github.com/gorilla/context v1.1.1 // indirect
github.com/gorilla/mux v1.6.2
- github.com/gorilla/schema v1.0.2
github.com/gorilla/websocket v1.4.2 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmoiron/sqlx v1.2.0
@@ -47,7 +46,7 @@ require (
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
golang.org/x/net v0.0.0-20200904194848-62affa334b73
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
- golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f
+ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
diff --git a/go.sum b/go.sum
index d86dba456..0ac28a945 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ github.com/algorand/msgp v1.1.48 h1:5P+gVmTnk0m37r+rA3ZsFZW219ZqmCLulW5f8Z+3nx8=
github.com/algorand/msgp v1.1.48/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI=
github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY=
-github.com/algorand/websocket v1.4.3 h1:8YiA+ZtwqAyg0K30lQyl7gUdKUArYXvBtd/cTFwA4uQ=
-github.com/algorand/websocket v1.4.3/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8=
+github.com/algorand/websocket v1.4.4 h1:BL9atWs/7tkV73NCwiLZ5YqDENMBsSxozc5gDtPdsQ4=
+github.com/algorand/websocket v1.4.4/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8=
github.com/algorand/xorfilter v0.2.0 h1:YC31ANxdZ2jmtbwqv1+USskVSqjkeiRZcQGc6//ro9Q=
github.com/algorand/xorfilter v0.2.0/go.mod h1:f5cJsYrFbJhXkbjnV4odJB44np05/PvwvdBnABnQoUs=
github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc=
@@ -20,6 +20,7 @@ github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz7
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
+github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4 h1:Fphwr1XDjkTR/KFbrrkLfY6D2CEOlHqFGomQQrxcHFs=
github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -32,6 +33,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f h1:eyHMPp7tXlBMF8PZHdsL89G0ehuRNflu7zKUeoQjcJ0=
github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f/go.mod h1:GprdPCZglWh5OMcIDpeKBxuUJI+fEDOTVUfxZeda4zo=
@@ -40,24 +42,33 @@ github.com/getkin/kin-openapi v0.22.0 h1:J5IFyKd/5yuB6AZAgwK0CMBKnabWcmkowtsl6bR
github.com/getkin/kin-openapi v0.22.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-chi/chi v4.1.1+incompatible h1:MmTgB0R8Bt/jccxp+t6S/1VGIKdJw5J74CK/c9tTfA4=
github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
+github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKoghCmop5B0TRyu/ZieziZuGiM=
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherwasm v1.0.1 h1:Gmj9RMDjh+P9EFzzQltoCtjAxR5mUkaJqaNPfeaNe2I=
github.com/gopherjs/gopherwasm v1.0.1/go.mod h1:SkZ8z7CWBz5VXbhJel8TxCmAcsQqzgWGR/8nMhyhZSI=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/schema v1.0.2 h1:sAgNfOcNYvdDSrzGHVy9nzCQahG+qmsg+nE8dK85QRA=
github.com/gorilla/schema v1.0.2/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -71,17 +82,22 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o=
github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI=
github.com/labstack/echo/v4 v4.1.17 h1:PQIBaRplyRy3OjwILGkPg89JRtH2x5bssi59G2EL3fo=
github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54 h1:p8zN0Xu28xyEkPpqLbFXAnjdgBVvTJCpfOtoDf/+/RQ=
github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -94,7 +110,9 @@ github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK86
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8=
github.com/olivere/elastic v6.2.14+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
@@ -120,13 +138,16 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4=
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
@@ -139,6 +160,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
@@ -151,6 +173,7 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOL
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -165,7 +188,10 @@ golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -177,13 +203,17 @@ golang.org/x/tools v0.0.0-20200423205358-59e73619c742 h1:9OGWpORUXvk8AsaBJlpzzDx
golang.org/x/tools v0.0.0-20200423205358-59e73619c742/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 h1:q/fZgS8MMadqFFGa8WL4Oyz+TmjiZfi8UrzWhTl8d5w=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009/go.mod h1:O0bY1e/dSoxMYZYTHP0SWKxG5EWLEvKR9/cOjWPPMKU=
+gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2 h1:MZF6J7CV6s/h0HBkfqebrYfKCVEo5iN+wzE4QhV3Evo=
gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2/go.mod h1:s1Sn2yZos05Qfs7NKt867Xe18emOmtsO3eAKbDaon0o=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/installer/config.json.example b/installer/config.json.example
index b1a977e64..fac112201 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 18,
+ "Version": 19,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AnnounceParticipationKey": true,
@@ -71,8 +71,9 @@
"OutgoingMessageFilterBucketSize": 128,
"ParticipationKeysRefreshInterval": 60000000000,
"PeerConnectionsUpdateInterval": 3600,
- "PeerPingPeriodSeconds": 10,
+ "PeerPingPeriodSeconds": 0,
"PriorityPeers": {},
+ "ProposalAssemblyTime": 250000000,
"PublicAddress": "",
"ReconnectTime": 60000000000,
"ReservedFDs": 256,
diff --git a/ledger/README.md b/ledger/README.md
index 04c088e4c..37d6baed7 100644
--- a/ledger/README.md
+++ b/ledger/README.md
@@ -124,13 +124,13 @@ three functions:
- Construct a new block, based on a pool of potential transactions
and rewards, that will be valid. This is done by using
- the `Ledger.StartEvaluator(hdr, paysetHint, maxTxnBytesPerBlock)` method.
- This returns a `BlockEvaluator`, which can then accept tentative transactions
+ the `Ledger.StartEvaluator(hdr, txcache)` method. This returns a
+ `BlockEvaluator`, which can then accept tentative transactions
and rewards (using `BlockEvaluator.Transaction()` and
`BlockEvaluator.Reward()`). The caller can finalize the block by
- calling `BlockEvaluator.GenerateBlock()`. `paysetHint` provides a hint
- to the evaluator for the upcoming number of transactions. `maxTxnBytesPerBlock`
- allows the evaluator to adjust the size of the block dynamically.
+ calling `BlockEvaluator.GenerateBlock()`. `txcache` represents a
+ cache of previously verified transactions, to avoid repeated checking
+ of transaction signatures.
- Validate a block. This is done by calling `Ledger.Validate(block, txcache)`.
Under the covers, it executes the same logic using a `BlockEvaluator`.
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 1a8b6f973..2b12b42c2 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -682,8 +682,8 @@ func removeEmptyAccountData(tx *sql.Tx, queryAddresses bool) (num int64, address
// the full AccountData because we need to store a large number of these
// in memory (say, 1M), and storing that many AccountData could easily
// cause us to run out of memory.
-func accountDataToOnline(address basics.Address, ad *basics.AccountData, proto config.ConsensusParams) *onlineAccount {
- return &onlineAccount{
+func accountDataToOnline(address basics.Address, ad *basics.AccountData, proto config.ConsensusParams) *ledgercore.OnlineAccount {
+ return &ledgercore.OnlineAccount{
Address: address,
MicroAlgos: ad.MicroAlgos,
RewardsBase: ad.RewardsBase,
@@ -711,14 +711,18 @@ func accountsReset(tx *sql.Tx) error {
return err
}
-// accountsRound returns the tracker balances round number, and the round of the hash tree
-// if the hash of the tree doesn't exists, it returns zero.
-func accountsRound(tx *sql.Tx) (rnd basics.Round, hashrnd basics.Round, err error) {
+// accountsRound returns the tracker balances round number
+func accountsRound(tx *sql.Tx) (rnd basics.Round, err error) {
err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='acctbase'").Scan(&rnd)
if err != nil {
return
}
+ return
+}
+// accountsHashRound returns the round of the hash tree
+// if the hash of the tree doesn't exists, it returns zero.
+func accountsHashRound(tx *sql.Tx) (hashrnd basics.Round, err error) {
err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='hashbase'").Scan(&hashrnd)
if err == sql.ErrNoRows {
hashrnd = basics.Round(0)
@@ -727,7 +731,7 @@ func accountsRound(tx *sql.Tx) (rnd basics.Round, hashrnd basics.Round, err erro
return
}
-func accountsDbInit(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) {
+func accountsInitDbQueries(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) {
var err error
qs := &accountsDbQueries{}
@@ -1009,14 +1013,14 @@ func (qs *accountsDbQueries) close() {
//
// Note that this does not check if the accounts have a vote key valid for any
// particular round (past, present, or future).
-func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParams) (map[basics.Address]*onlineAccount, error) {
+func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) {
rows, err := tx.Query("SELECT address, data FROM accountbase WHERE normalizedonlinebalance>0 ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? OFFSET ?", n, offset)
if err != nil {
return nil, err
}
defer rows.Close()
- res := make(map[basics.Address]*onlineAccount, n)
+ res := make(map[basics.Address]*ledgercore.OnlineAccount, n)
for rows.Next() {
var addrbuf []byte
var buf []byte
@@ -1181,54 +1185,8 @@ func accountsNewRound(tx *sql.Tx, updates compactAccountDeltas, creatables map[b
return
}
-// totalsNewRounds updates the accountsTotals by applying series of round changes
-func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpdates compactAccountDeltas, accountTotals []ledgercore.AccountTotals, proto config.ConsensusParams) (err error) {
- var ot basics.OverflowTracker
- totals, err := accountsTotals(tx, false)
- if err != nil {
- return
- }
-
- // copy the updates base account map, since we don't want to modify the input map.
- accounts := make(map[basics.Address]basics.AccountData, compactUpdates.len())
- for i := 0; i < compactUpdates.len(); i++ {
- addr, acctData := compactUpdates.getByIdx(i)
- accounts[addr] = acctData.old.accountData
- }
-
- for i := 0; i < len(updates); i++ {
- totals.ApplyRewards(accountTotals[i].RewardsLevel, &ot)
-
- for j := 0; j < updates[i].Len(); j++ {
- addr, data := updates[i].GetByIdx(j)
-
- if oldAccountData, has := accounts[addr]; has {
- totals.DelAccount(proto, oldAccountData, &ot)
- } else {
- err = fmt.Errorf("missing old account data")
- return
- }
-
- totals.AddAccount(proto, data, &ot)
- accounts[addr] = data
- }
- }
-
- if ot.Overflowed {
- err = fmt.Errorf("overflow computing totals")
- return
- }
-
- err = accountsPutTotals(tx, totals, false)
- if err != nil {
- return
- }
-
- return
-}
-
// updates the round number associated with the current account data.
-func updateAccountsRound(tx *sql.Tx, rnd basics.Round, hashRound basics.Round) (err error) {
+func updateAccountsRound(tx *sql.Tx, rnd basics.Round) (err error) {
res, err := tx.Exec("UPDATE acctrounds SET rnd=? WHERE id='acctbase' AND rnd<?", rnd, rnd)
if err != nil {
return
@@ -1254,13 +1212,17 @@ func updateAccountsRound(tx *sql.Tx, rnd basics.Round, hashRound basics.Round) (
return
}
}
+ return
+}
- res, err = tx.Exec("INSERT OR REPLACE INTO acctrounds(id,rnd) VALUES('hashbase',?)", hashRound)
+// updates the round number associated with the hash of current account data.
+func updateAccountsHashRound(tx *sql.Tx, hashRound basics.Round) (err error) {
+ res, err := tx.Exec("INSERT OR REPLACE INTO acctrounds(id,rnd) VALUES('hashbase',?)", hashRound)
if err != nil {
return
}
- aff, err = res.RowsAffected()
+ aff, err := res.RowsAffected()
if err != nil {
return
}
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 38dcf996b..ab64fe48f 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -35,320 +35,19 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/db"
)
-func randomAddress() basics.Address {
- var addr basics.Address
- crypto.RandBytes(addr[:])
- return addr
-}
-
-func randomNote() []byte {
- var note [16]byte
- crypto.RandBytes(note[:])
- return note[:]
-}
-
-func randomAccountData(rewardsLevel uint64) basics.AccountData {
- var data basics.AccountData
-
- // Avoid overflowing totals
- data.MicroAlgos.Raw = crypto.RandUint64() % (1 << 32)
-
- switch crypto.RandUint64() % 3 {
- case 0:
- data.Status = basics.Online
- case 1:
- data.Status = basics.Offline
- default:
- data.Status = basics.NotParticipating
- }
-
- data.RewardsBase = rewardsLevel
- data.VoteFirstValid = 0
- data.VoteLastValid = 1000
- return data
-}
-
-func randomFullAccountData(rewardsLevel, lastCreatableID uint64) (basics.AccountData, uint64) {
- data := randomAccountData(rewardsLevel)
-
- crypto.RandBytes(data.VoteID[:])
- crypto.RandBytes(data.SelectionID[:])
- data.VoteFirstValid = basics.Round(crypto.RandUint64())
- data.VoteLastValid = basics.Round(crypto.RandUint64())
- data.VoteKeyDilution = crypto.RandUint64()
- if 1 == (crypto.RandUint64() % 2) {
- // if account has created assets, have these defined.
- data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
- createdAssetsCount := crypto.RandUint64()%20 + 1
- for i := uint64(0); i < createdAssetsCount; i++ {
- ap := basics.AssetParams{
- Total: crypto.RandUint64(),
- Decimals: uint32(crypto.RandUint64() % 20),
- DefaultFrozen: (crypto.RandUint64()%2 == 0),
- UnitName: fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffffff)),
- AssetName: fmt.Sprintf("an%x", uint32(crypto.RandUint64()%0x7fffffff)),
- URL: fmt.Sprintf("url%x", uint32(crypto.RandUint64()%0x7fffffff)),
- }
- crypto.RandBytes(ap.MetadataHash[:])
- crypto.RandBytes(ap.Manager[:])
- crypto.RandBytes(ap.Reserve[:])
- crypto.RandBytes(ap.Freeze[:])
- crypto.RandBytes(ap.Clawback[:])
- lastCreatableID++
- data.AssetParams[basics.AssetIndex(lastCreatableID)] = ap
- }
- }
- if 1 == (crypto.RandUint64()%2) && lastCreatableID > 0 {
- // if account owns assets
- data.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
- ownedAssetsCount := crypto.RandUint64()%20 + 1
- for i := uint64(0); i < ownedAssetsCount; i++ {
- ah := basics.AssetHolding{
- Amount: crypto.RandUint64(),
- Frozen: (crypto.RandUint64()%2 == 0),
- }
- data.Assets[basics.AssetIndex(crypto.RandUint64()%lastCreatableID)] = ah
- }
- }
- if 1 == (crypto.RandUint64() % 5) {
- crypto.RandBytes(data.AuthAddr[:])
- }
-
- if 1 == (crypto.RandUint64()%3) && lastCreatableID > 0 {
- data.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState)
- appStatesCount := crypto.RandUint64()%20 + 1
- for i := uint64(0); i < appStatesCount; i++ {
- ap := basics.AppLocalState{
- Schema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
- NumByteSlice: crypto.RandUint64() % 5,
- },
- KeyValue: make(map[string]basics.TealValue),
- }
-
- for i := uint64(0); i < ap.Schema.NumUint; i++ {
- appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
- ap.KeyValue[appName] = basics.TealValue{
- Type: basics.TealUintType,
- Uint: crypto.RandUint64(),
- }
- }
- for i := uint64(0); i < ap.Schema.NumByteSlice; i++ {
- appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
- tv := basics.TealValue{
- Type: basics.TealBytesType,
- }
- bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(appName)))
- crypto.RandBytes(bytes[:])
- tv.Bytes = string(bytes)
- ap.KeyValue[appName] = tv
- }
- if len(ap.KeyValue) == 0 {
- ap.KeyValue = nil
- }
- data.AppLocalStates[basics.AppIndex(crypto.RandUint64()%lastCreatableID)] = ap
- }
- }
-
- if 1 == (crypto.RandUint64() % 3) {
- data.TotalAppSchema = basics.StateSchema{
- NumUint: crypto.RandUint64() % 50,
- NumByteSlice: crypto.RandUint64() % 50,
- }
- }
- if 1 == (crypto.RandUint64() % 3) {
- data.AppParams = make(map[basics.AppIndex]basics.AppParams)
- appParamsCount := crypto.RandUint64()%5 + 1
- for i := uint64(0); i < appParamsCount; i++ {
- ap := basics.AppParams{
- ApprovalProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
- ClearStateProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
- GlobalState: make(basics.TealKeyValue),
- StateSchemas: basics.StateSchemas{
- LocalStateSchema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
- NumByteSlice: crypto.RandUint64() % 5,
- },
- GlobalStateSchema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
- NumByteSlice: crypto.RandUint64() % 5,
- },
- },
- }
- if len(ap.ApprovalProgram) > 0 {
- crypto.RandBytes(ap.ApprovalProgram[:])
- } else {
- ap.ApprovalProgram = nil
- }
- if len(ap.ClearStateProgram) > 0 {
- crypto.RandBytes(ap.ClearStateProgram[:])
- } else {
- ap.ClearStateProgram = nil
- }
-
- for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumUint+ap.StateSchemas.GlobalStateSchema.NumUint; i++ {
- appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
- ap.GlobalState[appName] = basics.TealValue{
- Type: basics.TealUintType,
- Uint: crypto.RandUint64(),
- }
- }
- for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumByteSlice+ap.StateSchemas.GlobalStateSchema.NumByteSlice; i++ {
- appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
- tv := basics.TealValue{
- Type: basics.TealBytesType,
- }
- bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen))
- crypto.RandBytes(bytes[:])
- tv.Bytes = string(bytes)
- ap.GlobalState[appName] = tv
- }
- if len(ap.GlobalState) == 0 {
- ap.GlobalState = nil
- }
- lastCreatableID++
- data.AppParams[basics.AppIndex(lastCreatableID)] = ap
- }
-
- }
- return data, lastCreatableID
-}
-
-func randomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.AccountData {
- res := make(map[basics.Address]basics.AccountData)
- if simpleAccounts {
- for i := 0; i < niter; i++ {
- res[randomAddress()] = randomAccountData(0)
- }
- } else {
- lastCreatableID := crypto.RandUint64() % 512
- for i := 0; i < niter; i++ {
- res[randomAddress()], lastCreatableID = randomFullAccountData(0, lastCreatableID)
- }
- }
- return res
-}
-
-func randomDeltas(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64) {
- updates, totals, imbalance, _ = randomDeltasImpl(niter, base, rewardsLevel, true, 0)
- return
-}
-
-func randomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
- updates, totals, imbalance, lastCreatableID = randomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
- return
-}
-
-func randomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- totals = make(map[basics.Address]basics.AccountData)
-
- // copy base -> totals
- for addr, data := range base {
- totals[addr] = data
- }
-
- // if making a full delta then need to determine max asset/app id to get rid of conflicts
- lastCreatableID = lastCreatableIDIn
- if !simple {
- for _, ad := range base {
- for aid := range ad.AssetParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- }
- for aid := range ad.AppParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- }
- }
- }
-
- // Change some existing accounts
- {
- i := 0
- for addr, old := range base {
- if i >= len(base)/2 || i >= niter {
- break
- }
-
- if addr == testPoolAddr {
- continue
- }
- i++
-
- var new basics.AccountData
- if simple {
- new = randomAccountData(rewardsLevel)
- } else {
- new, lastCreatableID = randomFullAccountData(rewardsLevel, lastCreatableID)
- }
- updates.Upsert(addr, new)
- imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
- totals[addr] = new
- break
- }
- }
-
- // Change some new accounts
- for i := 0; i < niter; i++ {
- addr := randomAddress()
- old := totals[addr]
- var new basics.AccountData
- if simple {
- new = randomAccountData(rewardsLevel)
- } else {
- new, lastCreatableID = randomFullAccountData(rewardsLevel, lastCreatableID)
- }
- updates.Upsert(addr, new)
- imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
- totals[addr] = new
- }
-
- return
-}
-
-func randomDeltasBalanced(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData) {
- updates, totals, _ = randomDeltasBalancedImpl(niter, base, rewardsLevel, true, 0)
- return
-}
-
-func randomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
- updates, totals, lastCreatableID = randomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
- return
-}
-
-func randomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
- var imbalance int64
- if simple {
- updates, totals, imbalance = randomDeltas(niter, base, rewardsLevel)
- } else {
- updates, totals, imbalance, lastCreatableID = randomDeltasFull(niter, base, rewardsLevel, lastCreatableIDIn)
- }
-
- oldPool := base[testPoolAddr]
- newPool := oldPool
- newPool.MicroAlgos.Raw += uint64(imbalance)
-
- updates.Upsert(testPoolAddr, newPool)
- totals[testPoolAddr] = newPool
-
- return updates, totals, lastCreatableID
-}
-
func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.Address]basics.AccountData) {
- r, _, err := accountsRound(tx)
+ r, err := accountsRound(tx)
require.NoError(t, err)
require.Equal(t, r, rnd)
- aq, err := accountsDbInit(tx, tx)
+ aq, err := accountsInitDbQueries(tx, tx)
require.NoError(t, err)
defer aq.close()
@@ -382,18 +81,18 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
totals, err := accountsTotals(tx, false)
require.NoError(t, err)
- require.Equal(t, totals.Online.Money.Raw, totalOnline)
+ require.Equal(t, totals.Online.Money.Raw, totalOnline, "mismatching total online money")
require.Equal(t, totals.Offline.Money.Raw, totalOffline)
require.Equal(t, totals.NotParticipating.Money.Raw, totalNotPart)
require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline)
require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart)
- d, err := aq.lookup(randomAddress())
+ d, err := aq.lookup(ledgertesting.RandomAddress())
require.NoError(t, err)
require.Equal(t, rnd, d.round)
require.Equal(t, d.accountData, basics.AccountData{})
- onlineAccounts := make(map[basics.Address]*onlineAccount)
+ onlineAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
for addr, data := range accts {
if data.Status == basics.Online {
onlineAccounts[addr] = accountDataToOnline(addr, &data, proto)
@@ -406,7 +105,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
require.Equal(t, i, len(dbtop))
// Compute the top-N accounts ourselves
- var testtop []onlineAccount
+ var testtop []ledgercore.OnlineAccount
for _, data := range onlineAccounts {
testtop = append(testtop, *data)
}
@@ -447,7 +146,7 @@ func TestAccountDBInit(t *testing.T) {
require.NoError(t, err)
defer tx.Rollback()
- accts := randomAccounts(20, true)
+ accts := ledgertesting.RandomAccounts(20, true)
newDB, err := accountsInit(tx, accts, proto)
require.NoError(t, err)
require.True(t, newDB)
@@ -540,10 +239,12 @@ func TestAccountDBRound(t *testing.T) {
require.NoError(t, err)
defer tx.Rollback()
- accts := randomAccounts(20, true)
+ accts := ledgertesting.RandomAccounts(20, true)
_, err = accountsInit(tx, accts, proto)
require.NoError(t, err)
checkAccounts(t, tx, 0, accts)
+ totals, err := accountsTotals(tx, false)
+ require.NoError(t, err)
// used to determine how many creatables element will be in the test per iteration
numElementsPerSegement := 10
@@ -553,11 +254,12 @@ func TestAccountDBRound(t *testing.T) {
ctbsList, randomCtbs := randomCreatables(numElementsPerSegement)
expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
var baseAccounts lruAccounts
+ var newaccts map[basics.Address]basics.AccountData
baseAccounts.init(nil, 100, 80)
for i := 1; i < 10; i++ {
var updates ledgercore.AccountDeltas
- var newaccts map[basics.Address]basics.AccountData
- updates, newaccts, _, lastCreatableID = randomDeltasFull(20, accts, 0, lastCreatableID)
+ updates, newaccts, _, lastCreatableID = ledgertesting.RandomDeltasFull(20, accts, 0, lastCreatableID)
+ totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals)
accts = newaccts
ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs,
expectedDbImage, numElementsPerSegement)
@@ -565,15 +267,26 @@ func TestAccountDBRound(t *testing.T) {
updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, baseAccounts)
err = updatesCnt.accountsLoadOld(tx)
require.NoError(t, err)
- err = totalsNewRounds(tx, []ledgercore.AccountDeltas{updates}, updatesCnt, []ledgercore.AccountTotals{{}}, proto)
+ err = accountsPutTotals(tx, totals, false)
require.NoError(t, err)
_, err = accountsNewRound(tx, updatesCnt, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
- err = updateAccountsRound(tx, basics.Round(i), 0)
+ err = updateAccountsRound(tx, basics.Round(i))
require.NoError(t, err)
checkAccounts(t, tx, basics.Round(i), accts)
checkCreatables(t, tx, i, expectedDbImage)
}
+
+ // test the accounts totals
+ var updates ledgercore.AccountDeltas
+ for addr, acctData := range newaccts {
+ updates.Upsert(addr, acctData)
+ }
+
+ expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, nil, ledgercore.AccountTotals{})
+ actualTotals, err := accountsTotals(tx, false)
+ require.NoError(t, err)
+ require.Equal(t, expectedTotals, actualTotals)
}
// checkCreatables compares the expected database image to the actual databse content
@@ -683,7 +396,7 @@ func randomCreatable(uniqueAssetIds map[basics.CreatableIndex]bool) (
creatable := ledgercore.ModifiedCreatable{
Ctype: ctype,
Created: (crypto.RandUint64() % 2) == 1,
- Creator: randomAddress(),
+ Creator: ledgertesting.RandomAddress(),
Ndeltas: 1,
}
@@ -705,7 +418,7 @@ func generateRandomTestingAccountBalances(numAccounts int) (updates map[basics.A
updates = make(map[basics.Address]basics.AccountData, numAccounts)
for i := 0; i < numAccounts; i++ {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
updates[addr] = basics.AccountData{
MicroAlgos: basics.MicroAlgos{Raw: 0x000ffffffffffffff / uint64(numAccounts)},
Status: basics.NotParticipating,
@@ -804,7 +517,7 @@ func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) {
accounts := benchmarkInitBalances(b, b.N, dbs, proto)
- qs, err := accountsDbInit(dbs.Rdb.Handle, dbs.Wdb.Handle)
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle, dbs.Wdb.Handle)
require.NoError(b, err)
// read all the balances in the database, shuffled
@@ -977,14 +690,14 @@ func TestAccountsReencoding(t *testing.T) {
}
for _, oldAccData := range oldEncodedAccountsData {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
_, err = tx.ExecContext(ctx, "INSERT INTO accountbase (address, data) VALUES (?, ?)", addr[:], oldAccData)
if err != nil {
return err
}
}
for i := 0; i < 100; i++ {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
accData := basics.AccountData{
MicroAlgos: basics.MicroAlgos{Raw: 0x000ffffffffffffff},
Status: basics.NotParticipating,
@@ -1058,7 +771,7 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) {
return nil
})
require.NoError(t, err)
- qs, err := accountsDbInit(dbs.Rdb.Handle, dbs.Wdb.Handle)
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle, dbs.Wdb.Handle)
require.NoError(t, err)
require.NotNil(t, qs.listCreatablesStmt)
qs.close()
@@ -1069,7 +782,7 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) {
func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder bool) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
const inMem = false
log := logging.TestingLog(b)
cfg := config.GetDefaultLocal()
@@ -1170,7 +883,7 @@ func TestCompactAccountDeltas(t *testing.T) {
a.Equal(-1, idx)
a.Equal(accountDelta{}, data)
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
data, idx = ad.get(addr)
a.Equal(-1, idx)
a.Equal(accountDelta{}, data)
@@ -1217,7 +930,7 @@ func TestCompactAccountDeltas(t *testing.T) {
a.Equal(addr, address)
a.Equal(accountDelta{new: sample2.new, old: old1}, data)
- addr1 := randomAddress()
+ addr1 := ledgertesting.RandomAddress()
old2 := persistedAccountData{addr: addr1, accountData: basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: 789}}}
ad.upsertOld(old2)
a.Equal(2, ad.len())
@@ -1235,7 +948,7 @@ func TestCompactAccountDeltas(t *testing.T) {
a.Equal(addr, address)
a.Equal(accountDelta{new: sample2.new, old: old2}, data)
- addr2 := randomAddress()
+ addr2 := ledgertesting.RandomAddress()
idx = ad.insert(addr2, sample2)
a.Equal(3, ad.len())
a.Equal(2, idx)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index a49ef95d7..16d933fbb 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -20,13 +20,9 @@ import (
"container/heap"
"context"
"database/sql"
- "encoding/hex"
"fmt"
"io"
- "os"
- "path/filepath"
"sort"
- "strconv"
"sync"
"sync/atomic"
"time"
@@ -35,7 +31,6 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -52,24 +47,8 @@ const (
balancesFlushInterval = 5 * time.Second
// pendingDeltasFlushThreshold is the deltas count threshold above we flush the pending balances regardless of the flush interval.
pendingDeltasFlushThreshold = 128
- // trieRebuildAccountChunkSize defines the number of accounts that would get read at a single chunk
- // before added to the trie during trie construction
- trieRebuildAccountChunkSize = 16384
- // trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
- trieRebuildCommitFrequency = 65536
- // trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
- // we attempt to commit them to disk while writing a batch of rounds balances to disk.
- trieAccumulatedChangesFlush = 256
)
-// trieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory.
-// value was calibrated using BenchmarkCalibrateCacheNodeSize
-var trieCachedNodesCount = 9000
-
-// merkleCommitterNodesPerPage controls how many nodes will be stored in a single page
-// value was calibrated using BenchmarkCalibrateNodesPerPage
-var merkleCommitterNodesPerPage = int64(116)
-
// baseAccountsPendingAccountsBufferSize defines the size of the base account pending accounts buffer size.
// At the beginning of a new round, the entries from this buffer are being flushed into the base accounts map.
const baseAccountsPendingAccountsBufferSize = 100000
@@ -99,14 +78,6 @@ const initializingAccountCachesMessageTimeout = 3 * time.Second
// where we end up batching up to 1000 rounds in a single update.
const accountsUpdatePerRoundHighWatermark = 1 * time.Second
-// TrieMemoryConfig is the memory configuration setup used for the merkle trie.
-var TrieMemoryConfig = merkletrie.MemoryConfig{
- NodesCountPerPage: merkleCommitterNodesPerPage,
- CachedNodesCount: trieCachedNodesCount,
- PageFillFactor: 0.95,
- MaxChildrenPagesThreshold: 64,
-}
-
// A modifiedAccount represents an account that has been modified since
// the persistent state stored in the account DB (i.e., in the range of
// rounds covered by the accountUpdates tracker).
@@ -125,28 +96,9 @@ type modifiedAccount struct {
type accountUpdates struct {
// constant variables ( initialized on initialize, and never changed afterward )
- // initAccounts specifies initial account values for database.
- initAccounts map[basics.Address]basics.AccountData
-
- // initProto specifies the initial consensus parameters at the genesis block.
- initProto config.ConsensusParams
-
- // dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated
- dbDirectory string
-
- // catchpointInterval is the configured interval at which the accountUpdates would generate catchpoint labels and catchpoint files.
- catchpointInterval uint64
-
// archivalLedger determines whether the associated ledger was configured as archival ledger or not.
archivalLedger bool
- // catchpointFileHistoryLength defines how many catchpoint files we want to store back.
- // 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
- catchpointFileHistoryLength int
-
- // vacuumOnStartup controls whether the accounts database would get vacuumed on startup.
- vacuumOnStartup bool
-
// dynamic variables
// Connection to the database.
@@ -155,9 +107,9 @@ type accountUpdates struct {
// Prepared SQL statements for fast accounts DB lookups.
accountsq *accountsDbQueries
- // dbRound is always exactly accountsRound(),
- // cached to avoid SQL queries.
- dbRound basics.Round
+ // cachedDBRound is always exactly tracker DB round (and therefore, accountsRound()),
+ // cached to use in lookup functions
+ cachedDBRound basics.Round
// deltas stores updates for every round after dbRound.
deltas []ledgercore.AccountDeltas
@@ -181,75 +133,29 @@ type accountUpdates struct {
// i.e., totals is one longer than deltas.
roundTotals []ledgercore.AccountTotals
- // roundDigest stores the digest of the block for every round starting with dbRound and every round after it.
- roundDigest []crypto.Digest
-
// log copied from ledger
log logging.Logger
- // lastFlushTime is the time we last flushed updates to
- // the accounts DB (bumping dbRound).
- lastFlushTime time.Time
-
// ledger is the source ledger, which is used to synchronize
// the rounds at which we need to flush the balances to disk
// in favor of the catchpoint to be generated.
ledger ledgerForTracker
- // The Trie tracking the current account balances. Always matches the balances that were
- // written to the database.
- balancesTrie *merkletrie.Trie
-
- // The last catchpoint label that was written to the database. Should always align with what's in the database.
- // note that this is the last catchpoint *label* and not the catchpoint file.
- lastCatchpointLabel string
-
- // catchpointWriting help to synchronize the catchpoint file writing. When this atomic variable is 0, no writing is going on.
- // Any non-zero value indicates a catchpoint being written.
- catchpointWriting int32
-
- // catchpointSlowWriting suggest to the accounts writer that it should finish writing up the catchpoint file ASAP.
- // when this channel is closed, the accounts writer would try and complete the writing as soon as possible.
- // otherwise, it would take it's time and perform periodic sleeps between chunks processing.
- catchpointSlowWriting chan struct{}
-
- // ctx is the context for the committing go-routine. It's also used as the "parent" of the catchpoint generation operation.
- ctx context.Context
-
- // ctxCancel is the canceling function for canceling the committing go-routine ( i.e. signaling the committing go-routine that it's time to abort )
- ctxCancel context.CancelFunc
-
// deltasAccum stores the accumulated deltas for every round starting dbRound-1.
deltasAccum []int
- // committedOffset is the offset at which we'd like to persist all the previous account information to disk.
- committedOffset chan deferredCommit
-
// accountsMu is the synchronization mutex for accessing the various non-static variables.
accountsMu deadlock.RWMutex
// accountsReadCond used to synchronize read access to the internal data structures.
accountsReadCond *sync.Cond
- // accountsWriting provides synchronization around the background writing of account balances.
- accountsWriting sync.WaitGroup
-
- // commitSyncerClosed is the blocking channel for synchronizing closing the commitSyncer goroutine. Once it's closed, the
- // commitSyncer can be assumed to have aborted.
- commitSyncerClosed chan struct{}
-
// voters keeps track of Merkle trees of online accounts, used for compact certificates.
voters *votersTracker
// baseAccounts stores the most recently used accounts, at exactly dbRound
baseAccounts lruAccounts
- // the synchronous mode that would be used for the account database.
- synchronousMode db.SynchronousMode
-
- // the synchronous mode that would be used while the accounts database is being rebuilt.
- accountsRebuildSynchronousMode db.SynchronousMode
-
// logAccountUpdatesMetrics is a flag for enable/disable metrics logging
logAccountUpdatesMetrics bool
@@ -300,106 +206,37 @@ func (e *MismatchingDatabaseRoundError) Error() string {
}
// initialize initializes the accountUpdates structure
-func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, genesisProto config.ConsensusParams, genesisAccounts map[basics.Address]basics.AccountData) {
- au.initProto = genesisProto
- au.initAccounts = genesisAccounts
- au.dbDirectory = filepath.Dir(dbPathPrefix)
+func (au *accountUpdates) initialize(cfg config.Local) {
au.archivalLedger = cfg.Archival
- switch cfg.CatchpointTracking {
- case -1:
- au.catchpointInterval = 0
- default:
- // give a warning, then fall thought
- logging.Base().Warnf("accountUpdates: the CatchpointTracking field in the config.json file contains an invalid value (%d). The default value of 0 would be used instead.", cfg.CatchpointTracking)
- fallthrough
- case 0:
- if au.archivalLedger {
- au.catchpointInterval = cfg.CatchpointInterval
- } else {
- au.catchpointInterval = 0
- }
- case 1:
- au.catchpointInterval = cfg.CatchpointInterval
- }
- au.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength
- if cfg.CatchpointFileHistoryLength < -1 {
- au.catchpointFileHistoryLength = -1
- }
- au.vacuumOnStartup = cfg.OptimizeAccountsDatabaseOnStartup
- // initialize the commitSyncerClosed with a closed channel ( since the commitSyncer go-routine is not active )
- au.commitSyncerClosed = make(chan struct{})
- close(au.commitSyncerClosed)
au.accountsReadCond = sync.NewCond(au.accountsMu.RLocker())
- au.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode)
- au.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode)
// log metrics
au.logAccountUpdatesMetrics = cfg.EnableAccountUpdatesStats
au.logAccountUpdatesInterval = cfg.AccountUpdatesStatsInterval
-
}
// loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional
// The close function is expected to be call in pair with loadFromDisk
-func (au *accountUpdates) loadFromDisk(l ledgerForTracker) error {
+func (au *accountUpdates) loadFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) error {
au.accountsMu.Lock()
defer au.accountsMu.Unlock()
- var writingCatchpointRound uint64
- lastBalancesRound, lastestBlockRound, err := au.initializeFromDisk(l)
-
- if err != nil {
- return err
- }
- var writingCatchpointDigest crypto.Digest
-
- writingCatchpointRound, _, err = au.accountsq.readCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint)
- if err != nil {
- return err
- }
-
- writingCatchpointDigest, err = au.initializeCaches(lastBalancesRound, lastestBlockRound, basics.Round(writingCatchpointRound))
+ au.cachedDBRound = lastBalancesRound
+ err := au.initializeFromDisk(l, lastBalancesRound)
if err != nil {
return err
}
-
- if writingCatchpointRound != 0 && au.catchpointInterval != 0 {
- au.generateCatchpoint(basics.Round(writingCatchpointRound), au.lastCatchpointLabel, writingCatchpointDigest, time.Duration(0))
- }
-
- au.voters = &votersTracker{}
- err = au.voters.loadFromDisk(l, au)
- if err != nil {
- return err
- }
-
return nil
}
-// waitAccountsWriting waits for all the pending ( or current ) account writing to be completed.
-func (au *accountUpdates) waitAccountsWriting() {
- au.accountsWriting.Wait()
-}
-
// close closes the accountUpdates, waiting for all the child go-routine to complete
func (au *accountUpdates) close() {
if au.voters != nil {
au.voters.close()
}
- if au.ctxCancel != nil {
- au.ctxCancel()
- }
- au.waitAccountsWriting()
- // this would block until the commitSyncerClosed channel get closed.
- <-au.commitSyncerClosed
- au.baseAccounts.prune(0)
-}
-// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
-// to avoid memory pressure until the catchpoint file writing is complete.
-func (au *accountUpdates) IsWritingCatchpointFile() bool {
- return atomic.LoadInt32(&au.catchpointWriting) != 0
+ au.baseAccounts.prune(0)
}
// LookupWithRewards returns the account data for a given address at a given round.
@@ -428,7 +265,7 @@ func (au *accountUpdates) ListApplications(maxAppIdx basics.AppIndex, maxResults
func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) ([]basics.CreatableLocator, error) {
au.accountsMu.RLock()
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
// Sort indices for creatables that have been created/deleted. If this
// turns out to be too inefficient, we could keep around a heap of
@@ -502,7 +339,7 @@ func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex,
return []basics.CreatableLocator{}, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound}
}
au.accountsMu.RLock()
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
}
@@ -511,11 +348,11 @@ func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex,
// onlineTop returns the top n online accounts, sorted by their normalized
// balance and address, whose voting keys are valid in voteRnd. See the
// normalization description in AccountData.NormalizedOnlineBalance().
-func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*onlineAccount, error) {
+func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*ledgercore.OnlineAccount, error) {
proto := au.ledger.GenesisProto()
au.accountsMu.RLock()
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err := au.roundOffset(rnd)
if err != nil {
@@ -530,7 +367,7 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
// is not valid in voteRnd. Otherwise, the *onlineAccount is the
// representation of the most recent state of the account, and it
// is online and can vote in voteRnd.
- modifiedAccounts := make(map[basics.Address]*onlineAccount)
+ modifiedAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
for o := uint64(0); o < offset; o++ {
for i := 0; i < au.deltas[o].Len(); i++ {
addr, d := au.deltas[o].GetByIdx(i)
@@ -558,12 +395,12 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
//
// Keep asking for more accounts until we get the desired number,
// or there are no more accounts left.
- candidates := make(map[basics.Address]*onlineAccount)
+ candidates := make(map[basics.Address]*ledgercore.OnlineAccount)
batchOffset := uint64(0)
batchSize := uint64(1024)
var dbRound basics.Round
for uint64(len(candidates)) < n+uint64(len(modifiedAccounts)) {
- var accts map[basics.Address]*onlineAccount
+ var accts map[basics.Address]*ledgercore.OnlineAccount
start := time.Now()
ledgerAccountsonlinetopCount.Inc(nil)
err = au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
@@ -571,7 +408,7 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
if err != nil {
return
}
- dbRound, _, err = accountsRound(tx)
+ dbRound, err = accountsRound(tx)
return
})
ledgerAccountsonlinetopMicros.AddMicrosecondsSince(start, nil)
@@ -601,7 +438,7 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
if dbRound != currentDbRound && dbRound != basics.Round(0) {
// database round doesn't match the last au.dbRound we sampled.
au.accountsMu.RLock()
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
continue
@@ -627,9 +464,9 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
heap.Push(topHeap, data)
}
- var res []*onlineAccount
+ var res []*ledgercore.OnlineAccount
for topHeap.Len() > 0 && uint64(len(res)) < n {
- acct := heap.Pop(topHeap).(*onlineAccount)
+ acct := heap.Pop(topHeap).(*ledgercore.OnlineAccount)
res = append(res, acct)
}
@@ -637,129 +474,72 @@ func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n ui
}
}
-// GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database.
-func (au *accountUpdates) GetLastCatchpointLabel() string {
- au.accountsMu.RLock()
- defer au.accountsMu.RUnlock()
- return au.lastCatchpointLabel
-}
-
// GetCreatorForRound returns the creator for a given asset/app index at a given round
func (au *accountUpdates) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return au.getCreatorForRound(rnd, cidx, ctype, true /* take the lock */)
}
-// committedUpTo enqueues committing the balances for round committedRound-lookback.
+// committedUpTo implements the ledgerTracker interface for accountUpdates.
+// The method informs the tracker that committedRound and all it's previous rounds have
+// been committed to the block database. The method returns what is the oldest round
+// number that can be removed from the blocks database as well as the lookback that this
+// tracker maintains.
+func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound, lookback basics.Round) {
+ au.accountsMu.RLock()
+ defer au.accountsMu.RUnlock()
+
+ retRound = basics.Round(0)
+ lookback = basics.Round(config.Consensus[au.versions[len(au.versions)-1]].MaxBalLookback)
+ if committedRound < lookback {
+ return
+ }
+
+ retRound = au.cachedDBRound
+ return
+}
+
+// produceCommittingTask enqueues committing the balances for round committedRound-lookback.
// The deferred committing is done so that we could calculate the historical balances lookback rounds back.
// Since we don't want to hold off the tracker's mutex for too long, we'll defer the database persistence of this
// operation to a syncer goroutine. The one caveat is that when storing a catchpoint round, we would want to
// wait until the catchpoint creation is done, so that the persistence of the catchpoint file would have an
// uninterrupted view of the balances at a given point of time.
-func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound basics.Round) {
- var isCatchpointRound, hasMultipleIntermediateCatchpoint bool
+func (au *accountUpdates) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
var offset uint64
- var dc deferredCommit
au.accountsMu.RLock()
- defer func() {
- au.accountsMu.RUnlock()
- if dc.offset != 0 {
- au.committedOffset <- dc
- }
- }()
- retRound = basics.Round(0)
- var pendingDeltas int
+ defer au.accountsMu.RUnlock()
- lookback := basics.Round(config.Consensus[au.versions[len(au.versions)-1]].MaxBalLookback)
- if committedRound < lookback {
- return
+ if committedRound < dcr.lookback {
+ return nil
}
- retRound = au.dbRound
- newBase := committedRound - lookback
- if newBase <= au.dbRound {
+ newBase := committedRound - dcr.lookback
+ if newBase <= dbRound {
// Already forgotten
- return
- }
-
- if newBase > au.dbRound+basics.Round(len(au.deltas)) {
- au.log.Panicf("committedUpTo: block %d too far in the future, lookback %d, dbRound %d, deltas %d", committedRound, lookback, au.dbRound, len(au.deltas))
+ return nil
}
- hasIntermediateCatchpoint := false
- hasMultipleIntermediateCatchpoint = false
- // check if there was a catchpoint between au.dbRound+lookback and newBase+lookback
- if au.catchpointInterval > 0 {
- nextCatchpointRound := ((uint64(au.dbRound+lookback) + au.catchpointInterval) / au.catchpointInterval) * au.catchpointInterval
-
- if nextCatchpointRound < uint64(newBase+lookback) {
- mostRecentCatchpointRound := (uint64(committedRound) / au.catchpointInterval) * au.catchpointInterval
- newBase = basics.Round(nextCatchpointRound) - lookback
- if mostRecentCatchpointRound > nextCatchpointRound {
- hasMultipleIntermediateCatchpoint = true
- // skip if there is more than one catchpoint in queue
- newBase = basics.Round(mostRecentCatchpointRound) - lookback
- }
- hasIntermediateCatchpoint = true
- }
- }
-
- // if we're still writing the previous balances, we can't move forward yet.
- if au.IsWritingCatchpointFile() {
- // if we hit this path, it means that we're still writing a catchpoint.
- // see if the new delta range contains another catchpoint.
- if hasIntermediateCatchpoint {
- // check if we're already attempting to perform fast-writing.
- select {
- case <-au.catchpointSlowWriting:
- // yes, we're already doing fast-writing.
- default:
- // no, we're not yet doing fast writing, make it so.
- close(au.catchpointSlowWriting)
- }
- }
- return
+ if newBase > dbRound+basics.Round(len(au.deltas)) {
+ au.log.Panicf("produceCommittingTask: block %d too far in the future, lookback %d, dbRound %d (cached %d), deltas %d", committedRound, dcr.lookback, dbRound, au.cachedDBRound, len(au.deltas))
}
if au.voters != nil {
newBase = au.voters.lowestRound(newBase)
}
- offset = uint64(newBase - au.dbRound)
+ offset = uint64(newBase - dbRound)
offset = au.consecutiveVersion(offset)
- // check to see if this is a catchpoint round
- isCatchpointRound = ((offset + uint64(lookback+au.dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+au.dbRound))) % au.catchpointInterval))
-
// calculate the number of pending deltas
- pendingDeltas = au.deltasAccum[offset] - au.deltasAccum[0]
-
- // If we recently flushed, wait to aggregate some more blocks.
- // ( unless we're creating a catchpoint, in which case we want to flush it right away
- // so that all the instances of the catchpoint would contain exactly the same data )
- flushTime := time.Now()
- if !flushTime.After(au.lastFlushTime.Add(balancesFlushInterval)) && !isCatchpointRound && pendingDeltas < pendingDeltasFlushThreshold {
- return au.dbRound
- }
-
- if isCatchpointRound && au.archivalLedger {
- // store non-zero ( all ones ) into the catchpointWriting atomic variable to indicate that a catchpoint is being written ( or, queued to be written )
- atomic.StoreInt32(&au.catchpointWriting, int32(-1))
- au.catchpointSlowWriting = make(chan struct{}, 1)
- if hasMultipleIntermediateCatchpoint {
- close(au.catchpointSlowWriting)
- }
- }
+ dcr.pendingDeltas = au.deltasAccum[offset] - au.deltasAccum[0]
- dc = deferredCommit{
- offset: offset,
- dbRound: au.dbRound,
- lookback: lookback,
- }
- if offset != 0 {
- au.accountsWriting.Add(1)
- }
- return
+ // submit committing task only if offset is non-zero in addition to
+ // 1) no pending catchpoint writes
+ // 2) batching requirements meet or catchpoint round
+ dcr.oldBase = dbRound
+ dcr.offset = offset
+ return dcr
}
func (au *accountUpdates) consecutiveVersion(offset uint64) uint64 {
@@ -822,64 +602,6 @@ func (r *readCloseSizer) Size() (int64, error) {
return r.size, nil
}
-// GetCatchpointStream returns a ReadCloseSizer to the catchpoint file associated with the provided round
-func (au *accountUpdates) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error) {
- dbFileName := ""
- fileSize := int64(0)
- start := time.Now()
- ledgerGetcatchpointCount.Inc(nil)
- err := au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- dbFileName, _, fileSize, err = getCatchpoint(tx, round)
- return
- })
- ledgerGetcatchpointMicros.AddMicrosecondsSince(start, nil)
- if err != nil && err != sql.ErrNoRows {
- // we had some sql error.
- return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err)
- }
- if dbFileName != "" {
- catchpointPath := filepath.Join(au.dbDirectory, dbFileName)
- file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
- if err == nil && file != nil {
- return &readCloseSizer{ReadCloser: file, size: fileSize}, nil
- }
- // else, see if this is a file-not-found error
- if os.IsNotExist(err) {
- // the database told us that we have this file.. but we couldn't find it.
- // delete it from the database.
- err := au.saveCatchpointFile(round, "", 0, "")
- if err != nil {
- au.log.Warnf("accountUpdates: getCatchpointStream: unable to delete missing catchpoint entry: %v", err)
- return nil, err
- }
-
- return nil, ledgercore.ErrNoEntry{}
- }
- // it's some other error.
- return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to open catchpoint file '%s' %v", catchpointPath, err)
- }
-
- // if the database doesn't know about that round, see if we have that file anyway:
- fileName := filepath.Join("catchpoints", catchpointRoundToPath(round))
- catchpointPath := filepath.Join(au.dbDirectory, fileName)
- file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
- if err == nil && file != nil {
- // great, if found that we should have had this in the database.. add this one now :
- fileInfo, err := file.Stat()
- if err != nil {
- // we couldn't get the stat, so just return with the file.
- return &readCloseSizer{ReadCloser: file, size: -1}, nil
- }
-
- err = au.saveCatchpointFile(round, fileName, fileInfo.Size(), "")
- if err != nil {
- au.log.Warnf("accountUpdates: getCatchpointStream: unable to save missing catchpoint entry: %v", err)
- }
- return &readCloseSizer{ReadCloser: file, size: fileInfo.Size()}, nil
- }
- return nil, ledgercore.ErrNoEntry{}
-}
-
// functions below this line are all internal functions
// accountUpdatesLedgerEvaluator is a "ledger emulator" which is used *only* by initializeCaches, as a way to shortcut
@@ -903,7 +625,7 @@ func (aul *accountUpdatesLedgerEvaluator) GenesisHash() crypto.Digest {
}
// CompactCertVoters returns the top online accounts at round rnd.
-func (aul *accountUpdatesLedgerEvaluator) CompactCertVoters(rnd basics.Round) (voters *VotersForRound, err error) {
+func (aul *accountUpdatesLedgerEvaluator) CompactCertVoters(rnd basics.Round) (voters *ledgercore.VotersForRound, err error) {
return aul.au.voters.getVoters(rnd)
}
@@ -922,7 +644,7 @@ func (aul *accountUpdatesLedgerEvaluator) LatestTotals() (basics.Round, ledgerco
}
// CheckDup test to see if the given transaction id/lease already exists. It's not needed by the accountUpdatesLedgerEvaluator and implemented as a stub.
-func (aul *accountUpdatesLedgerEvaluator) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, TxLease) error {
+func (aul *accountUpdatesLedgerEvaluator) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
// this is a non-issue since this call will never be made on non-validating evaluation
return fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initialization ")
}
@@ -951,205 +673,20 @@ func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals ledgercore.Accoun
// latestTotalsImpl returns the totals of all accounts for the most recent round, as well as the round number
func (au *accountUpdates) latestTotalsImpl() (basics.Round, ledgercore.AccountTotals, error) {
offset := len(au.deltas)
- rnd := au.dbRound + basics.Round(len(au.deltas))
+ rnd := au.cachedDBRound + basics.Round(len(au.deltas))
return rnd, au.roundTotals[offset], nil
}
-// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ).
-// the method also support balances recovery in cases where the difference between the lastBalancesRound and the lastestBlockRound
-// is far greater than 320; in these cases, it would flush to disk periodically in order to avoid high memory consumption.
-func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, writingCatchpointRound basics.Round) (catchpointBlockDigest crypto.Digest, err error) {
- var blk bookkeeping.Block
- var delta ledgercore.StateDelta
-
- accLedgerEval := accountUpdatesLedgerEvaluator{
- au: au,
- }
- if lastBalancesRound < lastestBlockRound {
- accLedgerEval.prevHeader, err = au.ledger.BlockHdr(lastBalancesRound)
- if err != nil {
- return
- }
- }
-
- skipAccountCacheMessage := make(chan struct{})
- writeAccountCacheMessageCompleted := make(chan struct{})
- defer func() {
- close(skipAccountCacheMessage)
- select {
- case <-writeAccountCacheMessageCompleted:
- if err == nil {
- au.log.Infof("initializeCaches completed initializing account data caches")
- }
- default:
- }
- }()
-
- // this goroutine logs a message once if the parent function have not completed in initializingAccountCachesMessageTimeout seconds.
- // the message is important, since we're blocking on the ledger block database here, and we want to make sure that we log a message
- // within the above timeout.
- go func() {
- select {
- case <-time.After(initializingAccountCachesMessageTimeout):
- au.log.Infof("initializeCaches is initializing account data caches")
- close(writeAccountCacheMessageCompleted)
- case <-skipAccountCacheMessage:
- }
- }()
-
- blocksStream := make(chan bookkeeping.Block, initializeCachesReadaheadBlocksStream)
- blockEvalFailed := make(chan struct{}, 1)
- var blockRetrievalError error
- go func() {
- defer close(blocksStream)
- for roundNumber := lastBalancesRound + 1; roundNumber <= lastestBlockRound; roundNumber++ {
- blk, blockRetrievalError = au.ledger.Block(roundNumber)
- if blockRetrievalError != nil {
- return
- }
- select {
- case blocksStream <- blk:
- case <-blockEvalFailed:
- return
- }
- }
- }()
-
- lastFlushedRound := lastBalancesRound
- const accountsCacheLoadingMessageInterval = 5 * time.Second
- lastProgressMessage := time.Now().Add(-accountsCacheLoadingMessageInterval / 2)
-
- // rollbackSynchronousMode ensures that we switch to "fast writing mode" when we start flushing out rounds to disk, and that
- // we exit this mode when we're done.
- rollbackSynchronousMode := false
- defer func() {
- if rollbackSynchronousMode {
- // restore default synchronous mode
- au.dbs.Wdb.SetSynchronousMode(context.Background(), au.synchronousMode, au.synchronousMode >= db.SynchronousModeFull)
- }
- }()
-
- for blk := range blocksStream {
- delta, err = au.ledger.trackerEvalVerified(blk, &accLedgerEval)
- if err != nil {
- close(blockEvalFailed)
- return
- }
-
- au.newBlockImpl(blk, delta)
-
- if blk.Round() == basics.Round(writingCatchpointRound) {
- catchpointBlockDigest = blk.Digest()
- }
-
- // flush to disk if any of the following applies:
- // 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk
- // 2. if we completed the loading and we loaded up more than 320 rounds.
- flushIntervalExceed := blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval
- loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound)
- if flushIntervalExceed || loadCompleted {
- // adjust the last flush time, so that we would not hold off the flushing due to "working too fast"
- au.lastFlushTime = time.Now().Add(-balancesFlushInterval)
-
- if !rollbackSynchronousMode {
- // switch to rebuild synchronous mode to improve performance
- au.dbs.Wdb.SetSynchronousMode(context.Background(), au.accountsRebuildSynchronousMode, au.accountsRebuildSynchronousMode >= db.SynchronousModeFull)
-
- // flip the switch to rollback the synchronous mode once we're done.
- rollbackSynchronousMode = true
- }
-
- // The unlocking/relocking here isn't very elegant, but it does get the work done :
- // this method is called on either startup or when fast catchup is complete. In the former usecase, the
- // locking here is not really needed since the system is only starting up, and there are no other
- // consumers for the accounts update. On the latter usecase, the function would always have exactly 320 rounds,
- // and therefore this wouldn't be an issue.
- // However, to make sure we're not missing any other future codepath, unlocking here and re-locking later on is a pretty
- // safe bet.
- au.accountsMu.Unlock()
-
- // flush the account data
- au.committedUpTo(blk.Round())
-
- // wait for the writing to complete.
- au.waitAccountsWriting()
-
- // The au.dbRound after writing should be ~320 behind the block round.
- roundsBehind := blk.Round() - au.dbRound
-
- au.accountsMu.Lock()
-
- // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit )
- if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(au.catchpointInterval) {
- // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any further changes
- // would just accumulate in memory.
- close(blockEvalFailed)
- au.log.Errorf("initializeCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", au.dbRound, blk.Round())
- err = fmt.Errorf("initializeCaches failed to initialize the account data caches")
- return
- }
-
- // and once we flushed it to disk, update the lastFlushedRound
- lastFlushedRound = blk.Round()
- }
-
- // if enough time have passed since the last time we wrote a message to the log file then give the user an update about the progess.
- if time.Now().Sub(lastProgressMessage) > accountsCacheLoadingMessageInterval {
- // drop the initial message if we're got to this point since a message saying "still initializing" that comes after "is initializing" doesn't seems to be right.
- select {
- case skipAccountCacheMessage <- struct{}{}:
- // if we got to this point, we should be able to close the writeAccountCacheMessageCompleted channel to have the "completed initializing" message written.
- close(writeAccountCacheMessageCompleted)
- default:
- }
- au.log.Infof("initializeCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound)
- lastProgressMessage = time.Now()
- }
-
- // prepare for the next iteration.
- accLedgerEval.prevHeader = *delta.Hdr
- }
-
- if blockRetrievalError != nil {
- err = blockRetrievalError
- }
- return
-}
-
// initializeFromDisk performs the atomic operation of loading the accounts data information from disk
-// and preparing the accountUpdates for operation, including initializing the commitSyncer goroutine.
-func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRound, lastestBlockRound basics.Round, err error) {
+// and preparing the accountUpdates for operation.
+func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) (err error) {
au.dbs = l.trackerDB()
au.log = l.trackerLog()
au.ledger = l
- if au.initAccounts == nil {
- err = fmt.Errorf("accountUpdates.initializeFromDisk: initAccounts not set")
- return
- }
-
- lastestBlockRound = l.Latest()
start := time.Now()
ledgerAccountsinitCount.Inc(nil)
err = au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- var err0 error
- au.dbRound, err0 = au.accountsInitialize(ctx, tx)
- if err0 != nil {
- return err0
- }
- // Check for blocks DB and tracker DB un-sync
- if au.dbRound > lastestBlockRound {
- au.log.Warnf("accountUpdates.initializeFromDisk: resetting accounts DB (on round %v, but blocks DB's latest is %v)", au.dbRound, lastestBlockRound)
- err0 = accountsReset(tx)
- if err0 != nil {
- return err0
- }
- au.dbRound, err0 = au.accountsInitialize(ctx, tx)
- if err0 != nil {
- return err0
- }
- }
-
totals, err0 := accountsTotals(tx, false)
if err0 != nil {
return err0
@@ -1164,19 +701,12 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo
return
}
- // the VacuumDatabase would be a no-op if au.vacuumOnStartup is cleared.
- au.vacuumDatabase(context.Background())
+ au.accountsq, err = accountsInitDbQueries(au.dbs.Rdb.Handle, au.dbs.Wdb.Handle)
if err != nil {
return
}
- au.accountsq, err = accountsDbInit(au.dbs.Rdb.Handle, au.dbs.Wdb.Handle)
- au.lastCatchpointLabel, _, err = au.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
- if err != nil {
- return
- }
-
- hdr, err := l.BlockHdr(au.dbRound)
+ hdr, err := l.BlockHdr(lastBalancesRound)
if err != nil {
return
}
@@ -1187,498 +717,8 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo
au.accounts = make(map[basics.Address]modifiedAccount)
au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
au.deltasAccum = []int{0}
- au.roundDigest = nil
-
- au.catchpointWriting = 0
- // keep these channel closed if we're not generating catchpoint
- au.catchpointSlowWriting = make(chan struct{}, 1)
- close(au.catchpointSlowWriting)
- au.ctx, au.ctxCancel = context.WithCancel(context.Background())
- au.committedOffset = make(chan deferredCommit, 1)
- au.commitSyncerClosed = make(chan struct{})
- go au.commitSyncer(au.committedOffset)
-
- lastBalancesRound = au.dbRound
- au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
- return
-}
-
-// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
-func accountHashBuilder(addr basics.Address, accountData basics.AccountData, encodedAccountData []byte) []byte {
- hash := make([]byte, 4+crypto.DigestSize)
- // write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
- // recent updated to be in-cache, and "older" nodes will be left alone.
- for i, rewards := 3, accountData.RewardsBase; i >= 0; i, rewards = i-1, rewards>>8 {
- // the following takes the rewards & 255 -> hash[i]
- hash[i] = byte(rewards)
- }
- entryHash := crypto.Hash(append(addr[:], encodedAccountData[:]...))
- copy(hash[4:], entryHash[:])
- return hash[:]
-}
-
-// accountsInitialize initializes the accounts DB if needed and return current account round.
-// as part of the initialization, it tests the current database schema version, and perform upgrade
-// procedures to bring it up to the database schema supported by the binary.
-func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (basics.Round, error) {
- // check current database version.
- dbVersion, err := db.GetUserVersion(ctx, tx)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to read database schema version : %v", err)
- }
-
- // if database version is greater than supported by current binary, write a warning. This would keep the existing
- // fallback behavior where we could use an older binary iff the schema happen to be backward compatible.
- if dbVersion > accountDBVersion {
- au.log.Warnf("accountsInitialize database schema version is %d, but algod supports only %d", dbVersion, accountDBVersion)
- }
-
- if dbVersion < accountDBVersion {
- au.log.Infof("accountsInitialize upgrading database schema from version %d to version %d", dbVersion, accountDBVersion)
- // newDatabase is determined during the tables creations. If we're filling the database with accounts,
- // then we set this variable to true, allowing some of the upgrades to be skipped.
- var newDatabase bool
- for dbVersion < accountDBVersion {
- au.log.Infof("accountsInitialize performing upgrade from version %d", dbVersion)
- // perform the initialization/upgrade
- switch dbVersion {
- case 0:
- dbVersion, newDatabase, err = au.upgradeDatabaseSchema0(ctx, tx)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 0 : %v", err)
- return 0, err
- }
- case 1:
- dbVersion, err = au.upgradeDatabaseSchema1(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
- return 0, err
- }
- case 2:
- dbVersion, err = au.upgradeDatabaseSchema2(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err)
- return 0, err
- }
- case 3:
- dbVersion, err = au.upgradeDatabaseSchema3(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err)
- return 0, err
- }
- case 4:
- dbVersion, err = au.upgradeDatabaseSchema4(ctx, tx, newDatabase)
- if err != nil {
- au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 4 : %v", err)
- return 0, err
- }
- default:
- return 0, fmt.Errorf("accountsInitialize unable to upgrade database from schema version %d", dbVersion)
- }
- }
-
- au.log.Infof("accountsInitialize database schema upgrade complete")
- }
-
- rnd, hashRound, err := accountsRound(tx)
- if err != nil {
- return 0, err
- }
-
- if hashRound != rnd {
- // if the hashed round is different then the base round, something was modified, and the accounts aren't in sync
- // with the hashes.
- err = resetAccountHashes(tx)
- if err != nil {
- return 0, err
- }
- // if catchpoint is disabled on this node, we could complete the initialization right here.
- if au.catchpointInterval == 0 {
- return rnd, nil
- }
- }
-
- // create the merkle trie for the balances
- committer, err := MakeMerkleCommitter(tx, false)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
- }
-
- trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
- }
-
- // we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie.
- // we can figure this out by examining the hash of the root:
- rootHash, err := trie.RootHash()
- if err != nil {
- return rnd, fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
- }
-
- if rootHash.IsZero() {
- au.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
- accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
- defer accountBuilderIt.Close(ctx)
- startTrieBuildTime := time.Now()
- accountsCount := 0
- lastRebuildTime := startTrieBuildTime
- pendingAccounts := 0
- totalOrderedAccounts := 0
- for {
- accts, processedRows, err := accountBuilderIt.Next(ctx)
- if err == sql.ErrNoRows {
- // the account builder would return sql.ErrNoRows when no more data is available.
- break
- } else if err != nil {
- return rnd, err
- }
-
- if len(accts) > 0 {
- accountsCount += len(accts)
- pendingAccounts += len(accts)
- for _, acct := range accts {
- added, err := trie.Add(acct.digest)
- if err != nil {
- return rnd, fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
- }
- if !added {
- au.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.digest), acct.address)
- }
- }
-
- if pendingAccounts >= trieRebuildCommitFrequency {
- // this trie Evict will commit using the current transaction.
- // if anything goes wrong, it will still get rolled back.
- _, err = trie.Evict(true)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
- }
- pendingAccounts = 0
- }
-
- if time.Now().Sub(lastRebuildTime) > 5*time.Second {
- // let the user know that the trie is still being rebuilt.
- au.log.Infof("accountsInitialize still building the trie, and processed so far %d accounts", accountsCount)
- lastRebuildTime = time.Now()
- }
- } else if processedRows > 0 {
- totalOrderedAccounts += processedRows
- // if it's not ordered, we can ignore it for now; we'll just increase the counters and emit logs periodically.
- if time.Now().Sub(lastRebuildTime) > 5*time.Second {
- // let the user know that the trie is still being rebuilt.
- au.log.Infof("accountsInitialize still building the trie, and hashed so far %d accounts", totalOrderedAccounts)
- lastRebuildTime = time.Now()
- }
- }
- }
-
- // this trie Evict will commit using the current transaction.
- // if anything goes wrong, it will still get rolled back.
- _, err = trie.Evict(true)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
- }
-
- // we've just updated the merkle trie, update the hashRound to reflect that.
- err = updateAccountsRound(tx, rnd, rnd)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to update the account round to %d: %v", rnd, err)
- }
-
- au.log.Infof("accountsInitialize rebuilt the merkle trie with %d entries in %v", accountsCount, time.Now().Sub(startTrieBuildTime))
- }
- au.balancesTrie = trie
- return rnd, nil
-}
-
-// upgradeDatabaseSchema0 upgrades the database schema from version 0 to version 1
-//
-// Schema of version 0 is expected to be aligned with the schema used on version 2.0.8 or before.
-// Any database of version 2.0.8 would be of version 0. At this point, the database might
-// have the following tables : ( i.e. a newly created database would not have these )
-// * acctrounds
-// * accounttotals
-// * accountbase
-// * assetcreators
-// * storedcatchpoints
-// * accounthashes
-// * catchpointstate
-//
-// As the first step of the upgrade, the above tables are being created if they do not already exists.
-// Following that, the assetcreators table is being altered by adding a new column to it (ctype).
-// Last, in case the database was just created, it would get initialized with the following:
-// The accountbase would get initialized with the au.initAccounts
-// The accounttotals would get initialized to align with the initialization account added to accountbase
-// The acctrounds would get updated to indicate that the balance matches round 0
-//
-func (au *accountUpdates) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, newDatabase bool, err error) {
- au.log.Infof("accountsInitialize initializing schema")
- newDatabase, err = accountsInit(tx, au.initAccounts, au.initProto)
- if err != nil {
- return 0, newDatabase, fmt.Errorf("accountsInitialize unable to initialize schema : %v", err)
- }
- _, err = db.SetUserVersion(ctx, tx, 1)
- if err != nil {
- return 0, newDatabase, fmt.Errorf("accountsInitialize unable to update database schema version from 0 to 1: %v", err)
- }
- return 1, newDatabase, nil
-}
-
-// upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2
-//
-// The schema updated to version 2 intended to ensure that the encoding of all the accounts data is
-// both canonical and identical across the entire network. On release 2.0.5 we released an upgrade to the messagepack.
-// the upgraded messagepack was decoding the account data correctly, but would have different
-// encoding compared to it's predecessor. As a result, some of the account data that was previously stored
-// would have different encoded representation than the one on disk.
-// To address this, this startup procedure would attempt to scan all the accounts data. for each account data, we would
-// see if it's encoding aligns with the current messagepack encoder. If it doesn't we would update it's encoding.
-// then, depending if we found any such account data, we would reset the merkle trie and stored catchpoints.
-// once the upgrade is complete, the accountsInitialize would (if needed) rebuild the merkle trie using the new
-// encoded accounts.
-//
-// This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performing
-// a functional update to it's content.
-//
-func (au *accountUpdates) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- var modifiedAccounts uint
- if newDatabase {
- goto schemaUpdateComplete
- }
-
- // update accounts encoding.
- au.log.Infof("accountsInitialize verifying accounts data encoding")
- modifiedAccounts, err = reencodeAccounts(ctx, tx)
- if err != nil {
- return 0, err
- }
-
- if modifiedAccounts > 0 {
- au.log.Infof("accountsInitialize reencoded %d accounts", modifiedAccounts)
-
- au.log.Infof("accountsInitialize resetting account hashes")
- // reset the merkle trie
- err = resetAccountHashes(tx)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to reset account hashes : %v", err)
- }
-
- au.log.Infof("accountsInitialize preparing queries")
- // initialize a new accountsq with the incoming transaction.
- accountsq, err := accountsDbInit(tx, tx)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to prepare queries : %v", err)
- }
-
- // close the prepared statements when we're done with them.
- defer accountsq.close()
-
- au.log.Infof("accountsInitialize resetting prior catchpoints")
- // delete the last catchpoint label if we have any.
- _, err = accountsq.writeCatchpointStateString(ctx, catchpointStateLastCatchpoint, "")
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to clear prior catchpoint : %v", err)
- }
-
- au.log.Infof("accountsInitialize deleting stored catchpoints")
- // delete catchpoints.
- err = au.deleteStoredCatchpoints(ctx, accountsq)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to delete stored catchpoints : %v", err)
- }
- } else {
- au.log.Infof("accountsInitialize found that no accounts needed to be reencoded")
- }
-
-schemaUpdateComplete:
- // update version
- _, err = db.SetUserVersion(ctx, tx, 2)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 1 to 2: %v", err)
- }
- return 2, nil
-}
-
-// upgradeDatabaseSchema2 upgrades the database schema from version 2 to version 3
-//
-// This upgrade only enables the database vacuuming which will take place once the upgrade process is complete.
-// If the user has already specified the OptimizeAccountsDatabaseOnStartup flag in the configuration file, this
-// step becomes a no-op.
-//
-func (au *accountUpdates) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- if !newDatabase {
- au.vacuumOnStartup = true
- }
-
- // update version
- _, err = db.SetUserVersion(ctx, tx, 3)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 2 to 3: %v", err)
- }
- return 3, nil
-}
-
-// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4,
-// adding the normalizedonlinebalance column to the accountbase table.
-func (au *accountUpdates) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- err = accountsAddNormalizedBalance(tx, au.ledger.GenesisProto())
- if err != nil {
- return 0, err
- }
-
- // update version
- _, err = db.SetUserVersion(ctx, tx, 4)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 3 to 4: %v", err)
- }
- return 4, nil
-}
-
-// upgradeDatabaseSchema4 does not change the schema but migrates data:
-// remove empty AccountData entries from accountbase table
-func (au *accountUpdates) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx, newDatabase bool) (updatedDBVersion int32, err error) {
- queryAddresses := au.catchpointInterval != 0
- var numDeleted int64
- var addresses []basics.Address
-
- if newDatabase {
- goto done
- }
-
- numDeleted, addresses, err = removeEmptyAccountData(tx, queryAddresses)
- if err != nil {
- return 0, err
- }
-
- if queryAddresses && len(addresses) > 0 {
- mc, err := MakeMerkleCommitter(tx, false)
- if err != nil {
- // at this point record deleted and DB is pruned for account data
- // if hash deletion fails just log it and do not abort startup
- au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle committer: %v", err)
- goto done
- }
- trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
- if err != nil {
- au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle trie: %v", err)
- goto done
- }
-
- var totalHashesDeleted int
- for _, addr := range addresses {
- hash := accountHashBuilder(addr, basics.AccountData{}, []byte{0x80})
- deleted, err := trie.Delete(hash)
- if err != nil {
- au.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, err)
- } else {
- if !deleted {
- au.log.Warnf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(hash), addr)
- } else {
- totalHashesDeleted++
- }
- }
- }
-
- if _, err = trie.Commit(); err != nil {
- au.log.Errorf("upgradeDatabaseSchema4: failed to commit changes to merkle trie: %v", err)
- }
-
- au.log.Infof("upgradeDatabaseSchema4: deleted %d hashes", totalHashesDeleted)
- }
-
-done:
- au.log.Infof("upgradeDatabaseSchema4: deleted %d rows", numDeleted)
-
- // update version
- _, err = db.SetUserVersion(ctx, tx, 5)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 4 to 5: %v", err)
- }
- return 5, nil
-}
-
-// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
-// once all the files have been deleted, it would go ahead and remove the entries from the table.
-func (au *accountUpdates) deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries) (err error) {
- catchpointsFilesChunkSize := 50
- for {
- fileNames, err := dbQueries.getOldestCatchpointFiles(ctx, catchpointsFilesChunkSize, 0)
- if err != nil {
- return err
- }
- if len(fileNames) == 0 {
- break
- }
-
- for round, fileName := range fileNames {
- absCatchpointFileName := filepath.Join(au.dbDirectory, fileName)
- err = os.Remove(absCatchpointFileName)
- if err == nil || os.IsNotExist(err) {
- // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
- } else {
- // we can't delete the file, abort -
- return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
- }
- // clear the entry from the database
- err = dbQueries.storeCatchpoint(ctx, round, "", "", 0)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
-func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDeltas) (err error) {
- if au.catchpointInterval == 0 {
- return nil
- }
- var added, deleted bool
- accumulatedChanges := 0
-
- for i := 0; i < accountsDeltas.len(); i++ {
- addr, delta := accountsDeltas.getByIdx(i)
- if !delta.old.accountData.IsZero() {
- deleteHash := accountHashBuilder(addr, delta.old.accountData, protocol.Encode(&delta.old.accountData))
- deleted, err = au.balancesTrie.Delete(deleteHash)
- if err != nil {
- return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
- }
- if !deleted {
- au.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
- } else {
- accumulatedChanges++
- }
- }
-
- if !delta.new.IsZero() {
- addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new))
- added, err = au.balancesTrie.Add(addHash)
- if err != nil {
- return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
- }
- if !added {
- au.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
- } else {
- accumulatedChanges++
- }
- }
- }
- if accumulatedChanges >= trieAccumulatedChangesFlush {
- accumulatedChanges = 0
- _, err = au.balancesTrie.Commit()
- if err != nil {
- return
- }
- }
-
- // write it all to disk.
- if accumulatedChanges > 0 {
- _, err = au.balancesTrie.Commit()
- }
+ au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
return
}
@@ -1693,12 +733,11 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
}
if rnd != au.latest()+1 {
- au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
+ au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.cachedDBRound, len(au.deltas))
}
au.deltas = append(au.deltas, delta.Accts)
au.versions = append(au.versions, blk.CurrentProtocol)
au.creatableDeltas = append(au.creatableDeltas, delta.Creatables)
- au.roundDigest = append(au.roundDigest, blk.Digest())
au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1])
au.baseAccounts.flushPendingWrites()
@@ -1748,7 +787,7 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
var persistedData persistedAccountData
withRewards := true
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err = au.roundOffset(rnd)
if err != nil {
@@ -1818,7 +857,7 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
}
au.accountsMu.RLock()
needUnlock = true
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
}
@@ -1839,7 +878,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
var offset uint64
var persistedData persistedAccountData
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err = au.roundOffset(rnd)
if err != nil {
@@ -1904,7 +943,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
}
au.accountsMu.RLock()
needUnlock = true
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
} else {
@@ -1930,7 +969,7 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
var dbRound basics.Round
var offset uint64
for {
- currentDbRound := au.dbRound
+ currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
offset, err = au.roundOffset(rnd)
if err != nil {
@@ -1978,7 +1017,7 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
}
au.accountsMu.RLock()
unlock = true
- for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) {
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
au.accountsReadCond.Wait()
}
} else {
@@ -1988,274 +1027,156 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
}
}
-// accountsCreateCatchpointLabel creates a catchpoint label and write it.
-func (au *accountUpdates) accountsCreateCatchpointLabel(committedRound basics.Round, totals ledgercore.AccountTotals, ledgerBlockDigest crypto.Digest, trieBalancesHash crypto.Digest) (label string, err error) {
- cpLabel := ledgercore.MakeCatchpointLabel(committedRound, ledgerBlockDigest, trieBalancesHash, totals)
- label = cpLabel.String()
- _, err = au.accountsq.writeCatchpointStateString(context.Background(), catchpointStateLastCatchpoint, label)
- return
-}
-
// roundOffset calculates the offset of the given round compared to the current dbRound. Requires that the lock would be taken.
func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err error) {
- if rnd < au.dbRound {
+ if rnd < au.cachedDBRound {
err = &RoundOffsetError{
round: rnd,
- dbRound: au.dbRound,
+ dbRound: au.cachedDBRound,
}
return
}
- off := uint64(rnd - au.dbRound)
+ off := uint64(rnd - au.cachedDBRound)
if off > uint64(len(au.deltas)) {
- err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
+ err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, au.cachedDBRound, len(au.deltas))
return
}
return off, nil
}
-// commitSyncer is the syncer go-routine function which perform the database updates. Internally, it dequeues deferredCommits and
-// send the tasks to commitRound for completing the operation.
-func (au *accountUpdates) commitSyncer(deferredCommits chan deferredCommit) {
- defer close(au.commitSyncerClosed)
- for {
- select {
- case committedOffset, ok := <-deferredCommits:
- if !ok {
- return
- }
- au.commitRound(committedOffset.offset, committedOffset.dbRound, committedOffset.lookback)
- case <-au.ctx.Done():
- // drain the pending commits queue:
- drained := false
- for !drained {
- select {
- case <-deferredCommits:
- au.accountsWriting.Done()
- default:
- drained = true
- }
- }
- return
- }
- }
-}
+func (au *accountUpdates) handleUnorderedCommit(offset uint64, dbRound basics.Round, lookback basics.Round) {
-// commitRound write to the database a "chunk" of rounds, and update the dbRound accordingly.
-func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookback basics.Round) {
- var stats telemetryspec.AccountsUpdateMetrics
- var updateStats bool
+}
+// prepareCommit prepares data to write to the database a "chunk" of rounds, and update the cached dbRound accordingly.
+func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
if au.logAccountUpdatesMetrics {
now := time.Now()
if now.Sub(au.lastMetricsLogTime) >= au.logAccountUpdatesInterval {
- updateStats = true
+ dcc.updateStats = true
au.lastMetricsLogTime = now
}
}
- defer au.accountsWriting.Done()
- au.accountsMu.RLock()
-
- // we can exit right away, as this is the result of mis-ordered call to committedUpTo.
- if au.dbRound < dbRound || offset < uint64(au.dbRound-dbRound) {
- // if this is an archival ledger, we might need to update the catchpointWriting variable.
- if au.archivalLedger {
- // determine if this was a catchpoint round
- isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
- if isCatchpointRound {
- // it was a catchpoint round, so update the catchpointWriting to indicate that we're done.
- atomic.StoreInt32(&au.catchpointWriting, 0)
- }
- }
- au.accountsMu.RUnlock()
- return
- }
-
- // adjust the offset according to what happened meanwhile..
- offset -= uint64(au.dbRound - dbRound)
-
- // if this iteration need to flush out zero rounds, just return right away.
- // this usecase can happen when two subsequent calls to committedUpTo concludes that the same rounds range need to be
- // flush, without the commitRound have a chance of committing these rounds.
- if offset == 0 {
- au.accountsMu.RUnlock()
- return
- }
-
- dbRound = au.dbRound
+ offset := dcc.offset
- newBase := basics.Round(offset) + dbRound
- flushTime := time.Now()
- isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
+ au.accountsMu.RLock()
// create a copy of the deltas, round totals and protos for the range we're going to flush.
- deltas := make([]ledgercore.AccountDeltas, offset, offset)
- creatableDeltas := make([]map[basics.CreatableIndex]ledgercore.ModifiedCreatable, offset, offset)
- roundTotals := make([]ledgercore.AccountTotals, offset+1, offset+1)
- copy(deltas, au.deltas[:offset])
+ dcc.deltas = make([]ledgercore.AccountDeltas, offset)
+ creatableDeltas := make([]map[basics.CreatableIndex]ledgercore.ModifiedCreatable, offset)
+ dcc.roundTotals = au.roundTotals[offset]
+ copy(dcc.deltas, au.deltas[:offset])
copy(creatableDeltas, au.creatableDeltas[:offset])
- copy(roundTotals, au.roundTotals[:offset+1])
// verify version correctness : all the entries in the au.versions[1:offset+1] should have the *same* version, and the committedUpTo should be enforcing that.
if au.versions[1] != au.versions[offset] {
au.accountsMu.RUnlock()
- au.log.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
- return
- }
- consensusVersion := au.versions[1]
-
- var committedRoundDigest crypto.Digest
- if isCatchpointRound {
- committedRoundDigest = au.roundDigest[offset+uint64(lookback)-1]
+ // in scheduleCommit, we expect that this function to update the catchpointWriting when
+ // it's on a catchpoint round and it's an archival ledger. Doing this in a deferred function
+ // here would prevent us from "forgetting" to update this variable later on.
+ // The same is repeated in commitRound on errors.
+ if dcc.isCatchpointRound && au.archivalLedger {
+ atomic.StoreInt32(dcc.catchpointWriting, 0)
+ }
+ return fmt.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
}
// compact all the deltas - when we're trying to persist multiple rounds, we might have the same account
// being updated multiple times. When that happen, we can safely omit the intermediate updates.
- compactDeltas := makeCompactAccountDeltas(deltas, au.baseAccounts)
- compactCreatableDeltas := compactCreatableDeltas(creatableDeltas)
+ dcc.compactAccountDeltas = makeCompactAccountDeltas(dcc.deltas, au.baseAccounts)
+ dcc.compactCreatableDeltas = compactCreatableDeltas(creatableDeltas)
au.accountsMu.RUnlock()
- // in committedUpTo, we expect that this function to update the catchpointWriting when
- // it's on a catchpoint round and it's an archival ledger. Doing this in a deferred function
- // here would prevent us from "forgetting" to update this variable later on.
- defer func() {
- if isCatchpointRound && au.archivalLedger {
- atomic.StoreInt32(&au.catchpointWriting, 0)
- }
- }()
-
- var catchpointLabel string
- beforeUpdatingBalancesTime := time.Now()
- var trieBalancesHash crypto.Digest
+ dcc.genesisProto = au.ledger.GenesisProto()
- genesisProto := au.ledger.GenesisProto()
-
- start := time.Now()
- ledgerCommitroundCount.Inc(nil)
- var updatedPersistedAccounts []persistedAccountData
- if updateStats {
- stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano())
+ if dcc.updateStats {
+ dcc.stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano())
}
- err := au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- treeTargetRound := basics.Round(0)
- if au.catchpointInterval > 0 {
- mc, err0 := MakeMerkleCommitter(tx, false)
- if err0 != nil {
- return err0
- }
- if au.balancesTrie == nil {
- trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
- if err != nil {
- au.log.Warnf("unable to create merkle trie during committedUpTo: %v", err)
- return err
- }
- au.balancesTrie = trie
- } else {
- au.balancesTrie.SetCommitter(mc)
- }
- treeTargetRound = dbRound + basics.Round(offset)
- }
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset)))
-
- if updateStats {
- stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano())
- }
-
- err = compactDeltas.accountsLoadOld(tx)
- if err != nil {
- return err
- }
-
- if updateStats {
- stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - stats.OldAccountPreloadDuration
- }
-
- err = totalsNewRounds(tx, deltas[:offset], compactDeltas, roundTotals[1:offset+1], config.Consensus[consensusVersion])
- if err != nil {
- return err
- }
+ return nil
+}
- if updateStats {
- stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano())
- }
+// commitRound closure is called within the same transaction for all trackers
+// it receives current offset and dbRound
+func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) {
+ offset := dcc.offset
+ dbRound := dcc.oldBase
- err = au.accountsUpdateBalances(compactDeltas)
+ defer func() {
if err != nil {
- return err
+ if dcc.isCatchpointRound && au.archivalLedger {
+ atomic.StoreInt32(dcc.catchpointWriting, 0)
+ }
}
+ }()
- if updateStats {
- now := time.Duration(time.Now().UnixNano())
- stats.MerkleTrieUpdateDuration = now - stats.MerkleTrieUpdateDuration
- stats.AccountsWritingDuration = now
- }
+ _, err = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset)))
+ if err != nil {
+ return err
+ }
- // the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
- // so that we can update the base account back.
- updatedPersistedAccounts, err = accountsNewRound(tx, compactDeltas, compactCreatableDeltas, genesisProto, dbRound+basics.Round(offset))
- if err != nil {
- return err
- }
+ if dcc.updateStats {
+ dcc.stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano())
+ }
- if updateStats {
- stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano()) - stats.AccountsWritingDuration
- }
+ err = dcc.compactAccountDeltas.accountsLoadOld(tx)
+ if err != nil {
+ return err
+ }
- err = updateAccountsRound(tx, dbRound+basics.Round(offset), treeTargetRound)
- if err != nil {
- return err
- }
+ if dcc.updateStats {
+ dcc.stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.OldAccountPreloadDuration
+ }
- if isCatchpointRound {
- trieBalancesHash, err = au.balancesTrie.RootHash()
- if err != nil {
- return
- }
- }
- return nil
- })
- ledgerCommitroundMicros.AddMicrosecondsSince(start, nil)
+ err = accountsPutTotals(tx, dcc.roundTotals, false)
if err != nil {
- au.balancesTrie = nil
- au.log.Warnf("unable to advance account snapshot (%d-%d): %v", dbRound, dbRound+basics.Round(offset), err)
- return
+ return err
}
- if updateStats {
- stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano()) - stats.DatabaseCommitDuration - stats.AccountsWritingDuration - stats.MerkleTrieUpdateDuration - stats.OldAccountPreloadDuration
+ if dcc.updateStats {
+ dcc.stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano())
}
- if isCatchpointRound {
- catchpointLabel, err = au.accountsCreateCatchpointLabel(dbRound+basics.Round(offset)+lookback, roundTotals[offset], committedRoundDigest, trieBalancesHash)
- if err != nil {
- au.log.Warnf("commitRound : unable to create a catchpoint label: %v", err)
- }
+ // the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
+ // so that we can update the base account back.
+ dcc.updatedPersistedAccounts, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
+ if err != nil {
+ return err
}
- if au.balancesTrie != nil {
- _, err = au.balancesTrie.Evict(false)
- if err != nil {
- au.log.Warnf("merkle trie failed to evict: %v", err)
- }
+
+ if dcc.updateStats {
+ dcc.stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.AccountsWritingDuration
}
- if isCatchpointRound && catchpointLabel != "" {
- au.lastCatchpointLabel = catchpointLabel
+ return
+}
+
+func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+ if dcc.updateStats {
+ spentDuration := dcc.stats.DatabaseCommitDuration + dcc.stats.AccountsWritingDuration + dcc.stats.MerkleTrieUpdateDuration + dcc.stats.OldAccountPreloadDuration
+ dcc.stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano()) - spentDuration
}
- updatingBalancesDuration := time.Now().Sub(beforeUpdatingBalancesTime)
- if updateStats {
- stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano())
+ offset := dcc.offset
+ dbRound := dcc.oldBase
+ newBase := dcc.newBase
+
+ dcc.updatingBalancesDuration = time.Since(dcc.flushTime)
+
+ if dcc.updateStats {
+ dcc.stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano())
}
+
au.accountsMu.Lock()
// Drop reference counts to modified accounts, and evict them
// from in-memory cache when no references remain.
- for i := 0; i < compactDeltas.len(); i++ {
- addr, acctUpdate := compactDeltas.getByIdx(i)
+ for i := 0; i < dcc.compactAccountDeltas.len(); i++ {
+ addr, acctUpdate := dcc.compactAccountDeltas.getByIdx(i)
cnt := acctUpdate.ndeltas
macct, ok := au.accounts[addr]
if !ok {
@@ -2272,11 +1193,11 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
}
}
- for _, persistedAcct := range updatedPersistedAccounts {
+ for _, persistedAcct := range dcc.updatedPersistedAccounts {
au.baseAccounts.write(persistedAcct)
}
- for cidx, modCrt := range compactCreatableDeltas {
+ for cidx, modCrt := range dcc.compactCreatableDeltas {
cnt := modCrt.Ndeltas
mcreat, ok := au.creatables[cidx]
if !ok {
@@ -2295,39 +1216,29 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
au.deltas = au.deltas[offset:]
au.deltasAccum = au.deltasAccum[offset:]
- au.roundDigest = au.roundDigest[offset:]
au.versions = au.versions[offset:]
au.roundTotals = au.roundTotals[offset:]
au.creatableDeltas = au.creatableDeltas[offset:]
- au.dbRound = newBase
- au.lastFlushTime = flushTime
+ au.cachedDBRound = newBase
au.accountsMu.Unlock()
- if updateStats {
- stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano()) - stats.MemoryUpdatesDuration
+ if dcc.updateStats {
+ dcc.stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.MemoryUpdatesDuration
}
au.accountsReadCond.Broadcast()
- if isCatchpointRound && au.archivalLedger && catchpointLabel != "" {
- // generate the catchpoint file. This need to be done inline so that it will block any new accounts that from being written.
- // the generateCatchpoint expects that the accounts data would not be modified in the background during it's execution.
- au.generateCatchpoint(basics.Round(offset)+dbRound+lookback, catchpointLabel, committedRoundDigest, updatingBalancesDuration)
- }
-
// log telemetry event
- if updateStats {
- stats.StartRound = uint64(dbRound)
- stats.RoundsCount = offset
- stats.UpdatedAccountsCount = uint64(len(updatedPersistedAccounts))
- stats.UpdatedCreatablesCount = uint64(len(compactCreatableDeltas))
+ if dcc.updateStats {
+ dcc.stats.StartRound = uint64(dbRound)
+ dcc.stats.RoundsCount = offset
+ dcc.stats.UpdatedAccountsCount = uint64(len(dcc.updatedPersistedAccounts))
+ dcc.stats.UpdatedCreatablesCount = uint64(len(dcc.compactCreatableDeltas))
- var details struct {
- }
- au.log.Metrics(telemetryspec.Accounts, stats, details)
+ var details struct{}
+ au.log.Metrics(telemetryspec.Accounts, dcc.stats, details)
}
-
}
// compactCreatableDeltas takes an array of creatables map deltas ( one array entry per round ), and compact the array into a single
@@ -2363,189 +1274,11 @@ func compactCreatableDeltas(creatableDeltas []map[basics.CreatableIndex]ledgerco
// latest returns the latest round
func (au *accountUpdates) latest() basics.Round {
- return au.dbRound + basics.Round(len(au.deltas))
-}
-
-// generateCatchpoint generates a single catchpoint file
-func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label string, committedRoundDigest crypto.Digest, updatingBalancesDuration time.Duration) {
- beforeGeneratingCatchpointTime := time.Now()
- catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
- BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
- }
-
- // the retryCatchpointCreation is used to repeat the catchpoint file generation in case the node crashed / aborted during startup
- // before the catchpoint file generation could be completed.
- retryCatchpointCreation := false
- au.log.Debugf("accountUpdates: generateCatchpoint: generating catchpoint for round %d", committedRound)
- defer func() {
- if !retryCatchpointCreation {
- // clear the writingCatchpoint flag
- _, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(0))
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint unable to clear catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
- }
- }
- }()
-
- _, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(committedRound))
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint unable to write catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
- return
- }
-
- relCatchpointFileName := filepath.Join("catchpoints", catchpointRoundToPath(committedRound))
- absCatchpointFileName := filepath.Join(au.dbDirectory, relCatchpointFileName)
-
- more := true
- const shortChunkExecutionDuration = 50 * time.Millisecond
- const longChunkExecutionDuration = 1 * time.Second
- var chunkExecutionDuration time.Duration
- select {
- case <-au.catchpointSlowWriting:
- chunkExecutionDuration = longChunkExecutionDuration
- default:
- chunkExecutionDuration = shortChunkExecutionDuration
- }
-
- var catchpointWriter *catchpointWriter
- start := time.Now()
- ledgerGeneratecatchpointCount.Inc(nil)
- err = au.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- catchpointWriter = makeCatchpointWriter(au.ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label)
- for more {
- stepCtx, stepCancelFunction := context.WithTimeout(au.ctx, chunkExecutionDuration)
- writeStepStartTime := time.Now()
- more, err = catchpointWriter.WriteStep(stepCtx)
- // accumulate the actual time we've spent writing in this step.
- catchpointGenerationStats.CPUTime += uint64(time.Now().Sub(writeStepStartTime).Nanoseconds())
- stepCancelFunction()
- if more && err == nil {
- // we just wrote some data, but there is more to be written.
- // go to sleep for while.
- // before going to sleep, extend the transaction timeout so that we won't get warnings:
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(1*time.Second))
- select {
- case <-time.After(100 * time.Millisecond):
- // increase the time slot allocated for writing the catchpoint, but stop when we get to the longChunkExecutionDuration limit.
- // this would allow the catchpoint writing speed to ramp up while still leaving some cpu available.
- chunkExecutionDuration *= 2
- if chunkExecutionDuration > longChunkExecutionDuration {
- chunkExecutionDuration = longChunkExecutionDuration
- }
- case <-au.ctx.Done():
- retryCatchpointCreation = true
- err2 := catchpointWriter.Abort()
- if err2 != nil {
- return fmt.Errorf("error removing catchpoint file : %v", err2)
- }
- return nil
- case <-au.catchpointSlowWriting:
- chunkExecutionDuration = longChunkExecutionDuration
- }
- }
- if err != nil {
- err = fmt.Errorf("unable to create catchpoint : %v", err)
- err2 := catchpointWriter.Abort()
- if err2 != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
- }
- return
- }
- }
- return
- })
- ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
-
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: %v", err)
- return
- }
- if catchpointWriter == nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: nil catchpointWriter")
- return
- }
-
- err = au.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: unable to save catchpoint: %v", err)
- return
- }
- catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
- catchpointGenerationStats.WritingDuration = uint64(time.Now().Sub(beforeGeneratingCatchpointTime).Nanoseconds())
- catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
- catchpointGenerationStats.CatchpointLabel = catchpointWriter.GetCatchpoint()
- au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
- au.log.With("writingDuration", catchpointGenerationStats.WritingDuration).
- With("CPUTime", catchpointGenerationStats.CPUTime).
- With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
- With("accountsCount", catchpointGenerationStats.AccountsCount).
- With("fileSize", catchpointGenerationStats.FileSize).
- With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
- Infof("Catchpoint file was generated")
-}
-
-// catchpointRoundToPath calculate the catchpoint file path for a given round
-func catchpointRoundToPath(rnd basics.Round) string {
- irnd := int64(rnd) / 256
- outStr := ""
- for irnd > 0 {
- outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
- irnd = irnd / 256
- }
- outStr = filepath.Join(outStr, strconv.FormatInt(int64(rnd), 10)+".catchpoint")
- return outStr
-}
-
-// saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
-// after a successful insert operation to the database, it would delete up to 2 old entries, as needed.
-// deleting 2 entries while inserting single entry allow us to adjust the size of the backing storage and have the
-// database and storage realign.
-func (au *accountUpdates) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) {
- if au.catchpointFileHistoryLength != 0 {
- err = au.accountsq.storeCatchpoint(context.Background(), round, fileName, catchpoint, fileSize)
- if err != nil {
- au.log.Warnf("accountUpdates: saveCatchpoint: unable to save catchpoint: %v", err)
- return
- }
- } else {
- err = os.Remove(fileName)
- if err != nil {
- au.log.Warnf("accountUpdates: saveCatchpoint: unable to remove file (%s): %v", fileName, err)
- return
- }
- }
- if au.catchpointFileHistoryLength == -1 {
- return
- }
- var filesToDelete map[basics.Round]string
- filesToDelete, err = au.accountsq.getOldestCatchpointFiles(context.Background(), 2, au.catchpointFileHistoryLength)
- if err != nil {
- return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err)
- }
- for round, fileToDelete := range filesToDelete {
- absCatchpointFileName := filepath.Join(au.dbDirectory, fileToDelete)
- err = os.Remove(absCatchpointFileName)
- if err == nil || os.IsNotExist(err) {
- // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
- err = nil
- } else {
- // we can't delete the file, abort -
- return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
- }
- err = au.accountsq.storeCatchpoint(context.Background(), round, "", "", 0)
- if err != nil {
- return fmt.Errorf("unable to delete old catchpoint entry '%s' : %v", fileToDelete, err)
- }
- }
- return
+ return au.cachedDBRound + basics.Round(len(au.deltas))
}
// the vacuumDatabase performs a full vacuum of the accounts database.
func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
- if !au.vacuumOnStartup {
- return
- }
-
// vaccumming the database would modify the some of the tables rowid, so we need to make sure any stored in-memory
// rowid are flushed.
au.baseAccounts.prune(0)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 90ca7d205..27306af53 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -24,7 +24,6 @@ import (
"fmt"
"io/ioutil"
"os"
- "path/filepath"
"runtime"
"strings"
"sync"
@@ -37,13 +36,18 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/db"
)
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+
type mockLedgerForTracker struct {
dbs db.Pair
blocks []blockEntry
@@ -52,6 +56,10 @@ type mockLedgerForTracker struct {
filename string
inMemory bool
consensusParams config.ConsensusParams
+ accts map[basics.Address]basics.AccountData
+
+ // trackerRegistry manages persistence into DB so we have to have it here even for a single tracker test
+ trackers trackerRegistry
}
func accumulateTotals(t testing.TB, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData, rewardLevel uint64) (totals ledgercore.AccountTotals) {
@@ -84,7 +92,7 @@ func makeMockLedgerForTracker(t testing.TB, inMemory bool, initialBlocksCount in
}
}
consensusParams := config.Consensus[consensusVersion]
- return &mockLedgerForTracker{dbs: dbs, log: dblogger, filename: fileName, inMemory: inMemory, blocks: blocks, deltas: deltas, consensusParams: consensusParams}
+ return &mockLedgerForTracker{dbs: dbs, log: dblogger, filename: fileName, inMemory: inMemory, blocks: blocks, deltas: deltas, consensusParams: consensusParams, accts: accts[0]}
}
// fork creates another database which has the same content as the current one. Works only for non-memory databases.
@@ -102,8 +110,12 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
log: dblogger,
blocks: make([]blockEntry, len(ml.blocks)),
deltas: make([]ledgercore.StateDelta, len(ml.deltas)),
+ accts: make(map[basics.Address]basics.AccountData),
filename: fn,
}
+ for k, v := range ml.accts {
+ newLedgerTracker.accts[k] = v
+ }
copy(newLedgerTracker.blocks, ml.blocks)
copy(newLedgerTracker.deltas, ml.deltas)
@@ -126,6 +138,8 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
}
func (ml *mockLedgerForTracker) Close() {
+ ml.trackers.close()
+
ml.dbs.Close()
// delete the database files of non-memory instances.
if !ml.inMemory {
@@ -145,7 +159,7 @@ func (ml *mockLedgerForTracker) addMockBlock(be blockEntry, delta ledgercore.Sta
return nil
}
-func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) {
+func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
// support returning the deltas if the client explicitly provided them by calling addMockBlock, otherwise,
// just return an empty state delta ( since the client clearly didn't care about these )
if len(ml.deltas) > int(blk.Round()) {
@@ -195,6 +209,10 @@ func (ml *mockLedgerForTracker) GenesisProto() config.ConsensusParams {
return ml.consensusParams
}
+func (ml *mockLedgerForTracker) GenesisAccounts() map[basics.Address]basics.AccountData {
+ return ml.accts
+}
+
// this function used to be in acctupdates.go, but we were never using it for production purposes. This
// function has a conceptual flaw in that it attempts to load the entire balances into memory. This might
// not work if we have large number of balances. On these unit testing, however, it's not the case, and it's
@@ -226,15 +244,28 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address
return
}
+func newAcctUpdates(tb testing.TB, l *mockLedgerForTracker, conf config.Local, dbPathPrefix string) *accountUpdates {
+ au := &accountUpdates{}
+ au.initialize(conf)
+ _, err := trackerDBInitialize(l, false, ".")
+ require.NoError(tb, err)
+
+ l.trackers.initialize(l, []ledgerTracker{au}, conf)
+ err = l.trackers.loadFromDisk(l)
+ require.NoError(tb, err)
+
+ return au
+}
+
func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, latestRnd basics.Round, accts []map[basics.Address]basics.AccountData, rewards []uint64, proto config.ConsensusParams) {
latest := au.latest()
- require.Equal(t, latest, latestRnd)
+ require.Equal(t, latestRnd, latest)
_, err := au.Totals(latest + 1)
require.Error(t, err)
var validThrough basics.Round
- _, validThrough, err = au.LookupWithoutRewards(latest+1, randomAddress())
+ _, validThrough, err = au.LookupWithoutRewards(latest+1, ledgertesting.RandomAddress())
require.Error(t, err)
require.Equal(t, basics.Round(0), validThrough)
@@ -242,7 +273,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
_, err := au.Totals(base - 1)
require.Error(t, err)
- _, validThrough, err = au.LookupWithoutRewards(base-1, randomAddress())
+ _, validThrough, err = au.LookupWithoutRewards(base-1, ledgertesting.RandomAddress())
require.Error(t, err)
require.Equal(t, basics.Round(0), validThrough)
}
@@ -301,7 +332,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline)
require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart)
- d, validThrough, err := au.LookupWithoutRewards(rnd, randomAddress())
+ d, validThrough, err := au.LookupWithoutRewards(rnd, ledgertesting.RandomAddress())
require.NoError(t, err)
require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), fmt.Sprintf("validThrough :%v\nrnd :%v\n", validThrough, rnd))
require.Equal(t, d, basics.AccountData{})
@@ -334,7 +365,7 @@ func TestAcctUpdates(t *testing.T) {
}
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -350,13 +381,10 @@ func TestAcctUpdates(t *testing.T) {
ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
defer ml.Close()
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
// cover 10 genesis blocks
rewardLevel := uint64(0)
for i := 1; i < 10; i++ {
@@ -369,13 +397,14 @@ func TestAcctUpdates(t *testing.T) {
// lastCreatableID stores asset or app max used index to get rid of conflicts
lastCreatableID := crypto.RandUint64() % 512
knownCreatables := make(map[basics.CreatableIndex]bool)
+
for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
var updates ledgercore.AccountDeltas
var totals map[basics.Address]basics.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = randomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -405,14 +434,35 @@ func TestAcctUpdates(t *testing.T) {
for i := basics.Round(0); i < 15; i++ {
// Clear the timer to ensure a flush
- au.lastFlushTime = time.Time{}
+ ml.trackers.lastFlushTime = time.Time{}
- au.committedUpTo(basics.Round(proto.MaxBalLookback) + i)
- au.waitAccountsWriting()
+ ml.trackers.committedUpTo(basics.Round(proto.MaxBalLookback) + i)
+ ml.trackers.waitAccountsWriting()
checkAcctUpdates(t, au, i, basics.Round(proto.MaxBalLookback+14), accts, rewardsLevels, proto)
}
-}
+ // check the account totals.
+ var dbRound basics.Round
+ err := ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbRound, err = accountsRound(tx)
+ return
+ })
+ require.NoError(t, err)
+
+ var updates ledgercore.AccountDeltas
+ for addr, acctData := range accts[dbRound] {
+ updates.Upsert(addr, acctData)
+ }
+
+ expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardsLevels[dbRound], proto, nil, ledgercore.AccountTotals{})
+ var actualTotals ledgercore.AccountTotals
+ err = ml.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ actualTotals, err = accountsTotals(tx, false)
+ return
+ })
+ require.NoError(t, err)
+ require.Equal(t, expectedTotals, actualTotals)
+}
func TestAcctUpdatesFastUpdates(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -421,7 +471,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
}
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -437,15 +487,11 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
defer ml.Close()
- au := &accountUpdates{}
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
- au.initialize(conf, ".", proto, accts[0])
+ au := newAcctUpdates(t, ml, conf, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
// cover 10 genesis blocks
rewardLevel := uint64(0)
for i := 1; i < 10; i++ {
@@ -460,7 +506,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
- updates, totals := randomDeltasBalanced(1, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -487,7 +533,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
wg.Add(1)
go func(round basics.Round) {
defer wg.Done()
- au.committedUpTo(round)
+ ml.trackers.committedUpTo(round)
}(i)
}
wg.Wait()
@@ -513,7 +559,7 @@ func BenchmarkBalancesChanges(b *testing.B) {
initialRounds := uint64(1)
accountsCount := 5000
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -529,10 +575,8 @@ func BenchmarkBalancesChanges(b *testing.B) {
ml := makeMockLedgerForTracker(b, true, int(initialRounds), protocolVersion, accts)
defer ml.Close()
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
- err := au.loadFromDisk(ml)
- require.NoError(b, err)
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(b, ml, conf, ".")
defer au.close()
// cover initialRounds genesis blocks
@@ -550,7 +594,7 @@ func BenchmarkBalancesChanges(b *testing.B) {
accountChanges = accountsCount - 2 - int(basics.Round(proto.MaxBalLookback+uint64(b.N))+i)
}
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(b, err)
@@ -575,18 +619,18 @@ func BenchmarkBalancesChanges(b *testing.B) {
}
for i := proto.MaxBalLookback; i < proto.MaxBalLookback+initialRounds; i++ {
// Clear the timer to ensure a flush
- au.lastFlushTime = time.Time{}
- au.committedUpTo(basics.Round(i))
+ ml.trackers.lastFlushTime = time.Time{}
+ ml.trackers.committedUpTo(basics.Round(i))
}
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
b.ResetTimer()
startTime := time.Now()
for i := proto.MaxBalLookback + initialRounds; i < proto.MaxBalLookback+uint64(b.N); i++ {
// Clear the timer to ensure a flush
- au.lastFlushTime = time.Time{}
- au.committedUpTo(basics.Round(i))
+ ml.trackers.lastFlushTime = time.Time{}
+ ml.trackers.committedUpTo(basics.Round(i))
}
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
deltaTime := time.Now().Sub(startTime)
if deltaTime > time.Second {
return
@@ -644,7 +688,7 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
os.RemoveAll("./catchpoints")
}()
- accts := []map[basics.Address]basics.AccountData{randomAccounts(100000, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(100000, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -660,14 +704,11 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, accts)
defer ml.Close()
- au := &accountUpdates{}
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au.initialize(conf, ".", protoParams, accts[0])
+ au := newAcctUpdates(t, ml, conf, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
// cover 10 genesis blocks
rewardLevel := uint64(0)
@@ -679,7 +720,7 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
for i := basics.Round(10); i < basics.Round(protoParams.MaxBalLookback+5); i++ {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
- updates, totals := randomDeltasBalanced(1, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -703,9 +744,9 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
- au.committedUpTo(i)
+ ml.trackers.committedUpTo(i)
if i%2 == 1 {
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
}
}
}
@@ -736,7 +777,7 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
inMemory := true
testFunction := func(t *testing.T) {
- accts := []map[basics.Address]basics.AccountData{randomAccounts(9, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(9, true)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -768,13 +809,10 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
accts[0][addr] = accountData
}
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", protoParams, accts[0])
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
// cover 10 genesis blocks
rewardLevel := uint64(0)
for i := 1; i < 10; i++ {
@@ -865,10 +903,10 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
delta.Accts.Upsert(addr, ad)
}
au.newBlock(blk, delta)
- au.committedUpTo(i)
+ ml.trackers.committedUpTo(i)
}
lastRound := i - 1
- au.waitAccountsWriting()
+ ml.trackers.waitAccountsWriting()
for idx, addr := range moneyAccounts {
balance, validThrough, err := au.LookupWithoutRewards(lastRound, addr)
@@ -888,55 +926,6 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) {
t.Run("DiskDB", testFunction)
}
-// TestAcctUpdatesDeleteStoredCatchpoints - The goal of this test is to verify that the deleteStoredCatchpoints function works correctly.
-// it doing so by filling up the storedcatchpoints with dummy catchpoint file entries, as well as creating these dummy files on disk.
-// ( the term dummy is only because these aren't real catchpoint files, but rather a zero-length file ). Then, the test call the function
-// and ensures that it did not errored, the catchpoint files were correctly deleted, and that deleteStoredCatchpoints contains no more
-// entries.
-func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
-
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
- defer ml.Close()
-
- au := &accountUpdates{}
- conf := config.GetDefaultLocal()
- conf.CatchpointInterval = 1
- au.initialize(conf, ".", proto, accts[0])
- defer au.close()
-
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
- dummyCatchpointFilesToCreate := 42
-
- for i := 0; i < dummyCatchpointFilesToCreate; i++ {
- f, err := os.Create(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
- require.NoError(t, err)
- err = f.Close()
- require.NoError(t, err)
- }
-
- for i := 0; i < dummyCatchpointFilesToCreate; i++ {
- err := au.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fmt.Sprintf("./dummy_catchpoint_file-%d", i), "", 0)
- require.NoError(t, err)
- }
- err = au.deleteStoredCatchpoints(context.Background(), au.accountsq)
- require.NoError(t, err)
-
- for i := 0; i < dummyCatchpointFilesToCreate; i++ {
- // ensure that all the files were deleted.
- _, err := os.Open(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
- require.True(t, os.IsNotExist(err))
- }
- fileNames, err := au.accountsq.getOldestCatchpointFiles(context.Background(), dummyCatchpointFilesToCreate, 0)
- require.NoError(t, err)
- require.Equal(t, 0, len(fileNames))
-}
-
// listAndCompareComb lists the assets/applications and then compares against the expected
// It repeats with different combinations of the limit parameters
func listAndCompareComb(t *testing.T, au *accountUpdates, expected map[basics.CreatableIndex]ledgercore.ModifiedCreatable) {
@@ -1074,7 +1063,7 @@ func TestListCreatables(t *testing.T) {
require.NoError(t, err)
au := &accountUpdates{}
- au.accountsq, err = accountsDbInit(tx, tx)
+ au.accountsq, err = accountsInitDbQueries(tx, tx)
require.NoError(t, err)
// ******* All results are obtained from the cache. Empty database *******
@@ -1116,97 +1105,6 @@ func TestListCreatables(t *testing.T) {
listAndCompareComb(t, au, expectedDbImage)
}
-func TestIsWritingCatchpointFile(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- au := &accountUpdates{}
-
- au.catchpointWriting = -1
- ans := au.IsWritingCatchpointFile()
- require.True(t, ans)
-
- au.catchpointWriting = 0
- ans = au.IsWritingCatchpointFile()
- require.False(t, ans)
-}
-
-func TestGetCatchpointStream(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
-
- ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
- defer ml.Close()
-
- au := &accountUpdates{}
- conf := config.GetDefaultLocal()
- conf.CatchpointInterval = 1
- au.initialize(conf, ".", proto, accts[0])
- defer au.close()
-
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
- filesToCreate := 4
-
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), "catchpoints")
- require.NoError(t, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
- catchpointsDirectory := filepath.Join(temporaryDirectroy, "catchpoints")
- err = os.Mkdir(catchpointsDirectory, 0777)
- require.NoError(t, err)
-
- au.dbDirectory = temporaryDirectroy
-
- // Create the catchpoint files with dummy data
- for i := 0; i < filesToCreate; i++ {
- fileName := filepath.Join("catchpoints", fmt.Sprintf("%d.catchpoint", i))
- data := []byte{byte(i), byte(i + 1), byte(i + 2)}
- err = ioutil.WriteFile(filepath.Join(temporaryDirectroy, fileName), data, 0666)
- require.NoError(t, err)
-
- // Store the catchpoint into the database
- err := au.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fileName, "", int64(len(data)))
- require.NoError(t, err)
- }
-
- dataRead := make([]byte, 3)
- var n int
-
- // File on disk, and database has the record
- reader, err := au.GetCatchpointStream(basics.Round(1))
- n, err = reader.Read(dataRead)
- require.NoError(t, err)
- require.Equal(t, 3, n)
- outData := []byte{1, 2, 3}
- require.Equal(t, outData, dataRead)
- len, err := reader.Size()
- require.NoError(t, err)
- require.Equal(t, int64(3), len)
-
- // File deleted, but record in the database
- err = os.Remove(filepath.Join(temporaryDirectroy, "catchpoints", "2.catchpoint"))
- reader, err = au.GetCatchpointStream(basics.Round(2))
- require.Equal(t, ledgercore.ErrNoEntry{}, err)
- require.Nil(t, reader)
-
- // File on disk, but database lost the record
- err = au.accountsq.storeCatchpoint(context.Background(), basics.Round(3), "", "", 0)
- reader, err = au.GetCatchpointStream(basics.Round(3))
- n, err = reader.Read(dataRead)
- require.NoError(t, err)
- require.Equal(t, 3, n)
- outData = []byte{3, 4, 5}
- require.Equal(t, outData, dataRead)
-
- err = au.deleteStoredCatchpoints(context.Background(), au.accountsq)
- require.NoError(t, err)
-}
-
func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) {
rows, err := tx.Query("SELECT address, data FROM accountbase")
if err != nil {
@@ -1246,7 +1144,7 @@ func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err er
func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- accts := []map[basics.Address]basics.AccountData{randomAccounts(5, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(5, true)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -1261,21 +1159,17 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion, accts)
defer ml.Close()
- au := &accountUpdates{}
cfg := config.GetDefaultLocal()
cfg.Archival = true
- au.initialize(cfg, ".", proto, accts[0])
+ au := newAcctUpdates(b, ml, cfg, ".")
defer au.close()
- err := au.loadFromDisk(ml)
- require.NoError(b, err)
-
// at this point, the database was created. We want to fill the accounts data
accountsNumber := 6000000 * b.N
for i := 0; i < accountsNumber-5-2; { // subtract the account we've already created above, plus the sink/reward
var updates compactAccountDeltas
for k := 0; i < accountsNumber-5-2 && k < 1024; k++ {
- addr := randomAddress()
+ addr := ledgertesting.RandomAddress()
acctData := basics.AccountData{}
acctData.MicroAlgos.Raw = 1
updates.upsert(addr, accountDelta{new: acctData})
@@ -1289,87 +1183,20 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
require.NoError(b, err)
}
- err = ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- return updateAccountsRound(tx, 0, 1)
+ err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ return updateAccountsHashRound(tx, 1)
})
require.NoError(b, err)
au.close()
b.ResetTimer()
- err = au.loadFromDisk(ml)
+ err = au.loadFromDisk(ml, 0)
require.NoError(b, err)
b.StopTimer()
b.ReportMetric(float64(accountsNumber), "entries/trie")
}
-func BenchmarkLargeCatchpointWriting(b *testing.B) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(5, true)}
-
- pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- pooldata.Status = basics.NotParticipating
- accts[0][testPoolAddr] = pooldata
-
- sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- sinkdata.Status = basics.NotParticipating
- accts[0][testSinkAddr] = sinkdata
-
- ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion, accts)
- defer ml.Close()
-
- au := &accountUpdates{}
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- au.initialize(cfg, ".", proto, accts[0])
- defer au.close()
-
- temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), "catchpoints")
- require.NoError(b, err)
- defer func() {
- os.RemoveAll(temporaryDirectroy)
- }()
- catchpointsDirectory := filepath.Join(temporaryDirectroy, "catchpoints")
- err = os.Mkdir(catchpointsDirectory, 0777)
- require.NoError(b, err)
-
- au.dbDirectory = temporaryDirectroy
-
- err = au.loadFromDisk(ml)
- require.NoError(b, err)
-
- // at this point, the database was created. We want to fill the accounts data
- accountsNumber := 6000000 * b.N
- err = ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- for i := 0; i < accountsNumber-5-2; { // subtract the account we've already created above, plus the sink/reward
- var updates compactAccountDeltas
- for k := 0; i < accountsNumber-5-2 && k < 1024; k++ {
- addr := randomAddress()
- acctData := basics.AccountData{}
- acctData.MicroAlgos.Raw = 1
- updates.upsert(addr, accountDelta{new: acctData})
- i++
- }
-
- _, err = accountsNewRound(tx, updates, nil, proto, basics.Round(1))
- if err != nil {
- return
- }
- }
-
- return updateAccountsRound(tx, 0, 1)
- })
- require.NoError(b, err)
-
- b.ResetTimer()
- au.generateCatchpoint(basics.Round(0), "0#ABCD", crypto.Digest{}, time.Second)
- b.StopTimer()
- b.ReportMetric(float64(accountsNumber), "accounts")
-}
-
func BenchmarkCompactDeltas(b *testing.B) {
b.Run("account-deltas", func(b *testing.B) {
if b.N < 500 {
@@ -1461,129 +1288,6 @@ func TestCompactDeltas(t *testing.T) {
}
-func TestReproducibleCatchpointLabels(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- t.Skip("This test is too slow on ARM and causes travis builds to time out")
- }
- // create new protocol version, which has lower lookback
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
- protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.MaxBalLookback = 32
- protoParams.SeedLookback = 2
- protoParams.SeedRefreshInterval = 8
- config.Consensus[testProtocolVersion] = protoParams
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
-
- accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)}
- rewardsLevels := []uint64{0}
-
- pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = 100 * 1000 * 1000 * 1000 * 1000
- pooldata.Status = basics.NotParticipating
- accts[0][testPoolAddr] = pooldata
-
- sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
- sinkdata.Status = basics.NotParticipating
- accts[0][testSinkAddr] = sinkdata
-
- ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
- defer ml.Close()
-
- au := &accountUpdates{}
- cfg := config.GetDefaultLocal()
- cfg.CatchpointInterval = 50
- cfg.CatchpointTracking = 1
- au.initialize(cfg, ".", protoParams, accts[0])
- defer au.close()
-
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
-
- rewardLevel := uint64(0)
-
- const testCatchpointLabelsCount = 5
-
- // lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
- knownCreatables := make(map[basics.CreatableIndex]bool)
- catchpointLabels := make(map[basics.Round]string)
- ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
- roundDeltas := make(map[basics.Round]ledgercore.StateDelta)
- for i := basics.Round(1); i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
- rewardLevelDelta := crypto.RandUint64() % 5
- rewardLevel += rewardLevelDelta
- var updates ledgercore.AccountDeltas
- var totals map[basics.Address]basics.AccountData
- base := accts[i-1]
- updates, totals, lastCreatableID = randomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
- require.NoError(t, err)
-
- newPool := totals[testPoolAddr]
- newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
- updates.Upsert(testPoolAddr, newPool)
- totals[testPoolAddr] = newPool
-
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- },
- }
- blk.RewardsLevel = rewardLevel
- blk.CurrentProtocol = testProtocolVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
- delta.Accts.MergeAccounts(updates)
- delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
- au.newBlock(blk, delta)
- au.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- accts = append(accts, totals)
- rewardsLevels = append(rewardsLevels, rewardLevel)
- roundDeltas[i] = delta
-
- // if this is a catchpoint round, save the label.
- if uint64(i)%cfg.CatchpointInterval == 0 {
- au.waitAccountsWriting()
- catchpointLabels[i] = au.GetLastCatchpointLabel()
- ledgerHistory[i] = ml.fork(t)
- defer ledgerHistory[i].Close()
- }
- }
-
- // test in revese what happens when we try to repeat the exact same blocks.
- // start off with the catchpoint before the last one
- startingRound := basics.Round((testCatchpointLabelsCount - 1) * cfg.CatchpointInterval)
- for ; startingRound > basics.Round(cfg.CatchpointInterval); startingRound -= basics.Round(cfg.CatchpointInterval) {
- au.close()
- err := au.loadFromDisk(ledgerHistory[startingRound])
- require.NoError(t, err)
-
- for i := startingRound + 1; i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- },
- }
- blk.RewardsLevel = rewardsLevels[i]
- blk.CurrentProtocol = testProtocolVersion
- delta := roundDeltas[i]
- au.newBlock(blk, delta)
- au.committedUpTo(i)
-
- // if this is a catchpoint round, check the label.
- if uint64(i)%cfg.CatchpointInterval == 0 {
- au.waitAccountsWriting()
- require.Equal(t, catchpointLabels[i], au.GetLastCatchpointLabel())
- }
- }
- }
-}
-
// TestCachesInitialization test the functionality of the initializeCaches cache.
func TestCachesInitialization(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1594,7 +1298,7 @@ func TestCachesInitialization(t *testing.T) {
initialRounds := uint64(1)
accountsCount := 5
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -1611,10 +1315,8 @@ func TestCachesInitialization(t *testing.T) {
ml.log.SetLevel(logging.Warn)
defer ml.Close()
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
- err := au.loadFromDisk(ml)
- require.NoError(t, err)
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
// cover initialRounds genesis blocks
rewardLevel := uint64(0)
@@ -1630,7 +1332,7 @@ func TestCachesInitialization(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1652,15 +1354,15 @@ func TestCachesInitialization(t *testing.T) {
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{totals}, rewardLevel)
ml.addMockBlock(blockEntry{block: blk}, delta)
au.newBlock(blk, delta)
- au.committedUpTo(basics.Round(i))
- au.waitAccountsWriting()
+ ml.trackers.committedUpTo(basics.Round(i))
+ ml.trackers.waitAccountsWriting()
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
au.close()
// reset the accounts, since their balances are now changed due to the rewards.
- accts = []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts = []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
// create another mocked ledger, but this time with a fresh new tracker database.
ml2 := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion, accts)
@@ -1671,15 +1373,13 @@ func TestCachesInitialization(t *testing.T) {
ml2.blocks = ml.blocks
ml2.deltas = ml.deltas
- au = &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", proto, accts[0])
- err = au.loadFromDisk(ml2)
- require.NoError(t, err)
+ conf = config.GetDefaultLocal()
+ au = newAcctUpdates(t, ml2, conf, ".")
defer au.close()
// make sure the deltas array end up containing only the most recent 320 rounds.
require.Equal(t, int(proto.MaxBalLookback), len(au.deltas))
- require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.dbRound)
+ require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.cachedDBRound)
}
// TestSplittingConsensusVersionCommits tests the a sequence of commits that spans over multiple consensus versions works correctly.
@@ -1692,7 +1392,7 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
initialRounds := uint64(1)
accountsCount := 5
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -1709,9 +1409,10 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
ml.log.SetLevel(logging.Warn)
defer ml.Close()
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0])
- err := au.loadFromDisk(ml)
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
+
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
defer au.close()
@@ -1730,7 +1431,7 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1765,7 +1466,7 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1790,10 +1491,10 @@ func TestSplittingConsensusVersionCommits(t *testing.T) {
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- // now, commit and verify that the committedUpTo method broken the range correctly.
- au.committedUpTo(lastRoundToWrite)
- au.waitAccountsWriting()
- require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound)
+ // now, commit and verify that the produceCommittingTask method broken the range correctly.
+ ml.trackers.committedUpTo(lastRoundToWrite)
+ ml.trackers.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.cachedDBRound)
}
@@ -1808,7 +1509,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
initialRounds := uint64(1)
accountsCount := 5
- accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)}
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(accountsCount, true)}
rewardsLevels := []uint64{0}
pooldata := basics.AccountData{}
@@ -1825,9 +1526,10 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
ml.log.SetLevel(logging.Warn)
defer ml.Close()
- au := &accountUpdates{}
- au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0])
- err := au.loadFromDisk(ml)
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
+
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
defer au.close()
@@ -1846,7 +1548,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1880,7 +1582,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1905,10 +1607,10 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- // now, commit and verify that the committedUpTo method broken the range correctly.
- au.committedUpTo(endOfFirstNewProtocolSegment)
- au.waitAccountsWriting()
- require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound)
+ // now, commit and verify that the produceCommittingTask method broken the range correctly.
+ ml.trackers.committedUpTo(endOfFirstNewProtocolSegment)
+ ml.trackers.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.cachedDBRound)
// write additional extraRounds elements and verify these can be flushed.
for i := endOfFirstNewProtocolSegment + 1; i <= basics.Round(initialRounds+2*extraRounds+initialProtoParams.MaxBalLookback); i++ {
@@ -1916,7 +1618,7 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
rewardLevel += rewardLevelDelta
accountChanges := 2
- updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
+ updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
prevTotals, err := au.Totals(basics.Round(i - 1))
require.NoError(t, err)
@@ -1941,9 +1643,9 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accts = append(accts, totals)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
- au.committedUpTo(endOfFirstNewProtocolSegment + basics.Round(extraRounds))
- au.waitAccountsWriting()
- require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.dbRound)
+ ml.trackers.committedUpTo(endOfFirstNewProtocolSegment + basics.Round(extraRounds))
+ ml.trackers.waitAccountsWriting()
+ require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.cachedDBRound)
}
// TestConsecutiveVersion tests the consecutiveVersion method correctness.
diff --git a/ledger/applications_test.go b/ledger/applications_test.go
index 038207749..776832968 100644
--- a/ledger/applications_test.go
+++ b/ledger/applications_test.go
@@ -17,356 +17,26 @@
package ledger
import (
- "crypto/rand"
"encoding/hex"
- "fmt"
"testing"
+ "time"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
-func getRandomAddress(a *require.Assertions) basics.Address {
- const rl = 16
- b := make([]byte, rl)
- n, err := rand.Read(b)
- a.NoError(err)
- a.Equal(rl, n)
-
- address := crypto.Hash(b)
- return basics.Address(address)
-}
-
-type creatableLocator struct {
- cidx basics.CreatableIndex
- ctype basics.CreatableType
-}
-type storeLocator struct {
- addr basics.Address
- aidx basics.AppIndex
- global bool
-}
-type mockCowForLogicLedger struct {
- rnd basics.Round
- ts int64
- cr map[creatableLocator]basics.Address
- brs map[basics.Address]basics.AccountData
- stores map[storeLocator]basics.TealKeyValue
- tcs map[int]basics.CreatableIndex
- txc uint64
-}
-
-func (c *mockCowForLogicLedger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
- br, ok := c.brs[addr]
- if !ok {
- return basics.AccountData{}, fmt.Errorf("addr %s not in mock cow", addr.String())
- }
- return br, nil
-}
-
-func (c *mockCowForLogicLedger) GetCreatableID(groupIdx int) basics.CreatableIndex {
- return c.tcs[groupIdx]
-}
-
-func (c *mockCowForLogicLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
- addr, found := c.cr[creatableLocator{cidx, ctype}]
- return addr, found, nil
-}
-
-func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return basics.TealValue{}, false, fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- tv, found := kv[key]
- return tv, found, nil
-}
-
-func (c *mockCowForLogicLedger) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
- return transactions.EvalDelta{}, nil
-}
-
-func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- kv[key] = value
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- delete(kv, key)
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) round() basics.Round {
- return c.rnd
-}
-
-func (c *mockCowForLogicLedger) prevTimestamp() int64 {
- return c.ts
-}
-
-func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
- _, found := c.stores[storeLocator{addr, aidx, global}]
- return found, nil
-}
-
-func (c *mockCowForLogicLedger) incTxnCount() {
- c.txc++
-}
-
-func (c *mockCowForLogicLedger) txnCounter() uint64 {
- return c.txc
-}
-
-func newCowMock(creatables []modsData) *mockCowForLogicLedger {
- var m mockCowForLogicLedger
- m.cr = make(map[creatableLocator]basics.Address, len(creatables))
- for _, e := range creatables {
- m.cr[creatableLocator{e.cidx, e.ctype}] = e.addr
- }
- return &m
-}
-
-func TestLogicLedgerMake(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- _, err := newLogicLedger(nil, 0)
- a.Error(err)
- a.Contains(err.Error(), "cannot make logic ledger for app index 0")
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
-
- c := &mockCowForLogicLedger{}
- _, err = newLogicLedger(c, 0)
- a.Error(err)
- a.Contains(err.Error(), "cannot make logic ledger for app index 0")
-
- _, err = newLogicLedger(c, aidx)
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", aidx))
-
- c = newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
- a.Equal(aidx, l.aidx)
- a.Equal(c, l.cow)
-}
-
-func TestLogicLedgerBalances(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- addr1 := getRandomAddress(a)
- ble := basics.MicroAlgos{Raw: 100}
- c.brs = map[basics.Address]basics.AccountData{addr1: {MicroAlgos: ble}}
- bla, err := l.Balance(addr1)
- a.NoError(err)
- a.Equal(ble, bla)
-}
-
-func TestLogicLedgerGetters(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- round := basics.Round(1234)
- c.rnd = round
- ts := int64(11223344)
- c.ts = ts
-
- addr1 := getRandomAddress(a)
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {}}
- a.Equal(aidx, l.ApplicationID())
- a.Equal(round, l.Round())
- a.Equal(ts, l.LatestTimestamp())
- a.True(l.OptedIn(addr1, 0))
- a.True(l.OptedIn(addr1, aidx))
- a.False(l.OptedIn(addr, 0))
- a.False(l.OptedIn(addr, aidx))
-}
-
-func TestLogicLedgerAsset(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- addr1 := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- _, _, err = l.AssetParams(basics.AssetIndex(aidx))
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("asset %d does not exist", aidx))
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}}},
- }
-
- ap, creator, err := l.AssetParams(assetIdx)
- a.NoError(err)
- a.Equal(addr1, creator)
- a.Equal(uint64(1000), ap.Total)
-
- _, err = l.AssetHolding(addr1, assetIdx)
- a.Error(err)
- a.Contains(err.Error(), "has not opted in to asset")
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {
- AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}},
- Assets: map[basics.AssetIndex]basics.AssetHolding{assetIdx: {Amount: 99}},
- },
- }
-
- ah, err := l.AssetHolding(addr1, assetIdx)
- a.NoError(err)
- a.Equal(uint64(99), ah.Amount)
-}
-
-func TestLogicLedgerGetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- addr1 := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- _, ok, err := l.GetGlobal(basics.AppIndex(assetIdx), "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", assetIdx))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx + 1, true}: {"gkey": tv}}
- val, ok, err := l.GetGlobal(aidx, "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- val, ok, err = l.GetGlobal(aidx, "gkey")
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- val, ok, err = l.GetLocal(addr, aidx, "lkey", 0)
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-}
-
-func TestLogicLedgerSetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- err = l.SetGlobal("gkey", tv)
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv2 := basics.TealValue{Type: basics.TealUintType, Uint: 2}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.SetGlobal("gkey", tv2)
- a.NoError(err)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- err = l.SetLocal(addr, "lkey", tv2, 0)
- a.NoError(err)
-}
-
-func TestLogicLedgerDelKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := getRandomAddress(a)
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l, err := newLogicLedger(c, aidx)
- a.NoError(err)
- a.NotNil(l)
-
- err = l.DelGlobal("gkey")
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.DelGlobal("gkey")
- a.NoError(err)
-
- addr1 := getRandomAddress(a)
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {"lkey": tv}}
- err = l.DelLocal(addr1, "lkey", 0)
- a.NoError(err)
+func commitRound(offset uint64, dbRound basics.Round, l *Ledger) {
+ l.trackers.lastFlushTime = time.Time{}
+ l.trackers.scheduleCommit(l.Latest(), l.Latest()-(dbRound+basics.Round(offset)))
+ l.trackers.waitAccountsWriting()
}
// test ensures that
@@ -374,6 +44,7 @@ func TestLogicLedgerDelKey(t *testing.T) {
// before and after application code refactoring
// 2) writing into empty (opted-in) local state's KeyValue works after reloading
// Hardcoded values are from commit 9a0b439 (pre app refactor commit)
+
func TestAppAccountDataStorage(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -433,7 +104,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -460,14 +131,6 @@ return`
l, err := OpenLedger(logging.Base(), "TestAppAccountData", true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
txHeader := transactions.Header{
Sender: creator,
@@ -522,9 +185,7 @@ return`
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(3, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(3, 0, l)
appCallFields = transactions.ApplicationCallTxnFields{
OnCompletion: 0,
@@ -543,9 +204,7 @@ return`
a.NoError(err)
// save data into DB
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(1, 3, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(1, 3, l)
// dump accounts
var rowid int64
@@ -553,18 +212,23 @@ return`
var buf []byte
err = l.accts.accountsq.lookupStmt.QueryRow(creator[:]).Scan(&rowid, &dbRound, &buf)
a.NoError(err)
+ a.Equal(basics.Round(4), dbRound)
a.Equal(expectedCreator, buf)
err = l.accts.accountsq.lookupStmt.QueryRow(userOptin[:]).Scan(&rowid, &dbRound, &buf)
a.NoError(err)
+ a.Equal(basics.Round(4), dbRound)
a.Equal(expectedUserOptIn, buf)
pad, err := l.accts.accountsq.lookup(userOptin)
+ a.NoError(err)
a.Nil(pad.accountData.AppLocalStates[appIdx].KeyValue)
ad, err := l.Lookup(dbRound, userOptin)
+ a.NoError(err)
a.Nil(ad.AppLocalStates[appIdx].KeyValue)
err = l.accts.accountsq.lookupStmt.QueryRow(userLocal[:]).Scan(&rowid, &dbRound, &buf)
a.NoError(err)
+ a.Equal(basics.Round(4), dbRound)
a.Equal(expectedUserLocal, buf)
ad, err = l.Lookup(dbRound, userLocal)
@@ -664,7 +328,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -678,14 +342,6 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -751,9 +407,7 @@ return`
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(3, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(3, 0, l)
// check first write
blk, err := l.Block(2)
@@ -807,9 +461,7 @@ return`
a.NoError(err)
// save data into DB
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(2, 3, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(2, 3, l)
// check first write
blk, err = l.Block(4)
@@ -919,7 +571,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -933,14 +585,6 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1034,9 +678,7 @@ return`
l.WaitForCommit(3)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(3, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(3, 0, l)
// check first write
blk, err = l.Block(2)
@@ -1078,7 +720,7 @@ return`
program := ops.Program
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -1092,14 +734,6 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1174,9 +808,7 @@ return`
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(2, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(2, 0, l)
// check first write
blk, err = l.Block(1)
@@ -1279,7 +911,7 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
// explicitly trigger compatibility mode
proto := config.Consensus[protocol.ConsensusV24]
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusV24, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusV24, 100)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -1293,14 +925,6 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1361,9 +985,7 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
a.NoError(err)
// save data into DB and write into local state
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(2, 0, 0)
- l.accts.accountsWriting.Wait()
+ commitRound(2, 0, l)
// check first write
blk, err := l.Block(2)
diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go
index 800668af9..6166c5d68 100644
--- a/ledger/apptxn_test.go
+++ b/ledger/apptxn_test.go
@@ -20,10 +20,21 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
)
// main wraps up some TEAL source in a header and footer so that it is
@@ -37,11 +48,152 @@ func main(source string) string {
end: int 1`, source)
}
+// newTestLedger creates a in memory Ledger that is as realistic as
+// possible. It has Rewards and FeeSink properly configured.
+func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger {
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture, balances, "test", genHash)
+ require.NoError(t, err)
+ require.False(t, genBlock.FeeSink.IsZero())
+ require.False(t, genBlock.RewardsPool.IsZero())
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ Block: genBlock,
+ Accounts: balances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ return l
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func (ledger *Ledger) nextBlock(t testing.TB) *internal.BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ eval, err := ledger.StartEvaluator(nextHdr, 0, 0)
+ require.NoError(t, err)
+ return eval
+}
+
+// endBlock completes the block being created, returns the ValidatedBlock for inspection
+func (ledger *Ledger) endBlock(t testing.TB, eval testingEvaluator) *ledgercore.ValidatedBlock {
+ validatedBlock, err := eval.BlockEvaluator.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ return validatedBlock
+}
+
+// lookup gets the current accountdata for an address
+func (ledger *Ledger) lookup(t testing.TB, addr basics.Address) basics.AccountData {
+ rnd := ledger.Latest()
+ ad, err := ledger.Lookup(rnd, addr)
+ require.NoError(t, err)
+ return ad
+}
+
+// micros gets the current microAlgo balance for an address
+func (ledger *Ledger) micros(t testing.TB, addr basics.Address) uint64 {
+ return ledger.lookup(t, addr).MicroAlgos.Raw
+}
+
+// asa gets the current balance and optin status for some asa for an address
+func (ledger *Ledger) asa(t testing.TB, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
+ if holding, ok := ledger.lookup(t, addr).Assets[asset]; ok {
+ return holding.Amount, true
+ }
+ return 0, false
+}
+
+// asaParams gets the asset params for a given asa index
+func (ledger *Ledger) asaParams(t testing.TB, asset basics.AssetIndex) (basics.AssetParams, error) {
+ creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
+ if err != nil {
+ return basics.AssetParams{}, err
+ }
+ if !ok {
+ return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
+ }
+ if params, ok := ledger.lookup(t, creator).AssetParams[asset]; ok {
+ return params, nil
+ }
+ return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
+}
+
+type testingEvaluator struct {
+ *internal.BlockEvaluator
+ ledger *Ledger
+}
+
+func (eval *testingEvaluator) fillDefaults(txn *txntest.Txn) {
+ if txn.GenesisHash.IsZero() {
+ txn.GenesisHash = eval.ledger.GenesisHash()
+ }
+ if txn.FirstValid == 0 {
+ txn.FirstValid = eval.Round()
+ }
+ txn.FillDefaults(eval.ledger.genesisProto)
+}
+
+func (eval *testingEvaluator) txn(t testing.TB, txn *txntest.Txn, problem ...string) {
+ t.Helper()
+ eval.fillDefaults(txn)
+ stxn := txn.SignedTxn()
+ err := eval.TestTransactionGroup([]transactions.SignedTxn{stxn})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ require.Len(t, problem, 0)
+}
+
+func (eval *testingEvaluator) txns(t testing.TB, txns ...*txntest.Txn) {
+ t.Helper()
+ for _, txn := range txns {
+ eval.txn(t, txn)
+ }
+}
+
+func (eval *testingEvaluator) txgroup(t testing.TB, txns ...*txntest.Txn) error {
+ t.Helper()
+ for _, txn := range txns {
+ eval.fillDefaults(txn)
+ }
+ txgroup := txntest.SignedTxns(txns...)
+
+ err := eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return err
+ }
+
+ err = eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
+ return err
+}
+
// TestPayAction ensures a pay in teal affects balances
func TestPayAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -75,12 +227,12 @@ func TestPayAction(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &fund, &payout1)
vb := l.endBlock(t, eval)
// AD contains expected appIndex
- require.Equal(t, ai, vb.blk.Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, ai, vb.Block().Payset[0].ApplyData.ApplicationID)
ad0 := l.micros(t, addrs[0])
ad1 := l.micros(t, addrs[1])
@@ -95,11 +247,11 @@ func TestPayAction(t *testing.T) {
// Build up Residue in RewardsState so it's ready to pay
for i := 1; i < 10; i++ {
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
l.endBlock(t, eval)
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
payout2 := txntest.Txn{
Type: "appl",
Sender: addrs[1],
@@ -109,15 +261,17 @@ func TestPayAction(t *testing.T) {
eval.txn(t, &payout2)
// confirm that modifiedAccounts can see account in inner txn
found := false
- for _, addr := range eval.state.modifiedAccounts() {
+ vb = l.endBlock(t, eval)
+
+ deltas := vb.Delta()
+ for _, addr := range deltas.Accts.ModifiedAccounts() {
if addr == addrs[2] {
found = true
}
}
require.True(t, found)
- l.endBlock(t, eval)
- payInBlock := eval.block.Payset[0]
+ payInBlock := vb.Block().Payset[0]
rewards := payInBlock.ApplyData.SenderRewards.Raw
require.Greater(t, rewards, uint64(2000)) // some biggish number
inners := payInBlock.ApplyData.EvalDelta.InnerTxns
@@ -150,23 +304,23 @@ func TestPayAction(t *testing.T) {
Receiver: ai.Address(),
Amount: 10 * 1000 * 1000000, // account min balance, plus fees
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &tenkalgos)
l.endBlock(t, eval)
beforepay := l.micros(t, ai.Address())
// Build up Residue in RewardsState so it's ready to pay again
for i := 1; i < 10; i++ {
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
l.endBlock(t, eval)
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, payout2.Noted("2"))
- l.endBlock(t, eval)
+ vb = l.endBlock(t, eval)
afterpay := l.micros(t, ai.Address())
- payInBlock = eval.block.Payset[0]
+ payInBlock = vb.Block().Payset[0]
inners = payInBlock.ApplyData.EvalDelta.InnerTxns
require.Len(t, inners, 1)
@@ -180,7 +334,7 @@ func TestPayAction(t *testing.T) {
func TestAxferAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -231,14 +385,14 @@ submit: itxn_submit
`),
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &asa, &app)
vb := l.endBlock(t, eval)
asaIndex := basics.AssetIndex(1)
- require.Equal(t, asaIndex, vb.blk.Payset[0].ApplyData.ConfigAsset)
+ require.Equal(t, asaIndex, vb.Block().Payset[0].ApplyData.ConfigAsset)
appIndex := basics.AppIndex(2)
- require.Equal(t, appIndex, vb.blk.Payset[1].ApplyData.ApplicationID)
+ require.Equal(t, appIndex, vb.Block().Payset[1].ApplyData.ApplicationID)
fund := txntest.Txn{
Type: "pay",
@@ -248,7 +402,7 @@ submit: itxn_submit
// stay under 1M, to avoid rewards complications
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &fund)
l.endBlock(t, eval)
@@ -261,7 +415,7 @@ submit: itxn_submit
}
// Fail, because app account is not opted in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &fundgold, fmt.Sprintf("asset %d missing", asaIndex))
l.endBlock(t, eval)
@@ -278,7 +432,7 @@ submit: itxn_submit
}
// Tell the app to opt itself in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &optin)
l.endBlock(t, eval)
@@ -287,7 +441,7 @@ submit: itxn_submit
require.Equal(t, amount, uint64(0))
// Now, suceed, because opted in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &fundgold)
l.endBlock(t, eval)
@@ -303,7 +457,7 @@ submit: itxn_submit
ForeignAssets: []basics.AssetIndex{asaIndex},
Accounts: []basics.Address{addrs[0]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &withdraw)
l.endBlock(t, eval)
@@ -311,7 +465,7 @@ submit: itxn_submit
require.True(t, in)
require.Equal(t, amount, uint64(10000))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, withdraw.Noted("2"))
l.endBlock(t, eval)
@@ -319,7 +473,7 @@ submit: itxn_submit
require.True(t, in) // Zero left, but still opted in
require.Equal(t, amount, uint64(0))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, withdraw.Noted("3"), "underflow on subtracting")
l.endBlock(t, eval)
@@ -336,7 +490,7 @@ submit: itxn_submit
Accounts: []basics.Address{addrs[0]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &close)
l.endBlock(t, eval)
@@ -345,13 +499,13 @@ submit: itxn_submit
require.Equal(t, amount, uint64(0))
// Now, fail again, opted out
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, fundgold.Noted("2"), fmt.Sprintf("asset %d missing", asaIndex))
l.endBlock(t, eval)
// Do it all again, so we can test closeTo when we have a non-zero balance
// Tell the app to opt itself in.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, optin.Noted("a"), fundgold.Noted("a"))
l.endBlock(t, eval)
@@ -359,7 +513,7 @@ submit: itxn_submit
require.Equal(t, uint64(20000), amount)
left, _ := l.asa(t, addrs[0], asaIndex)
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, close.Noted("a"))
l.endBlock(t, eval)
@@ -373,7 +527,7 @@ submit: itxn_submit
func TestClawbackAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -424,12 +578,12 @@ func TestClawbackAction(t *testing.T) {
AssetReceiver: addrs[1],
XferAsset: asaIndex,
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &asa, &app, &optin)
vb := l.endBlock(t, eval)
- require.Equal(t, asaIndex, vb.blk.Payset[0].ApplyData.ConfigAsset)
- require.Equal(t, appIndex, vb.blk.Payset[1].ApplyData.ApplicationID)
+ require.Equal(t, asaIndex, vb.Block().Payset[0].ApplyData.ConfigAsset)
+ require.Equal(t, appIndex, vb.Block().Payset[1].ApplyData.ApplicationID)
bystander := addrs[2] // Has no authority of its own
overpay := txntest.Txn{
@@ -445,7 +599,7 @@ func TestClawbackAction(t *testing.T) {
ForeignAssets: []basics.AssetIndex{asaIndex},
Accounts: []basics.Address{addrs[0], addrs[1]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txgroup(t, &overpay, &clawmove)
l.endBlock(t, eval)
@@ -457,7 +611,7 @@ func TestClawbackAction(t *testing.T) {
func TestRekeyAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -493,7 +647,7 @@ skipclose:
RekeyTo: appIndex.Address(),
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &ezpayer, &rekey)
l.endBlock(t, eval)
@@ -503,7 +657,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2]}, // pay 2 from 0 (which was rekeyed)
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &useacct)
l.endBlock(t, eval)
@@ -520,7 +674,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[2], addrs[0]}, // pay 0 from 2
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &baduse, "unauthorized")
l.endBlock(t, eval)
@@ -533,7 +687,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2], addrs[3]}, // close to 3
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &close)
l.endBlock(t, eval)
@@ -545,13 +699,13 @@ skipclose:
Receiver: addrs[0],
Amount: 10_000_000,
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &payback)
l.endBlock(t, eval)
require.Equal(t, uint64(10_000_000), l.micros(t, addrs[0]))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, useacct.Noted("2"), "unauthorized")
l.endBlock(t, eval)
}
@@ -561,7 +715,7 @@ skipclose:
func TestRekeyActionCloseAccount(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -617,7 +771,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
Amount: 1_000_000,
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &rekey, &fund)
l.endBlock(t, eval)
@@ -627,7 +781,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2]},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &useacct, "unauthorized")
l.endBlock(t, eval)
}
@@ -636,7 +790,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
func TestDuplicatePayAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -678,14 +832,14 @@ func TestDuplicatePayAction(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &fund, &paytwice, create.Noted("in same block"))
vb := l.endBlock(t, eval)
- require.Equal(t, appIndex, vb.blk.Payset[0].ApplyData.ApplicationID)
- require.Equal(t, 4, len(vb.blk.Payset))
+ require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, 4, len(vb.Block().Payset))
// create=1, fund=2, payTwice=3,4,5
- require.Equal(t, basics.AppIndex(6), vb.blk.Payset[3].ApplyData.ApplicationID)
+ require.Equal(t, basics.AppIndex(6), vb.Block().Payset[3].ApplyData.ApplicationID)
ad0 := l.micros(t, addrs[0])
ad1 := l.micros(t, addrs[1])
@@ -699,19 +853,19 @@ func TestDuplicatePayAction(t *testing.T) {
require.Equal(t, 188000, int(app))
// Now create another app, and see if it gets the index we expect.
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, create.Noted("again"))
vb = l.endBlock(t, eval)
// create=1, fund=2, payTwice=3,4,5, insameblock=6
- require.Equal(t, basics.AppIndex(7), vb.blk.Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, basics.AppIndex(7), vb.Block().Payset[0].ApplyData.ApplicationID)
}
// TestInnerTxCount ensures that inner transactions increment the TxnCounter
func TestInnerTxnCount(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -744,22 +898,22 @@ func TestInnerTxnCount(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &create, &fund)
vb := l.endBlock(t, eval)
- require.Equal(t, 2, int(vb.blk.TxnCounter))
+ require.Equal(t, 2, int(vb.Block().TxnCounter))
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &payout1)
vb = l.endBlock(t, eval)
- require.Equal(t, 4, int(vb.blk.TxnCounter))
+ require.Equal(t, 4, int(vb.Block().TxnCounter))
}
// TestAcfgAction ensures assets can be created and configured in teal
func TestAcfgAction(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -869,7 +1023,7 @@ submit: itxn_submit
Amount: 200_000, // exactly account min balance + one asset
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &app, &fund)
l.endBlock(t, eval)
@@ -880,14 +1034,14 @@ submit: itxn_submit
ApplicationArgs: [][]byte{[]byte("create")},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
// Can't create an asset if you have exactly 200,000 and need to pay fee
eval.txn(t, &createAsa, "balance 199000 below min 200000")
// fund it some more and try again
eval.txns(t, fund.Noted("more!"), &createAsa)
vb := l.endBlock(t, eval)
- asaIndex := vb.blk.Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
+ asaIndex := vb.Block().Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
require.Equal(t, basics.AssetIndex(5), asaIndex)
asaParams, err := l.asaParams(t, basics.AssetIndex(5))
@@ -909,7 +1063,7 @@ submit: itxn_submit
ApplicationArgs: [][]byte{[]byte(a), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
ForeignAssets: []basics.AssetIndex{asaIndex},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
t.Log(a)
eval.txn(t, &check)
l.endBlock(t, eval)
@@ -922,7 +1076,7 @@ submit: itxn_submit
ApplicationArgs: [][]byte{[]byte("freeze"), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
ForeignAssets: []basics.AssetIndex{asaIndex},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &nodice, "this transaction should be issued by the manager")
l.endBlock(t, eval)
@@ -935,7 +1089,7 @@ submit: itxn_submit
func TestAsaDuringInit(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -975,20 +1129,20 @@ func TestAsaDuringInit(t *testing.T) {
`,
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &prefund, &app)
vb := l.endBlock(t, eval)
- require.Equal(t, appIndex, vb.blk.Payset[1].ApplicationID)
+ require.Equal(t, appIndex, vb.Block().Payset[1].ApplicationID)
- asaIndex := vb.blk.Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
+ asaIndex := vb.Block().Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
require.Equal(t, basics.AssetIndex(3), asaIndex)
}
func TestRekey(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -1012,10 +1166,10 @@ func TestRekey(t *testing.T) {
`),
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &app)
vb := l.endBlock(t, eval)
- appIndex := vb.blk.Payset[0].ApplicationID
+ appIndex := vb.Block().Payset[0].ApplicationID
require.Equal(t, basics.AppIndex(1), appIndex)
fund := txntest.Txn{
@@ -1029,7 +1183,7 @@ func TestRekey(t *testing.T) {
Sender: addrs[1],
ApplicationID: appIndex,
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &fund, &rekey)
eval.txn(t, rekey.Noted("2"), "unauthorized")
l.endBlock(t, eval)
@@ -1039,7 +1193,7 @@ func TestRekey(t *testing.T) {
func TestNote(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -1060,10 +1214,10 @@ func TestNote(t *testing.T) {
`),
}
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &app)
vb := l.endBlock(t, eval)
- appIndex := vb.blk.Payset[0].ApplicationID
+ appIndex := vb.Block().Payset[0].ApplicationID
require.Equal(t, basics.AppIndex(1), appIndex)
fund := txntest.Txn{
@@ -1077,17 +1231,17 @@ func TestNote(t *testing.T) {
Sender: addrs[1],
ApplicationID: appIndex,
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &fund, &note)
vb = l.endBlock(t, eval)
- alphabet := vb.blk.Payset[1].EvalDelta.InnerTxns[0].Txn.Note
+ alphabet := vb.Block().Payset[1].EvalDelta.InnerTxns[0].Txn.Note
require.Equal(t, "abcdefghijklmnopqrstuvwxyz01234567890", string(alphabet))
}
func TestKeyreg(t *testing.T) {
partitiontest.PartitionTest(t)
- genBalances, addrs, _ := newTestGenesis()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
defer l.Close()
@@ -1120,10 +1274,10 @@ nonpart:
}
// Create the app
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
eval.txns(t, &app)
vb := l.endBlock(t, eval)
- appIndex := vb.blk.Payset[0].ApplicationID
+ appIndex := vb.Block().Payset[0].ApplicationID
require.Equal(t, basics.AppIndex(1), appIndex)
// Give the app a lot of money
@@ -1133,15 +1287,15 @@ nonpart:
Receiver: appIndex.Address(),
Amount: 1_000_000_000,
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &fund)
- vb = l.endBlock(t, eval)
+ l.endBlock(t, eval)
require.Equal(t, 1_000_000_000, int(l.micros(t, appIndex.Address())))
// Build up Residue in RewardsState so it's ready to pay
for i := 1; i < 10; i++ {
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
l.endBlock(t, eval)
}
@@ -1152,7 +1306,7 @@ nonpart:
ApplicationID: appIndex,
ApplicationArgs: [][]byte{[]byte("pay")},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &pay)
l.endBlock(t, eval)
// 2000 was earned in rewards (- 1000 fee, -1 pay)
@@ -1165,7 +1319,7 @@ nonpart:
ApplicationID: appIndex,
ApplicationArgs: [][]byte{[]byte("nonpart")},
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, &nonpart)
l.endBlock(t, eval)
require.Equal(t, 999_999_999, int(l.micros(t, appIndex.Address())))
@@ -1173,10 +1327,10 @@ nonpart:
// Build up Residue in RewardsState so it's ready to pay AGAIN
// But expect no rewards
for i := 1; i < 100; i++ {
- eval := l.nextBlock(t)
+ eval := testingEvaluator{l.nextBlock(t), l}
l.endBlock(t, eval)
}
- eval = l.nextBlock(t)
+ eval = testingEvaluator{l.nextBlock(t), l}
eval.txn(t, pay.Noted("again"))
eval.txn(t, nonpart.Noted("again"), "cannot change online/offline")
l.endBlock(t, eval)
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index 8ec3c6127..9d4f3b15d 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -40,6 +40,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -68,7 +69,7 @@ func (wl *wrappedLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, er
return wl.l.BlockHdr(rnd)
}
-func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) {
+func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
return wl.l.trackerEvalVerified(blk, accUpdatesLedger)
}
@@ -96,7 +97,11 @@ func (wl *wrappedLedger) GenesisProto() config.ConsensusParams {
return wl.l.GenesisProto()
}
-func getInitState() (genesisInitState InitState) {
+func (wl *wrappedLedger) GenesisAccounts() map[basics.Address]basics.AccountData {
+ return wl.l.GenesisAccounts()
+}
+
+func getInitState() (genesisInitState ledgercore.InitState) {
blk := bookkeeping.Block{}
blk.CurrentProtocol = protocol.ConsensusCurrentVersion
blk.RewardsPool = testPoolAddr
@@ -790,9 +795,10 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
defer wl.l.trackerMu.RUnlock()
for _, trk := range wl.l.trackers.trackers {
if au, ok := trk.(*accountUpdates); ok {
- au.waitAccountsWriting()
- minSave = trk.committedUpTo(rnd)
- au.waitAccountsWriting()
+ wl.l.trackers.waitAccountsWriting()
+ minSave, _ = trk.committedUpTo(rnd)
+ wl.l.trackers.committedUpTo(rnd)
+ wl.l.trackers.waitAccountsWriting()
if minSave < minMinSave {
minMinSave = minSave
}
@@ -804,9 +810,9 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
au = cleanTracker.(*accountUpdates)
cfg := config.GetDefaultLocal()
cfg.Archival = true
- au.initialize(cfg, "", au.initProto, wl.l.accts.initAccounts)
+ au.initialize(cfg)
} else {
- minSave = trk.committedUpTo(rnd)
+ minSave, _ = trk.committedUpTo(rnd)
if minSave < minMinSave {
minMinSave = minSave
}
@@ -817,7 +823,7 @@ func checkTrackers(t *testing.T, wl *wrappedLedger, rnd basics.Round) (basics.Ro
}
cleanTracker.close()
- err := cleanTracker.loadFromDisk(wl)
+ err := cleanTracker.loadFromDisk(wl, wl.l.trackers.dbRound)
require.NoError(t, err)
cleanTracker.close()
diff --git a/ledger/blockqueue_test.go b/ledger/blockqueue_test.go
index 9b69d277d..55f2a39ef 100644
--- a/ledger/blockqueue_test.go
+++ b/ledger/blockqueue_test.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -36,7 +37,7 @@ import (
func TestPutBlockTooOld(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisInitState, _, _ := genesis(10)
+ genesisInitState, _, _ := ledgertesting.Genesis(10)
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
const inMem = true
@@ -67,7 +68,7 @@ func TestPutBlockTooOld(t *testing.T) {
func TestGetEncodedBlockCert(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisInitState, _, _ := genesis(10)
+ genesisInitState, _, _ := ledgertesting.Genesis(10)
const inMem = true
cfg := config.GetDefaultLocal()
diff --git a/ledger/bulletin.go b/ledger/bulletin.go
index b7bb53ce7..1e95ee2ab 100644
--- a/ledger/bulletin.go
+++ b/ledger/bulletin.go
@@ -17,6 +17,8 @@
package ledger
import (
+ "context"
+ "database/sql"
"sync/atomic"
"github.com/algorand/go-deadlock"
@@ -78,7 +80,7 @@ func (b *bulletin) Wait(round basics.Round) chan struct{} {
return signal.signal
}
-func (b *bulletin) loadFromDisk(l ledgerForTracker) error {
+func (b *bulletin) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
b.pendingNotificationRequests = make(map[basics.Round]notifier)
b.latestRound = l.Latest()
return nil
@@ -90,7 +92,7 @@ func (b *bulletin) close() {
func (b *bulletin) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
}
-func (b *bulletin) committedUpTo(rnd basics.Round) basics.Round {
+func (b *bulletin) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -104,5 +106,22 @@ func (b *bulletin) committedUpTo(rnd basics.Round) basics.Round {
}
b.latestRound = rnd
- return rnd
+ return rnd, basics.Round(0)
+}
+
+func (b *bulletin) prepareCommit(dcc *deferredCommitContext) error {
+ return nil
+}
+
+func (b *bulletin) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (b *bulletin) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (b *bulletin) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+func (b *bulletin) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
new file mode 100644
index 000000000..e70b526da
--- /dev/null
+++ b/ledger/catchpointtracker.go
@@ -0,0 +1,901 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "database/sql"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync/atomic"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/logging/telemetryspec"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+// trieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory.
+// value was calibrated using BenchmarkCalibrateCacheNodeSize
+var trieCachedNodesCount = 9000
+
+// merkleCommitterNodesPerPage controls how many nodes will be stored in a single page
+// value was calibrated using BenchmarkCalibrateNodesPerPage
+var merkleCommitterNodesPerPage = int64(116)
+
+const (
+ // trieRebuildAccountChunkSize defines the number of accounts that would get read at a single chunk
+ // before added to the trie during trie construction
+ trieRebuildAccountChunkSize = 16384
+ // trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
+ trieRebuildCommitFrequency = 65536
+ // trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
+ // we attempt to commit them to disk while writing a batch of rounds balances to disk.
+ trieAccumulatedChangesFlush = 256
+)
+
+// TrieMemoryConfig is the memory configuration setup used for the merkle trie.
+var TrieMemoryConfig = merkletrie.MemoryConfig{
+ NodesCountPerPage: merkleCommitterNodesPerPage,
+ CachedNodesCount: trieCachedNodesCount,
+ PageFillFactor: 0.95,
+ MaxChildrenPagesThreshold: 64,
+}
+
+type catchpointTracker struct {
+ // dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated
+ dbDirectory string
+
+ // catchpointInterval is the configured interval at which the accountUpdates would generate catchpoint labels and catchpoint files.
+ catchpointInterval uint64
+
+ // catchpointFileHistoryLength defines how many catchpoint files we want to store back.
+ // 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
+ catchpointFileHistoryLength int
+
+ // archivalLedger determines whether the associated ledger was configured as archival ledger or not.
+ archivalLedger bool
+
+ // Prepared SQL statements for fast accounts DB lookups.
+ accountsq *accountsDbQueries
+
+ // log copied from ledger
+ log logging.Logger
+
+ // Connection to the database.
+ dbs db.Pair
+
+ // The last catchpoint label that was written to the database. Should always align with what's in the database.
+ // note that this is the last catchpoint *label* and not the catchpoint file.
+ lastCatchpointLabel string
+
+ // catchpointSlowWriting suggest to the accounts writer that it should finish writing up the catchpoint file ASAP.
+ // when this channel is closed, the accounts writer would try and complete the writing as soon as possible.
+ // otherwise, it would take it's time and perform periodic sleeps between chunks processing.
+ catchpointSlowWriting chan struct{}
+
+ // catchpointWriting help to synchronize the catchpoint file writing. When this atomic variable is 0, no writing is going on.
+ // Any non-zero value indicates a catchpoint being written, or scheduled to be written.
+ catchpointWriting int32
+
+ // The Trie tracking the current account balances. Always matches the balances that were
+ // written to the database.
+ balancesTrie *merkletrie.Trie
+
+ // catchpointsMu is the synchronization mutex for accessing the various non-static variables.
+ catchpointsMu deadlock.RWMutex
+
+ // roundDigest stores the digest of the block for every round starting with dbRound and every round after it.
+ roundDigest []crypto.Digest
+}
+
+// initialize initializes the catchpointTracker structure
+func (ct *catchpointTracker) initialize(cfg config.Local, dbPathPrefix string) {
+ ct.dbDirectory = filepath.Dir(dbPathPrefix)
+ ct.archivalLedger = cfg.Archival
+ switch cfg.CatchpointTracking {
+ case -1:
+ ct.catchpointInterval = 0
+ default:
+ // give a warning, then fall thought
+ logging.Base().Warnf("catchpointTracker: the CatchpointTracking field in the config.json file contains an invalid value (%d). The default value of 0 would be used instead.", cfg.CatchpointTracking)
+ fallthrough
+ case 0:
+ if ct.archivalLedger {
+ ct.catchpointInterval = cfg.CatchpointInterval
+ } else {
+ ct.catchpointInterval = 0
+ }
+ case 1:
+ ct.catchpointInterval = cfg.CatchpointInterval
+ }
+
+ ct.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength
+ if cfg.CatchpointFileHistoryLength < -1 {
+ ct.catchpointFileHistoryLength = -1
+ }
+}
+
+// GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database.
+func (ct *catchpointTracker) GetLastCatchpointLabel() string {
+ ct.catchpointsMu.RLock()
+ defer ct.catchpointsMu.RUnlock()
+ return ct.lastCatchpointLabel
+}
+
+// loadFromDisk loads the state of a tracker from persistent
+// storage. The ledger argument allows loadFromDisk to load
+// blocks from the database, or access its own state. The
+// ledgerForTracker interface abstracts away the details of
+// ledger internals so that individual trackers can be tested
+// in isolation.
+func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) (err error) {
+ ct.log = l.trackerLog()
+ ct.dbs = l.trackerDB()
+
+ ct.roundDigest = nil
+ ct.catchpointWriting = 0
+ // keep these channel closed if we're not generating catchpoint
+ ct.catchpointSlowWriting = make(chan struct{}, 1)
+ close(ct.catchpointSlowWriting)
+
+ err = ct.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err0 := ct.accountsInitializeHashes(ctx, tx, lastBalancesRound)
+ if err0 != nil {
+ return err0
+ }
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ ct.accountsq, err = accountsInitDbQueries(ct.dbs.Rdb.Handle, ct.dbs.Wdb.Handle)
+ if err != nil {
+ return
+ }
+
+ ct.lastCatchpointLabel, _, err = ct.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
+ if err != nil {
+ return
+ }
+
+ writingCatchpointRound, _, err := ct.accountsq.readCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint)
+ if err != nil {
+ return err
+ }
+ if writingCatchpointRound == 0 || !ct.catchpointEnabled() {
+ return nil
+ }
+ var dbRound basics.Round
+ // make sure that the database is at the desired round.
+ err = ct.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbRound, err = accountsRound(tx)
+ return
+ })
+ if err != nil {
+ return err
+ }
+ if dbRound != basics.Round(writingCatchpointRound) {
+ return nil
+ }
+
+ blk, err := l.Block(dbRound)
+ if err != nil {
+ return err
+ }
+ blockHeaderDigest := blk.Digest()
+
+ ct.generateCatchpoint(context.Background(), basics.Round(writingCatchpointRound), ct.lastCatchpointLabel, blockHeaderDigest, time.Duration(0))
+ return nil
+}
+
+// newBlock informs the tracker of a new block from round
+// rnd and a given ledgercore.StateDelta as produced by BlockEvaluator.
+func (ct *catchpointTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
+ ct.catchpointsMu.Lock()
+ defer ct.catchpointsMu.Unlock()
+ ct.roundDigest = append(ct.roundDigest, blk.Digest())
+}
+
+// committedUpTo implements the ledgerTracker interface for catchpointTracker.
+// The method informs the tracker that committedRound and all it's previous rounds have
+// been committed to the block database. The method returns what is the oldest round
+// number that can be removed from the blocks database as well as the lookback that this
+// tracker maintains.
+func (ct *catchpointTracker) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
+ return rnd, basics.Round(0)
+}
+
+func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ var hasMultipleIntermediateCatchpoint, hasIntermediateCatchpoint bool
+
+ newBase := dcr.oldBase + basics.Round(dcr.offset)
+
+ // check if there was a catchpoint between dcc.oldBase+lookback and dcc.oldBase+offset+lookback
+ if ct.catchpointInterval > 0 {
+ nextCatchpointRound := ((uint64(dcr.oldBase+dcr.lookback) + ct.catchpointInterval) / ct.catchpointInterval) * ct.catchpointInterval
+
+ if nextCatchpointRound < uint64(dcr.oldBase+dcr.lookback)+dcr.offset {
+ mostRecentCatchpointRound := (uint64(committedRound) / ct.catchpointInterval) * ct.catchpointInterval
+ newBase = basics.Round(nextCatchpointRound) - dcr.lookback
+ if mostRecentCatchpointRound > nextCatchpointRound {
+ hasMultipleIntermediateCatchpoint = true
+ // skip if there is more than one catchpoint in queue
+ newBase = basics.Round(mostRecentCatchpointRound) - dcr.lookback
+ }
+ hasIntermediateCatchpoint = true
+ }
+ }
+
+ // if we're still writing the previous balances, we can't move forward yet.
+ if ct.IsWritingCatchpointFile() {
+ // if we hit this path, it means that we're still writing a catchpoint.
+ // see if the new delta range contains another catchpoint.
+ if hasIntermediateCatchpoint {
+ // check if we're already attempting to perform fast-writing.
+ select {
+ case <-ct.catchpointSlowWriting:
+ // yes, we're already doing fast-writing.
+ default:
+ // no, we're not yet doing fast writing, make it so.
+ close(ct.catchpointSlowWriting)
+ }
+ }
+ return nil
+ }
+
+ dcr.offset = uint64(newBase - dcr.oldBase)
+
+ // check to see if this is a catchpoint round
+ dcr.isCatchpointRound = ct.isCatchpointRound(dcr.offset, dcr.oldBase, dcr.lookback)
+
+ if dcr.isCatchpointRound && ct.archivalLedger {
+ // store non-zero ( all ones ) into the catchpointWriting atomic variable to indicate that a catchpoint is being written ( or, queued to be written )
+ atomic.StoreInt32(&ct.catchpointWriting, int32(-1))
+ ct.catchpointSlowWriting = make(chan struct{}, 1)
+ if hasMultipleIntermediateCatchpoint {
+ close(ct.catchpointSlowWriting)
+ }
+ }
+
+ dcr.catchpointWriting = &ct.catchpointWriting
+
+ return dcr
+}
+
+// prepareCommit, commitRound and postCommit are called when it is time to commit tracker's data.
+// If an error returned the process is aborted.
+func (ct *catchpointTracker) prepareCommit(dcc *deferredCommitContext) error {
+ ct.catchpointsMu.RLock()
+ defer ct.catchpointsMu.RUnlock()
+ if dcc.isCatchpointRound {
+ dcc.committedRoundDigest = ct.roundDigest[dcc.offset+uint64(dcc.lookback)-1]
+ }
+ return nil
+}
+
+func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) {
+ treeTargetRound := basics.Round(0)
+ offset := dcc.offset
+ dbRound := dcc.oldBase
+
+ defer func() {
+ if err != nil {
+ if dcc.isCatchpointRound && ct.archivalLedger {
+ atomic.StoreInt32(&ct.catchpointWriting, 0)
+ }
+ }
+ }()
+
+ if ct.catchpointEnabled() {
+ var mc *MerkleCommitter
+ mc, err = MakeMerkleCommitter(tx, false)
+ if err != nil {
+ return
+ }
+
+ var trie *merkletrie.Trie
+ if ct.balancesTrie == nil {
+ trie, err = merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ if err != nil {
+ ct.log.Warnf("unable to create merkle trie during committedUpTo: %v", err)
+ return err
+ }
+ ct.balancesTrie = trie
+ } else {
+ ct.balancesTrie.SetCommitter(mc)
+ }
+ treeTargetRound = dbRound + basics.Round(offset)
+ }
+
+ if dcc.updateStats {
+ dcc.stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano())
+ }
+
+ err = ct.accountsUpdateBalances(dcc.compactAccountDeltas)
+ if err != nil {
+ return err
+ }
+
+ if dcc.updateStats {
+ now := time.Duration(time.Now().UnixNano())
+ dcc.stats.MerkleTrieUpdateDuration = now - dcc.stats.MerkleTrieUpdateDuration
+ }
+
+ err = updateAccountsHashRound(tx, treeTargetRound)
+ if err != nil {
+ return err
+ }
+
+ if dcc.isCatchpointRound {
+ dcc.trieBalancesHash, err = ct.balancesTrie.RootHash()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+ var err error
+ if dcc.isCatchpointRound {
+ dcc.catchpointLabel, err = ct.accountsCreateCatchpointLabel(dcc.newBase+dcc.lookback, dcc.roundTotals, dcc.committedRoundDigest, dcc.trieBalancesHash)
+ if err != nil {
+ ct.log.Warnf("commitRound : unable to create a catchpoint label: %v", err)
+ }
+ }
+ if ct.balancesTrie != nil {
+ _, err = ct.balancesTrie.Evict(false)
+ if err != nil {
+ ct.log.Warnf("merkle trie failed to evict: %v", err)
+ }
+ }
+
+ if dcc.isCatchpointRound && dcc.catchpointLabel != "" {
+ ct.lastCatchpointLabel = dcc.catchpointLabel
+ }
+ dcc.updatingBalancesDuration = time.Since(dcc.flushTime)
+
+ if dcc.updateStats {
+ dcc.stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano())
+ }
+
+ ct.catchpointsMu.Lock()
+
+ ct.roundDigest = ct.roundDigest[dcc.offset:]
+
+ ct.catchpointsMu.Unlock()
+
+ if dcc.isCatchpointRound && ct.archivalLedger && dcc.catchpointLabel != "" {
+ // generate the catchpoint file. This need to be done inline so that it will block any new accounts that from being written.
+ // the generateCatchpoint expects that the accounts data would not be modified in the background during it's execution.
+ ct.generateCatchpoint(ctx, basics.Round(dcc.offset)+dcc.oldBase+dcc.lookback, dcc.catchpointLabel, dcc.committedRoundDigest, dcc.updatingBalancesDuration)
+ }
+ // in scheduleCommit, we expect that this function to update the catchpointWriting when
+ // it's on a catchpoint round and it's an archival ledger. Doing this in a deferred function
+ // here would prevent us from "forgetting" to update this variable later on.
+ if dcc.isCatchpointRound && ct.archivalLedger {
+ atomic.StoreInt32(dcc.catchpointWriting, 0)
+ }
+}
+
+// handleUnorderedCommit is a special method for handling deferred commits that are out of order.
+// Tracker might update own state in this case. For example, account updates tracker cancels
+// scheduled catchpoint writing that deferred commit.
+func (ct *catchpointTracker) handleUnorderedCommit(offset uint64, dbRound basics.Round, lookback basics.Round) {
+ // if this is an archival ledger, we might need to update the catchpointWriting variable.
+ if ct.archivalLedger {
+ // determine if this was a catchpoint round
+ if ct.isCatchpointRound(offset, dbRound, lookback) {
+ // it was a catchpoint round, so update the catchpointWriting to indicate that we're done.
+ atomic.StoreInt32(&ct.catchpointWriting, 0)
+ }
+ }
+}
+
+// close terminates the tracker, reclaiming any resources
+// like open database connections or goroutines. close may
+// be called even if loadFromDisk() is not called or does
+// not succeed.
+func (ct *catchpointTracker) close() {
+
+}
+
+// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
+func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas) (err error) {
+ if !ct.catchpointEnabled() {
+ return nil
+ }
+ var added, deleted bool
+ accumulatedChanges := 0
+
+ for i := 0; i < accountsDeltas.len(); i++ {
+ addr, delta := accountsDeltas.getByIdx(i)
+ if !delta.old.accountData.IsZero() {
+ deleteHash := accountHashBuilder(addr, delta.old.accountData, protocol.Encode(&delta.old.accountData))
+ deleted, err = ct.balancesTrie.Delete(deleteHash)
+ if err != nil {
+ return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
+ }
+ if !deleted {
+ ct.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
+ } else {
+ accumulatedChanges++
+ }
+ }
+
+ if !delta.new.IsZero() {
+ addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new))
+ added, err = ct.balancesTrie.Add(addHash)
+ if err != nil {
+ return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
+ }
+ if !added {
+ ct.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
+ } else {
+ accumulatedChanges++
+ }
+ }
+ }
+ if accumulatedChanges >= trieAccumulatedChangesFlush {
+ accumulatedChanges = 0
+ _, err = ct.balancesTrie.Commit()
+ if err != nil {
+ return
+ }
+ }
+
+ // write it all to disk.
+ if accumulatedChanges > 0 {
+ _, err = ct.balancesTrie.Commit()
+ }
+
+ return
+}
+
+// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
+// to avoid memory pressure until the catchpoint file writing is complete.
+func (ct *catchpointTracker) IsWritingCatchpointFile() bool {
+ return atomic.LoadInt32(&ct.catchpointWriting) != 0
+}
+
+// isCatchpointRound returns true if the round at the given offset, dbRound with the provided lookback should be a catchpoint round.
+func (ct *catchpointTracker) isCatchpointRound(offset uint64, dbRound basics.Round, lookback basics.Round) bool {
+ return ((offset + uint64(lookback+dbRound)) > 0) && (ct.catchpointInterval != 0) && ((uint64((offset + uint64(lookback+dbRound))) % ct.catchpointInterval) == 0)
+}
+
+// accountsCreateCatchpointLabel creates a catchpoint label and write it.
+func (ct *catchpointTracker) accountsCreateCatchpointLabel(committedRound basics.Round, totals ledgercore.AccountTotals, ledgerBlockDigest crypto.Digest, trieBalancesHash crypto.Digest) (label string, err error) {
+ cpLabel := ledgercore.MakeCatchpointLabel(committedRound, ledgerBlockDigest, trieBalancesHash, totals)
+ label = cpLabel.String()
+ _, err = ct.accountsq.writeCatchpointStateString(context.Background(), catchpointStateLastCatchpoint, label)
+ return
+}
+
+// generateCatchpoint generates a single catchpoint file
+func (ct *catchpointTracker) generateCatchpoint(ctx context.Context, committedRound basics.Round, label string, committedRoundDigest crypto.Digest, updatingBalancesDuration time.Duration) {
+ beforeGeneratingCatchpointTime := time.Now()
+ catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
+ BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
+ }
+
+ // the retryCatchpointCreation is used to repeat the catchpoint file generation in case the node crashed / aborted during startup
+ // before the catchpoint file generation could be completed.
+ retryCatchpointCreation := false
+ ct.log.Debugf("accountUpdates: generateCatchpoint: generating catchpoint for round %d", committedRound)
+ defer func() {
+ if !retryCatchpointCreation {
+ // clear the writingCatchpoint flag
+ _, err := ct.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(0))
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint unable to clear catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
+ }
+ }
+ }()
+
+ _, err := ct.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(committedRound))
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint unable to write catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
+ return
+ }
+
+ relCatchpointFileName := filepath.Join("catchpoints", catchpointRoundToPath(committedRound))
+ absCatchpointFileName := filepath.Join(ct.dbDirectory, relCatchpointFileName)
+
+ more := true
+ const shortChunkExecutionDuration = 50 * time.Millisecond
+ const longChunkExecutionDuration = 1 * time.Second
+ var chunkExecutionDuration time.Duration
+ select {
+ case <-ct.catchpointSlowWriting:
+ chunkExecutionDuration = longChunkExecutionDuration
+ default:
+ chunkExecutionDuration = shortChunkExecutionDuration
+ }
+
+ var catchpointWriter *catchpointWriter
+ start := time.Now()
+ ledgerGeneratecatchpointCount.Inc(nil)
+ err = ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
+ catchpointWriter = makeCatchpointWriter(ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label)
+ for more {
+ stepCtx, stepCancelFunction := context.WithTimeout(ctx, chunkExecutionDuration)
+ writeStepStartTime := time.Now()
+ more, err = catchpointWriter.WriteStep(stepCtx)
+ // accumulate the actual time we've spent writing in this step.
+ catchpointGenerationStats.CPUTime += uint64(time.Since(writeStepStartTime).Nanoseconds())
+ stepCancelFunction()
+ if more && err == nil {
+ // we just wrote some data, but there is more to be written.
+ // go to sleep for while.
+ // before going to sleep, extend the transaction timeout so that we won't get warnings:
+ _, err0 := db.ResetTransactionWarnDeadline(dbCtx, tx, time.Now().Add(1*time.Second))
+ if err0 != nil {
+ ct.log.Warnf("catchpointTracker: generateCatchpoint: failed to reset transaction warn deadline : %v", err0)
+ }
+ select {
+ case <-time.After(100 * time.Millisecond):
+ // increase the time slot allocated for writing the catchpoint, but stop when we get to the longChunkExecutionDuration limit.
+ // this would allow the catchpoint writing speed to ramp up while still leaving some cpu available.
+ chunkExecutionDuration *= 2
+ if chunkExecutionDuration > longChunkExecutionDuration {
+ chunkExecutionDuration = longChunkExecutionDuration
+ }
+ case <-ctx.Done():
+ retryCatchpointCreation = true
+ err2 := catchpointWriter.Abort()
+ if err2 != nil {
+ return fmt.Errorf("error removing catchpoint file : %v", err2)
+ }
+ return nil
+ case <-ct.catchpointSlowWriting:
+ chunkExecutionDuration = longChunkExecutionDuration
+ }
+ }
+ if err != nil {
+ err = fmt.Errorf("unable to create catchpoint : %v", err)
+ err2 := catchpointWriter.Abort()
+ if err2 != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
+ }
+ return
+ }
+ }
+ return
+ })
+ ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
+
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: %v", err)
+ return
+ }
+ if catchpointWriter == nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: nil catchpointWriter")
+ return
+ }
+
+ err = ct.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
+ if err != nil {
+ ct.log.Warnf("accountUpdates: generateCatchpoint: unable to save catchpoint: %v", err)
+ return
+ }
+ catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
+ catchpointGenerationStats.WritingDuration = uint64(time.Since(beforeGeneratingCatchpointTime).Nanoseconds())
+ catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
+ catchpointGenerationStats.CatchpointLabel = catchpointWriter.GetCatchpoint()
+ ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
+ ct.log.With("writingDuration", catchpointGenerationStats.WritingDuration).
+ With("CPUTime", catchpointGenerationStats.CPUTime).
+ With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
+ With("accountsCount", catchpointGenerationStats.AccountsCount).
+ With("fileSize", catchpointGenerationStats.FileSize).
+ With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
+ Infof("Catchpoint file was generated")
+}
+
+// catchpointRoundToPath calculate the catchpoint file path for a given round
+func catchpointRoundToPath(rnd basics.Round) string {
+ irnd := int64(rnd) / 256
+ outStr := ""
+ for irnd > 0 {
+ outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
+ irnd = irnd / 256
+ }
+ outStr = filepath.Join(outStr, strconv.FormatInt(int64(rnd), 10)+".catchpoint")
+ return outStr
+}
+
+// saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
+// after a successful insert operation to the database, it would delete up to 2 old entries, as needed.
+// deleting 2 entries while inserting single entry allow us to adjust the size of the backing storage and have the
+// database and storage realign.
+func (ct *catchpointTracker) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) {
+ if ct.catchpointFileHistoryLength != 0 {
+ err = ct.accountsq.storeCatchpoint(context.Background(), round, fileName, catchpoint, fileSize)
+ if err != nil {
+ ct.log.Warnf("accountUpdates: saveCatchpoint: unable to save catchpoint: %v", err)
+ return
+ }
+ } else {
+ err = os.Remove(fileName)
+ if err != nil {
+ ct.log.Warnf("accountUpdates: saveCatchpoint: unable to remove file (%s): %v", fileName, err)
+ return
+ }
+ }
+ if ct.catchpointFileHistoryLength == -1 {
+ return
+ }
+ var filesToDelete map[basics.Round]string
+ filesToDelete, err = ct.accountsq.getOldestCatchpointFiles(context.Background(), 2, ct.catchpointFileHistoryLength)
+ if err != nil {
+ return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err)
+ }
+ for round, fileToDelete := range filesToDelete {
+ absCatchpointFileName := filepath.Join(ct.dbDirectory, fileToDelete)
+ err = os.Remove(absCatchpointFileName)
+ if err == nil || os.IsNotExist(err) {
+ // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
+ err = nil
+ } else {
+ // we can't delete the file, abort -
+ return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
+ }
+ err = ct.accountsq.storeCatchpoint(context.Background(), round, "", "", 0)
+ if err != nil {
+ return fmt.Errorf("unable to delete old catchpoint entry '%s' : %v", fileToDelete, err)
+ }
+ }
+ return
+}
+
+// GetCatchpointStream returns a ReadCloseSizer to the catchpoint file associated with the provided round
+func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error) {
+ dbFileName := ""
+ fileSize := int64(0)
+ start := time.Now()
+ ledgerGetcatchpointCount.Inc(nil)
+ err := ct.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ dbFileName, _, fileSize, err = getCatchpoint(tx, round)
+ return
+ })
+ ledgerGetcatchpointMicros.AddMicrosecondsSince(start, nil)
+ if err != nil && err != sql.ErrNoRows {
+ // we had some sql error.
+ return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err)
+ }
+ if dbFileName != "" {
+ catchpointPath := filepath.Join(ct.dbDirectory, dbFileName)
+ file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
+ if err == nil && file != nil {
+ return &readCloseSizer{ReadCloser: file, size: fileSize}, nil
+ }
+ // else, see if this is a file-not-found error
+ if os.IsNotExist(err) {
+ // the database told us that we have this file.. but we couldn't find it.
+ // delete it from the database.
+ err := ct.saveCatchpointFile(round, "", 0, "")
+ if err != nil {
+ ct.log.Warnf("accountUpdates: getCatchpointStream: unable to delete missing catchpoint entry: %v", err)
+ return nil, err
+ }
+
+ return nil, ledgercore.ErrNoEntry{}
+ }
+ // it's some other error.
+ return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to open catchpoint file '%s' %v", catchpointPath, err)
+ }
+
+ // if the database doesn't know about that round, see if we have that file anyway:
+ fileName := filepath.Join("catchpoints", catchpointRoundToPath(round))
+ catchpointPath := filepath.Join(ct.dbDirectory, fileName)
+ file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
+ if err == nil && file != nil {
+ // great, if found that we should have had this in the database.. add this one now :
+ fileInfo, err := file.Stat()
+ if err != nil {
+ // we couldn't get the stat, so just return with the file.
+ return &readCloseSizer{ReadCloser: file, size: -1}, nil
+ }
+
+ err = ct.saveCatchpointFile(round, fileName, fileInfo.Size(), "")
+ if err != nil {
+ ct.log.Warnf("accountUpdates: getCatchpointStream: unable to save missing catchpoint entry: %v", err)
+ }
+ return &readCloseSizer{ReadCloser: file, size: fileInfo.Size()}, nil
+ }
+ return nil, ledgercore.ErrNoEntry{}
+}
+
+// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
+// once all the files have been deleted, it would go ahead and remove the entries from the table.
+func deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries, dbDirectory string) (err error) {
+ catchpointsFilesChunkSize := 50
+ for {
+ fileNames, err := dbQueries.getOldestCatchpointFiles(ctx, catchpointsFilesChunkSize, 0)
+ if err != nil {
+ return err
+ }
+ if len(fileNames) == 0 {
+ break
+ }
+
+ for round, fileName := range fileNames {
+ absCatchpointFileName := filepath.Join(dbDirectory, fileName)
+ err = os.Remove(absCatchpointFileName)
+ if err == nil || os.IsNotExist(err) {
+ // it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
+ } else {
+ // we can't delete the file, abort -
+ return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
+ }
+ // clear the entry from the database
+ err = dbQueries.storeCatchpoint(ctx, round, "", "", 0)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
+func accountHashBuilder(addr basics.Address, accountData basics.AccountData, encodedAccountData []byte) []byte {
+ hash := make([]byte, 4+crypto.DigestSize)
+ // write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
+ // recent updated to be in-cache, and "older" nodes will be left alone.
+ for i, rewards := 3, accountData.RewardsBase; i >= 0; i, rewards = i-1, rewards>>8 {
+ // the following takes the rewards & 255 -> hash[i]
+ hash[i] = byte(rewards)
+ }
+ entryHash := crypto.Hash(append(addr[:], encodedAccountData[:]...))
+ copy(hash[4:], entryHash[:])
+ return hash[:]
+}
+
+func (ct *catchpointTracker) catchpointEnabled() bool {
+ return ct.catchpointInterval != 0
+}
+
+// accountsInitializeHashes initializes account hashes.
+// as part of the initialization, it tests if a hash table matches to account base and updates the former.
+func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *sql.Tx, rnd basics.Round) error {
+ hashRound, err := accountsHashRound(tx)
+ if err != nil {
+ return err
+ }
+
+ if hashRound != rnd {
+ // if the hashed round is different then the base round, something was modified, and the accounts aren't in sync
+ // with the hashes.
+ err = resetAccountHashes(tx)
+ if err != nil {
+ return err
+ }
+ // if catchpoint is disabled on this node, we could complete the initialization right here.
+ if !ct.catchpointEnabled() {
+ return nil
+ }
+ }
+
+ // create the merkle trie for the balances
+ committer, err := MakeMerkleCommitter(tx, false)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
+ }
+
+ trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
+ }
+
+ // we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie.
+ // we can figure this out by examining the hash of the root:
+ rootHash, err := trie.RootHash()
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
+ }
+
+ if rootHash.IsZero() {
+ ct.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
+ accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ defer accountBuilderIt.Close(ctx)
+ startTrieBuildTime := time.Now()
+ accountsCount := 0
+ lastRebuildTime := startTrieBuildTime
+ pendingAccounts := 0
+ totalOrderedAccounts := 0
+ for {
+ accts, processedRows, err := accountBuilderIt.Next(ctx)
+ if err == sql.ErrNoRows {
+ // the account builder would return sql.ErrNoRows when no more data is available.
+ break
+ } else if err != nil {
+ return err
+ }
+
+ if len(accts) > 0 {
+ accountsCount += len(accts)
+ pendingAccounts += len(accts)
+ for _, acct := range accts {
+ added, err := trie.Add(acct.digest)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
+ }
+ if !added {
+ ct.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.digest), acct.address)
+ }
+ }
+
+ if pendingAccounts >= trieRebuildCommitFrequency {
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ }
+ pendingAccounts = 0
+ }
+
+ if time.Since(lastRebuildTime) > 5*time.Second {
+ // let the user know that the trie is still being rebuilt.
+ ct.log.Infof("accountsInitialize still building the trie, and processed so far %d accounts", accountsCount)
+ lastRebuildTime = time.Now()
+ }
+ } else if processedRows > 0 {
+ totalOrderedAccounts += processedRows
+ // if it's not ordered, we can ignore it for now; we'll just increase the counters and emit logs periodically.
+ if time.Since(lastRebuildTime) > 5*time.Second {
+ // let the user know that the trie is still being rebuilt.
+ ct.log.Infof("accountsInitialize still building the trie, and hashed so far %d accounts", totalOrderedAccounts)
+ lastRebuildTime = time.Now()
+ }
+ }
+ }
+
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ }
+
+ // we've just updated the merkle trie, update the hashRound to reflect that.
+ err = updateAccountsHashRound(tx, rnd)
+ if err != nil {
+ return fmt.Errorf("accountsInitialize was unable to update the account hash round to %d: %v", rnd, err)
+ }
+
+ ct.log.Infof("accountsInitialize rebuilt the merkle trie with %d entries in %v", accountsCount, time.Since(startTrieBuildTime))
+ }
+ ct.balancesTrie = trie
+ return nil
+}
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
new file mode 100644
index 000000000..64db5f275
--- /dev/null
+++ b/ledger/catchpointtracker_test.go
@@ -0,0 +1,415 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestIsWritingCatchpointFile(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ct := &catchpointTracker{}
+
+ ct.catchpointWriting = -1
+ ans := ct.IsWritingCatchpointFile()
+ require.True(t, ans)
+
+ ct.catchpointWriting = 0
+ ans = ct.IsWritingCatchpointFile()
+ require.False(t, ans)
+}
+
+func newCatchpointTracker(tb testing.TB, l *mockLedgerForTracker, conf config.Local, dbPathPrefix string) *catchpointTracker {
+ au := &accountUpdates{}
+ ct := &catchpointTracker{}
+ au.initialize(conf)
+ ct.initialize(conf, dbPathPrefix)
+ _, err := trackerDBInitialize(l, ct.catchpointEnabled(), dbPathPrefix)
+ require.NoError(tb, err)
+
+ err = l.trackers.initialize(l, []ledgerTracker{au, ct}, conf)
+ require.NoError(tb, err)
+ err = l.trackers.loadFromDisk(l)
+ require.NoError(tb, err)
+ return ct
+}
+
+func TestGetCatchpointStream(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ ct := newCatchpointTracker(t, ml, conf, ".")
+ defer ct.close()
+
+ filesToCreate := 4
+
+ temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), "catchpoints")
+ require.NoError(t, err)
+ defer func() {
+ os.RemoveAll(temporaryDirectroy)
+ }()
+ catchpointsDirectory := filepath.Join(temporaryDirectroy, "catchpoints")
+ err = os.Mkdir(catchpointsDirectory, 0777)
+ require.NoError(t, err)
+
+ ct.dbDirectory = temporaryDirectroy
+
+ // Create the catchpoint files with dummy data
+ for i := 0; i < filesToCreate; i++ {
+ fileName := filepath.Join("catchpoints", fmt.Sprintf("%d.catchpoint", i))
+ data := []byte{byte(i), byte(i + 1), byte(i + 2)}
+ err = ioutil.WriteFile(filepath.Join(temporaryDirectroy, fileName), data, 0666)
+ require.NoError(t, err)
+
+ // Store the catchpoint into the database
+ err := ct.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fileName, "", int64(len(data)))
+ require.NoError(t, err)
+ }
+
+ dataRead := make([]byte, 3)
+ var n int
+
+ // File on disk, and database has the record
+ reader, err := ct.GetCatchpointStream(basics.Round(1))
+ n, err = reader.Read(dataRead)
+ require.NoError(t, err)
+ require.Equal(t, 3, n)
+ outData := []byte{1, 2, 3}
+ require.Equal(t, outData, dataRead)
+ len, err := reader.Size()
+ require.NoError(t, err)
+ require.Equal(t, int64(3), len)
+
+ // File deleted, but record in the database
+ err = os.Remove(filepath.Join(temporaryDirectroy, "catchpoints", "2.catchpoint"))
+ reader, err = ct.GetCatchpointStream(basics.Round(2))
+ require.Equal(t, ledgercore.ErrNoEntry{}, err)
+ require.Nil(t, reader)
+
+ // File on disk, but database lost the record
+ err = ct.accountsq.storeCatchpoint(context.Background(), basics.Round(3), "", "", 0)
+ reader, err = ct.GetCatchpointStream(basics.Round(3))
+ n, err = reader.Read(dataRead)
+ require.NoError(t, err)
+ require.Equal(t, 3, n)
+ outData = []byte{3, 4, 5}
+ require.Equal(t, outData, dataRead)
+
+ err = deleteStoredCatchpoints(context.Background(), ct.accountsq, ct.dbDirectory)
+ require.NoError(t, err)
+}
+
+// TestAcctUpdatesDeleteStoredCatchpoints - The goal of this test is to verify that the deleteStoredCatchpoints function works correctly.
+// it doing so by filling up the storedcatchpoints with dummy catchpoint file entries, as well as creating these dummy files on disk.
+// ( the term dummy is only because these aren't real catchpoint files, but rather a zero-length file ). Then, the test call the function
+// and ensures that it did not errored, the catchpoint files were correctly deleted, and that deleteStoredCatchpoints contains no more
+// entries.
+func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ ct := newCatchpointTracker(t, ml, conf, ".")
+ defer ct.close()
+
+ dummyCatchpointFilesToCreate := 42
+
+ for i := 0; i < dummyCatchpointFilesToCreate; i++ {
+ f, err := os.Create(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+ }
+
+ for i := 0; i < dummyCatchpointFilesToCreate; i++ {
+ err := ct.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fmt.Sprintf("./dummy_catchpoint_file-%d", i), "", 0)
+ require.NoError(t, err)
+ }
+ err := deleteStoredCatchpoints(context.Background(), ct.accountsq, ct.dbDirectory)
+ require.NoError(t, err)
+
+ for i := 0; i < dummyCatchpointFilesToCreate; i++ {
+ // ensure that all the files were deleted.
+ _, err := os.Open(fmt.Sprintf("./dummy_catchpoint_file-%d", i))
+ require.True(t, os.IsNotExist(err))
+ }
+ fileNames, err := ct.accountsq.getOldestCatchpointFiles(context.Background(), dummyCatchpointFilesToCreate, 0)
+ require.NoError(t, err)
+ require.Equal(t, 0, len(fileNames))
+}
+
+func BenchmarkLargeCatchpointWriting(b *testing.B) {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(5, true)}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ ml := makeMockLedgerForTracker(b, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ct := catchpointTracker{}
+ ct.initialize(cfg, ".")
+
+ temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), "catchpoints")
+ require.NoError(b, err)
+ defer func() {
+ os.RemoveAll(temporaryDirectroy)
+ }()
+ catchpointsDirectory := filepath.Join(temporaryDirectroy, "catchpoints")
+ err = os.Mkdir(catchpointsDirectory, 0777)
+ require.NoError(b, err)
+
+ ct.dbDirectory = temporaryDirectroy
+
+ err = ct.loadFromDisk(ml, 0)
+ require.NoError(b, err)
+ defer ct.close()
+
+ // at this point, the database was created. We want to fill the accounts data
+ accountsNumber := 6000000 * b.N
+ err = ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ for i := 0; i < accountsNumber-5-2; { // subtract the account we've already created above, plus the sink/reward
+ var updates compactAccountDeltas
+ for k := 0; i < accountsNumber-5-2 && k < 1024; k++ {
+ addr := ledgertesting.RandomAddress()
+ acctData := basics.AccountData{}
+ acctData.MicroAlgos.Raw = 1
+ updates.upsert(addr, accountDelta{new: acctData})
+ i++
+ }
+
+ _, err = accountsNewRound(tx, updates, nil, proto, basics.Round(1))
+ if err != nil {
+ return
+ }
+ }
+
+ return updateAccountsHashRound(tx, 1)
+ })
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ ct.generateCatchpoint(context.Background(), basics.Round(0), "0#ABCD", crypto.Digest{}, time.Second)
+ b.StopTimer()
+ b.ReportMetric(float64(accountsNumber), "accounts")
+}
+
+func TestReproducibleCatchpointLabels(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
+ t.Skip("This test is too slow on ARM and causes travis builds to time out")
+ }
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = 32
+ protoParams.SeedLookback = 2
+ protoParams.SeedRefreshInterval = 8
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ rewardsLevels := []uint64{0}
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 100 * 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ ml := makeMockLedgerForTracker(t, false, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchpointInterval = 50
+ cfg.CatchpointTracking = 1
+ ct := newCatchpointTracker(t, ml, cfg, ".")
+ au := ml.trackers.accts
+ defer ct.close()
+
+ rewardLevel := uint64(0)
+
+ const testCatchpointLabelsCount = 5
+
+ // lastCreatableID stores asset or app max used index to get rid of conflicts
+ lastCreatableID := crypto.RandUint64() % 512
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+ catchpointLabels := make(map[basics.Round]string)
+ ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
+ roundDeltas := make(map[basics.Round]ledgercore.StateDelta)
+ for i := basics.Round(1); i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
+ rewardLevelDelta := crypto.RandUint64() % 5
+ rewardLevel += rewardLevelDelta
+ var updates ledgercore.AccountDeltas
+ var totals map[basics.Address]basics.AccountData
+ base := accts[i-1]
+ updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ prevTotals, err := au.Totals(basics.Round(i - 1))
+ require.NoError(t, err)
+
+ newPool := totals[testPoolAddr]
+ newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardLevel, protoParams, base, prevTotals)
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = testProtocolVersion
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
+ delta.Totals = newTotals
+
+ ml.trackers.newBlock(blk, delta)
+ ml.trackers.committedUpTo(i)
+ ml.addMockBlock(blockEntry{block: blk}, delta)
+ accts = append(accts, totals)
+ rewardsLevels = append(rewardsLevels, rewardLevel)
+ roundDeltas[i] = delta
+
+ // if this is a catchpoint round, save the label.
+ if uint64(i)%cfg.CatchpointInterval == 0 {
+ ml.trackers.waitAccountsWriting()
+ catchpointLabels[i] = ct.GetLastCatchpointLabel()
+ ledgerHistory[i] = ml.fork(t)
+ defer ledgerHistory[i].Close()
+ }
+ }
+
+ // test in revese what happens when we try to repeat the exact same blocks.
+ // start off with the catchpoint before the last one
+ startingRound := basics.Round((testCatchpointLabelsCount - 1) * cfg.CatchpointInterval)
+ for ; startingRound > basics.Round(cfg.CatchpointInterval); startingRound -= basics.Round(cfg.CatchpointInterval) {
+ au.close()
+ ml2 := ledgerHistory[startingRound]
+
+ ct := newCatchpointTracker(t, ml2, cfg, ".")
+ for i := startingRound + 1; i <= basics.Round(testCatchpointLabelsCount*cfg.CatchpointInterval); i++ {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(i),
+ },
+ }
+ blk.RewardsLevel = rewardsLevels[i]
+ blk.CurrentProtocol = testProtocolVersion
+ delta := roundDeltas[i]
+ ml2.trackers.newBlock(blk, delta)
+ ml2.trackers.committedUpTo(i)
+
+ // if this is a catchpoint round, check the label.
+ if uint64(i)%cfg.CatchpointInterval == 0 {
+ ml2.trackers.waitAccountsWriting()
+ require.Equal(t, catchpointLabels[i], ct.GetLastCatchpointLabel())
+ }
+ }
+ }
+
+ // test to see that after loadFromDisk, all the tracker content is lost ( as expected )
+ require.NotZero(t, len(ct.roundDigest))
+ require.NoError(t, ct.loadFromDisk(ml, ml.Latest()))
+ require.Zero(t, len(ct.roundDigest))
+ require.Zero(t, ct.catchpointWriting)
+ select {
+ case _, closed := <-ct.catchpointSlowWriting:
+ require.False(t, closed)
+ default:
+ require.FailNow(t, "The catchpointSlowWriting should have been a closed channel; it seems to be a nil ?!")
+ }
+}
+
+func TestCatchpointTrackerPrepareCommit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ct := &catchpointTracker{}
+ const maxOffset = 40
+ const maxLookback = 320
+ ct.roundDigest = make([]crypto.Digest, maxOffset+maxLookback)
+ for i := 0; i < len(ct.roundDigest); i++ {
+ ct.roundDigest[i] = crypto.Hash([]byte{byte(i), byte(i / 256)})
+ }
+ dcc := &deferredCommitContext{}
+ for offset := uint64(1); offset < maxOffset; offset++ {
+ dcc.offset = offset
+ for lookback := basics.Round(0); lookback < maxLookback; lookback += 20 {
+ dcc.lookback = lookback
+ for _, isCatchpointRound := range []bool{false, true} {
+ dcc.isCatchpointRound = isCatchpointRound
+ require.NoError(t, ct.prepareCommit(dcc))
+ if isCatchpointRound {
+ expectedRound := offset + uint64(lookback) - 1
+ expectedHash := crypto.Hash([]byte{byte(expectedRound), byte(expectedRound / 256)})
+ require.Equal(t, expectedHash[:], dcc.committedRoundDigest[:])
+ }
+ }
+ }
+ }
+}
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 26fa65f71..349748176 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -294,7 +294,7 @@ func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (e
func (cw *catchpointWriter) readHeaderFromDatabase(ctx context.Context, tx *sql.Tx) (err error) {
var header CatchpointFileHeader
- header.BalancesRound, _, err = accountsRound(tx)
+ header.BalancesRound, err = accountsRound(tx)
if err != nil {
return
}
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index b3882f1f7..0b14f5524 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -34,6 +34,8 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -182,18 +184,16 @@ func TestBasicCatchpointWriter(t *testing.T) {
delete(config.Consensus, testProtocolVersion)
os.RemoveAll(temporaryDirectroy)
}()
- accts := randomAccounts(300, false)
+ accts := ledgertesting.RandomAccounts(300, false)
ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
defer ml.Close()
- au := &accountUpdates{}
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au.initialize(conf, ".", protoParams, accts)
- defer au.close()
- err := au.loadFromDisk(ml)
+ au := newAcctUpdates(t, ml, conf, ".")
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
fileName := filepath.Join(temporaryDirectroy, "15.catchpoint")
@@ -283,17 +283,15 @@ func TestFullCatchpointWriter(t *testing.T) {
os.RemoveAll(temporaryDirectroy)
}()
- accts := randomAccounts(BalancesPerCatchpointFileChunk*3, false)
+ accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
defer ml.Close()
- au := &accountUpdates{}
conf := config.GetDefaultLocal()
conf.CatchpointInterval = 1
conf.Archival = true
- au.initialize(conf, ".", protoParams, accts)
- defer au.close()
- err := au.loadFromDisk(ml)
+ au := newAcctUpdates(t, ml, conf, ".")
+ err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
fileName := filepath.Join(temporaryDirectroy, "15.catchpoint")
@@ -315,7 +313,7 @@ func TestFullCatchpointWriter(t *testing.T) {
require.NoError(t, err)
// create a ledger.
- var initState InitState
+ var initState ledgercore.InitState
initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
require.NoError(t, err)
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 38b396b19..784a258c7 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -121,7 +121,7 @@ const (
func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor {
rdb := ledger.trackerDB().Rdb
wdb := ledger.trackerDB().Wdb
- accountsq, err := accountsDbInit(rdb.Handle, wdb.Handle)
+ accountsq, err := accountsInitDbQueries(rdb.Handle, wdb.Handle)
if err != nil {
log.Warnf("unable to initialize account db in MakeCatchpointCatchupAccessor : %v", err)
return nil
@@ -193,7 +193,7 @@ func (c *CatchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context
return fmt.Errorf("unable to reset catchpoint catchup balances : %v", err)
}
if !newCatchup {
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("unable to initialize accountsDbInit: %v", err)
}
@@ -271,7 +271,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
start := time.Now()
ledgerProcessstagingcontentCount.Inc(nil)
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to initialize accountsDbInit: %v", err)
}
@@ -665,7 +665,7 @@ func (c *CatchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context,
start := time.Now()
ledgerStorebalancesroundCount.Inc(nil)
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::StoreBalancesRound: unable to initialize accountsDbInit: %v", err)
}
@@ -769,7 +769,7 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
var balancesRound uint64
var totals ledgercore.AccountTotals
- sq, err := accountsDbInit(tx, tx)
+ sq, err := accountsInitDbQueries(tx, tx)
if err != nil {
return fmt.Errorf("unable to initialize accountsDbInit: %v", err)
}
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 576280d8e..e3a073cdf 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -32,6 +32,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -69,7 +70,7 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
}
func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
const inMem = false
log := logging.TestingLog(b)
cfg := config.GetDefaultLocal()
@@ -145,7 +146,7 @@ func TestCatchupAcessorFoo(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, _ /* initKeys */ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ /* initKeys */ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
@@ -201,7 +202,7 @@ func TestBuildMerkleTrie(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
@@ -300,7 +301,7 @@ func TestCatchupAccessorBlockdb(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, _ /*initKeys*/ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ /*initKeys*/ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
@@ -326,7 +327,7 @@ func TestVerifyCatchpoint(t *testing.T) {
log := logging.TestingLog(t)
dbBaseFileName := t.Name()
const inMem = true
- genesisInitState, _ /*initKeys*/ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ /*initKeys*/ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
diff --git a/ledger/eval_test.go b/ledger/eval_test.go
deleted file mode 100644
index 07ba2d099..000000000
--- a/ledger/eval_test.go
+++ /dev/null
@@ -1,1934 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package ledger
-
-import (
- "context"
- "errors"
- "fmt"
- "math/rand"
- "os"
- "path/filepath"
- "reflect"
- "runtime/pprof"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/algorand/go-deadlock"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/data/transactions/logic"
- "github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/execpool"
-)
-
-var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
-var minFee basics.MicroAlgos
-
-func init() {
- params := config.Consensus[protocol.ConsensusCurrentVersion]
- minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
-}
-
-func TestBlockEvaluator(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genesisInitState, addrs, keys := genesis(10)
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.Equal(t, eval.specials.FeeSink, testSinkAddr)
- require.NoError(t, err)
-
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[1],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
-
- // Correct signature should work
- st := txn.Sign(keys[0])
- err = eval.Transaction(st, transactions.ApplyData{})
- require.NoError(t, err)
-
- // Broken signature should fail
- stbad := st
- st.Sig[2] ^= 8
- txgroup := []transactions.SignedTxn{stbad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- // Repeat should fail
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // out of range should fail
- btxn := txn
- btxn.FirstValid++
- btxn.LastValid += 2
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // bogus group should fail
- btxn = txn
- btxn.Group[1] = 1
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // mixed fields should fail
- btxn = txn
- btxn.XferAsset = 3
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
- // err = eval.Transaction(st, transactions.ApplyData{})
- // require.Error(t, err)
-
- selfTxn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[2],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[2],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := selfTxn.Sign(keys[2])
-
- // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
- txgroup = []transactions.SignedTxn{stxn}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- err = eval.Transaction(stxn, transactions.ApplyData{})
- require.NoError(t, err)
-
- t3 := txn
- t3.Amount.Raw++
- t4 := selfTxn
- t4.Amount.Raw++
-
- // a group without .Group should fail
- s3 := t3.Sign(keys[0])
- s4 := t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // Test a group that should work
- var group transactions.TxGroup
- group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
- t3.Group = crypto.HashObj(group)
- t4.Group = t3.Group
- s3 = t3.Sign(keys[0])
- s4 = t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- // disagreement on Group id should fail
- t4bad := t4
- t4bad.Group[3] ^= 3
- s4bad := t4bad.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4bad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // missing part of the group should fail
- txgroup = []transactions.SignedTxn{s3}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
-
- accts := genesisInitState.Accounts
- bal0 := accts[addrs[0]]
- bal1 := accts[addrs[1]]
- bal2 := accts[addrs[2]]
-
- l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
-
- bal0new, err := l.Lookup(newBlock.Round(), addrs[0])
- require.NoError(t, err)
- bal1new, err := l.Lookup(newBlock.Round(), addrs[1])
- require.NoError(t, err)
- bal2new, err := l.Lookup(newBlock.Round(), addrs[2])
- require.NoError(t, err)
-
- require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
- require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
- require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
-}
-
-func TestRekeying(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // Pretend rekeying is supported
- actual := config.Consensus[protocol.ConsensusCurrentVersion]
- pretend := actual
- pretend.SupportRekeying = true
- config.Consensus[protocol.ConsensusCurrentVersion] = pretend
- defer func() {
- config.Consensus[protocol.ConsensusCurrentVersion] = actual
- }()
-
- // Bring up a ledger
- genesisInitState, addrs, keys := genesis(10)
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- // Make a new block
- nextRound := l.Latest() + basics.Round(1)
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
-
- // Test plan
- // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
- makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: nextRound,
- LastValid: nextRound,
- GenesisHash: genHash,
- RekeyTo: rekeyto,
- Note: []byte{uniq},
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: sender,
- },
- }
- sig := signer.Sign(txn)
- return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
- }
-
- tryBlock := func(stxns []transactions.SignedTxn) error {
- // We'll make a block using the evaluator.
- // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
- // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(t, err)
-
- for _, stxn := range stxns {
- err = eval.Transaction(stxn, transactions.ApplyData{})
- if err != nil {
- return err
- }
- }
- validatedBlock, err := eval.GenerateBlock()
- if err != nil {
- return err
- }
-
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
- _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
- return err
- }
-
- // Preamble transactions, which all of the blocks in this test will start with
- // [A -> 0][0,A] (normal transaction)
- // [A -> B][0,A] (rekey)
- txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
- txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
-
- // Test 1: Do only good things
- // (preamble)
- // [A -> 0][B,B] (normal transaction using new key)
- // [A -> A][B,B] (rekey back to A, transaction still signed by B)
- // [A -> 0][0,A] (normal transaction again)
- test1txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
- makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
- makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
- }
- err = tryBlock(test1txns)
- require.NoError(t, err)
-
- // Test 2: Use old key after rekeying
- // (preamble)
- // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
- test2txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
- }
- err = tryBlock(test2txns)
- require.Error(t, err)
-
- // TODO: More tests
-}
-
-func TestPrepareEvalParams(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- eval := BlockEvaluator{
- prevHeader: bookkeeping.BlockHeader{
- TimeStamp: 1234,
- Round: 2345,
- },
- }
-
- params := []config.ConsensusParams{
- {Application: true, MaxAppProgramCost: 700},
- config.Consensus[protocol.ConsensusV29],
- config.Consensus[protocol.ConsensusFuture],
- }
-
- // Create some sample transactions
- payment := txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: basics.Address{1, 2, 3, 4},
- Receiver: basics.Address{4, 3, 2, 1},
- Amount: 100,
- }.SignedTxnWithAD()
-
- appcall1 := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: basics.Address{1, 2, 3, 4},
- ApplicationID: basics.AppIndex(1),
- }.SignedTxnWithAD()
-
- appcall2 := appcall1
- appcall2.SignedTxn.Txn.ApplicationCallTxnFields.ApplicationID = basics.AppIndex(2)
-
- type evalTestCase struct {
- group []transactions.SignedTxnWithAD
-
- // indicates if prepareAppEvaluators should return a non-nil
- // appTealEvaluator for the txn at index i
- expected []bool
-
- numAppCalls int
- // Used for checking transitive pointer equality in app calls
- // If there are no app calls in the group, it is set to -1
- firstAppCallIndex int
- }
-
- // Create some groups with these transactions
- cases := []evalTestCase{
- {[]transactions.SignedTxnWithAD{payment}, []bool{false}, 0, -1},
- {[]transactions.SignedTxnWithAD{appcall1}, []bool{true}, 1, 0},
- {[]transactions.SignedTxnWithAD{payment, payment}, []bool{false, false}, 0, -1},
- {[]transactions.SignedTxnWithAD{appcall1, payment}, []bool{true, false}, 1, 0},
- {[]transactions.SignedTxnWithAD{payment, appcall1}, []bool{false, true}, 1, 1},
- {[]transactions.SignedTxnWithAD{appcall1, appcall2}, []bool{true, true}, 2, 0},
- {[]transactions.SignedTxnWithAD{appcall1, appcall2, appcall1}, []bool{true, true, true}, 3, 0},
- {[]transactions.SignedTxnWithAD{payment, appcall1, payment}, []bool{false, true, false}, 1, 1},
- {[]transactions.SignedTxnWithAD{appcall1, payment, appcall2}, []bool{true, false, true}, 2, 0},
- }
-
- for i, param := range params {
- for j, testCase := range cases {
- t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
- eval.proto = param
- res := eval.prepareEvalParams(testCase.group)
- require.Equal(t, len(res), len(testCase.group))
-
- // Compute the expected transaction group without ApplyData for
- // the test case
- expGroupNoAD := make([]transactions.SignedTxn, len(testCase.group))
- for k := range testCase.group {
- expGroupNoAD[k] = testCase.group[k].SignedTxn
- }
-
- // Ensure non app calls have a nil evaluator, and that non-nil
- // evaluators point to the right transactions and values
- for k, present := range testCase.expected {
- if present {
- require.NotNil(t, res[k])
- require.NotNil(t, res[k].PastSideEffects)
- require.Equal(t, res[k].GroupIndex, uint64(k))
- require.Equal(t, res[k].TxnGroup, expGroupNoAD)
- require.Equal(t, *res[k].Proto, eval.proto)
- require.Equal(t, *res[k].Txn, testCase.group[k].SignedTxn)
- require.Equal(t, res[k].MinTealVersion, res[testCase.firstAppCallIndex].MinTealVersion)
- require.Equal(t, res[k].PooledApplicationBudget, res[testCase.firstAppCallIndex].PooledApplicationBudget)
- if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusV29]) {
- require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost))
- } else if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusFuture]) {
- require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost*testCase.numAppCalls))
- }
- } else {
- require.Nil(t, res[k])
- }
- }
- })
- }
- }
-}
-
-func testLedgerCleanup(l *Ledger, dbName string, inMem bool) {
- l.Close()
- if !inMem {
- hits, err := filepath.Glob(dbName + "*.sqlite")
- if err != nil {
- return
- }
- for _, fname := range hits {
- os.Remove(fname)
- }
- }
-}
-
-func testEvalAppGroup(t *testing.T, schema basics.StateSchema) (*BlockEvaluator, basics.Address, error) {
- genesisInitState, addrs, keys := genesis(10)
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(t, err)
- eval.validate = true
- eval.generate = false
-
- ops, err := logic.AssembleString(`#pragma version 2
- txn ApplicationID
- bz create
- byte "caller"
- txn Sender
- app_global_put
- b ok
-create:
- byte "creator"
- txn Sender
- app_global_put
-ok:
- int 1`)
- require.NoError(t, err, ops.Errors)
- approval := ops.Program
- ops, err = logic.AssembleString("#pragma version 2\nint 1")
- require.NoError(t, err)
- clear := ops.Program
-
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
- header := transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- }
- appcall1 := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: header,
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- GlobalStateSchema: schema,
- ApprovalProgram: approval,
- ClearStateProgram: clear,
- },
- }
-
- appcall2 := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: header,
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApplicationID: 1,
- },
- }
-
- var group transactions.TxGroup
- group.TxGroupHashes = []crypto.Digest{crypto.HashObj(appcall1), crypto.HashObj(appcall2)}
- appcall1.Group = crypto.HashObj(group)
- appcall2.Group = crypto.HashObj(group)
- stxn1 := appcall1.Sign(keys[0])
- stxn2 := appcall2.Sign(keys[0])
-
- g := []transactions.SignedTxnWithAD{
- {
- SignedTxn: stxn1,
- ApplyData: transactions.ApplyData{
- EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
- "creator": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
- },
- ApplicationID: 1,
- },
- },
- {
- SignedTxn: stxn2,
- ApplyData: transactions.ApplyData{
- EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
- "caller": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
- }},
- },
- }
- txgroup := []transactions.SignedTxn{stxn1, stxn2}
- err = eval.TestTransactionGroup(txgroup)
- if err != nil {
- return eval, addrs[0], err
- }
- err = eval.transactionGroup(g)
- return eval, addrs[0], err
-}
-
-// TestEvalAppStateCountsWithTxnGroup ensures txns in a group can't violate app state schema limits
-// the test ensures that
-// commitToParent -> applyChild copies child's cow state usage counts into parent
-// and the usage counts correctly propagated from parent cow to child cow and back
-func TestEvalAppStateCountsWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- _, _, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 1})
- require.Error(t, err)
- require.Contains(t, err.Error(), "store bytes count 2 exceeds schema bytes count 1")
-}
-
-// TestEvalAppAllocStateWithTxnGroup ensures roundCowState.deltas and applyStorageDelta
-// produce correct results when a txn group has storage allocate and storage update actions
-func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- eval, addr, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 2})
- require.NoError(t, err)
- deltas := eval.state.deltas()
- ad, _ := deltas.Accts.Get(addr)
- state := ad.AppParams[1].GlobalState
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["caller"])
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
-}
-
-func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- eval := l.nextBlock(t)
- eval.validate = true
- eval.generate = false
-
- eval.proto = config.Consensus[consensusVersion]
-
- appcall1 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- GlobalStateSchema: schema,
- ApprovalProgram: approvalProgram,
- }
-
- appcall2 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- appcall3 := txntest.Txn{
- Sender: addrs[1],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- return eval.txgroup(t, &appcall1, &appcall2, &appcall3)
-}
-
-// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
-// budgets in a group txn and return an error if the budget is exceeded
-func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- source := func(n int, m int) string {
- return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
- strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
- }
-
- params := []protocol.ConsensusVersion{
- protocol.ConsensusV29,
- protocol.ConsensusFuture,
- }
-
- cases := []struct {
- prog string
- isSuccessV29 bool
- isSuccessVFuture bool
- expectedErrorV29 string
- expectedErrorVFuture string
- }{
- {source(5, 47), true, true,
- "",
- ""},
- {source(5, 48), false, true,
- "pc=157 dynamic cost budget exceeded, executing pushint: remaining budget is 700 but program cost was 701",
- ""},
- {source(16, 17), false, true,
- "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
- ""},
- {source(16, 18), false, false,
- "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
- "pc= 78 dynamic cost budget exceeded, executing pushint: remaining budget is 2100 but program cost was 2101"},
- }
-
- for i, param := range params {
- for j, testCase := range cases {
- t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
- err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
- if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorV29)
- } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
- }
- })
- }
- }
-}
-
-// BenchTxnGenerator generates transactions as long as asked for
-type BenchTxnGenerator interface {
- // Prepare should be used for making pre-benchmark ledger initialization
- // like accounts funding, assets or apps creation
- Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int)
- // Txn generates a single transaction
- Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn
-}
-
-// BenchPaymentTxnGenerator generates payment transactions
-type BenchPaymentTxnGenerator struct {
- counter int
-}
-
-func (g *BenchPaymentTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
- return nil, 0
-}
-
-func (g *BenchPaymentTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
- sender := g.counter % len(addrs)
- receiver := (g.counter + 1) % len(addrs)
- // The following would create more random selection of accounts, and prevent a cache of half of the accounts..
- // iDigest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24)})
- // sender := (uint64(iDigest[0]) + uint64(iDigest[1])*256 + uint64(iDigest[2])*256*256) % uint64(len(addrs))
- // receiver := (uint64(iDigest[4]) + uint64(iDigest[5])*256 + uint64(iDigest[6])*256*256) % uint64(len(addrs))
-
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[sender],
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd,
- GenesisHash: gh,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[receiver],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := txn.Sign(keys[sender])
- g.counter++
- return stxn
-}
-
-// BenchAppTxnGenerator generates app opt in transactions
-type BenchAppOptInsTxnGenerator struct {
- NumApps int
- Proto protocol.ConsensusVersion
- Program []byte
- OptedInAccts []basics.Address
- OptedInAcctsIndices []int
-}
-
-func (g *BenchAppOptInsTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
- maxLocalSchemaEntries := config.Consensus[g.Proto].MaxLocalSchemaEntries
- maxAppsOptedIn := config.Consensus[g.Proto].MaxAppsOptedIn
-
- // this function might create too much transaction even to fit into a single block
- // estimate number of smaller blocks needed in order to set LastValid properly
- const numAccts = 10000
- const maxTxnPerBlock = 10000
- expectedTxnNum := g.NumApps + numAccts*maxAppsOptedIn
- expectedNumOfBlocks := expectedTxnNum/maxTxnPerBlock + 1
-
- createTxns := make([]transactions.SignedTxn, 0, g.NumApps)
- for i := 0; i < g.NumApps; i++ {
- creatorIdx := rand.Intn(len(addrs))
- creator := addrs[creatorIdx]
- txn := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: transactions.Header{
- Sender: creator,
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd + basics.Round(expectedNumOfBlocks),
- GenesisHash: gh,
- Note: randomNote(),
- },
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApprovalProgram: g.Program,
- ClearStateProgram: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
- LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
- },
- }
- stxn := txn.Sign(keys[creatorIdx])
- createTxns = append(createTxns, stxn)
- }
-
- appsOptedIn := make(map[basics.Address]map[basics.AppIndex]struct{}, numAccts)
-
- optInTxns := make([]transactions.SignedTxn, 0, numAccts*maxAppsOptedIn)
-
- for i := 0; i < numAccts; i++ {
- var senderIdx int
- var sender basics.Address
- for {
- senderIdx = rand.Intn(len(addrs))
- sender = addrs[senderIdx]
- if len(appsOptedIn[sender]) < maxAppsOptedIn {
- appsOptedIn[sender] = make(map[basics.AppIndex]struct{}, maxAppsOptedIn)
- break
- }
- }
- g.OptedInAccts = append(g.OptedInAccts, sender)
- g.OptedInAcctsIndices = append(g.OptedInAcctsIndices, senderIdx)
-
- acctOptIns := appsOptedIn[sender]
- for j := 0; j < maxAppsOptedIn; j++ {
- var appIdx basics.AppIndex
- for {
- appIdx = basics.AppIndex(rand.Intn(g.NumApps) + 1)
- if _, ok := acctOptIns[appIdx]; !ok {
- acctOptIns[appIdx] = struct{}{}
- break
- }
- }
-
- txn := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd + basics.Round(expectedNumOfBlocks),
- GenesisHash: gh,
- },
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApplicationID: basics.AppIndex(appIdx),
- OnCompletion: transactions.OptInOC,
- },
- }
- stxn := txn.Sign(keys[senderIdx])
- optInTxns = append(optInTxns, stxn)
- }
- appsOptedIn[sender] = acctOptIns
- }
-
- return append(createTxns, optInTxns...), maxTxnPerBlock
-}
-
-func (g *BenchAppOptInsTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
- idx := rand.Intn(len(g.OptedInAcctsIndices))
- senderIdx := g.OptedInAcctsIndices[idx]
- sender := addrs[senderIdx]
- receiverIdx := rand.Intn(len(addrs))
-
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: rnd,
- LastValid: rnd,
- GenesisHash: gh,
- Note: randomNote(),
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[receiverIdx],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := txn.Sign(keys[senderIdx])
- return stxn
-}
-
-func BenchmarkBlockEvaluatorRAMCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, true, true, protocol.ConsensusCurrentVersion, &g)
-}
-func BenchmarkBlockEvaluatorRAMNoCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, true, false, protocol.ConsensusCurrentVersion, &g)
-}
-func BenchmarkBlockEvaluatorDiskCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, false, true, protocol.ConsensusCurrentVersion, &g)
-}
-func BenchmarkBlockEvaluatorDiskNoCrypto(b *testing.B) {
- g := BenchPaymentTxnGenerator{}
- benchmarkBlockEvaluator(b, false, false, protocol.ConsensusCurrentVersion, &g)
-}
-
-func BenchmarkBlockEvaluatorDiskAppOptIns(b *testing.B) {
- g := BenchAppOptInsTxnGenerator{
- NumApps: 500,
- Proto: protocol.ConsensusFuture,
- Program: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
- }
- benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
-}
-
-func BenchmarkBlockEvaluatorDiskFullAppOptIns(b *testing.B) {
- // program sets all 16 available keys of len 64 bytes to same values of 64 bytes
- source := `#pragma version 5
- txn OnCompletion
- int OptIn
- ==
- bz done
- int 0
- store 0 // save loop var
-loop:
- int 0 // acct index
- byte "012345678901234567890123456789012345678901234567890123456789ABC0"
- int 63
- load 0 // loop var
- int 0x41
- +
- setbyte // str[63] = chr(i + 'A')
- dup // value is the same as key
- app_local_put
- load 0 // loop var
- int 1
- +
- dup
- store 0 // save loop var
- int 16
- <
- bnz loop
-done:
- int 1
-`
- ops, err := logic.AssembleString(source)
- require.NoError(b, err)
- prog := ops.Program
- g := BenchAppOptInsTxnGenerator{
- NumApps: 500,
- Proto: protocol.ConsensusFuture,
- Program: prog,
- }
- benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
-}
-
-// this variant focuses on benchmarking ledger.go `eval()`, the rest is setup, it runs eval() b.N times.
-func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool, proto protocol.ConsensusVersion, txnSource BenchTxnGenerator) {
- deadlockDisable := deadlock.Opts.Disable
- deadlock.Opts.Disable = true
- defer func() { deadlock.Opts.Disable = deadlockDisable }()
- start := time.Now()
- genesisInitState, addrs, keys := genesisWithProto(100000, proto)
- dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
- cparams := config.Consensus[genesisInitState.Block.CurrentProtocol]
- cparams.MaxTxnBytesPerBlock = 1000000000 // very big, no limit
- config.Consensus[protocol.ConsensusVersion(dbName)] = cparams
- genesisInitState.Block.CurrentProtocol = protocol.ConsensusVersion(dbName)
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(b, err)
- defer testLedgerCleanup(l, dbName, inMem)
-
- dbName2 := dbName + "_2"
- l2, err := OpenLedger(logging.Base(), dbName2, inMem, genesisInitState, cfg)
- require.NoError(b, err)
- defer testLedgerCleanup(l2, dbName2, inMem)
-
- bepprof := os.Getenv("BLOCK_EVAL_PPROF")
- if len(bepprof) > 0 {
- profpath := dbName + "_cpuprof"
- profout, err := os.Create(profpath)
- if err != nil {
- b.Fatal(err)
- return
- }
- b.Logf("%s: cpu profile for b.N=%d", profpath, b.N)
- pprof.StartCPUProfile(profout)
- defer func() {
- pprof.StopCPUProfile()
- profout.Close()
- }()
- }
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- bev, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(b, err)
-
- genHash := genesisInitState.Block.BlockHeader.GenesisHash
-
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
-
- // apply initialization transations if any
- initSignedTxns, maxTxnPerBlock := txnSource.Prepare(b, addrs, keys, newBlock.Round(), genHash)
- if len(initSignedTxns) > 0 {
- // all init transactions need to be written to ledger before reopening and benchmarking
- for _, l := range []*Ledger{l, l2} {
- l.accts.ctxCancel() // force commitSyncer to exit
-
- // wait commitSyncer to exit
- // the test calls commitRound directly and does not need commitSyncer/committedUpTo
- select {
- case <-l.accts.commitSyncerClosed:
- break
- }
- }
-
- var numBlocks uint64 = 0
- var validatedBlock *ValidatedBlock
-
- // there are might more transactions than MaxTxnBytesPerBlock allows
- // so make smaller blocks to fit
- for i, stxn := range initSignedTxns {
- err = bev.Transaction(stxn, transactions.ApplyData{})
- require.NoError(b, err)
- if maxTxnPerBlock > 0 && i%maxTxnPerBlock == 0 || i == len(initSignedTxns)-1 {
- validatedBlock, err = bev.GenerateBlock()
- require.NoError(b, err)
- for _, l := range []*Ledger{l, l2} {
- err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(b, err)
- }
- newBlock = bookkeeping.MakeBlock(validatedBlock.blk.BlockHeader)
- bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(b, err)
- numBlocks++
- }
- }
-
- // wait until everying is written and then reload ledgers in order
- // to start reading accounts from DB and not from caches/deltas
- var wg sync.WaitGroup
- for _, l := range []*Ledger{l, l2} {
- wg.Add(1)
- // committing might take a long time, do it parallel
- go func(l *Ledger) {
- l.accts.accountsWriting.Add(1)
- l.accts.commitRound(numBlocks, 0, 0)
- l.accts.accountsWriting.Wait()
- l.reloadLedger()
- wg.Done()
- }(l)
- }
- wg.Wait()
-
- newBlock = bookkeeping.MakeBlock(validatedBlock.blk.BlockHeader)
- bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(b, err)
- }
-
- setupDone := time.Now()
- setupTime := setupDone.Sub(start)
- b.Logf("BenchmarkBlockEvaluator setup time %s", setupTime.String())
-
- // test speed of block building
- numTxns := 50000
-
- for i := 0; i < numTxns; i++ {
- stxn := txnSource.Txn(b, addrs, keys, newBlock.Round(), genHash)
- err = bev.Transaction(stxn, transactions.ApplyData{})
- require.NoError(b, err)
- }
-
- validatedBlock, err := bev.GenerateBlock()
- require.NoError(b, err)
-
- blockBuildDone := time.Now()
- blockBuildTime := blockBuildDone.Sub(setupDone)
- b.ReportMetric(float64(blockBuildTime)/float64(numTxns), "ns/block_build_tx")
-
- err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(b, err)
-
- avbDone := time.Now()
- avbTime := avbDone.Sub(blockBuildDone)
- b.ReportMetric(float64(avbTime)/float64(numTxns), "ns/AddValidatedBlock_tx")
-
- // test speed of block validation
- // This should be the same as the eval line in ledger.go AddBlock()
- // This is pulled out to isolate eval() time from db ops of AddValidatedBlock()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if withCrypto {
- _, err = l2.Validate(context.Background(), validatedBlock.blk, backlogPool)
- } else {
- _, err = eval(context.Background(), l2, validatedBlock.blk, false, nil, nil)
- }
- require.NoError(b, err)
- }
-
- abDone := time.Now()
- abTime := abDone.Sub(avbDone)
- b.ReportMetric(float64(abTime)/float64(numTxns*b.N), "ns/eval_validate_tx")
-
- b.StopTimer()
-}
-
-func TestCowCompactCert(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var certRnd basics.Round
- var certType protocol.CompactCertType
- var cert compactcert.Cert
- var atRound basics.Round
- var validate bool
- accts0 := randomAccounts(20, true)
- blocks := make(map[basics.Round]bookkeeping.BlockHeader)
- blockErr := make(map[basics.Round]error)
- ml := mockLedger{balanceMap: accts0, blocks: blocks, blockErr: blockErr}
- c0 := makeRoundCowState(
- &ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
- 0, ledgercore.AccountTotals{}, 0)
-
- certType = protocol.CompactCertType(1234) // bad cert type
- err := c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // no certRnd block
- certType = protocol.CompactCertBasic
- noBlockErr := errors.New("no block")
- blockErr[3] = noBlockErr
- certRnd = 3
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // no votersRnd block
- // this is slightly a mess of things that don't quite line up with likely usage
- validate = true
- var certHdr bookkeeping.BlockHeader
- certHdr.CurrentProtocol = "TestCowCompactCert"
- certHdr.Round = 1
- proto := config.Consensus[certHdr.CurrentProtocol]
- proto.CompactCertRounds = 2
- config.Consensus[certHdr.CurrentProtocol] = proto
- blocks[certHdr.Round] = certHdr
-
- certHdr.Round = 15
- blocks[certHdr.Round] = certHdr
- certRnd = certHdr.Round
- blockErr[13] = noBlockErr
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // validate fail
- certHdr.Round = 1
- certRnd = certHdr.Round
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.Error(t, err)
-
- // fall through to no err
- validate = false
- err = c0.compactCert(certRnd, certType, cert, atRound, validate)
- require.NoError(t, err)
-
- // 100% coverage
-}
-
-// a couple trivial tests that don't need setup
-// see TestBlockEvaluator for more
-func TestTestTransactionGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var txgroup []transactions.SignedTxn
- eval := BlockEvaluator{}
- err := eval.TestTransactionGroup(txgroup)
- require.NoError(t, err) // nothing to do, no problem
-
- eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
- txgroup = make([]transactions.SignedTxn, eval.proto.MaxTxGroupSize+1)
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err) // too many
-}
-
-// test BlockEvaluator.transactionGroup()
-// some trivial checks that require no setup
-func TestPrivateTransactionGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var txgroup []transactions.SignedTxnWithAD
- eval := BlockEvaluator{}
- err := eval.transactionGroup(txgroup)
- require.NoError(t, err) // nothing to do, no problem
-
- eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
- txgroup = make([]transactions.SignedTxnWithAD, eval.proto.MaxTxGroupSize+1)
- err = eval.transactionGroup(txgroup)
- require.Error(t, err) // too many
-}
-
-// BlockEvaluator.workaroundOverspentRewards() fixed a couple issues on testnet.
-// This is now part of history and has to be re-created when running catchup on testnet. So, test to ensure it keeps happenning.
-func TestTestnetFixup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- eval := &BlockEvaluator{}
- var rewardPoolBalance basics.AccountData
- rewardPoolBalance.MicroAlgos.Raw = 1234
- var headerRound basics.Round
- testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
-
- // not a fixup round, no change
- headerRound = 1
- poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Equal(t, rewardPoolBalance, poolOld)
- require.NoError(t, err)
-
- eval.genesisHash = testnetGenesisHash
- eval.genesisHash[3]++
-
- specialRounds := []basics.Round{1499995, 2926564}
- for _, headerRound = range specialRounds {
- poolOld, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Equal(t, rewardPoolBalance, poolOld)
- require.NoError(t, err)
- }
-
- for _, headerRound = range specialRounds {
- testnetFixupExecution(t, headerRound, 20000000000)
- }
- // do all the setup and do nothing for not a special round
- testnetFixupExecution(t, specialRounds[0]+1, 0)
-}
-
-func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uint64) {
- testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
- // big setup so we can move some algos
- // boilerplate like TestBlockEvaluator, but pretend to be testnet
- genesisInitState, addrs, keys := genesis(10)
- genesisInitState.Block.BlockHeader.GenesisHash = testnetGenesisHash
- genesisInitState.Block.BlockHeader.GenesisID = "testnet"
- genesisInitState.GenesisHash = testnetGenesisHash
-
- // for addr, adata := range genesisInitState.Accounts {
- // t.Logf("%s: %+v", addr.String(), adata)
- // }
- rewardPoolBalance := genesisInitState.Accounts[testPoolAddr]
- nextPoolBalance := rewardPoolBalance.MicroAlgos.Raw + poolBonus
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(t, err)
-
- // won't work before funding bank
- if poolBonus > 0 {
- _, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Error(t, err)
- }
-
- bankAddr, _ := basics.UnmarshalChecksumAddress("GD64YIY3TWGDMCNPP553DZPPR6LDUSFQOIJVFDPPXWEG3FVOJCCDBBHU5A")
-
- // put some algos in the bank so that fixup can pull from this account
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: testnetGenesisHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: bankAddr,
- Amount: basics.MicroAlgos{Raw: 20000000000 * 10},
- },
- }
- st := txn.Sign(keys[0])
- err = eval.Transaction(st, transactions.ApplyData{})
- require.NoError(t, err)
-
- poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
- require.Equal(t, nextPoolBalance, poolOld.MicroAlgos.Raw)
- require.NoError(t, err)
-}
-
-// Test that ModifiedAssetHoldings in StateDelta is set correctly.
-func TestModifiedAssetHoldings(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- const assetid basics.AssetIndex = 1
-
- createTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- Fee: 2000,
- AssetParams: basics.AssetParams{
- Total: 3,
- Decimals: 0,
- Manager: addrs[0],
- Reserve: addrs[0],
- Freeze: addrs[0],
- Clawback: addrs[0],
- },
- }
-
- optInTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[1],
- Fee: 2000,
- XferAsset: assetid,
- AssetAmount: 0,
- AssetReceiver: addrs[1],
- }
-
- eval := l.nextBlock(t)
- eval.txns(t, &createTxn, &optInTxn)
- vb := l.endBlock(t, eval)
-
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[0],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.True(t, created)
- }
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[1],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.True(t, created)
- }
-
- optOutTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[1],
- Fee: 1000,
- XferAsset: assetid,
- AssetReceiver: addrs[0],
- AssetCloseTo: addrs[0],
- }
-
- closeTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- Fee: 1000,
- ConfigAsset: assetid,
- }
-
- eval = l.nextBlock(t)
- eval.txns(t, &optOutTxn, &closeTxn)
- vb = l.endBlock(t, eval)
-
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[0],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.False(t, created)
- }
- {
- aa := ledgercore.AccountAsset{
- Address: addrs[1],
- Asset: assetid,
- }
- created, ok := vb.delta.ModifiedAssetHoldings[aa]
- require.True(t, ok)
- assert.False(t, created)
- }
-}
-
-// newTestGenesis creates a bunch of accounts, splits up 10B algos
-// between them and the rewardspool and feesink, and gives out the
-// addresses and secrets it creates to enable tests. For special
-// scenarios, manipulate these return values before using newTestLedger.
-func newTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
- // irrelevant, but deterministic
- sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
- if err != nil {
- panic(err)
- }
- rewards, err := basics.UnmarshalChecksumAddress("242H5OXHUEBYCGGWB3CQ6AZAMQB5TMCWJGHCGQOZPEIVQJKOO7NZXUXDQA")
- if err != nil {
- panic(err)
- }
-
- const count = 10
- addrs := make([]basics.Address, count)
- secrets := make([]*crypto.SignatureSecrets, count)
- accts := make(map[basics.Address]basics.AccountData)
-
- // 10 billion microalgos, across N accounts and pool and sink
- amount := 10 * 1000000000 * 1000000 / uint64(count+2)
-
- for i := 0; i < count; i++ {
- // Create deterministic addresses, so that output stays the same, run to run.
- var seed crypto.Seed
- seed[0] = byte(i)
- secrets[i] = crypto.GenerateSignatureSecrets(seed)
- addrs[i] = basics.Address(secrets[i].SignatureVerifier)
-
- adata := basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: amount},
- }
- accts[addrs[i]] = adata
- }
-
- accts[sink] = basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: amount},
- Status: basics.NotParticipating,
- }
-
- accts[rewards] = basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: amount},
- }
-
- genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
-
- return genBalances, addrs, secrets
-}
-
-// newTestLedger creates a in memory Ledger that is as realistic as
-// possible. It has Rewards and FeeSink properly configured.
-func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger {
- l, _, _ := newTestLedgerImpl(t, balances, true)
- return l
-}
-
-func newTestLedgerOnDisk(t testing.TB, balances bookkeeping.GenesisBalances) (*Ledger, string, bookkeeping.Block) {
- return newTestLedgerImpl(t, balances, false)
-}
-
-func newTestLedgerImpl(t testing.TB, balances bookkeeping.GenesisBalances, inMem bool) (*Ledger, string, bookkeeping.Block) {
- var genHash crypto.Digest
- crypto.RandBytes(genHash[:])
- genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
- balances, "test", genHash)
- require.False(t, genBlock.FeeSink.IsZero())
- require.False(t, genBlock.RewardsPool.IsZero())
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, InitState{
- Block: genBlock,
- Accounts: balances.Balances,
- GenesisHash: genHash,
- }, cfg)
- require.NoError(t, err)
- return l, dbName, genBlock
-}
-
-// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
-func (ledger *Ledger) nextBlock(t testing.TB) *BlockEvaluator {
- rnd := ledger.Latest()
- hdr, err := ledger.BlockHdr(rnd)
- require.NoError(t, err)
-
- nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
- eval, err := ledger.StartEvaluator(nextHdr, 0, 0)
- require.NoError(t, err)
- return eval
-}
-
-// endBlock completes the block being created, returns the ValidatedBlock for inspection
-func (ledger *Ledger) endBlock(t testing.TB, eval *BlockEvaluator) *ValidatedBlock {
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
- err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(t, err)
- return validatedBlock
-}
-
-// lookup gets the current accountdata for an address
-func (ledger *Ledger) lookup(t testing.TB, addr basics.Address) basics.AccountData {
- rnd := ledger.Latest()
- ad, err := ledger.Lookup(rnd, addr)
- require.NoError(t, err)
- return ad
-}
-
-// micros gets the current microAlgo balance for an address
-func (ledger *Ledger) micros(t testing.TB, addr basics.Address) uint64 {
- return ledger.lookup(t, addr).MicroAlgos.Raw
-}
-
-// asa gets the current balance and optin status for some asa for an address
-func (ledger *Ledger) asa(t testing.TB, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
- if holding, ok := ledger.lookup(t, addr).Assets[asset]; ok {
- return holding.Amount, true
- }
- return 0, false
-}
-
-// asaParams gets the asset params for a given asa index
-func (ledger *Ledger) asaParams(t testing.TB, asset basics.AssetIndex) (basics.AssetParams, error) {
- creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
- if err != nil {
- return basics.AssetParams{}, err
- }
- if !ok {
- return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
- }
- if params, ok := ledger.lookup(t, creator).AssetParams[asset]; ok {
- return params, nil
- }
- return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
-}
-
-func (eval *BlockEvaluator) fillDefaults(txn *txntest.Txn) {
- if txn.GenesisHash.IsZero() {
- txn.GenesisHash = eval.genesisHash
- }
- if txn.FirstValid == 0 {
- txn.FirstValid = eval.Round()
- }
- txn.FillDefaults(eval.proto)
-}
-
-func (eval *BlockEvaluator) txn(t testing.TB, txn *txntest.Txn, problem ...string) {
- t.Helper()
- eval.fillDefaults(txn)
- stxn := txn.SignedTxn()
- err := eval.testTransaction(stxn, eval.state.child(1))
- if err != nil {
- if len(problem) == 1 {
- require.Contains(t, err.Error(), problem[0])
- } else {
- require.NoError(t, err) // Will obviously fail
- }
- return
- }
- err = eval.Transaction(stxn, transactions.ApplyData{})
- if err != nil {
- if len(problem) == 1 {
- require.Contains(t, err.Error(), problem[0])
- } else {
- require.NoError(t, err) // Will obviously fail
- }
- return
- }
- require.Len(t, problem, 0)
-}
-
-func (eval *BlockEvaluator) txns(t testing.TB, txns ...*txntest.Txn) {
- t.Helper()
- for _, txn := range txns {
- eval.txn(t, txn)
- }
-}
-
-func (eval *BlockEvaluator) txgroup(t testing.TB, txns ...*txntest.Txn) error {
- t.Helper()
- for _, txn := range txns {
- eval.fillDefaults(txn)
- }
- txgroup := txntest.SignedTxns(txns...)
-
- err := eval.TestTransactionGroup(txgroup)
- if err != nil {
- return err
- }
-
- err = eval.transactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
- return err
-}
-
-func TestRewardsInAD(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- payTxn := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[1]}
-
- // Build up Residue in RewardsState so it's ready to pay
- for i := 1; i < 10; i++ {
- eval := l.nextBlock(t)
- l.endBlock(t, eval)
- }
-
- eval := l.nextBlock(t)
- eval.txn(t, &payTxn)
- payInBlock := eval.block.Payset[0]
- require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
- require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
- require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
- l.endBlock(t, eval)
-}
-
-func TestMinBalanceChanges(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- createTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 3,
- Manager: addrs[1],
- Reserve: addrs[2],
- Freeze: addrs[3],
- Clawback: addrs[4],
- },
- }
-
- const expectedID basics.AssetIndex = 1
- optInTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[5],
- }
-
- ad0init := l.lookup(t, addrs[0])
- ad5init := l.lookup(t, addrs[5])
-
- eval := l.nextBlock(t)
- eval.txns(t, &createTxn, &optInTxn)
- l.endBlock(t, eval)
-
- ad0new := l.lookup(t, addrs[0])
- ad5new := l.lookup(t, addrs[5])
-
- proto := config.Consensus[eval.block.BlockHeader.CurrentProtocol]
- // Check balance and min balance requirement changes
- require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
- require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
-
- optOutTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[0],
- AssetCloseTo: addrs[0],
- }
-
- closeTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[1], // The manager, not the creator
- ConfigAsset: expectedID,
- }
-
- eval = l.nextBlock(t)
- eval.txns(t, &optOutTxn, &closeTxn)
- l.endBlock(t, eval)
-
- ad0final := l.lookup(t, addrs[0])
- ad5final := l.lookup(t, addrs[5])
- // Check we got our balance "back"
- require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
- require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
-}
-
-// Test that ModifiedAppLocalStates in StateDelta is set correctly.
-func TestModifiedAppLocalStates(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- const appid basics.AppIndex = 1
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "int 1",
- }
-
- optInTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.OptInOC,
- }
-
- eval := l.nextBlock(t)
- eval.txns(t, &createTxn, &optInTxn)
- vb := l.endBlock(t, eval)
-
- assert.Len(t, vb.delta.ModifiedAppLocalStates, 1)
- {
- aa := ledgercore.AccountApp{
- Address: addrs[1],
- App: appid,
- }
- created, ok := vb.delta.ModifiedAppLocalStates[aa]
- require.True(t, ok)
- assert.True(t, created)
- }
-
- optOutTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.CloseOutOC,
- }
-
- closeTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: appid,
- OnCompletion: transactions.DeleteApplicationOC,
- }
-
- eval = l.nextBlock(t)
- eval.txns(t, &optOutTxn, &closeTxn)
- vb = l.endBlock(t, eval)
-
- assert.Len(t, vb.delta.ModifiedAppLocalStates, 1)
- {
- aa := ledgercore.AccountApp{
- Address: addrs[1],
- App: appid,
- }
- created, ok := vb.delta.ModifiedAppLocalStates[aa]
- require.True(t, ok)
- assert.False(t, created)
- }
-}
-
-// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator
-// and do not cause any MaximumMinimumBalance problems
-func TestAppInsMinBalance(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- const appid basics.AppIndex = 1
-
- maxAppsOptedIn := config.Consensus[protocol.ConsensusFuture].MaxAppsOptedIn
- require.Greater(t, maxAppsOptedIn, 0)
- maxAppsCreated := config.Consensus[protocol.ConsensusFuture].MaxAppsCreated
- require.Greater(t, maxAppsCreated, 0)
- maxLocalSchemaEntries := config.Consensus[protocol.ConsensusFuture].MaxLocalSchemaEntries
- require.Greater(t, maxLocalSchemaEntries, uint64(0))
-
- txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
- txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
- appsCreated := make(map[basics.Address]int, len(addrs)-1)
-
- acctIdx := 0
- for i := 0; i < maxAppsOptedIn; i++ {
- creator := addrs[acctIdx]
- createTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: creator,
- ApprovalProgram: "int 1",
- LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
- Note: randomNote(),
- }
- txnsCreate = append(txnsCreate, &createTxn)
- count := appsCreated[creator]
- count++
- appsCreated[creator] = count
- if count == maxAppsCreated {
- acctIdx++
- }
-
- optInTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[9],
- ApplicationID: appid + basics.AppIndex(i),
- OnCompletion: transactions.OptInOC,
- }
- txnsOptIn = append(txnsOptIn, &optInTxn)
- }
-
- eval := l.nextBlock(t)
- txns := append(txnsCreate, txnsOptIn...)
- eval.txns(t, txns...)
- vb := l.endBlock(t, eval)
- assert.Len(t, vb.delta.ModifiedAppLocalStates, 50)
-}
-
-// TestGhostTransactions confirms that accounts that don't even exist
-// can be the Sender in some situations. If some other transaction
-// covers the fee, and the transaction itself does not require an
-// asset or a min balance, it's fine.
-func TestGhostTransactions(t *testing.T) {
- t.Skip("Behavior should be changed so test passes.")
-
- /*
- I think we have a behavior we should fix. I’m going to call these
- transactions where the Sender has no account and the fee=0 “ghostâ€
- transactions. In a ghost transaction, we still call balances.Move to
- “pay†the fee. Further, Move does not short-circuit a Move of 0 (for
- good reason, allowing compounding). Therefore, in Move, we do rewards
- processing on the “ghost†account. That causes us to want to write a
- new accountdata for them. But if we do that, the minimum balance
- checker will catch it, and kill the transaction because the ghost isn’t
- allowed to have a balance of 0. I don’t think we can short-circuit
- Move(0) because a zero pay is a known way to get your rewards
- actualized. Instead, I advocate that we short-circuit the call to Move
- for 0 fees.
-
- // move fee to pool
- if !tx.Fee.IsZero() {
- err = balances.Move(tx.Sender, eval.specials.FeeSink, tx.Fee, &ad.SenderRewards, nil)
- if err != nil {
- return
- }
- }
-
- I think this must be controlled by consensus upgrade, but I would love
- to be told I’m wrong. The other option is to outlaw these
- transactions, but even that requires changing code if we want to be
- exactly correct, because they are currently allowed when there are no
- rewards to get paid out (as would happen in a new network, or if we
- stop participation rewards - notice that this test only fails on the
- 4th attempt, once rewards have accumulated).
-
- Will suggested that we could treat Ghost accounts as non-partipating.
- Maybe that would allow the Move code to avoid trying to update
- accountdata.
- */
-
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := newTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- asaIndex := basics.AssetIndex(1)
-
- asa := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
- Clawback: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
- Freeze: basics.Address{0x0f, 0x0e, 0xe, 0xe},
- Manager: basics.Address{0x0a, 0x0a, 0xe},
- },
- }
-
- eval := l.nextBlock(t)
- eval.txn(t, &asa)
- l.endBlock(t, eval)
-
- benefactor := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[0],
- Fee: 2000,
- }
-
- ghost := basics.Address{0x01}
- ephemeral := []txntest.Txn{
- {
- Type: "pay",
- Amount: 0,
- Sender: ghost,
- Receiver: ghost,
- Fee: 0,
- },
- {
- Type: "axfer",
- AssetAmount: 0,
- Sender: ghost,
- AssetReceiver: basics.Address{0x02},
- XferAsset: basics.AssetIndex(1),
- Fee: 0,
- },
- {
- Type: "axfer",
- AssetAmount: 0,
- Sender: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
- AssetReceiver: addrs[0],
- AssetSender: addrs[1],
- XferAsset: asaIndex,
- Fee: 0,
- },
- {
- Type: "afrz",
- Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: true,
- Fee: 0,
- },
- {
- Type: "afrz",
- Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: false,
- Fee: 0,
- },
- }
-
- for i, e := range ephemeral {
- eval = l.nextBlock(t)
- err := eval.txgroup(t, &benefactor, &e)
- require.NoError(t, err, "i=%d %s", i, e.Type)
- l.endBlock(t, eval)
- }
-}
-
-type getCreatorForRoundResult struct {
- address basics.Address
- exists bool
-}
-
-type testCowBaseLedger struct {
- creators []getCreatorForRoundResult
-}
-
-func (l *testCowBaseLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
- return bookkeeping.BlockHeader{}, errors.New("not implemented")
-}
-
-func (l *testCowBaseLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, TxLease) error {
- return errors.New("not implemented")
-}
-
-func (l *testCowBaseLedger) LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error) {
- return basics.AccountData{}, basics.Round(0), errors.New("not implemented")
-}
-
-func (l *testCowBaseLedger) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
- res := l.creators[0]
- l.creators = l.creators[1:]
- return res.address, res.exists, nil
-}
-
-func TestCowBaseCreatorsCache(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- addresses := make([]basics.Address, 3)
- for i := 0; i < len(addresses); i++ {
- _, err := rand.Read(addresses[i][:])
- require.NoError(t, err)
- }
-
- creators := []getCreatorForRoundResult{
- {address: addresses[0], exists: true},
- {address: basics.Address{}, exists: false},
- {address: addresses[1], exists: true},
- {address: basics.Address{}, exists: false},
- }
- l := testCowBaseLedger{
- creators: creators,
- }
-
- base := roundCowBase{
- l: &l,
- creators: map[creatable]FoundAddress{},
- }
-
- cindex := []basics.CreatableIndex{9, 10, 9, 10}
- ctype := []basics.CreatableType{
- basics.AssetCreatable,
- basics.AssetCreatable,
- basics.AppCreatable,
- basics.AppCreatable,
- }
- for i := 0; i < 2; i++ {
- for j, expected := range creators {
- address, exists, err := base.getCreator(cindex[j], ctype[j])
- require.NoError(t, err)
-
- assert.Equal(t, expected.address, address)
- assert.Equal(t, expected.exists, exists)
- }
- }
-}
diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go
new file mode 100644
index 000000000..52974c7d3
--- /dev/null
+++ b/ledger/evalbench_test.go
@@ -0,0 +1,440 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "runtime/pprof"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+var minFee basics.MicroAlgos
+
+func init() {
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
+}
+
+// BenchTxnGenerator generates transactions as long as asked for
+type BenchTxnGenerator interface {
+ // Prepare should be used for making pre-benchmark ledger initialization
+ // like accounts funding, assets or apps creation
+ Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int)
+ // Txn generates a single transaction
+ Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn
+}
+
+// BenchPaymentTxnGenerator generates payment transactions
+type BenchPaymentTxnGenerator struct {
+ counter int
+}
+
+func (g *BenchPaymentTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
+ return nil, 0
+}
+
+func (g *BenchPaymentTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
+ sender := g.counter % len(addrs)
+ receiver := (g.counter + 1) % len(addrs)
+ // The following would create more random selection of accounts, and prevent a cache of half of the accounts..
+ // iDigest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24)})
+ // sender := (uint64(iDigest[0]) + uint64(iDigest[1])*256 + uint64(iDigest[2])*256*256) % uint64(len(addrs))
+ // receiver := (uint64(iDigest[4]) + uint64(iDigest[5])*256 + uint64(iDigest[6])*256*256) % uint64(len(addrs))
+
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[sender],
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd,
+ GenesisHash: gh,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[receiver],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := txn.Sign(keys[sender])
+ g.counter++
+ return stxn
+}
+
+// BenchAppTxnGenerator generates app opt in transactions
+type BenchAppOptInsTxnGenerator struct {
+ NumApps int
+ Proto protocol.ConsensusVersion
+ Program []byte
+ OptedInAccts []basics.Address
+ OptedInAcctsIndices []int
+}
+
+func (g *BenchAppOptInsTxnGenerator) Prepare(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) ([]transactions.SignedTxn, int) {
+ maxLocalSchemaEntries := config.Consensus[g.Proto].MaxLocalSchemaEntries
+ maxAppsOptedIn := config.Consensus[g.Proto].MaxAppsOptedIn
+
+ // this function might create too much transaction even to fit into a single block
+ // estimate number of smaller blocks needed in order to set LastValid properly
+ const numAccts = 10000
+ const maxTxnPerBlock = 10000
+ expectedTxnNum := g.NumApps + numAccts*maxAppsOptedIn
+ expectedNumOfBlocks := expectedTxnNum/maxTxnPerBlock + 1
+
+ createTxns := make([]transactions.SignedTxn, 0, g.NumApps)
+ for i := 0; i < g.NumApps; i++ {
+ creatorIdx := rand.Intn(len(addrs))
+ creator := addrs[creatorIdx]
+ txn := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: creator,
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd + basics.Round(expectedNumOfBlocks),
+ GenesisHash: gh,
+ Note: ledgertesting.RandomNote(),
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApprovalProgram: g.Program,
+ ClearStateProgram: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
+ LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
+ },
+ }
+ stxn := txn.Sign(keys[creatorIdx])
+ createTxns = append(createTxns, stxn)
+ }
+
+ appsOptedIn := make(map[basics.Address]map[basics.AppIndex]struct{}, numAccts)
+
+ optInTxns := make([]transactions.SignedTxn, 0, numAccts*maxAppsOptedIn)
+
+ for i := 0; i < numAccts; i++ {
+ var senderIdx int
+ var sender basics.Address
+ for {
+ senderIdx = rand.Intn(len(addrs))
+ sender = addrs[senderIdx]
+ if len(appsOptedIn[sender]) < maxAppsOptedIn {
+ appsOptedIn[sender] = make(map[basics.AppIndex]struct{}, maxAppsOptedIn)
+ break
+ }
+ }
+ g.OptedInAccts = append(g.OptedInAccts, sender)
+ g.OptedInAcctsIndices = append(g.OptedInAcctsIndices, senderIdx)
+
+ acctOptIns := appsOptedIn[sender]
+ for j := 0; j < maxAppsOptedIn; j++ {
+ var appIdx basics.AppIndex
+ for {
+ appIdx = basics.AppIndex(rand.Intn(g.NumApps) + 1)
+ if _, ok := acctOptIns[appIdx]; !ok {
+ acctOptIns[appIdx] = struct{}{}
+ break
+ }
+ }
+
+ txn := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd + basics.Round(expectedNumOfBlocks),
+ GenesisHash: gh,
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: basics.AppIndex(appIdx),
+ OnCompletion: transactions.OptInOC,
+ },
+ }
+ stxn := txn.Sign(keys[senderIdx])
+ optInTxns = append(optInTxns, stxn)
+ }
+ appsOptedIn[sender] = acctOptIns
+ }
+
+ return append(createTxns, optInTxns...), maxTxnPerBlock
+}
+
+func (g *BenchAppOptInsTxnGenerator) Txn(tb testing.TB, addrs []basics.Address, keys []*crypto.SignatureSecrets, rnd basics.Round, gh crypto.Digest) transactions.SignedTxn {
+ idx := rand.Intn(len(g.OptedInAcctsIndices))
+ senderIdx := g.OptedInAcctsIndices[idx]
+ sender := addrs[senderIdx]
+ receiverIdx := rand.Intn(len(addrs))
+
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: rnd,
+ LastValid: rnd,
+ GenesisHash: gh,
+ Note: ledgertesting.RandomNote(),
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[receiverIdx],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := txn.Sign(keys[senderIdx])
+ return stxn
+}
+
+func BenchmarkBlockEvaluatorRAMCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, true, true, protocol.ConsensusCurrentVersion, &g)
+}
+func BenchmarkBlockEvaluatorRAMNoCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, true, false, protocol.ConsensusCurrentVersion, &g)
+}
+func BenchmarkBlockEvaluatorDiskCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, false, true, protocol.ConsensusCurrentVersion, &g)
+}
+func BenchmarkBlockEvaluatorDiskNoCrypto(b *testing.B) {
+ g := BenchPaymentTxnGenerator{}
+ benchmarkBlockEvaluator(b, false, false, protocol.ConsensusCurrentVersion, &g)
+}
+
+func BenchmarkBlockEvaluatorDiskAppOptIns(b *testing.B) {
+ g := BenchAppOptInsTxnGenerator{
+ NumApps: 500,
+ Proto: protocol.ConsensusFuture,
+ Program: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
+ }
+ benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
+}
+
+func BenchmarkBlockEvaluatorDiskFullAppOptIns(b *testing.B) {
+ // program sets all 16 available keys of len 64 bytes to same values of 64 bytes
+ source := `#pragma version 5
+ txn OnCompletion
+ int OptIn
+ ==
+ bz done
+ int 0
+ store 0 // save loop var
+loop:
+ int 0 // acct index
+ byte "012345678901234567890123456789012345678901234567890123456789ABC0"
+ int 63
+ load 0 // loop var
+ int 0x41
+ +
+ setbyte // str[63] = chr(i + 'A')
+ dup // value is the same as key
+ app_local_put
+ load 0 // loop var
+ int 1
+ +
+ dup
+ store 0 // save loop var
+ int 16
+ <
+ bnz loop
+done:
+ int 1
+`
+ ops, err := logic.AssembleString(source)
+ require.NoError(b, err)
+ prog := ops.Program
+ g := BenchAppOptInsTxnGenerator{
+ NumApps: 500,
+ Proto: protocol.ConsensusFuture,
+ Program: prog,
+ }
+ benchmarkBlockEvaluator(b, false, false, protocol.ConsensusFuture, &g)
+}
+
+func testLedgerCleanup(l *Ledger, dbName string, inMem bool) {
+ l.Close()
+ if !inMem {
+ hits, err := filepath.Glob(dbName + "*.sqlite")
+ if err != nil {
+ return
+ }
+ for _, fname := range hits {
+ os.Remove(fname)
+ }
+ }
+}
+
+// this variant focuses on benchmarking ledger.go `Eval()`, the rest is setup, it runs Eval() b.N times.
+func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool, proto protocol.ConsensusVersion, txnSource BenchTxnGenerator) {
+
+ deadlockDisable := deadlock.Opts.Disable
+ deadlock.Opts.Disable = true
+ defer func() { deadlock.Opts.Disable = deadlockDisable }()
+ start := time.Now()
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(100000, proto)
+ dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
+ cparams := config.Consensus[genesisInitState.Block.CurrentProtocol]
+ cparams.MaxTxnBytesPerBlock = 1000000000 // very big, no limit
+ config.Consensus[protocol.ConsensusVersion(dbName)] = cparams
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusVersion(dbName)
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+ defer testLedgerCleanup(l, dbName, inMem)
+
+ dbName2 := dbName + "_2"
+ l2, err := OpenLedger(logging.Base(), dbName2, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+ defer testLedgerCleanup(l2, dbName2, inMem)
+
+ bepprof := os.Getenv("BLOCK_EVAL_PPROF")
+ if len(bepprof) > 0 {
+ profpath := dbName + "_cpuprof"
+ profout, err := os.Create(profpath)
+ if err != nil {
+ b.Fatal(err)
+ return
+ }
+ b.Logf("%s: cpu profile for b.N=%d", profpath, b.N)
+ pprof.StartCPUProfile(profout)
+ defer func() {
+ pprof.StopCPUProfile()
+ profout.Close()
+ }()
+ }
+
+ newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
+ bev, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(b, err)
+
+ genHash := l.GenesisHash()
+
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ defer backlogPool.Shutdown()
+
+ // apply initialization transations if any
+ initSignedTxns, maxTxnPerBlock := txnSource.Prepare(b, addrs, keys, newBlock.Round(), genHash)
+ if len(initSignedTxns) > 0 {
+
+ var numBlocks uint64 = 0
+ var validatedBlock *ledgercore.ValidatedBlock
+
+ // there are might more transactions than MaxTxnBytesPerBlock allows
+ // so make smaller blocks to fit
+ for i, stxn := range initSignedTxns {
+ err = bev.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(b, err)
+ if maxTxnPerBlock > 0 && i%maxTxnPerBlock == 0 || i == len(initSignedTxns)-1 {
+ validatedBlock, err = bev.GenerateBlock()
+ require.NoError(b, err)
+ for _, l := range []*Ledger{l, l2} {
+ err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(b, err)
+ }
+ newBlock = bookkeeping.MakeBlock(validatedBlock.Block().BlockHeader)
+ bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(b, err)
+ numBlocks++
+ }
+ }
+
+ // wait until everying is written and then reload ledgers in order
+ // to start reading accounts from DB and not from caches/deltas
+ var wg sync.WaitGroup
+ for _, l := range []*Ledger{l, l2} {
+ wg.Add(1)
+ // committing might take a long time, do it parallel
+ go func(l *Ledger) {
+ commitRound(numBlocks, 0, l)
+ l.reloadLedger()
+ wg.Done()
+ }(l)
+ }
+ wg.Wait()
+
+ newBlock = bookkeeping.MakeBlock(validatedBlock.Block().BlockHeader)
+ bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(b, err)
+ }
+
+ setupDone := time.Now()
+ setupTime := setupDone.Sub(start)
+ b.Logf("BenchmarkBlockEvaluator setup time %s", setupTime.String())
+
+ // test speed of block building
+ numTxns := 50000
+
+ for i := 0; i < numTxns; i++ {
+ stxn := txnSource.Txn(b, addrs, keys, newBlock.Round(), genHash)
+ err = bev.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(b, err)
+ }
+
+ validatedBlock, err := bev.GenerateBlock()
+ require.NoError(b, err)
+
+ blockBuildDone := time.Now()
+ blockBuildTime := blockBuildDone.Sub(setupDone)
+ b.ReportMetric(float64(blockBuildTime)/float64(numTxns), "ns/block_build_tx")
+
+ err = l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(b, err)
+
+ avbDone := time.Now()
+ avbTime := avbDone.Sub(blockBuildDone)
+ b.ReportMetric(float64(avbTime)/float64(numTxns), "ns/AddValidatedBlock_tx")
+
+ // test speed of block validation
+ // This should be the same as the eval line in ledger.go AddBlock()
+ // This is pulled out to isolate Eval() time from db ops of AddValidatedBlock()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if withCrypto {
+ _, err = l2.Validate(context.Background(), validatedBlock.Block(), backlogPool)
+ } else {
+ _, err = internal.Eval(context.Background(), l2, validatedBlock.Block(), false, nil, nil)
+ }
+ require.NoError(b, err)
+ }
+
+ abDone := time.Now()
+ abTime := abDone.Sub(avbDone)
+ b.ReportMetric(float64(abTime)/float64(numTxns*b.N), "ns/eval_validate_tx")
+
+ b.StopTimer()
+}
diff --git a/ledger/evalIndexer.go b/ledger/evalindexer.go
index a071c10fa..251e7c1f6 100644
--- a/ledger/evalIndexer.go
+++ b/ledger/evalindexer.go
@@ -25,11 +25,12 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
)
// A ledger interface that Indexer implements. This is a simplified version of the
-// ledgerForEvaluator interface. Certain functions that the evaluator doesn't use
+// LedgerForEvaluator interface. Certain functions that the evaluator doesn't use
// in the trusting mode are excluded, and the present functions only request data
// at the latest round.
type indexerLedgerForEval interface {
@@ -41,14 +42,36 @@ type indexerLedgerForEval interface {
LatestTotals() (ledgercore.AccountTotals, error)
}
+// FoundAddress is a wrapper for an address and a boolean.
+type FoundAddress struct {
+ Address basics.Address
+ Exists bool
+}
+
+// EvalForIndexerResources contains resources preloaded from the Indexer database.
+// Indexer is able to do the preloading more efficiently than the evaluator loading
+// resources one by one.
+type EvalForIndexerResources struct {
+ // The map value is nil iff the account does not exist. The account data is owned here.
+ Accounts map[basics.Address]*basics.AccountData
+ Creators map[Creatable]FoundAddress
+}
+
+// Creatable represent a single creatable object.
+type Creatable struct {
+ Index basics.CreatableIndex
+ Type basics.CreatableType
+}
+
// Converter between indexerLedgerForEval and ledgerForEvaluator interfaces.
type indexerLedgerConnector struct {
- il indexerLedgerForEval
- genesisHash crypto.Digest
- latestRound basics.Round
+ il indexerLedgerForEval
+ genesisHash crypto.Digest
+ latestRound basics.Round
+ roundResources EvalForIndexerResources
}
-// BlockHdr is part of ledgerForEvaluator interface.
+// BlockHdr is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) {
if round != l.latestRound {
return bookkeeping.BlockHeader{}, fmt.Errorf(
@@ -59,14 +82,22 @@ func (l indexerLedgerConnector) BlockHdr(round basics.Round) (bookkeeping.BlockH
return l.il.LatestBlockHdr()
}
-// CheckDup is part of ledgerForEvaluator interface.
-func (l indexerLedgerConnector) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, TxLease) error {
+// CheckDup is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
// This function is not used by evaluator.
return errors.New("CheckDup() not implemented")
}
-// LookupWithoutRewards is part of ledgerForEvaluator interface.
+// LookupWithoutRewards is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) LookupWithoutRewards(round basics.Round, address basics.Address) (basics.AccountData, basics.Round, error) {
+ // check to see if the account data in the cache.
+ if pad, has := l.roundResources.Accounts[address]; has {
+ if pad == nil {
+ return basics.AccountData{}, round, nil
+ }
+ return *pad, round, nil
+ }
+
accountDataMap, err :=
l.il.LookupWithoutRewards(map[basics.Address]struct{}{address: {}})
if err != nil {
@@ -80,9 +111,14 @@ func (l indexerLedgerConnector) LookupWithoutRewards(round basics.Round, address
return *accountData, round, nil
}
-// GetCreatorForRound is part of ledgerForEvaluator interface.
+// GetCreatorForRound is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
var foundAddress FoundAddress
+ var has bool
+ // check to see if the account data in the cache.
+ if foundAddress, has = l.roundResources.Creators[Creatable{Index: cindex, Type: ctype}]; has {
+ return foundAddress.Address, foundAddress.Exists, nil
+ }
switch ctype {
case basics.AssetCreatable:
@@ -106,107 +142,53 @@ func (l indexerLedgerConnector) GetCreatorForRound(_ basics.Round, cindex basics
return foundAddress.Address, foundAddress.Exists, nil
}
-// GenesisHash is part of ledgerForEvaluator interface.
+// GenesisHash is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) GenesisHash() crypto.Digest {
return l.genesisHash
}
-// Totals is part of ledgerForEvaluator interface.
+// Totals is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) LatestTotals() (rnd basics.Round, totals ledgercore.AccountTotals, err error) {
totals, err = l.il.LatestTotals()
rnd = l.latestRound
return
}
-// CompactCertVoters is part of ledgerForEvaluator interface.
-func (l indexerLedgerConnector) CompactCertVoters(_ basics.Round) (*VotersForRound, error) {
+// CompactCertVoters is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) CompactCertVoters(_ basics.Round) (*ledgercore.VotersForRound, error) {
// This function is not used by evaluator.
return nil, errors.New("CompactCertVoters() not implemented")
}
-func makeIndexerLedgerConnector(il indexerLedgerForEval, genesisHash crypto.Digest, latestRound basics.Round) indexerLedgerConnector {
+func makeIndexerLedgerConnector(il indexerLedgerForEval, genesisHash crypto.Digest, latestRound basics.Round, roundResources EvalForIndexerResources) indexerLedgerConnector {
return indexerLedgerConnector{
- il: il,
- genesisHash: genesisHash,
- latestRound: latestRound,
+ il: il,
+ genesisHash: genesisHash,
+ latestRound: latestRound,
+ roundResources: roundResources,
}
}
-// Returns all addresses referenced in `block`.
-func getBlockAddresses(block *bookkeeping.Block) map[basics.Address]struct{} {
- // Reserve a reasonable memory size for the map.
- res := make(map[basics.Address]struct{}, len(block.Payset)+2)
- res[block.FeeSink] = struct{}{}
- res[block.RewardsPool] = struct{}{}
-
- var refAddresses []basics.Address
- for _, stib := range block.Payset {
- getTxnAddresses(&stib.Txn, &refAddresses)
- for _, address := range refAddresses {
- res[address] = struct{}{}
- }
- }
-
- return res
-}
-
// EvalForIndexer evaluates a block without validation using the given `proto`.
// Return the state delta and transactions with modified apply data according to `proto`.
// This function is used by Indexer which modifies `proto` to retrieve the asset
// close amount for each transaction even when the real consensus parameters do not
// support it.
-func EvalForIndexer(il indexerLedgerForEval, block *bookkeeping.Block, proto config.ConsensusParams) (ledgercore.StateDelta, []transactions.SignedTxnInBlock, error) {
- ilc := makeIndexerLedgerConnector(il, block.GenesisHash(), block.Round()-1)
-
- eval, err := startEvaluator(
- ilc, block.BlockHeader, proto, len(block.Payset), false, false, 0)
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
- fmt.Errorf("EvalForIndexer() err: %w", err)
- }
-
- // Preload most needed accounts.
- {
- accountDataMap, err := il.LookupWithoutRewards(getBlockAddresses(block))
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
- fmt.Errorf("EvalForIndexer() err: %w", err)
- }
- base := eval.state.lookupParent.(*roundCowBase)
- for address, accountData := range accountDataMap {
- if accountData == nil {
- base.accounts[address] = basics.AccountData{}
- } else {
- base.accounts[address] = *accountData
- }
- }
- }
-
- paysetgroups, err := block.DecodePaysetGroups()
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
- fmt.Errorf("EvalForIndexer() err: %w", err)
- }
-
- for _, group := range paysetgroups {
- err = eval.TransactionGroup(group)
- if err != nil {
- return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
- fmt.Errorf("EvalForIndexer() err: %w", err)
- }
- }
-
- // Finally, process any pending end-of-block state changes.
- err = eval.endOfBlock()
+func EvalForIndexer(il indexerLedgerForEval, block *bookkeeping.Block, proto config.ConsensusParams, resources EvalForIndexerResources) (ledgercore.StateDelta, []transactions.SignedTxnInBlock, error) {
+ ilc := makeIndexerLedgerConnector(il, block.GenesisHash(), block.Round()-1, resources)
+
+ eval, err := internal.StartEvaluator(
+ ilc, block.BlockHeader,
+ internal.EvaluatorOptions{
+ PaysetHint: len(block.Payset),
+ ProtoParams: &proto,
+ Generate: false,
+ Validate: false,
+ })
if err != nil {
return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
fmt.Errorf("EvalForIndexer() err: %w", err)
}
- // here, in the EvalForIndexer, we don't want to call finalValidation(). This would
- // skip the calculation of the account totals in the state delta, which is a serious
- // issue if it were to be used by algod, but it's perfectly fine for the indexer since
- // it doesn't track any totals and therefore cannot calculate the new totals.
-
- return eval.state.deltas(), eval.block.Payset, nil
+ return eval.ProcessBlockForIndexer(block)
}
diff --git a/ledger/evalIndexer_test.go b/ledger/evalindexer_test.go
index 30b9222d5..76e4d2e42 100644
--- a/ledger/evalIndexer_test.go
+++ b/ledger/evalindexer_test.go
@@ -19,6 +19,7 @@ package ledger
import (
"errors"
"fmt"
+ "math/rand"
"testing"
"github.com/stretchr/testify/assert"
@@ -31,6 +32,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -86,7 +88,7 @@ func (il indexerLedgerForEvalImpl) LatestTotals() (totals ledgercore.AccountTota
func TestEvalForIndexerCustomProtocolParams(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisBalances, addrs, _ := newTestGenesis()
+ genesisBalances, addrs, _ := ledgertesting.NewTestGenesis()
var genHash crypto.Digest
crypto.RandBytes(genHash[:])
@@ -96,7 +98,7 @@ func TestEvalForIndexerCustomProtocolParams(t *testing.T) {
dbName := fmt.Sprintf("%s", t.Name())
cfg := config.GetDefaultLocal()
cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, true, InitState{
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
Block: block,
Accounts: genesisBalances.Balances,
GenesisHash: genHash,
@@ -176,9 +178,130 @@ func TestEvalForIndexerCustomProtocolParams(t *testing.T) {
latestRound: 0,
}
proto.EnableAssetCloseAmount = true
- _, modifiedTxns, err := EvalForIndexer(il, &block, proto)
+ _, modifiedTxns, err := EvalForIndexer(il, &block, proto, EvalForIndexerResources{})
require.NoError(t, err)
require.Equal(t, 4, len(modifiedTxns))
assert.Equal(t, uint64(70), modifiedTxns[3].AssetClosingAmount)
}
+
+// TestEvalForIndexerForExpiredAccounts tests that the EvalForIndexer function will correctly mark accounts offline
+func TestEvalForIndexerForExpiredAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ block, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
+ genesisBalances, "test", genHash)
+
+ dbName := fmt.Sprintf("%s", t.Name())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ Block: block,
+ Accounts: genesisBalances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ proto := config.Consensus[protocol.ConsensusFuture]
+
+ block = bookkeeping.MakeBlock(block.BlockHeader)
+
+ il := indexerLedgerForEvalImpl{
+ l: l,
+ latestRound: 0,
+ }
+
+ _, _, err = EvalForIndexer(il, &block, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+
+ badBlock := block
+ // First validate that bad block is fine if we dont touch it...
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+
+ // Introduce an unknown address, but this time the Eval function is called with parameters that
+ // don't necessarily mean that this will cause an error. Just that an empty address will be added
+ badBlock.ExpiredParticipationAccounts = append(badBlock.ExpiredParticipationAccounts, basics.Address{123})
+
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+
+ badBlock = block
+
+ // Now we add way too many accounts which will cause resetExpiredOnlineAccountsParticipationKeys() to fail
+ addressToCopy := addrs[0]
+
+ for i := 0; i < proto.MaxProposedExpiredOnlineAccounts+1; i++ {
+ badBlock.ExpiredParticipationAccounts = append(badBlock.ExpiredParticipationAccounts, addressToCopy)
+ }
+
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.Error(t, err)
+
+ // Sanity Check
+
+ badBlock = block
+
+ _, _, err = EvalForIndexer(il, &badBlock, proto, EvalForIndexerResources{})
+ require.NoError(t, err)
+}
+
+// Test that preloading data in cow base works as expected.
+func TestResourceCaching(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var address basics.Address
+ _, err := rand.Read(address[:])
+ require.NoError(t, err)
+
+ genesisInitState, _, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ genesisBalances := bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ }
+ l := newTestLedger(t, genesisBalances)
+
+ genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ block := bookkeeping.MakeBlock(genesisBlockHeader)
+
+ resources := EvalForIndexerResources{
+ Accounts: map[basics.Address]*basics.AccountData{
+ address: {
+ MicroAlgos: basics.MicroAlgos{Raw: 5},
+ },
+ },
+ Creators: map[Creatable]FoundAddress{
+ {Index: basics.CreatableIndex(6), Type: basics.AssetCreatable}: {Address: address, Exists: true},
+ {Index: basics.CreatableIndex(6), Type: basics.AppCreatable}: {Address: address, Exists: false},
+ },
+ }
+
+ ilc := makeIndexerLedgerConnector(indexerLedgerForEvalImpl{l: l, latestRound: basics.Round(0)}, block.GenesisHash(), block.Round()-1, resources)
+
+ {
+ accountData, rnd, err := ilc.LookupWithoutRewards(basics.Round(0), address)
+ require.NoError(t, err)
+ assert.Equal(t, basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: 5}}, accountData)
+ assert.Equal(t, basics.Round(0), rnd)
+ }
+ {
+ address, found, err := ilc.GetCreatorForRound(basics.Round(0), basics.CreatableIndex(6), basics.AssetCreatable)
+ require.NoError(t, err)
+ require.True(t, found)
+ assert.Equal(t, address, address)
+ }
+ {
+ _, found, err := ilc.GetCreatorForRound(basics.Round(0), basics.CreatableIndex(6), basics.AppCreatable)
+ require.NoError(t, err)
+ require.False(t, found)
+ }
+}
diff --git a/ledger/appcow.go b/ledger/internal/appcow.go
index 18a0f6f0a..9a136f4bf 100644
--- a/ledger/appcow.go
+++ b/ledger/internal/appcow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+//msgp: ignore storageAction
type storageAction uint64
const (
@@ -457,14 +458,8 @@ func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, globa
}
// MakeDebugBalances creates a ledger suitable for dryrun and debugger
-func MakeDebugBalances(l ledgerForCowBase, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
- base := &roundCowBase{
- l: l,
- rnd: round - 1,
- proto: config.Consensus[proto],
- accounts: make(map[basics.Address]basics.AccountData),
- creators: make(map[creatable]FoundAddress),
- }
+func MakeDebugBalances(l LedgerForCowBase, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
+ base := makeRoundCowBase(l, round-1, 0, basics.Round(0), config.Consensus[proto])
hdr := bookkeeping.BlockHeader{
Round: round,
diff --git a/ledger/appcow_test.go b/ledger/internal/appcow_test.go
index d6582c8bd..978854eb8 100644
--- a/ledger/appcow_test.go
+++ b/ledger/internal/appcow_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -184,7 +185,7 @@ func randomAddrApps(n int) ([]storagePtr, []basics.Address) {
aidx: basics.AppIndex(rand.Intn(100000) + 1),
global: rand.Intn(2) == 0,
}
- outa[i] = randomAddress()
+ outa[i] = ledgertesting.RandomAddress()
}
return out, outa
}
@@ -363,8 +364,8 @@ func TestCowBuildDelta(t *testing.T) {
a := require.New(t)
- creator := randomAddress()
- sender := randomAddress()
+ creator := ledgertesting.RandomAddress()
+ sender := ledgertesting.RandomAddress()
aidx := basics.AppIndex(2)
cow := roundCowState{}
@@ -941,7 +942,7 @@ func TestCowAllocated(t *testing.T) {
aidx := basics.AppIndex(1)
c := getCow([]modsData{})
- addr1 := getRandomAddress(a)
+ addr1 := ledgertesting.RandomAddress()
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr1: {storagePtr{aidx, false}: &storageDelta{action: allocAction}},
}
@@ -950,7 +951,7 @@ func TestCowAllocated(t *testing.T) {
// ensure other requests go down to roundCowParent
a.Panics(func() { c.allocated(addr1, aidx+1, false) })
- a.Panics(func() { c.allocated(getRandomAddress(a), aidx, false) })
+ a.Panics(func() { c.allocated(ledgertesting.RandomAddress(), aidx, false) })
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr1: {storagePtr{aidx, true}: &storageDelta{action: allocAction}},
@@ -959,7 +960,7 @@ func TestCowAllocated(t *testing.T) {
// ensure other requests go down to roundCowParent
a.Panics(func() { c.allocated(addr1, aidx+1, true) })
- a.Panics(func() { c.allocated(getRandomAddress(a), aidx, true) })
+ a.Panics(func() { c.allocated(ledgertesting.RandomAddress(), aidx, true) })
}
func TestCowGetCreator(t *testing.T) {
@@ -967,7 +968,7 @@ func TestCowGetCreator(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
@@ -990,7 +991,7 @@ func TestCowGetters(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
@@ -1008,11 +1009,11 @@ func TestCowGet(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- addr1 := getRandomAddress(a)
+ addr1 := ledgertesting.RandomAddress()
bre := basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: 100}}
c.mods.Accts.Upsert(addr1, bre)
@@ -1025,7 +1026,7 @@ func TestCowGet(t *testing.T) {
a.Equal(bre, bra)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.Get(getRandomAddress(a), true) })
+ a.Panics(func() { c.Get(ledgertesting.RandomAddress(), true) })
}
func TestCowGetKey(t *testing.T) {
@@ -1033,7 +1034,7 @@ func TestCowGetKey(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
@@ -1097,7 +1098,7 @@ func TestCowGetKey(t *testing.T) {
a.Equal(tv, val)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.GetKey(getRandomAddress(a), aidx, false, "lkey", 0) })
+ a.Panics(func() { c.GetKey(ledgertesting.RandomAddress(), aidx, false, "lkey", 0) })
a.Panics(func() { c.GetKey(addr, aidx+1, false, "lkey", 0) })
}
@@ -1106,7 +1107,7 @@ func TestCowSetKey(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1177,7 +1178,7 @@ func TestCowSetKey(t *testing.T) {
a.NoError(err)
// check local
- addr1 := getRandomAddress(a)
+ addr1 := ledgertesting.RandomAddress()
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr1: {
storagePtr{aidx, false}: &storageDelta{
@@ -1192,7 +1193,7 @@ func TestCowSetKey(t *testing.T) {
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.SetKey(getRandomAddress(a), aidx, false, key, tv, 0) })
+ a.Panics(func() { c.SetKey(ledgertesting.RandomAddress(), aidx, false, key, tv, 0) })
a.Panics(func() { c.SetKey(addr, aidx+1, false, key, tv, 0) })
}
@@ -1201,7 +1202,7 @@ func TestCowSetKeyVFuture(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1237,7 +1238,7 @@ func TestCowAccountIdx(t *testing.T) {
a := require.New(t)
l := emptyLedger{}
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1284,7 +1285,7 @@ func TestCowDelKey(t *testing.T) {
a := require.New(t)
- addr := getRandomAddress(a)
+ addr := ledgertesting.RandomAddress()
aidx := basics.AppIndex(1)
c := getCow([]modsData{
{addr, basics.CreatableIndex(aidx), basics.AppCreatable},
@@ -1327,6 +1328,6 @@ func TestCowDelKey(t *testing.T) {
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.DelKey(getRandomAddress(a), aidx, false, key, 0) })
+ a.Panics(func() { c.DelKey(ledgertesting.RandomAddress(), aidx, false, key, 0) })
a.Panics(func() { c.DelKey(addr, aidx+1, false, key, 0) })
}
diff --git a/ledger/applications.go b/ledger/internal/applications.go
index 5d4e8c3b7..fc18699b7 100644
--- a/ledger/applications.go
+++ b/ledger/internal/applications.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
diff --git a/ledger/internal/applications_test.go b/ledger/internal/applications_test.go
new file mode 100644
index 000000000..94efcef1a
--- /dev/null
+++ b/ledger/internal/applications_test.go
@@ -0,0 +1,353 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type creatableLocator struct {
+ cidx basics.CreatableIndex
+ ctype basics.CreatableType
+}
+type storeLocator struct {
+ addr basics.Address
+ aidx basics.AppIndex
+ global bool
+}
+type mockCowForLogicLedger struct {
+ rnd basics.Round
+ ts int64
+ cr map[creatableLocator]basics.Address
+ brs map[basics.Address]basics.AccountData
+ stores map[storeLocator]basics.TealKeyValue
+ tcs map[int]basics.CreatableIndex
+ txc uint64
+}
+
+func (c *mockCowForLogicLedger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
+ br, ok := c.brs[addr]
+ if !ok {
+ return basics.AccountData{}, fmt.Errorf("addr %s not in mock cow", addr.String())
+ }
+ return br, nil
+}
+
+func (c *mockCowForLogicLedger) GetCreatableID(groupIdx int) basics.CreatableIndex {
+ return c.tcs[groupIdx]
+}
+
+func (c *mockCowForLogicLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
+ addr, found := c.cr[creatableLocator{cidx, ctype}]
+ return addr, found, nil
+}
+
+func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
+ kv, ok := c.stores[storeLocator{addr, aidx, global}]
+ if !ok {
+ return basics.TealValue{}, false, fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
+ }
+ tv, found := kv[key]
+ return tv, found, nil
+}
+
+func (c *mockCowForLogicLedger) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
+ return transactions.EvalDelta{}, nil
+}
+
+func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
+ kv, ok := c.stores[storeLocator{addr, aidx, global}]
+ if !ok {
+ return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
+ }
+ kv[key] = value
+ c.stores[storeLocator{addr, aidx, global}] = kv
+ return nil
+}
+
+func (c *mockCowForLogicLedger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
+ kv, ok := c.stores[storeLocator{addr, aidx, global}]
+ if !ok {
+ return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
+ }
+ delete(kv, key)
+ c.stores[storeLocator{addr, aidx, global}] = kv
+ return nil
+}
+
+func (c *mockCowForLogicLedger) round() basics.Round {
+ return c.rnd
+}
+
+func (c *mockCowForLogicLedger) prevTimestamp() int64 {
+ return c.ts
+}
+
+func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
+ _, found := c.stores[storeLocator{addr, aidx, global}]
+ return found, nil
+}
+
+func (c *mockCowForLogicLedger) incTxnCount() {
+ c.txc++
+}
+
+func (c *mockCowForLogicLedger) txnCounter() uint64 {
+ return c.txc
+}
+
+func newCowMock(creatables []modsData) *mockCowForLogicLedger {
+ var m mockCowForLogicLedger
+ m.cr = make(map[creatableLocator]basics.Address, len(creatables))
+ for _, e := range creatables {
+ m.cr[creatableLocator{e.cidx, e.ctype}] = e.addr
+ }
+ return &m
+}
+
+func TestLogicLedgerMake(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ _, err := newLogicLedger(nil, 0)
+ a.Error(err)
+ a.Contains(err.Error(), "cannot make logic ledger for app index 0")
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+
+ c := &mockCowForLogicLedger{}
+ _, err = newLogicLedger(c, 0)
+ a.Error(err)
+ a.Contains(err.Error(), "cannot make logic ledger for app index 0")
+
+ _, err = newLogicLedger(c, aidx)
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", aidx))
+
+ c = newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+ a.Equal(aidx, l.aidx)
+ a.Equal(c, l.cow)
+}
+
+func TestLogicLedgerBalances(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ addr1 := ledgertesting.RandomAddress()
+ ble := basics.MicroAlgos{Raw: 100}
+ c.brs = map[basics.Address]basics.AccountData{addr1: {MicroAlgos: ble}}
+ bla, err := l.Balance(addr1)
+ a.NoError(err)
+ a.Equal(ble, bla)
+}
+
+func TestLogicLedgerGetters(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ round := basics.Round(1234)
+ c.rnd = round
+ ts := int64(11223344)
+ c.ts = ts
+
+ addr1 := ledgertesting.RandomAddress()
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {}}
+ a.Equal(aidx, l.ApplicationID())
+ a.Equal(round, l.Round())
+ a.Equal(ts, l.LatestTimestamp())
+ a.True(l.OptedIn(addr1, 0))
+ a.True(l.OptedIn(addr1, aidx))
+ a.False(l.OptedIn(addr, 0))
+ a.False(l.OptedIn(addr, aidx))
+}
+
+func TestLogicLedgerAsset(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ addr1 := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ assetIdx := basics.AssetIndex(2)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ _, _, err = l.AssetParams(basics.AssetIndex(aidx))
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("asset %d does not exist", aidx))
+
+ c.brs = map[basics.Address]basics.AccountData{
+ addr1: {AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}}},
+ }
+
+ ap, creator, err := l.AssetParams(assetIdx)
+ a.NoError(err)
+ a.Equal(addr1, creator)
+ a.Equal(uint64(1000), ap.Total)
+
+ _, err = l.AssetHolding(addr1, assetIdx)
+ a.Error(err)
+ a.Contains(err.Error(), "has not opted in to asset")
+
+ c.brs = map[basics.Address]basics.AccountData{
+ addr1: {
+ AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}},
+ Assets: map[basics.AssetIndex]basics.AssetHolding{assetIdx: {Amount: 99}},
+ },
+ }
+
+ ah, err := l.AssetHolding(addr1, assetIdx)
+ a.NoError(err)
+ a.Equal(uint64(99), ah.Amount)
+}
+
+func TestLogicLedgerGetKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ addr1 := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ assetIdx := basics.AssetIndex(2)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ _, ok, err := l.GetGlobal(basics.AppIndex(assetIdx), "gkey")
+ a.Error(err)
+ a.False(ok)
+ a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", assetIdx))
+
+ tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx + 1, true}: {"gkey": tv}}
+ val, ok, err := l.GetGlobal(aidx, "gkey")
+ a.Error(err)
+ a.False(ok)
+ a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
+
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
+ val, ok, err = l.GetGlobal(aidx, "gkey")
+ a.NoError(err)
+ a.True(ok)
+ a.Equal(tv, val)
+
+ // check local
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
+ val, ok, err = l.GetLocal(addr, aidx, "lkey", 0)
+ a.NoError(err)
+ a.True(ok)
+ a.Equal(tv, val)
+}
+
+func TestLogicLedgerSetKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
+ err = l.SetGlobal("gkey", tv)
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
+
+ tv2 := basics.TealValue{Type: basics.TealUintType, Uint: 2}
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
+ err = l.SetGlobal("gkey", tv2)
+ a.NoError(err)
+
+ // check local
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
+ err = l.SetLocal(addr, "lkey", tv2, 0)
+ a.NoError(err)
+}
+
+func TestLogicLedgerDelKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := ledgertesting.RandomAddress()
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ err = l.DelGlobal("gkey")
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
+
+ tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
+ err = l.DelGlobal("gkey")
+ a.NoError(err)
+
+ addr1 := ledgertesting.RandomAddress()
+ c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {"lkey": tv}}
+ err = l.DelLocal(addr1, "lkey", 0)
+ a.NoError(err)
+}
diff --git a/ledger/assetcow.go b/ledger/internal/assetcow.go
index ca35788dd..b28d09a7f 100644
--- a/ledger/assetcow.go
+++ b/ledger/internal/assetcow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"github.com/algorand/go-algorand/data/basics"
diff --git a/ledger/compactcert.go b/ledger/internal/compactcert.go
index 9d2f2d66f..2f90c8b22 100644
--- a/ledger/compactcert.go
+++ b/ledger/internal/compactcert.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"fmt"
diff --git a/ledger/compactcert_test.go b/ledger/internal/compactcert_test.go
index 27e466811..690d49375 100644
--- a/ledger/compactcert_test.go
+++ b/ledger/internal/compactcert_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"testing"
diff --git a/ledger/cow.go b/ledger/internal/cow.go
index 656f5f380..4bf546de9 100644
--- a/ledger/cow.go
+++ b/ledger/internal/cow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"errors"
diff --git a/ledger/cow_test.go b/ledger/internal/cow_test.go
index 27efbfb70..968df87a0 100644
--- a/ledger/cow_test.go
+++ b/ledger/internal/cow_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"testing"
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -96,7 +97,7 @@ func checkCow(t *testing.T, cow *roundCowState, accts map[basics.Address]basics.
require.Equal(t, d, data)
}
- d, err := cow.lookup(randomAddress())
+ d, err := cow.lookup(ledgertesting.RandomAddress())
require.NoError(t, err)
require.Equal(t, d, basics.AccountData{})
}
@@ -111,7 +112,7 @@ func applyUpdates(cow *roundCowState, updates ledgercore.AccountDeltas) {
func TestCowBalance(t *testing.T) {
partitiontest.PartitionTest(t)
- accts0 := randomAccounts(20, true)
+ accts0 := ledgertesting.RandomAccounts(20, true)
ml := mockLedger{balanceMap: accts0}
c0 := makeRoundCowState(
@@ -123,7 +124,7 @@ func TestCowBalance(t *testing.T) {
checkCow(t, c0, accts0)
checkCow(t, c1, accts0)
- updates1, accts1, _ := randomDeltas(10, accts0, 0)
+ updates1, accts1, _ := ledgertesting.RandomDeltas(10, accts0, 0)
applyUpdates(c1, updates1)
checkCow(t, c0, accts0)
checkCow(t, c1, accts1)
@@ -133,7 +134,7 @@ func TestCowBalance(t *testing.T) {
checkCow(t, c1, accts1)
checkCow(t, c2, accts1)
- updates2, accts2, _ := randomDeltas(10, accts1, 0)
+ updates2, accts2, _ := ledgertesting.RandomDeltas(10, accts1, 0)
applyUpdates(c2, updates2)
checkCow(t, c0, accts0)
checkCow(t, c1, accts1)
diff --git a/ledger/eval.go b/ledger/internal/eval.go
index 1ddda76c1..264e8d1df 100644
--- a/ledger/eval.go
+++ b/ledger/internal/eval.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package ledger
+package internal
import (
"context"
@@ -27,7 +27,6 @@ import (
"github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
@@ -38,35 +37,42 @@ import (
"github.com/algorand/go-algorand/util/execpool"
)
-// ErrNoSpace indicates insufficient space for transaction in block
-var ErrNoSpace = errors.New("block does not have space for transaction")
+// LedgerForCowBase represents subset of Ledger functionality needed for cow business
+type LedgerForCowBase interface {
+ BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
+ CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
+ LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error)
+ GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
+}
// ErrRoundZero is self-explanatory
var ErrRoundZero = errors.New("cannot start evaluator for round 0")
-// maxPaysetHint makes sure that we don't allocate too much memory up front
-// in the block evaluator, since there cannot reasonably be more than this
-// many transactions in a block.
-const maxPaysetHint = 20000
+// averageEncodedTxnSizeHint is an estimation for the encoded transaction size
+// which is used for preallocating memory upfront in the payset. Preallocating
+// helps to avoid re-allocating storage during the evaluation/validation which
+// is considerably slower.
+const averageEncodedTxnSizeHint = 150
// asyncAccountLoadingThreadCount controls how many go routines would be used
-// to load the account data before the eval() start processing individual
+// to load the account data before the Eval() start processing individual
// transaction group.
const asyncAccountLoadingThreadCount = 4
+// Creatable represent a single creatable object.
type creatable struct {
cindex basics.CreatableIndex
ctype basics.CreatableType
}
-// FoundAddress is a wrapper for an address and a boolean.
-type FoundAddress struct {
- Address basics.Address
- Exists bool
+// foundAddress is a wrapper for an address and a boolean.
+type foundAddress struct {
+ address basics.Address
+ exists bool
}
type roundCowBase struct {
- l ledgerForCowBase
+ l LedgerForCowBase
// The round number of the previous block, for looking up prior state.
rnd basics.Round
@@ -92,14 +98,26 @@ type roundCowBase struct {
accounts map[basics.Address]basics.AccountData
// Similar cache for asset/app creators.
- creators map[creatable]FoundAddress
+ creators map[creatable]foundAddress
+}
+
+func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, compactCertNextRnd basics.Round, proto config.ConsensusParams) *roundCowBase {
+ return &roundCowBase{
+ l: l,
+ rnd: rnd,
+ txnCount: txnCount,
+ compactCertNextRnd: compactCertNextRnd,
+ proto: proto,
+ accounts: make(map[basics.Address]basics.AccountData),
+ creators: make(map[creatable]foundAddress),
+ }
}
func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
creatable := creatable{cindex: cidx, ctype: ctype}
if foundAddress, ok := x.creators[creatable]; ok {
- return foundAddress.Address, foundAddress.Exists, nil
+ return foundAddress.address, foundAddress.exists, nil
}
address, exists, err := x.l.GetCreatorForRound(x.rnd, cidx, ctype)
@@ -108,7 +126,7 @@ func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.Creat
"roundCowBase.getCreator() cidx: %d ctype: %v err: %w", cidx, ctype, err)
}
- x.creators[creatable] = FoundAddress{Address: address, Exists: exists}
+ x.creators[creatable] = foundAddress{address: address, exists: exists}
return address, exists, nil
}
@@ -128,7 +146,7 @@ func (x *roundCowBase) lookup(addr basics.Address) (basics.AccountData, error) {
}
func (x *roundCowBase) checkDup(firstValid, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
- return x.l.CheckDup(x.proto, x.rnd+1, firstValid, lastValid, txid, TxLease{txl})
+ return x.l.CheckDup(x.proto, x.rnd+1, firstValid, lastValid, txid, txl)
}
func (x *roundCowBase) txnCounter() uint64 {
@@ -144,7 +162,7 @@ func (x *roundCowBase) blockHdr(r basics.Round) (bookkeeping.BlockHeader, error)
}
func (x *roundCowBase) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
- acct, _, err := x.l.LookupWithoutRewards(x.rnd, addr)
+ acct, err := x.lookup(addr)
if err != nil {
return false, err
}
@@ -163,7 +181,7 @@ func (x *roundCowBase) allocated(addr basics.Address, aidx basics.AppIndex, glob
// getKey gets the value for a particular key in some storage
// associated with an application globally or locally
func (x *roundCowBase) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- ad, _, err := x.l.LookupWithoutRewards(x.rnd, addr)
+ ad, err := x.lookup(addr)
if err != nil {
return basics.TealValue{}, false, err
}
@@ -193,7 +211,7 @@ func (x *roundCowBase) getKey(addr basics.Address, aidx basics.AppIndex, global
// getStorageCounts counts the storage types used by some account
// associated with an application globally or locally
func (x *roundCowBase) getStorageCounts(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error) {
- ad, _, err := x.l.LookupWithoutRewards(x.rnd, addr)
+ ad, err := x.lookup(addr)
if err != nil {
return basics.StateSchema{}, err
}
@@ -376,51 +394,53 @@ type BlockEvaluator struct {
proto config.ConsensusParams
genesisHash crypto.Digest
- block bookkeeping.Block
- blockTxBytes int
- specials transactions.SpecialAddresses
- maxTxnBytesPerBlock int
+ block bookkeeping.Block
+ blockTxBytes int
+ specials transactions.SpecialAddresses
blockGenerated bool // prevent repeated GenerateBlock calls
- l ledgerForEvaluator
+ l LedgerForEvaluator
+
+ maxTxnBytesPerBlock int
}
-type ledgerForEvaluator interface {
- ledgerForCowBase
+// LedgerForEvaluator defines the ledger interface needed by the evaluator.
+type LedgerForEvaluator interface {
+ LedgerForCowBase
GenesisHash() crypto.Digest
LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
- CompactCertVoters(basics.Round) (*VotersForRound, error)
+ CompactCertVoters(basics.Round) (*ledgercore.VotersForRound, error)
}
-// ledgerForCowBase represents subset of Ledger functionality needed for cow business
-type ledgerForCowBase interface {
- BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
- CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, TxLease) error
- LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error)
- GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
+// EvaluatorOptions defines the evaluator creation options
+type EvaluatorOptions struct {
+ PaysetHint int
+ Validate bool
+ Generate bool
+ MaxTxnBytesPerBlock int
+ ProtoParams *config.ConsensusParams
}
// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
// of the block that the caller is planning to evaluate. If the length of the
// payset being evaluated is known in advance, a paysetHint >= 0 can be
-// passed, avoiding unnecessary payset slice growth. The optional maxTxnBytesPerBlock parameter
-// provides a cap on the size of a single generated block size, when a non-zero value is passed.
-// If a value of zero or less is passed to maxTxnBytesPerBlock, the consensus MaxTxnBytesPerBlock would
-// be used instead.
-func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int) (*BlockEvaluator, error) {
- proto, ok := config.Consensus[hdr.CurrentProtocol]
- if !ok {
- return nil, protocol.Error(hdr.CurrentProtocol)
+// passed, avoiding unnecessary payset slice growth.
+func StartEvaluator(l LedgerForEvaluator, hdr bookkeeping.BlockHeader, evalOpts EvaluatorOptions) (*BlockEvaluator, error) {
+ var proto config.ConsensusParams
+ if evalOpts.ProtoParams == nil {
+ var ok bool
+ proto, ok = config.Consensus[hdr.CurrentProtocol]
+ if !ok {
+ return nil, protocol.Error(hdr.CurrentProtocol)
+ }
+ } else {
+ proto = *evalOpts.ProtoParams
}
- return startEvaluator(l, hdr, proto, paysetHint, true, true, maxTxnBytesPerBlock)
-}
-
-func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto config.ConsensusParams, paysetHint int, validate bool, generate bool, maxTxnBytesPerBlock int) (*BlockEvaluator, error) {
// if the caller did not provide a valid block size limit, default to the consensus params defaults.
- if maxTxnBytesPerBlock <= 0 || maxTxnBytesPerBlock > proto.MaxTxnBytesPerBlock {
- maxTxnBytesPerBlock = proto.MaxTxnBytesPerBlock
+ if evalOpts.MaxTxnBytesPerBlock <= 0 || evalOpts.MaxTxnBytesPerBlock > proto.MaxTxnBytesPerBlock {
+ evalOpts.MaxTxnBytesPerBlock = proto.MaxTxnBytesPerBlock
}
if hdr.Round == 0 {
@@ -438,22 +458,16 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto con
return nil, protocol.Error(prevHeader.CurrentProtocol)
}
- base := &roundCowBase{
- l: l,
- // round that lookups come from is previous block. We validate
- // the block at this round below, so underflow will be caught.
- // If we are not validating, we must have previously checked
- // an agreement.Certificate attesting that hdr is valid.
- rnd: hdr.Round - 1,
- txnCount: prevHeader.TxnCounter,
- proto: proto,
- accounts: make(map[basics.Address]basics.AccountData),
- creators: make(map[creatable]FoundAddress),
- }
+ // Round that lookups come from is previous block. We validate
+ // the block at this round below, so underflow will be caught.
+ // If we are not validating, we must have previously checked
+ // an agreement.Certificate attesting that hdr is valid.
+ base := makeRoundCowBase(
+ l, hdr.Round-1, prevHeader.TxnCounter, basics.Round(0), proto)
eval := &BlockEvaluator{
- validate: validate,
- generate: generate,
+ validate: evalOpts.Validate,
+ generate: evalOpts.Generate,
prevHeader: prevHeader,
block: bookkeeping.Block{BlockHeader: hdr},
specials: transactions.SpecialAddresses{
@@ -463,16 +477,17 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto con
proto: proto,
genesisHash: l.GenesisHash(),
l: l,
- maxTxnBytesPerBlock: maxTxnBytesPerBlock,
+ maxTxnBytesPerBlock: evalOpts.MaxTxnBytesPerBlock,
}
// Preallocate space for the payset so that we don't have to
// dynamically grow a slice (if evaluating a whole block).
- if paysetHint > 0 {
- if paysetHint > maxPaysetHint {
- paysetHint = maxPaysetHint
+ if evalOpts.PaysetHint > 0 {
+ maxPaysetHint := evalOpts.MaxTxnBytesPerBlock / averageEncodedTxnSizeHint
+ if evalOpts.PaysetHint > maxPaysetHint {
+ evalOpts.PaysetHint = maxPaysetHint
}
- eval.block.Payset = make([]transactions.SignedTxnInBlock, 0, paysetHint)
+ eval.block.Payset = make([]transactions.SignedTxnInBlock, 0, evalOpts.PaysetHint)
}
base.compactCertNextRnd = eval.prevHeader.CompactCert[protocol.CompactCertBasic].CompactCertNextRound
@@ -507,16 +522,16 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, proto con
// this is expected to be a no-op, but update the rewards on the rewards pool if it was configured to receive rewards ( unlike mainnet ).
incentivePoolData = incentivePoolData.WithUpdatedRewards(prevProto, eval.prevHeader.RewardsLevel)
- if generate {
+ if evalOpts.Generate {
if eval.proto.SupportGenesisHash {
eval.block.BlockHeader.GenesisHash = eval.genesisHash
}
eval.block.BlockHeader.RewardsState = eval.prevHeader.NextRewardsState(hdr.Round, proto, incentivePoolData.MicroAlgos, prevTotals.RewardUnits())
}
// set the eval state with the current header
- eval.state = makeRoundCowState(base, eval.block.BlockHeader, proto, eval.prevHeader.TimeStamp, prevTotals, paysetHint)
+ eval.state = makeRoundCowState(base, eval.block.BlockHeader, proto, eval.prevHeader.TimeStamp, prevTotals, evalOpts.PaysetHint)
- if validate {
+ if evalOpts.Validate {
err := eval.block.BlockHeader.PreCheck(eval.prevHeader)
if err != nil {
return nil, err
@@ -634,7 +649,7 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
var group transactions.TxGroup
for gi, txn := range txgroup {
- err := eval.testTransaction(txn, cow)
+ err := eval.TestTransaction(txn, cow)
if err != nil {
return err
}
@@ -666,10 +681,10 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
return nil
}
-// testTransaction performs basic duplicate detection and well-formedness checks
+// TestTransaction performs basic duplicate detection and well-formedness checks
// on a single transaction, but does not actually add the transaction to the block
// evaluator, or modify the block evaluator state in any other visible way.
-func (eval *BlockEvaluator) testTransaction(txn transactions.SignedTxn, cow *roundCowState) error {
+func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn, cow *roundCowState) error {
// Transaction valid (not expired)?
err := txn.Txn.Alive(eval.block)
if err != nil {
@@ -695,7 +710,7 @@ func (eval *BlockEvaluator) testTransaction(txn transactions.SignedTxn, cow *rou
// If the transaction cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged.
func (eval *BlockEvaluator) Transaction(txn transactions.SignedTxn, ad transactions.ApplyData) error {
- return eval.transactionGroup([]transactions.SignedTxnWithAD{
+ return eval.TransactionGroup([]transactions.SignedTxnWithAD{
{
SignedTxn: txn,
ApplyData: ad,
@@ -703,13 +718,6 @@ func (eval *BlockEvaluator) Transaction(txn transactions.SignedTxn, ad transacti
})
}
-// TransactionGroup tentatively adds a new transaction group as part of this block evaluation.
-// If the transaction group cannot be added to the block without violating some constraints,
-// an error is returned and the block evaluator state is unchanged.
-func (eval *BlockEvaluator) TransactionGroup(txads []transactions.SignedTxnWithAD) error {
- return eval.transactionGroup(txads)
-}
-
// prepareEvalParams creates a logic.EvalParams for each ApplicationCall
// transaction in the group
func (eval *BlockEvaluator) prepareEvalParams(txgroup []transactions.SignedTxnWithAD) []*logic.EvalParams {
@@ -757,10 +765,10 @@ func (eval *BlockEvaluator) prepareEvalParams(txgroup []transactions.SignedTxnWi
return res
}
-// transactionGroup tentatively executes a group of transactions as part of this block evaluation.
+// TransactionGroup tentatively executes a group of transactions as part of this block evaluation.
// If the transaction group cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged.
-func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWithAD) error {
+func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWithAD) error {
// Nothing to do if there are no transactions.
if len(txgroup) == 0 {
return nil
@@ -793,7 +801,7 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit
if eval.validate {
groupTxBytes += txib.GetEncodedLength()
if eval.blockTxBytes+groupTxBytes > eval.maxTxnBytesPerBlock {
- return ErrNoSpace
+ return ledgercore.ErrNoSpace
}
}
@@ -1035,13 +1043,17 @@ func (eval *BlockEvaluator) compactCertVotersAndTotal() (root crypto.Digest, tot
}
if voters != nil {
- root = voters.Tree.Root()
- total = voters.TotalWeight
+ root, total = voters.Tree.Root(), voters.TotalWeight
}
return
}
+// TestingTxnCounter - the method returns the current evaluator transaction counter. The method is used for testing purposes only.
+func (eval *BlockEvaluator) TestingTxnCounter() uint64 {
+ return eval.state.txnCounter()
+}
+
// Call "endOfBlock" after all the block's rewards and transactions are processed.
func (eval *BlockEvaluator) endOfBlock() error {
if eval.generate {
@@ -1057,6 +1069,8 @@ func (eval *BlockEvaluator) endOfBlock() error {
eval.block.TxnCounter = 0
}
+ eval.generateExpiredOnlineAccountsList()
+
if eval.proto.CompactCertRounds > 0 {
var basicCompactCert bookkeeping.CompactCertState
basicCompactCert.CompactCertVoters, basicCompactCert.CompactCertVotersTotal, err = eval.compactCertVotersAndTotal()
@@ -1071,12 +1085,18 @@ func (eval *BlockEvaluator) endOfBlock() error {
}
}
- return nil
-}
+ err := eval.validateExpiredOnlineAccounts()
+ if err != nil {
+ return err
+ }
+
+ err = eval.resetExpiredOnlineAccountsParticipationKeys()
+ if err != nil {
+ return err
+ }
-// FinalValidation does the validation that must happen after the block is built and all state updates are computed
-func (eval *BlockEvaluator) finalValidation() error {
eval.state.mods.OptimizeAllocatedMemory(eval.proto)
+
if eval.validate {
// check commitments
txnRoot, err := eval.block.PaysetCommit()
@@ -1115,7 +1135,132 @@ func (eval *BlockEvaluator) finalValidation() error {
}
}
- return eval.state.CalculateTotals()
+ err = eval.state.CalculateTotals()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// generateExpiredOnlineAccountsList creates the list of the expired participation accounts by traversing over the
+// modified accounts in the state deltas and testing if any of them needs to be reset.
+func (eval *BlockEvaluator) generateExpiredOnlineAccountsList() {
+ if !eval.generate {
+ return
+ }
+ // We are going to find the list of modified accounts and the
+ // current round that is being evaluated.
+ // Then we are going to go through each modified account and
+ // see if it meets the criteria for adding it to the expired
+ // participation accounts list.
+ modifiedAccounts := eval.state.mods.Accts.ModifiedAccounts()
+ currentRound := eval.Round()
+
+ expectedMaxNumberOfExpiredAccounts := eval.proto.MaxProposedExpiredOnlineAccounts
+
+ for i := 0; i < len(modifiedAccounts) && len(eval.block.ParticipationUpdates.ExpiredParticipationAccounts) < expectedMaxNumberOfExpiredAccounts; i++ {
+ accountAddr := modifiedAccounts[i]
+ acctDelta, found := eval.state.mods.Accts.Get(accountAddr)
+ if !found {
+ continue
+ }
+
+ // true if the account is online
+ isOnline := acctDelta.Status == basics.Online
+ // true if the accounts last valid round has passed
+ pastCurrentRound := acctDelta.VoteLastValid < currentRound
+
+ if isOnline && pastCurrentRound {
+ eval.block.ParticipationUpdates.ExpiredParticipationAccounts = append(
+ eval.block.ParticipationUpdates.ExpiredParticipationAccounts,
+ accountAddr,
+ )
+ }
+ }
+}
+
+// validateExpiredOnlineAccounts tests the expired online accounts specified in ExpiredParticipationAccounts, and verify
+// that they have all expired and need to be reset.
+func (eval *BlockEvaluator) validateExpiredOnlineAccounts() error {
+ if !eval.validate {
+ return nil
+ }
+ expectedMaxNumberOfExpiredAccounts := eval.proto.MaxProposedExpiredOnlineAccounts
+ lengthOfExpiredParticipationAccounts := len(eval.block.ParticipationUpdates.ExpiredParticipationAccounts)
+
+ // If the length of the array is strictly greater than our max then we have an error.
+ // This works when the expected number of accounts is zero (i.e. it is disabled) as well
+ if lengthOfExpiredParticipationAccounts > expectedMaxNumberOfExpiredAccounts {
+ return fmt.Errorf("length of expired accounts (%d) was greater than expected (%d)",
+ lengthOfExpiredParticipationAccounts, expectedMaxNumberOfExpiredAccounts)
+ }
+
+ // For security reasons, we need to make sure that all addresses in the expired participation accounts
+ // are unique. We make this map to keep track of previously seen address
+ addressSet := make(map[basics.Address]bool, lengthOfExpiredParticipationAccounts)
+
+ // Validate that all expired accounts meet the current criteria
+ currentRound := eval.Round()
+ for _, accountAddr := range eval.block.ParticipationUpdates.ExpiredParticipationAccounts {
+
+ if _, exists := addressSet[accountAddr]; exists {
+ // We shouldn't have duplicate addresses...
+ return fmt.Errorf("duplicate address found: %v", accountAddr)
+ }
+
+ // Record that we have seen this address
+ addressSet[accountAddr] = true
+
+ acctData, err := eval.state.lookup(accountAddr)
+ if err != nil {
+ return fmt.Errorf("endOfBlock was unable to retrieve account %v : %w", accountAddr, err)
+ }
+
+ // true if the account is online
+ isOnline := acctData.Status == basics.Online
+ // true if the accounts last valid round has passed
+ pastCurrentRound := acctData.VoteLastValid < currentRound
+
+ if !isOnline {
+ return fmt.Errorf("endOfBlock found %v was not online but %v", accountAddr, acctData.Status)
+ }
+
+ if !pastCurrentRound {
+ return fmt.Errorf("endOfBlock found %v round (%d) was not less than current round (%d)", accountAddr, acctData.VoteLastValid, currentRound)
+ }
+ }
+ return nil
+}
+
+// resetExpiredOnlineAccountsParticipationKeys after all transactions and rewards are processed, modify the accounts so that their status is offline
+func (eval *BlockEvaluator) resetExpiredOnlineAccountsParticipationKeys() error {
+ expectedMaxNumberOfExpiredAccounts := eval.proto.MaxProposedExpiredOnlineAccounts
+ lengthOfExpiredParticipationAccounts := len(eval.block.ParticipationUpdates.ExpiredParticipationAccounts)
+
+ // If the length of the array is strictly greater than our max then we have an error.
+ // This works when the expected number of accounts is zero (i.e. it is disabled) as well
+ if lengthOfExpiredParticipationAccounts > expectedMaxNumberOfExpiredAccounts {
+ return fmt.Errorf("length of expired accounts (%d) was greater than expected (%d)",
+ lengthOfExpiredParticipationAccounts, expectedMaxNumberOfExpiredAccounts)
+ }
+
+ for _, accountAddr := range eval.block.ParticipationUpdates.ExpiredParticipationAccounts {
+ acctData, err := eval.state.lookup(accountAddr)
+ if err != nil {
+ return fmt.Errorf("resetExpiredOnlineAccountsParticipationKeys was unable to retrieve account %v : %w", accountAddr, err)
+ }
+
+ // Reset the appropriate account data
+ acctData.ClearOnlineState()
+
+ // Update the account information
+ err = eval.state.Put(accountAddr, acctData)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
}
// GenerateBlock produces a complete block from the BlockEvaluator. This is
@@ -1125,7 +1270,7 @@ func (eval *BlockEvaluator) finalValidation() error {
// After a call to GenerateBlock, the BlockEvaluator can still be used to
// accept transactions. However, to guard against reuse, subsequent calls
// to GenerateBlock on the same BlockEvaluator will fail.
-func (eval *BlockEvaluator) GenerateBlock() (*ValidatedBlock, error) {
+func (eval *BlockEvaluator) GenerateBlock() (*ledgercore.ValidatedBlock, error) {
if !eval.generate {
logging.Base().Panicf("GenerateBlock() called but generate is false")
}
@@ -1139,15 +1284,7 @@ func (eval *BlockEvaluator) GenerateBlock() (*ValidatedBlock, error) {
return nil, err
}
- err = eval.finalValidation()
- if err != nil {
- return nil, err
- }
-
- vb := ValidatedBlock{
- blk: eval.block,
- delta: eval.state.deltas(),
- }
+ vb := ledgercore.MakeValidatedBlock(eval.block, eval.state.deltas())
eval.blockGenerated = true
proto, ok := config.Consensus[eval.block.BlockHeader.CurrentProtocol]
if !ok {
@@ -1200,18 +1337,19 @@ func (validator *evalTxValidator) run() {
}
}
+// Eval is the main evaluator entrypoint.
// used by Ledger.Validate() Ledger.AddBlock() Ledger.trackerEvalVerified()(accountUpdates.loadFromDisk())
//
-// Validate: eval(ctx, l, blk, true, txcache, executionPool, true)
-// AddBlock: eval(context.Background(), l, blk, false, txcache, nil, true)
-// tracker: eval(context.Background(), l, blk, false, txcache, nil, false)
-func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) {
- proto, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]
- if !ok {
- return ledgercore.StateDelta{}, protocol.Error(blk.BlockHeader.CurrentProtocol)
- }
-
- eval, err := startEvaluator(l, blk.BlockHeader, proto, len(blk.Payset), validate, false, 0)
+// Validate: Eval(ctx, l, blk, true, txcache, executionPool, true)
+// AddBlock: Eval(context.Background(), l, blk, false, txcache, nil, true)
+// tracker: Eval(context.Background(), l, blk, false, txcache, nil, false)
+func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) {
+ eval, err := StartEvaluator(l, blk.BlockHeader,
+ EvaluatorOptions{
+ PaysetHint: len(blk.Payset),
+ Validate: validate,
+ Generate: false,
+ })
if err != nil {
return ledgercore.StateDelta{}, err
}
@@ -1307,11 +1445,6 @@ transactionGroupLoop:
}
}
- err = eval.finalValidation()
- if err != nil {
- return ledgercore.StateDelta{}, err
- }
-
return eval.state.deltas(), nil
}
@@ -1331,19 +1464,9 @@ func maxAddressesInTxn(proto *config.ConsensusParams) int {
return 7 + proto.MaxAppTxnAccounts
}
-// Write the list of addresses referenced in `txn` to `out`. Addresses might repeat.
-func getTxnAddresses(txn *transactions.Transaction, out *[]basics.Address) {
- *out = (*out)[:0]
-
- *out = append(
- *out, txn.Sender, txn.Receiver, txn.CloseRemainderTo, txn.AssetSender,
- txn.AssetReceiver, txn.AssetCloseTo, txn.FreezeAccount)
- *out = append(*out, txn.ApplicationCallTxnFields.Accounts...)
-}
-
// loadAccounts loads the account data for the provided transaction group list. It also loads the feeSink account and add it to the first returned transaction group.
// The order of the transaction groups returned by the channel is identical to the one in the input array.
-func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) chan loadedTransactionGroup {
+func loadAccounts(ctx context.Context, l LedgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) chan loadedTransactionGroup {
outChan := make(chan loadedTransactionGroup, len(groups))
go func() {
// groupTask helps to organize the account loading for each transaction group.
@@ -1508,44 +1631,3 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g
}()
return outChan
}
-
-// Validate uses the ledger to validate block blk as a candidate next block.
-// It returns an error if blk is not the expected next block, or if blk is
-// not a valid block (e.g., it has duplicate transactions, overspends some
-// account, etc).
-func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ValidatedBlock, error) {
- delta, err := eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool)
- if err != nil {
- return nil, err
- }
-
- vb := ValidatedBlock{
- blk: blk,
- delta: delta,
- }
- return &vb, nil
-}
-
-// ValidatedBlock represents the result of a block validation. It can
-// be used to efficiently add the block to the ledger, without repeating
-// the work of applying the block's changes to the ledger state.
-type ValidatedBlock struct {
- blk bookkeeping.Block
- delta ledgercore.StateDelta
-}
-
-// Block returns the underlying Block for a ValidatedBlock.
-func (vb ValidatedBlock) Block() bookkeeping.Block {
- return vb.blk
-}
-
-// WithSeed returns a copy of the ValidatedBlock with a modified seed.
-func (vb ValidatedBlock) WithSeed(s committee.Seed) ValidatedBlock {
- newblock := vb.blk
- newblock.BlockHeader.Seed = s
-
- return ValidatedBlock{
- blk: newblock,
- delta: vb.delta,
- }
-}
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
new file mode 100644
index 000000000..816bd6e68
--- /dev/null
+++ b/ledger/internal/eval_blackbox_test.go
@@ -0,0 +1,1081 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal_test
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+var minFee basics.MicroAlgos
+
+func init() {
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
+}
+
+func TestBlockEvaluator(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+ newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[1],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ // Correct signature should work
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Broken signature should fail
+ stbad := st
+ st.Sig[2] ^= 8
+ txgroup := []transactions.SignedTxn{stbad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ // Repeat should fail
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // out of range should fail
+ btxn := txn
+ btxn.FirstValid++
+ btxn.LastValid += 2
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // bogus group should fail
+ btxn = txn
+ btxn.Group[1] = 1
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // mixed fields should fail
+ btxn = txn
+ btxn.XferAsset = 3
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
+ // err = eval.Transaction(st, transactions.ApplyData{})
+ // require.Error(t, err)
+
+ selfTxn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[2],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[2],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := selfTxn.Sign(keys[2])
+
+ // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
+ txgroup = []transactions.SignedTxn{stxn}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ t3 := txn
+ t3.Amount.Raw++
+ t4 := selfTxn
+ t4.Amount.Raw++
+
+ // a group without .Group should fail
+ s3 := t3.Sign(keys[0])
+ s4 := t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // Test a group that should work
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
+ t3.Group = crypto.HashObj(group)
+ t4.Group = t3.Group
+ s3 = t3.Sign(keys[0])
+ s4 = t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ // disagreement on Group id should fail
+ t4bad := t4
+ t4bad.Group[3] ^= 3
+ s4bad := t4bad.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4bad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // missing part of the group should fail
+ txgroup = []transactions.SignedTxn{s3}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+
+ accts := genesisInitState.Accounts
+ bal0 := accts[addrs[0]]
+ bal1 := accts[addrs[1]]
+ bal2 := accts[addrs[2]]
+
+ l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+
+ bal0new, err := l.Lookup(newBlock.Round(), addrs[0])
+ require.NoError(t, err)
+ bal1new, err := l.Lookup(newBlock.Round(), addrs[1])
+ require.NoError(t, err)
+ bal2new, err := l.Lookup(newBlock.Round(), addrs[2])
+ require.NoError(t, err)
+
+ require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
+ require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
+ require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
+}
+
+func TestRekeying(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // Pretend rekeying is supported
+ actual := config.Consensus[protocol.ConsensusCurrentVersion]
+ pretend := actual
+ pretend.SupportRekeying = true
+ config.Consensus[protocol.ConsensusCurrentVersion] = pretend
+ defer func() {
+ config.Consensus[protocol.ConsensusCurrentVersion] = actual
+ }()
+
+ // Bring up a ledger
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ // Make a new block
+ nextRound := l.Latest() + basics.Round(1)
+ genHash := l.GenesisHash()
+
+ // Test plan
+ // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
+ makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: nextRound,
+ LastValid: nextRound,
+ GenesisHash: genHash,
+ RekeyTo: rekeyto,
+ Note: []byte{uniq},
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: sender,
+ },
+ }
+ sig := signer.Sign(txn)
+ return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
+ }
+
+ tryBlock := func(stxns []transactions.SignedTxn) error {
+ // We'll make a block using the evaluator.
+ // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
+ // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
+ genesisHdr, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(genesisHdr)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ for _, stxn := range stxns {
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ return err
+ }
+ }
+ validatedBlock, err := eval.GenerateBlock()
+ if err != nil {
+ return err
+ }
+
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ defer backlogPool.Shutdown()
+ _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
+ return err
+ }
+
+ // Preamble transactions, which all of the blocks in this test will start with
+ // [A -> 0][0,A] (normal transaction)
+ // [A -> B][0,A] (rekey)
+ txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
+ txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
+
+ // Test 1: Do only good things
+ // (preamble)
+ // [A -> 0][B,B] (normal transaction using new key)
+ // [A -> A][B,B] (rekey back to A, transaction still signed by B)
+ // [A -> 0][0,A] (normal transaction again)
+ test1txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
+ makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
+ makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
+ }
+ err = tryBlock(test1txns)
+ require.NoError(t, err)
+
+ // Test 2: Use old key after rekeying
+ // (preamble)
+ // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
+ test2txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
+ }
+ err = tryBlock(test2txns)
+ require.Error(t, err)
+
+ // TODO: More tests
+}
+
+func testEvalAppGroup(t *testing.T, schema basics.StateSchema) (*internal.BlockEvaluator, basics.Address, error) {
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ blkHeader, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(blkHeader)
+
+ eval, err := internal.StartEvaluator(l, newBlock.BlockHeader, internal.EvaluatorOptions{
+ Generate: true,
+ Validate: true})
+ require.NoError(t, err)
+
+ ops, err := logic.AssembleString(`#pragma version 2
+ txn ApplicationID
+ bz create
+ byte "caller"
+ txn Sender
+ app_global_put
+ b ok
+create:
+ byte "creator"
+ txn Sender
+ app_global_put
+ok:
+ int 1`)
+ require.NoError(t, err, ops.Errors)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 2\nint 1")
+ require.NoError(t, err)
+ clear := ops.Program
+
+ genHash := l.GenesisHash()
+ header := transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ }
+ appcall1 := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: header,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ GlobalStateSchema: schema,
+ ApprovalProgram: approval,
+ ClearStateProgram: clear,
+ },
+ }
+
+ appcall2 := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: header,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: 1,
+ },
+ }
+
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(appcall1), crypto.HashObj(appcall2)}
+ appcall1.Group = crypto.HashObj(group)
+ appcall2.Group = crypto.HashObj(group)
+ stxn1 := appcall1.Sign(keys[0])
+ stxn2 := appcall2.Sign(keys[0])
+
+ g := []transactions.SignedTxnWithAD{
+ {
+ SignedTxn: stxn1,
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
+ "creator": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
+ },
+ ApplicationID: 1,
+ },
+ },
+ {
+ SignedTxn: stxn2,
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
+ "caller": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
+ }},
+ },
+ }
+ txgroup := []transactions.SignedTxn{stxn1, stxn2}
+ err = eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return eval, addrs[0], err
+ }
+ err = eval.TransactionGroup(g)
+ return eval, addrs[0], err
+}
+
+// TestEvalAppStateCountsWithTxnGroup ensures txns in a group can't violate app state schema limits
+// the test ensures that
+// commitToParent -> applyChild copies child's cow state usage counts into parent
+// and the usage counts correctly propagated from parent cow to child cow and back
+func TestEvalAppStateCountsWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ _, _, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 1})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "store bytes count 2 exceeds schema bytes count 1")
+}
+
+// TestEvalAppAllocStateWithTxnGroup ensures roundCowState.deltas and applyStorageDelta
+// produce correct results when a txn group has storage allocate and storage update actions
+func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ eval, addr, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 2})
+ require.NoError(t, err)
+
+ vb, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ deltas := vb.Delta()
+
+ ad, _ := deltas.Accts.Get(addr)
+ state := ad.AppParams[1].GlobalState
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["caller"])
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func nextBlock(t testing.TB, ledger *ledger.Ledger, generate bool, protoParams *config.ConsensusParams) *internal.BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
+ Generate: generate,
+ Validate: false,
+ ProtoParams: protoParams,
+ })
+ require.NoError(t, err)
+ return eval
+}
+
+func fillDefaults(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
+ if txn.GenesisHash.IsZero() {
+ txn.GenesisHash = ledger.GenesisHash()
+ }
+ if txn.FirstValid == 0 {
+ txn.FirstValid = eval.Round()
+ }
+
+ txn.FillDefaults(ledger.GenesisProto())
+}
+
+func txns(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) {
+ t.Helper()
+ for _, txn1 := range txns {
+ txn(t, ledger, eval, txn1)
+ }
+}
+
+func txn(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
+ t.Helper()
+ fillDefaults(t, ledger, eval, txn)
+ stxn := txn.SignedTxn()
+ err := eval.TestTransactionGroup([]transactions.SignedTxn{stxn})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ if len(problem) == 1 {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ require.Len(t, problem, 0)
+}
+
+func txgroup(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
+ t.Helper()
+ for _, txn := range txns {
+ fillDefaults(t, ledger, eval, txn)
+ }
+ txgroup := txntest.SignedTxns(txns...)
+
+ err := eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return err
+ }
+
+ err = eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
+ return err
+}
+
+func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
+ genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, consensusVersion)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ protoParams := config.Consensus[consensusVersion]
+ eval := nextBlock(t, l, false, &protoParams)
+
+ appcall1 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ GlobalStateSchema: schema,
+ ApprovalProgram: approvalProgram,
+ }
+
+ appcall2 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ appcall3 := txntest.Txn{
+ Sender: addrs[1],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ return txgroup(t, l, eval, &appcall1, &appcall2, &appcall3)
+}
+
+// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
+// budgets in a group txn and return an error if the budget is exceeded
+func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ source := func(n int, m int) string {
+ return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
+ strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
+ }
+
+ params := []protocol.ConsensusVersion{
+ protocol.ConsensusV29,
+ protocol.ConsensusFuture,
+ }
+
+ cases := []struct {
+ prog string
+ isSuccessV29 bool
+ isSuccessVFuture bool
+ expectedErrorV29 string
+ expectedErrorVFuture string
+ }{
+ {source(5, 47), true, true,
+ "",
+ ""},
+ {source(5, 48), false, true,
+ "pc=157 dynamic cost budget exceeded, executing pushint: remaining budget is 700 but program cost was 701",
+ ""},
+ {source(16, 17), false, true,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
+ ""},
+ {source(16, 18), false, false,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
+ "pc= 78 dynamic cost budget exceeded, executing pushint: remaining budget is 2100 but program cost was 2101"},
+ }
+
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
+ if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorV29)
+ } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
+ }
+ })
+ }
+ }
+}
+
+// endBlock completes the block being created, returns the ValidatedBlock for inspection
+func endBlock(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator) *ledgercore.ValidatedBlock {
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ return validatedBlock
+}
+
+func TestRewardsInAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
+
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ eval := nextBlock(t, l, true, nil)
+ endBlock(t, l, eval)
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txn(t, l, eval, &payTxn)
+ vb, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ payInBlock := vb.Block().Payset[0]
+ require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
+ require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+}
+
+func TestMinBalanceChanges(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Manager: addrs[1],
+ Reserve: addrs[2],
+ Freeze: addrs[3],
+ Clawback: addrs[4],
+ },
+ }
+
+ const expectedID basics.AssetIndex = 1
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[5],
+ }
+
+ ad0init, err := l.Lookup(l.Latest(), addrs[0])
+ require.NoError(t, err)
+ ad5init, err := l.Lookup(l.Latest(), addrs[5])
+ require.NoError(t, err)
+
+ eval := nextBlock(t, l, true, nil)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ endBlock(t, l, eval)
+
+ ad0new, err := l.Lookup(l.Latest(), addrs[0])
+ require.NoError(t, err)
+ ad5new, err := l.Lookup(l.Latest(), addrs[5])
+ require.NoError(t, err)
+
+ proto := l.GenesisProto()
+ // Check balance and min balance requirement changes
+ require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
+ require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[1], // The manager, not the creator
+ ConfigAsset: expectedID,
+ }
+
+ eval = nextBlock(t, l, true, nil)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ endBlock(t, l, eval)
+
+ ad0final, err := l.Lookup(l.Latest(), addrs[0])
+ require.NoError(t, err)
+ ad5final, err := l.Lookup(l.Latest(), addrs[5])
+ require.NoError(t, err)
+ // Check we got our balance "back"
+ require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
+ require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
+}
+
+// Test that ModifiedAssetHoldings in StateDelta is set correctly.
+func TestModifiedAssetHoldings(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ const assetid basics.AssetIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ Fee: 2000,
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Decimals: 0,
+ Manager: addrs[0],
+ Reserve: addrs[0],
+ Freeze: addrs[0],
+ Clawback: addrs[0],
+ },
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ Fee: 2000,
+ XferAsset: assetid,
+ AssetAmount: 0,
+ AssetReceiver: addrs[1],
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ vb := endBlock(t, l, eval)
+
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[0],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[1],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ Fee: 1000,
+ XferAsset: assetid,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ Fee: 1000,
+ ConfigAsset: assetid,
+ }
+
+ eval = nextBlock(t, l, true, nil)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ vb = endBlock(t, l, eval)
+
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[0],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[1],
+ Asset: assetid,
+ }
+ created, ok := vb.Delta().ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+}
+
+// Test that ModifiedAppLocalStates in StateDelta is set correctly.
+func TestModifiedAppLocalStates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ const appid basics.AppIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "int 1",
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appid,
+ OnCompletion: transactions.OptInOC,
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ vb := endBlock(t, l, eval)
+
+ assert.Len(t, vb.Delta().ModifiedAppLocalStates, 1)
+ {
+ aa := ledgercore.AccountApp{
+ Address: addrs[1],
+ App: appid,
+ }
+ created, ok := vb.Delta().ModifiedAppLocalStates[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+
+ optOutTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appid,
+ OnCompletion: transactions.CloseOutOC,
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appid,
+ OnCompletion: transactions.DeleteApplicationOC,
+ }
+
+ eval = nextBlock(t, l, true, nil)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ vb = endBlock(t, l, eval)
+
+ assert.Len(t, vb.Delta().ModifiedAppLocalStates, 1)
+ {
+ aa := ledgercore.AccountApp{
+ Address: addrs[1],
+ App: appid,
+ }
+ created, ok := vb.Delta().ModifiedAppLocalStates[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+}
+
+// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator
+// and do not cause any MaximumMinimumBalance problems
+func TestAppInsMinBalance(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ const appid basics.AppIndex = 1
+
+ maxAppsOptedIn := config.Consensus[protocol.ConsensusFuture].MaxAppsOptedIn
+ require.Greater(t, maxAppsOptedIn, 0)
+ maxAppsCreated := config.Consensus[protocol.ConsensusFuture].MaxAppsCreated
+ require.Greater(t, maxAppsCreated, 0)
+ maxLocalSchemaEntries := config.Consensus[protocol.ConsensusFuture].MaxLocalSchemaEntries
+ require.Greater(t, maxLocalSchemaEntries, uint64(0))
+
+ txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ appsCreated := make(map[basics.Address]int, len(addrs)-1)
+
+ acctIdx := 0
+ for i := 0; i < maxAppsOptedIn; i++ {
+ creator := addrs[acctIdx]
+ createTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: creator,
+ ApprovalProgram: "int 1",
+ LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
+ Note: ledgertesting.RandomNote(),
+ }
+ txnsCreate = append(txnsCreate, &createTxn)
+ count := appsCreated[creator]
+ count++
+ appsCreated[creator] = count
+ if count == maxAppsCreated {
+ acctIdx++
+ }
+
+ optInTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[9],
+ ApplicationID: appid + basics.AppIndex(i),
+ OnCompletion: transactions.OptInOC,
+ }
+ txnsOptIn = append(txnsOptIn, &optInTxn)
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txns1 := append(txnsCreate, txnsOptIn...)
+ txns(t, l, eval, txns1...)
+ vb := endBlock(t, l, eval)
+ require.Len(t, vb.Delta().ModifiedAppLocalStates, 50)
+}
+
+// TestGhostTransactions confirms that accounts that don't even exist
+// can be the Sender in some situations. If some other transaction
+// covers the fee, and the transaction itself does not require an
+// asset or a min balance, it's fine.
+func TestGhostTransactions(t *testing.T) {
+ t.Skip("Behavior should be changed so test passes.")
+
+ /*
+ I think we have a behavior we should fix. I’m going to call these
+ transactions where the Sender has no account and the fee=0 “ghostâ€
+ transactions. In a ghost transaction, we still call balances.Move to
+ “pay†the fee. Further, Move does not short-circuit a Move of 0 (for
+ good reason, allowing compounding). Therefore, in Move, we do rewards
+ processing on the “ghost†account. That causes us to want to write a
+ new accountdata for them. But if we do that, the minimum balance
+ checker will catch it, and kill the transaction because the ghost isn’t
+ allowed to have a balance of 0. I don’t think we can short-circuit
+ Move(0) because a zero pay is a known way to get your rewards
+ actualized. Instead, I advocate that we short-circuit the call to Move
+ for 0 fees.
+
+ // move fee to pool
+ if !tx.Fee.IsZero() {
+ err = balances.Move(tx.Sender, eval.specials.FeeSink, tx.Fee, &ad.SenderRewards, nil)
+ if err != nil {
+ return
+ }
+ }
+
+ I think this must be controlled by consensus upgrade, but I would love
+ to be told I’m wrong. The other option is to outlaw these
+ transactions, but even that requires changing code if we want to be
+ exactly correct, because they are currently allowed when there are no
+ rewards to get paid out (as would happen in a new network, or if we
+ stop participation rewards - notice that this test only fails on the
+ 4th attempt, once rewards have accumulated).
+
+ Will suggested that we could treat Ghost accounts as non-partipating.
+ Maybe that would allow the Move code to avoid trying to update
+ accountdata.
+ */
+
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+
+ l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ asaIndex := basics.AssetIndex(1)
+
+ asa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1000000,
+ Decimals: 3,
+ UnitName: "oz",
+ AssetName: "Gold",
+ URL: "https://gold.rush/",
+ Clawback: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
+ Freeze: basics.Address{0x0f, 0x0e, 0xe, 0xe},
+ Manager: basics.Address{0x0a, 0x0a, 0xe},
+ },
+ }
+
+ eval := nextBlock(t, l, true, nil)
+ txn(t, l, eval, &asa)
+ endBlock(t, l, eval)
+
+ benefactor := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[0],
+ Fee: 2000,
+ }
+
+ ghost := basics.Address{0x01}
+ ephemeral := []txntest.Txn{
+ {
+ Type: "pay",
+ Amount: 0,
+ Sender: ghost,
+ Receiver: ghost,
+ Fee: 0,
+ },
+ {
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: ghost,
+ AssetReceiver: basics.Address{0x02},
+ XferAsset: basics.AssetIndex(1),
+ Fee: 0,
+ },
+ {
+ Type: "axfer",
+ AssetAmount: 0,
+ Sender: basics.Address{0x0c, 0x0b, 0x0a, 0x0c},
+ AssetReceiver: addrs[0],
+ AssetSender: addrs[1],
+ XferAsset: asaIndex,
+ Fee: 0,
+ },
+ {
+ Type: "afrz",
+ Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: true,
+ Fee: 0,
+ },
+ {
+ Type: "afrz",
+ Sender: basics.Address{0x0f, 0x0e, 0xe, 0xe},
+ FreezeAccount: addrs[0], // creator, therefore is opted in
+ FreezeAsset: asaIndex,
+ AssetFrozen: false,
+ Fee: 0,
+ },
+ }
+
+ for i, e := range ephemeral {
+ eval = nextBlock(t, l, true, nil)
+ err := txgroup(t, l, eval, &benefactor, &e)
+ require.NoError(t, err, "i=%d %s", i, e.Type)
+ endBlock(t, l, eval)
+ }
+}
diff --git a/ledger/internal/eval_test.go b/ledger/internal/eval_test.go
new file mode 100644
index 000000000..c3bae4613
--- /dev/null
+++ b/ledger/internal/eval_test.go
@@ -0,0 +1,1030 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/compactcert"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+var minFee basics.MicroAlgos
+
+func init() {
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
+}
+
+func TestBlockEvaluatorFeeSink(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, _, _ := ledgertesting.Genesis(10)
+
+ genesisBalances := bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ }
+ l := newTestLedger(t, genesisBalances)
+
+ genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+ newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+ require.Equal(t, eval.specials.FeeSink, testSinkAddr)
+}
+
+func TestPrepareEvalParams(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ eval := BlockEvaluator{
+ prevHeader: bookkeeping.BlockHeader{
+ TimeStamp: 1234,
+ Round: 2345,
+ },
+ }
+
+ params := []config.ConsensusParams{
+ {Application: true, MaxAppProgramCost: 700},
+ config.Consensus[protocol.ConsensusV29],
+ config.Consensus[protocol.ConsensusFuture],
+ }
+
+ // Create some sample transactions
+ payment := txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: basics.Address{1, 2, 3, 4},
+ Receiver: basics.Address{4, 3, 2, 1},
+ Amount: 100,
+ }.SignedTxnWithAD()
+
+ appcall1 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: basics.Address{1, 2, 3, 4},
+ ApplicationID: basics.AppIndex(1),
+ }.SignedTxnWithAD()
+
+ appcall2 := appcall1
+ appcall2.SignedTxn.Txn.ApplicationCallTxnFields.ApplicationID = basics.AppIndex(2)
+
+ type evalTestCase struct {
+ group []transactions.SignedTxnWithAD
+
+ // indicates if prepareAppEvaluators should return a non-nil
+ // appTealEvaluator for the txn at index i
+ expected []bool
+
+ numAppCalls int
+ // Used for checking transitive pointer equality in app calls
+ // If there are no app calls in the group, it is set to -1
+ firstAppCallIndex int
+ }
+
+ // Create some groups with these transactions
+ cases := []evalTestCase{
+ {[]transactions.SignedTxnWithAD{payment}, []bool{false}, 0, -1},
+ {[]transactions.SignedTxnWithAD{appcall1}, []bool{true}, 1, 0},
+ {[]transactions.SignedTxnWithAD{payment, payment}, []bool{false, false}, 0, -1},
+ {[]transactions.SignedTxnWithAD{appcall1, payment}, []bool{true, false}, 1, 0},
+ {[]transactions.SignedTxnWithAD{payment, appcall1}, []bool{false, true}, 1, 1},
+ {[]transactions.SignedTxnWithAD{appcall1, appcall2}, []bool{true, true}, 2, 0},
+ {[]transactions.SignedTxnWithAD{appcall1, appcall2, appcall1}, []bool{true, true, true}, 3, 0},
+ {[]transactions.SignedTxnWithAD{payment, appcall1, payment}, []bool{false, true, false}, 1, 1},
+ {[]transactions.SignedTxnWithAD{appcall1, payment, appcall2}, []bool{true, false, true}, 2, 0},
+ }
+
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ eval.proto = param
+ res := eval.prepareEvalParams(testCase.group)
+ require.Equal(t, len(res), len(testCase.group))
+
+ // Compute the expected transaction group without ApplyData for
+ // the test case
+ expGroupNoAD := make([]transactions.SignedTxn, len(testCase.group))
+ for k := range testCase.group {
+ expGroupNoAD[k] = testCase.group[k].SignedTxn
+ }
+
+ // Ensure non app calls have a nil evaluator, and that non-nil
+ // evaluators point to the right transactions and values
+ for k, present := range testCase.expected {
+ if present {
+ require.NotNil(t, res[k])
+ require.NotNil(t, res[k].PastSideEffects)
+ require.Equal(t, res[k].GroupIndex, uint64(k))
+ require.Equal(t, res[k].TxnGroup, expGroupNoAD)
+ require.Equal(t, *res[k].Proto, eval.proto)
+ require.Equal(t, *res[k].Txn, testCase.group[k].SignedTxn)
+ require.Equal(t, res[k].MinTealVersion, res[testCase.firstAppCallIndex].MinTealVersion)
+ require.Equal(t, res[k].PooledApplicationBudget, res[testCase.firstAppCallIndex].PooledApplicationBudget)
+ if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusV29]) {
+ require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost))
+ } else if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusFuture]) {
+ require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost*testCase.numAppCalls))
+ }
+ } else {
+ require.Nil(t, res[k])
+ }
+ }
+ })
+ }
+ }
+}
+
+func TestCowCompactCert(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var certRnd basics.Round
+ var certType protocol.CompactCertType
+ var cert compactcert.Cert
+ var atRound basics.Round
+ var validate bool
+ accts0 := ledgertesting.RandomAccounts(20, true)
+ blocks := make(map[basics.Round]bookkeeping.BlockHeader)
+ blockErr := make(map[basics.Round]error)
+ ml := mockLedger{balanceMap: accts0, blocks: blocks, blockErr: blockErr}
+ c0 := makeRoundCowState(
+ &ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
+ 0, ledgercore.AccountTotals{}, 0)
+
+ certType = protocol.CompactCertType(1234) // bad cert type
+ err := c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // no certRnd block
+ certType = protocol.CompactCertBasic
+ noBlockErr := errors.New("no block")
+ blockErr[3] = noBlockErr
+ certRnd = 3
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // no votersRnd block
+ // this is slightly a mess of things that don't quite line up with likely usage
+ validate = true
+ var certHdr bookkeeping.BlockHeader
+ certHdr.CurrentProtocol = "TestCowCompactCert"
+ certHdr.Round = 1
+ proto := config.Consensus[certHdr.CurrentProtocol]
+ proto.CompactCertRounds = 2
+ config.Consensus[certHdr.CurrentProtocol] = proto
+ blocks[certHdr.Round] = certHdr
+
+ certHdr.Round = 15
+ blocks[certHdr.Round] = certHdr
+ certRnd = certHdr.Round
+ blockErr[13] = noBlockErr
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // validate fail
+ certHdr.Round = 1
+ certRnd = certHdr.Round
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.Error(t, err)
+
+ // fall through to no err
+ validate = false
+ err = c0.compactCert(certRnd, certType, cert, atRound, validate)
+ require.NoError(t, err)
+
+ // 100% coverage
+}
+
+// a couple trivial tests that don't need setup
+// see TestBlockEvaluator for more
+func TestTestTransactionGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var txgroup []transactions.SignedTxn
+ eval := BlockEvaluator{}
+ err := eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err) // nothing to do, no problem
+
+ eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
+ txgroup = make([]transactions.SignedTxn, eval.proto.MaxTxGroupSize+1)
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err) // too many
+}
+
+// test BlockEvaluator.transactionGroup()
+// some trivial checks that require no setup
+func TestPrivateTransactionGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var txgroup []transactions.SignedTxnWithAD
+ eval := BlockEvaluator{}
+ err := eval.TransactionGroup(txgroup)
+ require.NoError(t, err) // nothing to do, no problem
+
+ eval.proto = config.Consensus[protocol.ConsensusCurrentVersion]
+ txgroup = make([]transactions.SignedTxnWithAD, eval.proto.MaxTxGroupSize+1)
+ err = eval.TransactionGroup(txgroup)
+ require.Error(t, err) // too many
+}
+
+// BlockEvaluator.workaroundOverspentRewards() fixed a couple issues on testnet.
+// This is now part of history and has to be re-created when running catchup on testnet. So, test to ensure it keeps happenning.
+func TestTestnetFixup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ eval := &BlockEvaluator{}
+ var rewardPoolBalance basics.AccountData
+ rewardPoolBalance.MicroAlgos.Raw = 1234
+ var headerRound basics.Round
+ testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
+
+ // not a fixup round, no change
+ headerRound = 1
+ poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, rewardPoolBalance, poolOld)
+ require.NoError(t, err)
+
+ eval.genesisHash = testnetGenesisHash
+ eval.genesisHash[3]++
+
+ specialRounds := []basics.Round{1499995, 2926564}
+ for _, headerRound = range specialRounds {
+ poolOld, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, rewardPoolBalance, poolOld)
+ require.NoError(t, err)
+ }
+
+ for _, headerRound = range specialRounds {
+ testnetFixupExecution(t, headerRound, 20000000000)
+ }
+ // do all the setup and do nothing for not a special round
+ testnetFixupExecution(t, specialRounds[0]+1, 0)
+}
+
+func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uint64) {
+ testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA")
+ // big setup so we can move some algos
+ // boilerplate like TestBlockEvaluator, but pretend to be testnet
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+ genesisInitState.Block.BlockHeader.GenesisHash = testnetGenesisHash
+ genesisInitState.Block.BlockHeader.GenesisID = "testnet"
+ genesisInitState.GenesisHash = testnetGenesisHash
+
+ rewardPoolBalance := genesisInitState.Accounts[testPoolAddr]
+ nextPoolBalance := rewardPoolBalance.MicroAlgos.Raw + poolBonus
+
+ l := newTestLedger(t, bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ })
+ l.blocks[0] = genesisInitState.Block
+ l.genesisHash = genesisInitState.GenesisHash
+
+ newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // won't work before funding bank
+ if poolBonus > 0 {
+ _, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Error(t, err)
+ }
+
+ bankAddr, _ := basics.UnmarshalChecksumAddress("GD64YIY3TWGDMCNPP553DZPPR6LDUSFQOIJVFDPPXWEG3FVOJCCDBBHU5A")
+
+ // put some algos in the bank so that fixup can pull from this account
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: testnetGenesisHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: bankAddr,
+ Amount: basics.MicroAlgos{Raw: 20000000000 * 10},
+ },
+ }
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound)
+ require.Equal(t, nextPoolBalance, poolOld.MicroAlgos.Raw)
+ require.NoError(t, err)
+}
+
+// newTestGenesis creates a bunch of accounts, splits up 10B algos
+// between them and the rewardspool and feesink, and gives out the
+// addresses and secrets it creates to enable tests. For special
+// scenarios, manipulate these return values before using newTestLedger.
+func newTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
+ // irrelevant, but deterministic
+ sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
+ if err != nil {
+ panic(err)
+ }
+ rewards, err := basics.UnmarshalChecksumAddress("242H5OXHUEBYCGGWB3CQ6AZAMQB5TMCWJGHCGQOZPEIVQJKOO7NZXUXDQA")
+ if err != nil {
+ panic(err)
+ }
+
+ const count = 10
+ addrs := make([]basics.Address, count)
+ secrets := make([]*crypto.SignatureSecrets, count)
+ accts := make(map[basics.Address]basics.AccountData)
+
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(count+2)
+
+ for i := 0; i < count; i++ {
+ // Create deterministic addresses, so that output stays the same, run to run.
+ var seed crypto.Seed
+ seed[0] = byte(i)
+ secrets[i] = crypto.GenerateSignatureSecrets(seed)
+ addrs[i] = basics.Address(secrets[i].SignatureVerifier)
+
+ adata := basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+ accts[addrs[i]] = adata
+ }
+
+ accts[sink] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ Status: basics.NotParticipating,
+ }
+
+ accts[rewards] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+
+ genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
+
+ return genBalances, addrs, secrets
+}
+
+type evalTestLedger struct {
+ blocks map[basics.Round]bookkeeping.Block
+ roundBalances map[basics.Round]map[basics.Address]basics.AccountData
+ genesisHash crypto.Digest
+ feeSink basics.Address
+ rewardsPool basics.Address
+ latestTotals ledgercore.AccountTotals
+}
+
+// newTestLedger creates a in memory Ledger that is as realistic as
+// possible. It has Rewards and FeeSink properly configured.
+func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *evalTestLedger {
+ l := &evalTestLedger{
+ blocks: make(map[basics.Round]bookkeeping.Block),
+ roundBalances: make(map[basics.Round]map[basics.Address]basics.AccountData),
+ feeSink: balances.FeeSink,
+ rewardsPool: balances.RewardsPool,
+ }
+
+ crypto.RandBytes(l.genesisHash[:])
+ genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
+ balances, "test", l.genesisHash)
+ require.NoError(t, err)
+ l.roundBalances[0] = balances.Balances
+ l.blocks[0] = genBlock
+
+ // calculate the accounts totals.
+ var ot basics.OverflowTracker
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ for _, acctData := range balances.Balances {
+ l.latestTotals.AddAccount(proto, acctData, &ot)
+ }
+
+ require.False(t, genBlock.FeeSink.IsZero())
+ require.False(t, genBlock.RewardsPool.IsZero())
+ return l
+}
+
+// Validate uses the ledger to validate block blk as a candidate next block.
+// It returns an error if blk is not the expected next block, or if blk is
+// not a valid block (e.g., it has duplicate transactions, overspends some
+// account, etc).
+func (ledger *evalTestLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
+ verifiedTxnCache := verify.MakeVerifiedTransactionCache(config.GetDefaultLocal().VerifiedTranscationsCacheSize)
+
+ delta, err := Eval(ctx, ledger, blk, true, verifiedTxnCache, executionPool)
+ if err != nil {
+ return nil, err
+ }
+
+ vb := ledgercore.MakeValidatedBlock(blk, delta)
+ return &vb, nil
+}
+
+// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
+// of the block that the caller is planning to evaluate. If the length of the
+// payset being evaluated is known in advance, a paysetHint >= 0 can be
+// passed, avoiding unnecessary payset slice growth.
+func (ledger *evalTestLedger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int) (*BlockEvaluator, error) {
+ return StartEvaluator(ledger, hdr,
+ EvaluatorOptions{
+ PaysetHint: paysetHint,
+ Validate: true,
+ Generate: true,
+ MaxTxnBytesPerBlock: maxTxnBytesPerBlock,
+ })
+}
+
+// GetCreatorForRound takes a CreatableIndex and a CreatableType and tries to
+// look up a creator address, setting ok to false if the query succeeded but no
+// creator was found.
+func (ledger *evalTestLedger) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
+ balances := ledger.roundBalances[rnd]
+ for addr, balance := range balances {
+ if _, has := balance.AssetParams[basics.AssetIndex(cidx)]; has {
+ return addr, true, nil
+ }
+ if _, has := balance.AppParams[basics.AppIndex(cidx)]; has {
+ return addr, true, nil
+ }
+ }
+ return basics.Address{}, false, nil
+}
+
+// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number.
+func (ledger *evalTestLedger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error) {
+ return basics.Round(len(ledger.blocks)).SubSaturate(1), ledger.latestTotals, nil
+}
+
+// LookupWithoutRewards is like Lookup but does not apply pending rewards up
+// to the requested round rnd.
+func (ledger *evalTestLedger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, basics.Round, error) {
+ return ledger.roundBalances[rnd][addr], rnd, nil
+}
+
+// GenesisHash returns the genesis hash for this ledger.
+func (ledger *evalTestLedger) GenesisHash() crypto.Digest {
+ return ledger.genesisHash
+}
+
+// Latest returns the latest known block round added to the ledger.
+func (ledger *evalTestLedger) Latest() basics.Round {
+ return basics.Round(len(ledger.blocks)).SubSaturate(1)
+}
+
+// AddValidatedBlock adds a new block to the ledger, after the block has
+// been validated by calling Ledger.Validate(). This saves the cost of
+// having to re-compute the effect of the block on the ledger state, if
+// the block has previously been validated. Otherwise, AddValidatedBlock
+// behaves like AddBlock.
+func (ledger *evalTestLedger) AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error {
+ blk := vb.Block()
+ ledger.blocks[blk.Round()] = blk
+ newBalances := make(map[basics.Address]basics.AccountData)
+
+ // copy the previous balances.
+ for k, v := range ledger.roundBalances[vb.Block().Round()-1] {
+ newBalances[k] = v
+ }
+ // update
+ deltas := vb.Delta()
+ for _, addr := range deltas.Accts.ModifiedAccounts() {
+ accountData, _ := deltas.Accts.Get(addr)
+ newBalances[addr] = accountData
+ }
+ ledger.roundBalances[vb.Block().Round()] = newBalances
+ ledger.latestTotals = vb.Delta().Totals
+ return nil
+}
+
+// Lookup uses the accounts tracker to return the account state for a
+// given account in a particular round. The account values reflect
+// the changes of all blocks up to and including rnd.
+func (ledger *evalTestLedger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
+ balances, has := ledger.roundBalances[rnd]
+ if !has {
+ return basics.AccountData{}, errors.New("invalid round specified")
+ }
+
+ return balances[addr], nil
+}
+func (ledger *evalTestLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ block, has := ledger.blocks[rnd]
+ if !has {
+ return bookkeeping.BlockHeader{}, errors.New("invalid round specified")
+ }
+ return block.BlockHeader, nil
+}
+
+func (ledger *evalTestLedger) CompactCertVoters(rnd basics.Round) (*ledgercore.VotersForRound, error) {
+ return nil, errors.New("untested code path")
+}
+
+// GetCreator is like GetCreatorForRound, but for the latest round and race-free
+// with respect to ledger.Latest()
+func (ledger *evalTestLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
+ latestRound := ledger.Latest()
+ return ledger.GetCreatorForRound(latestRound, cidx, ctype)
+}
+
+func (ledger *evalTestLedger) CheckDup(currentProto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
+ for _, block := range ledger.blocks {
+ for _, txn := range block.Payset {
+ if lastValid != txn.Txn.LastValid {
+ continue
+ }
+ currentTxid := txn.Txn.ID()
+ if bytes.Equal(txid[:], currentTxid[:]) {
+ return &ledgercore.TransactionInLedgerError{Txid: txid}
+ }
+ }
+ }
+ // todo - support leases.
+ return nil
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func (ledger *evalTestLedger) nextBlock(t testing.TB) *BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ eval, err := ledger.StartEvaluator(nextHdr, 0, 0)
+ require.NoError(t, err)
+ return eval
+}
+
+// endBlock completes the block being created, returns the ValidatedBlock for inspection
+func (ledger *evalTestLedger) endBlock(t testing.TB, eval *BlockEvaluator) *ledgercore.ValidatedBlock {
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ return validatedBlock
+}
+
+// lookup gets the current accountdata for an address
+func (ledger *evalTestLedger) lookup(t testing.TB, addr basics.Address) basics.AccountData {
+ rnd := ledger.Latest()
+ ad, err := ledger.Lookup(rnd, addr)
+ require.NoError(t, err)
+ return ad
+}
+
+// micros gets the current microAlgo balance for an address
+func (ledger *evalTestLedger) micros(t testing.TB, addr basics.Address) uint64 {
+ return ledger.lookup(t, addr).MicroAlgos.Raw
+}
+
+// asa gets the current balance and optin status for some asa for an address
+func (ledger *evalTestLedger) asa(t testing.TB, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
+ if holding, ok := ledger.lookup(t, addr).Assets[asset]; ok {
+ return holding.Amount, true
+ }
+ return 0, false
+}
+
+// asaParams gets the asset params for a given asa index
+func (ledger *evalTestLedger) asaParams(t testing.TB, asset basics.AssetIndex) (basics.AssetParams, error) {
+ creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
+ if err != nil {
+ return basics.AssetParams{}, err
+ }
+ if !ok {
+ return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
+ }
+ if params, ok := ledger.lookup(t, creator).AssetParams[asset]; ok {
+ return params, nil
+ }
+ return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
+}
+
+type getCreatorForRoundResult struct {
+ address basics.Address
+ exists bool
+}
+
+type testCowBaseLedger struct {
+ creators []getCreatorForRoundResult
+}
+
+func (l *testCowBaseLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader{}, errors.New("not implemented")
+}
+
+func (l *testCowBaseLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
+ return errors.New("not implemented")
+}
+
+func (l *testCowBaseLedger) LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error) {
+ return basics.AccountData{}, basics.Round(0), errors.New("not implemented")
+}
+
+func (l *testCowBaseLedger) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
+ res := l.creators[0]
+ l.creators = l.creators[1:]
+ return res.address, res.exists, nil
+}
+
+func TestCowBaseCreatorsCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ addresses := make([]basics.Address, 3)
+ for i := 0; i < len(addresses); i++ {
+ _, err := rand.Read(addresses[i][:])
+ require.NoError(t, err)
+ }
+
+ creators := []getCreatorForRoundResult{
+ {address: addresses[0], exists: true},
+ {address: basics.Address{}, exists: false},
+ {address: addresses[1], exists: true},
+ {address: basics.Address{}, exists: false},
+ }
+ l := testCowBaseLedger{
+ creators: creators,
+ }
+
+ base := roundCowBase{
+ l: &l,
+ creators: map[creatable]foundAddress{},
+ }
+
+ cindex := []basics.CreatableIndex{9, 10, 9, 10}
+ ctype := []basics.CreatableType{
+ basics.AssetCreatable,
+ basics.AssetCreatable,
+ basics.AppCreatable,
+ basics.AppCreatable,
+ }
+ for i := 0; i < 2; i++ {
+ for j, expected := range creators {
+ address, exists, err := base.getCreator(cindex[j], ctype[j])
+ require.NoError(t, err)
+
+ assert.Equal(t, expected.address, address)
+ assert.Equal(t, expected.exists, exists)
+ }
+ }
+}
+
+// TestEvalFunctionForExpiredAccounts tests that the eval function will correctly mark accounts as offline
+func TestEvalFunctionForExpiredAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ sendAddr := addrs[0]
+ recvAddr := addrs[1]
+
+ // the last round that the recvAddr is valid for
+ recvAddrLastValidRound := basics.Round(2)
+
+ // the target round we want to advance the evaluator to
+ targetRound := basics.Round(4)
+
+ // Set all to online except the sending address
+ for _, addr := range addrs {
+ if addr == sendAddr {
+ continue
+ }
+ tmp := genesisInitState.Accounts[addr]
+ tmp.Status = basics.Online
+ genesisInitState.Accounts[addr] = tmp
+ }
+
+ // Choose recvAddr to have a last valid round less than genesis block round
+ {
+ tmp := genesisInitState.Accounts[recvAddr]
+ tmp.VoteLastValid = recvAddrLastValidRound
+ genesisInitState.Accounts[recvAddr] = tmp
+ }
+
+ genesisBalances := bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ }
+ l := newTestLedger(t, genesisBalances)
+
+ newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
+
+ blkEval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // Advance the evaluator a couple rounds...
+ for i := uint64(0); i < uint64(targetRound); i++ {
+ l.endBlock(t, blkEval)
+ blkEval = l.nextBlock(t)
+ }
+
+ require.Greater(t, uint64(blkEval.Round()), uint64(recvAddrLastValidRound))
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sendAddr,
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: blkEval.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: recvAddr,
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ st := txn.Sign(keys[0])
+ err = blkEval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Make sure we validate our block as well
+ blkEval.validate = true
+
+ validatedBlock, err := blkEval.GenerateBlock()
+ require.NoError(t, err)
+
+ _, err = Eval(context.Background(), l, validatedBlock.Block(), false, nil, nil)
+ require.NoError(t, err)
+
+ badBlock := *validatedBlock
+
+ // First validate that bad block is fine if we dont touch it...
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.NoError(t, err)
+
+ badBlock = *validatedBlock
+
+ // Introduce an unknown address to introduce an error
+ badBlockObj := badBlock.Block()
+ badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, basics.Address{1})
+ badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
+
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.Error(t, err)
+
+ badBlock = *validatedBlock
+
+ addressToCopy := badBlock.Block().ExpiredParticipationAccounts[0]
+
+ // Add more than the expected number of accounts
+ badBlockObj = badBlock.Block()
+ for i := 0; i < blkEval.proto.MaxProposedExpiredOnlineAccounts+1; i++ {
+ badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, addressToCopy)
+ }
+ badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
+
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.Error(t, err)
+
+ badBlock = *validatedBlock
+
+ // Duplicate an address
+ badBlockObj = badBlock.Block()
+ badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, badBlockObj.ExpiredParticipationAccounts[0])
+ badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
+
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.Error(t, err)
+
+ badBlock = *validatedBlock
+ // sanity check that bad block is being actually copied and not just the pointer
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ require.NoError(t, err)
+
+}
+
+type failRoundCowParent struct {
+ roundCowBase
+}
+
+func (p *failRoundCowParent) lookup(basics.Address) (basics.AccountData, error) {
+ return basics.AccountData{}, fmt.Errorf("disk I/O fail (on purpose)")
+}
+
+// TestExpiredAccountGenerationWithDiskFailure tests edge cases where disk failures can lead to ledger look up failures
+func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ sendAddr := addrs[0]
+ recvAddr := addrs[1]
+
+ // the last round that the recvAddr is valid for
+ recvAddrLastValidRound := basics.Round(10)
+
+ // the target round we want to advance the evaluator to
+ targetRound := basics.Round(4)
+
+ // Set all to online except the sending address
+ for _, addr := range addrs {
+ if addr == sendAddr {
+ continue
+ }
+ tmp := genesisInitState.Accounts[addr]
+ tmp.Status = basics.Online
+ genesisInitState.Accounts[addr] = tmp
+ }
+
+ // Choose recvAddr to have a last valid round less than genesis block round
+ {
+ tmp := genesisInitState.Accounts[recvAddr]
+ tmp.VoteLastValid = recvAddrLastValidRound
+ genesisInitState.Accounts[recvAddr] = tmp
+ }
+
+ l := newTestLedger(t, bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ })
+
+ newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
+
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // Advance the evaluator a couple rounds...
+ for i := uint64(0); i < uint64(targetRound); i++ {
+ l.endBlock(t, eval)
+ eval = l.nextBlock(t)
+ }
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sendAddr,
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: eval.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: recvAddr,
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ eval.validate = true
+ eval.generate = false
+
+ eval.block.ExpiredParticipationAccounts = append(eval.block.ExpiredParticipationAccounts, recvAddr)
+
+ err = eval.endOfBlock()
+ require.Error(t, err)
+
+ eval.block.ExpiredParticipationAccounts = []basics.Address{
+ basics.Address{},
+ }
+ eval.state.mods.Accts = ledgercore.AccountDeltas{}
+ eval.state.lookupParent = &failRoundCowParent{}
+ err = eval.endOfBlock()
+ require.Error(t, err)
+
+ err = eval.resetExpiredOnlineAccountsParticipationKeys()
+ require.Error(t, err)
+
+}
+
+// TestExpiredAccountGeneration test that expired accounts are added to a block header and validated
+func TestExpiredAccountGeneration(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture)
+
+ sendAddr := addrs[0]
+ recvAddr := addrs[1]
+
+ // the last round that the recvAddr is valid for
+ recvAddrLastValidRound := basics.Round(2)
+
+ // the target round we want to advance the evaluator to
+ targetRound := basics.Round(4)
+
+ // Set all to online except the sending address
+ for _, addr := range addrs {
+ if addr == sendAddr {
+ continue
+ }
+ tmp := genesisInitState.Accounts[addr]
+ tmp.Status = basics.Online
+ genesisInitState.Accounts[addr] = tmp
+ }
+
+ // Choose recvAddr to have a last valid round less than genesis block round
+ {
+ tmp := genesisInitState.Accounts[recvAddr]
+ tmp.VoteLastValid = recvAddrLastValidRound
+ genesisInitState.Accounts[recvAddr] = tmp
+ }
+
+ l := newTestLedger(t, bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ })
+
+ newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
+
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ // Advance the evaluator a couple rounds...
+ for i := uint64(0); i < uint64(targetRound); i++ {
+ l.endBlock(t, eval)
+ eval = l.nextBlock(t)
+ }
+
+ require.Greater(t, uint64(eval.Round()), uint64(recvAddrLastValidRound))
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sendAddr,
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: eval.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: recvAddr,
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Make sure we validate our block as well
+ eval.validate = true
+
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+
+ listOfExpiredAccounts := validatedBlock.Block().ParticipationUpdates.ExpiredParticipationAccounts
+
+ require.Equal(t, 1, len(listOfExpiredAccounts))
+ expiredAccount := listOfExpiredAccounts[0]
+ require.Equal(t, expiredAccount, recvAddr)
+
+ recvAcct, err := eval.state.lookup(recvAddr)
+ require.NoError(t, err)
+ require.Equal(t, recvAcct.Status, basics.Offline)
+ require.Equal(t, recvAcct.VoteFirstValid, basics.Round(0))
+ require.Equal(t, recvAcct.VoteLastValid, basics.Round(0))
+ require.Equal(t, recvAcct.VoteKeyDilution, uint64(0))
+ require.Equal(t, recvAcct.VoteID, crypto.OneTimeSignatureVerifier{})
+ require.Equal(t, recvAcct.SelectionID, crypto.VRFVerifier{})
+
+}
diff --git a/ledger/internal/evalindexer.go b/ledger/internal/evalindexer.go
new file mode 100644
index 000000000..454ae2d7a
--- /dev/null
+++ b/ledger/internal/evalindexer.go
@@ -0,0 +1,51 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+// ProcessBlockForIndexer ..
+func (eval *BlockEvaluator) ProcessBlockForIndexer(block *bookkeeping.Block) (ledgercore.StateDelta, []transactions.SignedTxnInBlock, error) {
+ paysetgroups, err := block.DecodePaysetGroups()
+ if err != nil {
+ return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
+ fmt.Errorf("ProcessBlockForIndexer() err: %w", err)
+ }
+
+ for _, group := range paysetgroups {
+ err = eval.TransactionGroup(group)
+ if err != nil {
+ return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
+ fmt.Errorf("ProcessBlockForIndexer() err: %w", err)
+ }
+ }
+
+ // Finally, process any pending end-of-block state changes.
+ err = eval.endOfBlock()
+ if err != nil {
+ return ledgercore.StateDelta{}, []transactions.SignedTxnInBlock{},
+ fmt.Errorf("ProcessBlockForIndexer() err: %w", err)
+ }
+
+ return eval.state.deltas(), eval.block.Payset, nil
+}
diff --git a/ledger/ledger.go b/ledger/ledger.go
index e69c3c355..4c0ececaa 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -28,13 +28,18 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/compactcert"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/apply"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-algorand/util/execpool"
"github.com/algorand/go-algorand/util/metrics"
)
@@ -70,11 +75,12 @@ type Ledger struct {
genesisProto config.ConsensusParams
// State-machine trackers
- accts accountUpdates
- txTail txTail
- bulletin bulletin
- notifier blockNotifier
- metrics metricsTracker
+ accts accountUpdates
+ catchpoint catchpointTracker
+ txTail txTail
+ bulletin bulletin
+ notifier blockNotifier
+ metrics metricsTracker
trackers trackerRegistry
trackerMu deadlock.RWMutex
@@ -83,13 +89,8 @@ type Ledger struct {
// verifiedTxnCache holds all the verified transactions state
verifiedTxnCache verify.VerifiedTransactionCache
-}
-// InitState structure defines blockchain init params
-type InitState struct {
- Block bookkeeping.Block
- Accounts map[basics.Address]basics.AccountData
- GenesisHash crypto.Digest
+ cfg config.Local
}
// OpenLedger creates a Ledger object, using SQLite database filenames
@@ -97,7 +98,7 @@ type InitState struct {
// genesisInitState.Accounts specify the initial blocks and accounts to use if the
// database wasn't initialized before.
func OpenLedger(
- log logging.Logger, dbPathPrefix string, dbMem bool, genesisInitState InitState, cfg config.Local,
+ log logging.Logger, dbPathPrefix string, dbMem bool, genesisInitState ledgercore.InitState, cfg config.Local,
) (*Ledger, error) {
var err error
verifiedCacheSize := cfg.VerifiedTranscationsCacheSize
@@ -115,6 +116,7 @@ func OpenLedger(
synchronousMode: db.SynchronousMode(cfg.LedgerSynchronousMode),
accountsRebuildSynchronousMode: db.SynchronousMode(cfg.AccountsRebuildSynchronousMode),
verifiedTxnCache: verify.MakeVerifiedTransactionCache(verifiedCacheSize),
+ cfg: cfg,
}
l.headerCache.maxEntries = 10
@@ -152,7 +154,8 @@ func OpenLedger(
l.genesisAccounts = make(map[basics.Address]basics.AccountData)
}
- l.accts.initialize(cfg, dbPathPrefix, l.genesisProto, l.genesisAccounts)
+ l.accts.initialize(cfg)
+ l.catchpoint.initialize(cfg, dbPathPrefix)
err = l.reloadLedger()
if err != nil {
@@ -178,7 +181,7 @@ func (l *Ledger) reloadLedger() error {
// close the trackers.
l.trackers.close()
- // reload -
+ // init block queue
var err error
l.blockQ, err = bqInit(l)
if err != nil {
@@ -186,11 +189,26 @@ func (l *Ledger) reloadLedger() error {
return err
}
- l.trackers.register(&l.accts) // update the balances
- l.trackers.register(&l.txTail) // update the transaction tail, tracking the recent 1000 txn
- l.trackers.register(&l.bulletin) // provide closed channel signaling support for completed rounds
- l.trackers.register(&l.notifier) // send OnNewBlocks to subscribers
- l.trackers.register(&l.metrics) // provides metrics reporting support
+ // init tracker db
+ trackerDBInitParams, err := trackerDBInitialize(l, l.catchpoint.catchpointEnabled(), l.catchpoint.dbDirectory)
+ if err != nil {
+ return err
+ }
+
+ // set account updates tracker as a driver to calculate tracker db round and committing offsets
+ trackers := []ledgerTracker{
+ &l.accts, // update the balances
+ &l.catchpoint, // catchpoints tracker : update catchpoint labels, create catchpoint files
+ &l.txTail, // update the transaction tail, tracking the recent 1000 txn
+ &l.bulletin, // provide closed channel signaling support for completed rounds
+ &l.notifier, // send OnNewBlocks to subscribers
+ &l.metrics, // provides metrics reporting support
+ }
+
+ err = l.trackers.initialize(l, trackers, l.cfg)
+ if err != nil {
+ return err
+ }
err = l.trackers.loadFromDisk(l)
if err != nil {
@@ -198,6 +216,14 @@ func (l *Ledger) reloadLedger() error {
return err
}
+ // post-init actions
+ if trackerDBInitParams.vacuumOnStartup || l.cfg.OptimizeAccountsDatabaseOnStartup {
+ err = l.accts.vacuumDatabase(context.Background())
+ if err != nil {
+ return err
+ }
+ }
+
// Check that the genesis hash, if present, matches.
err = l.verifyMatchingGenesisHash()
if err != nil {
@@ -384,7 +410,7 @@ func (l *Ledger) notifyCommit(r basics.Round) basics.Round {
func (l *Ledger) GetLastCatchpointLabel() string {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.GetLastCatchpointLabel()
+ return l.catchpoint.GetLastCatchpointLabel()
}
// GetCreatorForRound takes a CreatableIndex and a CreatableType and tries to
@@ -407,7 +433,7 @@ func (l *Ledger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableTy
// CompactCertVoters returns the top online accounts at round rnd.
// The result might be nil, even with err=nil, if there are no voters
// for that round because compact certs were not enabled.
-func (l *Ledger) CompactCertVoters(rnd basics.Round) (voters *VotersForRound, err error) {
+func (l *Ledger) CompactCertVoters(rnd basics.Round) (*ledgercore.VotersForRound, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.voters.getVoters(rnd)
@@ -447,6 +473,20 @@ func (l *Ledger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountDa
return data, nil
}
+// LookupAgreement returns account data used by agreement.
+func (l *Ledger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+
+ // Intentionally apply (pending) rewards up to rnd.
+ data, err := l.accts.LookupWithRewards(rnd, addr)
+ if err != nil {
+ return basics.OnlineAccountData{}, err
+ }
+
+ return data.OnlineAccountData(), nil
+}
+
// LookupWithoutRewards is like Lookup but does not apply pending rewards up
// to the requested round rnd.
func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, basics.Round, error) {
@@ -480,10 +520,10 @@ func (l *Ledger) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
}
// CheckDup return whether a transaction is a duplicate one.
-func (l *Ledger) CheckDup(currentProto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl TxLease) error {
+func (l *Ledger) CheckDup(currentProto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.txTail.checkDup(currentProto, current, firstValid, lastValid, txid, txl.Txlease)
+ return l.txTail.checkDup(currentProto, current, firstValid, lastValid, txid, txl)
}
// Latest returns the latest known block round added to the ledger.
@@ -536,15 +576,11 @@ func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreem
func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
// passing nil as the executionPool is ok since we've asking the evaluator to skip verification.
- updates, err := eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil)
+ updates, err := internal.Eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil)
if err != nil {
return err
}
-
- vb := ValidatedBlock{
- blk: blk,
- delta: updates,
- }
+ vb := ledgercore.MakeValidatedBlock(blk, updates)
return l.AddValidatedBlock(vb, cert)
}
@@ -554,18 +590,19 @@ func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) err
// having to re-compute the effect of the block on the ledger state, if
// the block has previously been validated. Otherwise, AddValidatedBlock
// behaves like AddBlock.
-func (l *Ledger) AddValidatedBlock(vb ValidatedBlock, cert agreement.Certificate) error {
+func (l *Ledger) AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error {
// Grab the tracker lock first, to ensure newBlock() is notified before committedUpTo().
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
- err := l.blockQ.putBlock(vb.blk, cert)
+ blk := vb.Block()
+ err := l.blockQ.putBlock(blk, cert)
if err != nil {
return err
}
- l.headerCache.Put(vb.blk.Round(), vb.blk.BlockHeader)
- l.trackers.newBlock(vb.blk, vb.delta)
- l.log.Debugf("added blk %d", vb.blk.Round())
+ l.headerCache.Put(blk.Round(), blk.BlockHeader)
+ l.trackers.newBlock(blk, vb.Delta())
+ l.log.Debugf("added blk %d", blk.Round())
return nil
}
@@ -596,6 +633,11 @@ func (l *Ledger) GenesisProto() config.ConsensusParams {
return l.genesisProto
}
+// GenesisAccounts returns initial accounts for this ledger.
+func (l *Ledger) GenesisAccounts() map[basics.Address]basics.AccountData {
+ return l.genesisAccounts
+}
+
// GetCatchpointCatchupState returns the current state of the catchpoint catchup.
func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state CatchpointCatchupState, err error) {
return MakeCatchpointCatchupAccessor(l, l.log).GetState(ctx)
@@ -609,7 +651,7 @@ func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state Catchpoin
func (l *Ledger) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.GetCatchpointStream(round)
+ return l.catchpoint.GetCatchpointStream(round)
}
// ledgerForTracker methods
@@ -629,9 +671,9 @@ func (l *Ledger) trackerLog() logging.Logger {
// trackerEvalVerified is used by the accountUpdates to reconstruct the ledgercore.StateDelta from a given block during it's loadFromDisk execution.
// when this function is called, the trackers mutex is expected already to be taken. The provided accUpdatesLedger would allow the
// evaluator to shortcut the "main" ledger ( i.e. this struct ) and avoid taking the trackers lock a second time.
-func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) {
+func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
// passing nil as the executionPool is ok since we've asking the evaluator to skip verification.
- return eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil)
+ return internal.Eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil)
}
// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
@@ -639,7 +681,7 @@ func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger led
func (l *Ledger) IsWritingCatchpointFile() bool {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.accts.IsWritingCatchpointFile()
+ return l.catchpoint.IsWritingCatchpointFile()
}
// VerifiedTransactionCache returns the verify.VerifiedTransactionCache
@@ -647,9 +689,61 @@ func (l *Ledger) VerifiedTransactionCache() verify.VerifiedTransactionCache {
return l.verifiedTxnCache
}
-// TxLease is an exported version of txlease
-type TxLease struct {
- ledgercore.Txlease
+// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
+// of the block that the caller is planning to evaluate. If the length of the
+// payset being evaluated is known in advance, a paysetHint >= 0 can be
+// passed, avoiding unnecessary payset slice growth. The optional maxTxnBytesPerBlock parameter
+// provides a cap on the size of a single generated block size, when a non-zero value is passed.
+// If a value of zero or less is passed to maxTxnBytesPerBlock, the consensus MaxTxnBytesPerBlock would
+// be used instead.
+func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int) (*internal.BlockEvaluator, error) {
+ return internal.StartEvaluator(l, hdr,
+ internal.EvaluatorOptions{
+ PaysetHint: paysetHint,
+ Generate: true,
+ Validate: true,
+ MaxTxnBytesPerBlock: maxTxnBytesPerBlock,
+ })
+}
+
+// Validate uses the ledger to validate block blk as a candidate next block.
+// It returns an error if blk is not the expected next block, or if blk is
+// not a valid block (e.g., it has duplicate transactions, overspends some
+// account, etc).
+func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
+ delta, err := internal.Eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool)
+ if err != nil {
+ return nil, err
+ }
+
+ vb := ledgercore.MakeValidatedBlock(blk, delta)
+ return &vb, nil
+}
+
+// CompactCertParams computes the parameters for building or verifying
+// a compact cert for block hdr, using voters from block votersHdr.
+func CompactCertParams(votersHdr bookkeeping.BlockHeader, hdr bookkeeping.BlockHeader) (res compactcert.Params, err error) {
+ return internal.CompactCertParams(votersHdr, hdr)
+}
+
+// AcceptableCompactCertWeight computes the acceptable signed weight
+// of a compact cert if it were to appear in a transaction with a
+// particular firstValid round. Earlier rounds require a smaller cert.
+// votersHdr specifies the block that contains the Merkle commitment of
+// the voters for this compact cert (and thus the compact cert is for
+// votersHdr.Round() + CompactCertRounds).
+//
+// logger must not be nil; use at least logging.Base()
+func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round, logger logging.Logger) uint64 {
+ return internal.AcceptableCompactCertWeight(votersHdr, firstValid, logger)
+}
+
+// DebuggerLedger defines the minimal set of method required for creating a debug balances.
+type DebuggerLedger = internal.LedgerForCowBase
+
+// MakeDebugBalances creates a ledger suitable for dryrun and debugger
+func MakeDebugBalances(l DebuggerLedger, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
+ return internal.MakeDebugBalances(l, round, proto, prevTimestamp)
}
var ledgerInitblocksdbCount = metrics.NewCounter("ledger_initblocksdb_count", "calls")
diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go
index c29652b38..e3f36c4f7 100644
--- a/ledger/ledger_perf_test.go
+++ b/ledger/ledger_perf_test.go
@@ -37,6 +37,8 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -261,8 +263,8 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
}
// check if block is full
- if err == ErrNoSpace {
- txPerBlock = len(eval.block.Payset)
+ if err == ledgercore.ErrNoSpace {
+ txPerBlock = eval.PaySetSize()
break
} else {
require.NoError(b, err)
@@ -271,7 +273,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
// First block just creates app + opts in accts if asa test
if i == 1 {
onCompletion = transactions.NoOpOC
- createdAppIdx = eval.state.txnCounter()
+ createdAppIdx = eval.TestingTxnCounter()
// On first block, opt in all accts to asa (accts is empty if not asa test)
k := 0
@@ -298,19 +300,19 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
// If this is the app creation block, add to both ledgers
if i == 1 {
- err = l0.AddBlock(lvb.blk, cert)
+ err = l0.AddBlock(lvb.Block(), cert)
require.NoError(b, err)
- err = l1.AddBlock(lvb.blk, cert)
+ err = l1.AddBlock(lvb.Block(), cert)
require.NoError(b, err)
continue
}
// For all other blocks, add just to the first ledger, and stash
// away to be replayed in the second ledger while running timer
- err = l0.AddBlock(lvb.blk, cert)
+ err = l0.AddBlock(lvb.Block(), cert)
require.NoError(b, err)
- blocks = append(blocks, lvb.blk)
+ blocks = append(blocks, lvb.Block())
}
b.Logf("built %d blocks, each with %d txns", numBlocks, txPerBlock)
@@ -319,7 +321,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
vc := verify.GetMockedCache(true)
b.ResetTimer()
for _, blk := range blocks {
- _, err = eval(context.Background(), l1, blk, true, vc, nil)
+ _, err = internal.Eval(context.Background(), l1, blk, true, vc, nil)
require.NoError(b, err)
err = l1.AddBlock(blk, cert)
require.NoError(b, err)
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 077505fda..9fa3c08bd 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -36,26 +36,13 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
)
-var poolSecret, sinkSecret *crypto.SignatureSecrets
-
-func init() {
- var seed crypto.Seed
-
- incentivePoolName := []byte("incentive pool")
- copy(seed[:], incentivePoolName)
- poolSecret = crypto.GenerateSignatureSecrets(seed)
-
- feeSinkName := []byte("fee sink")
- copy(seed[:], feeSinkName)
- sinkSecret = crypto.GenerateSignatureSecrets(seed)
-}
-
func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Transaction) transactions.SignedTxn {
var sig crypto.Signature
_, ok := secrets[t.Sender]
@@ -68,72 +55,6 @@ func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Tr
}
}
-func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoPerAccount int) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
- params := config.Consensus[proto]
- poolAddr := testPoolAddr
- sinkAddr := testSinkAddr
-
- var zeroSeed crypto.Seed
- var genaddrs [10]basics.Address
- var gensecrets [10]*crypto.SignatureSecrets
- for i := range genaddrs {
- seed := zeroSeed
- seed[0] = byte(i)
- x := crypto.GenerateSignatureSecrets(seed)
- genaddrs[i] = basics.Address(x.SignatureVerifier)
- gensecrets[i] = x
- }
-
- initKeys = make(map[basics.Address]*crypto.SignatureSecrets)
- initAccounts := make(map[basics.Address]basics.AccountData)
- for i := range genaddrs {
- initKeys[genaddrs[i]] = gensecrets[i]
- // Give each account quite a bit more balance than MinFee or MinBalance
- initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)})
- }
- initKeys[poolAddr] = poolSecret
- initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567})
- initKeys[sinkAddr] = sinkSecret
- initAccounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 7654321})
-
- incentivePoolBalanceAtGenesis := initAccounts[poolAddr].MicroAlgos
- var initialRewardsPerRound uint64
- if params.InitialRewardsRateCalculation {
- initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
- } else {
- initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
- }
-
- initBlock := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- GenesisID: tb.Name(),
- Round: 0,
- RewardsState: bookkeeping.RewardsState{
- RewardsRate: initialRewardsPerRound,
- RewardsPool: poolAddr,
- FeeSink: sinkAddr,
- },
- UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: proto,
- },
- },
- }
-
- var err error
- initBlock.TxnRoot, err = initBlock.PaysetCommit()
- require.NoError(tb, err)
-
- if params.SupportGenesisHash {
- initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(tb.Name()))
- }
-
- genesisInitState.Block = initBlock
- genesisInitState.Accounts = initAccounts
- genesisInitState.GenesisHash = crypto.Hash([]byte(tb.Name()))
-
- return
-}
-
func (l *Ledger) appendUnvalidated(blk bookkeeping.Block) error {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
@@ -258,7 +179,7 @@ func (l *Ledger) addBlockTxns(t *testing.T, accounts map[basics.Address]basics.A
func TestLedgerBasic(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -273,7 +194,7 @@ func TestLedgerBlockHeaders(t *testing.T) {
a := require.New(t)
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -419,7 +340,7 @@ func TestLedgerSingleTx(t *testing.T) {
// V15 is the earliest protocol version in active use.
// The genesis for betanet and testnet is at V15
// The genesis for mainnet is at V17
- genesisInitState, initSecrets := testGenerateInitState(t, protocol.ConsensusV15, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protocol.ConsensusV15, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -622,7 +543,7 @@ func TestLedgerSingleTxV24(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -793,7 +714,7 @@ func TestLedgerAppCrossRoundWrites(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -931,7 +852,7 @@ func TestLedgerAppMultiTxnWrites(t *testing.T) {
a := require.New(t)
protoName := protocol.ConsensusV24
- genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, protoName, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1096,7 +1017,7 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- genesisInitState, initSecrets := testGenerateInitState(t, version, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, version, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1368,7 +1289,7 @@ func TestLedgerRegressionFaultyLeaseFirstValidCheckFuture(t *testing.T) {
func testLedgerRegressionFaultyLeaseFirstValidCheck2f3880f7(t *testing.T, version protocol.ConsensusVersion) {
a := require.New(t)
- genesisInitState, initSecrets := testGenerateInitState(t, version, 100)
+ genesisInitState, initSecrets := ledgertesting.GenerateInitState(t, version, 100)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -1494,7 +1415,7 @@ func TestGetLastCatchpointLabel(t *testing.T) {
partitiontest.PartitionTest(t)
//initLedger
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1505,7 +1426,7 @@ func TestGetLastCatchpointLabel(t *testing.T) {
// set some value
lastCatchpointLabel := "someCatchpointLabel"
- ledger.accts.lastCatchpointLabel = lastCatchpointLabel
+ ledger.catchpoint.lastCatchpointLabel = lastCatchpointLabel
// verify the value is returned
require.Equal(t, lastCatchpointLabel, ledger.GetLastCatchpointLabel())
@@ -1573,7 +1494,7 @@ func TestListAssetsAndApplications(t *testing.T) {
numElementsPerSegement := 10 // This is multiplied by 10. see randomCreatables
//initLedger
- genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
const inMem = true
log := logging.TestingLog(t)
cfg := config.GetDefaultLocal()
@@ -1632,7 +1553,7 @@ func TestLedgerMemoryLeak(t *testing.T) {
t.Skip() // for manual runs only
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000)
const inMem = false
cfg := config.GetDefaultLocal()
cfg.Archival = true
@@ -1756,7 +1677,7 @@ func BenchmarkLedgerStartup(b *testing.B) {
log := logging.TestingLog(b)
tmpDir, err := ioutil.TempDir(os.TempDir(), "BenchmarkLedgerStartup")
require.NoError(b, err)
- genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
cfg := config.GetDefaultLocal()
cfg.Archival = false
diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go
index c54c6173c..68185a573 100644
--- a/ledger/ledgercore/error.go
+++ b/ledger/ledgercore/error.go
@@ -17,12 +17,16 @@
package ledgercore
import (
+ "errors"
"fmt"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
)
+// ErrNoSpace indicates insufficient space for transaction in block
+var ErrNoSpace = errors.New("block does not have space for transaction")
+
// TransactionInLedgerError is returned when a transaction cannot be added because it has already been done
type TransactionInLedgerError struct {
Txid transactions.Txid
diff --git a/ledger/ledgercore/misc.go b/ledger/ledgercore/misc.go
new file mode 100644
index 000000000..f4fd21f50
--- /dev/null
+++ b/ledger/ledgercore/misc.go
@@ -0,0 +1,51 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+)
+
+// ParticipantsArray implements merklearray.Array and is used to commit
+// to a Merkle tree of online accounts.
+//msgp:ignore ParticipantsArray
+type ParticipantsArray []basics.Participant
+
+// Length returns the ledger of the array.
+func (a ParticipantsArray) Length() uint64 {
+ return uint64(len(a))
+}
+
+// GetHash returns the hash for the given position.
+func (a ParticipantsArray) GetHash(pos uint64) (crypto.Digest, error) {
+ if pos >= uint64(len(a)) {
+ return crypto.Digest{}, fmt.Errorf("array ParticipantsArray.Get(%d) out of bounds %d", pos, len(a))
+ }
+
+ return crypto.HashObj(a[pos]), nil
+}
+
+// InitState structure defines blockchain init params
+type InitState struct {
+ Block bookkeeping.Block
+ Accounts map[basics.Address]basics.AccountData
+ GenesisHash crypto.Digest
+}
diff --git a/agreement/fuzzer/keyManager_test.go b/ledger/ledgercore/onlineacct.go
index c888b4955..de786e9be 100644
--- a/agreement/fuzzer/keyManager_test.go
+++ b/ledger/ledgercore/onlineacct.go
@@ -14,21 +14,25 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package fuzzer
+package ledgercore
import (
- "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
)
-type simpleKeyManager []account.Participation
-
-func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation {
- var km []account.Participation
- for _, acc := range m {
- if acc.OverlapsInterval(votingRound, votingRound) {
- km = append(km, acc)
- }
- }
- return km
+// An OnlineAccount corresponds to an account whose AccountData.Status
+// is Online. This is used for a Merkle tree commitment of online
+// accounts, which is subsequently used to validate participants for
+// a compact certificate.
+type OnlineAccount struct {
+ // These are a subset of the fields from the corresponding AccountData.
+ Address basics.Address
+ MicroAlgos basics.MicroAlgos
+ RewardsBase uint64
+ NormalizedOnlineBalance uint64
+ VoteID crypto.OneTimeSignatureVerifier
+ VoteFirstValid basics.Round
+ VoteLastValid basics.Round
+ VoteKeyDilution uint64
}
diff --git a/ledger/ledgercore/validatedBlock.go b/ledger/ledgercore/validatedBlock.go
new file mode 100644
index 000000000..ef6c8f250
--- /dev/null
+++ b/ledger/ledgercore/validatedBlock.go
@@ -0,0 +1,59 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/committee"
+)
+
+// ValidatedBlock represents the result of a block validation. It can
+// be used to efficiently add the block to the ledger, without repeating
+// the work of applying the block's changes to the ledger state.
+type ValidatedBlock struct {
+ blk bookkeeping.Block
+ delta StateDelta
+}
+
+// Block returns the underlying Block for a ValidatedBlock.
+func (vb ValidatedBlock) Block() bookkeeping.Block {
+ return vb.blk
+}
+
+// Delta returns the underlying Delta for a ValidatedBlock.
+func (vb ValidatedBlock) Delta() StateDelta {
+ return vb.delta
+}
+
+// WithSeed returns a copy of the ValidatedBlock with a modified seed.
+func (vb ValidatedBlock) WithSeed(s committee.Seed) ValidatedBlock {
+ newblock := vb.blk
+ newblock.BlockHeader.Seed = s
+
+ return ValidatedBlock{
+ blk: newblock,
+ delta: vb.delta,
+ }
+}
+
+// MakeValidatedBlock creates a validated block.
+func MakeValidatedBlock(blk bookkeeping.Block, delta StateDelta) ValidatedBlock {
+ return ValidatedBlock{
+ blk: blk,
+ delta: delta,
+ }
+}
diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go
new file mode 100644
index 000000000..de76d2d29
--- /dev/null
+++ b/ledger/ledgercore/votersForRound.go
@@ -0,0 +1,164 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+)
+
+// VotersForRound tracks the top online voting accounts as of a particular
+// round, along with a Merkle tree commitment to those voting accounts.
+type VotersForRound struct {
+ // Because it can take some time to compute the top participants and the
+ // corresponding Merkle tree, the votersForRound is constructed in
+ // the background. This means that fields (participants, adddToPos,
+ // tree, and totalWeight) could be nil/zero while a background thread
+ // is computing them. Once the fields are set, however, they are
+ // immutable, and it is no longer necessary to acquire the lock.
+ //
+ // If an error occurs while computing the tree in the background,
+ // loadTreeError might be set to non-nil instead. That also finalizes
+ // the state of this VotersForRound.
+ mu deadlock.Mutex
+ cond *sync.Cond
+ loadTreeError error
+
+ // Proto is the ConsensusParams for the round whose balances are reflected
+ // in participants.
+ Proto config.ConsensusParams
+
+ // Participants is the array of top #CompactCertVoters online accounts
+ // in this round, sorted by normalized balance (to make sure heavyweight
+ // accounts are biased to the front).
+ Participants ParticipantsArray
+
+ // AddrToPos specifies the position of a given account address (if present)
+ // in the Participants array. This allows adding a vote from a given account
+ // to the certificate builder.
+ AddrToPos map[basics.Address]uint64
+
+ // Tree is a constructed Merkle tree of the Participants array.
+ Tree *merklearray.Tree
+
+ // TotalWeight is the sum of the weights from the Participants array.
+ TotalWeight basics.MicroAlgos
+}
+
+// TopOnlineAccounts is the function signature for a method that would return the top online accounts.
+type TopOnlineAccounts func(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*OnlineAccount, error)
+
+// MakeVotersForRound create a new VotersForRound object and initialize it's cond.
+func MakeVotersForRound() *VotersForRound {
+ vr := &VotersForRound{}
+ vr.cond = sync.NewCond(&vr.mu)
+ return vr
+}
+
+// LoadTree todo
+func (tr *VotersForRound) LoadTree(onlineTop TopOnlineAccounts, hdr bookkeeping.BlockHeader) error {
+ r := hdr.Round
+
+ // certRound is the block that we expect to form a compact certificate for,
+ // using the balances from round r.
+ certRound := r + basics.Round(tr.Proto.CompactCertVotersLookback+tr.Proto.CompactCertRounds)
+
+ // sigKeyRound is the ephemeral key ID that we expect to be used for signing
+ // the block from certRound. It is one higher because the keys for certRound
+ // might be deleted by the time consensus is reached on the block and we try
+ // to sign the compact cert for block certRound.
+ sigKeyRound := certRound + 1
+
+ top, err := onlineTop(r, sigKeyRound, tr.Proto.CompactCertTopVoters)
+ if err != nil {
+ return err
+ }
+
+ participants := make(ParticipantsArray, len(top))
+ addrToPos := make(map[basics.Address]uint64)
+ var totalWeight basics.MicroAlgos
+
+ for i, acct := range top {
+ var ot basics.OverflowTracker
+ rewards := basics.PendingRewards(&ot, tr.Proto, acct.MicroAlgos, acct.RewardsBase, hdr.RewardsLevel)
+ money := ot.AddA(acct.MicroAlgos, rewards)
+ if ot.Overflowed {
+ return fmt.Errorf("votersTracker.LoadTree: overflow adding rewards %d + %d", acct.MicroAlgos, rewards)
+ }
+
+ totalWeight = ot.AddA(totalWeight, money)
+ if ot.Overflowed {
+ return fmt.Errorf("votersTracker.LoadTree: overflow computing totalWeight %d + %d", totalWeight.ToUint64(), money.ToUint64())
+ }
+
+ keyDilution := acct.VoteKeyDilution
+ if keyDilution == 0 {
+ keyDilution = tr.Proto.DefaultKeyDilution
+ }
+
+ participants[i] = basics.Participant{
+ PK: acct.VoteID,
+ Weight: money.ToUint64(),
+ KeyDilution: keyDilution,
+ }
+ addrToPos[acct.Address] = uint64(i)
+ }
+
+ tree, err := merklearray.Build(participants)
+ if err != nil {
+ return err
+ }
+
+ tr.mu.Lock()
+ tr.AddrToPos = addrToPos
+ tr.Participants = participants
+ tr.TotalWeight = totalWeight
+ tr.Tree = tree
+ tr.cond.Broadcast()
+ tr.mu.Unlock()
+
+ return nil
+}
+
+// BroadcastError broadcasts the error
+func (tr *VotersForRound) BroadcastError(err error) {
+ tr.mu.Lock()
+ tr.loadTreeError = err
+ tr.cond.Broadcast()
+ tr.mu.Unlock()
+}
+
+//Wait waits for the tree to get constructed.
+func (tr *VotersForRound) Wait() error {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+ for tr.Tree == nil {
+ if tr.loadTreeError != nil {
+ return tr.loadTreeError
+ }
+
+ tr.cond.Wait()
+ }
+ return nil
+}
diff --git a/ledger/metrics.go b/ledger/metrics.go
index a2276c006..55a84d563 100644
--- a/ledger/metrics.go
+++ b/ledger/metrics.go
@@ -17,6 +17,9 @@
package ledger
import (
+ "context"
+ "database/sql"
+
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -29,7 +32,7 @@ type metricsTracker struct {
ledgerRound *metrics.Gauge
}
-func (mt *metricsTracker) loadFromDisk(l ledgerForTracker) error {
+func (mt *metricsTracker) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
mt.ledgerTransactionsTotal = metrics.MakeCounter(metrics.LedgerTransactionsTotal)
mt.ledgerRewardClaimsTotal = metrics.MakeCounter(metrics.LedgerRewardClaimsTotal)
mt.ledgerRound = metrics.MakeGauge(metrics.LedgerRound)
@@ -47,6 +50,23 @@ func (mt *metricsTracker) newBlock(blk bookkeeping.Block, delta ledgercore.State
mt.ledgerRewardClaimsTotal.Add(float64(1), map[string]string{})
}
-func (mt *metricsTracker) committedUpTo(committedRnd basics.Round) basics.Round {
- return committedRnd
+func (mt *metricsTracker) committedUpTo(committedRnd basics.Round) (retRound, lookback basics.Round) {
+ return committedRnd, basics.Round(0)
+}
+
+func (mt *metricsTracker) prepareCommit(dcc *deferredCommitContext) error {
+ return nil
+}
+
+func (mt *metricsTracker) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (mt *metricsTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (mt *metricsTracker) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+func (mt *metricsTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index b3eb7d89d..f35709100 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -47,14 +47,6 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// storageAction
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
// MarshalMsg implements msgp.Marshaler
func (z CatchpointCatchupState) MarshalMsg(b []byte) (o []byte) {
@@ -851,49 +843,3 @@ func (z *encodedBalanceRecord) Msgsize() (s int) {
func (z *encodedBalanceRecord) MsgIsZero() bool {
return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero())
}
-
-// MarshalMsg implements msgp.Marshaler
-func (z storageAction) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendUint64(o, uint64(z))
- return
-}
-
-func (_ storageAction) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(storageAction)
- if !ok {
- _, ok = (z).(*storageAction)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *storageAction) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var zb0001 uint64
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- (*z) = storageAction(zb0001)
- }
- o = bts
- return
-}
-
-func (_ *storageAction) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*storageAction)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z storageAction) Msgsize() (s int) {
- s = msgp.Uint64Size
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z storageAction) MsgIsZero() bool {
- return z == 0
-}
diff --git a/ledger/notifier.go b/ledger/notifier.go
index d5a0d4886..e922c73e1 100644
--- a/ledger/notifier.go
+++ b/ledger/notifier.go
@@ -17,6 +17,8 @@
package ledger
import (
+ "context"
+ "database/sql"
"sync"
"github.com/algorand/go-deadlock"
@@ -85,7 +87,7 @@ func (bn *blockNotifier) close() {
bn.closing.Wait()
}
-func (bn *blockNotifier) loadFromDisk(l ledgerForTracker) error {
+func (bn *blockNotifier) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
bn.cond = sync.NewCond(&bn.mu)
bn.running = true
bn.pendingBlocks = nil
@@ -108,6 +110,24 @@ func (bn *blockNotifier) newBlock(blk bookkeeping.Block, delta ledgercore.StateD
bn.cond.Broadcast()
}
-func (bn *blockNotifier) committedUpTo(rnd basics.Round) basics.Round {
- return rnd
+func (bn *blockNotifier) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
+ return rnd, basics.Round(0)
+}
+
+func (bn *blockNotifier) prepareCommit(dcc *deferredCommitContext) error {
+ return nil
+}
+
+func (bn *blockNotifier) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (bn *blockNotifier) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (bn *blockNotifier) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+
+func (bn *blockNotifier) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
diff --git a/ledger/onlineacct.go b/ledger/onlinetopheap.go
index 687f0c595..72a81d88a 100644
--- a/ledger/onlineacct.go
+++ b/ledger/onlinetopheap.go
@@ -19,29 +19,12 @@ package ledger
import (
"bytes"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
)
-// An onlineAccount corresponds to an account whose AccountData.Status
-// is Online. This is used for a Merkle tree commitment of online
-// accounts, which is subsequently used to validate participants for
-// a compact certificate.
-type onlineAccount struct {
- // These are a subset of the fields from the corresponding AccountData.
- Address basics.Address
- MicroAlgos basics.MicroAlgos
- RewardsBase uint64
- NormalizedOnlineBalance uint64
- VoteID crypto.OneTimeSignatureVerifier
- VoteFirstValid basics.Round
- VoteLastValid basics.Round
- VoteKeyDilution uint64
-}
-
// onlineTopHeap implements heap.Interface for tracking top N online accounts.
type onlineTopHeap struct {
- accts []*onlineAccount
+ accts []*ledgercore.OnlineAccount
}
// Len implements sort.Interface
@@ -78,7 +61,7 @@ func (h *onlineTopHeap) Swap(i, j int) {
// Push implements heap.Interface
func (h *onlineTopHeap) Push(x interface{}) {
- h.accts = append(h.accts, x.(*onlineAccount))
+ h.accts = append(h.accts, x.(*ledgercore.OnlineAccount))
}
// Pop implements heap.Interface
diff --git a/ledger/onlineacct_test.go b/ledger/onlinetopheap_test.go
index c1d05fa15..11c85d599 100644
--- a/ledger/onlineacct_test.go
+++ b/ledger/onlinetopheap_test.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -30,7 +31,7 @@ func TestOnlineTopHeap_Less(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
@@ -59,7 +60,7 @@ func TestOnlineTopHeap_Swap(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
@@ -88,7 +89,7 @@ func TestOnlineTopHeap_Push(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
@@ -102,7 +103,7 @@ func TestOnlineTopHeap_Push(t *testing.T) {
acct0 := h.accts[0]
acct1 := h.accts[1]
- acct2 := &onlineAccount{
+ acct2 := &ledgercore.OnlineAccount{
Address: basics.Address(crypto.Hash([]byte("address"))),
NormalizedOnlineBalance: 0,
}
@@ -119,7 +120,7 @@ func TestOnlineTopHeap_Pop(t *testing.T) {
partitiontest.PartitionTest(t)
h := onlineTopHeap{
- accts: []*onlineAccount{
+ accts: []*ledgercore.OnlineAccount{
{
Address: basics.Address{},
NormalizedOnlineBalance: 0,
diff --git a/ledger/perf_test.go b/ledger/perf_test.go
index c1e520fef..15aca599f 100644
--- a/ledger/perf_test.go
+++ b/ledger/perf_test.go
@@ -30,64 +30,18 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/execpool"
)
-func genesis(naccts int) (InitState, []basics.Address, []*crypto.SignatureSecrets) {
- return genesisWithProto(naccts, protocol.ConsensusCurrentVersion)
-}
-func genesisWithProto(naccts int, proto protocol.ConsensusVersion) (InitState, []basics.Address, []*crypto.SignatureSecrets) {
- blk := bookkeeping.Block{}
- blk.CurrentProtocol = proto
- blk.BlockHeader.GenesisID = "test"
- blk.FeeSink = testSinkAddr
- blk.RewardsPool = testPoolAddr
- crypto.RandBytes(blk.BlockHeader.GenesisHash[:])
-
- addrs := []basics.Address{}
- keys := []*crypto.SignatureSecrets{}
- accts := make(map[basics.Address]basics.AccountData)
-
- // 10 billion microalgos, across N accounts and pool and sink
- amount := 10 * 1000000000 * 1000000 / uint64(naccts+2)
-
- for i := 0; i < naccts; i++ {
- var seed crypto.Seed
- crypto.RandBytes(seed[:])
- key := crypto.GenerateSignatureSecrets(seed)
- addr := basics.Address(key.SignatureVerifier)
-
- keys = append(keys, key)
- addrs = append(addrs, addr)
-
- adata := basics.AccountData{}
- adata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 / uint64(naccts)
- accts[addr] = adata
- }
-
- pooldata := basics.AccountData{}
- pooldata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
- pooldata.Status = basics.NotParticipating
- accts[testPoolAddr] = pooldata
-
- sinkdata := basics.AccountData{}
- sinkdata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
- sinkdata.Status = basics.NotParticipating
- accts[testSinkAddr] = sinkdata
-
- genesisHash := blk.BlockHeader.GenesisHash
-
- return InitState{blk, accts, genesisHash}, addrs, keys
-}
-
func BenchmarkManyAccounts(b *testing.B) {
deadlock.Opts.Disable = true
b.StopTimer()
- genesisInitState, addrs, _ := genesis(1)
+ genesisInitState, addrs, _ := ledgertesting.Genesis(1)
addr := addrs[0]
dbName := fmt.Sprintf("%s.%d", b.Name(), crypto.RandUint64())
@@ -138,7 +92,7 @@ func BenchmarkManyAccounts(b *testing.B) {
func BenchmarkValidate(b *testing.B) {
b.StopTimer()
- genesisInitState, addrs, keys := genesis(10000)
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10000)
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
diff --git a/ledger/testing/accountsTotals.go b/ledger/testing/accountsTotals.go
new file mode 100644
index 000000000..b646a6829
--- /dev/null
+++ b/ledger/testing/accountsTotals.go
@@ -0,0 +1,41 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ gotesting "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+// CalculateNewRoundAccountTotals calculates the accounts totals for a given round
+func CalculateNewRoundAccountTotals(t *gotesting.T, newRoundDeltas ledgercore.AccountDeltas, newRoundRewardLevel uint64, newRoundConsensusParams config.ConsensusParams, prevRoundBalances map[basics.Address]basics.AccountData, prevRoundTotals ledgercore.AccountTotals) (newTotals ledgercore.AccountTotals) {
+ newTotals = prevRoundTotals
+ var ot basics.OverflowTracker
+ newTotals.ApplyRewards(newRoundRewardLevel, &ot)
+ for i := 0; i < newRoundDeltas.Len(); i++ {
+ addr, ad := newRoundDeltas.GetByIdx(i)
+ newTotals.DelAccount(newRoundConsensusParams, prevRoundBalances[addr], &ot)
+ newTotals.AddAccount(newRoundConsensusParams, ad, &ot)
+ }
+ require.False(t, ot.Overflowed)
+ return
+}
diff --git a/ledger/testing/initState.go b/ledger/testing/initState.go
new file mode 100644
index 000000000..ad96e1f76
--- /dev/null
+++ b/ledger/testing/initState.go
@@ -0,0 +1,111 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+var poolSecret, sinkSecret *crypto.SignatureSecrets
+
+func init() {
+ var seed crypto.Seed
+
+ incentivePoolName := []byte("incentive pool")
+ copy(seed[:], incentivePoolName)
+ poolSecret = crypto.GenerateSignatureSecrets(seed)
+
+ feeSinkName := []byte("fee sink")
+ copy(seed[:], feeSinkName)
+ sinkSecret = crypto.GenerateSignatureSecrets(seed)
+}
+
+// GenerateInitState generates testing init state
+func GenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoPerAccount int) (genesisInitState ledgercore.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
+ params := config.Consensus[proto]
+ poolAddr := testPoolAddr
+ sinkAddr := testSinkAddr
+
+ var zeroSeed crypto.Seed
+ var genaddrs [10]basics.Address
+ var gensecrets [10]*crypto.SignatureSecrets
+ for i := range genaddrs {
+ seed := zeroSeed
+ seed[0] = byte(i)
+ x := crypto.GenerateSignatureSecrets(seed)
+ genaddrs[i] = basics.Address(x.SignatureVerifier)
+ gensecrets[i] = x
+ }
+
+ initKeys = make(map[basics.Address]*crypto.SignatureSecrets)
+ initAccounts := make(map[basics.Address]basics.AccountData)
+ for i := range genaddrs {
+ initKeys[genaddrs[i]] = gensecrets[i]
+ // Give each account quite a bit more balance than MinFee or MinBalance
+ initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)})
+ }
+ initKeys[poolAddr] = poolSecret
+ initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567})
+ initKeys[sinkAddr] = sinkSecret
+ initAccounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 7654321})
+
+ incentivePoolBalanceAtGenesis := initAccounts[poolAddr].MicroAlgos
+ var initialRewardsPerRound uint64
+ if params.InitialRewardsRateCalculation {
+ initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
+ } else {
+ initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ }
+
+ initBlock := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ GenesisID: tb.Name(),
+ Round: 0,
+ RewardsState: bookkeeping.RewardsState{
+ RewardsRate: initialRewardsPerRound,
+ RewardsPool: poolAddr,
+ FeeSink: sinkAddr,
+ },
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: proto,
+ },
+ },
+ }
+
+ var err error
+ initBlock.TxnRoot, err = initBlock.PaysetCommit()
+ require.NoError(tb, err)
+
+ if params.SupportGenesisHash {
+ initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(tb.Name()))
+ }
+
+ genesisInitState.Block = initBlock
+ genesisInitState.Accounts = initAccounts
+ genesisInitState.GenesisHash = crypto.Hash([]byte(tb.Name()))
+
+ return
+}
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
new file mode 100644
index 000000000..86ec6a60c
--- /dev/null
+++ b/ledger/testing/randomAccounts.go
@@ -0,0 +1,344 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/protocol"
+ //"github.com/algorand/go-algorand/data/bookkeeping"
+
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
+
+// RandomAddress generates a random address
+func RandomAddress() basics.Address {
+ var addr basics.Address
+ crypto.RandBytes(addr[:])
+ return addr
+}
+
+// RandomNote generates a random notes data
+func RandomNote() []byte {
+ var note [16]byte
+ crypto.RandBytes(note[:])
+ return note[:]
+}
+
+// RandomAccountData generates a random AccountData
+func RandomAccountData(rewardsLevel uint64) basics.AccountData {
+ var data basics.AccountData
+
+ // Avoid overflowing totals
+ data.MicroAlgos.Raw = crypto.RandUint64() % (1 << 32)
+
+ switch crypto.RandUint64() % 3 {
+ case 0:
+ data.Status = basics.Online
+ case 1:
+ data.Status = basics.Offline
+ default:
+ data.Status = basics.NotParticipating
+ }
+
+ data.RewardsBase = rewardsLevel
+ data.VoteFirstValid = 0
+ data.VoteLastValid = 1000
+ return data
+}
+
+// RandomFullAccountData generates a random AccountData
+func RandomFullAccountData(rewardsLevel, lastCreatableID uint64) (basics.AccountData, uint64) {
+ data := RandomAccountData(rewardsLevel)
+
+ crypto.RandBytes(data.VoteID[:])
+ crypto.RandBytes(data.SelectionID[:])
+ data.VoteFirstValid = basics.Round(crypto.RandUint64())
+ data.VoteLastValid = basics.Round(crypto.RandUint64())
+ data.VoteKeyDilution = crypto.RandUint64()
+ if 1 == (crypto.RandUint64() % 2) {
+ // if account has created assets, have these defined.
+ data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
+ createdAssetsCount := crypto.RandUint64()%20 + 1
+ for i := uint64(0); i < createdAssetsCount; i++ {
+ ap := basics.AssetParams{
+ Total: crypto.RandUint64(),
+ Decimals: uint32(crypto.RandUint64() % 20),
+ DefaultFrozen: (crypto.RandUint64()%2 == 0),
+ UnitName: fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffffff)),
+ AssetName: fmt.Sprintf("an%x", uint32(crypto.RandUint64()%0x7fffffff)),
+ URL: fmt.Sprintf("url%x", uint32(crypto.RandUint64()%0x7fffffff)),
+ }
+ crypto.RandBytes(ap.MetadataHash[:])
+ crypto.RandBytes(ap.Manager[:])
+ crypto.RandBytes(ap.Reserve[:])
+ crypto.RandBytes(ap.Freeze[:])
+ crypto.RandBytes(ap.Clawback[:])
+ lastCreatableID++
+ data.AssetParams[basics.AssetIndex(lastCreatableID)] = ap
+ }
+ }
+ if 1 == (crypto.RandUint64()%2) && lastCreatableID > 0 {
+ // if account owns assets
+ data.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
+ ownedAssetsCount := crypto.RandUint64()%20 + 1
+ for i := uint64(0); i < ownedAssetsCount; i++ {
+ ah := basics.AssetHolding{
+ Amount: crypto.RandUint64(),
+ Frozen: (crypto.RandUint64()%2 == 0),
+ }
+ data.Assets[basics.AssetIndex(crypto.RandUint64()%lastCreatableID)] = ah
+ }
+ }
+ if 1 == (crypto.RandUint64() % 5) {
+ crypto.RandBytes(data.AuthAddr[:])
+ }
+
+ if 1 == (crypto.RandUint64()%3) && lastCreatableID > 0 {
+ data.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState)
+ appStatesCount := crypto.RandUint64()%20 + 1
+ for i := uint64(0); i < appStatesCount; i++ {
+ ap := basics.AppLocalState{
+ Schema: basics.StateSchema{
+ NumUint: crypto.RandUint64()%5 + 1,
+ NumByteSlice: crypto.RandUint64() % 5,
+ },
+ KeyValue: make(map[string]basics.TealValue),
+ }
+
+ for i := uint64(0); i < ap.Schema.NumUint; i++ {
+ appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
+ ap.KeyValue[appName] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: crypto.RandUint64(),
+ }
+ }
+ for i := uint64(0); i < ap.Schema.NumByteSlice; i++ {
+ appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
+ tv := basics.TealValue{
+ Type: basics.TealBytesType,
+ }
+ bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(appName)))
+ crypto.RandBytes(bytes[:])
+ tv.Bytes = string(bytes)
+ ap.KeyValue[appName] = tv
+ }
+ if len(ap.KeyValue) == 0 {
+ ap.KeyValue = nil
+ }
+ data.AppLocalStates[basics.AppIndex(crypto.RandUint64()%lastCreatableID)] = ap
+ }
+ }
+
+ if 1 == (crypto.RandUint64() % 3) {
+ data.TotalAppSchema = basics.StateSchema{
+ NumUint: crypto.RandUint64() % 50,
+ NumByteSlice: crypto.RandUint64() % 50,
+ }
+ }
+ if 1 == (crypto.RandUint64() % 3) {
+ data.AppParams = make(map[basics.AppIndex]basics.AppParams)
+ appParamsCount := crypto.RandUint64()%5 + 1
+ for i := uint64(0); i < appParamsCount; i++ {
+ ap := basics.AppParams{
+ ApprovalProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
+ ClearStateProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
+ GlobalState: make(basics.TealKeyValue),
+ StateSchemas: basics.StateSchemas{
+ LocalStateSchema: basics.StateSchema{
+ NumUint: crypto.RandUint64()%5 + 1,
+ NumByteSlice: crypto.RandUint64() % 5,
+ },
+ GlobalStateSchema: basics.StateSchema{
+ NumUint: crypto.RandUint64()%5 + 1,
+ NumByteSlice: crypto.RandUint64() % 5,
+ },
+ },
+ }
+ if len(ap.ApprovalProgram) > 0 {
+ crypto.RandBytes(ap.ApprovalProgram[:])
+ } else {
+ ap.ApprovalProgram = nil
+ }
+ if len(ap.ClearStateProgram) > 0 {
+ crypto.RandBytes(ap.ClearStateProgram[:])
+ } else {
+ ap.ClearStateProgram = nil
+ }
+
+ for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumUint+ap.StateSchemas.GlobalStateSchema.NumUint; i++ {
+ appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ ap.GlobalState[appName] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: crypto.RandUint64(),
+ }
+ }
+ for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumByteSlice+ap.StateSchemas.GlobalStateSchema.NumByteSlice; i++ {
+ appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ tv := basics.TealValue{
+ Type: basics.TealBytesType,
+ }
+ bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen))
+ crypto.RandBytes(bytes[:])
+ tv.Bytes = string(bytes)
+ ap.GlobalState[appName] = tv
+ }
+ if len(ap.GlobalState) == 0 {
+ ap.GlobalState = nil
+ }
+ lastCreatableID++
+ data.AppParams[basics.AppIndex(lastCreatableID)] = ap
+ }
+
+ }
+ return data, lastCreatableID
+}
+
+// RandomAccounts generates a random set of accounts map
+func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.AccountData {
+ res := make(map[basics.Address]basics.AccountData)
+ if simpleAccounts {
+ for i := 0; i < niter; i++ {
+ res[RandomAddress()] = RandomAccountData(0)
+ }
+ } else {
+ lastCreatableID := crypto.RandUint64() % 512
+ for i := 0; i < niter; i++ {
+ res[RandomAddress()], lastCreatableID = RandomFullAccountData(0, lastCreatableID)
+ }
+ }
+ return res
+}
+
+// RandomDeltas generates a random set of accounts delta
+func RandomDeltas(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64) {
+ updates, totals, imbalance, _ = RandomDeltasImpl(niter, base, rewardsLevel, true, 0)
+ return
+}
+
+// RandomDeltasFull generates a random set of accounts delta
+func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
+ updates, totals, imbalance, lastCreatableID = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+ return
+}
+
+// RandomDeltasImpl generates a random set of accounts delta
+func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, imbalance int64, lastCreatableID uint64) {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ totals = make(map[basics.Address]basics.AccountData)
+
+ // copy base -> totals
+ for addr, data := range base {
+ totals[addr] = data
+ }
+
+ // if making a full delta then need to determine max asset/app id to get rid of conflicts
+ lastCreatableID = lastCreatableIDIn
+ if !simple {
+ for _, ad := range base {
+ for aid := range ad.AssetParams {
+ if uint64(aid) > lastCreatableID {
+ lastCreatableID = uint64(aid)
+ }
+ }
+ for aid := range ad.AppParams {
+ if uint64(aid) > lastCreatableID {
+ lastCreatableID = uint64(aid)
+ }
+ }
+ }
+ }
+
+ // Change some existing accounts
+ {
+ i := 0
+ for addr, old := range base {
+ if i >= len(base)/2 || i >= niter {
+ break
+ }
+
+ if addr == testPoolAddr {
+ continue
+ }
+ i++
+
+ var new basics.AccountData
+ if simple {
+ new = RandomAccountData(rewardsLevel)
+ } else {
+ new, lastCreatableID = RandomFullAccountData(rewardsLevel, lastCreatableID)
+ }
+ updates.Upsert(addr, new)
+ imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
+ totals[addr] = new
+ }
+ }
+
+ // Change some new accounts
+ for i := 0; i < niter; i++ {
+ addr := RandomAddress()
+ old := totals[addr]
+ var new basics.AccountData
+ if simple {
+ new = RandomAccountData(rewardsLevel)
+ } else {
+ new, lastCreatableID = RandomFullAccountData(rewardsLevel, lastCreatableID)
+ }
+ updates.Upsert(addr, new)
+ imbalance += int64(old.WithUpdatedRewards(proto, rewardsLevel).MicroAlgos.Raw - new.MicroAlgos.Raw)
+ totals[addr] = new
+ }
+
+ return
+}
+
+// RandomDeltasBalanced generates a random set of accounts delta
+func RandomDeltasBalanced(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData) {
+ updates, totals, _ = RandomDeltasBalancedImpl(niter, base, rewardsLevel, true, 0)
+ return
+}
+
+// RandomDeltasBalancedFull generates a random set of accounts delta
+func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
+ updates, totals, lastCreatableID = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+ return
+}
+
+// RandomDeltasBalancedImpl generates a random set of accounts delta
+func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]basics.AccountData, lastCreatableID uint64) {
+ var imbalance int64
+ if simple {
+ updates, totals, imbalance = RandomDeltas(niter, base, rewardsLevel)
+ } else {
+ updates, totals, imbalance, lastCreatableID = RandomDeltasFull(niter, base, rewardsLevel, lastCreatableIDIn)
+ }
+
+ oldPool := base[testPoolAddr]
+ newPool := oldPool
+ newPool.MicroAlgos.Raw += uint64(imbalance)
+
+ updates.Upsert(testPoolAddr, newPool)
+ totals[testPoolAddr] = newPool
+
+ return updates, totals, lastCreatableID
+}
diff --git a/ledger/testing/testGenesis.go b/ledger/testing/testGenesis.go
new file mode 100644
index 000000000..a24c46c57
--- /dev/null
+++ b/ledger/testing/testGenesis.go
@@ -0,0 +1,137 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// NewTestGenesis creates a bunch of accounts, splits up 10B algos
+// between them and the rewardspool and feesink, and gives out the
+// addresses and secrets it creates to enable tests. For special
+// scenarios, manipulate these return values before using newTestLedger.
+func NewTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
+ // irrelevant, but deterministic
+ sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
+ if err != nil {
+ panic(err)
+ }
+ rewards, err := basics.UnmarshalChecksumAddress("242H5OXHUEBYCGGWB3CQ6AZAMQB5TMCWJGHCGQOZPEIVQJKOO7NZXUXDQA")
+ if err != nil {
+ panic(err)
+ }
+
+ const count = 10
+ addrs := make([]basics.Address, count)
+ secrets := make([]*crypto.SignatureSecrets, count)
+ accts := make(map[basics.Address]basics.AccountData)
+
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(count+2)
+
+ for i := 0; i < count; i++ {
+ // Create deterministic addresses, so that output stays the same, run to run.
+ var seed crypto.Seed
+ seed[0] = byte(i)
+ secrets[i] = crypto.GenerateSignatureSecrets(seed)
+ addrs[i] = basics.Address(secrets[i].SignatureVerifier)
+
+ adata := basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+ accts[addrs[i]] = adata
+ }
+
+ accts[sink] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ Status: basics.NotParticipating,
+ }
+
+ accts[rewards] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+
+ genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
+
+ return genBalances, addrs, secrets
+}
+
+// Genesis creates a genesis state for naccts accounts using the ConsensusCurrentVersion
+func Genesis(naccts int) (ledgercore.InitState, []basics.Address, []*crypto.SignatureSecrets) {
+ return GenesisWithProto(naccts, protocol.ConsensusCurrentVersion)
+}
+
+// GenesisWithProto creates a genesis state for naccts accounts using the proto consensus protocol
+func GenesisWithProto(naccts int, proto protocol.ConsensusVersion) (ledgercore.InitState, []basics.Address, []*crypto.SignatureSecrets) {
+ blk := bookkeeping.Block{}
+ blk.CurrentProtocol = proto
+ blk.BlockHeader.GenesisID = "test"
+ blk.FeeSink = testSinkAddr
+ blk.RewardsPool = testPoolAddr
+
+ crypto.RandBytes(blk.BlockHeader.GenesisHash[:])
+
+ addrs := []basics.Address{}
+ keys := []*crypto.SignatureSecrets{}
+ accts := make(map[basics.Address]basics.AccountData)
+
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(naccts+2)
+
+ for i := 0; i < naccts; i++ {
+ var seed crypto.Seed
+ crypto.RandBytes(seed[:])
+ key := crypto.GenerateSignatureSecrets(seed)
+ addr := basics.Address(key.SignatureVerifier)
+
+ keys = append(keys, key)
+ addrs = append(addrs, addr)
+
+ adata := basics.AccountData{}
+ adata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 / uint64(naccts)
+ accts[addr] = adata
+ }
+
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[testSinkAddr] = sinkdata
+
+ genesisHash := blk.BlockHeader.GenesisHash
+
+ incentivePoolBalanceAtGenesis := pooldata.MicroAlgos
+ var initialRewardsPerRound uint64
+ params := config.Consensus[proto]
+ if params.InitialRewardsRateCalculation {
+ initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
+ } else {
+ initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ }
+ blk.RewardsRate = initialRewardsPerRound
+
+ return ledgercore.InitState{Block: blk, Accounts: accts, GenesisHash: genesisHash}, addrs, keys
+}
diff --git a/ledger/tracker.go b/ledger/tracker.go
index 40dd725a5..855995665 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -17,16 +17,24 @@
package ledger
import (
+ "context"
+ "database/sql"
+ "errors"
"fmt"
"reflect"
+ "sync"
+ "time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-deadlock"
)
// ledgerTracker defines part of the API for any state machine that
@@ -55,26 +63,56 @@ type ledgerTracker interface {
// blocks from the database, or access its own state. The
// ledgerForTracker interface abstracts away the details of
// ledger internals so that individual trackers can be tested
- // in isolation.
- loadFromDisk(ledgerForTracker) error
+ // in isolation. The provided round number represents the
+ // current accounts storage round number.
+ loadFromDisk(ledgerForTracker, basics.Round) error
- // newBlock informs the tracker of a new block from round
- // rnd and a given ledgercore.StateDelta as produced by BlockEvaluator.
+ // newBlock informs the tracker of a new block along with
+ // a given ledgercore.StateDelta as produced by BlockEvaluator.
newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta)
- // committedUpTo informs the tracker that the database has
+ // committedUpTo informs the tracker that the block database has
// committed all blocks up to and including rnd to persistent
- // storage (the SQL database). This can allow the tracker
+ // storage. This can allow the tracker
// to garbage-collect state that will not be needed.
//
// committedUpTo() returns the round number of the earliest
- // block that this tracker needs to be stored in the ledger
- // for subsequent calls to loadFromDisk(). All blocks with
- // round numbers before that may be deleted to save space,
- // and the tracker is expected to still function after a
- // restart and a call to loadFromDisk(). For example,
- // returning 0 means that no blocks can be deleted.
- committedUpTo(basics.Round) basics.Round
+ // block that this tracker needs to be stored in the block
+ // database for subsequent calls to loadFromDisk().
+ // All blocks with round numbers before that may be deleted to
+ // save space, and the tracker is expected to still function
+ // after a restart and a call to loadFromDisk().
+ // For example, returning 0 means that no blocks can be deleted.
+ // Separetly, the method returns the lookback that is being
+ // maintained by the tracker.
+ committedUpTo(basics.Round) (minRound, lookback basics.Round)
+
+ // produceCommittingTask prepares a deferredCommitRange; Preparing a deferredCommitRange is a joint
+ // effort, and all the trackers contribute to that effort. All the trackers are being handed a
+ // pointer to the deferredCommitRange, and have the ability to either modify it, or return a
+ // nil. If nil is returned, the commit would be skipped.
+ produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange
+
+ // prepareCommit, commitRound and postCommit are called when it is time to commit tracker's data.
+ // If an error returned the process is aborted.
+
+ // prepareCommit aligns the data structures stored in the deferredCommitContext with the current
+ // state of the tracker. It allows the tracker to decide what data is going to be persisted
+ // on the coming commitRound.
+ prepareCommit(*deferredCommitContext) error
+ // commitRound is called for each of the trackers after a deferredCommitContext was agreed upon
+ // by all the prepareCommit calls. The commitRound is being executed within a single transactional
+ // context, and so, if any of the tracker's commitRound calls fails, the transaction is rolled back.
+ commitRound(context.Context, *sql.Tx, *deferredCommitContext) error
+ // postCommit is called only on a successful commitRound. In that case, each of the trackers have
+ // the chance to update it's internal data structures, knowing that the given deferredCommitContext
+ // has completed. An optional context is provided for long-running operations.
+ postCommit(context.Context, *deferredCommitContext)
+
+ // handleUnorderedCommit is a special method for handling deferred commits that are out of order.
+ // Tracker might update own state in this case. For example, account updates tracker cancels
+ // scheduled catchpoint writing that deferred commit.
+ handleUnorderedCommit(uint64, basics.Round, basics.Round)
// close terminates the tracker, reclaiming any resources
// like open database connections or goroutines. close may
@@ -89,26 +127,142 @@ type ledgerForTracker interface {
trackerDB() db.Pair
blockDB() db.Pair
trackerLog() logging.Logger
- trackerEvalVerified(bookkeeping.Block, ledgerForEvaluator) (ledgercore.StateDelta, error)
+ trackerEvalVerified(bookkeeping.Block, internal.LedgerForEvaluator) (ledgercore.StateDelta, error)
Latest() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
GenesisHash() crypto.Digest
GenesisProto() config.ConsensusParams
+ GenesisAccounts() map[basics.Address]basics.AccountData
}
type trackerRegistry struct {
trackers []ledgerTracker
+ // the accts has some exceptional usages in the tracker registry.
+ accts *accountUpdates
+
+ // ctx is the context for the committing go-routine.
+ ctx context.Context
+ // ctxCancel is the canceling function for canceling the committing go-routine ( i.e. signaling the committing go-routine that it's time to abort )
+ ctxCancel context.CancelFunc
+
+ // deferredCommits is the channel of pending deferred commits
+ deferredCommits chan *deferredCommitContext
+
+ // commitSyncerClosed is the blocking channel for synchronizing closing the commitSyncer goroutine. Once it's closed, the
+ // commitSyncer can be assumed to have aborted.
+ commitSyncerClosed chan struct{}
+
+ // accountsWriting provides synchronization around the background writing of account balances.
+ accountsWriting sync.WaitGroup
+
+ // dbRound is always exactly accountsRound(),
+ // cached to avoid SQL queries.
+ dbRound basics.Round
+
+ dbs db.Pair
+ log logging.Logger
+
+ // the synchronous mode that would be used for the account database.
+ synchronousMode db.SynchronousMode
+
+ // the synchronous mode that would be used while the accounts database is being rebuilt.
+ accountsRebuildSynchronousMode db.SynchronousMode
+
+ mu deadlock.RWMutex
+
+ // lastFlushTime is the time we last flushed updates to
+ // the accounts DB (bumping dbRound).
+ lastFlushTime time.Time
+}
+
+// deferredCommitRange is used during the calls to produceCommittingTask, and used as a data structure
+// to syncronize the various trackers and create a uniformity around which rounds need to be persisted
+// next.
+type deferredCommitRange struct {
+ offset uint64
+ oldBase basics.Round
+ lookback basics.Round
+
+ // pendingDeltas is the number of accounts that were modified within this commit context.
+ // note that in this number we might have the same account being modified several times.
+ pendingDeltas int
+
+ isCatchpointRound bool
+
+ // catchpointWriting is a pointer to a varible with the same name in the catchpointTracker.
+ // it's used in order to reset the catchpointWriting flag from the acctupdates's
+ // prepareCommit/commitRound ( which is called before the corresponding catchpoint tracker method )
+ catchpointWriting *int32
}
-func (tr *trackerRegistry) register(lt ledgerTracker) {
- tr.trackers = append(tr.trackers, lt)
+// deferredCommitContext is used in order to syncornize the persistence of a given deferredCommitRange.
+// prepareCommit, commitRound and postCommit are all using it to exchange data.
+type deferredCommitContext struct {
+ deferredCommitRange
+
+ newBase basics.Round
+ flushTime time.Time
+
+ genesisProto config.ConsensusParams
+
+ deltas []ledgercore.AccountDeltas
+ roundTotals ledgercore.AccountTotals
+ compactAccountDeltas compactAccountDeltas
+ compactCreatableDeltas map[basics.CreatableIndex]ledgercore.ModifiedCreatable
+
+ updatedPersistedAccounts []persistedAccountData
+
+ committedRoundDigest crypto.Digest
+ trieBalancesHash crypto.Digest
+ updatingBalancesDuration time.Duration
+ catchpointLabel string
+
+ stats telemetryspec.AccountsUpdateMetrics
+ updateStats bool
+}
+
+var errMissingAccountUpdateTracker = errors.New("initializeTrackerCaches : called without a valid accounts update tracker")
+
+func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTracker, cfg config.Local) (err error) {
+ tr.dbs = l.trackerDB()
+ tr.log = l.trackerLog()
+
+ err = tr.dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ tr.dbRound, err = accountsRound(tx)
+ return err
+ })
+
+ if err != nil {
+ return err
+ }
+
+ tr.ctx, tr.ctxCancel = context.WithCancel(context.Background())
+ tr.deferredCommits = make(chan *deferredCommitContext, 1)
+ tr.commitSyncerClosed = make(chan struct{})
+ tr.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode)
+ tr.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode)
+ go tr.commitSyncer(tr.deferredCommits)
+
+ tr.trackers = append([]ledgerTracker{}, trackers...)
+
+ for _, tracker := range tr.trackers {
+ if accts, ok := tracker.(*accountUpdates); ok {
+ tr.accts = accts
+ break
+ }
+ }
+ return
}
func (tr *trackerRegistry) loadFromDisk(l ledgerForTracker) error {
+ tr.mu.RLock()
+ dbRound := tr.dbRound
+ tr.mu.RUnlock()
+
for _, lt := range tr.trackers {
- err := lt.loadFromDisk(l)
+ err := lt.loadFromDisk(l, dbRound)
if err != nil {
// find the tracker name.
trackerName := reflect.TypeOf(lt).String()
@@ -116,34 +270,382 @@ func (tr *trackerRegistry) loadFromDisk(l ledgerForTracker) error {
}
}
- return nil
+ err := tr.initializeTrackerCaches(l)
+ if err != nil {
+ return err
+ }
+ // the votes have a special dependency on the account updates, so we need to initialize these separetly.
+ tr.accts.voters = &votersTracker{}
+ err = tr.accts.voters.loadFromDisk(l, tr.accts)
+ return err
}
func (tr *trackerRegistry) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
for _, lt := range tr.trackers {
lt.newBlock(blk, delta)
}
- if len(tr.trackers) == 0 {
- fmt.Printf("trackerRegistry::newBlock - no trackers (%d)\n", blk.Round())
- }
}
func (tr *trackerRegistry) committedUpTo(rnd basics.Round) basics.Round {
minBlock := rnd
-
+ maxLookback := basics.Round(0)
for _, lt := range tr.trackers {
- retain := lt.committedUpTo(rnd)
- if retain < minBlock {
- minBlock = retain
+ retainRound, lookback := lt.committedUpTo(rnd)
+ if retainRound < minBlock {
+ minBlock = retainRound
+ }
+ if lookback > maxLookback {
+ maxLookback = lookback
}
}
+ tr.scheduleCommit(rnd, maxLookback)
+
return minBlock
}
+func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round) {
+ tr.mu.RLock()
+ dbRound := tr.dbRound
+ tr.mu.RUnlock()
+
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ lookback: maxLookback,
+ },
+ }
+ cdr := &dcc.deferredCommitRange
+ for _, lt := range tr.trackers {
+ cdr = lt.produceCommittingTask(blockqRound, dbRound, cdr)
+ if cdr == nil {
+ break
+ }
+ }
+ if cdr != nil {
+ dcc.deferredCommitRange = *cdr
+ }
+
+ tr.mu.RLock()
+ // If we recently flushed, wait to aggregate some more blocks.
+ // ( unless we're creating a catchpoint, in which case we want to flush it right away
+ // so that all the instances of the catchpoint would contain exactly the same data )
+ flushTime := time.Now()
+ if dcc != nil && !flushTime.After(tr.lastFlushTime.Add(balancesFlushInterval)) && !dcc.isCatchpointRound && dcc.pendingDeltas < pendingDeltasFlushThreshold {
+ dcc = nil
+ }
+ tr.mu.RUnlock()
+
+ if dcc != nil {
+ tr.accountsWriting.Add(1)
+ tr.deferredCommits <- dcc
+ }
+}
+
+// waitAccountsWriting waits for all the pending ( or current ) account writing to be completed.
+func (tr *trackerRegistry) waitAccountsWriting() {
+ tr.accountsWriting.Wait()
+}
+
func (tr *trackerRegistry) close() {
+ if tr.ctxCancel != nil {
+ tr.ctxCancel()
+ }
+
+ // close() is called from reloadLedger() when and trackerRegistry is not initialized yet
+ if tr.commitSyncerClosed != nil {
+ tr.waitAccountsWriting()
+ // this would block until the commitSyncerClosed channel get closed.
+ <-tr.commitSyncerClosed
+ }
+
for _, lt := range tr.trackers {
lt.close()
}
tr.trackers = nil
+ tr.accts = nil
+}
+
+// commitSyncer is the syncer go-routine function which perform the database updates. Internally, it dequeues deferredCommits and
+// send the tasks to commitRound for completing the operation.
+func (tr *trackerRegistry) commitSyncer(deferredCommits chan *deferredCommitContext) {
+ defer close(tr.commitSyncerClosed)
+ for {
+ select {
+ case commit, ok := <-deferredCommits:
+ if !ok {
+ return
+ }
+ tr.commitRound(commit)
+ case <-tr.ctx.Done():
+ // drain the pending commits queue:
+ drained := false
+ for !drained {
+ select {
+ case <-deferredCommits:
+ tr.accountsWriting.Done()
+ default:
+ drained = true
+ }
+ }
+ return
+ }
+ }
+}
+
+// commitRound commits the given deferredCommitContext via the trackers.
+func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) {
+ defer tr.accountsWriting.Done()
+ tr.mu.RLock()
+
+ offset := dcc.offset
+ dbRound := dcc.oldBase
+ lookback := dcc.lookback
+
+ // we can exit right away, as this is the result of mis-ordered call to committedUpTo.
+ if tr.dbRound < dbRound || offset < uint64(tr.dbRound-dbRound) {
+ tr.log.Warnf("out of order deferred commit: offset %d, dbRound %d but current tracker DB round is %d", offset, dbRound, tr.dbRound)
+ for _, lt := range tr.trackers {
+ lt.handleUnorderedCommit(offset, dbRound, lookback)
+ }
+ tr.mu.RUnlock()
+ return
+ }
+
+ // adjust the offset according to what happened meanwhile..
+ offset -= uint64(tr.dbRound - dbRound)
+
+ // if this iteration need to flush out zero rounds, just return right away.
+ // this usecase can happen when two subsequent calls to committedUpTo concludes that the same rounds range need to be
+ // flush, without the commitRound have a chance of committing these rounds.
+ if offset == 0 {
+ tr.mu.RUnlock()
+ return
+ }
+
+ dbRound = tr.dbRound
+ newBase := basics.Round(offset) + dbRound
+
+ dcc.offset = offset
+ dcc.oldBase = dbRound
+ dcc.newBase = newBase
+ dcc.flushTime = time.Now()
+
+ for _, lt := range tr.trackers {
+ err := lt.prepareCommit(dcc)
+ if err != nil {
+ tr.log.Errorf(err.Error())
+ tr.mu.RUnlock()
+ return
+ }
+ }
+ tr.mu.RUnlock()
+
+ start := time.Now()
+ ledgerCommitroundCount.Inc(nil)
+ err := tr.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ for _, lt := range tr.trackers {
+ err0 := lt.commitRound(ctx, tx, dcc)
+ if err0 != nil {
+ return err0
+ }
+ }
+
+ err = updateAccountsRound(tx, dbRound+basics.Round(offset))
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+ ledgerCommitroundMicros.AddMicrosecondsSince(start, nil)
+
+ if err != nil {
+ tr.log.Warnf("unable to advance tracker db snapshot (%d-%d): %v", dbRound, dbRound+basics.Round(offset), err)
+ return
+ }
+
+ tr.mu.Lock()
+ tr.dbRound = newBase
+ for _, lt := range tr.trackers {
+ lt.postCommit(tr.ctx, dcc)
+ }
+ tr.lastFlushTime = dcc.flushTime
+ tr.mu.Unlock()
+
+}
+
+// initializeTrackerCaches fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ).
+// the method also support balances recovery in cases where the difference between the lastBalancesRound and the lastestBlockRound
+// is far greater than 320; in these cases, it would flush to disk periodically in order to avoid high memory consumption.
+func (tr *trackerRegistry) initializeTrackerCaches(l ledgerForTracker) (err error) {
+ lastestBlockRound := l.Latest()
+ lastBalancesRound := tr.dbRound
+
+ var blk bookkeeping.Block
+ var delta ledgercore.StateDelta
+
+ if tr.accts == nil {
+ return errMissingAccountUpdateTracker
+ }
+
+ accLedgerEval := accountUpdatesLedgerEvaluator{
+ au: tr.accts,
+ }
+
+ if lastBalancesRound < lastestBlockRound {
+ accLedgerEval.prevHeader, err = l.BlockHdr(lastBalancesRound)
+ if err != nil {
+ return err
+ }
+ }
+
+ skipAccountCacheMessage := make(chan struct{})
+ writeAccountCacheMessageCompleted := make(chan struct{})
+ defer func() {
+ close(skipAccountCacheMessage)
+ select {
+ case <-writeAccountCacheMessageCompleted:
+ if err == nil {
+ tr.log.Infof("initializeTrackerCaches completed initializing account data caches")
+ }
+ default:
+ }
+ }()
+
+ catchpointInterval := uint64(0)
+ for _, tracker := range tr.trackers {
+ if catchpointTracker, ok := tracker.(*catchpointTracker); ok {
+ catchpointInterval = catchpointTracker.catchpointInterval
+ break
+ }
+ }
+
+ // this goroutine logs a message once if the parent function have not completed in initializingAccountCachesMessageTimeout seconds.
+ // the message is important, since we're blocking on the ledger block database here, and we want to make sure that we log a message
+ // within the above timeout.
+ go func() {
+ select {
+ case <-time.After(initializingAccountCachesMessageTimeout):
+ tr.log.Infof("initializeTrackerCaches is initializing account data caches")
+ close(writeAccountCacheMessageCompleted)
+ case <-skipAccountCacheMessage:
+ }
+ }()
+
+ blocksStream := make(chan bookkeeping.Block, initializeCachesReadaheadBlocksStream)
+ blockEvalFailed := make(chan struct{}, 1)
+ var blockRetrievalError error
+ go func() {
+ defer close(blocksStream)
+ for roundNumber := lastBalancesRound + 1; roundNumber <= lastestBlockRound; roundNumber++ {
+ blk, blockRetrievalError = l.Block(roundNumber)
+ if blockRetrievalError != nil {
+ return
+ }
+ select {
+ case blocksStream <- blk:
+ case <-blockEvalFailed:
+ return
+ }
+ }
+ }()
+
+ lastFlushedRound := lastBalancesRound
+ const accountsCacheLoadingMessageInterval = 5 * time.Second
+ lastProgressMessage := time.Now().Add(-accountsCacheLoadingMessageInterval / 2)
+
+ // rollbackSynchronousMode ensures that we switch to "fast writing mode" when we start flushing out rounds to disk, and that
+ // we exit this mode when we're done.
+ rollbackSynchronousMode := false
+ defer func() {
+ if rollbackSynchronousMode {
+ // restore default synchronous mode
+ err0 := tr.dbs.Wdb.SetSynchronousMode(context.Background(), tr.synchronousMode, tr.synchronousMode >= db.SynchronousModeFull)
+ // override the returned error only in case there is no error - since this
+ // operation has a lower criticality.
+ if err == nil {
+ err = err0
+ }
+ }
+ }()
+
+ for blk := range blocksStream {
+ delta, err = l.trackerEvalVerified(blk, &accLedgerEval)
+ if err != nil {
+ close(blockEvalFailed)
+ return
+ }
+ tr.newBlock(blk, delta)
+
+ // flush to disk if any of the following applies:
+ // 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk
+ // 2. if we completed the loading and we loaded up more than 320 rounds.
+ flushIntervalExceed := blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval
+ loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound)
+ if flushIntervalExceed || loadCompleted {
+ // adjust the last flush time, so that we would not hold off the flushing due to "working too fast"
+ tr.lastFlushTime = time.Now().Add(-balancesFlushInterval)
+
+ if !rollbackSynchronousMode {
+ // switch to rebuild synchronous mode to improve performance
+ err0 := tr.dbs.Wdb.SetSynchronousMode(context.Background(), tr.accountsRebuildSynchronousMode, tr.accountsRebuildSynchronousMode >= db.SynchronousModeFull)
+ if err0 != nil {
+ tr.log.Warnf("initializeTrackerCaches was unable to switch to rbuild synchronous mode : %v", err0)
+ } else {
+ // flip the switch to rollback the synchronous mode once we're done.
+ rollbackSynchronousMode = true
+ }
+ }
+
+ var roundsBehind basics.Round
+
+ // flush the account data
+ tr.scheduleCommit(blk.Round(), basics.Round(config.Consensus[blk.BlockHeader.CurrentProtocol].MaxBalLookback))
+ // wait for the writing to complete.
+ tr.waitAccountsWriting()
+
+ func() {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+
+ // The au.dbRound after writing should be ~320 behind the block round.
+ roundsBehind = blk.Round() - tr.dbRound
+ }()
+
+ // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit )
+ if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(catchpointInterval) {
+ // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any further changes
+ // would just accumulate in memory.
+ close(blockEvalFailed)
+ tr.log.Errorf("initializeTrackerCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", blk.Round()-roundsBehind, blk.Round())
+ err = fmt.Errorf("initializeTrackerCaches failed to initialize the account data caches")
+ return
+ }
+
+ // and once we flushed it to disk, update the lastFlushedRound
+ lastFlushedRound = blk.Round()
+ }
+
+ // if enough time have passed since the last time we wrote a message to the log file then give the user an update about the progess.
+ if time.Since(lastProgressMessage) > accountsCacheLoadingMessageInterval {
+ // drop the initial message if we're got to this point since a message saying "still initializing" that comes after "is initializing" doesn't seems to be right.
+ select {
+ case skipAccountCacheMessage <- struct{}{}:
+ // if we got to this point, we should be able to close the writeAccountCacheMessageCompleted channel to have the "completed initializing" message written.
+ close(writeAccountCacheMessageCompleted)
+ default:
+ }
+ tr.log.Infof("initializeTrackerCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound)
+ lastProgressMessage = time.Now()
+ }
+
+ // prepare for the next iteration.
+ accLedgerEval.prevHeader = *delta.Hdr
+ }
+
+ if blockRetrievalError != nil {
+ err = blockRetrievalError
+ }
+ return
+
}
diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go
new file mode 100644
index 000000000..dd73f1a8a
--- /dev/null
+++ b/ledger/trackerdb.go
@@ -0,0 +1,365 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "database/sql"
+ "encoding/hex"
+ "fmt"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+type trackerDBParams struct {
+ initAccounts map[basics.Address]basics.AccountData
+ initProto config.ConsensusParams
+ catchpointEnabled bool
+ dbPathPrefix string
+}
+
+type trackerDBSchemaInitializer struct {
+ trackerDBParams
+
+ // schemaVersion contains current db version
+ schemaVersion int32
+ // vacuumOnStartup controls whether the accounts database would get vacuumed on startup.
+ vacuumOnStartup bool
+ // newDatabase indicates if the db is newly created
+ newDatabase bool
+
+ log logging.Logger
+}
+
+type trackerDBInitParams struct {
+ schemaVersion int32
+ vacuumOnStartup bool
+}
+
+// trackerDBInitialize initializes the accounts DB if needed and return current account round.
+// as part of the initialization, it tests the current database schema version, and perform upgrade
+// procedures to bring it up to the database schema supported by the binary.
+func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefix string) (mgr trackerDBInitParams, err error) {
+ dbs := l.trackerDB()
+ log := l.trackerLog()
+
+ lastestBlockRound := l.Latest()
+
+ if l.GenesisAccounts() == nil {
+ err = fmt.Errorf("trackerDBInitialize: initAccounts not set")
+ return
+ }
+
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ tp := trackerDBParams{l.GenesisAccounts(), l.GenesisProto(), catchpointEnabled, dbPathPrefix}
+ var err0 error
+ mgr, err0 = trackerDBInitializeImpl(ctx, tx, tp, log)
+ if err0 != nil {
+ return err0
+ }
+ lastBalancesRound, err := accountsRound(tx)
+ if err != nil {
+ return err
+ }
+ // Check for blocks DB and tracker DB un-sync
+ if lastBalancesRound > lastestBlockRound {
+ log.Warnf("trackerDBInitialize: resetting accounts DB (on round %v, but blocks DB's latest is %v)", lastBalancesRound, lastestBlockRound)
+ err0 = accountsReset(tx)
+ if err0 != nil {
+ return err0
+ }
+ mgr, err0 = trackerDBInitializeImpl(ctx, tx, tp, log)
+ if err0 != nil {
+ return err0
+ }
+ }
+ return nil
+ })
+
+ return
+}
+
+// trackerDBInitializeImpl initializes the accounts DB if needed and return current account round.
+// as part of the initialization, it tests the current database schema version, and perform upgrade
+// procedures to bring it up to the database schema supported by the binary.
+func trackerDBInitializeImpl(ctx context.Context, tx *sql.Tx, params trackerDBParams, log logging.Logger) (mgr trackerDBInitParams, err error) {
+ // check current database version.
+ dbVersion, err := db.GetUserVersion(ctx, tx)
+ if err != nil {
+ return trackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to read database schema version : %v", err)
+ }
+
+ tu := trackerDBSchemaInitializer{
+ trackerDBParams: params,
+ schemaVersion: dbVersion,
+ log: log,
+ }
+
+ // if database version is greater than supported by current binary, write a warning. This would keep the existing
+ // fallback behavior where we could use an older binary iff the schema happen to be backward compatible.
+ if tu.version() > accountDBVersion {
+ tu.log.Warnf("trackerDBInitialize database schema version is %d, but algod supports only %d", tu.version(), accountDBVersion)
+ }
+
+ if tu.version() < accountDBVersion {
+ tu.log.Infof("trackerDBInitialize upgrading database schema from version %d to version %d", tu.version(), accountDBVersion)
+ // newDatabase is determined during the tables creations. If we're filling the database with accounts,
+ // then we set this variable to true, allowing some of the upgrades to be skipped.
+ for tu.version() < accountDBVersion {
+ tu.log.Infof("trackerDBInitialize performing upgrade from version %d", tu.version())
+ // perform the initialization/upgrade
+ switch tu.version() {
+ case 0:
+ err = tu.upgradeDatabaseSchema0(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 0 : %v", err)
+ return
+ }
+ case 1:
+ err = tu.upgradeDatabaseSchema1(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
+ return
+ }
+ case 2:
+ err = tu.upgradeDatabaseSchema2(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err)
+ return
+ }
+ case 3:
+ err = tu.upgradeDatabaseSchema3(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err)
+ return
+ }
+ case 4:
+ err = tu.upgradeDatabaseSchema4(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 4 : %v", err)
+ return
+ }
+ default:
+ return trackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion)
+ }
+ }
+ tu.log.Infof("trackerDBInitialize database schema upgrade complete")
+ }
+
+ return trackerDBInitParams{tu.schemaVersion, tu.vacuumOnStartup}, nil
+}
+
+func (tu *trackerDBSchemaInitializer) setVersion(ctx context.Context, tx *sql.Tx, version int32) (err error) {
+ oldVersion := tu.schemaVersion
+ tu.schemaVersion = version
+ _, err = db.SetUserVersion(ctx, tx, tu.schemaVersion)
+ if err != nil {
+ return fmt.Errorf("trackerDBInitialize unable to update database schema version from %d to %d: %v", oldVersion, version, err)
+ }
+ return nil
+}
+
+func (tu trackerDBSchemaInitializer) version() int32 {
+ return tu.schemaVersion
+}
+
+// upgradeDatabaseSchema0 upgrades the database schema from version 0 to version 1
+//
+// Schema of version 0 is expected to be aligned with the schema used on version 2.0.8 or before.
+// Any database of version 2.0.8 would be of version 0. At this point, the database might
+// have the following tables : ( i.e. a newly created database would not have these )
+// * acctrounds
+// * accounttotals
+// * accountbase
+// * assetcreators
+// * storedcatchpoints
+// * accounthashes
+// * catchpointstate
+//
+// As the first step of the upgrade, the above tables are being created if they do not already exists.
+// Following that, the assetcreators table is being altered by adding a new column to it (ctype).
+// Last, in case the database was just created, it would get initialized with the following:
+// The accountbase would get initialized with the au.initAccounts
+// The accounttotals would get initialized to align with the initialization account added to accountbase
+// The acctrounds would get updated to indicate that the balance matches round 0
+//
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (err error) {
+ tu.log.Infof("upgradeDatabaseSchema0 initializing schema")
+ tu.newDatabase, err = accountsInit(tx, tu.initAccounts, tu.initProto)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema0 unable to initialize schema : %v", err)
+ }
+ return tu.setVersion(ctx, tx, 1)
+}
+
+// upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2
+//
+// The schema updated to version 2 intended to ensure that the encoding of all the accounts data is
+// both canonical and identical across the entire network. On release 2.0.5 we released an upgrade to the messagepack.
+// the upgraded messagepack was decoding the account data correctly, but would have different
+// encoding compared to it's predecessor. As a result, some of the account data that was previously stored
+// would have different encoded representation than the one on disk.
+// To address this, this startup procedure would attempt to scan all the accounts data. for each account data, we would
+// see if it's encoding aligns with the current messagepack encoder. If it doesn't we would update it's encoding.
+// then, depending if we found any such account data, we would reset the merkle trie and stored catchpoints.
+// once the upgrade is complete, the trackerDBInitialize would (if needed) rebuild the merkle trie using the new
+// encoded accounts.
+//
+// This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performing
+// a functional update to it's content.
+//
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx) (err error) {
+ var modifiedAccounts uint
+ if tu.newDatabase {
+ goto schemaUpdateComplete
+ }
+
+ // update accounts encoding.
+ tu.log.Infof("upgradeDatabaseSchema1 verifying accounts data encoding")
+ modifiedAccounts, err = reencodeAccounts(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ if modifiedAccounts > 0 {
+ tu.log.Infof("upgradeDatabaseSchema1 reencoded %d accounts", modifiedAccounts)
+
+ tu.log.Infof("upgradeDatabaseSchema1 resetting account hashes")
+ // reset the merkle trie
+ err = resetAccountHashes(tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to reset account hashes : %v", err)
+ }
+
+ tu.log.Infof("upgradeDatabaseSchema1 preparing queries")
+ // initialize a new accountsq with the incoming transaction.
+ accountsq, err := accountsInitDbQueries(tx, tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to prepare queries : %v", err)
+ }
+
+ // close the prepared statements when we're done with them.
+ defer accountsq.close()
+
+ tu.log.Infof("upgradeDatabaseSchema1 resetting prior catchpoints")
+ // delete the last catchpoint label if we have any.
+ _, err = accountsq.writeCatchpointStateString(ctx, catchpointStateLastCatchpoint, "")
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to clear prior catchpoint : %v", err)
+ }
+
+ tu.log.Infof("upgradeDatabaseSchema1 deleting stored catchpoints")
+ // delete catchpoints.
+ err = deleteStoredCatchpoints(ctx, accountsq, tu.dbPathPrefix)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema1 unable to delete stored catchpoints : %v", err)
+ }
+ } else {
+ tu.log.Infof("upgradeDatabaseSchema1 found that no accounts needed to be reencoded")
+ }
+
+schemaUpdateComplete:
+ return tu.setVersion(ctx, tx, 2)
+}
+
+// upgradeDatabaseSchema2 upgrades the database schema from version 2 to version 3
+//
+// This upgrade only enables the database vacuuming which will take place once the upgrade process is complete.
+// If the user has already specified the OptimizeAccountsDatabaseOnStartup flag in the configuration file, this
+// step becomes a no-op.
+//
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx) (err error) {
+ if !tu.newDatabase {
+ tu.vacuumOnStartup = true
+ }
+
+ // update version
+ return tu.setVersion(ctx, tx, 3)
+}
+
+// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4,
+// adding the normalizedonlinebalance column to the accountbase table.
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx) (err error) {
+ err = accountsAddNormalizedBalance(tx, tu.initProto)
+ if err != nil {
+ return err
+ }
+
+ // update version
+ return tu.setVersion(ctx, tx, 4)
+}
+
+// upgradeDatabaseSchema4 does not change the schema but migrates data:
+// remove empty AccountData entries from accountbase table
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx) (err error) {
+ var numDeleted int64
+ var addresses []basics.Address
+
+ if tu.newDatabase {
+ goto done
+ }
+
+ numDeleted, addresses, err = removeEmptyAccountData(tx, tu.catchpointEnabled)
+ if err != nil {
+ return err
+ }
+
+ if tu.catchpointEnabled && len(addresses) > 0 {
+ mc, err := MakeMerkleCommitter(tx, false)
+ if err != nil {
+ // at this point record deleted and DB is pruned for account data
+ // if hash deletion fails just log it and do not abort startup
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to create merkle committer: %v", err)
+ goto done
+ }
+ trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig)
+ if err != nil {
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to create merkle trie: %v", err)
+ goto done
+ }
+
+ var totalHashesDeleted int
+ for _, addr := range addresses {
+ hash := accountHashBuilder(addr, basics.AccountData{}, []byte{0x80})
+ deleted, err := trie.Delete(hash)
+ if err != nil {
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, err)
+ } else {
+ if !deleted {
+ tu.log.Warnf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(hash), addr)
+ } else {
+ totalHashesDeleted++
+ }
+ }
+ }
+
+ if _, err = trie.Commit(); err != nil {
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to commit changes to merkle trie: %v", err)
+ }
+
+ tu.log.Infof("upgradeDatabaseSchema4: deleted %d hashes", totalHashesDeleted)
+ }
+
+done:
+ tu.log.Infof("upgradeDatabaseSchema4: deleted %d rows", numDeleted)
+
+ return tu.setVersion(ctx, tx, 5)
+}
diff --git a/ledger/txtail.go b/ledger/txtail.go
index 68cc0f5ed..a5d77e49c 100644
--- a/ledger/txtail.go
+++ b/ledger/txtail.go
@@ -17,6 +17,8 @@
package ledger
import (
+ "context"
+ "database/sql"
"fmt"
"github.com/algorand/go-algorand/config"
@@ -43,7 +45,7 @@ type txTail struct {
lowWaterMark basics.Round // the last round known to be committed to disk
}
-func (t *txTail) loadFromDisk(l ledgerForTracker) error {
+func (t *txTail) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
latest := l.Latest()
hdr, err := l.BlockHdr(latest)
if err != nil {
@@ -141,7 +143,7 @@ func (t *txTail) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
}
}
-func (t *txTail) committedUpTo(rnd basics.Round) basics.Round {
+func (t *txTail) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
maxlife := basics.Round(t.recent[rnd].proto.MaxTxnLife)
for r := range t.recent {
if r+maxlife < rnd {
@@ -152,7 +154,25 @@ func (t *txTail) committedUpTo(rnd basics.Round) basics.Round {
delete(t.lastValid, t.lowWaterMark)
}
- return (rnd + 1).SubSaturate(maxlife)
+ return (rnd + 1).SubSaturate(maxlife), basics.Round(0)
+}
+
+func (t *txTail) prepareCommit(*deferredCommitContext) error {
+ return nil
+}
+
+func (t *txTail) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (t *txTail) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+func (t *txTail) handleUnorderedCommit(uint64, basics.Round, basics.Round) {
+}
+
+func (t *txTail) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
}
// txtailMissingRound is returned by checkDup when requested for a round number below the low watermark
diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go
index 2bde8ffae..eaaf34a06 100644
--- a/ledger/txtail_test.go
+++ b/ledger/txtail_test.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -35,11 +36,11 @@ import (
func TestTxTailCheckdup(t *testing.T) {
partitiontest.PartitionTest(t)
- accts := randomAccounts(10, false)
+ accts := ledgertesting.RandomAccounts(10, false)
ledger := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, []map[basics.Address]basics.AccountData{accts})
proto := config.Consensus[protocol.ConsensusCurrentVersion]
tail := txTail{}
- require.NoError(t, tail.loadFromDisk(ledger))
+ require.NoError(t, tail.loadFromDisk(ledger, 0))
lastRound := basics.Round(proto.MaxTxnLife)
lookback := basics.Round(100)
@@ -152,7 +153,7 @@ func TestTxTailLoadFromDisk(t *testing.T) {
var ledger txTailTestLedger
txtail := txTail{}
- err := txtail.loadFromDisk(&ledger)
+ err := txtail.loadFromDisk(&ledger, 0)
require.NoError(t, err)
require.Equal(t, int(config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnLife), len(txtail.recent))
require.Equal(t, testTxTailValidityRange, len(txtail.lastValid))
diff --git a/ledger/voters.go b/ledger/voters.go
index 7f1749175..898604072 100644
--- a/ledger/voters.go
+++ b/ledger/voters.go
@@ -20,14 +20,10 @@ import (
"fmt"
"sync"
- "github.com/algorand/go-deadlock"
-
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/crypto/merklearray"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
@@ -63,7 +59,7 @@ type votersTracker struct {
// the Merkle commitment to online accounts from the previous such block.
// Thus, we maintain X in the round map until we form a compact certificate
// for round X+CompactCertVotersLookback+CompactCertRounds.
- round map[basics.Round]*VotersForRound
+ round map[basics.Round]*ledgercore.VotersForRound
l ledgerForTracker
au *accountUpdates
@@ -73,44 +69,6 @@ type votersTracker struct {
loadWaitGroup sync.WaitGroup
}
-// VotersForRound tracks the top online voting accounts as of a particular
-// round, along with a Merkle tree commitment to those voting accounts.
-type VotersForRound struct {
- // Because it can take some time to compute the top participants and the
- // corresponding Merkle tree, the votersForRound is constructed in
- // the background. This means that fields (participants, adddToPos,
- // tree, and totalWeight) could be nil/zero while a background thread
- // is computing them. Once the fields are set, however, they are
- // immutable, and it is no longer necessary to acquire the lock.
- //
- // If an error occurs while computing the tree in the background,
- // loadTreeError might be set to non-nil instead. That also finalizes
- // the state of this VotersForRound.
- mu deadlock.Mutex
- cond *sync.Cond
- loadTreeError error
-
- // Proto is the ConsensusParams for the round whose balances are reflected
- // in participants.
- Proto config.ConsensusParams
-
- // Participants is the array of top #CompactCertVoters online accounts
- // in this round, sorted by normalized balance (to make sure heavyweight
- // accounts are biased to the front).
- Participants participantsArray
-
- // AddrToPos specifies the position of a given account address (if present)
- // in the Participants array. This allows adding a vote from a given account
- // to the certificate builder.
- AddrToPos map[basics.Address]uint64
-
- // Tree is a constructed Merkle tree of the Participants array.
- Tree *merklearray.Tree
-
- // TotalWeight is the sum of the weights from the Participants array.
- TotalWeight basics.MicroAlgos
-}
-
// votersRoundForCertRound computes the round number whose voting participants
// will be used to sign the compact cert for certRnd.
func votersRoundForCertRound(certRnd basics.Round, proto config.ConsensusParams) basics.Round {
@@ -124,7 +82,7 @@ func votersRoundForCertRound(certRnd basics.Round, proto config.ConsensusParams)
func (vt *votersTracker) loadFromDisk(l ledgerForTracker, au *accountUpdates) error {
vt.l = l
vt.au = au
- vt.round = make(map[basics.Round]*VotersForRound)
+ vt.round = make(map[basics.Round]*ledgercore.VotersForRound)
latest := l.Latest()
hdr, err := l.BlockHdr(latest)
@@ -173,23 +131,20 @@ func (vt *votersTracker) loadTree(hdr bookkeeping.BlockHeader) {
return
}
- tr := &VotersForRound{
- Proto: proto,
- }
- tr.cond = sync.NewCond(&tr.mu)
+ tr := ledgercore.MakeVotersForRound()
+ tr.Proto = proto
+
vt.round[r] = tr
vt.loadWaitGroup.Add(1)
go func() {
defer vt.loadWaitGroup.Done()
- err := tr.loadTree(vt.l, vt.au, hdr)
+ onlineAccounts := ledgercore.TopOnlineAccounts(vt.au.onlineTop)
+ err := tr.LoadTree(onlineAccounts, hdr)
if err != nil {
- vt.au.log.Warnf("votersTracker.loadTree(%d): %v", hdr.Round, err)
+ vt.l.trackerLog().Warnf("votersTracker.loadTree(%d): %v", hdr.Round, err)
- tr.mu.Lock()
- tr.loadTreeError = err
- tr.cond.Broadcast()
- tr.mu.Unlock()
+ tr.BroadcastError(err)
}
}()
return
@@ -201,70 +156,6 @@ func (vt *votersTracker) close() {
vt.loadWaitGroup.Wait()
}
-func (tr *VotersForRound) loadTree(l ledgerForTracker, au *accountUpdates, hdr bookkeeping.BlockHeader) error {
- r := hdr.Round
-
- // certRound is the block that we expect to form a compact certificate for,
- // using the balances from round r.
- certRound := r + basics.Round(tr.Proto.CompactCertVotersLookback+tr.Proto.CompactCertRounds)
-
- // sigKeyRound is the ephemeral key ID that we expect to be used for signing
- // the block from certRound. It is one higher because the keys for certRound
- // might be deleted by the time consensus is reached on the block and we try
- // to sign the compact cert for block certRound.
- sigKeyRound := certRound + 1
-
- top, err := au.onlineTop(r, sigKeyRound, tr.Proto.CompactCertTopVoters)
- if err != nil {
- return err
- }
-
- participants := make(participantsArray, len(top))
- addrToPos := make(map[basics.Address]uint64)
- var totalWeight basics.MicroAlgos
-
- for i, acct := range top {
- var ot basics.OverflowTracker
- rewards := basics.PendingRewards(&ot, tr.Proto, acct.MicroAlgos, acct.RewardsBase, hdr.RewardsLevel)
- money := ot.AddA(acct.MicroAlgos, rewards)
- if ot.Overflowed {
- return fmt.Errorf("votersTracker.loadTree: overflow adding rewards %d + %d", acct.MicroAlgos, rewards)
- }
-
- totalWeight = ot.AddA(totalWeight, money)
- if ot.Overflowed {
- return fmt.Errorf("votersTracker.loadTree: overflow computing totalWeight %d + %d", totalWeight.ToUint64(), money.ToUint64())
- }
-
- keyDilution := acct.VoteKeyDilution
- if keyDilution == 0 {
- keyDilution = tr.Proto.DefaultKeyDilution
- }
-
- participants[i] = compactcert.Participant{
- PK: acct.VoteID,
- Weight: money.ToUint64(),
- KeyDilution: keyDilution,
- }
- addrToPos[acct.Address] = uint64(i)
- }
-
- tree, err := merklearray.Build(participants)
- if err != nil {
- return err
- }
-
- tr.mu.Lock()
- tr.AddrToPos = addrToPos
- tr.Participants = participants
- tr.TotalWeight = totalWeight
- tr.Tree = tree
- tr.cond.Broadcast()
- tr.mu.Unlock()
-
- return nil
-}
-
func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) {
proto := config.Consensus[hdr.CurrentProtocol]
if proto.CompactCertRounds == 0 {
@@ -288,7 +179,7 @@ func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) {
if (r+proto.CompactCertVotersLookback)%proto.CompactCertRounds == 0 {
_, ok := vt.round[basics.Round(r)]
if ok {
- vt.au.log.Errorf("votersTracker.newBlock: round %d already present", r)
+ vt.l.trackerLog().Errorf("votersTracker.newBlock: round %d already present", r)
} else {
vt.loadTree(hdr)
}
@@ -311,7 +202,7 @@ func (vt *votersTracker) lowestRound(base basics.Round) basics.Round {
}
// getVoters() returns the top online participants from round r.
-func (vt *votersTracker) getVoters(r basics.Round) (*VotersForRound, error) {
+func (vt *votersTracker) getVoters(r basics.Round) (*ledgercore.VotersForRound, error) {
tr, ok := vt.round[r]
if !ok {
// Not tracked: compact certs not enabled.
@@ -319,32 +210,10 @@ func (vt *votersTracker) getVoters(r basics.Round) (*VotersForRound, error) {
}
// Wait for the Merkle tree to be constructed.
- tr.mu.Lock()
- defer tr.mu.Unlock()
- for tr.Tree == nil {
- if tr.loadTreeError != nil {
- return nil, tr.loadTreeError
- }
-
- tr.cond.Wait()
+ err := tr.Wait()
+ if err != nil {
+ return nil, err
}
return tr, nil
}
-
-//msgp:ignore participantsArray
-// participantsArray implements merklearray.Array and is used to commit
-// to a Merkle tree of online accounts.
-type participantsArray []compactcert.Participant
-
-func (a participantsArray) Length() uint64 {
- return uint64(len(a))
-}
-
-func (a participantsArray) GetHash(pos uint64) (crypto.Digest, error) {
- if pos >= uint64(len(a)) {
- return crypto.Digest{}, fmt.Errorf("participantsArray.Get(%d) out of bounds %d", pos, len(a))
- }
-
- return crypto.HashObj(a[pos]), nil
-}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index c67c86758..b7f8cf519 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -19,6 +19,7 @@ package libgoal
import (
"encoding/json"
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
@@ -30,6 +31,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/daemon/kmd/lib/kmdapi"
@@ -891,6 +893,40 @@ func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (r
return
}
+// AddParticipationKey takes a participation key file and sends it to the node.
+// The key will be loaded into the system when the function returns successfully.
+func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) {
+ data, err := ioutil.ReadFile(keyfile)
+ if err != nil {
+ return
+ }
+
+ algod, err := c.ensureAlgodClient()
+ if err != nil {
+ return
+ }
+
+ return algod.PostParticipationKey(data)
+}
+
+// GetParticipationKeys gets the currently installed participation keys.
+func (c *Client) GetParticipationKeys() (resp generated.ParticipationKeysResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ return algod.GetParticipationKeys()
+ }
+ return
+}
+
+// GetParticipationKeyByID looks up a specific participation key by its participationID.
+func (c *Client) GetParticipationKeyByID(id string) (resp generated.ParticipationKeyResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ return algod.GetParticipationKeyByID(id)
+ }
+ return
+}
+
// ExportKey exports the private key of the passed account, assuming it's available
func (c *Client) ExportKey(walletHandle []byte, password, account string) (resp kmdapi.APIV1POSTKeyExportResponse, err error) {
kmd, err := c.ensureKmdClient()
diff --git a/libgoal/participation.go b/libgoal/participation.go
index c95d4c3c6..66ba9e4a5 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -24,6 +24,7 @@ import (
"path/filepath"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
@@ -166,7 +167,7 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
// Fill the database with new participation keys
newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution)
part = newPart.Participation
- newPart.Close()
+ partdb.Close()
return part, partKeyPath, err
}
@@ -243,8 +244,18 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic
}
// ListParticipationKeys returns the available participation keys,
+// as a response object.
+func (c *Client) ListParticipationKeys() (partKeyFiles generated.ParticipationKeysResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ partKeyFiles, err = algod.GetParticipationKeys()
+ }
+ return
+}
+
+// ListParticipationKeyFiles returns the available participation keys,
// as a map from database filename to Participation key object.
-func (c *Client) ListParticipationKeys() (partKeyFiles map[string]account.Participation, err error) {
+func (c *Client) ListParticipationKeyFiles() (partKeyFiles map[string]account.Participation, err error) {
genID, err := c.GenesisID()
if err != nil {
return
diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go
index 2e87502fd..349d3f972 100644
--- a/logging/telemetryspec/metric.go
+++ b/logging/telemetryspec/metric.go
@@ -83,67 +83,6 @@ func (m AssembleBlockMetrics) Identifier() Metric {
return assembleBlockMetricsIdentifier
}
-// the identifier for the transaction sync profiling metrics.
-const transactionSyncProfilingMetricsIdentifier Metric = "SyncProfile"
-
-// TransactionSyncProfilingMetrics is the profiling metrics of the recent transaction sync activity
-type TransactionSyncProfilingMetrics struct {
- // total number of operations
- TotalOps uint64
- // number of idle operations
- IdleOps uint64
- // number of transaction pool changes operations
- TransactionPoolChangedOps uint64
- // number of new rounds operations
- NewRoundOps uint64
- // number of peer state changes operations
- PeerStateOps uint64
- // number of incoming messages operations
- IncomingMsgOps uint64
- // number of outgoing message operations
- OutgoingMsgOps uint64
- // number of next offsets message operations
- NextOffsetOps uint64
- // number of times transaction sync was retrieving the transaction groups from the transaction pool
- GetTxnGroupsOps uint64
- // number of times the transaction sync was assembling messages
- AssembleMessageOps uint64
- // number of times the transaction sync was creating bloom filters
- MakeBloomFilterOps uint64
- // number of times the transaction sync was selecting pending transactions out of existing pool
- SelectPendingTransactionsOps uint64
-
- // total duration of this profiling session
- TotalDuration time.Duration
- // percent of time the transaction sync was idle
- IdlePercent float64
- // percent of time the transaction sync was processing transaction pool changes
- TransactionPoolChangedPercent float64
- // percent of time the transaction sync was processing new rounds
- NewRoundPercent float64
- // percent of time the transaction sync was processing peer state changes
- PeerStatePercent float64
- // percent of time the transaction sync was processing incoming messages
- IncomingMsgPercent float64
- // percent of time the transaction sync was processing outgoing messages
- OutgoingMsgPercent float64
- // percent of time the transaction sync was processing next offset messages
- NextOffsetPercent float64
- // percent of time the transaction sync was collecting next set of transaction groups from the transaction pool
- GetTxnGroupsPercent float64
- // percent of time the transaction sync was assembling messages
- AssembleMessagePercent float64
- // percent of time the transaction sync was creating bloom filter
- MakeBloomFilterPercent float64
- // percent of time the transaction sync was selecting transaction to be sent
- SelectPendingTransactionsPercent float64
-}
-
-// Identifier implements the required MetricDetails interface, retrieving the Identifier for this set of metrics.
-func (m TransactionSyncProfilingMetrics) Identifier() Metric {
- return transactionSyncProfilingMetricsIdentifier
-}
-
//-------------------------------------------------------
// ProcessBlock
diff --git a/logging/testingLogger.go b/logging/testingLogger.go
index bbdb0f32a..09b789fb0 100644
--- a/logging/testingLogger.go
+++ b/logging/testingLogger.go
@@ -22,7 +22,7 @@ import (
// TestLogWriter is an io.Writer that wraps a testing.T (or a testing.B) -- anything written to it gets logged with t.Log(...)
// Being an io.Writer lets us pass it to Logger.SetOutput() in testing code -- this way if we want we can use Go's built-in testing log instead of making a new base.log file for each test.
-// As a bonus, the detailed logs produced in a Travis test are now easily accessible and are printed if and only if that particular ttest fails.
+// As a bonus, the detailed logs produced in a Travis test are now easily accessible and are printed if and only if that particular test fails.
type TestLogWriter struct {
testing.TB
}
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index 36b4618f5..1bf4184a6 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -36,6 +36,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util"
@@ -487,9 +488,9 @@ func keypair() *crypto.SignatureSecrets {
return s
}
-func generateInitState(accounts map[basics.Address]basics.AccountData, bootstrappedNet *netState) (ledger.InitState, error) {
+func generateInitState(accounts map[basics.Address]basics.AccountData, bootstrappedNet *netState) (ledgercore.InitState, error) {
- var initState ledger.InitState
+ var initState ledgercore.InitState
block := bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
diff --git a/network/latencyTracker.go b/network/latencyTracker.go
deleted file mode 100644
index ff503ddb2..000000000
--- a/network/latencyTracker.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package network
-
-import (
- "errors"
- "net"
- "strconv"
- "sync/atomic"
- "time"
-
- "github.com/algorand/websocket"
-
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/config"
-)
-
-const pongMessageWriteDuration = time.Second
-const pingMessageWriteDuration = time.Second
-
-var errInvalidPongMessageContent = errors.New("invalid pong message content")
-var errInvalidPingMessageContent = errors.New("invalid ping message content")
-
-// latencyTracker works in conjunction with the wspeer in measuring the
-// communication latency over the websocket connection.
-type latencyTracker struct {
- // receivedPacketCounter is a counter for all incoming messages
- // placed here to be aligned with 64bit address.
- receivedPacketCounter uint64
-
- // latency is the effective latency of the connection.
- // placed here to be aligned with 64bit address.
- latency int64
-
- // lastPingSentTime is the timestamp at which we last sent a message.
- // this variable is only touched by checkPingSending, and therefore doesn't
- // need to be syncronized. The "clone" of this variable lastPingSentTimeSynced,
- // is being used by both the checkPingSending as well as by the pongHandler
- // and therefore require synchronization.
- lastPingSentTime int64
-
- // static variables
- // ( doesn't get changed after init, hence, no synchronization needed )
-
- // conn is the underlying connection object.
- conn wsPeerWebsocketConn
-
- // enabled indicates whether the pingpong is currently enabled or not.
- enabled bool
-
- // pingInterval is the max interval at which the client would send ping messages.
- pingInterval time.Duration
-
- // lastPingMu synchronize the protected variables that might be modified across
- // the checkPingSending and the pongHandler. All the variable below this point
- // need to be syncronized with the mutex.
- lastPingMu deadlock.Mutex
-
- // lastPingID is the last ping ID, a monotonic growing number used to ensure
- // that the pong message we've receive corresponds to the latest ping message
- // that we've sent.
- lastPingID uint64
-
- // lastPingReceivedCounter stores message counter at the time we sent the ping.
- // In order to ensure the timing accuracy, we want to have no other messages
- // being exchanged. This, of course, would only delay the ping-pong until a
- // better measurement could be taken.
- lastPingReceivedCounter uint64
-
- // lastPingSentTimeSynced, as stated above, is the syncronized version of lastPingSentTime.
- // it is used only in the case where we end up sending the ping message.
- lastPingSentTimeSynced int64
-}
-
-func (lt *latencyTracker) init(conn wsPeerWebsocketConn, cfg config.Local, initialConnectionLatency time.Duration) {
- lt.conn = conn
- lt.enabled = cfg.PeerPingPeriodSeconds > 0 && cfg.EnablePingHandler
- lt.latency = int64(initialConnectionLatency)
- lt.pingInterval = time.Duration(cfg.PeerPingPeriodSeconds) * time.Second
- conn.SetPingHandler(lt.pingHandler)
- conn.SetPongHandler(lt.pongHandler)
-}
-
-func (lt *latencyTracker) pingHandler(message string) error {
- if _, err := strconv.Atoi(message); err != nil {
- return errInvalidPingMessageContent
- }
- err := lt.conn.WriteControl(websocket.PongMessage, []byte(message), time.Now().Add(pongMessageWriteDuration))
- if err == websocket.ErrCloseSent {
- return nil
- } else if e, ok := err.(net.Error); ok && e.Temporary() {
- return nil
- }
- return err
-}
-
-func (lt *latencyTracker) pongHandler(message string) error {
- pongID, err := strconv.Atoi(message)
- if err != nil {
- return errInvalidPongMessageContent
- }
-
- lt.lastPingMu.Lock()
- defer lt.lastPingMu.Unlock()
-
- if uint64(pongID) != lt.lastPingID {
- // we've sent more than one ping since; ignore this message.
- return nil
- }
- if lt.receivedPacketCounter != lt.lastPingReceivedCounter {
- // we've received other messages since the one that we sent. The timing
- // here would not be accurate.
- return nil
- }
- lastPingSentTime := time.Unix(0, lt.lastPingSentTimeSynced)
- roundtripDuration := time.Since(lastPingSentTime)
- atomic.StoreInt64(&lt.latency, roundtripDuration.Nanoseconds())
- return nil
-}
-
-func (lt *latencyTracker) getConnectionLatency() time.Duration {
- return time.Duration(atomic.LoadInt64(&lt.latency))
-}
-
-func (lt *latencyTracker) checkPingSending(now *time.Time) error {
- if !lt.enabled {
- return nil
- }
- if now.Sub(time.Unix(0, lt.lastPingSentTime)) < lt.pingInterval {
- return nil
- }
-
- // it looks like it's time to send a ping :
- lt.lastPingMu.Lock()
- defer lt.lastPingMu.Unlock()
-
- lt.lastPingID++
- err := lt.conn.WriteControl(websocket.PingMessage, []byte(strconv.Itoa(int(lt.lastPingID))), now.Add(pingMessageWriteDuration))
- if err == websocket.ErrCloseSent {
- return nil
- } else if e, ok := err.(net.Error); ok && e.Temporary() {
- return nil
- }
- if err != nil {
- return err
- }
- lt.lastPingSentTimeSynced = now.UnixNano()
- lt.lastPingReceivedCounter = atomic.LoadUint64(&lt.receivedPacketCounter)
- lt.lastPingSentTime = lt.lastPingSentTimeSynced
- return nil
-}
-
-func (lt *latencyTracker) increaseReceivedCounter() {
- atomic.AddUint64(&lt.receivedPacketCounter, 1)
-}
diff --git a/network/latencyTracker_test.go b/network/latencyTracker_test.go
deleted file mode 100644
index e7a62c1d7..000000000
--- a/network/latencyTracker_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package network
-
-import (
- "context"
- "math/rand"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestLatencyTracker(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- netA := makeTestFilterWebsocketNode(t, "a")
- netA.config.GossipFanout = 1
- netA.config.PeerPingPeriodSeconds = 2
- netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
-
- netB := makeTestFilterWebsocketNode(t, "b")
- netB.config.GossipFanout = 1
- addrA, postListen := netA.Address()
- require.True(t, postListen)
- t.Log(addrA)
- netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
-
- netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
- counter := &messageCounterHandler{t: t, limit: 1, done: make(chan struct{})}
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
- debugTag2 := protocol.ProposalPayloadTag
- counter2 := &messageCounterHandler{t: t, limit: 1, done: make(chan struct{})}
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: debugTag2, MessageHandler: counter2}})
-
- readyTimeout := time.NewTimer(2 * time.Second)
- waitReady(t, netA, readyTimeout.C)
- waitReady(t, netB, readyTimeout.C)
-
- msg := make([]byte, 200)
- rand.Read(msg)
- var lastMsgTime time.Time
-
- var connLatencyInitialA time.Duration
- // wait for up to 20 seconds for the network latency to be established.
- startTime := time.Now()
- for {
- if time.Since(lastMsgTime) > 100*time.Millisecond {
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
- lastMsgTime = time.Now()
- }
-
- connLatencyA := netA.peers[0].GetConnectionLatency()
- if connLatencyA == time.Duration(0) {
- require.LessOrEqual(t, time.Since(startTime).Nanoseconds(), (20 * time.Second).Nanoseconds())
- time.Sleep(time.Millisecond)
- continue
- }
- require.LessOrEqual(t, connLatencyA.Nanoseconds(), (20 * time.Second).Nanoseconds())
- connLatencyInitialA = connLatencyA
- break
- }
-
- // wait for up to 20 seconds for the network latency to be established.
- startTime = time.Now()
- lastMsgTime = time.Time{}
- for {
- if time.Since(lastMsgTime) > 100*time.Millisecond {
- netB.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
- lastMsgTime = time.Now()
- }
-
- connLatencyB := netB.peers[0].GetConnectionLatency()
- if connLatencyB == time.Duration(0) {
- require.LessOrEqual(t, time.Since(startTime).Nanoseconds(), (20 * time.Second).Nanoseconds())
- time.Sleep(time.Millisecond)
- continue
- }
- require.LessOrEqual(t, connLatencyB.Nanoseconds(), (20 * time.Second).Nanoseconds())
- break
- }
-
- // send the given message until we get a different latency.
- // wait for up to 20 seconds for the network latency to be established.
- startTime = time.Now()
- lastMsgTime = time.Time{}
- for {
- if time.Since(lastMsgTime) > 100*time.Millisecond {
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
- lastMsgTime = time.Now()
- }
-
- connLatencyA := netA.peers[0].GetConnectionLatency()
- if connLatencyA != connLatencyInitialA {
- require.NotEqual(t, connLatencyA.Nanoseconds(), int64(0))
- waitTime := time.Since(lastMsgTime)
- require.Less(t, waitTime.Seconds(), float64(netA.config.PeerPingPeriodSeconds*2))
- break
- }
- time.Sleep(time.Millisecond)
- }
-}
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 1a86eadf0..9f4a1b281 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -52,11 +52,7 @@ import (
)
const incomingThreads = 20
-
-// messageFilterSize is the threshold beyond we send a request to the other peer to avoid sending us a message with that particular hash.
-// typically, this is beneficial for proposal messages, which tends to be large and uniform across the network. Non-uniform messages, such
-// as the transaction sync messages should not included in this filter.
-const messageFilterSize = 200000
+const messageFilterSize = 5000 // messages greater than that size may be blocked by incoming/outgoing filter
// httpServerReadHeaderTimeout is the amount of time allowed to read
// request headers. The connection's read deadline is reset
@@ -222,9 +218,6 @@ type IncomingMessage struct {
Err error
Net GossipNode
- // Sequence is the sequence number of the message for the specific tag and peer
- Sequence uint64
-
// Received is time.Time.UnixNano()
Received int64
@@ -1136,21 +1129,7 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
// We are careful to encode this prior to starting the server to avoid needing 'messagesOfInterestMu' here.
if wn.messagesOfInterestEnc != nil {
- msg := wn.messagesOfInterestEnc
- // for older peers, we want to include also the "TX" message, for backward compatibility.
- // this statement could be safely removed once we've fully migrated.
- if peer.version == "2.1" {
- wn.messagesOfInterestMu.Lock()
- txSendMsgTags := make(map[protocol.Tag]bool)
- for tag := range wn.messagesOfInterest {
- txSendMsgTags[tag] = true
- }
- wn.messagesOfInterestMu.Unlock()
- txSendMsgTags[protocol.TxnTag] = true
- msg = MarshallMessageOfInterestMap(txSendMsgTags)
- }
- err = peer.Unicast(wn.ctx, msg, protocol.MsgOfInterestTag, nil)
-
+ err = peer.Unicast(wn.ctx, wn.messagesOfInterestEnc, protocol.MsgOfInterestTag)
if err != nil {
wn.log.Infof("ws send msgOfInterest: %v", err)
}
@@ -1432,7 +1411,7 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool,
if peer == request.except {
continue
}
- ok := peer.writeNonBlockMsgs(request.ctx, data, prio, digests, request.enqueueTime, nil)
+ ok := peer.writeNonBlockMsgs(request.ctx, data, prio, digests, request.enqueueTime)
if ok {
sentMessageCount++
continue
@@ -1807,15 +1786,14 @@ const ProtocolVersionHeader = "X-Algorand-Version"
const ProtocolAcceptVersionHeader = "X-Algorand-Accept-Version"
// SupportedProtocolVersions contains the list of supported protocol versions by this node ( in order of preference ).
-var SupportedProtocolVersions = []string{"3.0", "2.1"}
+var SupportedProtocolVersions = []string{"2.1"}
// ProtocolVersion is the current version attached to the ProtocolVersionHeader header
/* Version history:
* 1 Catchup service over websocket connections with unicast messages between peers
* 2.1 Introduced topic key/data pairs and enabled services over the gossip connections
- * 3.0 Introduced new transaction gossiping protocol
*/
-const ProtocolVersion = "3.0"
+const ProtocolVersion = "2.1"
// TelemetryIDHeader HTTP header for telemetry-id for logging
const TelemetryIDHeader = "X-Algorand-TelId"
@@ -2081,7 +2059,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
resp := wn.prioScheme.MakePrioResponse(challenge)
if resp != nil {
mbytes := append([]byte(protocol.NetPrioResponseTag), resp...)
- sent := peer.writeNonBlock(context.Background(), mbytes, true, crypto.Digest{}, time.Now(), nil)
+ sent := peer.writeNonBlock(context.Background(), mbytes, true, crypto.Digest{}, time.Now())
if !sent {
wn.log.With("remote", addr).With("local", localAddr).Warnf("could not send priority response to %v", addr)
}
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 1a5a3d427..74c690241 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -238,7 +238,7 @@ func TestWebsocketNetworkBasic(t *testing.T) {
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 2)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -246,8 +246,8 @@ func TestWebsocketNetworkBasic(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
t.Log("b ready")
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte("foo"), false, nil)
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte("bar"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil)
select {
case <-counterDone:
@@ -274,7 +274,7 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 2)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -285,9 +285,9 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
require.Equal(t, 1, len(netA.peers))
require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn)))
peerB := netA.peers[0]
- err := peerB.Unicast(context.Background(), []byte("foo"), protocol.AgreementVoteTag, nil)
+ err := peerB.Unicast(context.Background(), []byte("foo"), protocol.TxnTag)
assert.NoError(t, err)
- err = peerB.Unicast(context.Background(), []byte("bar"), protocol.AgreementVoteTag, nil)
+ err = peerB.Unicast(context.Background(), []byte("bar"), protocol.TxnTag)
assert.NoError(t, err)
select {
@@ -353,7 +353,7 @@ func TestWebsocketNetworkArray(t *testing.T) {
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 3)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -361,7 +361,7 @@ func TestWebsocketNetworkArray(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
t.Log("b ready")
- tags := []protocol.Tag{protocol.AgreementVoteTag, protocol.AgreementVoteTag, protocol.AgreementVoteTag}
+ tags := []protocol.Tag{protocol.TxnTag, protocol.TxnTag, protocol.TxnTag}
data := [][]byte{[]byte("foo"), []byte("bar"), []byte("algo")}
netA.BroadcastArray(context.Background(), tags, data, false, nil)
@@ -390,7 +390,7 @@ func TestWebsocketNetworkCancel(t *testing.T) {
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 100)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -401,8 +401,8 @@ func TestWebsocketNetworkCancel(t *testing.T) {
tags := make([]protocol.Tag, 100)
data := make([][]byte, 100)
for i := range data {
- tags[i] = protocol.AgreementVoteTag
- data[i] = []byte(fmt.Sprintf("%d", i))
+ tags[i] = protocol.TxnTag
+ data[i] = []byte(string(rune(i)))
}
ctx, cancel := context.WithCancel(context.Background())
@@ -438,7 +438,7 @@ func TestWebsocketNetworkCancel(t *testing.T) {
mbytes := make([]byte, len(tbytes)+len(msg))
copy(mbytes, tbytes)
copy(mbytes[len(tbytes):], msg)
- msgs = append(msgs, sendMessage{data: mbytes, enqueued: time.Now(), peerEnqueued: enqueueTime, ctx: context.Background()})
+ msgs = append(msgs, sendMessage{data: mbytes, enqueued: time.Now(), peerEnqueued: enqueueTime, hash: crypto.Hash(mbytes), ctx: context.Background()})
}
msgs[50].ctx = ctx
@@ -476,7 +476,7 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 2)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -484,8 +484,8 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
t.Log("b ready")
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte("foo"), false, nil)
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte("bar"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil)
select {
case <-counterDone:
@@ -508,7 +508,7 @@ func lineNetwork(t *testing.T, numNodes int) (nodes []*WebsocketNetwork, counter
addrPrev, postListen := nodes[i-1].Address()
require.True(t, postListen)
nodes[i].phonebook.ReplacePeerList([]string{addrPrev}, "default", PhoneBookEntryRelayRole)
- nodes[i].RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: &counters[i]}})
+ nodes[i].RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: &counters[i]}})
}
nodes[i].Start()
counters[i].t = t
@@ -565,7 +565,7 @@ func TestLineNetwork(t *testing.T) {
sendTime := time.Now().UnixNano()
var timeblob [8]byte
binary.LittleEndian.PutUint64(timeblob[:], uint64(sendTime))
- nodes[0].Broadcast(context.Background(), protocol.AgreementVoteTag, timeblob[:], true, nil)
+ nodes[0].Broadcast(context.Background(), protocol.TxnTag, timeblob[:], true, nil)
}
select {
case <-counterDone:
@@ -792,124 +792,71 @@ func avgSendBufferHighPrioLength(wn *WebsocketNetwork) float64 {
return float64(sum) / float64(len(wn.peers))
}
-// TestSlowOutboundPeer tests what happens when one outbound peer is slow and the rest are fine.
+// TestSlowOutboundPeer tests what happens when one outbound peer is slow and the rest are fine. Current logic is to disconnect the one slow peer when its outbound channel is full.
+//
+// This is a deeply invasive test that reaches into the guts of WebsocketNetwork and wsPeer. If the implementation chainges consider throwing away or totally reimplementing this test.
func TestSlowOutboundPeer(t *testing.T) {
partitiontest.PartitionTest(t)
- nodeA := makeTestWebsocketNode(t)
- nodeA.config.GossipFanout = 0
- nodeA.Start()
- defer nodeA.Stop()
-
- addrA, postListenA := nodeA.Address()
- require.True(t, postListenA)
-
- nodeB := makeTestWebsocketNode(t)
- nodeB.config.GossipFanout = 0
- nodeB.Start()
- defer nodeB.Stop()
-
- addrB, postListenB := nodeB.Address()
- require.True(t, postListenB)
-
+ t.Skip() // todo - update this test to reflect the new implementation.
+ xtag := protocol.ProposalPayloadTag
node := makeTestWebsocketNode(t)
- node.config.GossipFanout = 2
- dl := eventsDetailsLogger{Logger: logging.TestingLog(t), eventReceived: make(chan interface{}, 100), eventIdentifier: telemetryspec.DisconnectPeerEvent}
- node.log = dl
-
- node.phonebook.ReplacePeerList([]string{addrA, addrB}, "default", PhoneBookEntryRelayRole)
- node.Start()
- defer node.Stop()
-
- msg := make([]byte, 100)
- rand.Read(msg)
-
- muHandleA := deadlock.Mutex{}
- numHandledA := 0
- waitMessageArriveHandlerA := func(msg IncomingMessage) (out OutgoingMessage) {
- muHandleA.Lock()
- defer muHandleA.Unlock()
- numHandledA++
- return
+ destPeers := make([]wsPeer, 5)
+ for i := range destPeers {
+ destPeers[i].closing = make(chan struct{})
+ destPeers[i].net = node
+ destPeers[i].sendBufferHighPrio = make(chan sendMessages, sendBufferLength)
+ destPeers[i].sendBufferBulk = make(chan sendMessages, sendBufferLength)
+ destPeers[i].conn = &nopConnSingleton
+ destPeers[i].rootURL = fmt.Sprintf("fake %d", i)
+ node.addPeer(&destPeers[i])
}
- nodeA.RegisterHandlers([]TaggedMessageHandler{
- {
- Tag: protocol.AgreementVoteTag,
- MessageHandler: HandlerFunc(waitMessageArriveHandlerA),
- }})
-
- muHandleB := deadlock.Mutex{}
- numHandledB := 0
- waitMessageArriveHandlerB := func(msg IncomingMessage) (out OutgoingMessage) {
- muHandleB.Lock()
- defer muHandleB.Unlock()
- numHandledB++
- return
+ node.Start()
+ tctx, cf := context.WithTimeout(context.Background(), 5*time.Second)
+ for i := 0; i < sendBufferLength; i++ {
+ t.Logf("broadcast %d", i)
+ sent := node.Broadcast(tctx, xtag, []byte{byte(i)}, true, nil)
+ require.NoError(t, sent)
}
- nodeB.RegisterHandlers([]TaggedMessageHandler{
- {
- Tag: protocol.AgreementVoteTag,
- MessageHandler: HandlerFunc(waitMessageArriveHandlerB),
- }})
-
- readyTimeout := time.NewTimer(2 * time.Second)
- waitReady(t, node, readyTimeout.C)
- require.Equal(t, 2, len(node.peers))
-
- callback := func(enqueued bool, sequenceNumber uint64) error {
- time.Sleep(2 * maxMessageQueueDuration)
- return nil
+ cf()
+ ok := false
+ for i := 0; i < 10; i++ {
+ time.Sleep(time.Millisecond)
+ aoql := avgSendBufferHighPrioLength(node)
+ if aoql == sendBufferLength {
+ ok = true
+ break
+ }
+ t.Logf("node.avgOutboundQueueLength() %f", aoql)
}
-
- rand.Read(msg)
- x := 0
-MAINLOOP:
- for ; x < 1000; x++ {
- select {
- case eventDetails := <-dl.eventReceived:
- switch disconnectPeerEventDetails := eventDetails.(type) {
- case telemetryspec.DisconnectPeerEventDetails:
- require.Equal(t, string(disconnectSlowConn), disconnectPeerEventDetails.Reason)
- default:
- require.FailNow(t, "Unexpected event was send : %v", eventDetails)
- }
- break MAINLOOP
- default:
+ require.True(t, ok)
+ for p := range destPeers {
+ if p == 0 {
+ continue
}
-
- node.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
- if x == 0 {
- node.peers[0].Unicast(context.Background(), msg, protocol.AgreementVoteTag, callback)
+ for j := 0; j < sendBufferLength; j++ {
+ // throw away a message as if sent
+ <-destPeers[p].sendBufferHighPrio
}
- time.Sleep(200 * time.Millisecond)
}
+ aoql := avgSendBufferHighPrioLength(node)
+ if aoql > (sendBufferLength / 2) {
+ t.Fatalf("avgOutboundQueueLength=%f wanted <%f", aoql, sendBufferLength/2.0)
+ return
+ }
+ // it shouldn't have closed for just sitting on the limit of full
+ require.False(t, peerIsClosed(&destPeers[0]))
- require.Less(t, x, 1000)
+ // function context just to contain defer cf()
+ func() {
+ timeout, cf := context.WithTimeout(context.Background(), time.Second)
+ defer cf()
+ sent := node.Broadcast(timeout, xtag, []byte{byte(42)}, true, nil)
+ assert.NoError(t, sent)
+ }()
- maxNumHandled := 0
- minNumHandled := 0
- for i := 0; i < 10; i++ {
- muHandleA.Lock()
- a := numHandledA
- muHandleA.Unlock()
-
- muHandleB.Lock()
- b := numHandledB
- muHandleB.Unlock()
-
- maxNumHandled = b
- minNumHandled = a
- if maxNumHandled < a {
- maxNumHandled = a
- minNumHandled = b
- }
- if maxNumHandled == x {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- require.Equal(t, maxNumHandled, x)
- require.Less(t, minNumHandled, x/2)
+ // and now with the rest of the peers well and this one slow, we closed the slow one
+ require.True(t, peerIsClosed(&destPeers[0]))
}
func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwork {
@@ -1097,7 +1044,7 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) {
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
returns := make(chan uint64, 100)
bhandler := benchmarkHandler{returns}
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: &bhandler}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: &bhandler}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -1120,7 +1067,7 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) {
}
msg := make([]byte, msgSize)
binary.LittleEndian.PutUint64(msg, uint64(i))
- err := netA.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
+ err := netA.Broadcast(context.Background(), protocol.TxnTag, msg, true, nil)
if err != nil {
t.Errorf("error on broadcast: %v", err)
return
@@ -1222,7 +1169,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
netB.config.GossipFanout = 1
netB.config.NetAddress = ""
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counterB}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counterB}})
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
@@ -1236,7 +1183,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
netC.config.GossipFanout = 1
netC.config.NetAddress = ""
netC.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
- netC.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counterC}})
+ netC.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counterC}})
netC.Start()
defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }()
@@ -1260,7 +1207,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
waitReady(t, netA, time.After(time.Second))
firstPeer := netA.peers[0]
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, nil, true, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, nil, true, nil)
failed := false
select {
@@ -1450,7 +1397,7 @@ func TestDelayedMessageDrop(t *testing.T) {
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 5)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -1458,7 +1405,7 @@ func TestDelayedMessageDrop(t *testing.T) {
currentTime := time.Now()
for i := 0; i < 10; i++ {
- err := netA.broadcastWithTimestamp(protocol.AgreementVoteTag, []byte("foo"), currentTime.Add(time.Hour*time.Duration(i-5)))
+ err := netA.broadcastWithTimestamp(protocol.TxnTag, []byte("foo"), currentTime.Add(time.Hour*time.Duration(i-5)))
require.NoErrorf(t, err, "No error was expected")
}
@@ -1553,7 +1500,7 @@ func TestForceMessageRelaying(t *testing.T) {
counter := newMessageCounter(t, 5)
counterDone := counter.done
- netA.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netA.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
netA.Start()
addrA, postListen := netA.Address()
require.Truef(t, postListen, "Listening network failed to start")
@@ -1580,9 +1527,9 @@ func TestForceMessageRelaying(t *testing.T) {
// send 5 messages from both netB and netC to netA
for i := 0; i < 5; i++ {
- err := netB.Relay(context.Background(), protocol.AgreementVoteTag, []byte{1, 2, 3}, true, nil)
+ err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil)
require.NoError(t, err)
- err = netC.Relay(context.Background(), protocol.AgreementVoteTag, []byte{1, 2, 3}, true, nil)
+ err = netC.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil)
require.NoError(t, err)
}
@@ -1598,13 +1545,13 @@ func TestForceMessageRelaying(t *testing.T) {
netA.ClearHandlers()
counter = newMessageCounter(t, 10)
counterDone = counter.done
- netA.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
+ netA.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
// hack the relayMessages on the netB so that it would start sending messages.
netB.relayMessages = true
// send additional 10 messages from netB
for i := 0; i < 10; i++ {
- err := netB.Relay(context.Background(), protocol.AgreementVoteTag, []byte{1, 2, 3}, true, nil)
+ err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil)
require.NoError(t, err)
}
@@ -1827,7 +1774,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
// send 5 messages of few types.
for i := 0; i < 5; i++ {
netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
- netA.Broadcast(context.Background(), protocol.CompactCertSigTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte{0, 1, 2, 3, 4}, true, nil)
netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0, 1, 2, 3, 4}, true, nil)
netA.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
}
@@ -1925,7 +1872,7 @@ func TestWebsocketDisconnection(t *testing.T) {
case eventDetails := <-dl.eventReceived:
switch disconnectPeerEventDetails := eventDetails.(type) {
case telemetryspec.DisconnectPeerEventDetails:
- require.Equal(t, string(disconnectRequestReceived), disconnectPeerEventDetails.Reason)
+ require.Equal(t, disconnectPeerEventDetails.Reason, string(disconnectRequestReceived))
default:
require.FailNow(t, "Unexpected event was send : %v", eventDetails)
}
@@ -2061,7 +2008,7 @@ func BenchmarkVariableTransactionMessageBlockSizes(t *testing.B) {
// register all the handlers.
taggedHandlersA := []TaggedMessageHandler{
{
- Tag: protocol.AgreementVoteTag,
+ Tag: protocol.TxnTag,
MessageHandler: HandlerFunc(msgHandlerA),
},
}
@@ -2085,7 +2032,7 @@ func BenchmarkVariableTransactionMessageBlockSizes(t *testing.B) {
t.ResetTimer()
startTime := time.Now()
for i := 0; i < t.N/txnCount; i++ {
- netB.Broadcast(context.Background(), protocol.AgreementVoteTag, dataBuffer, true, nil)
+ netB.Broadcast(context.Background(), protocol.TxnTag, dataBuffer, true, nil)
<-msgProcessed
}
deltaTime := time.Now().Sub(startTime)
diff --git a/network/wsPeer.go b/network/wsPeer.go
index c661629aa..f476cfa7e 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -76,10 +76,10 @@ var defaultSendMessageTags = map[protocol.Tag]bool{
protocol.ProposalPayloadTag: true,
protocol.TopicMsgRespTag: true,
protocol.MsgOfInterestTag: true,
+ protocol.TxnTag: true,
protocol.UniCatchupReqTag: true,
protocol.UniEnsBlockReqTag: true,
protocol.VoteBundleTag: true,
- protocol.Txn2Tag: true,
}
// interface allows substituting debug implementation for *websocket.Conn
@@ -96,10 +96,10 @@ type wsPeerWebsocketConn interface {
type sendMessage struct {
data []byte
- enqueued time.Time // the time at which the message was first generated
- peerEnqueued time.Time // the time at which the peer was attempting to enqueue the message
- msgTags map[protocol.Tag]bool // when msgTags is specified ( i.e. non-nil ), the send goroutine is to replace the message tag filter with this one. No data would be accompanied to this message.
- callback UnicastWebsocketMessageStateCallback // when non-nil, the callback function would be called after entry would be placed on the outgoing websocket queue
+ enqueued time.Time // the time at which the message was first generated
+ peerEnqueued time.Time // the time at which the peer was attempting to enqueue the message
+ msgTags map[protocol.Tag]bool // when msgTags is specified ( i.e. non-nil ), the send goroutine is to replace the message tag filter with this one. No data would be accompanied to this message.
+ hash crypto.Digest
ctx context.Context
}
@@ -124,7 +124,6 @@ const disconnectLeastPerformingPeer disconnectReason = "LeastPerformingPeer"
const disconnectCliqueResolve disconnectReason = "CliqueResolving"
const disconnectRequestReceived disconnectReason = "DisconnectRequest"
const disconnectStaleWrite disconnectReason = "DisconnectStaleWrite"
-const disconnectClientCallback disconnectReason = "ClientCallback"
// Response is the structure holding the response from the server
type Response struct {
@@ -175,7 +174,11 @@ type wsPeer struct {
processed chan struct{}
- latencyTracker latencyTracker
+ pingLock deadlock.Mutex
+ pingSent time.Time
+ pingData []byte
+ pingInFlight bool
+ lastPingRoundTripTime time.Duration
// Hint about position in wn.peers. Definitely valid if the peer
// is present in wn.peers.
@@ -221,9 +224,6 @@ type wsPeer struct {
// clientDataStoreMu synchronizes access to clientDataStore
clientDataStoreMu deadlock.Mutex
-
- // outgoingMessageCounters counts the number of messages send for each tag. It allows us to use implicit message counting.
- outgoingMessageCounters map[protocol.Tag]uint64
}
// HTTPPeer is what the opaque Peer might be.
@@ -233,22 +233,16 @@ type HTTPPeer interface {
GetHTTPClient() *http.Client
}
-// UnicastWebsocketMessageStateCallback provide asyncrounious feedback for the sequence number of a message
-// if the caller return an error, the network peer would disconnect
-type UnicastWebsocketMessageStateCallback func(enqueued bool, sequenceNumber uint64) error
-
// UnicastPeer is another possible interface for the opaque Peer.
// It is possible that we can only initiate a connection to a peer over websockets.
type UnicastPeer interface {
GetAddress() string
// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent.
- Unicast(ctx context.Context, data []byte, tag protocol.Tag, callback UnicastWebsocketMessageStateCallback) error
+ Unicast(ctx context.Context, data []byte, tag protocol.Tag) error
// Version returns the matching version from network.SupportedProtocolVersions
Version() string
Request(ctx context.Context, tag Tag, topics Topics) (resp *Response, e error)
Respond(ctx context.Context, reqMsg IncomingMessage, topics Topics) (e error)
- IsOutgoing() bool
- GetConnectionLatency() time.Duration
}
// Create a wsPeerCore object
@@ -278,20 +272,9 @@ func (wp *wsPeer) Version() string {
return wp.version
}
-// IsOutgoing returns true if the connection is an outgoing connection or false if it the connection
-// is an incoming connection.
-func (wp *wsPeer) IsOutgoing() bool {
- return wp.outgoing
-}
-
-// GetConnectionLatency returns the connection latency between the local node and this peer.
-func (wp *wsPeer) GetConnectionLatency() time.Duration {
- return wp.latencyTracker.getConnectionLatency()
-}
-
// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent.
// (Implements UnicastPeer)
-func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag, callback UnicastWebsocketMessageStateCallback) error {
+func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error {
var err error
tbytes := []byte(tag)
@@ -303,7 +286,7 @@ func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag, cal
digest = crypto.Hash(mbytes)
}
- ok := wp.writeNonBlock(ctx, mbytes, false, digest, time.Now(), callback)
+ ok := wp.writeNonBlock(ctx, mbytes, false, digest, time.Now())
if !ok {
networkBroadcastsDropped.Inc(nil)
err = fmt.Errorf("wsPeer failed to unicast: %v", wp.GetAddress())
@@ -356,7 +339,6 @@ func (wp *wsPeer) init(config config.Local, sendBufferLength int) {
wp.responseChannels = make(map[uint64]chan *Response)
wp.sendMessageTag = defaultSendMessageTags
wp.clientDataStore = make(map[string]interface{})
- wp.outgoingMessageCounters = make(map[protocol.Tag]uint64)
// processed is a channel that messageHandlerThread writes to
// when it's done with one of our messages, so that we can queue
@@ -371,24 +353,6 @@ func (wp *wsPeer) init(config config.Local, sendBufferLength int) {
wp.outgoingMsgFilter = makeMessageFilter(config.OutgoingMessageFilterBucketCount, config.OutgoingMessageFilterBucketSize)
}
- // if we're on an older version, then add the old style transaction message to the send messages tag.
- // once we deprecate old style transaction sending, this part can go away.
- if wp.version != "3.0" {
- txSendMsgTags := make(map[protocol.Tag]bool)
- for tag := range wp.sendMessageTag {
- txSendMsgTags[tag] = true
- }
- txSendMsgTags[protocol.TxnTag] = true
- wp.sendMessageTag = txSendMsgTags
- }
-
- wp.latencyTracker.init(wp.conn, config, time.Duration(0))
- // send a ping right away.
- now := time.Now()
- if err := wp.latencyTracker.checkPingSending(&now); err != nil {
- wp.net.log.Infof("failed to send ping message to peer : %v", err)
- }
-
wp.wg.Add(2)
go wp.readLoop()
go wp.writeLoop()
@@ -421,7 +385,6 @@ func (wp *wsPeer) readLoop() {
}()
wp.conn.SetReadLimit(maxMessageLength)
slurper := MakeLimitedReaderSlurper(averageMessageLength, maxMessageLength)
- sequenceCounters := make(map[protocol.Tag]uint64)
for {
msg := IncomingMessage{}
mtype, reader, err := wp.conn.NextReader()
@@ -457,7 +420,7 @@ func (wp *wsPeer) readLoop() {
wp.reportReadErr(err)
return
}
- wp.latencyTracker.increaseReceivedCounter()
+
msg.processing = wp.processed
msg.Received = time.Now().UnixNano()
msg.Data = slurper.Bytes()
@@ -468,8 +431,6 @@ func (wp *wsPeer) readLoop() {
networkReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2))
networkMessageReceivedByTag.Add(string(tag[:]), 1)
msg.Sender = wp
- msg.Sequence = sequenceCounters[msg.Tag]
- sequenceCounters[msg.Tag] = msg.Sequence + 1
// for outgoing connections, we want to notify the connection monitor that we've received
// a message. The connection monitor would update it's statistics accordingly.
@@ -623,12 +584,6 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
if len(msg.data) > maxMessageLength {
wp.net.log.Errorf("trying to send a message longer than we would receive: %d > %d tag=%s", len(msg.data), maxMessageLength, string(msg.data[0:2]))
// just drop it, don't break the connection
- if msg.callback != nil {
- // let the callback know that the message was not sent.
- if nil != msg.callback(false, 0) {
- return disconnectClientCallback
- }
- }
return disconnectReasonNone
}
if msg.msgTags != nil {
@@ -641,12 +596,6 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
tag := protocol.Tag(msg.data[:2])
if !wp.sendMessageTag[tag] {
// the peer isn't interested in this message.
- if msg.callback != nil {
- // let the callback know that the message was not sent.
- if nil != msg.callback(false, 0) {
- return disconnectClientCallback
- }
- }
return disconnectReasonNone
}
@@ -656,20 +605,9 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
if msgWaitDuration > maxMessageQueueDuration {
wp.net.log.Warnf("peer stale enqueued message %dms", msgWaitDuration.Nanoseconds()/1000000)
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "stale message"})
- if msg.callback != nil {
- // let the callback know that the message was not sent.
- if nil != msg.callback(false, 0) {
- return disconnectClientCallback
- }
- }
return disconnectStaleWrite
}
- // is it time to send a ping message ?
- if err := wp.latencyTracker.checkPingSending(&now); err != nil {
- wp.net.log.Infof("failed to send ping message to peer : %v", err)
- }
-
atomic.StoreInt64(&wp.intermittentOutgoingMessageEnqueueTime, msg.enqueued.UnixNano())
defer atomic.StoreInt64(&wp.intermittentOutgoingMessageEnqueueTime, 0)
err := wp.conn.WriteMessage(websocket.BinaryMessage, msg.data)
@@ -678,12 +616,6 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
wp.net.log.Warn("peer write error ", err)
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "write err"})
}
- if msg.callback != nil {
- // let the callback know that the message was not sent.
- if nil != msg.callback(false, 0) {
- return disconnectClientCallback
- }
- }
return disconnectWriteError
}
atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano())
@@ -692,16 +624,6 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
networkMessageSentTotal.AddUint64(1, nil)
networkMessageSentByTag.Add(string(tag), 1)
networkMessageQueueMicrosTotal.AddUint64(uint64(time.Now().Sub(msg.peerEnqueued).Nanoseconds()/1000), nil)
-
- if msg.callback != nil {
- // for performance reasons, we count messages only for messages that request a callback. we might want to revisit this
- // in the future.
- seq := wp.outgoingMessageCounters[tag]
- if nil != msg.callback(true, seq) {
- return disconnectClientCallback
- }
- wp.outgoingMessageCounters[tag] = seq + 1
- }
return disconnectReasonNone
}
@@ -744,16 +666,16 @@ func (wp *wsPeer) writeLoopCleanup(reason disconnectReason) {
wp.wg.Done()
}
-func (wp *wsPeer) writeNonBlock(ctx context.Context, data []byte, highPrio bool, digest crypto.Digest, msgEnqueueTime time.Time, callback UnicastWebsocketMessageStateCallback) bool {
+func (wp *wsPeer) writeNonBlock(ctx context.Context, data []byte, highPrio bool, digest crypto.Digest, msgEnqueueTime time.Time) bool {
msgs := make([][]byte, 1, 1)
digests := make([]crypto.Digest, 1, 1)
msgs[0] = data
digests[0] = digest
- return wp.writeNonBlockMsgs(ctx, msgs, highPrio, digests, msgEnqueueTime, callback)
+ return wp.writeNonBlockMsgs(ctx, msgs, highPrio, digests, msgEnqueueTime)
}
// return true if enqueued/sent
-func (wp *wsPeer) writeNonBlockMsgs(ctx context.Context, data [][]byte, highPrio bool, digest []crypto.Digest, msgEnqueueTime time.Time, callback UnicastWebsocketMessageStateCallback) bool {
+func (wp *wsPeer) writeNonBlockMsgs(ctx context.Context, data [][]byte, highPrio bool, digest []crypto.Digest, msgEnqueueTime time.Time) bool {
includeIndices := make([]int, 0, len(data))
for i := range data {
if wp.outgoingMsgFilter != nil && len(data[i]) > messageFilterSize && wp.outgoingMsgFilter.CheckDigest(digest[i], false, false) {
@@ -775,7 +697,7 @@ func (wp *wsPeer) writeNonBlockMsgs(ctx context.Context, data [][]byte, highPrio
msgs := make([]sendMessage, 0, len(includeIndices))
enqueueTime := time.Now()
for _, index := range includeIndices {
- msgs = append(msgs, sendMessage{data: data[index], enqueued: msgEnqueueTime, peerEnqueued: enqueueTime, ctx: ctx, callback: callback})
+ msgs = append(msgs, sendMessage{data: data[index], enqueued: msgEnqueueTime, peerEnqueued: enqueueTime, hash: digest[index], ctx: ctx})
}
if highPrio {
@@ -791,6 +713,42 @@ func (wp *wsPeer) writeNonBlockMsgs(ctx context.Context, data [][]byte, highPrio
return false
}
+const pingLength = 8
+const maxPingWait = 60 * time.Second
+
+// sendPing sends a ping block to the peer.
+// return true if either a ping request was enqueued or there is already ping request in flight in the past maxPingWait time.
+func (wp *wsPeer) sendPing() bool {
+ wp.pingLock.Lock()
+ defer wp.pingLock.Unlock()
+ now := time.Now()
+ if wp.pingInFlight && (now.Sub(wp.pingSent) < maxPingWait) {
+ return true
+ }
+
+ tagBytes := []byte(protocol.PingTag)
+ mbytes := make([]byte, len(tagBytes)+pingLength)
+ copy(mbytes, tagBytes)
+ crypto.RandBytes(mbytes[len(tagBytes):])
+ wp.pingData = mbytes[len(tagBytes):]
+ sent := wp.writeNonBlock(context.Background(), mbytes, false, crypto.Digest{}, time.Now())
+
+ if sent {
+ wp.pingInFlight = true
+ wp.pingSent = now
+ }
+ return sent
+}
+
+// get some times out of the peer while observing the ping data lock
+func (wp *wsPeer) pingTimes() (lastPingSent time.Time, lastPingRoundTripTime time.Duration) {
+ wp.pingLock.Lock()
+ defer wp.pingLock.Unlock()
+ lastPingSent = wp.pingSent
+ lastPingRoundTripTime = wp.lastPingRoundTripTime
+ return
+}
+
// called when the connection had an error or closed remotely
func (wp *wsPeer) internalClose(reason disconnectReason) {
if atomic.CompareAndSwapInt32(&wp.didSignalClose, 0, 1) {
diff --git a/node/assemble_test.go b/node/assemble_test.go
index 937b039db..5f7dae7d5 100644
--- a/node/assemble_test.go
+++ b/node/assemble_test.go
@@ -30,7 +30,6 @@ import (
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
@@ -71,7 +70,6 @@ func BenchmarkAssembleBlock(b *testing.B) {
Status: basics.Online,
MicroAlgos: basics.MicroAlgos{Raw: 10000000000000},
}
- //b.Log(addr)
}
genesis[poolAddr] = basics.AccountData{
@@ -133,8 +131,7 @@ func BenchmarkAssembleBlock(b *testing.B) {
if okcount == 0 {
worstTxID = signedTx.ID()
}
-
- err := tp.Remember(pooldata.SignedTxGroup{Transactions: []transactions.SignedTxn{signedTx}})
+ err := tp.Remember([]transactions.SignedTxn{signedTx})
if err != nil {
errcount++
b.Logf("(%d/%d) could not send [%d] %s -> [%d] %s: %s", errcount, okcount, sourcei, addresses[sourcei], desti, addresses[desti], err)
diff --git a/node/impls.go b/node/impls.go
index 67f9963a0..d7ced370b 100644
--- a/node/impls.go
+++ b/node/impls.go
@@ -114,8 +114,8 @@ func (l agreementLedger) EnsureDigest(cert agreement.Certificate, verifier *agre
}
// Wrapping error with a LedgerDroppedRoundError when an old round is requested but the ledger has already dropped the entry
-func (l agreementLedger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
- record, err := l.Ledger.Lookup(rnd, addr)
+func (l agreementLedger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
+ record, err := l.Ledger.LookupAgreement(rnd, addr)
var e *ledger.RoundOffsetError
if errors.As(err, &e) {
err = &agreement.LedgerDroppedRoundError{
diff --git a/node/netprio.go b/node/netprio.go
index c65db60d4..d3a4e99b6 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -80,12 +80,12 @@ func (node *AlgorandFullNode) MakePrioResponse(challenge string) []byte {
voteRound := latest + 2
for _, part := range node.accountManager.Keys(voteRound) {
parent := part.Address()
- data, err := node.ledger.Lookup(latest, parent)
+ data, err := node.ledger.LookupAgreement(latest, parent)
if err != nil {
continue
}
- weight := data.MicroAlgos.ToUint64()
+ weight := data.MicroAlgosWithRewards.ToUint64()
if weight > maxWeight {
maxPart = part
maxWeight = weight
@@ -125,7 +125,7 @@ func (node *AlgorandFullNode) VerifyPrioResponse(challenge string, response []by
return
}
- data, err := node.ledger.Lookup(balanceRound, rs.Sender)
+ data, err := node.ledger.LookupAgreement(balanceRound, rs.Sender)
if err != nil {
return
}
@@ -143,10 +143,10 @@ func (node *AlgorandFullNode) VerifyPrioResponse(challenge string, response []by
// GetPrioWeight implements the network.NetPrioScheme interface
func (node *AlgorandFullNode) GetPrioWeight(addr basics.Address) uint64 {
latest := node.ledger.LastRound()
- data, err := node.ledger.Lookup(latest, addr)
+ data, err := node.ledger.LookupAgreement(latest, addr)
if err != nil {
return 0
}
- return data.MicroAlgos.ToUint64()
+ return data.MicroAlgosWithRewards.ToUint64()
}
diff --git a/node/node.go b/node/node.go
index 62256b348..e5a2f7e6a 100644
--- a/node/node.go
+++ b/node/node.go
@@ -24,6 +24,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "strings"
"sync"
"time"
@@ -38,7 +39,6 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/committee"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/verify"
@@ -50,12 +50,12 @@ import (
"github.com/algorand/go-algorand/node/indexer"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/rpcs"
- "github.com/algorand/go-algorand/txnsync"
"github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-algorand/util/execpool"
"github.com/algorand/go-algorand/util/metrics"
"github.com/algorand/go-algorand/util/timers"
"github.com/algorand/go-deadlock"
+ uuid "github.com/satori/go.uuid"
)
// StatusReport represents the current basic status of the node
@@ -108,7 +108,6 @@ type AlgorandFullNode struct {
blockService *rpcs.BlockService
ledgerService *rpcs.LedgerService
txPoolSyncerService *rpcs.TxSyncer
- txnSyncService *txnsync.Service
indexer *indexer.Indexer
@@ -136,8 +135,6 @@ type AlgorandFullNode struct {
tracer messagetracer.MessageTracer
compactCert *compactcert.Worker
-
- txnSyncConnector *transactionSyncNodeConnector
}
// TxnWithStatus represents information about a single transaction,
@@ -183,7 +180,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
}
p2pNode.SetPrioScheme(node)
node.net = p2pNode
- node.accountManager = data.MakeAccountManager(log)
accountListener := makeTopAccountListener(log)
@@ -215,6 +211,15 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.transactionPool = pools.MakeTransactionPool(node.ledger.Ledger, cfg, node.log)
+ blockListeners := []ledger.BlockListener{
+ node.transactionPool,
+ node,
+ }
+
+ if node.config.EnableTopAccountsReporting {
+ blockListeners = append(blockListeners, &accountListener)
+ }
+ node.ledger.RegisterBlockListeners(blockListeners)
node.txHandler = data.MakeTxHandler(node.transactionPool, node.ledger, node.net, node.genesisID, node.genesisHash, node.lowPriorityCryptoVerificationPool)
// Indexer setup
@@ -263,8 +268,13 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.catchupBlockAuth = blockAuthenticatorImpl{Ledger: node.ledger, AsyncVoteVerifier: agreement.MakeAsyncVoteVerifier(node.lowPriorityCryptoVerificationPool)}
node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates, node.lowPriorityCryptoVerificationPool)
node.txPoolSyncerService = rpcs.MakeTxSyncer(node.transactionPool, node.net, node.txHandler.SolicitedTxHandler(), time.Duration(cfg.TxSyncIntervalSeconds)*time.Second, time.Duration(cfg.TxSyncTimeoutSeconds)*time.Second, cfg.TxSyncServeResponseSize)
- node.txnSyncConnector = makeTransactionSyncNodeConnector(node)
- node.txnSyncService = txnsync.MakeTransactionSyncService(node.log, node.txnSyncConnector, cfg.NetAddress != "", node.genesisID, node.genesisHash, node.config, node.lowPriorityCryptoVerificationPool)
+
+ registry, err := ensureParticipationDB(genesisDir, node.log)
+ if err != nil {
+ log.Errorf("unable to initialize the participation registry database: %v", err)
+ return nil, err
+ }
+ node.accountManager = data.MakeAccountManager(log, registry)
err = node.loadParticipationKeys()
if err != nil {
@@ -298,17 +308,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
}
node.compactCert = compactcert.NewWorker(compactCertAccess, node.log, node.accountManager, node.ledger.Ledger, node.net, node)
- blockListeners := []ledger.BlockListener{
- node.txnSyncConnector,
- node.transactionPool,
- node,
- }
-
- if node.config.EnableTopAccountsReporting {
- blockListeners = append(blockListeners, &accountListener)
- }
- node.ledger.RegisterBlockListeners(blockListeners)
-
return node, err
}
@@ -379,12 +378,9 @@ func (node *AlgorandFullNode) Start() {
node.txPoolSyncerService.Start(node.catchupService.InitialSyncDone)
node.blockService.Start()
node.ledgerService.Start()
+ node.txHandler.Start()
node.compactCert.Start()
- node.txnSyncService.Start()
- node.txnSyncConnector.start()
-
startNetwork()
-
// start indexer
if idx, err := node.Indexer(); err == nil {
err := idx.Start()
@@ -407,6 +403,7 @@ func (node *AlgorandFullNode) Start() {
func (node *AlgorandFullNode) startMonitoringRoutines() {
node.monitoringRoutinesWaitGroup.Add(3)
+ // PKI TODO: Remove this with #2596
// Periodically check for new participation keys
go node.checkForParticipationKeys()
@@ -451,8 +448,7 @@ func (node *AlgorandFullNode) Stop() {
if node.catchpointCatchupService != nil {
node.catchpointCatchupService.Stop()
} else {
- node.txnSyncService.Stop()
- node.txnSyncConnector.stop()
+ node.txHandler.Stop()
node.agreementService.Shutdown()
node.catchupService.Stop()
node.txPoolSyncerService.Stop()
@@ -487,7 +483,7 @@ func (node *AlgorandFullNode) Ledger() *data.Ledger {
// writeDevmodeBlock generates a new block for a devmode, and write it to the ledger.
func (node *AlgorandFullNode) writeDevmodeBlock() (err error) {
- var vb *ledger.ValidatedBlock
+ var vb *ledgercore.ValidatedBlock
vb, err = node.transactionPool.AssembleDevModeBlock()
if err != nil || vb == nil {
return
@@ -528,7 +524,7 @@ func (node *AlgorandFullNode) BroadcastSignedTxGroup(txgroup []transactions.Sign
return err
}
- err = node.transactionPool.Remember(pooldata.SignedTxGroup{Transactions: txgroup, LocallyOriginated: true})
+ err = node.transactionPool.Remember(txgroup)
if err != nil {
node.log.Infof("rejected by local pool: %v - transaction group was %+v", err, txgroup)
return err
@@ -539,8 +535,6 @@ func (node *AlgorandFullNode) BroadcastSignedTxGroup(txgroup []transactions.Sign
logging.Base().Infof("unable to pin transaction: %v", err)
}
- node.txnSyncConnector.onNewTransactionPoolEntry(node.transactionPool.PendingCount())
-
var enc []byte
var txids []transactions.Txid
for _, tx := range txgroup {
@@ -755,12 +749,17 @@ func (node *AlgorandFullNode) SuggestedFee() basics.MicroAlgos {
// GetPendingTxnsFromPool returns a snapshot of every pending transactions from the node's transaction pool in a slice.
// Transactions are sorted in decreasing order. If no transactions, returns an empty slice.
func (node *AlgorandFullNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn, error) {
- poolGroups, _ := node.transactionPool.PendingTxGroups()
- txnGroups := make([][]transactions.SignedTxn, len(poolGroups))
- for i := range txnGroups {
- txnGroups[i] = poolGroups[i].Transactions
+ return bookkeeping.SignedTxnGroupsFlatten(node.transactionPool.PendingTxGroups()), nil
+}
+
+// ensureParticipationDB opens or creates a participation DB.
+func ensureParticipationDB(genesisDir string, log logging.Logger) (account.ParticipationRegistry, error) {
+ accessorFile := filepath.Join(genesisDir, config.ParticipationRegistryFilename)
+ accessor, err := db.OpenPair(accessorFile, false)
+ if err != nil {
+ return nil, err
}
- return bookkeeping.SignedTxnGroupsFlatten(txnGroups), nil
+ return account.MakeParticipationRegistry(accessor, log)
}
// Reload participation keys from disk periodically
@@ -781,6 +780,149 @@ func (node *AlgorandFullNode) checkForParticipationKeys() {
}
}
+// ListParticipationKeys returns all participation keys currently installed on the node
+func (node *AlgorandFullNode) ListParticipationKeys() (partKeys []account.ParticipationRecord, err error) {
+ return node.accountManager.Registry().GetAll(), nil
+}
+
+// GetParticipationKey retries the information of a participation id from the node
+func (node *AlgorandFullNode) GetParticipationKey(partKey account.ParticipationID) (account.ParticipationRecord, error) {
+ rval := node.accountManager.Registry().Get(partKey)
+
+ if rval.IsZero() {
+ return account.ParticipationRecord{}, account.ErrParticipationIDNotFound
+ }
+
+ return node.accountManager.Registry().Get(partKey), nil
+}
+
+// RemoveParticipationKey given a participation id, remove the records from the node
+func (node *AlgorandFullNode) RemoveParticipationKey(partKey account.ParticipationID) error {
+
+ // Need to remove the file and then remove the entry in the registry
+ // Let's first get the recorded information from the registry so we can lookup the file
+
+ partRecord := node.accountManager.Registry().Get(partKey)
+
+ if partRecord.IsZero() {
+ return account.ErrParticipationIDNotFound
+ }
+
+ genID := node.GenesisID()
+
+ outDir := filepath.Join(node.rootDir, genID)
+
+ filename := config.PartKeyFilename(partRecord.ParticipationID.String(), uint64(partRecord.FirstValid), uint64(partRecord.LastValid))
+ fullyQualifiedFilename := filepath.Join(outDir, filepath.Base(filename))
+
+ err := node.accountManager.Registry().Delete(partKey)
+ if err != nil {
+ return err
+ }
+
+ // PKI TODO: pick a better timeout, this is just something short. This could also be removed if we change
+ // POST /v2/participation and DELETE /v2/participation to return "202 OK Accepted" instead of waiting and getting
+ // the error message.
+ err = node.accountManager.Registry().Flush(500 * time.Millisecond)
+ if err != nil {
+ return err
+ }
+
+ // Only after deleting and flushing do we want to remove the file
+ _ = os.Remove(fullyQualifiedFilename)
+
+ return nil
+}
+
+func createTemporaryParticipationKey(outDir string, partKeyBinary []byte) (string, error) {
+ var sb strings.Builder
+
+ // Create a temporary filename with a UUID so that we can call this function twice
+ // in a row without worrying about collisions
+ sb.WriteString("tempPartKeyBinary.")
+ sb.WriteString(uuid.NewV4().String())
+ sb.WriteString(".bin")
+
+ tempFile := filepath.Join(outDir, filepath.Base(sb.String()))
+
+ file, err := os.Create(tempFile)
+
+ if err != nil {
+ return "", err
+ }
+
+ _, err = file.Write(partKeyBinary)
+
+ file.Close()
+
+ if err != nil {
+ os.Remove(tempFile)
+ return "", err
+ }
+
+ return tempFile, nil
+}
+
+// InstallParticipationKey Given a participation key binary stream install the participation key.
+func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) {
+ genID := node.GenesisID()
+
+ outDir := filepath.Join(node.rootDir, genID)
+
+ fullyQualifiedTempFile, err := createTemporaryParticipationKey(outDir, partKeyBinary)
+ // We need to make sure no tempfile is created/remains if there is an error
+ // However, we will eventually rename this file but if we fail in-between
+ // this point and the rename we want to ensure that we remove the temporary file
+ // After we rename, this will fail anyway since the file will not exist
+
+ // Explicitly ignore the error with a closure
+ defer func(name string) {
+ _ = os.Remove(name)
+ }(fullyQualifiedTempFile)
+
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+
+ inputdb, err := db.MakeErasableAccessor(fullyQualifiedTempFile)
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+ defer inputdb.Close()
+
+ partkey, err := account.RestoreParticipation(inputdb)
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+ defer partkey.Close()
+
+ if partkey.Parent == (basics.Address{}) {
+ return account.ParticipationID{}, fmt.Errorf("cannot install partkey with missing (zero) parent address")
+ }
+
+ // Tell the AccountManager about the Participation (dupes don't matter) so we ignore the return value
+ _ = node.accountManager.AddParticipation(partkey)
+
+ // PKI TODO: pick a better timeout, this is just something short. This could also be removed if we change
+ // POST /v2/participation and DELETE /v2/participation to return "202 OK Accepted" instead of waiting and getting
+ // the error message.
+ err = node.accountManager.Registry().Flush(500 * time.Millisecond)
+ if err != nil {
+ return account.ParticipationID{}, err
+ }
+
+ newFilename := config.PartKeyFilename(partkey.ID().String(), uint64(partkey.FirstValid), uint64(partkey.LastValid))
+ newFullyQualifiedFilename := filepath.Join(outDir, filepath.Base(newFilename))
+
+ err = os.Rename(fullyQualifiedTempFile, newFullyQualifiedFilename)
+
+ if err != nil {
+ return account.ParticipationID{}, nil
+ }
+
+ return partkey.ID(), nil
+}
+
func (node *AlgorandFullNode) loadParticipationKeys() error {
// Generate a list of all potential participation key files
genesisDir := filepath.Join(node.rootDir, node.genesisID)
@@ -802,7 +944,7 @@ func (node *AlgorandFullNode) loadParticipationKeys() error {
if err != nil {
if db.IsErrBusy(err) {
// this is a special case:
- // we might get "database is locked" when we attempt to access a database that is conurrently updates it's participation keys.
+ // we might get "database is locked" when we attempt to access a database that is concurrently updating its participation keys.
// that database is clearly already on the account manager, and doesn't need to be processed through this logic, and therefore
// we can safely ignore that fail case.
continue
@@ -862,17 +1004,10 @@ func (node *AlgorandFullNode) IsArchival() bool {
}
// OnNewBlock implements the BlockListener interface so we're notified after each block is written to the ledger
-// The method is being called *after* the transaction pool received it's OnNewBlock call.
func (node *AlgorandFullNode) OnNewBlock(block bookkeeping.Block, delta ledgercore.StateDelta) {
- blkRound := block.Round()
- if node.ledger.Latest() > blkRound {
+ if node.ledger.Latest() > block.Round() {
return
}
-
- // the transaction pool already updated its transactions (dumping out old and invalid transactions). At this point,
- // we need to let the txnsync know about the size of the transaction pool.
- node.txnSyncConnector.onNewTransactionPoolEntry(node.transactionPool.PendingCount())
-
node.syncStatusMu.Lock()
node.lastRoundTimestamp = time.Now()
node.hasSyncedSinceStartup = true
@@ -941,6 +1076,10 @@ func (node *AlgorandFullNode) oldKeyDeletionThread() {
node.mu.Lock()
node.accountManager.DeleteOldKeys(latestHdr, ccSigs, agreementProto)
node.mu.Unlock()
+
+ // PKI TODO: Maybe we don't even need to flush the registry.
+ // Persist participation registry metrics.
+ node.accountManager.FlushRegistry(2 * time.Second)
}
}
@@ -1040,8 +1179,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
node.waitMonitoringRoutines()
}()
node.net.ClearHandlers()
- node.txnSyncConnector.stop()
- node.txnSyncService.Stop()
+ node.txHandler.Stop()
node.agreementService.Shutdown()
node.catchupService.Stop()
node.txPoolSyncerService.Stop()
@@ -1065,8 +1203,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
node.txPoolSyncerService.Start(node.catchupService.InitialSyncDone)
node.blockService.Start()
node.ledgerService.Start()
- node.txnSyncService.Start()
- node.txnSyncConnector.start()
+ node.txHandler.Start()
// start indexer
if idx, err := node.Indexer(); err == nil {
@@ -1097,7 +1234,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
// validatedBlock satisfies agreement.ValidatedBlock
type validatedBlock struct {
- vb *ledger.ValidatedBlock
+ vb *ledgercore.ValidatedBlock
}
// WithSeed satisfies the agreement.ValidatedBlock interface.
@@ -1113,7 +1250,8 @@ func (vb validatedBlock) Block() bookkeeping.Block {
}
// AssembleBlock implements Ledger.AssembleBlock.
-func (node *AlgorandFullNode) AssembleBlock(round basics.Round, deadline time.Time) (agreement.ValidatedBlock, error) {
+func (node *AlgorandFullNode) AssembleBlock(round basics.Round) (agreement.ValidatedBlock, error) {
+ deadline := time.Now().Add(node.config.ProposalAssemblyTime)
lvb, err := node.transactionPool.AssembleBlock(round, deadline)
if err != nil {
if errors.Is(err, pools.ErrStaleBlockAssemblyRequest) {
@@ -1142,7 +1280,7 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
keys := node.accountManager.Keys(votingRound)
participations := make([]account.Participation, 0, len(keys))
- accountsData := make(map[basics.Address]basics.AccountData, len(keys))
+ accountsData := make(map[basics.Address]basics.OnlineAccountData, len(keys))
matchingAccountsKeys := make(map[basics.Address]bool)
mismatchingAccountsKeys := make(map[basics.Address]int)
const bitMismatchingVotingKey = 1
@@ -1151,7 +1289,7 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
acctData, hasAccountData := accountsData[part.Parent]
if !hasAccountData {
var err error
- acctData, _, err = node.ledger.LookupWithoutRewards(keysRound, part.Parent)
+ acctData, err = node.ledger.LookupAgreement(keysRound, part.Parent)
if err != nil {
node.log.Warnf("node.VotingKeys: Account %v not participating: cannot locate account for round %d : %v", part.Address(), keysRound, err)
continue
@@ -1169,6 +1307,12 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
}
participations = append(participations, part)
matchingAccountsKeys[part.Address()] = true
+
+ // Make sure the key is registered.
+ err := node.accountManager.Registry().Register(part.ID(), votingRound)
+ if err != nil {
+ node.log.Warnf("Failed to register participation key (%s) with participation registry: %v\n", part.ID(), err)
+ }
}
// write the warnings per account only if we couldn't find a single valid key for that account.
for mismatchingAddr, warningFlags := range mismatchingAccountsKeys {
@@ -1186,3 +1330,8 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
}
return participations
}
+
+// Record forwards participation record calls to the participation registry.
+func (node *AlgorandFullNode) Record(account basics.Address, round basics.Round, participationType account.ParticipationAction) {
+ node.accountManager.Record(account, round, participationType)
+}
diff --git a/node/node_test.go b/node/node_test.go
index 411bb3360..bfd33a8f0 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -129,10 +129,10 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
panic(err)
}
part, err := account.FillDBWithParticipationKeys(access, root.Address(), firstRound, lastRound, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
- access.Close()
if err != nil {
panic(err)
}
+ access.Close()
data := basics.AccountData{
Status: basics.Online,
@@ -507,3 +507,49 @@ func TestMismatchingGenesisDirectoryPermissions(t *testing.T) {
require.NoError(t, os.Chmod(testDirectroy, 1700))
require.NoError(t, os.RemoveAll(testDirectroy))
}
+
+func TestAsyncRecord(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testDirectroy, err := ioutil.TempDir(os.TempDir(), t.Name())
+ require.NoError(t, err)
+
+ genesis := bookkeeping.Genesis{
+ SchemaID: "go-test-node-record-async",
+ Proto: protocol.ConsensusCurrentVersion,
+ Network: config.Devtestnet,
+ FeeSink: sinkAddr.String(),
+ RewardsPool: poolAddr.String(),
+ }
+
+ cfg := config.GetDefaultLocal()
+ cfg.DisableNetworking = true
+ node, err := MakeFull(logging.TestingLog(t), testDirectroy, config.GetDefaultLocal(), []string{}, genesis)
+ require.NoError(t, err)
+ node.Start()
+ defer node.Stop()
+
+ var addr basics.Address
+ addr[0] = 1
+
+ p := account.Participation{
+ Parent: addr,
+ FirstValid: 0,
+ LastValid: 1000000,
+ Voting: &crypto.OneTimeSignatureSecrets{},
+ VRF: &crypto.VRFSecrets{},
+ }
+ id, err := node.accountManager.Registry().Insert(p)
+ require.NoError(t, err)
+ err = node.accountManager.Registry().Register(id, 0)
+ require.NoError(t, err)
+
+ node.Record(addr, 10000, account.Vote)
+ node.Record(addr, 20000, account.BlockProposal)
+
+ time.Sleep(5000 * time.Millisecond)
+ records := node.accountManager.Registry().GetAll()
+ require.Len(t, records, 1)
+ require.Equal(t, 10000, int(records[0].LastVote))
+ require.Equal(t, 20000, int(records[0].LastBlockProposal))
+}
diff --git a/node/txnSyncConn.go b/node/txnSyncConn.go
deleted file mode 100644
index 00f5607ea..000000000
--- a/node/txnSyncConn.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-// Package node is the Algorand node itself, with functions exposed to the frontend
-package node
-
-import (
- "context"
- "time"
-
- "github.com/algorand/go-algorand/data"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/txnsync"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-// txnsyncPeerDataKey is the key name by which we're going to store the
-// transaction sync internal data object inside the network peer.
-const txnsyncPeerDataKey = "txsync"
-
-// transactionSyncNodeConnector implements the txnsync.NodeConnector interface, allowing the
-// transaction sync communicate with the node and it's child objects.
-type transactionSyncNodeConnector struct {
- node *AlgorandFullNode
- eventsCh chan txnsync.Event
- clock timers.WallClock
- messageHandler txnsync.IncomingMessageHandler
- txHandler data.SolicitedAsyncTxHandler
- openStateCh chan struct{}
-}
-
-func makeTransactionSyncNodeConnector(node *AlgorandFullNode) *transactionSyncNodeConnector {
- return &transactionSyncNodeConnector{
- node: node,
- eventsCh: make(chan txnsync.Event, 1),
- clock: timers.MakeMonotonicClock(time.Now()),
- txHandler: node.txHandler.SolicitedAsyncTxHandler(),
- openStateCh: make(chan struct{}),
- }
-}
-
-func (tsnc *transactionSyncNodeConnector) Events() <-chan txnsync.Event {
- return tsnc.eventsCh
-}
-
-// GetCurrentRoundSettings is called when the txsync is starting up, proving
-// round information.
-func (tsnc *transactionSyncNodeConnector) GetCurrentRoundSettings() txnsync.RoundSettings {
- round := tsnc.node.ledger.Latest()
- return txnsync.RoundSettings{
- Round: round,
- FetchTransactions: tsnc.node.config.ForceFetchTransactions || tsnc.node.accountManager.HasLiveKeys(round, round),
- }
-}
-
-// NotifyMonitor is used for testing purposes only, and can remain(almost) empty on production code.
-func (tsnc *transactionSyncNodeConnector) NotifyMonitor() chan struct{} {
- return tsnc.openStateCh
-}
-
-func (tsnc *transactionSyncNodeConnector) Random(upperBound uint64) uint64 {
- return tsnc.node.Uint64() % upperBound
-}
-
-func (tsnc *transactionSyncNodeConnector) Clock() timers.WallClock {
- return tsnc.clock
-}
-
-func (tsnc *transactionSyncNodeConnector) GetPeer(networkPeer interface{}) txnsync.PeerInfo {
- unicastPeer := networkPeer.(network.UnicastPeer)
- if unicastPeer == nil {
- return txnsync.PeerInfo{}
- }
-
- peerData := tsnc.node.net.GetPeerData(networkPeer, txnsyncPeerDataKey)
- if peerData == nil {
- return txnsync.PeerInfo{
- IsOutgoing: unicastPeer.IsOutgoing(),
- NetworkPeer: unicastPeer,
- }
- }
- return txnsync.PeerInfo{
- IsOutgoing: unicastPeer.IsOutgoing(),
- NetworkPeer: unicastPeer,
- TxnSyncPeer: peerData.(*txnsync.Peer),
- }
-}
-
-func (tsnc *transactionSyncNodeConnector) GetPeers() (peersInfo []txnsync.PeerInfo) {
- networkPeers := tsnc.node.net.GetPeers(network.PeersConnectedOut, network.PeersConnectedIn)
- peersInfo = make([]txnsync.PeerInfo, len(networkPeers))
- k := 0
- for i := range networkPeers {
- unicastPeer := networkPeers[i].(network.UnicastPeer)
- if unicastPeer == nil {
- continue
- }
- // check version.
- if unicastPeer.Version() != "3.0" {
- continue
- }
- peersInfo[k].IsOutgoing = unicastPeer.IsOutgoing()
- peersInfo[k].NetworkPeer = networkPeers[i]
- peerData := tsnc.node.net.GetPeerData(networkPeers[i], txnsyncPeerDataKey)
- if peerData != nil {
- peersInfo[k].TxnSyncPeer = peerData.(*txnsync.Peer)
- }
- k++
- }
-
- return peersInfo[:k]
-}
-
-func (tsnc *transactionSyncNodeConnector) UpdatePeers(txnsyncPeers []*txnsync.Peer, netPeers []interface{}, averageDataExchangeRate uint64) {
- for i, netPeer := range netPeers {
- tsnc.node.net.SetPeerData(netPeer, txnsyncPeerDataKey, txnsyncPeers[i])
- }
- // The average peers data exchange rate has been updated.
- if averageDataExchangeRate > 0 {
- // update the transaction pool with the latest peers data exchange rate.
- tsnc.node.transactionPool.SetDataExchangeRate(averageDataExchangeRate)
- }
-}
-
-func (tsnc *transactionSyncNodeConnector) SendPeerMessage(netPeer interface{}, msg []byte, callback txnsync.SendMessageCallback) {
- unicastPeer := netPeer.(network.UnicastPeer)
- if unicastPeer == nil {
- return
- }
-
- // this might return an error to the network package callback routine. Returning an error signal the network package
- // that we want to disconnect from this peer. This aligns with the transaction sync txnsync.SendMessageCallback function
- // behaviour.
- if err := unicastPeer.Unicast(context.Background(), msg, protocol.Txn2Tag, network.UnicastWebsocketMessageStateCallback(callback)); err != nil {
- if callbackErr := callback(false, 0); callbackErr != nil {
- // disconnect from peer - the transaction sync wasn't able to process message sending confirmation
- tsnc.node.net.Disconnect(unicastPeer)
- }
- }
-}
-
-func (tsnc *transactionSyncNodeConnector) GetPeerLatency(netPeer interface{}) time.Duration {
- unicastPeer := netPeer.(network.UnicastPeer)
- return unicastPeer.GetConnectionLatency()
-}
-
-// GetPendingTransactionGroups is called by the transaction sync when it needs to look into the transaction
-// pool and get the updated set of pending transactions. The second returned argument is the latest locally originated
-// group counter within the given transaction groups list. If there is no group that is locally originated, the expected
-// value is InvalidSignedTxGroupCounter.
-func (tsnc *transactionSyncNodeConnector) GetPendingTransactionGroups() ([]pooldata.SignedTxGroup, uint64) {
- return tsnc.node.transactionPool.PendingTxGroups()
-}
-
-func (tsnc *transactionSyncNodeConnector) onNewTransactionPoolEntry(transactionPoolSize int) {
- select {
- case tsnc.eventsCh <- txnsync.MakeTransactionPoolChangeEvent(transactionPoolSize, false):
- default:
- }
-}
-
-// OnNewBlock receives a notification that we've moved to a new round from the ledger.
-// This notification would be received before the transaction pool get a similar notification, due
-// the ordering of the block notifier registration.
-func (tsnc *transactionSyncNodeConnector) OnNewBlock(block bookkeeping.Block, delta ledgercore.StateDelta) {
- blkRound := block.Round()
-
- fetchTransactions := tsnc.node.config.ForceFetchTransactions || tsnc.node.accountManager.HasLiveKeys(blkRound, blkRound)
- // if this is a relay, then we always want to fetch transactions, regardless if we have participation keys.
- if tsnc.node.config.NetAddress != "" {
- fetchTransactions = true
- }
-
- select {
- case tsnc.eventsCh <- txnsync.MakeNewRoundEvent(blkRound, fetchTransactions):
- default:
- }
-
-}
-
-func (tsnc *transactionSyncNodeConnector) start() {
- tsnc.txHandler.Start()
- tsnc.messageHandler = tsnc.node.txnSyncService.GetIncomingMessageHandler()
- handlers := []network.TaggedMessageHandler{
- {Tag: protocol.Txn2Tag, MessageHandler: tsnc},
- }
- tsnc.node.net.RegisterHandlers(handlers)
-}
-
-func (tsnc *transactionSyncNodeConnector) Handle(raw network.IncomingMessage) network.OutgoingMessage {
- unicastPeer := raw.Sender.(network.UnicastPeer)
- if unicastPeer != nil {
- // check version.
- if unicastPeer.Version() != "3.0" {
- return network.OutgoingMessage{
- Action: network.Ignore,
- }
- }
- }
- var peer *txnsync.Peer
- peerData := tsnc.node.net.GetPeerData(raw.Sender, txnsyncPeerDataKey)
- if peerData != nil {
- peer = peerData.(*txnsync.Peer)
- }
-
- err := tsnc.messageHandler(raw.Sender, peer, raw.Data, raw.Sequence, raw.Received)
- if err != nil {
- return network.OutgoingMessage{
- Action: network.Disconnect,
- }
- }
- return network.OutgoingMessage{
- Action: network.Ignore,
- }
-}
-
-func (tsnc *transactionSyncNodeConnector) stop() {
- tsnc.txHandler.Stop()
-}
-
-func (tsnc *transactionSyncNodeConnector) IncomingTransactionGroups(peer *txnsync.Peer, messageSeq uint64, txGroups []pooldata.SignedTxGroup) (transactionPoolSize int) {
- if tsnc.txHandler.HandleTransactionGroups(peer.GetNetworkPeer(), peer.GetTransactionPoolAckChannel(), messageSeq, txGroups) {
- transactionPoolSize = tsnc.node.transactionPool.PendingCount()
- } else {
- transactionPoolSize = -1
- }
- return
-}
diff --git a/protocol/hash.go b/protocol/hash.go
index 846b03c27..e17114679 100644
--- a/protocol/hash.go
+++ b/protocol/hash.go
@@ -49,6 +49,7 @@ const (
Program HashID = "Program"
ProgramData HashID = "ProgData"
ProposerSeed HashID = "PS"
+ ParticipationKeys HashID = "PK"
Seed HashID = "SD"
SpecialAddr HashID = "SpecialAddr"
SignedTxnInBlock HashID = "STIB"
diff --git a/protocol/tags.go b/protocol/tags.go
index 070460c1a..0cab25628 100644
--- a/protocol/tags.go
+++ b/protocol/tags.go
@@ -41,5 +41,4 @@ const (
//UniEnsBlockResTag Tag = "US" was used for wsfetcherservice
//UniCatchupResTag Tag = "UT" was used for wsfetcherservice
VoteBundleTag Tag = "VB"
- Txn2Tag Tag = "tx"
)
diff --git a/protocol/txntype.go b/protocol/txntype.go
index 8f6129139..919c576d4 100644
--- a/protocol/txntype.go
+++ b/protocol/txntype.go
@@ -47,15 +47,3 @@ const (
// UnknownTx signals an error
UnknownTx TxType = "unknown"
)
-
-// TxnTypes is an array containing all the defined transaction types
-var TxnTypes = []TxType{
- PaymentTx,
- KeyRegistrationTx,
- AssetConfigTx,
- AssetTransferTx,
- AssetFreezeTx,
- ApplicationCallTx,
- CompactCertTx,
- UnknownTx,
-}
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index 38d07aba1..542e8783e 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -44,14 +44,11 @@ type mockUnicastPeer struct {
func (mup *mockUnicastPeer) GetAddress() string {
return ""
}
-func (mup *mockUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag, callback network.UnicastWebsocketMessageStateCallback) error {
+func (mup *mockUnicastPeer) Unicast(ctx context.Context, data []byte, tag protocol.Tag) error {
return nil
}
func (mup *mockUnicastPeer) Version() string {
- return network.ProtocolVersion
-}
-func (mup *mockUnicastPeer) IsOutgoing() bool {
- return false
+ return "2.1"
}
// GetConnectionLatency returns the connection latency between the local node and this peer.
diff --git a/rpcs/txService.go b/rpcs/txService.go
index 9201f96e2..df08114c6 100644
--- a/rpcs/txService.go
+++ b/rpcs/txService.go
@@ -194,11 +194,7 @@ func (txs *TxService) updateTxCache() (pendingTxGroups [][]transactions.SignedTx
// The txs.pool.PendingTxGroups() function allocates a new array on every call. That means that the old
// array ( if being used ) is still valid. There is no risk of data race here since
// the txs.pendingTxGroups is a slice (hence a pointer to the array) and not the array itself.
- pendingSignedTxGroups, _ := txs.pool.PendingTxGroups()
- txs.pendingTxGroups = make([][]transactions.SignedTxn, len(pendingSignedTxGroups))
- for i := range txs.pendingTxGroups {
- txs.pendingTxGroups[i] = pendingSignedTxGroups[i].Transactions
- }
+ txs.pendingTxGroups = txs.pool.PendingTxGroups()
txs.lastUpdate = currentUnixTime
}
return txs.pendingTxGroups
diff --git a/rpcs/txSyncer.go b/rpcs/txSyncer.go
index 4d9f344f1..0290d82a6 100644
--- a/rpcs/txSyncer.go
+++ b/rpcs/txSyncer.go
@@ -23,7 +23,6 @@ import (
"time"
"github.com/algorand/go-algorand/data"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
@@ -33,7 +32,7 @@ import (
// PendingTxAggregate is a container of pending transactions
type PendingTxAggregate interface {
PendingTxIDs() []transactions.Txid
- PendingTxGroups() ([]pooldata.SignedTxGroup, uint64)
+ PendingTxGroups() [][]transactions.SignedTxn
}
// TxSyncClient abstracts sync-ing pending transactions from a peer.
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go
index c9a0d274f..9b89fda1b 100644
--- a/rpcs/txSyncer_test.go
+++ b/rpcs/txSyncer_test.go
@@ -32,7 +32,6 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
@@ -76,16 +75,8 @@ func (mock mockPendingTxAggregate) PendingTxIDs() []transactions.Txid {
}
return ids
}
-func makeSignedTxGroup(source [][]transactions.SignedTxn) (result []pooldata.SignedTxGroup) {
- result = make([]pooldata.SignedTxGroup, len(source))
- for i := range source {
- result[i].Transactions = source[i]
- }
- return
-}
-
-func (mock mockPendingTxAggregate) PendingTxGroups() ([]pooldata.SignedTxGroup, uint64) {
- return makeSignedTxGroup(bookkeeping.SignedTxnsToGroups(mock.txns)), pooldata.InvalidSignedTxGroupCounter
+func (mock mockPendingTxAggregate) PendingTxGroups() [][]transactions.SignedTxn {
+ return bookkeeping.SignedTxnsToGroups(mock.txns)
}
type mockHandler struct {
@@ -106,7 +97,7 @@ type mockRunner struct {
done chan *rpc.Call
failWithNil bool
failWithError bool
- txgroups []pooldata.SignedTxGroup
+ txgroups [][]transactions.SignedTxn
}
type mockRPCClient struct {
@@ -137,11 +128,7 @@ func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txg
if client.client.failWithError {
return nil, errors.New("failing call")
}
- txgroups = make([][]transactions.SignedTxn, len(client.client.txgroups))
- for i := range txgroups {
- txgroups[i] = client.client.txgroups[i].Transactions
- }
- return txgroups, nil
+ return client.client.txgroups, nil
}
// network.HTTPPeer interface
@@ -181,8 +168,7 @@ func TestSyncFromClient(t *testing.T) {
clientPool := makeMockPendingTxAggregate(2)
serverPool := makeMockPendingTxAggregate(1)
- serverTxGroups, _ := serverPool.PendingTxGroups()
- runner := mockRunner{failWithNil: false, failWithError: false, txgroups: serverTxGroups[len(serverTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: serverPool.PendingTxGroups()[len(serverPool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
@@ -197,8 +183,7 @@ func TestSyncFromUnsupportedClient(t *testing.T) {
partitiontest.PartitionTest(t)
pool := makeMockPendingTxAggregate(3)
- poolTxGroups, _ := pool.PendingTxGroups()
- runner := mockRunner{failWithNil: true, failWithError: false, txgroups: poolTxGroups[len(poolTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: true, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
@@ -213,8 +198,7 @@ func TestSyncFromClientAndQuit(t *testing.T) {
partitiontest.PartitionTest(t)
pool := makeMockPendingTxAggregate(3)
- poolTxGroups, _ := pool.PendingTxGroups()
- runner := mockRunner{failWithNil: false, failWithError: false, txgroups: poolTxGroups[len(poolTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
@@ -229,8 +213,7 @@ func TestSyncFromClientAndError(t *testing.T) {
partitiontest.PartitionTest(t)
pool := makeMockPendingTxAggregate(3)
- poolTxGroups, _ := pool.PendingTxGroups()
- runner := mockRunner{failWithNil: false, failWithError: true, txgroups: poolTxGroups[len(poolTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: false, failWithError: true, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
@@ -244,8 +227,7 @@ func TestSyncFromClientAndTimeout(t *testing.T) {
partitiontest.PartitionTest(t)
pool := makeMockPendingTxAggregate(3)
- poolTxGroups, _ := pool.PendingTxGroups()
- runner := mockRunner{failWithNil: false, failWithError: false, txgroups: poolTxGroups[len(poolTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
@@ -266,8 +248,7 @@ func TestSync(t *testing.T) {
nodeA.start()
nodeAURL := nodeA.rootURL()
- poolTxGroups, _ := pool.PendingTxGroups()
- runner := mockRunner{failWithNil: false, failWithError: false, txgroups: poolTxGroups[len(poolTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, rootURL: nodeAURL, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
@@ -302,8 +283,7 @@ func TestStartAndStop(t *testing.T) {
nodeA.start()
nodeAURL := nodeA.rootURL()
- poolTxGroups, _ := pool.PendingTxGroups()
- runner := mockRunner{failWithNil: false, failWithError: false, txgroups: poolTxGroups[len(poolTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, rootURL: nodeAURL, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
@@ -340,8 +320,7 @@ func TestStartAndQuit(t *testing.T) {
partitiontest.PartitionTest(t)
pool := makeMockPendingTxAggregate(3)
- poolTxGroups, _ := pool.PendingTxGroups()
- runner := mockRunner{failWithNil: false, failWithError: false, txgroups: poolTxGroups[len(poolTxGroups)-1:], done: make(chan *rpc.Call)}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)}
client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index 395c85e90..ae5887366 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -50,6 +50,8 @@ function runGoLint() {
echo >&2 "golint must be clean. Please run the following to list issues(${warningCount}):"
echo >&2 " make lint"
+ # run the linter again to output the actual issues
+ "$GOPATH"/bin/golint $(go list ./... | grep -v /vendor/ | grep -v /test/e2e-go/) >&2
return 1
}
diff --git a/scripts/travis/deploy_packages.sh b/scripts/travis/deploy_packages.sh
index c98b95730..e7e517394 100755
--- a/scripts/travis/deploy_packages.sh
+++ b/scripts/travis/deploy_packages.sh
@@ -24,7 +24,9 @@ then
exit 1
fi
-scripts/travis/build.sh
+if [ -z "${NO_BUILD}" ] || [ "${NO_BUILD}" != "true" ]; then
+ scripts/travis/build.sh
+fi
export RELEASE_GENESIS_PROCESS=true
export NO_BUILD=true
diff --git a/test/README.md b/test/README.md
index 59e5760f6..2d8936460 100644
--- a/test/README.md
+++ b/test/README.md
@@ -51,4 +51,9 @@ To run a specific test:
~$ ./e2e_client_runner.py /full/path/to/e2e_subs/test_script.sh
```
+Make sure to install the Algorand Python SDK before running:
+```
+pip install py-algorand-sdk
+```
+
Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary.
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
index 39f47e12c..f52e57a21 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
@@ -108,15 +108,25 @@ if { [catch {
::AlgorandGoal::StartNode $TEST_ROOT_DIR/Node False $WEBPROXY_LISTEN_ADDRESS
+ # once the node is started we can clear the ::GLOBAL_TEST_ALGO_DIR, so that shutdown would be done as a network.
+ unset ::GLOBAL_TEST_ALGO_DIR
+
::AlgorandGoal::WaitForRound 1 $TEST_ROOT_DIR/Node
set CATCHPOINT [::AlgorandGoal::GetNodeLastCatchpoint $TEST_ROOT_DIR/Primary]
puts "Catchpoint is $CATCHPOINT"
+ regexp -nocase {([0-9]*)#[A-Z2-7]*} $CATCHPOINT CATCHPOINT_ROUND CATCHPOINT_ROUND
+
+ puts "Catchpoint round is $CATCHPOINT_ROUND"
+
+ # wait for the primary to reach $CATCHPOINT_ROUND + 5, so that the catchpoint file would be saved
+ ::AlgorandGoal::WaitForRound [expr {int($CATCHPOINT_ROUND + 5)}] $TEST_ROOT_DIR/Primary
+
::AlgorandGoal::StartCatchup $TEST_ROOT_DIR/Node $CATCHPOINT
- ::AlgorandGoal::WaitForRound 37 $TEST_ROOT_DIR/Node
+ ::AlgorandGoal::WaitForRound $CATCHPOINT_ROUND $TEST_ROOT_DIR/Node
::AlgorandGoal::StopNode $TEST_ROOT_DIR/Node
@@ -164,6 +174,9 @@ if { [catch {
::AlgorandGoal::StartNode $TEST_ROOT_DIR/Node False $WEBPROXY_LISTEN_ADDRESS
+ # once the node is started we can clear the ::GLOBAL_TEST_ALGO_DIR, so that shutdown would be done as a network.
+ set ::GLOBAL_TEST_ALGO_DIR ""
+
::AlgorandGoal::WaitForRound 38 $TEST_ROOT_DIR/Node
::AlgorandGoal::StopNode $TEST_ROOT_DIR/Node
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index 273d02db9..345c1be6f 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -934,7 +934,17 @@ proc ::AlgorandGoal::WaitForRound { WAIT_FOR_ROUND_NUMBER NODE_DATA_DIR } {
-re {Genesis ID: (\w+)} {set GENESIS_ID $expect_out(1,string); exp_continue }
-re {Genesis hash: ([A-Za-z0-9+/]+={0,2})} {set GENESIS_HASH $expect_out(1,string); exp_continue }
-re {Catchpoint: ([0-9]*#[A-Z2-7]*)} { set CATCHPOINT $expect_out(1,string); exp_continue }
- eof { catch wait result; if { [lindex $result 3] != 0 } { ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3]"} }
+ eof {
+ catch wait result;
+ if { [lindex $result 3] != 0 } {
+ log_user 1
+ set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
+ puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
+ set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
+ puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3]"
+ }
+ }
}
log_user 1
if { $BLOCK > -1 } {
diff --git a/test/e2e-go/cli/goal/expect/goalNodeTest.exp b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
index d1af42d77..efab678f1 100644
--- a/test/e2e-go/cli/goal/expect/goalNodeTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
@@ -53,35 +53,22 @@ if { [catch {
# Stop node
::AlgorandGoal::StopNode $TEST_PRIMARY_NODE_DIR
- set WAIT_FOR_EOF 0
# Try stopping the node again, should fail
spawn goal node stop -d $TEST_PRIMARY_NODE_DIR
expect {
timeout { close; ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected" }
- "^Cannot kill node: no running node in directory '*'" {puts "Node failed to stop, as expected"; set WAIT_FOR_EOF 1; exp_continue }
- eof {
- if {$WAIT_FOR_EOF == 0} {
- close;
- ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected"
- }
- set WAIT_FOR_EOF 0
- }
+ "^Cannot kill node: no running node in directory '*'" {puts "Node failed successfully"; close}
+ eof { close; ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected" }
}
- # Try stopping node in invalid directory, should fail
+ #Try stopping node in invalid directory, should fail
spawn goal node stop -d ''
expect {
timeout { close; ::AlgorandGoal::Abort "Goal Node Fail did not fail as expected" }
- "^Cannot kill node: the provided directory '*' does not exist" {puts "Node failed to start on an invalid directory, as expected"; set WAIT_FOR_EOF 1; exp_continue }
- eof {
- if {$WAIT_FOR_EOF == 0} {
- close;
- ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected"
- }
- set WAIT_FOR_EOF 0
- }
+ "^Cannot kill node: the provided directory '*' does not exist" {puts "Node failed successfully"; close}
+ eof { close; ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected" }
}
-
+
# "break" the node by replacing it's ledger data files with "broken" ones.
lassign [exec find $TEST_PRIMARY_NODE_DIR -name "ledger.tracker.sqlite"] PRIMARY_TRACKER_DATABASE_FILE
exec find $TEST_PRIMARY_NODE_DIR -name "ledger.tracker.sqlite*" -delete
diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go
new file mode 100644
index 000000000..95801258d
--- /dev/null
+++ b/test/e2e-go/features/devmode/devmode_test.go
@@ -0,0 +1,66 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// Check that devmode is functioning as designed.
+package devmode
+
+import (
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestDevMode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+
+ // Start devmode network, and make sure everything is primed by sending a transaction.
+ var fixture fixtures.RestClientFixture
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "DevModeNetwork.json"))
+ fixture.Start()
+ sender, err := fixture.GetRichestAccount()
+ require.NoError(t, err)
+ key := crypto.GenerateSignatureSecrets(crypto.Seed{})
+ receiver := basics.Address(key.SignatureVerifier)
+ txn := fixture.SendMoneyAndWait(0, 100000, 1000, sender.Address, receiver.String(), "")
+ firstRound := txn.ConfirmedRound + 1
+ start := time.Now()
+
+ // 2 transactions should be sent within one normal confirmation time.
+ for i := uint64(0); i < 2; i++ {
+ txn = fixture.SendMoneyAndWait(firstRound+i, 100000, 1000, sender.Address, receiver.String(), "")
+ require.Equal(t, firstRound+i, txn.FirstRound)
+ }
+ require.True(t, time.Since(start) < 2*time.Second, "Transactions should be quickly confirmed.")
+
+ // Without transactions there should be no rounds even after a normal confirmation time.
+ time.Sleep(10 * time.Second)
+ status, err := fixture.LibGoalClient.Status()
+ require.NoError(t, err)
+ require.Equal(t, txn.ConfirmedRound, status.LastRound, "There should be no rounds without a transaction.")
+}
diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
new file mode 100644
index 000000000..606ae7ce7
--- /dev/null
+++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
@@ -0,0 +1,126 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package participation
+
+// Tests in this file are focused on testing how a specific account uses and
+// manages its participation keys. DevMode is used to make things more
+// deterministic.
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+// installParticipationKey generates a new key for a given account and installs it with the client.
+func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
+ dir, err := ioutil.TempDir("", "temporary_partkey_dir")
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ // Install overlapping participation keys...
+ part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, dir)
+ require.NoError(t, err)
+ require.NotNil(t, filePath)
+ require.Equal(t, addr, part.Parent.String())
+
+ resp, err = client.AddParticipationKey(filePath)
+ return
+}
+
+func registerParticipationAndWait(t *testing.T, client libgoal.Client, part account.Participation) generated.NodeStatusResponse {
+ txParams, err := client.SuggestedParams()
+ require.NoError(t, err)
+ sAccount := part.Address().String()
+ sWH, err := client.GetUnencryptedWalletHandle()
+ require.NoError(t, err)
+ goOnlineTx, err := client.MakeUnsignedGoOnlineTx(sAccount, &part, txParams.LastRound+1, txParams.LastRound+1, txParams.Fee, [32]byte{})
+ require.NoError(t, err)
+ require.Equal(t, sAccount, goOnlineTx.Src().String())
+ onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx)
+ require.NoError(t, err)
+ require.NotEmpty(t, onlineTxID)
+ status, err := client.WaitForRound(txParams.LastRound)
+ require.NoError(t, err)
+ return status
+}
+
+func TestKeyRegistration(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+
+ // Start devmode network and initialize things for the test.
+ var fixture fixtures.RestClientFixture
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "DevModeOneWallet.json"))
+ fixture.Start()
+ sClient := fixture.GetLibGoalClientForNamedNode("Node")
+ minTxnFee, _, err := fixture.MinFeeAndBalance(0)
+ require.NoError(t, err)
+ accountResponse, err := fixture.GetRichestAccount()
+ require.NoError(t, err)
+ sAccount := accountResponse.Address
+
+ // Add an overlapping participation keys for the account on round 1 and 2
+ last := uint64(6_000_000)
+ numNew := 2
+ for i := 0; i < numNew; i++ {
+ response, part, err := installParticipationKey(t, sClient, sAccount, 0, last)
+ require.NoError(t, err)
+ require.NotNil(t, response)
+ registerParticipationAndWait(t, sClient, part)
+ }
+
+ // Make sure the new keys are installed.
+ keys, err := fixture.LibGoalClient.GetParticipationKeys()
+ require.NoError(t, err)
+ require.Len(t, keys, numNew+1)
+
+ // Zip ahead MaxBalLookback.
+ params, err := fixture.CurrentConsensusParams()
+ require.NoError(t, err)
+ lookback := params.MaxBalLookback
+ for i := uint64(1); i < lookback; i++ {
+ fixture.SendMoneyAndWait(2+i, 0, minTxnFee, sAccount, sAccount, "")
+ }
+
+ keys, err = fixture.LibGoalClient.GetParticipationKeys()
+ require.Equal(t, *(keys[0].EffectiveFirstValid), uint64(1))
+ require.Equal(t, *(keys[0].EffectiveLastValid), lookback)
+ require.Equal(t, *(keys[0].LastBlockProposal), lookback)
+
+ require.Equal(t, *(keys[1].EffectiveFirstValid), lookback+1)
+ require.Equal(t, *(keys[1].EffectiveLastValid), lookback+1)
+ require.Equal(t, *(keys[1].LastBlockProposal), lookback+1)
+
+ require.Equal(t, *(keys[2].EffectiveFirstValid), lookback+2)
+ require.Equal(t, *(keys[2].EffectiveLastValid), last)
+ require.Equal(t, *(keys[2].LastBlockProposal), lookback+2)
+}
diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go
new file mode 100644
index 000000000..915691d03
--- /dev/null
+++ b/test/e2e-go/features/participation/participationExpiration_test.go
@@ -0,0 +1,196 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package participation
+
+import (
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string) {
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ pClient := fixture.GetLibGoalClientForNamedNode("Primary")
+
+ sClient := fixture.GetLibGoalClientForNamedNode("Secondary")
+ sWH, err := sClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ sAccount, err := sClient.GenerateAddress(sWH)
+ a.NoError(err)
+
+ // send money to new account from some other account in the template, so that account can go online
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+ richAccount := accountList[0].Address
+ _, initialRound := fixture.GetBalanceAndRound(richAccount)
+
+ minTxnFee, minAcctBalance, err := fixture.MinFeeAndBalance(initialRound)
+ a.NoError(err)
+
+ transactionFee := minTxnFee
+ amountToSendInitial := 5 * minAcctBalance
+
+ initialAmt, err := sClient.GetBalance(sAccount)
+ a.NoError(err)
+
+ fixture.SendMoneyAndWait(initialRound, amountToSendInitial, transactionFee, richAccount, sAccount, "")
+
+ newAmt, err := sClient.GetBalance(sAccount)
+ a.NoError(err)
+
+ a.GreaterOrEqual(newAmt, initialAmt)
+
+ newAccountStatus, err := pClient.AccountInformation(sAccount)
+ a.NoError(err)
+ a.Equal(basics.Offline.String(), newAccountStatus.Status)
+
+ var onlineTxID string
+ var partKeyLastValid uint64
+
+ startTime := time.Now()
+ for time.Since(startTime) < 2*time.Minute {
+ _, currentRound := fixture.GetBalanceAndRound(richAccount)
+ // account adds part key
+ partKeyFirstValid := uint64(0)
+ partKeyValidityPeriod := uint64(10)
+ partKeyLastValid = currentRound + partKeyValidityPeriod
+ partkeyResponse, _, err := sClient.GenParticipationKeys(sAccount, partKeyFirstValid, partKeyLastValid, 0)
+ a.NoError(err)
+ a.Equal(sAccount, partkeyResponse.Parent.String())
+
+ // account uses part key to go online
+ goOnlineTx, err := sClient.MakeUnsignedGoOnlineTx(sAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{})
+ a.NoError(err)
+
+ a.Equal(sAccount, goOnlineTx.Src().String())
+ onlineTxID, err = sClient.SignAndBroadcastTransaction(sWH, nil, goOnlineTx)
+
+ if err == nil {
+ break
+ }
+
+ if strings.Contains(err.Error(), "transaction tries to mark an account as online with last voting round in the past") {
+ continue
+ }
+
+ // Error occurred
+ logging.TestingLog(t).Errorf("signAndBroadcastTransaction error: %s", err.Error())
+ logging.TestingLog(t).Errorf("first valid: %d, last valid: %d, current round: %d", partKeyFirstValid, partKeyLastValid, currentRound)
+ a.NoError(err)
+ }
+
+ fixture.AssertValidTxid(onlineTxID)
+ maxRoundsToWaitForTxnConfirm := uint64(3)
+
+ sNodeStatus, err := sClient.Status()
+ a.NoError(err)
+ seededRound := sNodeStatus.LastRound
+
+ fixture.WaitForTxnConfirmation(seededRound+maxRoundsToWaitForTxnConfirm, sAccount, onlineTxID)
+ sNodeStatus, _ = sClient.Status()
+ newAccountStatus, err = pClient.AccountInformation(sAccount)
+ a.NoError(err)
+ a.Equal(basics.Online.String(), newAccountStatus.Status)
+ sAccountData, err := sClient.AccountData(sAccount)
+ a.NoError(err)
+
+ lastValidRound := sAccountData.VoteLastValid
+
+ a.Equal(basics.Round(partKeyLastValid), lastValidRound)
+
+ // We want to wait until we get to one round past the last valid round
+ err = fixture.WaitForRoundWithTimeout(uint64(lastValidRound) + 1)
+ newAccountStatus, err = pClient.AccountInformation(sAccount)
+ a.NoError(err)
+
+ // The account should be online still...
+ a.Equal(basics.Online.String(), newAccountStatus.Status)
+
+ // Now we want to send a transaction to the account and test that
+ // it was taken offline after we sent it something
+
+ _, initialRound = fixture.GetBalanceAndRound(richAccount)
+
+ blk, err := sClient.Block(initialRound)
+ a.NoError(err)
+ a.Equal(blk.CurrentProtocol, protocolCheck)
+
+ fixture.SendMoneyAndWait(initialRound, amountToSendInitial, transactionFee, richAccount, sAccount, "")
+
+ err = fixture.WaitForRoundWithTimeout(uint64(initialRound) + 3)
+
+ newAccountStatus, err = pClient.AccountInformation(sAccount)
+ a.NoError(err)
+
+ // The account should be equal to the target status now
+ a.Equal(finalStatus.String(), newAccountStatus.Status)
+}
+
+// TestParticipationAccountsExpirationFuture tests that sending a transaction to an account with
+// its last valid round being less than the current round will turn it offline. This test will only
+// work when the consensus protocol enables it (in this case the future protocol)
+func TestParticipationAccountsExpirationFuture(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+
+ var fixture fixtures.RestClientFixture
+
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodesExpiredOfflineVFuture.json"))
+
+ fixture.Start()
+ defer fixture.Shutdown()
+
+ testExpirationAccounts(t, &fixture, basics.Offline, "future")
+}
+
+// TestParticipationAccountsExpirationNonFuture tests that sending a transaction to an account with
+// its last valid round being less than the current round will NOT turn it offline. This tests that
+// when the consensus protocol is less than the required version, it will not turn nodes offline
+func TestParticipationAccountsExpirationNonFuture(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+
+ var fixture fixtures.RestClientFixture
+
+ // V29 is the version before participation key expiration checking was enabled
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodesExpiredOfflineV29.json"))
+
+ fixture.Start()
+ defer fixture.Shutdown()
+
+ testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29))
+}
diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
index d686ed798..3ec2c323d 100644
--- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
+++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -244,7 +245,16 @@ func TestPartitionHalfOffline(t *testing.T) {
// Start all but 10% of stake and verify we recover
var fixture fixtures.RestClientFixture
- fixture.Setup(t, filepath.Join("nettemplates", "TenNodesDistributedMultiWallet.json"))
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TenNodesDistributedMultiWallet.json"))
+ for _, nodeDir := range fixture.NodeDataDirs() {
+ cfg, err := config.LoadConfigFromDisk(nodeDir)
+ a.NoError(err)
+ // adjust the refresh interval for one hour, so that we won't be reloading the participation key during this test.
+ cfg.ParticipationKeysRefreshInterval = time.Hour
+ cfg.SaveToDisk(nodeDir)
+ }
+ fixture.Start()
+
defer fixture.Shutdown()
// Get the 1st node (with Node1-3 wallets) so we can wait until it has reached the target round
diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go
index b5cd81afb..3cefff35d 100644
--- a/test/e2e-go/features/transactions/accountv2_test.go
+++ b/test/e2e-go/features/transactions/accountv2_test.go
@@ -89,7 +89,7 @@ func TestAccountInformationV2(t *testing.T) {
proto.AgreementFilterTimeout = 400 * time.Millisecond
fixture.SetConsensus(config.ConsensusProtocols{protocol.ConsensusFuture: proto})
- fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV26.json"))
defer fixture.Shutdown()
client := fixture.LibGoalClient
@@ -105,13 +105,15 @@ func TestAccountInformationV2(t *testing.T) {
fee := uint64(1000)
- round, err := client.CurrentRound()
- a.NoError(err)
+ var txn transactions.Transaction
// Fund the manager, so it can issue transactions later on
- _, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
+ txn, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
a.NoError(err)
- client.WaitForRound(round + 4)
+
+ round, err := client.CurrentRound()
+ a.NoError(err)
+ fixture.WaitForConfirmedTxn(round+4, creator, txn.ID().String())
// There should be no apps to start with
ad, err := client.AccountData(creator)
@@ -165,10 +167,10 @@ int 1
a.NoError(err)
signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
- round, err = client.CurrentRound()
- a.NoError(err)
txid, err := client.BroadcastTransaction(signedTxn)
a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
// ensure transaction is accepted into a block within 5 rounds.
confirmed := fixture.WaitForAllTxnsToConfirm(round+5, map[string]string{txid: signedTxn.Txn.Sender.String()})
a.True(confirmed)
@@ -214,10 +216,10 @@ int 1
a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
- round, err = client.CurrentRound()
- a.NoError(err)
txid, err = client.BroadcastTransaction(signedTxn)
a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
_, err = client.WaitForRound(round + 3)
a.NoError(err)
// Ensure the txn committed
@@ -285,16 +287,23 @@ int 1
a.NoError(err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
- round, err = client.CurrentRound()
- a.NoError(err)
- _, err = client.BroadcastTransaction(signedTxn)
- a.NoError(err)
- _, err = client.WaitForRound(round + 2)
- a.NoError(err)
- // Ensure the txn committed
- resp, err = client.GetPendingTransactions(2)
+ txid, err = client.BroadcastTransaction(signedTxn)
a.NoError(err)
- a.Equal(uint64(0), resp.TotalTxns)
+ for {
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ _, err = client.WaitForRound(round + 1)
+ a.NoError(err)
+ // Ensure the txn committed
+ resp, err = client.GetPendingTransactions(2)
+ a.NoError(err)
+ if resp.TotalTxns == 1 {
+ a.Equal(resp.TruncatedTxns.Transactions[0].TxID, txid)
+ continue
+ }
+ a.Equal(uint64(0), resp.TotalTxns)
+ break
+ }
ad, err = client.AccountData(creator)
a.NoError(err)
diff --git a/test/e2e-go/features/transactions/messageRate_test.go b/test/e2e-go/features/transactions/messageRate_test.go
deleted file mode 100644
index ac0394e64..000000000
--- a/test/e2e-go/features/transactions/messageRate_test.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package transactions
-
-import (
- "bufio"
- "context"
- "fmt"
- "io/ioutil"
- "math"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/account"
- "github.com/algorand/go-algorand/libgoal"
- "github.com/algorand/go-algorand/test/framework/fixtures"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/db"
-)
-
-// this test checks that the txsync outgoing message rate
-// varies according to the transaction rate
-func TestMessageRateChangesWithTxnRate(t *testing.T) {
- partitiontest.PartitionTest(t)
- if _, present := os.LookupEnv("GORACE"); present {
- t.Skip("Skipping MessageRateChangesWithTxnRate test when race mode is enabled")
- }
- a := require.New(fixtures.SynchronizedTest(t))
- txnRates := []uint{50, 300, 800, 1200}
- if testing.Short() {
- txnRates = []uint{50, 300}
- }
- prevMsgRate := 0.0
- for _, txnRate := range txnRates {
- avgTps, msgRate := testMessageRateChangesWithTxnRate(t, filepath.Join("nettemplates", "OneNodeTwoRelays.json"), txnRate, a)
- fmt.Printf("Message rate: %f Previous Message Rate: %f \nExpected Transaction rate: %f Actual Transaction rate: %f\n", msgRate, prevMsgRate, float64(txnRate), avgTps)
- aErrorMessage := fmt.Sprintf("TxSync message rate not monotonic for txn rate: %d", txnRate)
- a.GreaterOrEqual(msgRate, prevMsgRate, aErrorMessage)
- prevMsgRate = msgRate
- }
-
-}
-
-func throttleTransactionRate(startTime time.Time, txnRate uint, sentSoFar uint) float64 {
- timeDelta := time.Since(startTime)
- currentTps := float64(sentSoFar) / timeDelta.Seconds()
- if currentTps > float64(txnRate) {
- sleepDuration := float64(sentSoFar)/float64(txnRate) - timeDelta.Seconds()
- sleepTime := time.Duration(int64(math.Round(sleepDuration*1000))) * time.Millisecond
- time.Sleep(sleepTime)
- currentTps = float64(sentSoFar) / (sleepDuration + timeDelta.Seconds())
- }
- return currentTps
-}
-
-func testMessageRateChangesWithTxnRate(t *testing.T, templatePath string, txnRate uint, a *require.Assertions) (avgTps, msgRate float64) {
- var fixture fixtures.RestClientFixture
- fixture.SetupNoStart(t, templatePath)
- nodeDataDir, err := fixture.GetNodeDir("Node")
- a.NoError(err)
- cfg, err := config.LoadConfigFromDisk(nodeDataDir)
- a.NoError(err)
- cfg.EnableVerbosedTransactionSyncLogging = true
- cfg.SaveToDisk(nodeDataDir)
- fixture.Start()
-
- defer fixture.Shutdown()
-
- client := fixture.GetLibGoalClientForNamedNode("Node")
- accountsList, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir())
- a.NoError(err)
- account := accountsList[0].Address
- clientAlgod := fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(nodeDataDir))
-
- // get the node account's secret key
- secretKey, err := fetchSecretKey(client, nodeDataDir)
- a.NoError(err)
- signatureSecrets, err := crypto.SecretKeyToSignatureSecrets(secretKey)
- a.NoError(err)
-
- // build the path for the primary node's log file
- logPath := filepath.Join(nodeDataDir, "node.log")
-
- // Get the relay's gossip port
- r1, err := fixture.GetNodeController("Relay1")
- a.NoError(err)
- listeningURLRaw, err := r1.GetListeningAddress()
- a.NoError(err)
- listeningURL := strings.Split(listeningURLRaw, "//")[1]
-
- errChan := make(chan error)
- resetChan := make(chan bool)
- msgRateChan := make(chan float64)
- ctx, stopParsing := context.WithCancel(context.Background())
- defer stopParsing()
-
- go parseLog(ctx, logPath, listeningURL, errChan, msgRateChan, resetChan)
-
- // get the min transaction fee
- minTxnFee, _, err := fixture.CurrentMinFeeAndBalance()
- a.NoError(err)
- transactionFee := minTxnFee * 1000 * 253
-
- startTime := time.Now()
- txnSentCount := uint(0)
-
- for {
- // send txns at txnRate for 30s
- timeSinceStart := time.Since(startTime)
- if timeSinceStart > 30*time.Second {
- break
- }
-
- tx, err := client.ConstructPayment(account, account, transactionFee, 0, GenerateRandomBytes(8), "", [32]byte{}, 0, 0)
- a.NoError(err)
- signedTxn := tx.Sign(signatureSecrets)
-
- _, err = clientAlgod.SendRawTransaction(signedTxn)
- a.NoError(err, "Unable to send raw txn")
-
- txnSentCount++
-
- avgTps = throttleTransactionRate(startTime, txnRate, txnSentCount)
- }
-
- // send reset on resetChan to signal the parseLog goroutine to send the msgRate and reset its counters
- resetChan <- true
-
- select {
- case err := <-errChan:
- a.Error(err)
- case msgRate = <-msgRateChan:
- break
- }
- return
-}
-
-// parseLog continuously monitors the log for txnsync messages sent to filterAddress
-// resetChan is used to signal it to send results on msgRate chan
-// and reset its internal counters
-// errChan is used to propagate errors if any
-func parseLog(ctx context.Context, logPath string, filterAddress string, errChan chan error, msgRateChan chan float64, resetChan chan bool) {
- file, err := os.Open(logPath)
- if err != nil {
- errChan <- err
- return
- }
- defer file.Close()
-
- messageCount := 0
- var firstTimestamp, lastTimestamp time.Time
- firstTimestamp = time.Now()
-
- scanner := bufio.NewScanner(file)
- for {
- select {
- case <-ctx.Done():
- return
- case <-resetChan:
- lastTimestamp = time.Now()
- msgRate := float64(messageCount) / float64(lastTimestamp.Sub(firstTimestamp)) * float64(time.Second)
- msgRateChan <- msgRate
- messageCount = 0
- firstTimestamp = time.Now()
- continue
- default:
- }
- scanned := scanner.Scan()
- if !scanned {
- if err := scanner.Err(); err != nil {
- errChan <- err
- return
- }
- time.Sleep(100 * time.Millisecond)
- scanner = bufio.NewScanner(file)
- continue
- }
-
- line := scanner.Text()
- // look for txnsync messages sent to `filterAddress`
- if strings.Contains(line, "Outgoing Txsync") && strings.Contains(line, filterAddress) {
- messageCount++
- }
- }
-}
-
-func fetchSecretKey(client libgoal.Client, dataDir string) (crypto.PrivateKey, error) {
- secretKey := crypto.PrivateKey{}
- genID, err := client.GenesisID()
- if err != nil {
- return secretKey, err
- }
-
- keyDir := filepath.Join(dataDir, genID)
- files, err := ioutil.ReadDir(keyDir)
- if err != nil {
- return secretKey, err
- }
-
- // For each of these files
- for _, info := range files {
- var handle db.Accessor
-
- filename := info.Name()
-
- // If it isn't a key file we care about, skip it
- if config.IsRootKeyFilename(filename) {
- handle, err = db.MakeAccessor(filepath.Join(keyDir, filename), true, false)
- if err != nil {
- // Couldn't open it, skip it
- continue
- }
-
- // Fetch an account.Root from the database
- root, err := account.RestoreRoot(handle)
- if err != nil {
- return secretKey, err
- }
-
- secretKey = crypto.PrivateKey(root.Secrets().SK)
- break
- }
-
- }
-
- return secretKey, nil
-}
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index ae11235be..3c12bf4c9 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -17,6 +17,7 @@
package transactions
import (
+ "fmt"
"path/filepath"
"testing"
@@ -145,3 +146,36 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
a.Equal(unmarkedAccountStatus.Status, basics.NotParticipating.String())
}
}
+
+func TestCloseOnError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesPartlyOfflineVFuture.json"))
+ defer fixture.Shutdown()
+ client := fixture.LibGoalClient
+
+ // Capture the account we're tracking
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+
+ initiallyOnline := accountList[0].Address // 35% stake
+ initiallyOffline := accountList[1].Address // 20% stake
+
+ // get the current round for partkey creation
+ _, curRound := fixture.GetBalanceAndRound(initiallyOnline)
+
+ // make a participation key for initiallyOffline
+ _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
+ a.NoError(err)
+ // check duplicate keys does not crash
+ _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
+ a.Equal("PersistedParticipation.Persist: failed to install database: table ParticipationAccount already exists", err.Error())
+ // check lastValid < firstValid does not crash
+ _, _, err = client.GenParticipationKeys(initiallyOffline, curRound+1001, curRound+1000, 0)
+ expected := fmt.Sprintf("FillDBWithParticipationKeys: firstValid %d is after lastValid %d", int(curRound+1001), int(curRound+1000))
+ a.Equal(expected, err.Error())
+}
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index a04b86dd7..fadf5b620 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -17,14 +17,12 @@
package transactions
import (
- "fmt"
"math/rand"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
- "github.com/algorand/go-algorand/config"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -68,16 +66,11 @@ func TestDevModeAccountsCanSendMoney(t *testing.T) {
func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends int) {
t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
fixture.Setup(t, templatePath)
defer fixture.Shutdown()
- testAccountsCanSendMoneyFixture(t, &fixture, numberOfSends)
-}
-
-func testAccountsCanSendMoneyFixture(t *testing.T, fixture *fixtures.RestClientFixture, numberOfSends int) {
- a := require.New(fixtures.SynchronizedTest(t))
-
c := fixture.LibGoalClient
pingClient := fixture.LibGoalClient
@@ -166,104 +159,3 @@ func testAccountsCanSendMoneyFixture(t *testing.T, fixture *fixtures.RestClientF
a.True(expectedPingBalance <= pingBalance, "ping balance is different than expected.")
a.True(expectedPongBalance <= pongBalance, "pong balance is different than expected.")
}
-
-// this test checks that two accounts' balances stay up to date
-// as they send each other money many times
-func TestAccountsCanSendMoneyAcrossTxSync(t *testing.T) {
- partitiontest.PartitionTest(t)
- defer fixtures.ShutdownSynchronizedTest(t)
-
- numberOfSends := 3
- a := require.New(fixtures.SynchronizedTest(t))
-
- networkProtocolVersions := []string{"3.0", "2.1"}
-
- testMoneySending := func(t *testing.T, primaryNodeVersionIdx, nodeVersionIdx int) {
- t.Run(fmt.Sprintf("%s->%s", networkProtocolVersions[primaryNodeVersionIdx], networkProtocolVersions[nodeVersionIdx]),
- func(t *testing.T) {
- t.Parallel()
- var fixture fixtures.RestClientFixture
- fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
- defer fixture.Shutdown()
-
- cfg, err := config.LoadConfigFromDisk(fixture.PrimaryDataDir())
- a.NoError(err)
- cfg.NetworkProtocolVersion = networkProtocolVersions[primaryNodeVersionIdx]
- cfg.SaveToDisk(fixture.PrimaryDataDir())
-
- cfg, err = config.LoadConfigFromDisk(fixture.NodeDataDirs()[0])
- a.NoError(err)
- cfg.NetworkProtocolVersion = networkProtocolVersions[primaryNodeVersionIdx]
- cfg.SaveToDisk(fixture.NodeDataDirs()[0])
-
- fixture.Start()
- testAccountsCanSendMoneyFixture(t, &fixture, numberOfSends)
- })
- }
-
- // test to see that we can communicate correctly regardless of the network protocol version.
- for primaryNodeVersionIdx := 0; primaryNodeVersionIdx < 2; primaryNodeVersionIdx++ {
- for nodeVersionIdx := 0; nodeVersionIdx < 2; nodeVersionIdx++ {
- testMoneySending(t, primaryNodeVersionIdx, nodeVersionIdx)
- }
- }
-}
-
-// this test checks that a relay would relay a transaction
-// received via the txnsync onto a node that doesn't support
-// transaction sync.
-func TestTransactionSyncRelayBridge(t *testing.T) {
-
- partitiontest.PartitionTest(t)
- defer fixtures.ShutdownSynchronizedTest(t)
-
- a := require.New(fixtures.SynchronizedTest(t))
-
- var fixture fixtures.RestClientFixture
- fixture.SetupNoStart(t, filepath.Join("nettemplates", "ThreeNodesOneOnline.json"))
- defer fixture.Shutdown()
-
- onlineNodeController, err := fixture.GetNodeController("OnlineNode")
- a.NoError(err)
-
- cfg, err := config.LoadConfigFromDisk(onlineNodeController.GetDataDir())
- a.NoError(err)
- cfg.NetworkProtocolVersion = "2.1"
- cfg.SaveToDisk(onlineNodeController.GetDataDir())
-
- offlineNodeController, err := fixture.GetNodeController("OfflineNode")
- a.NoError(err)
-
- cfg, err = config.LoadConfigFromDisk(offlineNodeController.GetDataDir())
- a.NoError(err)
- cfg.NetworkProtocolVersion = "3.0"
- cfg.SaveToDisk(offlineNodeController.GetDataDir())
-
- fixture.Start()
-
- client := fixture.GetLibGoalClientFromNodeController(offlineNodeController)
- accounts, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir())
- a.NoError(err)
-
- a.Equal(1, len(accounts))
-
- sendingAccount := accounts[0].Address
-
- _, err = client.SendPaymentFromUnencryptedWallet(sendingAccount, sendingAccount, 1024*1024, 1024, nil)
- a.NoError(err)
-
- startRoundStatus, err := client.Status()
- a.NoError(err)
- for {
- pendingTxns, err := client.GetPendingTransactions(2)
- a.NoError(err)
- if pendingTxns.TotalTxns == 0 {
- break
- }
- status, err := client.Status()
- a.NoError(err)
- _, err = client.WaitForRound(status.LastRound)
- a.NoError(err)
- a.Less(uint64(status.LastRound), uint64(startRoundStatus.LastRound+5), "transaction is still pending after 5 rounds, whereas it should have been confirmed within 2 rounds.")
- }
-}
diff --git a/test/e2e-go/features/transactions/txnsync_test.go b/test/e2e-go/features/transactions/txnsync_test.go
deleted file mode 100644
index ee373ea5e..000000000
--- a/test/e2e-go/features/transactions/txnsync_test.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package transactions
-
-import (
- "context"
- "fmt"
- "math"
- "path/filepath"
- "testing"
- "time"
-
- "github.com/algorand/go-deadlock"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/libgoal"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/framework/fixtures"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-// This test sets up a network with 2 nodes and 2 relays.
-// The two nodes send payment transactions.
-
-// For each transaction, the test checks if the relays and the nodes
-// (including the node that originated the transaction) have the
-// transaction in the pool (i.e. the transactionInfo.ConfirmedRound ==
-// 0).
-
-// The tests needs to check for the transactions in the pool fast
-// enough before they get evicted from the pool to the block.
-
-// To achieve this, it sends transactions during the first half of the
-// round period, to give the test enough time to check for the
-// transactions.
-func TestTxnSync(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- maxNumberOfSends := 1200
- maxRate := 1000 // txn/sec
- if testing.Short() {
- maxNumberOfSends = 300
- }
- templatePath := filepath.Join("nettemplates", "TwoNodes50EachWithTwoRelays.json")
-
- var fixture fixtures.RestClientFixture
-
- roundTime := time.Duration(8 * 1000 * time.Millisecond)
-
- proto, ok := config.Consensus[protocol.ConsensusCurrentVersion]
- require.True(t, ok)
- proto.AgreementFilterTimeoutPeriod0 = roundTime
- proto.AgreementFilterTimeout = roundTime
- fixture.SetConsensus(config.ConsensusProtocols{protocol.ConsensusCurrentVersion: proto})
-
- fixture.Setup(t, templatePath)
- defer fixture.Shutdown()
-
- node1 := fixture.GetLibGoalClientForNamedNode("Node1")
- node2 := fixture.GetLibGoalClientForNamedNode("Node2")
- relay1 := fixture.GetLibGoalClientForNamedNode("Relay1")
- relay2 := fixture.GetLibGoalClientForNamedNode("Relay2")
-
- n1chan := make(chan string, maxNumberOfSends*2)
- n2chan := make(chan string, maxNumberOfSends*2)
- r1chan := make(chan string, maxNumberOfSends*2)
- r2chan := make(chan string, maxNumberOfSends*2)
-
- ctx, cancel := context.WithCancel(context.Background())
-
- account1List, err := fixture.GetNodeWalletsSortedByBalance(node1.DataDir())
- require.NoError(t, err)
- account1 := account1List[0].Address
-
- account2List, err := fixture.GetNodeWalletsSortedByBalance(node2.DataDir())
- require.NoError(t, err)
- account2 := account2List[0].Address
-
- ttn1 := transactionTracker{
- t: t,
- ctx: ctx,
- client: &node1,
- othersToVerify: []chan string{n2chan, r1chan, r2chan, n1chan},
- selfToVerify: n1chan,
- pendingVerification: make(map[string]bool),
- account1: account1,
- account2: account2,
- name: "node1",
- cancelFunc: cancel,
- }
-
- ttn2 := transactionTracker{
- t: t,
- ctx: ctx,
- client: &node2,
- othersToVerify: []chan string{n1chan, r1chan, r2chan, n2chan},
- selfToVerify: n2chan,
- pendingVerification: make(map[string]bool),
- account1: account1,
- account2: account2,
- name: "node2",
- cancelFunc: cancel,
- }
-
- ttr1 := transactionTracker{
- t: t,
- ctx: ctx,
- client: &relay1,
- othersToVerify: []chan string{n1chan, n2chan, r2chan, r1chan},
- selfToVerify: r1chan,
- pendingVerification: make(map[string]bool),
- account1: account1,
- account2: account2,
- name: "relay1",
- cancelFunc: cancel,
- }
-
- ttr2 := transactionTracker{
- t: t,
- ctx: ctx,
- client: &relay2,
- othersToVerify: []chan string{n1chan, n2chan, r1chan, r2chan},
- selfToVerify: r2chan,
- pendingVerification: make(map[string]bool),
- account1: account1,
- account2: account2,
- name: "relay2",
- cancelFunc: cancel,
- }
-
- minTxnFee, minAcctBalance, err := fixture.CurrentMinFeeAndBalance()
- require.NoError(t, err)
-
- transactionFee := minTxnFee * 1000
- amount1 := minAcctBalance / uint64(maxNumberOfSends)
- amount2 := minAcctBalance / uint64(maxNumberOfSends)
-
- defer ttn1.terminate()
- defer ttn2.terminate()
- defer ttr1.terminate()
- defer ttr2.terminate()
-
- defer cancel()
-
- go ttn1.passTxnsToVeirfy()
- go ttn2.passTxnsToVeirfy()
- go ttr1.passTxnsToVeirfy()
- go ttr2.passTxnsToVeirfy()
-
- go ttn1.checkAll()
- go ttn2.checkAll()
- go ttr1.checkAll()
- go ttr2.checkAll()
-
- // wait for the 1st round
- nextRound := uint64(1)
- err = fixture.ClientWaitForRound(fixture.AlgodClient, nextRound, 20*roundTime)
- require.NoError(t, err)
- nextRound++
-
- st := time.Now()
- timeout := time.NewTimer(roundTime / 2)
-
- for i := 0; i < maxNumberOfSends; i++ {
-
- select {
- case <-ctx.Done():
- require.True(t, false, "Context canceled due to an error at iteration %d", i)
- return
- case <-timeout.C:
- // Send the transactions only during the first half of the round
- // Wait for the next round, and stop sending transactions after the first half
- err = fixture.ClientWaitForRound(fixture.AlgodClient, nextRound, 10*roundTime)
- require.NoError(t, err)
- fmt.Printf("Round %d\n", int(nextRound))
- nextRound++
- timeout = time.NewTimer(roundTime / 2)
- default:
- }
- throttleRate(st, maxRate, i*2)
- tx1, err := node1.SendPaymentFromUnencryptedWallet(account1, account2, transactionFee, amount1, GenerateRandomBytes(8))
- require.NoError(t, err, "Failed to send transaction on iteration %d", i)
- ttn1.addTransactionToVerify(tx1.ID().String())
-
- tx2, err := node2.SendPaymentFromUnencryptedWallet(account2, account1, transactionFee, amount2, GenerateRandomBytes(8))
- require.NoError(t, err, "Failed to send transaction on iteration %d", i)
- ttn2.addTransactionToVerify(tx2.ID().String())
- if i%100 == 0 {
- fmt.Printf("txn sent %d / %d\n", i, maxNumberOfSends)
- }
- }
- close(ttn1.selfToVerify)
- close(ttn2.selfToVerify)
- close(ttr1.selfToVerify)
- close(ttr2.selfToVerify)
-
- // wait until all channels are empty for max 50 seconds
- for x := 0; x < 250; x++ {
- select {
- case <-ctx.Done():
- require.True(t, false, "Context canceled due to an error")
- default:
- }
-
- if ttn1.channelsAreEmpty() {
- break
- }
- time.Sleep(200 * time.Millisecond)
- if x%10 == 0 {
- fmt.Printf("waiting for channel flushing [%d %d %d %d] %d / %d\n", len(n1chan), len(n2chan), len(r1chan), len(r2chan), x, 250)
- }
- }
- require.True(t, ttn1.channelsAreEmpty())
-
- unprocessed := 0
- maxWait := 100
- for x := 0; x < maxWait; x++ {
- select {
- case <-ctx.Done():
- require.True(t, false, "Context canceled due to an error")
- default:
- }
- ttn1.mu.Lock()
- unprocessed = len(ttn1.pendingVerification)
- ttn1.mu.Unlock()
-
- ttn2.mu.Lock()
- unprocessed += len(ttn2.pendingVerification)
- ttn2.mu.Unlock()
-
- ttr1.mu.Lock()
- unprocessed += len(ttr1.pendingVerification)
- ttr1.mu.Unlock()
-
- ttr2.mu.Lock()
- unprocessed += len(ttr2.pendingVerification)
- ttr2.mu.Unlock()
-
- if unprocessed == 0 {
- break
- }
- time.Sleep(200 * time.Millisecond)
- if x%10 == 0 {
- fmt.Printf("waiting for pending verificaitons [%d] %d / %d\n", unprocessed, x, maxWait)
- }
- }
- require.Equal(t, 0, unprocessed, "missing %d transactions", unprocessed)
-}
-
-type transactionTracker struct {
- t *testing.T
- ctx context.Context
- mu deadlock.Mutex
- client *libgoal.Client
- othersToVerify []chan string
- selfToVerify chan string
- pendingVerification map[string]bool
- account1 string
- account2 string
- name string
- cancelFunc context.CancelFunc
-}
-
-// Adds the transaction to the channels of the nodes intended to receive the transaction
-// This should not block to maintain the transaction rate. Hence, the channel is large enough.
-func (tt *transactionTracker) addTransactionToVerify(transactionID string) {
- for _, c := range tt.othersToVerify {
- select {
- case <-tt.ctx.Done():
- return
- case c <- transactionID:
- }
- }
-}
-
-func (tt *transactionTracker) passTxnsToVeirfy() {
- for tid := range tt.selfToVerify {
- select {
- case <-tt.ctx.Done():
- return
- default:
- }
-
- tt.mu.Lock()
- tt.pendingVerification[tid] = true
- tt.mu.Unlock()
- }
-}
-
-func (tt *transactionTracker) checkAll() {
- for {
- select {
- case <-tt.ctx.Done():
- return
- case _, more := <-tt.selfToVerify:
- tt.mu.Lock()
- if !more && len(tt.pendingVerification) == 0 {
- tt.mu.Unlock()
- return
- }
- tt.mu.Unlock()
- default:
- }
- transactions, err := tt.client.GetPendingTransactionsByAddress(tt.account1, 1000000)
- if err != nil {
- tt.cancelFunc()
- require.NoError(tt.t, err)
- }
-
- for _, transactionInfo := range transactions.TruncatedTxns.Transactions {
- tt.mu.Lock()
- if _, ok := tt.pendingVerification[transactionInfo.TxID]; ok {
- delete(tt.pendingVerification, transactionInfo.TxID)
- }
- tt.mu.Unlock()
- }
-
- transactions, err = tt.client.GetPendingTransactionsByAddress(tt.account2, 1000000)
- if err != nil {
- tt.cancelFunc()
- require.NoError(tt.t, err)
- }
-
- for _, transactionInfo := range transactions.TruncatedTxns.Transactions {
- tt.mu.Lock()
- if _, ok := tt.pendingVerification[transactionInfo.TxID]; ok {
- delete(tt.pendingVerification, transactionInfo.TxID)
- }
- tt.mu.Unlock()
- }
- time.Sleep(time.Second)
- }
-}
-
-func (tt *transactionTracker) terminate() {
- tt.mu.Lock()
- defer tt.mu.Unlock()
- require.Equal(tt.t, 0, len(tt.pendingVerification), "%s is missing %d transactions", tt.name, len(tt.pendingVerification))
-}
-
-// Retruns true if all the associated channels are empty
-func (tt *transactionTracker) channelsAreEmpty() bool {
- for _, c := range tt.othersToVerify {
- if len(c) > 0 {
- return false
- }
- }
- return true
-}
-
-// throttle transaction rate
-func throttleRate(startTime time.Time, targetRate int, total int) {
- localTimeDelta := time.Now().Sub(startTime)
- currentTps := float64(total) / localTimeDelta.Seconds()
- if currentTps > float64(targetRate) {
- sleepSec := float64(total)/float64(targetRate) - localTimeDelta.Seconds()
- sleepTime := time.Duration(int64(math.Round(sleepSec*1000))) * time.Millisecond
- time.Sleep(sleepTime)
- }
-}
diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go
index e6a6a58ab..3def211f9 100644
--- a/test/e2e-go/upgrades/rekey_support_test.go
+++ b/test/e2e-go/upgrades/rekey_support_test.go
@@ -59,10 +59,29 @@ func TestRekeyUpgrade(t *testing.T) {
addrB, err := basics.UnmarshalChecksumAddress(accountB)
a.NoError(err)
+ accountC, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ accountD, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ addrD, err := basics.UnmarshalChecksumAddress(accountD)
+ a.NoError(err)
+
fee := uint64(1000)
amount := uint64(1000000)
lease := [32]byte{}
+ // move some money from accountA -> accountC
+ tx, err := client.ConstructPayment(accountA, accountC, fee, amount*10, nil, "", lease, basics.Round(0), basics.Round(0))
+ a.NoError(err)
+
+ fundAccountC, err := client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+
+ _, err = client.BroadcastTransaction(fundAccountC)
+ a.NoError(err)
+
curStatus, err := client.Status()
a.NoError(err)
initialStatus := curStatus
@@ -79,11 +98,11 @@ func TestRekeyUpgrade(t *testing.T) {
a.Equal(basics.Address{}, ad.AuthAddr)
// rekey A -> B (RekeyTo check)
- tx, err := client.ConstructPayment(accountA, accountB, fee, amount, nil, "", lease, basics.Round(round), basics.Round(initialStatus.NextVersionRound).SubSaturate(1))
+ tx, err = client.ConstructPayment(accountA, accountB, fee, amount, nil, "", lease, basics.Round(round), basics.Round(initialStatus.NextVersionRound).SubSaturate(1))
a.NoError(err)
tx.RekeyTo = addrB
- rekey, err := client.SignTransactionWithWalletAndSigner(wh, nil, "", tx)
+ rekey, err := client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
_, err = client.BroadcastTransaction(rekey)
@@ -137,12 +156,11 @@ func TestRekeyUpgrade(t *testing.T) {
}
// now that the network already upgraded:
-
- tx, err = client.ConstructPayment(accountA, accountB, fee, amount, nil, "", lease, basics.Round(round), basics.Round(round+1000))
+ tx, err = client.ConstructPayment(accountC, accountD, fee, amount, nil, "", lease, basics.Round(round), basics.Round(round+1000))
a.NoError(err)
- tx.RekeyTo = addrB
+ tx.RekeyTo = addrD
- rekey, err = client.SignTransactionWithWalletAndSigner(wh, nil, "", tx)
+ rekey, err = client.SignTransactionWithWallet(wh, nil, tx)
a.NoError(err)
// now, that we have upgraded to the new protocol which supports rekey, try again.
@@ -155,7 +173,7 @@ func TestRekeyUpgrade(t *testing.T) {
// use rekeyed key to authorize (AuthAddr check)
tx.RekeyTo = basics.Address{}
- rekeyed, err = client.SignTransactionWithWalletAndSigner(wh, nil, accountB, tx)
+ rekeyed, err = client.SignTransactionWithWalletAndSigner(wh, nil, accountD, tx)
a.NoError(err)
_, err = client.BroadcastTransaction(rekeyed)
diff --git a/test/framework/fixtures/fixture.go b/test/framework/fixtures/fixture.go
index 0693a7ecb..44ad4b132 100644
--- a/test/framework/fixtures/fixture.go
+++ b/test/framework/fixtures/fixture.go
@@ -109,7 +109,6 @@ func (st *synchTest) Error(args ...interface{}) {
st.Lock()
defer st.Unlock()
if !st.dontReportFailures {
- st.dontReportFailures = true
st.t.Error(args...)
}
}
@@ -117,7 +116,6 @@ func (st *synchTest) Errorf(format string, args ...interface{}) {
st.Lock()
defer st.Unlock()
if !st.dontReportFailures {
- st.dontReportFailures = true
st.t.Errorf(format, args...)
}
}
@@ -125,7 +123,6 @@ func (st *synchTest) Fail() {
st.Lock()
defer st.Unlock()
if !st.dontReportFailures {
- st.dontReportFailures = true
st.t.Fail()
}
}
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index 0aea3989c..7e57f4bfa 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -170,6 +170,7 @@ func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) {
f.failOnError(err, "couldn't import secret: %v")
}
accountsWithRootKeys[root.Address().String()] = true
+ handle.Close()
} else if config.IsPartKeyFilename(filename) {
// Fetch a handle to this database
handle, err = db.MakeErasableAccessor(filepath.Join(keyDir, filename))
diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
index 276e6cb73..d91b780ae 100644
--- a/test/heapwatch/heapWatch.py
+++ b/test/heapwatch/heapWatch.py
@@ -218,7 +218,7 @@ class watcher:
if net in self.netseen:
return
self.netseen.add(net)
- net = net + ':8580'
+ net = net + ':' + self.args.port
try:
ad = algodDir(net, net=net, token=self.args.token, admin_token=self.args.admin_token)
self.they.append(ad)
@@ -279,6 +279,7 @@ def main():
ap.add_argument('--tf-roles', default='relay', help='comma separated list of terraform roles to follow')
ap.add_argument('--tf-name-re', action='append', default=[], help='regexp to match terraform node names, may be repeated')
ap.add_argument('--no-svg', dest='svg', default=True, action='store_false', help='do not automatically run `go tool pprof` to generate svg from collected data')
+ ap.add_argument('-p', '--port', default='8580', help='algod port on each host in terraform-inventory')
ap.add_argument('-o', '--out', default=None, help='directory to write to')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
diff --git a/test/scripts/e2e_subs/app-group.py b/test/scripts/e2e_subs/app-group.py
new file mode 100755
index 000000000..738e1cb79
--- /dev/null
+++ b/test/scripts/e2e_subs/app-group.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+import os
+import sys
+from goal import Goal
+
+from datetime import datetime
+
+stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+print(f"{os.path.basename(sys.argv[0])} start {stamp}")
+
+goal = Goal(sys.argv[1], autosend=True)
+
+joe = goal.new_account()
+
+txinfo, err = goal.pay(goal.account, joe, amt=500_000)
+assert not err, err
+
+# Turn off rewards for precise balance checking
+txinfo, err = goal.keyreg(joe, nonpart=True)
+assert not err, err
+joeb = goal.balance(joe)
+
+teal = """
+#pragma version 6
+ txn ApplicationID
+ bz end
+ // Pay the sender and Accounts[1]. Force the second fee to default high
+ itxn_begin
+ int pay
+ itxn_field TypeEnum
+
+ txn Sender
+ itxn_field Receiver
+
+ int 5
+ itxn_field Amount
+
+ int 0
+ itxn_field Fee // No fee, so 2nd fee is doubled
+
+ itxn_next
+
+ int pay
+ itxn_field TypeEnum
+
+ txn Accounts 1
+ itxn_field Receiver
+
+ int 5
+ itxn_field Amount
+
+ itxn_submit
+
+ itxn Fee
+ int 2000
+ ==
+ assert
+
+end:
+ int 1
+"""
+
+txinfo, err = goal.app_create(joe, goal.assemble(teal))
+assert not err, err
+app_id = txinfo['application-index']
+assert app_id
+
+# Fund the app account
+txinfo, err = goal.pay(goal.account, goal.app_address(app_id), amt=400_000)
+assert not err, err
+
+
+txinfo, err = goal.app_call(joe, app_id, accounts=[goal.account])
+assert not err, err
+
+print(f"{os.path.basename(sys.argv[0])} OK {stamp}")
diff --git a/test/scripts/e2e_subs/app-rekey.py b/test/scripts/e2e_subs/app-rekey.py
index 189f7eed3..94bfcd22a 100755
--- a/test/scripts/e2e_subs/app-rekey.py
+++ b/test/scripts/e2e_subs/app-rekey.py
@@ -21,7 +21,6 @@ assert not err, err
txinfo, err = goal.keyreg(joe, nonpart=True)
assert not err, err
joeb = goal.balance(joe)
-print(joeb)
txinfo, err = goal.pay(goal.account, flo, amt=500_000)
assert not err, err
diff --git a/test/scripts/e2e_subs/e2e-app-abi-add.sh b/test/scripts/e2e_subs/e2e-app-abi-add.sh
new file mode 100755
index 000000000..60e1c1f3e
--- /dev/null
+++ b/test/scripts/e2e_subs/e2e-app-abi-add.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+date '+app-abi-add-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+GLOBAL_INTS=2
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+PROGRAM=($(${gcmd} clerk compile "${TEMPDIR}/simple.teal"))
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/app-abi-add-example.teal --clear-prog ${TEMPDIR}/simple.teal --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+
+# Should succeed to opt in
+${gcmd} app optin --app-id $APPID --from $ACCOUNT
+
+# Call should now succeed
+RES=$(${gcmd} app method --method "add(uint64,uint64)uint64" --arg 1 --arg 2 --app-id $APPID --from $ACCOUNT 2>&1 || true)
+EXPECTED="method add(uint64,uint64)uint64 output: 3"
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-abi-add-test FAIL the application creation should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# Delete application should still succeed
+${gcmd} app delete --app-id $APPID --from $ACCOUNT
+
+# Clear should still succeed
+${gcmd} app clear --app-id $APPID --from $ACCOUNT
diff --git a/test/scripts/e2e_subs/e2e-app-abi-arg.sh b/test/scripts/e2e_subs/e2e-app-abi-arg.sh
new file mode 100755
index 000000000..c6f719a47
--- /dev/null
+++ b/test/scripts/e2e_subs/e2e-app-abi-arg.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+date '+app-abi-arg-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+GLOBAL_INTS=2
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+PROGRAM=($(${gcmd} clerk compile "${TEMPDIR}/simple.teal"))
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/app-abi-arg.teal --clear-prog ${TEMPDIR}/simple.teal --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+
+# Should succeed to opt in with string "optin"
+${gcmd} app optin --app-id $APPID --from $ACCOUNT --app-arg 'abi:string:"optin"'
+
+# Call should now succeed
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:uint64:0'
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:byte[3]:"AAEC"'
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:(string,(byte[3],ufixed64x3)):["uwu",["AAEC",12.34]]'
+${gcmd} app call --app-id $APPID --from $ACCOUNT --app-arg 'abi:(uint64,string,bool[]):[399,"should pass",[true,false,false,true]]'
+
+# Delete application should still succeed
+${gcmd} app delete --app-id $APPID --from $ACCOUNT
+
+# Clear should still succeed
+${gcmd} app clear --app-id $APPID --from $ACCOUNT
diff --git a/test/scripts/e2e_subs/goal-partkey-information.sh b/test/scripts/e2e_subs/goal-partkey-information.sh
new file mode 100755
index 000000000..80dbd8d5c
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-partkey-information.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+# errors are handled manually, so no -e
+set -x
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# Registered Account ParticipationID Last Used First round Last round
+# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000
+OUTPUT=$(goal account listpartkeys|tail -n 1|tr -s ' ')
+if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000000 ]]; then echo "Last round should be 3000000 but wasn't."; exit 1; fi
+
+#Dumping participation key info from /tmp/tmpwtomya9x/net/Node...
+#
+#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ
+#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ
+#Last vote round: 3
+#Last block proposal round: 4
+#Effective first round: 0
+#Effective last round: 3000000
+#First round: 0
+#Last round: 3000000
+#Key dilution: 10000
+#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo=
+#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4=
+OUTPUT=$(goal account partkeyinfo)
+if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000000'; then echo "Last round should have been 3000000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000000'; then echo "Effective last round should have been 3000000."; exit 1; fi
+# 100 or 10000 due to arm64 bug
+if ! echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi
+
+# Test multiple data directory supported
+OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2"|grep -c 'Participation ID')
+if [[ "$OUTPUT" != "2" ]]; then echo "Two Participation IDs should have been found."; exit 1; fi
+
+# get stderr from this one
+OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
+EXPECTED_ERR="Only one data directory can be specified for this command."
+if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi
diff --git a/test/scripts/e2e_subs/rest-participation-key.sh b/test/scripts/e2e_subs/rest-participation-key.sh
new file mode 100755
index 000000000..30557ff2a
--- /dev/null
+++ b/test/scripts/e2e_subs/rest-participation-key.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+my_dir="$(dirname "$0")"
+source "$my_dir/rest.sh" "$@"
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# Use admin token for both get and post
+export USE_ADMIN=true
+
+pushd "${TEMPDIR}" || exit
+
+FIRST_ROUND=0
+# A really large (but arbitrary) last valid round
+LAST_ROUND=1200000
+
+NAME_OF_TEMP_PARTKEY="tmp.${FIRST_ROUND}.${LAST_ROUND}.partkey"
+
+algokey part generate --first ${FIRST_ROUND} --last ${LAST_ROUND} --keyfile ${NAME_OF_TEMP_PARTKEY} --parent ${ACCOUNT}
+
+popd || exit
+
+call_and_verify "Get List of Keys" "/v2/participation" 200 'address'
+
+# Find out how many keys there are installed so far
+NUM_IDS_1=$(echo "$RES" | python3 -c 'import json,sys;o=json.load(sys.stdin);print(len(o))')
+
+call_post_and_verify "Install a basic participation key" "/v2/participation" 200 ${NAME_OF_TEMP_PARTKEY} 'partId'
+
+# Get the returned participation id from the RESULT (aka $RES) variable
+INSTALLED_ID=$(echo "$RES" | python3 -c 'import json,sys;o=json.load(sys.stdin);print(o["partId"])')
+
+# Should contain the installed id
+call_and_verify "Get List of Keys" "/v2/participation" 200 'address' "${INSTALLED_ID}"
+
+# Get list of keys
+NUM_IDS_2=$(echo "$RES" | python3 -c 'import json,sys;o=json.load(sys.stdin);print(len(o))')
+
+if [[ $((NUM_IDS_1 + 1)) -ne $NUM_IDS_2 ]]; then
+ printf "\n\nFailed test. New number of IDs (%s) is not one more than old ID count(%s)\n\n" "${NUM_IDS_2}" "${NUM_IDS_1}"
+ exit 1
+fi
+
+call_and_verify "Get a specific ID" "/v2/participation/${INSTALLED_ID}" 200 "${INSTALLED_ID}"
+
+# Should return 200 but not return that error message
+call_delete_and_verify "Delete the specific ID" "/v2/participation/${INSTALLED_ID}" 200 false 'participation id not found'
+
+# Verify that it got called previously and now returns an error message saying that no key was found
+call_delete_and_verify "Delete the specific ID" "/v2/participation/${INSTALLED_ID}" 404 true 'participation id not found'
+
+# Get list of keys
+NUM_IDS_3=$(echo "$RES" | python3 -c 'import json,sys;o=json.load(sys.stdin);print(len(o))')
+
+if [[ "$NUM_IDS_3" -ne "$NUM_IDS_1" ]]; then
+ printf "\n\nFailed test. New number of IDs (%s) is not equal to original ID count (%s)\n\n" "${NUM_IDS_3}" "${NUM_IDS_1}"
+ exit 1
+fi
+
+
diff --git a/test/scripts/e2e_subs/rest.sh b/test/scripts/e2e_subs/rest.sh
index fab6f1d51..4613a7ba9 100755
--- a/test/scripts/e2e_subs/rest.sh
+++ b/test/scripts/e2e_subs/rest.sh
@@ -35,21 +35,155 @@ function base_call {
}
+function base_post_call {
+ curl -X POST --data-binary @/${TEMPDIR}/$4 -o "$3" -w "%{http_code}" -q -s -H "Authorization: Bearer $1" "$NET$2"
+}
+
+
+function base_delete_call {
+ curl -X DELETE -o "$3" -w "%{http_code}" -q -s -H "Authorization: Bearer $1" "$NET$2"
+}
+
function call_admin {
base_call "$ADMIN_TOKEN" "$1" "$2"
}
+function call_post_admin {
+ base_post_call "$ADMIN_TOKEN" "$1" "$2" "$3"
+}
+
+function call_delete_admin {
+ base_delete_call "$ADMIN_TOKEN" "$1" "$2" "$3"
+}
function call {
base_call "$PUB_TOKEN" "$1" "$2"
}
+function call_post {
+ base_post_call "$PUB_TOKEN" "$1" "$2"
+}
+
+function call_delete {
+ base_delete_call "$PUB_TOKEN" "$1" "$2"
+}
+
function fail_and_exit {
printf "\n\nFailed test - $1 ($2): $3\n\n"
exit 1
}
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - the file to upload
+# $5... - substring that should be in the response
+function call_post_and_verify {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local FILENAME_TO_UPLOAD="$1"
+ shift
+
+ echo "MATCHING $@"
+ curl_post_test "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" true "$FILENAME_TO_UPLOAD" "$@"
+}
+
+
+# CURL POST Test - POST query and verify results
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - match result
+# $5 - the file to upload
+# $6... - substring(s) that should be in the response
+function curl_post_test {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local MATCH_RESULT="$1"
+ shift
+ local FILENAME_TO_UPLOAD="$1"
+ shift
+
+ set +e
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_post_admin "$QUERY" "${TEMPDIR}/curl_out.txt" "$FILENAME_TO_UPLOAD")
+ else
+ CODE=$(call_post "$QUERY" "${TEMPDIR}/curl_out.txt" "$FILENAME_TO_UPLOAD")
+ fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
+# CURL Test - query and verify results
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - match result
+function call_delete_and_verify {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+
+ local MATCH_RESULT="$1"
+ shift
+
+ set +e
+
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_delete_admin "$QUERY" "${TEMPDIR}/curl_out.txt")
+ else
+ CODE=$(call_delete "$QUERY" "${TEMPDIR}/curl_out.txt" )
+ fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
+
+# CURL Test - query and verify results
+# $1 - test description.
+# $2 - query
+# $3 - expected status code
+# $4 - match result
+# $5... - substring(s) that should be in the response
+function curl_test {
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local MATCH_RESULT="$1"
+ shift
+
+ set +e
+
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_admin "$QUERY" "${TEMPDIR}/curl_out.txt")
+ else
+ CODE=$(call "$QUERY" "${TEMPDIR}/curl_out.txt" )
+ fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
# $1 - test description.
# $2 - query
@@ -67,7 +201,7 @@ function call_and_verify {
curl_test "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" true "$@"
}
-# CURL Test - query and veryify results
+# CURL Test - query and verify results
# $1 - test description.
# $2 - query
# $3 - expected status code
@@ -82,16 +216,52 @@ function curl_test {
shift
local MATCH_RESULT="$1"
shift
- local SUBSTRING
-
- local START=$SECONDS
set +e
- local CODE=$(call "$QUERY" "${TEMPDIR}/curl_out.txt")
+
+ local CODE
+ if [[ "$USE_ADMIN" = true ]]; then
+ CODE=$(call_admin "$QUERY" "${TEMPDIR}/curl_out.txt")
+ else
+ CODE=$(call "$QUERY" "${TEMPDIR}/curl_out.txt" )
+ fi
+
if [[ $? != 0 ]]; then
cat $CURL_TEMPFILE
fail_and_exit "$DESCRIPTION" "$QUERY" "curl had a non-zero exit code."
fi
+
+ verify $? "$CODE" "$DESCRIPTION" "$QUERY" "$EXPECTED_CODE" "$MATCH_RESULT" "$@"
+
+}
+
+# verify - Common verification code
+# $1 - return code of CURL sub-shell command
+# $2 - HTTP status code
+# $3 - description of test
+# $4 - query to execute
+# $5 - expected HTTP status code to check
+# $6 - match result
+# $7... - substring(s) that should be in the response
+function verify {
+ local SUCCESS=$1
+ shift
+ local CODE=$1
+ shift
+ local DESCRIPTION="$1"
+ shift
+ local QUERY="$1"
+ shift
+ local EXPECTED_CODE="$1"
+ shift
+ local MATCH_RESULT="$1"
+ shift
+
+ if [[ $SUCCESS != 0 ]]; then
+ cat $CURL_TEMPFILE
+ fail_and_exit "$DESCRIPTION" "$QUERY" "curl had a non-zero exit code."
+ fi
+
set -e
RES=$(cat "${TEMPDIR}/curl_out.txt")
@@ -99,10 +269,7 @@ function curl_test {
fail_and_exit "$DESCRIPTION" "$QUERY" "unexpected HTTP status code expected $EXPECTED_CODE (actual $CODE): $RES"
fi
- #local ELAPSED=$(($SECONDS - $START))
- #if [[ $ELAPSED -gt $MAX_TIME ]]; then
- # fail_and_exit "$DESCRIPTION" "$QUERY" "query duration too long, $ELAPSED > $MAX_TIME"
- #fi
+ local SUBSTRING
# Check result substrings
for SUBSTRING in "$@"; do
diff --git a/test/scripts/e2e_subs/tealprogs/app-abi-add-example.teal b/test/scripts/e2e_subs/tealprogs/app-abi-add-example.teal
new file mode 100644
index 000000000..18d3b3e6e
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/app-abi-add-example.teal
@@ -0,0 +1,87 @@
+#pragma version 5
+intcblock 1 0
+bytecblock 0x151f7c75
+txn ApplicationID
+intc_1 // 0
+==
+bnz main_l12
+txn OnCompletion
+intc_0 // OptIn
+==
+bnz main_l11
+txn OnCompletion
+pushint 5 // DeleteApplication
+==
+bnz main_l10
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0xfe6bdf69 // 0xfe6bdf69
+==
+&&
+bnz main_l9
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0xa88c26a5 // 0xa88c26a5
+==
+&&
+bnz main_l8
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x535a47ba // 0x535a47ba
+==
+&&
+bnz main_l7
+intc_1 // 0
+return
+main_l7:
+txna ApplicationArgs 1
+callsub sub2
+intc_0 // 1
+return
+main_l8:
+callsub sub1
+intc_0 // 1
+return
+main_l9:
+txna ApplicationArgs 1
+txna ApplicationArgs 2
+callsub sub0
+intc_0 // 1
+return
+main_l10:
+intc_0 // 1
+return
+main_l11:
+intc_0 // 1
+return
+main_l12:
+intc_0 // 1
+return
+sub0: // add
+store 1
+store 0
+bytec_0 // 0x151f7c75
+load 0
+btoi
+load 1
+btoi
++
+itob
+concat
+log
+retsub
+sub1: // empty
+bytec_0 // 0x151f7c75
+log
+retsub
+sub2: // payment
+store 2
+pushbytes 0x151f7c7580 // 0x151f7c7580
+log
+retsub \ No newline at end of file
diff --git a/test/scripts/e2e_subs/tealprogs/app-abi-arg.teal b/test/scripts/e2e_subs/tealprogs/app-abi-arg.teal
new file mode 100644
index 000000000..900ee0e54
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/app-abi-arg.teal
@@ -0,0 +1,73 @@
+#pragma version 5
+intcblock 1 0
+txn ApplicationID
+intc_1 // 0
+==
+bnz main_l14
+txn OnCompletion
+pushint 5 // DeleteApplication
+==
+bnz main_l13
+txn OnCompletion
+intc_0 // OptIn
+==
+txna ApplicationArgs 0
+pushbytes 0x00056f7074696e // 0x00056f7074696e
+==
+&&
+bnz main_l12
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x0000000000000000 // 0x0000000000000000
+==
+&&
+bnz main_l11
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x000102 // 0x000102
+==
+&&
+bnz main_l10
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x000d00010200000000000030340003757775 // 0x000d00010200000000000030340003757775
+==
+&&
+bnz main_l9
+txn OnCompletion
+intc_1 // NoOp
+==
+txna ApplicationArgs 0
+pushbytes 0x000000000000018f000c0019000b73686f756c642070617373000490 // 0x000000000000018f000c0019000b73686f756c642070617373000490
+==
+&&
+bnz main_l8
+intc_1 // 0
+return
+main_l8:
+intc_0 // 1
+return
+main_l9:
+intc_0 // 1
+return
+main_l10:
+intc_0 // 1
+return
+main_l11:
+intc_0 // 1
+return
+main_l12:
+intc_0 // 1
+return
+main_l13:
+intc_0 // 1
+return
+main_l14:
+intc_0 // 1
+return \ No newline at end of file
diff --git a/test/testdata/configs/config-v18.json b/test/testdata/configs/config-v18.json
index b1a977e64..aa7a010b3 100644
--- a/test/testdata/configs/config-v18.json
+++ b/test/testdata/configs/config-v18.json
@@ -71,7 +71,7 @@
"OutgoingMessageFilterBucketSize": 128,
"ParticipationKeysRefreshInterval": 60000000000,
"PeerConnectionsUpdateInterval": 3600,
- "PeerPingPeriodSeconds": 10,
+ "PeerPingPeriodSeconds": 0,
"PriorityPeers": {},
"PublicAddress": "",
"ReconnectTime": 60000000000,
diff --git a/test/testdata/configs/config-v19.json b/test/testdata/configs/config-v19.json
new file mode 100644
index 000000000..fac112201
--- /dev/null
+++ b/test/testdata/configs/config-v19.json
@@ -0,0 +1,97 @@
+{
+ "Version": 19,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 250000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 30000
+}
diff --git a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
index b37cf55bc..14b6c6151 100644
--- a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
+++ b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
@@ -201,12 +201,24 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-EAST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-east-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-US-EAST-1-c5d.9xl",
"Provider": "AWS",
"Region": "us-east-1",
"BaseConfiguration": "c5d.9xlarge"
},
{
+ "Name": "AWS-US-EAST-1-c5d.18xl",
+ "Provider": "AWS",
+ "Region": "us-east-1",
+ "BaseConfiguration": "c5d.18xlarge"
+ },
+ {
"Name": "AWS-US-EAST-2-c5.xlarge",
"Provider": "AWS",
"Region": "us-east-2",
@@ -237,12 +249,24 @@
"BaseConfiguration": "m5d.4xlarge"
},
{
+ "Name": "AWS-US-EAST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-east-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
+ {
"Name": "AWS-US-EAST-2-c5d.9xl",
"Provider": "AWS",
"Region": "us-east-2",
"BaseConfiguration": "c5d.9xlarge"
},
{
+ "Name": "AWS-US-EAST-2-c5d.18xl",
+ "Provider": "AWS",
+ "Region": "us-east-2",
+ "BaseConfiguration": "c5d.18xlarge"
+ },
+ {
"Name": "AWS-AP-SOUTH-1-c5.xlarge",
"Provider": "AWS",
"Region": "ap-south-1",
diff --git a/test/testdata/mainnetblocks b/test/testdata/mainnetblocks
deleted file mode 100644
index 1acd125b0..000000000
--- a/test/testdata/mainnetblocks
+++ /dev/null
Binary files differ
diff --git a/test/testdata/nettemplates/DevModeOneWallet.json b/test/testdata/nettemplates/DevModeOneWallet.json
new file mode 100644
index 000000000..fd29a927a
--- /dev/null
+++ b/test/testdata/nettemplates/DevModeOneWallet.json
@@ -0,0 +1,22 @@
+{
+ "Genesis": {
+ "NetworkName": "devmodenet",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 100,
+ "Online": true
+ }
+ ],
+ "DevMode": true
+ },
+ "Nodes": [
+ {
+ "Name": "Node",
+ "IsRelay": false,
+ "Wallets": [
+ { "Name": "Wallet1", "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/nettemplates/OneNodeTwoRelays.json b/test/testdata/nettemplates/OneNodeTwoRelays.json
deleted file mode 100644
index 552930b3f..000000000
--- a/test/testdata/nettemplates/OneNodeTwoRelays.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 100,
- "Online": true
- }
- ]
- },
- "Nodes": [
- {
- "Name": "Relay1",
- "IsRelay": true
-
- },
- {
- "Name": "Relay2",
- "IsRelay": true
- },
- {
- "Name": "Node",
- "Wallets": [
- { "Name": "Wallet1",
- "ParticipationOnly": false }
- ]
- }
- ]
-}
diff --git a/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json b/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json
index 987aefe9f..8c4fb39f6 100644
--- a/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json
+++ b/test/testdata/nettemplates/TenNodesDistributedMultiWallet.json
@@ -1,6 +1,8 @@
{
"Genesis": {
"NetworkName": "tbd",
+ "PartKeyDilution": 50,
+ "LastPartKeyRound": 2000,
"Wallets": [
{
"Name": "20pct",
diff --git a/test/testdata/nettemplates/ThreeNodesOneOnline.json b/test/testdata/nettemplates/ThreeNodesOneOnline.json
deleted file mode 100644
index df214101f..000000000
--- a/test/testdata/nettemplates/ThreeNodesOneOnline.json
+++ /dev/null
@@ -1,39 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 50,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 50,
- "Online": false
- }
- ]
- },
- "Nodes": [
- {
- "Name": "Primary",
- "IsRelay": true,
- "Wallets": [
- ]
- },
- {
- "Name": "OnlineNode",
- "Wallets": [
- { "Name": "Wallet1",
- "ParticipationOnly": true }
- ]
- },
- {
- "Name": "OfflineNode",
- "Wallets": [
- { "Name": "Wallet2",
- "ParticipationOnly": false }
- ]
- }
- ]
-}
diff --git a/test/testdata/nettemplates/TwoNodes50EachWithTwoRelays.json b/test/testdata/nettemplates/TwoNodes50EachWithTwoRelays.json
deleted file mode 100644
index 70bae5b7e..000000000
--- a/test/testdata/nettemplates/TwoNodes50EachWithTwoRelays.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 50,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 50,
- "Online": true
- }
- ]
- },
- "Nodes": [
- {
- "Name": "Relay1",
- "IsRelay": true
- },
- {
- "Name": "Relay2",
- "IsRelay": true
- },
- {
- "Name": "Node1",
- "Wallets": [
- { "Name": "Wallet1",
- "ParticipationOnly": false }
- ]
- },
- {
- "Name": "Node2",
- "Wallets": [
- { "Name": "Wallet2",
- "ParticipationOnly": false }
- ]
- }
- ]
-}
diff --git a/test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json b/test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json
new file mode 100644
index 000000000..198657b6c
--- /dev/null
+++ b/test/testdata/nettemplates/TwoNodesExpiredOfflineV29.json
@@ -0,0 +1,36 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "https://github.com/algorandfoundation/specs/tree/abc54f79f9ad679d2d22f0fb9909fb005c16f8a1",
+ "Wallets": [
+ {
+ "Name": "Online1",
+ "Stake": 90,
+ "Online": true
+ },
+ {
+ "Name": "Online2",
+ "Stake": 10,
+ "Online": false
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online1",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Secondary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online2",
+ "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json b/test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json
new file mode 100644
index 000000000..8c2d2e669
--- /dev/null
+++ b/test/testdata/nettemplates/TwoNodesExpiredOfflineVFuture.json
@@ -0,0 +1,36 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "future",
+ "Wallets": [
+ {
+ "Name": "Online1",
+ "Stake": 90,
+ "Online": true
+ },
+ {
+ "Name": "Online2",
+ "Stake": 10,
+ "Online": false
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online1",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Secondary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Online2",
+ "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/txnsync/bitmask.go b/txnsync/bitmask.go
deleted file mode 100644
index 6990fc1d7..000000000
--- a/txnsync/bitmask.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "errors"
-)
-
-var errIndexNotFound = errors.New("invalid bitmask: index not found")
-var errInvalidBitmaskType = errors.New("invalid bitmask type")
-
-//msgp:allocbound bitmask maxBitmaskSize
-type bitmask []byte
-
-// assumed to be in mode 0, sets bit at index to 1
-func (b *bitmask) setBit(index int) {
- // bitmask type is stored at index 0, so the rest of the data is stored after.
- byteIndex := index/8 + 1
- (*b)[byteIndex] |= 1 << (index % 8)
-}
-
-// trimBitmask compresses the bitmask into one of the 4 types:
-// type 0: input bitmask bit pos x b -> output bitmask bit pos x b
-// type 1: input bitmask bit pos x b -> output bitmask bit pos x !b
-// type 2: stores the positions of bits where the bit value is 1
-// input bitmask first bit 1 at pos A, second bit 1 at pos B, ...
-// output bitmask stores A, B-A, ...
-// type 3: same as type 2, but stores the positons where the bit is 0
-func (b *bitmask) trimBitmask(entries int) {
- if *b == nil {
- return
- }
- numBitsCase0 := 0
- numBitsCase1 := 0
- numExists := 0
- for i := 0; i < entries; i++ {
- byteIndex := i/8 + 1
- if (*b)[byteIndex]&(1<<(i%8)) != 0 {
- numBitsCase0 = i + 1
- numExists++
- } else {
- numBitsCase1 = i + 1
- }
- }
- bitmaskType := 0
- bestSize := bytesNeededBitmask(numBitsCase0)
- if bestSize > bytesNeededBitmask(numBitsCase1) {
- bitmaskType = 1
- bestSize = bytesNeededBitmask(numBitsCase1)
- }
- if bestSize > numExists*2+1 {
- bitmaskType = 2
- bestSize = numExists*2 + 1
- }
- if bestSize > (entries-numExists)*2+1 {
- bitmaskType = 3
- bestSize = (entries-numExists)*2 + 1
- }
- switch bitmaskType {
- case 0:
- *b = (*b)[:bestSize]
- case 1:
- (*b)[0] = 1
- for i := range *b {
- if i != 0 {
- (*b)[i] = 255 - (*b)[i] // invert bits
- }
- }
- *b = (*b)[:bestSize]
- case 2:
- newBitmask := make(bitmask, 1, bestSize)
- newBitmask[0] = 2
- last := 0
- for i := 0; i < entries; i++ {
- byteIndex := i/8 + 1
- if (*b)[byteIndex]&(1<<(i%8)) != 0 {
- diff := i - last
- newBitmask = append(newBitmask, byte(diff/256), byte(diff%256))
- last = i
- }
- }
- *b = newBitmask
- case 3:
- newBitmask := make(bitmask, 1, bestSize)
- newBitmask[0] = 3
- last := 0
- for i := 0; i < entries; i++ {
- byteIndex := i/8 + 1
- if (*b)[byteIndex]&(1<<(i%8)) == 0 {
- diff := i - last
- newBitmask = append(newBitmask, byte(diff/256), byte(diff%256))
- last = i
- }
- }
- *b = newBitmask
- }
-}
-
-// iterate through the elements of bitmask without expanding it.
-// call the func(entriesCount, setBitIndex) for every set bit
-// numTransactions: is the size of the array that transactionIndex is accessing: transactionIndex < numTransactions
-// numItems: is the size of the array that itemIndex is accessing: itemIndex < numItems (itemIndex is also the set bit counter)
-func (b *bitmask) iterate(numTransactions int, numItems int, callback func(int, int) error) error {
- option := 0
- if len(*b) > 0 {
- option = int((*b)[0])
- } else { // nothing to iterate
- return nil
- }
- itemIndex := 0
- switch option {
- case 0:
- transactionIndex := 0
- maxV := bytesNeededBitmask(numTransactions)
- if len(*b) > maxV {
- return errIndexNotFound
- }
- for i, v := range (*b)[1:] {
- for ; transactionIndex < numTransactions && v > 0; transactionIndex++ {
- if v&1 != 0 {
- if itemIndex >= numItems {
- return errDataMissing
- }
- if err := callback(transactionIndex, itemIndex); err != nil {
- return err
- }
- itemIndex++
- }
- v >>= 1
- }
- if v > 0 {
- // remaining set bits, but transactionIndex exceeded numTransactions
- return errIndexNotFound
- }
- // in case the loop is cut short because there are no more set bits in the byte
- transactionIndex = (i + 1) * 8
- }
- case 1:
- transactionIndex := 0
- maxV := bytesNeededBitmask(numTransactions)
- if len(*b) > maxV {
- return errIndexNotFound
- }
- for _, v := range (*b)[1:] {
- // after the first iteration of the loop below, v will be less than 255
- if v >= 255 {
- transactionIndex += 8
- continue
- }
- maxJ := 8
- if maxJ > numTransactions-transactionIndex {
- maxJ = numTransactions - transactionIndex
- }
- for j := 0; j < maxJ; j++ {
- if v&1 == 0 {
- if itemIndex >= numItems {
- return errDataMissing
- }
- if err := callback(transactionIndex, itemIndex); err != nil {
- return err
- }
- itemIndex++
- }
- v >>= 1
- transactionIndex++
- }
- if 255>>maxJ != v {
- // The remaining of the bits must be 1
- return errIndexNotFound
- }
- }
- if numTransactions-transactionIndex > numItems-itemIndex {
- return errDataMissing
- }
- for ; transactionIndex < numTransactions; transactionIndex++ {
- if err := callback(transactionIndex, itemIndex); err != nil {
- return err
- }
- itemIndex++
- }
- case 2:
- sum := 0 // transactionIndex
- elementsCount := (len(*b) - 1) / 2
- if elementsCount > numItems {
- return errDataMissing
- }
- for itemIndex := 0; itemIndex < elementsCount; itemIndex++ {
- sum += int((*b)[itemIndex*2+1])*256 + int((*b)[itemIndex*2+2])
- if sum >= numTransactions {
- return errIndexNotFound
- }
- if err := callback(sum, itemIndex); err != nil {
- return err
- }
- }
- case 3:
- sum := 0
- // This is the least amount of elements can be set.
- // There could be more, if the numbers are corrupted
- // i.e. when sum >= numTransactions
- elementsCount := numTransactions - (len(*b)-1)/2
- if elementsCount > numItems || elementsCount < 0 {
- return errDataMissing
- }
- transactionIndex := 0
- for i := 0; i*2+2 < len(*b); i++ {
- sum += int((*b)[i*2+1])*256 + int((*b)[i*2+2])
- if sum >= numTransactions {
- return errIndexNotFound
- }
- for transactionIndex < sum {
- if err := callback(transactionIndex, itemIndex); err != nil {
- return err
- }
- transactionIndex++
- itemIndex++
- }
- transactionIndex++
- }
- for transactionIndex < numTransactions {
- if err := callback(transactionIndex, itemIndex); err != nil {
- return err
- }
- transactionIndex++
- itemIndex++
- }
- default:
- return errInvalidBitmaskType
- }
- return nil
-}
-
-// bytesNeededBitmask returns the number of bytes needed to store entries bits.
-func bytesNeededBitmask(entries int) int {
- return (entries+7)/8 + 1
-}
diff --git a/txnsync/bitmask_test.go b/txnsync/bitmask_test.go
deleted file mode 100644
index 516b3bc20..000000000
--- a/txnsync/bitmask_test.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "encoding/binary"
- "errors"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestTrimBitmaskNil(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var b bitmask
- b.trimBitmask(0)
- require.Nil(t, b)
-}
-
-func TestIterateExceptions(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var b bitmask
- require.Nil(t, b.iterate(0, 0, nil))
-
-}
-
-func TestBitmaskType0(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- setBits := make([]int, 0, 5)
- setBits = append(setBits, 0)
- setBits = append(setBits, 2)
- setBits = append(setBits, 3)
- setBits = append(setBits, 10)
-
- trimIterateHelper(t, setBits)
-}
-
-func TestBitmaskType1(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- setBits := make([]int, 0, 80)
- entries := 80
- for i := 0; i < entries; i++ {
- if i%3 != 0 || i > entries-10 {
- setBits = append(setBits, i)
- }
- }
- trimIterateHelper(t, setBits)
-}
-
-func TestBitmaskType2(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- setBits := make([]int, 0, 5)
- setBits = append(setBits, 0)
- setBits = append(setBits, 2)
- setBits = append(setBits, 69)
-
- trimIterateHelper(t, setBits)
-}
-
-func TestBitmaskType3(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- entries := 80
- setBits := make([]int, 0, entries)
- for i := 0; i < entries; i++ {
- if i != 0 && i != 2 && i != 3 && i != 71 {
- setBits = append(setBits, i)
- }
- }
- trimIterateHelper(t, setBits)
-}
-
-// Test for corrupted bitmask
-func TestBitmaskType3Corrupted(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // 10 entries, bitmask has 3 compliment bits (case 3): 10-3=7 set bits,
- // last valid index should be 6
- maxIndex := 7
- entries := 10
-
- var b bitmask
- b = make([]byte, 7)
- b[0] = 3
- b[1] = 0
- b[3] = 0
- b[5] = 0
-
- b[2] = 1 // index 1 is not set
- b[4] = 1 // index 2 is not set
- b[6] = 8 // index 2+8=10 is not set. 10 is outside the entries, and does not count
- // set bits: 0, 3, 4, 5, 6, 7, 8, 9
-
- require.Equal(t, errIndexNotFound, b.iterate(entries, maxIndex, func(entry, index int) error {
- return nil
- }))
-}
-
-func TestBitmaskTypeX(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- b := make(bitmask, bytesNeededBitmask(80))
- b[0] = 4
- require.Equal(t, b.iterate(0, 0, nil), errInvalidBitmaskType)
-}
-
-func trimIterateHelper(t *testing.T, setBits []int) {
- entries := 80
- b := make(bitmask, bytesNeededBitmask(entries))
-
- for _, x := range setBits {
- b.setBit(x)
- }
- iterated := make([]bool, entries)
- iterfunc := func(i int, index int) error {
- iterated[i] = true
- return nil
- }
- var errTestError = errors.New("some error")
- errorAfter := 0
- errfunc := func(i int, index int) error {
- if index > errorAfter {
- return errTestError
- }
- return nil
- }
-
- require.Equal(t, errTestError, b.iterate(entries, len(setBits), errfunc))
- require.Equal(t, errDataMissing, b.iterate(entries, len(setBits)-1, iterfunc)) // less than set bits
- require.NoError(t, b.iterate(entries, len(setBits), iterfunc))
-
- s := 0
- for i := 0; i < entries; i++ {
- if s < len(setBits) && i == setBits[s] {
- require.True(t, iterated[i], i)
- s++
- } else {
- require.False(t, iterated[i], i)
- }
- }
- b.trimBitmask(entries)
- if int(b[0]) < 2 {
- // make sure TrimRight is behaving as expected
- require.True(t, int(b[len(b)-1]) > 0)
- }
- iterated = make([]bool, entries)
-
- require.Equal(t, errTestError, b.iterate(entries, len(setBits), errfunc))
- require.Equal(t, errDataMissing, b.iterate(entries, len(setBits)-1, iterfunc))
-
- // For types 0 and 2, let the entries be smaller than what the bitmap will provide
- // This is the edge case, and will not be a problem for the compliment set bitmasks
- if int((b)[0]) == 0 || int((b)[0]) == 2 {
- require.Equal(t, errIndexNotFound, b.iterate(setBits[len(setBits)-1], len(setBits), iterfunc))
- require.Nil(t, b.iterate(setBits[len(setBits)-1]+1, len(setBits), iterfunc))
- }
-
- // For types 1 and 3, let the entries be smaller than what the bitmap will provide
- // This requires a much smaller entries limit, since it is only checked in the first stage
- if int((b)[0]) == 1 || int((b)[0]) == 3 {
- require.Equal(t, errIndexNotFound, b.iterate(70, len(setBits), iterfunc))
- }
-
- // For types 1 and 3, test the error handling in the second stage.
- errorAfter = len(setBits) - 1 - 8
- require.Equal(t, errTestError, b.iterate(entries, len(setBits), errfunc))
- require.Equal(t, errDataMissing, b.iterate(entries, len(setBits)-1-8, iterfunc))
-
- require.NoError(t, b.iterate(entries, len(setBits), func(i int, index int) error {
- iterated[i] = true
- return nil
- }))
-
- s = 0
- for i := 0; i < entries; i++ {
- if s < len(setBits) && i == setBits[s] {
- require.True(t, iterated[i], i)
- s++
- } else {
- require.False(t, iterated[i], i)
- }
- }
-}
-
-func TestFuzzBitmask(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- randSeed := uint64(0)
- rand := func() byte {
- bytes := [16]byte{}
- l := binary.PutUvarint(bytes[:], randSeed)
- h := crypto.Hash(bytes[:l])
- randSeed = 0
- for i := 0; i < 8; i++ {
- randSeed += uint64(h[i]) << (i * 8)
- }
- return byte(h[0])
- }
- for iterationsCount := 0; iterationsCount < 1000; iterationsCount++ {
- bitmaskType := rand() % 4
- blen := int(rand()%33) + 1
- var b bitmask
- b = make([]byte, blen)
- b[0] = byte(bitmaskType)
- for i := 1; i < blen; i++ {
- b[i] = rand()
- }
- entries := int(rand())
- maxIndex := int(rand())
- lastEntryIndex := -1
- b.iterate(entries, maxIndex, func(i, j int) error {
- require.Greater(t, i, lastEntryIndex)
- lastEntryIndex = i
- require.Less(t, i, entries)
- require.Less(t, j, maxIndex)
- return nil
- })
- // reset to mode 0
- b[0] = 0
- entries = (blen - 1) * 8
- err1 := b.iterate(entries, maxIndex, func(i, j int) error {
- return nil
- })
- b.trimBitmask(entries)
- err2 := b.iterate(entries, maxIndex, func(i, j int) error {
- return nil
- })
- require.Equal(t, err1, err2)
- }
-}
diff --git a/txnsync/bloomFilter.go b/txnsync/bloomFilter.go
deleted file mode 100644
index 013888fa8..000000000
--- a/txnsync/bloomFilter.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "encoding/binary"
- "errors"
- "math"
-
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/util/bloom"
-)
-
-// bloomFilterFalsePositiveRate is used as the target false positive rate for the multiHashBloomFilter implementation.
-// the xor based bloom filters have their own hard-coded false positive rate, and therefore require no configuration.
-const bloomFilterFalsePositiveRate = 0.01
-
-var errInvalidBloomFilterEncoding = errors.New("invalid bloom filter encoding")
-var errEncodingBloomFilterFailed = errors.New("encoding bloom filter failed")
-
-//msgp:ignore bloomFilterType
-type bloomFilterType byte
-
-const (
- invalidBloomFilter bloomFilterType = iota //nolint:deadcode,varcheck
- multiHashBloomFilter
- xorBloomFilter32
- xorBloomFilter8
-)
-
-// transactionsRange helps us to identify a subset of the transaction pool pending transaction groups.
-// it's being used as part of an optimization when we're attempting to recreate a bloom filter :
-// if the new bloom filter shares the same set of parameters, then the result is expected to be the
-// same and therefore the old bloom filter can be used.
-type transactionsRange struct {
- firstCounter uint64
- lastCounter uint64
- transactionsCount uint64
-}
-
-type bloomFilter struct {
- containedTxnsRange transactionsRange
-
- encoded encodedBloomFilter
-
- encodedLength int
-}
-
-// testableBloomFilter is used for a bloom filters that were received from the network, decoded
-// and are ready to be tested against.
-type testableBloomFilter struct {
- encodingParams requestParams
-
- filter bloom.GenericFilter
-
- clearPrevious bool
-}
-
-func decodeBloomFilter(enc encodedBloomFilter) (outFilter *testableBloomFilter, err error) {
- outFilter = &testableBloomFilter{
- encodingParams: enc.EncodingParams,
- clearPrevious: enc.ClearPrevious != 0,
- }
- switch bloomFilterType(enc.BloomFilterType) {
- case multiHashBloomFilter:
- outFilter.filter, err = bloom.UnmarshalBinary(enc.BloomFilter)
- case xorBloomFilter32:
- outFilter.filter = new(bloom.XorFilter)
- err = outFilter.filter.UnmarshalBinary(enc.BloomFilter)
- case xorBloomFilter8:
- outFilter.filter = new(bloom.XorFilter8)
- err = outFilter.filter.UnmarshalBinary(enc.BloomFilter)
- default:
- return nil, errInvalidBloomFilterEncoding
- }
-
- if err != nil {
- return nil, err
- }
- return
-}
-
-func (bf *bloomFilter) encode(filter bloom.GenericFilter, filterType bloomFilterType) (err error) {
- bf.encoded.BloomFilterType = byte(filterType)
- bf.encoded.BloomFilter, err = filter.MarshalBinary()
- bf.encodedLength = len(bf.encoded.BloomFilter)
- if err != nil || bf.encodedLength == 0 {
- return errEncodingBloomFilterFailed
- }
- // increase the counter for a successful bloom filter encoding
- txsyncEncodedBloomFiltersTotal.Inc(nil)
- return
-}
-
-func (bf *bloomFilter) sameParams(other bloomFilter) bool {
- return (bf.encoded.EncodingParams == other.encoded.EncodingParams) &&
- (bf.containedTxnsRange == other.containedTxnsRange)
-}
-
-func (bf *testableBloomFilter) test(txID transactions.Txid) bool {
- if bf.encodingParams.Modulator > 1 {
- if txidToUint64(txID)%uint64(bf.encodingParams.Modulator) != uint64(bf.encodingParams.Offset) {
- return false
- }
- }
- return bf.filter.Test(txID[:])
-}
-
-func filterFactoryBloom(numEntries int, s *syncState) (filter bloom.GenericFilter, filterType bloomFilterType) {
- shuffler := uint32(s.node.Random(math.MaxUint64))
- sizeBits, numHashes := bloom.Optimal(numEntries, bloomFilterFalsePositiveRate)
- return bloom.New(sizeBits, numHashes, shuffler), multiHashBloomFilter
-}
-
-func filterFactoryXor8(numEntries int, s *syncState) (filter bloom.GenericFilter, filterType bloomFilterType) { //nolint:deadcode,unused
- s.xorBuilder.RandomNumberGeneratorSeed = s.node.Random(math.MaxUint64)
- return bloom.NewXor8(numEntries, &s.xorBuilder), xorBloomFilter8
-}
-
-func filterFactoryXor32(numEntries int, s *syncState) (filter bloom.GenericFilter, filterType bloomFilterType) {
- s.xorBuilder.RandomNumberGeneratorSeed = s.node.Random(math.MaxUint64)
- return bloom.NewXor(numEntries, &s.xorBuilder), xorBloomFilter32
-}
-
-var filterFactory func(int, *syncState) (filter bloom.GenericFilter, filterType bloomFilterType) = filterFactoryXor32
-
-func (s *syncState) makeBloomFilter(encodingParams requestParams, txnGroups []pooldata.SignedTxGroup, excludeTransactions *transactionCache, hintPrevBloomFilter *bloomFilter) (result bloomFilter) {
- result.encoded.EncodingParams = encodingParams
- if encodingParams.Modulator == 0 {
- // we want none.
- return
- }
- if encodingParams.Modulator == 1 && excludeTransactions == nil {
- // we want all.
- if len(txnGroups) > 0 {
- result.containedTxnsRange.firstCounter = txnGroups[0].GroupCounter
- result.containedTxnsRange.lastCounter = txnGroups[len(txnGroups)-1].GroupCounter
- result.containedTxnsRange.transactionsCount = uint64(len(txnGroups))
- } else {
- return
- }
-
- if hintPrevBloomFilter != nil {
- if result.sameParams(*hintPrevBloomFilter) {
- return *hintPrevBloomFilter
- }
- }
-
- filter, filterType := filterFactory(len(txnGroups), s)
- for _, group := range txnGroups {
- filter.Set(group.GroupTransactionID[:])
- }
- err := result.encode(filter, filterType)
- if err != nil {
- // fall back to standard bloom filter
- filter, filterType = filterFactoryBloom(len(txnGroups), s)
- for _, group := range txnGroups {
- filter.Set(group.GroupTransactionID[:])
- }
- result.encode(filter, filterType) //nolint:errcheck
- // the error in the above case can be silently ignored.
- }
- return result
- }
-
- // we want subset.
- result.containedTxnsRange.firstCounter = math.MaxUint64
- filteredTransactionsIDs := getTxIDSliceBuffer(len(txnGroups))
- defer releaseTxIDSliceBuffer(filteredTransactionsIDs)
-
- excludedTransactions := 0
- for _, group := range txnGroups {
- txID := group.GroupTransactionID
- if txidToUint64(txID)%uint64(encodingParams.Modulator) != uint64(encodingParams.Offset) {
- continue
- }
-
- if result.containedTxnsRange.firstCounter == math.MaxUint64 {
- result.containedTxnsRange.firstCounter = group.GroupCounter
- }
- result.containedTxnsRange.lastCounter = group.GroupCounter
-
- if excludeTransactions != nil && excludeTransactions.contained(txID) {
- excludedTransactions++
- continue
- }
-
- filteredTransactionsIDs = append(filteredTransactionsIDs, txID)
- }
-
- result.containedTxnsRange.transactionsCount = uint64(len(filteredTransactionsIDs) + excludedTransactions)
-
- if hintPrevBloomFilter != nil {
- if result.sameParams(*hintPrevBloomFilter) {
- return *hintPrevBloomFilter
- }
- }
-
- if len(filteredTransactionsIDs) == 0 {
- return
- }
-
- filter, filterType := filterFactory(len(filteredTransactionsIDs), s)
-
- for _, txid := range filteredTransactionsIDs {
- filter.Set(txid[:])
- }
- err := result.encode(filter, filterType)
- if err != nil {
- // fall back to standard bloom filter
- filter, filterType = filterFactoryBloom(len(filteredTransactionsIDs), s)
- for _, txid := range filteredTransactionsIDs {
- filter.Set(txid[:])
- }
- result.encode(filter, filterType) //nolint:errcheck
- // the error in the above case can be silently ignored.
- }
-
- return result
-}
-
-func txidToUint64(txID transactions.Txid) uint64 {
- return binary.LittleEndian.Uint64(txID[:8])
-}
diff --git a/txnsync/bloomFilter_test.go b/txnsync/bloomFilter_test.go
deleted file mode 100644
index 57a1635fc..000000000
--- a/txnsync/bloomFilter_test.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "encoding/binary"
- "math/rand"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/bloom"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-func getTxnGroups(genesisHash crypto.Digest, genesisID string) []pooldata.SignedTxGroup {
- return []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- GroupCounter: 0,
- GroupTransactionID: transactions.Txid{1},
- Transactions: []transactions.SignedTxn{
- {
- Txn: transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("2"))),
- Fee: basics.MicroAlgos{Raw: 100},
- GenesisHash: genesisHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: basics.Address(crypto.Hash([]byte("4"))),
- Amount: basics.MicroAlgos{Raw: 1000},
- },
- },
- Sig: crypto.Signature{1},
- },
- },
- },
- pooldata.SignedTxGroup{
- GroupCounter: 1,
- GroupTransactionID: transactions.Txid{2},
- Transactions: []transactions.SignedTxn{
- {
- Txn: transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- Fee: basics.MicroAlgos{Raw: 100},
- GenesisHash: genesisHash,
- GenesisID: genesisID,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: basics.Address(crypto.Hash([]byte("2"))),
- Amount: basics.MicroAlgos{Raw: 1000},
- },
- },
- Sig: crypto.Signature{2},
- },
- {
- Txn: transactions.Transaction{
- Type: protocol.KeyRegistrationTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- GenesisHash: genesisHash,
- GenesisID: genesisID,
- },
- },
- Sig: crypto.Signature{3},
- },
- },
- },
- pooldata.SignedTxGroup{
- GroupCounter: 2,
- GroupTransactionID: transactions.Txid{3},
- Transactions: []transactions.SignedTxn{
- {
- Txn: transactions.Transaction{
- Type: protocol.AssetConfigTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- Fee: basics.MicroAlgos{Raw: 100},
- GenesisHash: genesisHash,
- },
- },
- Sig: crypto.Signature{4},
- },
- {
- Txn: transactions.Transaction{
- Type: protocol.AssetFreezeTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- GenesisHash: genesisHash,
- },
- },
- Sig: crypto.Signature{5},
- },
- {
- Txn: transactions.Transaction{
- Type: protocol.CompactCertTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- GenesisHash: genesisHash,
- },
- },
- Msig: crypto.MultisigSig{Version: 1},
- },
- },
- },
- }
-}
-
-func BenchmarkTxidToUint64(b *testing.B) {
- txID := transactions.Txid{1, 2, 3, 4, 5}
- for i := 0; i < b.N; i++ {
- txidToUint64(txID)
- }
-}
-
-const testingGenesisID = "gID"
-
-var testingGenesisHash = crypto.Hash([]byte("gh"))
-
-func TestBloomFallback(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- s.node = &justRandomFakeNode{}
- var encodingParams requestParams
-
- for encodingParams.Modulator = 1; encodingParams.Modulator < 3; encodingParams.Modulator++ {
- txnGroups := getTxnGroups(testingGenesisHash, testingGenesisID)
- bf := s.makeBloomFilter(encodingParams, txnGroups, nil, nil)
-
- switch bloomFilterType(bf.encoded.BloomFilterType) {
- case multiHashBloomFilter:
- t.Errorf("expected xorfilter but got classic bloom filter")
- case xorBloomFilter32:
- // ok
- case xorBloomFilter8:
- t.Errorf("expected xorBloomFilter32 but got xorBloomFilter8")
- default:
- t.Errorf("unknown internal bloom filter object : %d", bloomFilterType(bf.encoded.BloomFilterType))
- }
-
- // Duplicate first entry. xorfilter can't handle
- // duplicates. We _probably_ never have duplicate txid
- // prefixes when we grab the first 8 bytes of 32 bytes, but
- // that's not 100%, maybe only 99.999999%
- stg := txnGroups[1]
- txnGroups = append(txnGroups, stg)
-
- bf = s.makeBloomFilter(encodingParams, txnGroups, nil, nil)
- switch bloomFilterType(bf.encoded.BloomFilterType) {
- case multiHashBloomFilter:
- // ok
- case xorBloomFilter32:
- t.Errorf("expected bloom filter but got xor")
- case xorBloomFilter8:
- t.Errorf("expected bloom filter but got xor")
- default:
- t.Errorf("unknown internal bloom filter object : %d", bloomFilterType(bf.encoded.BloomFilterType))
- }
- }
-}
-
-// TestHint tests that the hint is used only when it should be used
-func TestHint(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- s.node = &justRandomFakeNode{}
- var encodingParams requestParams
- defaultFilterType := xorBloomFilter32
-
- for encodingParams.Modulator = 1; encodingParams.Modulator < 3; encodingParams.Modulator++ {
- txnGroups := getTxnGroups(testingGenesisHash, testingGenesisID)
- bf := s.makeBloomFilter(encodingParams, txnGroups, nil, nil)
-
- switch bloomFilterType(bf.encoded.BloomFilterType) {
- case xorBloomFilter32:
- // ok
- default:
- require.Fail(t, "expect xorBloomFilter32")
- }
- require.Equal(t, defaultFilterType, bloomFilterType(bf.encoded.BloomFilterType))
-
- // Change the filter of bf to other than the default filter i.e. XorFilter8
- bf.encoded.BloomFilterType = byte(xorBloomFilter8)
-
- // Pass bf as a hint.
- bf2 := s.makeBloomFilter(encodingParams, txnGroups, nil, &bf)
-
- // If the filter of bf2 is not defaultFilterType (i.e. is XorFilter8), then the hint was used.
- // The hint must be used, and the filter should not be the default filter.
- require.NotEqual(t, defaultFilterType, bf2.encoded.BloomFilterType)
- switch bloomFilterType(bf2.encoded.BloomFilterType) {
- case xorBloomFilter8:
- // ok
- default:
- require.Fail(t, "expect xorBloomFilter8")
- }
-
- // Now change txnGroups, so that the hint will not be used
- for i := range txnGroups {
- txnGroups[i].GroupCounter += uint64(len(txnGroups))
- }
- bf2 = s.makeBloomFilter(encodingParams, txnGroups, nil, &bf)
-
- // If the filter of bf2 is XorFilter (i.e. defaultFilterType), then the hint was not used
- switch bloomFilterType(bf2.encoded.BloomFilterType) {
- case xorBloomFilter32:
- // ok
- default:
- require.Fail(t, "expect xorBloomFilter32")
- }
- require.Equal(t, defaultFilterType, bloomFilterType(bf2.encoded.BloomFilterType))
- }
-}
-
-// TestEncodingDecoding checks the encoding/decoding of the filters
-func TestEncodingDecoding(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- s.node = &justRandomFakeNode{}
-
- filters := []func(int, *syncState) (filter bloom.GenericFilter, filterType bloomFilterType){
- filterFactoryXor8, filterFactoryXor32, filterFactoryBloom}
-
- var randomEntries [10]transactions.Txid
- for i := range randomEntries {
- crypto.RandBytes(randomEntries[i][:])
- }
- var err error
- var testableBf *testableBloomFilter
- var remarshaled []byte
- // For each filter type
- for _, ff := range filters {
-
- filter, filterType := ff(len(randomEntries), &s)
- for i := range randomEntries {
- filter.Set(randomEntries[i][:])
- }
- var enc encodedBloomFilter
- enc.BloomFilterType = byte(filterType)
- enc.BloomFilter, err = filter.MarshalBinary()
- require.NoError(t, err)
-
- testableBf, err = decodeBloomFilter(enc)
- require.NoError(t, err)
-
- remarshaled, err = testableBf.filter.MarshalBinary()
-
- require.NoError(t, err)
- require.Equal(t, enc.BloomFilter, remarshaled)
- }
-}
-
-func TestDecodingErrors(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- bf, err := decodeBloomFilter(encodedBloomFilter{})
- require.Equal(t, errInvalidBloomFilterEncoding, err)
- require.Equal(t, (*testableBloomFilter)(nil), bf)
-
- var ebf encodedBloomFilter
- ebf.BloomFilterType = byte(multiHashBloomFilter)
- _, err = decodeBloomFilter(ebf)
-
- require.Error(t, err)
-}
-
-func TestBloomFilterTest(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- filters := []func(int, *syncState) (filter bloom.GenericFilter, filterType bloomFilterType){
- filterFactoryXor8, filterFactoryXor32, filterFactoryBloom}
-
- for _, ff := range filters {
-
- var s syncState
- s.node = &justRandomFakeNode{}
- var err error
- txnGroups := getTxnGroups(testingGenesisHash, testingGenesisID)
-
- filter, filterType := ff(len(txnGroups), &s)
- for _, txnGroup := range txnGroups {
- filter.Set(txnGroup.GroupTransactionID[:])
- }
- var enc encodedBloomFilter
- enc.BloomFilterType = byte(filterType)
- enc.BloomFilter, err = filter.MarshalBinary()
- require.NoError(t, err)
-
- testableBf, err := decodeBloomFilter(enc)
- require.NoError(t, err)
-
- for testableBf.encodingParams.Modulator = 0; testableBf.encodingParams.Modulator < 7; testableBf.encodingParams.Modulator++ {
- for testableBf.encodingParams.Offset = 0; testableBf.encodingParams.Offset < testableBf.encodingParams.Modulator; testableBf.encodingParams.Offset++ {
- for _, tx := range txnGroups {
- ans := testableBf.test(tx.GroupTransactionID)
- expected := true
- if testableBf.encodingParams.Modulator > 1 {
- if txidToUint64(tx.GroupTransactionID)%uint64(testableBf.encodingParams.Modulator) != uint64(testableBf.encodingParams.Offset) {
- expected = false
- }
- }
- require.Equal(t, expected, ans)
- }
- }
- }
- }
-
-}
-
-type justRandomFakeNode struct {
-}
-
-func (fn *justRandomFakeNode) Events() <-chan Event { return nil }
-
-func (fn *justRandomFakeNode) GetCurrentRoundSettings() (out RoundSettings) { return }
-
-func (fn *justRandomFakeNode) Clock() (out timers.WallClock) { return }
-
-func (fn *justRandomFakeNode) Random(rng uint64) uint64 {
- var xb [8]byte
- rand.Read(xb[:])
- rv := binary.LittleEndian.Uint64(xb[:])
- return rv % rng
-}
-
-func (fn *justRandomFakeNode) GetPeers() []PeerInfo { return nil }
-
-func (fn *justRandomFakeNode) GetPeer(interface{}) (out PeerInfo) { return }
-
-func (fn *justRandomFakeNode) UpdatePeers(txsyncPeers []*Peer, netPeers []interface{}, peersAverageDataExchangeRate uint64) {
-}
-func (fn *justRandomFakeNode) SendPeerMessage(netPeer interface{}, msg []byte, callback SendMessageCallback) {
-}
-
-func (fn *justRandomFakeNode) GetPeerLatency(netPeer interface{}) time.Duration {
- return 0
-}
-
-func (fn *justRandomFakeNode) GetPendingTransactionGroups() (txGroups []pooldata.SignedTxGroup, latestLocallyOriginatedGroupCounter uint64) {
- return
-}
-func (fn *justRandomFakeNode) IncomingTransactionGroups(peer *Peer, messageSeq uint64, txGroups []pooldata.SignedTxGroup) (transactionPoolSize int) {
- return 0
-}
-func (fn *justRandomFakeNode) NotifyMonitor() chan struct{} { return nil }
diff --git a/txnsync/emulatorCore_test.go b/txnsync/emulatorCore_test.go
deleted file mode 100644
index 814539827..000000000
--- a/txnsync/emulatorCore_test.go
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "context"
- "encoding/binary"
- "sort"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/execpool"
-)
-
-const roundDuration = 4 * time.Second
-
-type emulator struct {
- scenario scenario
- nodes []*emulatedNode
- syncers []*Service
- nodeCount int
- log logging.Logger
- currentRound basics.Round
- clock *guidedClock
- t *testing.T
- totalDuplicateTransactions uint64
- totalDuplicateTransactionSize uint64
- lastRandom uint64
- totalInitialTransactions uint64
-}
-
-type nodeTransaction struct {
- expirationRound basics.Round
- transactionSize int
-}
-
-type nodeTransactions []nodeTransaction
-
-type emulatorResult struct {
- nodes []nodeTransactions
-}
-
-func (a nodeTransactions) Len() int { return len(a) }
-func (a nodeTransactions) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a nodeTransactions) Less(i, j int) bool {
- if a[i].expirationRound < a[j].expirationRound {
- return true
- }
- if a[i].expirationRound > a[j].expirationRound {
- return false
- }
- return a[i].transactionSize < a[j].transactionSize
-}
-
-func emulateScenario(t *testing.T, scenario scenario) {
- e := &emulator{
- scenario: scenario,
- nodeCount: len(scenario.netConfig.nodes),
- log: logging.TestingLog(t),
- t: t,
- }
- e.initNodes()
- e.run()
-
- results := e.collectResult()
- for n := range scenario.expectedResults.nodes {
- sort.Stable(scenario.expectedResults.nodes[n])
- }
- for n := range results.nodes {
- sort.Stable(results.nodes[n])
- }
-
- t.Logf("Emulation Statistics:")
- t.Logf("Total duplicate transaction count: %d", e.totalDuplicateTransactions)
- t.Logf("Total duplicate transactions size: %d", e.totalDuplicateTransactionSize)
- for n := 0; n < e.nodeCount; n++ {
- t.Logf("%s transaction groups count : %d", e.nodes[n].name, len(results.nodes[n]))
- }
- for n := 0; n < e.nodeCount; n++ {
- require.Equalf(t, len(scenario.expectedResults.nodes[n]), len(results.nodes[n]), "node %d", n)
- }
-
- // calculating efficiency / overhead :
- // how many transaction need to be received ?
- // each node received all the transactions, minus the ones that it start up with.
- totalNeededSentTransactions := e.totalInitialTransactions*uint64(len(e.nodes)) - e.totalInitialTransactions
- actualReceivedTransactions := totalNeededSentTransactions + e.totalDuplicateTransactions
- t.Logf("Total transaction overhead: %d%%", (actualReceivedTransactions-totalNeededSentTransactions)*100/totalNeededSentTransactions)
-
- require.Equal(t, scenario.expectedResults, results)
- require.Equal(t, 1, 1)
-}
-
-func (e *emulator) run() {
- guidedClock := makeGuidedClock()
- lastRoundStarted := guidedClock.Since()
- e.clock = guidedClock
- e.start()
- // start the nodes
- for e.clock.Since() < e.scenario.testDuration {
- if guidedClock.Since() > lastRoundStarted+roundDuration {
- e.nextRound()
- lastRoundStarted = guidedClock.Since()
- }
- guidedClock.Advance(e.scenario.step)
- e.unblockStep()
- }
- // stop the nodes
- e.stop()
-}
-func (e *emulator) nextRound() {
- e.currentRound++
- for _, node := range e.nodes {
- node.onNewRound(e.currentRound, true)
- }
-}
-func (e *emulator) unblockStep() {
- for _, node := range e.nodes {
- // let it run through the clock advancement.
- node.unblock()
- node.waitBlocked()
-
- // make step
- node.step()
- }
-}
-func (e *emulator) start() {
- for i, node := range e.syncers {
- node.Start()
- e.nodes[i].waitBlocked()
- }
-}
-func (e *emulator) stop() {
- for i, node := range e.syncers {
- e.nodes[i].disableBlocking()
- node.Stop()
- }
-}
-
-func (e *emulator) initNodes() {
- e.nodes = make([]*emulatedNode, e.nodeCount, e.nodeCount)
- for i := 0; i < e.nodeCount; i++ {
- e.nodes[i] = makeEmulatedNode(e, i)
- syncer := MakeTransactionSyncService(
- makeNodeLogger(e.log, e.nodes[i]),
- e.nodes[i],
- e.scenario.netConfig.nodes[i].isRelay,
- "",
- crypto.Digest{},
- config.GetDefaultLocal(),
- e,
- )
- e.syncers = append(e.syncers, syncer)
- }
- randCounter := uint64(0)
- var randBuf [8]byte
- // we want to place a sender on all transaction so that they would be *somewhat* compressible.
- defaultSender := basics.Address{1, 2, 3, 4}
- const senderEncodingSize = 35
- encodingBuf := protocol.GetEncodingBuf()
- for _, initAlloc := range e.scenario.initialAlloc {
- node := e.nodes[initAlloc.node]
- for i := 0; i < initAlloc.transactionsCount; i++ {
- var group = pooldata.SignedTxGroup{}
- group.LocallyOriginated = true
- group.GroupCounter = uint64(len(node.txpoolEntries))
- group.Transactions = []transactions.SignedTxn{
- {
- Txn: transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Note: make([]byte, initAlloc.transactionSize-senderEncodingSize, initAlloc.transactionSize-senderEncodingSize),
- LastValid: initAlloc.expirationRound,
- Sender: defaultSender,
- },
- },
- },
- }
- // fill up the note field with pseudo-random data.
- for i := 0; i < len(group.Transactions[0].Txn.Note); i += crypto.DigestSize {
- binary.LittleEndian.PutUint64(randBuf[:], randCounter)
- digest := crypto.Hash(randBuf[:])
- copy(group.Transactions[0].Txn.Note[i:], digest[:])
- randCounter++
- }
- group.GroupTransactionID = group.Transactions.ID()
- encodingBuf = encodingBuf[:0]
- group.EncodedLength = len(group.Transactions[0].MarshalMsg(encodingBuf))
- node.txpoolIds[group.Transactions[0].ID()] = true
- node.txpoolEntries = append(node.txpoolEntries, group)
- }
- node.latestLocallyOriginatedGroupCounter = uint64(len(node.txpoolEntries) - 1)
- e.totalInitialTransactions += uint64(initAlloc.transactionsCount)
- node.txpoolGroupCounter += uint64(initAlloc.transactionsCount)
- node.onNewTransactionPoolEntry()
- }
- protocol.PutEncodingBuf(encodingBuf)
-}
-
-func (e *emulator) collectResult() (result emulatorResult) {
- result.nodes = make([]nodeTransactions, len(e.nodes))
- const senderEncodingSize = 35
- for i, node := range e.nodes {
- var txns nodeTransactions
- for _, txnGroup := range node.txpoolEntries {
- size := len(txnGroup.Transactions[0].Txn.Note)
- exp := txnGroup.Transactions[0].Txn.LastValid
- txns = append(txns, nodeTransaction{expirationRound: exp, transactionSize: size + senderEncodingSize})
- }
- for _, txnGroup := range node.expiredTx {
- size := len(txnGroup.Transactions[0].Txn.Note)
- exp := txnGroup.Transactions[0].Txn.LastValid
- txns = append(txns, nodeTransaction{expirationRound: exp, transactionSize: size + senderEncodingSize})
- }
- result.nodes[i] = txns
- }
- return result
-}
-
-// Dummy implementation of execpool.BacklogPool
-func (e *emulator) EnqueueBacklog(enqueueCtx context.Context, t execpool.ExecFunc, arg interface{}, out chan interface{}) error {
- t(arg)
- return nil
-}
-
-// Dummy implementation of execpool.BacklogPool
-func (e *emulator) Enqueue(enqueueCtx context.Context, t execpool.ExecFunc, arg interface{}, i execpool.Priority, out chan interface{}) error {
- return nil
-}
-
-// Dummy implementation of execpool.BacklogPool
-func (e *emulator) GetOwner() interface{} {
- return nil
-}
-
-// Dummy implementation of execpool.BacklogPool
-func (e *emulator) Shutdown() {
-
-}
-
-// Dummy implementation of execpool.BacklogPool
-func (e *emulator) GetParallelism() int {
- return 0
-}
diff --git a/txnsync/emulatorLogger_test.go b/txnsync/emulatorLogger_test.go
deleted file mode 100644
index 7020b7512..000000000
--- a/txnsync/emulatorLogger_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "fmt"
- "strconv"
- "strings"
- "testing"
-
- "github.com/algorand/go-algorand/logging"
-)
-
-// Foreground text colors
-const (
- reset = 0
- black = 30
- red = 31
- green = 32
- yellow = 33
- blue = 34
- magenta = 35
- cyan = 36
- white = 37
- hiblack = 90
- hired = 91
- higreen = 92
- hiyellow = 93
- hiblue = 94
- himagenta = 95
- hicyan = 96
- hiwhite = 97
-)
-
-const escape = "\x1b"
-
-var colors = []int{red, green, yellow, blue, magenta, cyan, hired, higreen, hiyellow, hiblue, himagenta, hicyan}
-var lowColors = []int{red, green, yellow, blue, magenta, cyan}
-
-type emulatorNodeLogger struct {
- algodlogger
- node *emulatedNode
- longestName int
-}
-
-func makeNodeLogger(l logging.Logger, node *emulatedNode) Logger {
- return &emulatorNodeLogger{
- algodlogger: l,
- node: node,
- }
-}
-
-type msgMode int
-
-const (
- modeZero msgMode = iota
- modeIncoming
- modeOutgoing
-)
-
-// implement local interface Logger
-func (e *emulatorNodeLogger) outgoingMessage(mstat msgStats) {
- e.printMsgStats(mstat, modeOutgoing)
-}
-
-// implement local interface Logger
-func (e *emulatorNodeLogger) incomingMessage(mstat msgStats) {
- e.printMsgStats(mstat, modeIncoming)
-}
-
-func (e emulatorNodeLogger) printMsgStats(mstat msgStats, mode msgMode) {
- seq := int(mstat.sequenceNumber)
- round := mstat.round
- transactions := mstat.transactions
- offset := mstat.offsetModulator.Offset
- modulator := mstat.offsetModulator.Modulator
- bloom := mstat.bloomSize
- nextTS := mstat.nextMsgMinDelay
- // emulator peer addresses are just an int
- destIndex, _ := strconv.Atoi(mstat.peerAddress)
-
- destName := e.node.emulator.nodes[destIndex].name
-
- if e.longestName == 0 {
- for _, node := range e.node.emulator.nodes {
- if len(node.name) > e.longestName {
- e.longestName = len(node.name) + 1
- }
- }
- }
-
- elapsed := e.node.emulator.clock.Since().Milliseconds()
- out := fmt.Sprintf("%3d.%03d ", elapsed/1000, elapsed%1000)
- out += fmt.Sprintf("%"+fmt.Sprintf("%d", e.longestName)+"s", e.node.name)
-
- bfColor := hiblack
- if bloom > 0 {
- bfColor = higreen
- }
- nextTSColor := hiblack
- if nextTS > 0 {
- nextTSColor = higreen
- }
- mid := fmt.Sprintf("Round %s Txns %s Req [%3d/%3d] %s %s",
- wrapRollingColor(int(round), fmt.Sprintf("%2d", round)),
- wrapRollingColor(transactions, fmt.Sprintf("%3d", transactions)),
- offset,
- modulator,
- wrapColor(bfColor, "BF"),
- wrapColor(nextTSColor, "TS"),
- )
- if mode == modeOutgoing {
- out += wrapRollingLowColor(seq, " [ ")
- out += mid + wrapRollingLowColor(seq, " --> ") + strings.Repeat(" ", 20)
- out += wrapColor(hiblack, " ] ")
- } else {
- out += wrapColor(hiblack, " [ ")
- out += strings.Repeat(" ", 20) + wrapRollingLowColor(seq, " <-- ") + mid
- out += wrapRollingLowColor(seq, " ] ")
- }
-
- out += fmt.Sprintf("%"+fmt.Sprintf("%d", e.longestName)+"s", destName)
- if testing.Verbose() {
- fmt.Printf("%s\n", out)
- }
-}
-
-func wrapRollingLowColor(color int, s string) (out string) {
- return wrapColor(lowColors[color%len(lowColors)], s)
-}
-
-func wrapRollingColor(color int, s string) (out string) {
- return wrapColor(colors[color%len(colors)], s)
-}
-func wrapColor(color int, s string) (out string) {
- return fmt.Sprintf("%s[1;%dm%s%s[1;%dm", escape, color, s, escape, reset)
-}
diff --git a/txnsync/emulatorNode_test.go b/txnsync/emulatorNode_test.go
deleted file mode 100644
index 378f1622f..000000000
--- a/txnsync/emulatorNode_test.go
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "errors"
- "fmt"
- "sort"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-type queuedSentMessageCallback struct {
- callback SendMessageCallback
- seq uint64
-}
-type queuedMessage struct {
- bytes []byte
- readyAt time.Duration
-}
-type networkPeer struct {
- peer *Peer
- uploadSpeed uint64
- downloadSpeed uint64
- isOutgoing bool
- outSeq uint64
- inSeq uint64
- target int
-
- messageQ []queuedMessage // incoming message queue
-
- mu sync.Mutex `algofix:"allow sync.Mutex"`
-
- deferredSentMessages []queuedSentMessageCallback // outgoing messages callback queue
-}
-
-// emulatedNode implements the NodeConnector interface
-type emulatedNode struct {
- externalEvents chan Event
- emulator *emulator
- peers map[int]*networkPeer
- nodeIndex int
- expiredTx []pooldata.SignedTxGroup
- txpoolEntries []pooldata.SignedTxGroup
- txpoolIds map[transactions.Txid]bool
- latestLocallyOriginatedGroupCounter uint64
- name string
- blocked chan struct{}
- mu sync.Mutex `algofix:"allow sync.Mutex"`
- txpoolGroupCounter uint64
- blockingEnabled bool
- nodeBlocked chan struct{} // channel is closed when node is blocked.
- nodeRunning chan struct{} // channel is closed when node is running.
-}
-
-func makeEmulatedNode(emulator *emulator, nodeIdx int) *emulatedNode {
- en := &emulatedNode{
- emulator: emulator,
- peers: make(map[int]*networkPeer),
- externalEvents: make(chan Event, 10000),
- nodeIndex: nodeIdx,
- txpoolIds: make(map[transactions.Txid]bool),
- name: emulator.scenario.netConfig.nodes[nodeIdx].name,
- blockingEnabled: true,
- nodeBlocked: make(chan struct{}, 1),
- nodeRunning: make(chan struct{}, 1),
- }
- close(en.nodeRunning)
-
- // add outgoing connections
- for _, conn := range emulator.scenario.netConfig.nodes[nodeIdx].outgoingConnections {
- en.peers[conn.target] = &networkPeer{
- uploadSpeed: conn.uploadSpeed,
- downloadSpeed: conn.downloadSpeed,
- isOutgoing: true,
- target: conn.target,
- }
- }
- // add incoming connections
- for nodeID, nodeConfig := range emulator.scenario.netConfig.nodes {
- if nodeID == nodeIdx {
- continue
- }
- for _, conn := range nodeConfig.outgoingConnections {
- if conn.target != nodeIdx {
- continue
- }
- // the upload & download speeds are in reverse. This isn't a bug since we want the incoming
- // connection to be the opposite side of the connection.
- en.peers[nodeID] = &networkPeer{
- uploadSpeed: conn.downloadSpeed,
- downloadSpeed: conn.uploadSpeed,
- isOutgoing: false,
- target: nodeID,
- }
- }
- }
- return en
-}
-
-func (n *emulatedNode) Events() <-chan Event {
- return n.externalEvents
-}
-
-func (n *emulatedNode) NotifyMonitor() chan struct{} {
- var c chan struct{}
- n.mu.Lock()
- if n.blockingEnabled {
- c = make(chan struct{})
- n.blocked = c
- close(n.nodeBlocked)
- n.nodeRunning = make(chan struct{}, 1)
- n.mu.Unlock()
- <-c
- n.mu.Lock()
- close(n.nodeRunning)
- n.nodeBlocked = make(chan struct{}, 1)
- n.mu.Unlock()
- // return a closed channel.
- return c
- }
- n.mu.Unlock()
- // return an open channel
- return make(chan struct{})
-}
-func (n *emulatedNode) disableBlocking() {
- n.mu.Lock()
- n.blockingEnabled = false
- n.mu.Unlock()
- n.unblock()
-}
-func (n *emulatedNode) unblock() {
- n.mu.Lock()
- // wait until the state changes to StateMachineRunning
- select {
- case <-n.nodeBlocked:
- // we're blocked.
- if n.blocked != nil {
- close(n.blocked)
- n.blocked = nil
- }
- runningCh := n.nodeRunning
- n.mu.Unlock()
- <-runningCh
- return
- default:
- }
- n.mu.Unlock()
-}
-
-func (n *emulatedNode) waitBlocked() {
- n.mu.Lock()
- select {
- case <-n.nodeRunning:
- blockedCh := n.nodeBlocked
- n.mu.Unlock()
- <-blockedCh
- return
- default:
- }
- n.mu.Unlock()
-}
-
-func (n *emulatedNode) GetCurrentRoundSettings() RoundSettings {
- return RoundSettings{
- Round: n.emulator.currentRound,
- FetchTransactions: true,
- }
-
-}
-func (n *emulatedNode) Clock() timers.WallClock {
- return n.emulator.clock.Zero().(timers.WallClock)
-}
-
-func (n *emulatedNode) Random(x uint64) (out uint64) {
- limit := x
- x += uint64(n.nodeIndex) * 997
- x += uint64(n.emulator.currentRound) * 797
- x += uint64(n.emulator.lastRandom) * 797
- bytes := make([]byte, 8)
- for i := 0; i < 8; i++ {
- bytes[i] = byte(x >> (i * 8))
- }
- digest := crypto.Hash(bytes)
- out = 0
- for i := 0; i < 8; i++ {
- out = out << 8
- out += uint64(digest[i])
- }
- out = out % limit
- n.emulator.lastRandom ^= out
- return out
-}
-
-func (n *emulatedNode) orderedPeers() (out []*networkPeer) {
- peerToIndex := make(map[*networkPeer]int)
- for idx, peer := range n.peers {
- out = append(out, peer)
- peerToIndex[peer] = idx
- }
- // sort the peers, which we need in order to make the test deterministic.
- sort.Slice(out, func(i, j int) bool {
- netPeer1 := out[i]
- netPeer2 := out[j]
- return peerToIndex[netPeer1] < peerToIndex[netPeer2]
- })
- return
-}
-
-func (n *emulatedNode) GetPeers() (out []PeerInfo) {
- for _, peer := range n.orderedPeers() {
- out = append(out, PeerInfo{TxnSyncPeer: peer.peer, NetworkPeer: peer, IsOutgoing: peer.isOutgoing})
- }
- return out
-}
-
-func (n *emulatedNode) GetPeer(p interface{}) PeerInfo {
- netPeer := p.(*networkPeer)
- return PeerInfo{
- TxnSyncPeer: netPeer.peer,
- IsOutgoing: netPeer.isOutgoing,
- NetworkPeer: p,
- }
-}
-
-func (n *emulatedNode) UpdatePeers(txPeers []*Peer, netPeers []interface{}, _ uint64) {
- for i, peer := range netPeers {
- netPeer := peer.(*networkPeer)
- netPeer.peer = txPeers[i]
- }
-}
-
-func (n *emulatedNode) enqueueMessage(from int, msg queuedMessage) {
- n.peers[from].mu.Lock()
- baseTime := n.emulator.clock.Since()
- if len(n.peers[from].messageQ) > 0 {
- if n.peers[from].messageQ[len(n.peers[from].messageQ)-1].readyAt > baseTime {
- baseTime = n.peers[from].messageQ[len(n.peers[from].messageQ)-1].readyAt
- }
- }
- // the message bytes need to be copied, so that the originating bytes could be safely deleted.
- msgBytes := make([]byte, len(msg.bytes))
- copy(msgBytes[:], msg.bytes[:])
- n.peers[from].messageQ = append(n.peers[from].messageQ, queuedMessage{bytes: msgBytes, readyAt: baseTime + msg.readyAt})
- n.peers[from].mu.Unlock()
-}
-
-func (n *emulatedNode) SendPeerMessage(netPeer interface{}, msg []byte, callback SendMessageCallback) {
- peer := netPeer.(*networkPeer)
- otherNode := n.emulator.nodes[peer.target]
- sendTime := time.Duration(len(msg)) * time.Second / time.Duration(peer.uploadSpeed)
- otherNode.enqueueMessage(n.nodeIndex, queuedMessage{bytes: msg, readyAt: sendTime})
-
- peer.deferredSentMessages = append(peer.deferredSentMessages, queuedSentMessageCallback{callback: callback, seq: peer.outSeq})
- peer.outSeq++
-}
-
-func (n *emulatedNode) GetPeerLatency(netPeer interface{}) time.Duration {
- return 0
-}
-
-func (n *emulatedNode) GetPendingTransactionGroups() ([]pooldata.SignedTxGroup, uint64) {
- return n.txpoolEntries, n.latestLocallyOriginatedGroupCounter
-}
-
-func (n *emulatedNode) IncomingTransactionGroups(peer *Peer, messageSeq uint64, txGroups []pooldata.SignedTxGroup) (transactionPoolSize int) {
- // add to transaction pool.
- duplicateMessage := 0
- duplicateMessageSize := 0
- encodingBuf := protocol.GetEncodingBuf()
- transactionPoolSize = len(n.txpoolEntries)
- for _, group := range txGroups {
- if group.Transactions[0].Txn.LastValid < n.emulator.currentRound {
- continue
- }
- txID := group.Transactions[0].ID()
- if n.txpoolIds[txID] {
- duplicateMessage++
- duplicateMessageSize += len(group.Transactions[0].Txn.Note)
- continue
- }
- n.txpoolIds[txID] = true
- group.GroupCounter = n.txpoolGroupCounter
- n.txpoolGroupCounter++
- group.GroupTransactionID = group.Transactions.ID()
- for _, txn := range group.Transactions {
- encodingBuf = encodingBuf[:0]
- group.EncodedLength += len(txn.MarshalMsg(encodingBuf))
- }
- n.txpoolEntries = append(n.txpoolEntries, group)
- }
- protocol.PutEncodingBuf(encodingBuf)
- if duplicateMessage > 0 && testing.Verbose() {
- fmt.Printf("%s : %d duplicate messages recieved\n", n.name, duplicateMessage)
- }
- atomic.AddUint64(&n.emulator.totalDuplicateTransactions, uint64(duplicateMessage))
- atomic.AddUint64(&n.emulator.totalDuplicateTransactionSize, uint64(duplicateMessageSize))
- select {
- case peer.GetTransactionPoolAckChannel() <- messageSeq:
- default:
- panic(errors.New("IncomingTransactionGroups was unable to write messageSeq to the ack channel"))
- }
- return
-}
-
-func (n *emulatedNode) step() {
- msgHandler := n.emulator.syncers[n.nodeIndex].GetIncomingMessageHandler()
- now := n.emulator.clock.Since()
- // check if we have any pending network messages and forward them.
-
- for _, peer := range n.orderedPeers() {
- peer.mu.Lock()
-
- for i := len(peer.deferredSentMessages); i > 0; i-- {
- dm := peer.deferredSentMessages[0]
- peer.deferredSentMessages = peer.deferredSentMessages[1:]
- peer.mu.Unlock()
- err := dm.callback(true, dm.seq)
- if err != nil {
- panic(err)
- }
- n.unblock()
- n.waitBlocked()
- peer.mu.Lock()
- }
-
- for i := len(peer.messageQ); i > 0; i-- {
- if peer.messageQ[0].readyAt > now {
- break
- }
-
- msgBytes := peer.messageQ[0].bytes
- msgInSeq := peer.inSeq
-
- peer.inSeq++
- peer.messageQ = peer.messageQ[1:]
-
- peer.mu.Unlock()
-
- msgHandler(peer, peer.peer, msgBytes, msgInSeq, 0)
- n.unblock()
- n.waitBlocked()
- peer.mu.Lock()
-
- }
- peer.mu.Unlock()
- }
-
-}
-func (n *emulatedNode) onNewRound(round basics.Round, hasParticipationKeys bool) {
- // if this is a relay, then we always want to fetch transactions, regardless if we have participation keys.
- fetchTransactions := hasParticipationKeys
- if n.emulator.scenario.netConfig.nodes[n.nodeIndex].isRelay {
- fetchTransactions = true
- }
-
- for i := len(n.txpoolEntries) - 1; i >= 0; i-- {
- if n.txpoolEntries[i].Transactions[0].Txn.LastValid < round {
- delete(n.txpoolIds, n.txpoolEntries[i].Transactions[0].ID())
- n.expiredTx = append(n.expiredTx, n.txpoolEntries[i])
- n.txpoolEntries = append(n.txpoolEntries[0:i], n.txpoolEntries[i+1:]...)
- }
- }
-
- n.externalEvents <- MakeNewRoundEvent(round, fetchTransactions)
-}
-
-func (n *emulatedNode) onNewTransactionPoolEntry() {
- n.externalEvents <- MakeTransactionPoolChangeEvent(len(n.txpoolEntries), false)
-}
-
-func (p *networkPeer) GetAddress() string {
- return fmt.Sprintf("%d", p.target)
-}
diff --git a/txnsync/emulatorTimer_test.go b/txnsync/emulatorTimer_test.go
deleted file mode 100644
index f6d0dc57b..000000000
--- a/txnsync/emulatorTimer_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "sort"
- "sync"
- "time"
-
- "github.com/algorand/go-algorand/util/timers"
-)
-
-// guidedClock implements the WallClock interface
-type guidedClock struct {
- sync.Mutex `algofix:"allow sync.Mutex"`
- zero time.Time
- adv time.Duration
- timers map[time.Duration]chan time.Time
- children []*guidedClock
-}
-
-func makeGuidedClock() *guidedClock {
- return &guidedClock{
- zero: time.Now(),
- }
-}
-func (g *guidedClock) Zero() timers.Clock {
- // the real monotonic clock doesn't return the same clock object, which is fine.. but for our testing
- // we want to keep the same clock object so that we can tweak with it.
- child := &guidedClock{
- zero: g.zero.Add(g.adv),
- }
- g.Lock()
- defer g.Unlock()
- g.children = append(g.children, child)
- return child
-}
-
-func (g *guidedClock) TimeoutAt(delta time.Duration) <-chan time.Time {
- if delta <= g.adv {
- c := make(chan time.Time, 1)
- close(c)
- return c
- }
- g.Lock()
- defer g.Unlock()
- if g.timers == nil {
- g.timers = make(map[time.Duration]chan time.Time)
- }
- c, has := g.timers[delta]
- if has {
- return c
- }
- c = make(chan time.Time, 1)
- g.timers[delta] = c
- return c
-}
-
-func (g *guidedClock) Encode() []byte {
- return []byte{}
-}
-func (g *guidedClock) Decode([]byte) (timers.Clock, error) {
- return &guidedClock{}, nil
-}
-
-func (g *guidedClock) Since() time.Duration {
- return g.adv
-}
-
-func (g *guidedClock) DeadlineMonitorAt(at time.Duration) timers.DeadlineMonitor {
- return timers.MakeMonotonicDeadlineMonitor(g, at)
-}
-
-func (g *guidedClock) Advance(adv time.Duration) {
- g.adv += adv
-
- type entryStruct struct {
- duration time.Duration
- ch chan time.Time
- }
- expiredClocks := []entryStruct{}
- g.Lock()
- // find all the expired clocks.
- for delta, ch := range g.timers {
- if delta < g.adv {
- expiredClocks = append(expiredClocks, entryStruct{delta, ch})
- }
- }
- sort.SliceStable(expiredClocks, func(i, j int) bool {
- return expiredClocks[i].duration < expiredClocks[j].duration
- })
-
- // remove from map
- for _, entry := range expiredClocks {
- delete(g.timers, entry.duration)
- }
- g.Unlock()
- // fire expired clocks
- for _, entry := range expiredClocks {
- entry.ch <- g.zero.Add(g.adv)
- close(entry.ch)
- }
- g.Lock()
- defer g.Unlock()
- for _, child := range g.children {
- child.Advance(adv)
- }
-}
diff --git a/txnsync/emulator_test.go b/txnsync/emulator_test.go
deleted file mode 100644
index 18a4633b4..000000000
--- a/txnsync/emulator_test.go
+++ /dev/null
@@ -1,781 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-type connectionSettings struct {
- uploadSpeed uint64 // measured in bytes/second
- downloadSpeed uint64 // measured in bytes/second
- target int // node index in the networkConfiguration
-}
-
-type nodeConfiguration struct {
- outgoingConnections []connectionSettings
- name string
- isRelay bool
-}
-
-// networkConfiguration defines the nodes setup and their connections.
-type networkConfiguration struct {
- nodes []nodeConfiguration
-}
-
-// initialTransactionsAllocation defines how many transaction ( and what their sizes ) would be.
-type initialTransactionsAllocation struct {
- node int // node index in the networkConfiguration
- transactionsCount int
- transactionSize int
- expirationRound basics.Round
-}
-
-// scenario defines the emulator test scenario, which includes the network configuration,
-// initial transaction distribution, test duration, dynamic transactions creation as well
-// as expected test outcomes.
-type scenario struct {
- netConfig networkConfiguration
- testDuration time.Duration
- step time.Duration
- initialAlloc []initialTransactionsAllocation
- expectedResults emulatorResult
-}
-
-func TestEmulatedTrivialTransactionsExchange(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- testScenario := scenario{
- netConfig: networkConfiguration{
- nodes: []nodeConfiguration{
- {
- name: "relay",
- isRelay: true,
- },
- {
- name: "node",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- },
- },
- testDuration: 500 * time.Millisecond,
- initialAlloc: []initialTransactionsAllocation{
- {
- node: 1,
- transactionsCount: 1,
- transactionSize: 250,
- expirationRound: basics.Round(5),
- },
- },
- expectedResults: emulatorResult{
- nodes: []nodeTransactions{
- {
- nodeTransaction{
- expirationRound: 5,
- transactionSize: 250,
- },
- },
- {
- nodeTransaction{
- expirationRound: 5,
- transactionSize: 250,
- },
- },
- },
- },
- step: 1 * time.Millisecond,
- }
- t.Run("NonRelay_To_Relay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "node"
- testScenario.initialAlloc[0].node = 1
- emulateScenario(t, testScenario)
- })
- t.Run("Relay_To_NonRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "node"
- testScenario.initialAlloc[0].node = 0
- emulateScenario(t, testScenario)
- })
- t.Run("OutgoingRelay_To_IncomingRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "incoming-relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "outgoing-relay"
- testScenario.netConfig.nodes[1].isRelay = true
- testScenario.initialAlloc[0].node = 1
- emulateScenario(t, testScenario)
- })
- t.Run("IncomingRelay_To_OutgoingRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "incoming-relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "outgoing-relay"
- testScenario.netConfig.nodes[1].isRelay = true
- testScenario.initialAlloc[0].node = 0
- emulateScenario(t, testScenario)
- })
-}
-
-func TestEmulatedTwoNodesToRelaysTransactionsExchange(t *testing.T) {
- partitiontest.PartitionTest(t)
- // this test creates the following network mode:
- //
- // relay1 ----------> relay2
- // ^ ^
- // | |
- // node1 node2
- //
-
- testScenario := scenario{
- netConfig: networkConfiguration{
- nodes: []nodeConfiguration{
- {
- name: "relay1",
- isRelay: true,
- },
- {
- name: "relay2",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- {
- name: "node1",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- {
- name: "node2",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- },
- },
- },
- },
- testDuration: 1000 * time.Millisecond,
- initialAlloc: []initialTransactionsAllocation{
- {
- node: 2,
- transactionsCount: 1,
- transactionSize: 250,
- expirationRound: basics.Round(5),
- },
- },
- expectedResults: emulatorResult{
- nodes: []nodeTransactions{
- {
- nodeTransaction{
- expirationRound: 5,
- transactionSize: 250,
- },
- },
- {
- nodeTransaction{
- expirationRound: 5,
- transactionSize: 250,
- },
- },
- {
- nodeTransaction{
- expirationRound: 5,
- transactionSize: 250,
- },
- },
- {
- nodeTransaction{
- expirationRound: 5,
- transactionSize: 250,
- },
- },
- },
- },
- step: 1 * time.Millisecond,
- }
- emulateScenario(t, testScenario)
-}
-
-func TestEmulatedLargeSetTransactionsExchange(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- testScenario := scenario{
- netConfig: networkConfiguration{
- nodes: []nodeConfiguration{
- {
- name: "relay",
- isRelay: true,
- },
- {
- name: "node",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- },
- },
- testDuration: 1000 * time.Millisecond,
- initialAlloc: []initialTransactionsAllocation{
- {
- node: 1,
- transactionsCount: 100,
- transactionSize: 800,
- expirationRound: basics.Round(5),
- },
- },
- expectedResults: emulatorResult{
- nodes: []nodeTransactions{
- {},
- {},
- },
- },
- step: 1 * time.Millisecond / 10,
- }
- // update the expected results to have the correct number of entries.
- for i := 0; i < testScenario.initialAlloc[0].transactionsCount; i++ {
- for n := range testScenario.expectedResults.nodes {
- testScenario.expectedResults.nodes[n] = append(testScenario.expectedResults.nodes[n], nodeTransaction{expirationRound: testScenario.initialAlloc[0].expirationRound, transactionSize: testScenario.initialAlloc[0].transactionSize})
- }
- }
-
- t.Run("NonRelay_To_Relay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "node"
- testScenario.initialAlloc[0].node = 1
- emulateScenario(t, testScenario)
- })
-
- t.Run("Relay_To_NonRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "node"
- testScenario.initialAlloc[0].node = 0
- emulateScenario(t, testScenario)
- })
-
- t.Run("OutgoingRelay_To_IncomingRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "incoming-relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "outgoing-relay"
- testScenario.netConfig.nodes[1].isRelay = true
- testScenario.initialAlloc[0].node = 1
- emulateScenario(t, testScenario)
- })
-
- t.Run("OutgoingRelay_To_IncomingRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "incoming-relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "outgoing-relay"
- testScenario.netConfig.nodes[1].isRelay = true
- testScenario.initialAlloc[0].node = 0
- emulateScenario(t, testScenario)
- })
-}
-
-func TestEmulatedLargeSetTransactionsExchangeIntermixed(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- testScenario := scenario{
- netConfig: networkConfiguration{
- nodes: []nodeConfiguration{
- {
- name: "relay",
- isRelay: true,
- },
- {
- name: "node",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- },
- },
- initialAlloc: []initialTransactionsAllocation{
- {
- node: 0,
- transactionsCount: 200,
- transactionSize: 400,
- expirationRound: basics.Round(5),
- },
- {
- node: 1,
- transactionsCount: 100,
- transactionSize: 800,
- expirationRound: basics.Round(5),
- },
- },
- expectedResults: emulatorResult{
- nodes: []nodeTransactions{
- {},
- {},
- },
- },
- step: 1 * time.Millisecond / 10,
- testDuration: 1200 * time.Millisecond,
- }
- // update the expected results to have the correct number of entries.
- for j := range testScenario.initialAlloc {
- for i := 0; i < testScenario.initialAlloc[j].transactionsCount; i++ {
- for n := range testScenario.expectedResults.nodes {
- testScenario.expectedResults.nodes[n] = append(testScenario.expectedResults.nodes[n], nodeTransaction{expirationRound: testScenario.initialAlloc[j].expirationRound, transactionSize: testScenario.initialAlloc[j].transactionSize})
- }
- }
- }
-
- t.Run("NonRelay_To_Relay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "node"
- testScenario.initialAlloc[0].node = 1
- emulateScenario(t, testScenario)
- })
-
- t.Run("Relay_To_NonRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "node"
- testScenario.initialAlloc[0].node = 0
- emulateScenario(t, testScenario)
- })
-
- t.Run("OutgoingRelay_To_IncomingRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "incoming-relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "outgoing-relay"
- testScenario.netConfig.nodes[1].isRelay = true
- testScenario.initialAlloc[0].node = 1
- emulateScenario(t, testScenario)
- })
-
- t.Run("IncomingRelay_To_OutgoingRelay", func(t *testing.T) {
- testScenario.netConfig.nodes[0].name = "incoming-relay"
- testScenario.netConfig.nodes[0].isRelay = true
- testScenario.netConfig.nodes[1].name = "outgoing-relay"
- testScenario.netConfig.nodes[1].isRelay = true
- testScenario.initialAlloc[0].node = 0
- emulateScenario(t, testScenario)
- })
-}
-
-func TestEmulatedNonRelayToMultipleRelays(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- testScenario := scenario{
- netConfig: networkConfiguration{
- nodes: []nodeConfiguration{
- {
- name: "relay-1",
- isRelay: true,
- },
- {
- name: "relay-2",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- {
- name: "relay-3",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- {
- name: "node-1",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 2,
- },
- },
- },
- {
- name: "node-2",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 2,
- },
- },
- },
- },
- },
- initialAlloc: []initialTransactionsAllocation{
- {
- node: 4, // i.e. node-2
- transactionsCount: 1000,
- transactionSize: 250,
- expirationRound: basics.Round(5),
- },
- },
- expectedResults: emulatorResult{
- nodes: []nodeTransactions{
- {},
- {},
- {},
- {},
- {},
- },
- },
- step: 1 * time.Millisecond / 10,
- testDuration: 2000 * time.Millisecond,
- }
- // update the expected results to have the correct number of entries.
- for j := range testScenario.initialAlloc {
- for i := 0; i < testScenario.initialAlloc[j].transactionsCount; i++ {
- for n := range testScenario.expectedResults.nodes {
- testScenario.expectedResults.nodes[n] = append(testScenario.expectedResults.nodes[n], nodeTransaction{expirationRound: testScenario.initialAlloc[j].expirationRound, transactionSize: testScenario.initialAlloc[j].transactionSize})
- }
- }
- }
-
- emulateScenario(t, testScenario)
-}
-
-func TestEmulatedTwoNodesFourRelays(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- testScenario := scenario{
- netConfig: networkConfiguration{
- nodes: []nodeConfiguration{
- {
- name: "relay-1",
- isRelay: true,
- },
- {
- name: "relay-2",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 3,
- },
- },
- },
- {
- name: "relay-3",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- {
- name: "relay-4",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 2,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- {
- name: "node-1",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 2,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 3,
- },
- },
- },
- {
- name: "node-2",
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 2,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 3,
- },
- },
- },
- },
- },
- initialAlloc: []initialTransactionsAllocation{
- {
- node: 4, // i.e. node-1
- transactionsCount: 3000,
- transactionSize: 270,
- expirationRound: basics.Round(5),
- },
- {
- node: 5, // i.e. node-2
- transactionsCount: 1500,
- transactionSize: 320,
- expirationRound: basics.Round(5),
- },
- },
- expectedResults: emulatorResult{
- nodes: []nodeTransactions{
- {},
- {},
- {},
- {},
- {},
- {},
- },
- },
- step: 1 * time.Millisecond / 10,
- testDuration: 2100 * time.Millisecond,
- }
- // update the expected results to have the correct number of entries.
- for j := range testScenario.initialAlloc {
- for i := 0; i < testScenario.initialAlloc[j].transactionsCount; i++ {
- for n := range testScenario.expectedResults.nodes {
- testScenario.expectedResults.nodes[n] = append(testScenario.expectedResults.nodes[n], nodeTransaction{expirationRound: testScenario.initialAlloc[j].expirationRound, transactionSize: testScenario.initialAlloc[j].transactionSize})
- }
- }
- }
-
- emulateScenario(t, testScenario)
-}
-
-func TestEmulatedTwentyNodesFourRelays(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- if testing.Short() {
- t.Skip("TestEmulatedTwentyNodesFourRelays is a long test and therefore was skipped")
- }
- testScenario := scenario{
- netConfig: networkConfiguration{
- nodes: []nodeConfiguration{
- {
- name: "relay-1",
- isRelay: true,
- },
- {
- name: "relay-2",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 3,
- },
- },
- },
- {
- name: "relay-3",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- {
- name: "relay-4",
- isRelay: true,
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 2,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- },
- },
- },
- },
- initialAlloc: []initialTransactionsAllocation{},
- expectedResults: emulatorResult{
- nodes: []nodeTransactions{
- {},
- {},
- {},
- {},
- },
- },
- step: 1 * time.Millisecond / 10,
- testDuration: 2000 * time.Millisecond,
- }
-
- // add nodes.
- for i := 0; i < 20; i++ {
- testScenario.netConfig.nodes = append(testScenario.netConfig.nodes, nodeConfiguration{
- name: fmt.Sprintf("node-%d", i+1),
- outgoingConnections: []connectionSettings{
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 0,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 1,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 2,
- },
- {
- uploadSpeed: 1000000,
- downloadSpeed: 1000000,
- target: 3,
- },
- },
- })
-
- testScenario.initialAlloc = append(testScenario.initialAlloc, initialTransactionsAllocation{
- node: 4 + i, // i.e. node-1 + i
- transactionsCount: 250,
- transactionSize: 270,
- expirationRound: basics.Round(5),
- })
-
- testScenario.expectedResults.nodes = append(testScenario.expectedResults.nodes, nodeTransactions{})
- }
-
- // update the expected results to have the correct number of entries.
- for j := range testScenario.initialAlloc {
- for i := 0; i < testScenario.initialAlloc[j].transactionsCount; i++ {
- for n := range testScenario.expectedResults.nodes {
- testScenario.expectedResults.nodes[n] = append(testScenario.expectedResults.nodes[n], nodeTransaction{expirationRound: testScenario.initialAlloc[j].expirationRound, transactionSize: testScenario.initialAlloc[j].transactionSize})
- }
- }
- }
-
- emulateScenario(t, testScenario)
-}
diff --git a/txnsync/encodedgroups_test.go b/txnsync/encodedgroups_test.go
deleted file mode 100644
index 0f6cef4f9..000000000
--- a/txnsync/encodedgroups_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "math"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestBadBitmask(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- txnGroups, genesisID, genesisHash, err := txnGroupsData(96)
- require.NoError(t, err)
-
- var s syncState
- ptg, err := badEncodeTransactionGroups(t, &s, txnGroups, 0)
- require.NoError(t, err)
- require.Equal(t, ptg.CompressionFormat, compressionFormatDeflate)
- _, err = decodeTransactionGroups(ptg, genesisID, genesisHash)
- require.Equal(t, errIndexNotFound, err)
-}
-
-// corrupted bitmask may bcause panic during decoding. This test is to make sure it is an error and not a panic
-func badEncodeTransactionGroups(t *testing.T, s *syncState, inTxnGroups []pooldata.SignedTxGroup, dataExchangeRate uint64) (packedTransactionGroups, error) {
- txnCount := 0
- for _, txGroup := range inTxnGroups {
- txnCount += len(txGroup.Transactions)
- }
- stub := txGroupsEncodingStub{
- TotalTransactionsCount: uint64(txnCount),
- TransactionGroupCount: uint64(len(inTxnGroups)),
- TransactionGroupSizes: make([]byte, 0, len(inTxnGroups)),
- }
-
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- index := 0
- for _, txGroup := range inTxnGroups {
- if len(txGroup.Transactions) > 1 {
- for _, txn := range txGroup.Transactions {
- err := stub.deconstructSignedTransaction(index, &txn)
- require.NoError(t, err)
- index++
- }
- stub.TransactionGroupSizes = append(stub.TransactionGroupSizes, byte(len(txGroup.Transactions)-1))
- }
- }
- compactNibblesArray(&stub.TransactionGroupSizes)
- for _, txGroup := range inTxnGroups {
- if len(txGroup.Transactions) == 1 {
- for _, txn := range txGroup.Transactions {
- if !txn.Txn.Group.MsgIsZero() {
- if len(stub.BitmaskGroup) == 0 {
- stub.BitmaskGroup = make(bitmask, bitmaskLen)
- }
- stub.BitmaskGroup.setBit(index)
- }
- err := stub.deconstructSignedTransaction(index, &txn)
- require.NoError(t, err)
- index++
- }
- }
- }
-
- stub.BitmaskAuthAddr.trimBitmask(int(stub.TotalTransactionsCount))
- stub.finishDeconstructMsigs()
- stub.finishDeconstructLsigs()
- stub.BitmaskSig.trimBitmask(int(stub.TotalTransactionsCount))
-
- stub.finishDeconstructTxType()
- // corrupted bitmask
- stub.BitmaskTxType = make(bitmask, bitmaskLen*10)
- stub.BitmaskTxType.setBit(bitmaskLen*10 - 10)
-
- stub.finishDeconstructTxnHeader()
- stub.finishDeconstructKeyregTxnFields()
- stub.finishDeconstructPaymentTxnFields()
- stub.finishDeconstructAssetConfigTxnFields()
- stub.finishDeconstructAssetTransferTxnFields()
- stub.finishDeconstructAssetFreezeTxnFields()
- stub.finishDeconstructApplicationCallTxnFields()
- stub.finishDeconstructCompactCertTxnFields()
-
- encoded := stub.MarshalMsg(getMessageBuffer())
-
- // check if time saved by compression: estimatedDeflateCompressionGains * len(msg) / dataExchangeRate
- // is greater than by time spent during compression: len(msg) / estimatedDeflateCompressionSpeed
- if len(encoded) > minEncodedTransactionGroupsCompressionThreshold && float32(dataExchangeRate) < (estimatedDeflateCompressionGains*estimatedDeflateCompressionSpeed) {
- compressedBytes, compressionFormat := s.compressTransactionGroupsBytes(encoded)
- if compressionFormat != compressionFormatNone {
- packedGroups := packedTransactionGroups{
- Bytes: compressedBytes,
- CompressionFormat: compressionFormat,
- LenDecompressedBytes: uint64(len(encoded)),
- }
- releaseMessageBuffer(encoded)
- return packedGroups, nil
- }
- }
-
- return packedTransactionGroups{
- Bytes: encoded,
- CompressionFormat: compressionFormatNone,
- }, nil
-}
-func TestInvalidByteToTxType(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- for i := len(protocol.TxnTypes); i <= math.MaxUint8; i++ {
- require.Equal(t, protocol.UnknownTx, ByteToTxType(byte(i)))
- }
-}
diff --git a/txnsync/encodedgroupsmarshalers.go b/txnsync/encodedgroupsmarshalers.go
deleted file mode 100644
index 583f9ede1..000000000
--- a/txnsync/encodedgroupsmarshalers.go
+++ /dev/null
@@ -1,795 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "fmt"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
-)
-
-func compactNibblesArray(b *[]byte) {
- if len(*b)%2 == 1 {
- *b = append(*b, byte(0))
- }
- for index := 0; index*2 < len(*b); index++ {
- (*b)[index] = (*b)[index*2]*16 + (*b)[index*2+1]
- }
- *b = (*b)[0 : len(*b)/2]
-}
-
-// deconstructs SignedTxn's into lists of fields and bitmasks
-func (stub *txGroupsEncodingStub) deconstructSignedTransaction(i int, txn *transactions.SignedTxn) error {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Sig.MsgIsZero() {
- if len(stub.BitmaskSig) == 0 {
- stub.BitmaskSig = make(bitmask, bitmaskLen)
- stub.Sig = make([]byte, 0, int(stub.TotalTransactionsCount)*len(crypto.Signature{}))
- }
- stub.BitmaskSig.setBit(i)
- stub.Sig = append(stub.Sig, txn.Sig[:]...)
- }
- stub.deconstructMsigs(i, txn)
- stub.deconstructLsigs(i, txn)
- if !txn.AuthAddr.MsgIsZero() {
- if len(stub.BitmaskAuthAddr) == 0 {
- stub.BitmaskAuthAddr = make(bitmask, bitmaskLen)
- stub.AuthAddr = make([]byte, 0, int(stub.TotalTransactionsCount)*crypto.DigestSize)
- }
- stub.BitmaskAuthAddr.setBit(i)
- stub.AuthAddr = append(stub.AuthAddr, txn.AuthAddr[:]...)
- }
- return stub.deconstructTransactions(i, txn)
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructSignedTransactions() {
- stub.BitmaskAuthAddr.trimBitmask(int(stub.TotalTransactionsCount))
- stub.finishDeconstructMsigs()
- stub.finishDeconstructLsigs()
- stub.BitmaskSig.trimBitmask(int(stub.TotalTransactionsCount))
- stub.finishDeconstructTransactions()
-}
-
-func (stub *txGroupsEncodingStub) deconstructMsigs(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if txn.Msig.Version != 0 {
- if len(stub.BitmaskVersion) == 0 {
- stub.BitmaskVersion = make(bitmask, bitmaskLen)
- stub.Version = make([]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskVersion.setBit(i)
- stub.Version = append(stub.Version, txn.Msig.Version)
- }
- if txn.Msig.Threshold != 0 {
- if len(stub.BitmaskThreshold) == 0 {
- stub.BitmaskThreshold = make(bitmask, bitmaskLen)
- stub.Threshold = make([]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskThreshold.setBit(i)
- stub.Threshold = append(stub.Threshold, txn.Msig.Threshold)
- }
- if txn.Msig.Subsigs != nil {
- if len(stub.BitmaskSubsigs) == 0 {
- stub.BitmaskSubsigs = make(bitmask, bitmaskLen)
- stub.Subsigs = make([][]crypto.MultisigSubsig, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskSubsigs.setBit(i)
- stub.Subsigs = append(stub.Subsigs, txn.Msig.Subsigs)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructMsigs() {
- stub.BitmaskVersion.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskThreshold.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskSubsigs.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructLsigs(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if txn.Lsig.Logic != nil {
- if len(stub.BitmaskLogic) == 0 {
- stub.BitmaskLogic = make(bitmask, bitmaskLen)
- stub.Logic = make([][]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskLogic.setBit(i)
- stub.Logic = append(stub.Logic, txn.Lsig.Logic)
- }
- if txn.Lsig.Args != nil {
- if len(stub.BitmaskLogicArgs) == 0 {
- stub.BitmaskLogicArgs = make(bitmask, bitmaskLen)
- stub.LogicArgs = make([][][]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskLogicArgs.setBit(i)
- stub.LogicArgs = append(stub.LogicArgs, txn.Lsig.Args)
- }
- if !txn.Lsig.Sig.MsgIsZero() {
- if len(stub.BitmaskSig) == 0 {
- stub.BitmaskSig = make(bitmask, bitmaskLen)
- stub.Sig = make([]byte, 0, int(stub.TotalTransactionsCount)*len(crypto.Signature{}))
- }
- stub.BitmaskSig.setBit(i)
- stub.Sig = append(stub.Sig, txn.Lsig.Sig[:]...)
- }
- if txn.Lsig.Msig.Version != 0 {
- if len(stub.BitmaskVersion) == 0 {
- stub.BitmaskVersion = make(bitmask, bitmaskLen)
- stub.Version = make([]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskVersion.setBit(i)
- stub.Version = append(stub.Version, txn.Lsig.Msig.Version)
- }
- if txn.Lsig.Msig.Threshold != 0 {
- if len(stub.BitmaskThreshold) == 0 {
- stub.BitmaskThreshold = make(bitmask, bitmaskLen)
- stub.Threshold = make([]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskThreshold.setBit(i)
- stub.Threshold = append(stub.Threshold, txn.Lsig.Msig.Threshold)
- }
- if txn.Lsig.Msig.Subsigs != nil {
- if len(stub.BitmaskSubsigs) == 0 {
- stub.BitmaskSubsigs = make(bitmask, bitmaskLen)
- stub.Subsigs = make([][]crypto.MultisigSubsig, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskSubsigs.setBit(i)
- stub.Subsigs = append(stub.Subsigs, txn.Lsig.Msig.Subsigs)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructLsigs() {
- stub.BitmaskLogic.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskLogicArgs.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructTransactions(i int, txn *transactions.SignedTxn) error {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- txTypeByte, err := TxTypeToByte(txn.Txn.Type)
- if err != nil {
- return fmt.Errorf("failed to deconstructTransactions: %w", err)
- }
- if len(stub.BitmaskTxType) == 0 {
- stub.BitmaskTxType = make(bitmask, bitmaskLen)
- stub.TxType = make([]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.TxType = append(stub.TxType, txTypeByte)
- stub.deconstructTxnHeader(i, txn)
- switch txTypeByte {
- case paymentTx:
- stub.deconstructPaymentTxnFields(i, txn)
- case keyRegistrationTx:
- stub.deconstructKeyregTxnFields(i, txn)
- case assetConfigTx:
- stub.deconstructAssetConfigTxnFields(i, txn)
- case assetTransferTx:
- stub.deconstructAssetTransferTxnFields(i, txn)
- case assetFreezeTx:
- stub.deconstructAssetFreezeTxnFields(i, txn)
- case applicationCallTx:
- stub.deconstructApplicationCallTxnFields(i, txn)
- case compactCertTx:
- stub.deconstructCompactCertTxnFields(i, txn)
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructTransactions() {
- stub.finishDeconstructTxType()
- stub.finishDeconstructTxnHeader()
- stub.finishDeconstructKeyregTxnFields()
- stub.finishDeconstructPaymentTxnFields()
- stub.finishDeconstructAssetConfigTxnFields()
- stub.finishDeconstructAssetTransferTxnFields()
- stub.finishDeconstructAssetFreezeTxnFields()
- stub.finishDeconstructApplicationCallTxnFields()
- stub.finishDeconstructCompactCertTxnFields()
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructTxType() {
- offset := byte(0)
- count := make([]int, len(protocol.TxnTypes))
- maxcount := 0
- for _, t := range stub.TxType {
- count[int(t)]++
- }
- for i, c := range count {
- if c > maxcount {
- offset = byte(i)
- maxcount = c
- }
- }
- newTxTypes := make([]byte, 0, stub.TotalTransactionsCount)
- for i := 0; i < int(stub.TotalTransactionsCount); i++ {
- if stub.TxType[i] != offset {
- stub.BitmaskTxType.setBit(i)
- newTxTypes = append(newTxTypes, stub.TxType[i])
- }
- }
- stub.TxType = newTxTypes
- stub.TxTypeOffset = offset
- compactNibblesArray(&stub.TxType)
- stub.BitmaskTxType.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructTxnHeader(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.Sender.MsgIsZero() {
- if len(stub.BitmaskSender) == 0 {
- stub.BitmaskSender = make(bitmask, bitmaskLen)
- stub.Sender = make([]byte, 0, int(stub.TotalTransactionsCount)*crypto.DigestSize)
- }
- stub.BitmaskSender.setBit(i)
- stub.Sender = append(stub.Sender, txn.Txn.Sender[:]...)
- }
- if !txn.Txn.Fee.MsgIsZero() {
- if len(stub.BitmaskFee) == 0 {
- stub.BitmaskFee = make(bitmask, bitmaskLen)
- stub.Fee = make([]basics.MicroAlgos, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskFee.setBit(i)
- stub.Fee = append(stub.Fee, txn.Txn.Fee)
- }
- if !txn.Txn.FirstValid.MsgIsZero() {
- if len(stub.BitmaskFirstValid) == 0 {
- stub.BitmaskFirstValid = make(bitmask, bitmaskLen)
- stub.FirstValid = make([]basics.Round, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskFirstValid.setBit(i)
- stub.FirstValid = append(stub.FirstValid, txn.Txn.FirstValid)
- }
- if !txn.Txn.LastValid.MsgIsZero() {
- if len(stub.BitmaskLastValid) == 0 {
- stub.BitmaskLastValid = make(bitmask, bitmaskLen)
- stub.LastValid = make([]basics.Round, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskLastValid.setBit(i)
- stub.LastValid = append(stub.LastValid, txn.Txn.LastValid)
- }
- if txn.Txn.Note != nil {
- if len(stub.BitmaskNote) == 0 {
- stub.BitmaskNote = make(bitmask, bitmaskLen)
- stub.Note = make([][]byte, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskNote.setBit(i)
- stub.Note = append(stub.Note, txn.Txn.Note)
- }
- if txn.Txn.GenesisID != "" {
- if len(stub.BitmaskGenesisID) == 0 {
- stub.BitmaskGenesisID = make(bitmask, bitmaskLen)
- }
- stub.BitmaskGenesisID.setBit(i)
- }
- if txn.Txn.Lease != ([32]byte{}) {
- if len(stub.BitmaskLease) == 0 {
- stub.BitmaskLease = make(bitmask, bitmaskLen)
- stub.Lease = make([]byte, 0, int(stub.TotalTransactionsCount)*transactions.LeaseByteLength)
- }
- stub.BitmaskLease.setBit(i)
- stub.Lease = append(stub.Lease, txn.Txn.Lease[:]...)
- }
- if !txn.Txn.RekeyTo.MsgIsZero() {
- if len(stub.BitmaskRekeyTo) == 0 {
- stub.BitmaskRekeyTo = make(bitmask, bitmaskLen)
- stub.RekeyTo = make([]byte, 0, int(stub.TotalTransactionsCount)*crypto.DigestSize)
- }
- stub.BitmaskRekeyTo.setBit(i)
- stub.RekeyTo = append(stub.RekeyTo, txn.Txn.RekeyTo[:]...)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructTxnHeader() {
- stub.BitmaskSender.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskFee.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskFirstValid.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskLastValid.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskNote.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskGenesisID.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskGroup.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskLease.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskRekeyTo.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructKeyregTxnFields(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.VotePK.MsgIsZero() || !txn.Txn.SelectionPK.MsgIsZero() || txn.Txn.VoteKeyDilution != 0 {
- if len(stub.BitmaskKeys) == 0 {
- stub.BitmaskKeys = make(bitmask, bitmaskLen)
- stub.VotePK = make([]byte, 0, stub.TotalTransactionsCount*crypto.PublicKeyByteLength)
- stub.SelectionPK = make([]byte, 0, stub.TotalTransactionsCount*crypto.VrfPubkeyByteLength)
- stub.VoteKeyDilution = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskKeys.setBit(i)
- stub.VotePK = append(stub.VotePK, txn.Txn.VotePK[:]...)
- stub.SelectionPK = append(stub.SelectionPK, txn.Txn.SelectionPK[:]...)
- stub.VoteKeyDilution = append(stub.VoteKeyDilution, txn.Txn.VoteKeyDilution)
- }
- if !txn.Txn.VoteFirst.MsgIsZero() {
- if len(stub.BitmaskVoteFirst) == 0 {
- stub.BitmaskVoteFirst = make(bitmask, bitmaskLen)
- stub.VoteFirst = make([]basics.Round, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskVoteFirst.setBit(i)
- stub.VoteFirst = append(stub.VoteFirst, txn.Txn.VoteFirst)
- }
- if !txn.Txn.VoteLast.MsgIsZero() {
- if len(stub.BitmaskVoteLast) == 0 {
- stub.BitmaskVoteLast = make(bitmask, bitmaskLen)
- stub.VoteLast = make([]basics.Round, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskVoteLast.setBit(i)
- stub.VoteLast = append(stub.VoteLast, txn.Txn.VoteLast)
- }
- if txn.Txn.Nonparticipation {
- if len(stub.BitmaskNonparticipation) == 0 {
- stub.BitmaskNonparticipation = make(bitmask, bitmaskLen)
- }
- stub.BitmaskNonparticipation.setBit(i)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructKeyregTxnFields() {
- stub.BitmaskKeys.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskVoteFirst.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskVoteLast.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskNonparticipation.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructPaymentTxnFields(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.Receiver.MsgIsZero() {
- if len(stub.BitmaskReceiver) == 0 {
- stub.BitmaskReceiver = make(bitmask, bitmaskLen)
- stub.Receiver = make([]byte, 0, int(stub.TotalTransactionsCount)*crypto.DigestSize)
- }
- stub.BitmaskReceiver.setBit(i)
- stub.Receiver = append(stub.Receiver, txn.Txn.Receiver[:]...)
- }
- if !txn.Txn.Amount.MsgIsZero() {
- if len(stub.BitmaskAmount) == 0 {
- stub.BitmaskAmount = make(bitmask, bitmaskLen)
- stub.Amount = make([]basics.MicroAlgos, 0, int(stub.TotalTransactionsCount))
- }
- stub.BitmaskAmount.setBit(i)
- stub.Amount = append(stub.Amount, txn.Txn.Amount)
- }
- if !txn.Txn.CloseRemainderTo.MsgIsZero() {
- if len(stub.BitmaskCloseRemainderTo) == 0 {
- stub.BitmaskCloseRemainderTo = make(bitmask, bitmaskLen)
- stub.CloseRemainderTo = make([]byte, 0, int(stub.TotalTransactionsCount)*crypto.DigestSize)
- }
- stub.BitmaskCloseRemainderTo.setBit(i)
- stub.CloseRemainderTo = append(stub.CloseRemainderTo, txn.Txn.CloseRemainderTo[:]...)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructPaymentTxnFields() {
- stub.BitmaskReceiver.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAmount.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskCloseRemainderTo.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructAssetConfigTxnFields(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.ConfigAsset.MsgIsZero() {
- if len(stub.BitmaskConfigAsset) == 0 {
- stub.BitmaskConfigAsset = make(bitmask, bitmaskLen)
- stub.ConfigAsset = make([]basics.AssetIndex, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskConfigAsset.setBit(i)
- stub.ConfigAsset = append(stub.ConfigAsset, txn.Txn.ConfigAsset)
- }
- stub.deconstructAssetParams(i, txn)
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructAssetConfigTxnFields() {
- stub.BitmaskConfigAsset.trimBitmask(int(stub.TotalTransactionsCount))
- stub.finishDeconstructAssetParams()
-}
-
-func (stub *txGroupsEncodingStub) deconstructAssetParams(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if txn.Txn.AssetParams.Total != 0 {
- if len(stub.BitmaskTotal) == 0 {
- stub.BitmaskTotal = make(bitmask, bitmaskLen)
- stub.Total = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskTotal.setBit(i)
- stub.Total = append(stub.Total, txn.Txn.AssetParams.Total)
- }
- if txn.Txn.AssetParams.Decimals != 0 {
- if len(stub.BitmaskDecimals) == 0 {
- stub.BitmaskDecimals = make(bitmask, bitmaskLen)
- stub.Decimals = make([]uint32, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskDecimals.setBit(i)
- stub.Decimals = append(stub.Decimals, txn.Txn.AssetParams.Decimals)
- }
- if txn.Txn.AssetParams.DefaultFrozen {
- if len(stub.BitmaskDefaultFrozen) == 0 {
- stub.BitmaskDefaultFrozen = make(bitmask, bitmaskLen)
- }
- stub.BitmaskDefaultFrozen.setBit(i)
- }
- if txn.Txn.AssetParams.UnitName != "" {
- if len(stub.BitmaskUnitName) == 0 {
- stub.BitmaskUnitName = make(bitmask, bitmaskLen)
- stub.UnitName = make([]string, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskUnitName.setBit(i)
- stub.UnitName = append(stub.UnitName, txn.Txn.AssetParams.UnitName)
- }
- if txn.Txn.AssetParams.AssetName != "" {
- if len(stub.BitmaskAssetName) == 0 {
- stub.BitmaskAssetName = make(bitmask, bitmaskLen)
- stub.AssetName = make([]string, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskAssetName.setBit(i)
- stub.AssetName = append(stub.AssetName, txn.Txn.AssetParams.AssetName)
- }
- if txn.Txn.AssetParams.URL != "" {
- if len(stub.BitmaskURL) == 0 {
- stub.BitmaskURL = make(bitmask, bitmaskLen)
- stub.URL = make([]string, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskURL.setBit(i)
- stub.URL = append(stub.URL, txn.Txn.AssetParams.URL)
- }
- if txn.Txn.AssetParams.MetadataHash != [32]byte{} {
- if len(stub.BitmaskMetadataHash) == 0 {
- stub.BitmaskMetadataHash = make(bitmask, bitmaskLen)
- stub.MetadataHash = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskMetadataHash.setBit(i)
- stub.MetadataHash = append(stub.MetadataHash, txn.Txn.AssetParams.MetadataHash[:]...)
- }
- if !txn.Txn.AssetParams.Manager.MsgIsZero() {
- if len(stub.BitmaskManager) == 0 {
- stub.BitmaskManager = make(bitmask, bitmaskLen)
- stub.Manager = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskManager.setBit(i)
- stub.Manager = append(stub.Manager, txn.Txn.AssetParams.Manager[:]...)
- }
- if !txn.Txn.AssetParams.Reserve.MsgIsZero() {
- if len(stub.BitmaskReserve) == 0 {
- stub.BitmaskReserve = make(bitmask, bitmaskLen)
- stub.Reserve = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskReserve.setBit(i)
- stub.Reserve = append(stub.Reserve, txn.Txn.AssetParams.Reserve[:]...)
- }
- if !txn.Txn.AssetParams.Freeze.MsgIsZero() {
- if len(stub.BitmaskFreeze) == 0 {
- stub.BitmaskFreeze = make(bitmask, bitmaskLen)
- stub.Freeze = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskFreeze.setBit(i)
- stub.Freeze = append(stub.Freeze, txn.Txn.AssetParams.Freeze[:]...)
- }
- if !txn.Txn.AssetParams.Clawback.MsgIsZero() {
- if len(stub.BitmaskClawback) == 0 {
- stub.BitmaskClawback = make(bitmask, bitmaskLen)
- stub.Clawback = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskClawback.setBit(i)
- stub.Clawback = append(stub.Clawback, txn.Txn.AssetParams.Clawback[:]...)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructAssetParams() {
- stub.BitmaskTotal.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskDecimals.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskDefaultFrozen.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskUnitName.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAssetName.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskURL.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskMetadataHash.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskManager.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskReserve.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskFreeze.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskClawback.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructAssetTransferTxnFields(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.XferAsset.MsgIsZero() {
- if len(stub.BitmaskXferAsset) == 0 {
- stub.BitmaskXferAsset = make(bitmask, bitmaskLen)
- stub.XferAsset = make([]basics.AssetIndex, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskXferAsset.setBit(i)
- stub.XferAsset = append(stub.XferAsset, txn.Txn.XferAsset)
- }
- if txn.Txn.AssetAmount != 0 {
- if len(stub.BitmaskAssetAmount) == 0 {
- stub.BitmaskAssetAmount = make(bitmask, bitmaskLen)
- stub.AssetAmount = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskAssetAmount.setBit(i)
- stub.AssetAmount = append(stub.AssetAmount, txn.Txn.AssetAmount)
- }
- if !txn.Txn.AssetSender.MsgIsZero() {
- if len(stub.BitmaskAssetSender) == 0 {
- stub.BitmaskAssetSender = make(bitmask, bitmaskLen)
- stub.AssetSender = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskAssetSender.setBit(i)
- stub.AssetSender = append(stub.AssetSender, txn.Txn.AssetSender[:]...)
- }
- if !txn.Txn.AssetReceiver.MsgIsZero() {
- if len(stub.BitmaskAssetReceiver) == 0 {
- stub.BitmaskAssetReceiver = make(bitmask, bitmaskLen)
- stub.AssetReceiver = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskAssetReceiver.setBit(i)
- stub.AssetReceiver = append(stub.AssetReceiver, txn.Txn.AssetReceiver[:]...)
- }
- if !txn.Txn.AssetCloseTo.MsgIsZero() {
- if len(stub.BitmaskAssetCloseTo) == 0 {
- stub.BitmaskAssetCloseTo = make(bitmask, bitmaskLen)
- stub.AssetCloseTo = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskAssetCloseTo.setBit(i)
- stub.AssetCloseTo = append(stub.AssetCloseTo, txn.Txn.AssetCloseTo[:]...)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructAssetTransferTxnFields() {
- stub.BitmaskXferAsset.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAssetAmount.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAssetSender.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAssetReceiver.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAssetCloseTo.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructAssetFreezeTxnFields(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.FreezeAccount.MsgIsZero() {
- if len(stub.BitmaskFreezeAccount) == 0 {
- stub.BitmaskFreezeAccount = make(bitmask, bitmaskLen)
- stub.FreezeAccount = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskFreezeAccount.setBit(i)
- stub.FreezeAccount = append(stub.FreezeAccount, txn.Txn.FreezeAccount[:]...)
- }
- if txn.Txn.FreezeAsset != 0 {
- if len(stub.BitmaskFreezeAsset) == 0 {
- stub.BitmaskFreezeAsset = make(bitmask, bitmaskLen)
- stub.FreezeAsset = make([]basics.AssetIndex, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskFreezeAsset.setBit(i)
- stub.FreezeAsset = append(stub.FreezeAsset, txn.Txn.FreezeAsset)
- }
- if txn.Txn.AssetFrozen {
- if len(stub.BitmaskAssetFrozen) == 0 {
- stub.BitmaskAssetFrozen = make(bitmask, bitmaskLen)
- }
- stub.BitmaskAssetFrozen.setBit(i)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructAssetFreezeTxnFields() {
- stub.BitmaskFreezeAccount.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskFreezeAsset.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAssetFrozen.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructApplicationCallTxnFields(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.ApplicationID.MsgIsZero() {
- if len(stub.BitmaskApplicationID) == 0 {
- stub.BitmaskApplicationID = make(bitmask, bitmaskLen)
- stub.ApplicationID = make([]basics.AppIndex, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskApplicationID.setBit(i)
- stub.ApplicationID = append(stub.ApplicationID, txn.Txn.ApplicationID)
- }
- if txn.Txn.OnCompletion != 0 {
- if len(stub.BitmaskOnCompletion) == 0 {
- stub.BitmaskOnCompletion = make(bitmask, bitmaskLen)
- stub.OnCompletion = make([]byte, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskOnCompletion.setBit(i)
- stub.OnCompletion = append(stub.OnCompletion, byte(txn.Txn.OnCompletion))
- }
- if txn.Txn.ApplicationArgs != nil {
- if len(stub.BitmaskApplicationArgs) == 0 {
- stub.BitmaskApplicationArgs = make(bitmask, bitmaskLen)
- stub.ApplicationArgs = make([]applicationArgs, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskApplicationArgs.setBit(i)
- stub.ApplicationArgs = append(stub.ApplicationArgs, txn.Txn.ApplicationArgs)
- }
- if txn.Txn.Accounts != nil {
- if len(stub.BitmaskAccounts) == 0 {
- stub.BitmaskAccounts = make(bitmask, bitmaskLen)
- stub.Accounts = make([]addresses, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskAccounts.setBit(i)
- stub.Accounts = append(stub.Accounts, txn.Txn.Accounts)
- }
- if txn.Txn.ForeignApps != nil {
- if len(stub.BitmaskForeignApps) == 0 {
- stub.BitmaskForeignApps = make(bitmask, bitmaskLen)
- stub.ForeignApps = make([]appIndices, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskForeignApps.setBit(i)
- stub.ForeignApps = append(stub.ForeignApps, txn.Txn.ForeignApps)
- }
- if txn.Txn.ForeignAssets != nil {
- if len(stub.BitmaskForeignAssets) == 0 {
- stub.BitmaskForeignAssets = make(bitmask, bitmaskLen)
- stub.ForeignAssets = make([]assetIndices, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskForeignAssets.setBit(i)
- stub.ForeignAssets = append(stub.ForeignAssets, txn.Txn.ForeignAssets)
- }
- if txn.Txn.LocalStateSchema.NumUint != 0 {
- if len(stub.BitmaskLocalNumUint) == 0 {
- stub.BitmaskLocalNumUint = make(bitmask, bitmaskLen)
- stub.LocalNumUint = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskLocalNumUint.setBit(i)
- stub.LocalNumUint = append(stub.LocalNumUint, txn.Txn.LocalStateSchema.NumUint)
- }
- if txn.Txn.LocalStateSchema.NumByteSlice != 0 {
- if len(stub.BitmaskLocalNumByteSlice) == 0 {
- stub.BitmaskLocalNumByteSlice = make(bitmask, bitmaskLen)
- stub.LocalNumByteSlice = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskLocalNumByteSlice.setBit(i)
- stub.LocalNumByteSlice = append(stub.LocalNumByteSlice, txn.Txn.LocalStateSchema.NumByteSlice)
- }
- if txn.Txn.GlobalStateSchema.NumUint != 0 {
- if len(stub.BitmaskGlobalNumUint) == 0 {
- stub.BitmaskGlobalNumUint = make(bitmask, bitmaskLen)
- stub.GlobalNumUint = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskGlobalNumUint.setBit(i)
- stub.GlobalNumUint = append(stub.GlobalNumUint, txn.Txn.GlobalStateSchema.NumUint)
- }
- if txn.Txn.GlobalStateSchema.NumByteSlice != 0 {
- if len(stub.BitmaskGlobalNumByteSlice) == 0 {
- stub.BitmaskGlobalNumByteSlice = make(bitmask, bitmaskLen)
- stub.GlobalNumByteSlice = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskGlobalNumByteSlice.setBit(i)
- stub.GlobalNumByteSlice = append(stub.GlobalNumByteSlice, txn.Txn.GlobalStateSchema.NumByteSlice)
- }
- if txn.Txn.ApprovalProgram != nil {
- if len(stub.BitmaskApprovalProgram) == 0 {
- stub.BitmaskApprovalProgram = make(bitmask, bitmaskLen)
- stub.ApprovalProgram = make([]program, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskApprovalProgram.setBit(i)
- stub.ApprovalProgram = append(stub.ApprovalProgram, txn.Txn.ApprovalProgram)
- }
- if txn.Txn.ClearStateProgram != nil {
- if len(stub.BitmaskClearStateProgram) == 0 {
- stub.BitmaskClearStateProgram = make(bitmask, bitmaskLen)
- stub.ClearStateProgram = make([]program, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskClearStateProgram.setBit(i)
- stub.ClearStateProgram = append(stub.ClearStateProgram, txn.Txn.ClearStateProgram)
- }
- if txn.Txn.ExtraProgramPages != 0 {
- if len(stub.BitmaskExtraProgramPages) == 0 {
- stub.BitmaskExtraProgramPages = make(bitmask, bitmaskLen)
- stub.ExtraProgramPages = make([]uint32, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskExtraProgramPages.setBit(i)
- stub.ExtraProgramPages = append(stub.ExtraProgramPages, txn.Txn.ExtraProgramPages)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructApplicationCallTxnFields() {
- compactNibblesArray(&stub.OnCompletion)
- stub.BitmaskApplicationID.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskOnCompletion.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskApplicationArgs.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskAccounts.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskForeignApps.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskForeignAssets.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskLocalNumUint.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskLocalNumByteSlice.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskGlobalNumUint.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskGlobalNumByteSlice.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskApprovalProgram.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskClearStateProgram.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskExtraProgramPages.trimBitmask(int(stub.TotalTransactionsCount))
-}
-
-func (stub *txGroupsEncodingStub) deconstructCompactCertTxnFields(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.CertRound.MsgIsZero() {
- if len(stub.BitmaskCertRound) == 0 {
- stub.BitmaskCertRound = make(bitmask, bitmaskLen)
- stub.CertRound = make([]basics.Round, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskCertRound.setBit(i)
- stub.CertRound = append(stub.CertRound, txn.Txn.CertRound)
- }
- if txn.Txn.CertType != 0 {
- if len(stub.BitmaskCertType) == 0 {
- stub.BitmaskCertType = make(bitmask, bitmaskLen)
- stub.CertType = make([]protocol.CompactCertType, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskCertType.setBit(i)
- stub.CertType = append(stub.CertType, txn.Txn.CertType)
- }
- stub.deconstructCert(i, txn)
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructCompactCertTxnFields() {
- stub.BitmaskCertRound.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskCertType.trimBitmask(int(stub.TotalTransactionsCount))
- stub.finishDeconstructCert()
-}
-
-func (stub *txGroupsEncodingStub) deconstructCert(i int, txn *transactions.SignedTxn) {
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- if !txn.Txn.Cert.SigCommit.MsgIsZero() {
- if len(stub.BitmaskSigCommit) == 0 {
- stub.BitmaskSigCommit = make(bitmask, bitmaskLen)
- stub.SigCommit = make([]byte, 0, stub.TotalTransactionsCount*crypto.DigestSize)
- }
- stub.BitmaskSigCommit.setBit(i)
- stub.SigCommit = append(stub.SigCommit, txn.Txn.Cert.SigCommit[:]...)
- }
- if txn.Txn.Cert.SignedWeight != 0 {
- if len(stub.BitmaskSignedWeight) == 0 {
- stub.BitmaskSignedWeight = make(bitmask, bitmaskLen)
- stub.SignedWeight = make([]uint64, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskSignedWeight.setBit(i)
- stub.SignedWeight = append(stub.SignedWeight, txn.Txn.Cert.SignedWeight)
- }
- if txn.Txn.Cert.SigProofs != nil {
- if len(stub.BitmaskSigProofs) == 0 {
- stub.BitmaskSigProofs = make(bitmask, bitmaskLen)
- stub.SigProofs = make([]certProofs, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskSigProofs.setBit(i)
- stub.SigProofs = append(stub.SigProofs, txn.Txn.Cert.SigProofs)
- }
- if txn.Txn.Cert.PartProofs != nil {
- if len(stub.BitmaskPartProofs) == 0 {
- stub.BitmaskPartProofs = make(bitmask, bitmaskLen)
- stub.PartProofs = make([]certProofs, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskPartProofs.setBit(i)
- stub.PartProofs = append(stub.PartProofs, txn.Txn.Cert.PartProofs)
- }
- if txn.Txn.Cert.Reveals != nil {
- if len(stub.BitmaskReveals) == 0 {
- stub.BitmaskReveals = make(bitmask, bitmaskLen)
- stub.Reveals = make([]revealMap, 0, stub.TotalTransactionsCount)
- }
- stub.BitmaskReveals.setBit(i)
- stub.Reveals = append(stub.Reveals, txn.Txn.Cert.Reveals)
- }
-}
-
-func (stub *txGroupsEncodingStub) finishDeconstructCert() {
- stub.BitmaskSigCommit.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskSignedWeight.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskSigProofs.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskPartProofs.trimBitmask(int(stub.TotalTransactionsCount))
- stub.BitmaskReveals.trimBitmask(int(stub.TotalTransactionsCount))
-}
diff --git a/txnsync/encodedgroupstypes.go b/txnsync/encodedgroupstypes.go
deleted file mode 100644
index 727d67ee8..000000000
--- a/txnsync/encodedgroupstypes.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "errors"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/protocol"
-)
-
-const maxEncodedTransactionGroups = 30000
-const maxEncodedTransactionGroupEntries = 30000
-const maxBitmaskSize = (maxEncodedTransactionGroupEntries+7)/8 + 1
-const maxSignatureBytes = maxEncodedTransactionGroupEntries * len(crypto.Signature{})
-const maxAddressBytes = maxEncodedTransactionGroupEntries * crypto.DigestSize
-
-var errInvalidTxType = errors.New("invalid txtype")
-
-//msgp:allocbound txnGroups maxEncodedTransactionGroupEntries
-type txnGroups pooldata.SignedTxnSlice //nolint:unused
-
-// old data structure for encoding (only used for testing)
-type txGroupsEncodingStubOld struct { //nolint:unused
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- TxnGroups []txnGroups `codec:"t,allocbound=maxEncodedTransactionGroups"`
-}
-
-type txGroupsEncodingStub struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- TotalTransactionsCount uint64 `codec:"ttc"`
- TransactionGroupCount uint64 `codec:"tgc"`
- TransactionGroupSizes []byte `codec:"tgs,allocbound=maxEncodedTransactionGroups"`
-
- encodedSignedTxns
-}
-
-type encodedSignedTxns struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Sig []byte `codec:"sig,allocbound=maxSignatureBytes"`
- BitmaskSig bitmask `codec:"sigbm"`
-
- encodedMsigs
- encodedLsigs
-
- AuthAddr []byte `codec:"sgnr,allocbound=maxAddressBytes"`
- BitmaskAuthAddr bitmask `codec:"sgnrbm"`
-
- encodedTxns
-}
-
-type encodedMsigs struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Version []byte `codec:"msigv,allocbound=maxEncodedTransactionGroups"`
- BitmaskVersion bitmask `codec:"msigvbm"`
- Threshold []byte `codec:"msigthr,allocbound=maxEncodedTransactionGroups"`
- BitmaskThreshold bitmask `codec:"msigthrbm"`
- // splitting subsigs further make the code much more complicated / does not give gains
- Subsigs [][]crypto.MultisigSubsig `codec:"subsig,allocbound=maxEncodedTransactionGroups,allocbound=crypto.MaxMultisig"`
- BitmaskSubsigs bitmask `codec:"subsigsbm"`
-}
-
-type encodedLsigs struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Logic [][]byte `codec:"lsigl,allocbound=maxEncodedTransactionGroups,allocbound=config.MaxLogicSigMaxSize"`
- BitmaskLogic bitmask `codec:"lsiglbm"`
- LogicArgs [][][]byte `codec:"lsigarg,allocbound=maxEncodedTransactionGroups,allocbound=transactions.EvalMaxArgs,allocbound=config.MaxLogicSigMaxSize"`
- BitmaskLogicArgs bitmask `codec:"lsigargbm"`
-}
-
-type encodedTxns struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- TxType []byte `codec:"type,allocbound=maxEncodedTransactionGroups"`
- BitmaskTxType bitmask `codec:"typebm"`
- TxTypeOffset byte `codec:"typeo"`
-
- encodedTxnHeaders
- encodedKeyregTxnFields
- encodedPaymentTxnFields
- encodedAssetConfigTxnFields
- encodedAssetTransferTxnFields
- encodedAssetFreezeTxnFields
- encodedApplicationCallTxnFields
- encodedCompactCertTxnFields
-}
-
-type encodedTxnHeaders struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Sender []byte `codec:"snd,allocbound=maxAddressBytes"`
- BitmaskSender bitmask `codec:"sndbm"`
- Fee []basics.MicroAlgos `codec:"fee,allocbound=maxEncodedTransactionGroups"`
- BitmaskFee bitmask `codec:"feebm"`
- FirstValid []basics.Round `codec:"fv,allocbound=maxEncodedTransactionGroups"`
- BitmaskFirstValid bitmask `codec:"fvbm"`
- LastValid []basics.Round `codec:"lv,allocbound=maxEncodedTransactionGroups"`
- BitmaskLastValid bitmask `codec:"lvbm"`
- Note [][]byte `codec:"note,allocbound=maxEncodedTransactionGroups,allocbound=config.MaxTxnNoteBytes"`
- BitmaskNote bitmask `codec:"notebm"`
- BitmaskGenesisID bitmask `codec:"genbm"`
-
- BitmaskGroup bitmask `codec:"grpbm"`
-
- Lease []byte `codec:"lx,allocbound=maxAddressBytes"`
- BitmaskLease bitmask `codec:"lxbm"`
-
- RekeyTo []byte `codec:"rekey,allocbound=maxAddressBytes"`
- BitmaskRekeyTo bitmask `codec:"rekeybm"`
-}
-
-type encodedKeyregTxnFields struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- VotePK []byte `codec:"votekey,allocbound=maxAddressBytes"`
- SelectionPK []byte `codec:"selkey,allocbound=maxAddressBytes"`
- VoteFirst []basics.Round `codec:"votefst,allocbound=maxEncodedTransactionGroups"`
- BitmaskVoteFirst bitmask `codec:"votefstbm"`
- VoteLast []basics.Round `codec:"votelst,allocbound=maxEncodedTransactionGroups"`
- BitmaskVoteLast bitmask `codec:"votelstbm"`
- VoteKeyDilution []uint64 `codec:"votekd,allocbound=maxEncodedTransactionGroups"`
- BitmaskKeys bitmask `codec:"votekbm"`
- BitmaskNonparticipation bitmask `codec:"nonpartbm"`
-}
-
-type encodedPaymentTxnFields struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Receiver []byte `codec:"rcv,allocbound=maxAddressBytes"`
- BitmaskReceiver bitmask `codec:"rcvbm"`
- Amount []basics.MicroAlgos `codec:"amt,allocbound=maxEncodedTransactionGroups"`
- BitmaskAmount bitmask `codec:"amtbm"`
-
- CloseRemainderTo []byte `codec:"close,allocbound=maxAddressBytes"`
- BitmaskCloseRemainderTo bitmask `codec:"closebm"`
-}
-
-type encodedAssetConfigTxnFields struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- ConfigAsset []basics.AssetIndex `codec:"caid,allocbound=maxEncodedTransactionGroups"`
- BitmaskConfigAsset bitmask `codec:"caidbm"`
-
- encodedAssetParams
-}
-
-type encodedAssetParams struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Total []uint64 `codec:"t,allocbound=maxEncodedTransactionGroups"`
- BitmaskTotal bitmask `codec:"tbm"`
-
- Decimals []uint32 `codec:"dc,allocbound=maxEncodedTransactionGroups"`
- BitmaskDecimals bitmask `codec:"dcbm"`
-
- BitmaskDefaultFrozen bitmask `codec:"dfbm"`
-
- UnitName []string `codec:"un,allocbound=maxEncodedTransactionGroups"`
- BitmaskUnitName bitmask `codec:"unbm"`
-
- AssetName []string `codec:"an,allocbound=maxEncodedTransactionGroups"`
- BitmaskAssetName bitmask `codec:"anbm"`
-
- URL []string `codec:"au,allocbound=maxEncodedTransactionGroups"`
- BitmaskURL bitmask `codec:"aubm"`
-
- MetadataHash []byte `codec:"am,allocbound=maxAddressBytes"`
- BitmaskMetadataHash bitmask `codec:"ambm"`
-
- Manager []byte `codec:"m,allocbound=maxAddressBytes"`
- BitmaskManager bitmask `codec:"mbm"`
-
- Reserve []byte `codec:"r,allocbound=maxAddressBytes"`
- BitmaskReserve bitmask `codec:"rbm"`
-
- Freeze []byte `codec:"f,allocbound=maxAddressBytes"`
- BitmaskFreeze bitmask `codec:"fbm"`
-
- Clawback []byte `codec:"c,allocbound=maxAddressBytes"`
- BitmaskClawback bitmask `codec:"cbm"`
-}
-
-type encodedAssetTransferTxnFields struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- XferAsset []basics.AssetIndex `codec:"xaid,allocbound=maxEncodedTransactionGroups"`
- BitmaskXferAsset bitmask `codec:"xaidbm"`
-
- AssetAmount []uint64 `codec:"aamt,allocbound=maxEncodedTransactionGroups"`
- BitmaskAssetAmount bitmask `codec:"aamtbm"`
-
- AssetSender []byte `codec:"asnd,allocbound=maxAddressBytes"`
- BitmaskAssetSender bitmask `codec:"asndbm"`
-
- AssetReceiver []byte `codec:"arcv,allocbound=maxAddressBytes"`
- BitmaskAssetReceiver bitmask `codec:"arcvbm"`
-
- AssetCloseTo []byte `codec:"aclose,allocbound=maxAddressBytes"`
- BitmaskAssetCloseTo bitmask `codec:"aclosebm"`
-}
-
-type encodedAssetFreezeTxnFields struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- FreezeAccount []byte `codec:"fadd,allocbound=maxAddressBytes"`
- BitmaskFreezeAccount bitmask `codec:"faddbm"`
-
- FreezeAsset []basics.AssetIndex `codec:"faid,allocbound=maxEncodedTransactionGroups"`
- BitmaskFreezeAsset bitmask `codec:"faidbm"`
-
- BitmaskAssetFrozen bitmask `codec:"afrzbm"`
-}
-
-//msgp:allocbound applicationArgs transactions.EncodedMaxApplicationArgs
-type applicationArgs [][]byte
-
-//msgp:allocbound addresses transactions.EncodedMaxAccounts
-type addresses []basics.Address
-
-//msgp:allocbound appIndices transactions.EncodedMaxForeignApps
-type appIndices []basics.AppIndex
-
-//msgp:allocbound assetIndices transactions.EncodedMaxForeignAssets
-type assetIndices []basics.AssetIndex
-
-//msgp:allocbound program config.MaxAvailableAppProgramLen
-type program []byte
-
-type encodedApplicationCallTxnFields struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- ApplicationID []basics.AppIndex `codec:"apid,allocbound=maxEncodedTransactionGroups"`
- BitmaskApplicationID bitmask `codec:"apidbm"`
-
- OnCompletion []byte `codec:"apan,allocbound=maxEncodedTransactionGroups"`
- BitmaskOnCompletion bitmask `codec:"apanbm"`
-
- ApplicationArgs []applicationArgs `codec:"apaa,allocbound=maxEncodedTransactionGroups"`
- BitmaskApplicationArgs bitmask `codec:"apaabm"`
-
- Accounts []addresses `codec:"apat,allocbound=maxEncodedTransactionGroups"`
- BitmaskAccounts bitmask `codec:"apatbm"`
-
- ForeignApps []appIndices `codec:"apfa,allocbound=maxEncodedTransactionGroups"`
- BitmaskForeignApps bitmask `codec:"apfabm"`
-
- ForeignAssets []assetIndices `codec:"apas,allocbound=maxEncodedTransactionGroups"`
- BitmaskForeignAssets bitmask `codec:"apasbm"`
-
- LocalNumUint []uint64 `codec:"lnui,allocbound=maxEncodedTransactionGroups"`
- BitmaskLocalNumUint bitmask `codec:"lnuibm"`
- LocalNumByteSlice []uint64 `codec:"lnbs,allocbound=maxEncodedTransactionGroups"`
- BitmaskLocalNumByteSlice bitmask `codec:"lnbsbm"`
-
- GlobalNumUint []uint64 `codec:"gnui,allocbound=maxEncodedTransactionGroups"`
- BitmaskGlobalNumUint bitmask `codec:"gnuibm"`
- GlobalNumByteSlice []uint64 `codec:"gnbs,allocbound=maxEncodedTransactionGroups"`
- BitmaskGlobalNumByteSlice bitmask `codec:"gnbsbm"`
-
- ApprovalProgram []program `codec:"apap,allocbound=maxEncodedTransactionGroups"`
- BitmaskApprovalProgram bitmask `codec:"apapbm"`
-
- ClearStateProgram []program `codec:"apsu,allocbound=maxEncodedTransactionGroups"`
- BitmaskClearStateProgram bitmask `codec:"apsubm"`
-
- ExtraProgramPages []uint32 `codec:"apep,allocbound=maxEncodedTransactionGroups"`
- BitmaskExtraProgramPages bitmask `codec:"apepbm"`
-}
-
-type encodedCompactCertTxnFields struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- CertRound []basics.Round `codec:"certrnd,allocbound=maxEncodedTransactionGroups"`
- BitmaskCertRound bitmask `codec:"certrndbm"`
-
- CertType []protocol.CompactCertType `codec:"certtype,allocbound=maxEncodedTransactionGroups"`
- BitmaskCertType bitmask `codec:"certtypebm"`
-
- encodedCert
-}
-
-//msgp:allocbound certProofs compactcert.MaxProofDigests
-type certProofs []crypto.Digest
-
-//msgp:allocbound revealMap compactcert.MaxReveals
-type revealMap map[uint64]compactcert.Reveal
-
-// SortUint64 implements sorting by uint64 keys for
-// canonical encoding of maps in msgpack format.
-type SortUint64 = compactcert.SortUint64
-
-type encodedCert struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- SigCommit []byte `codec:"certc,allocbound=maxAddressBytes"`
- BitmaskSigCommit bitmask `codec:"certcbm"`
-
- SignedWeight []uint64 `codec:"certw,allocbound=maxEncodedTransactionGroups"`
- BitmaskSignedWeight bitmask `codec:"certwbm"`
-
- SigProofs []certProofs `codec:"certS,allocbound=maxEncodedTransactionGroups"`
- BitmaskSigProofs bitmask `codec:"certSbm"`
-
- PartProofs []certProofs `codec:"certP,allocbound=maxEncodedTransactionGroups"`
- BitmaskPartProofs bitmask `codec:"certPbm"`
-
- Reveals []revealMap `codec:"certr,allocbound=maxEncodedTransactionGroups"`
- BitmaskReveals bitmask `codec:"certrbm"`
-}
-
-const (
- paymentTx = iota
- keyRegistrationTx
- assetConfigTx
- assetTransferTx
- assetFreezeTx
- applicationCallTx
- compactCertTx
- unknownTx
-)
-
-// TxTypeToByte converts a TxType to byte encoding
-func TxTypeToByte(t protocol.TxType) (byte, error) {
- switch t {
- case protocol.PaymentTx:
- return paymentTx, nil
- case protocol.KeyRegistrationTx:
- return keyRegistrationTx, nil
- case protocol.AssetConfigTx:
- return assetConfigTx, nil
- case protocol.AssetTransferTx:
- return assetTransferTx, nil
- case protocol.AssetFreezeTx:
- return assetFreezeTx, nil
- case protocol.ApplicationCallTx:
- return applicationCallTx, nil
- case protocol.CompactCertTx:
- return compactCertTx, nil
- default:
- return unknownTx, errInvalidTxType
- }
-}
-
-// ByteToTxType converts a byte encoding to TxType
-func ByteToTxType(b byte) protocol.TxType {
- if int(b) >= len(protocol.TxnTypes) {
- return protocol.UnknownTx
- }
- return protocol.TxnTypes[b]
-}
diff --git a/txnsync/encodedgroupsunmarshalers.go b/txnsync/encodedgroupsunmarshalers.go
deleted file mode 100644
index 750bb0871..000000000
--- a/txnsync/encodedgroupsunmarshalers.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "errors"
- "fmt"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
-)
-
-var errDataMissing = errors.New("failed to decode: data missing")
-
-// the nextSlice definition - copy the next slice and slide the src window.
-func nextSlice(src *[]byte, dst []byte, size int) error {
- if len(*src) < size {
- return errDataMissing
- }
- copy(dst[:], (*src)[:size])
- // slice the src window so next call would get the next entry.
- *src = (*src)[size:]
- return nil
-}
-
-// getNibble returns the nibble at the given index from the provided
-// byte array. A errDataMissing is returned if index is beyond the size
-// of the array.
-func getNibble(b []byte, index int) (byte, error) {
- if index >= len(b)*2 {
- return 0, errDataMissing
- }
- if index%2 == 0 {
- return b[index/2] / 16, nil
- }
- return b[index/2] % 16, nil
-}
-
-func addGroupHashes(txnGroups []pooldata.SignedTxGroup, txnCount int, b bitmask) (err error) {
- index := 0
- txGroupHashes := make([]crypto.Digest, 16)
- tStart := 0
-
- // addGroupHashesFunc adds hashes to transactions in groups of more than 1 transaction,
- // or to transactions with one transaction and bitmask set for that index.
- // It stops at index nextSetBitIndex, or stops when all in txnGroups are visited.
- addGroupHashesFunc := func(nextSetBitIndex int, count int) error {
- remainingTxnGroups := txnGroups[tStart:]
- for t, txns := range remainingTxnGroups {
- if len(txns.Transactions) == 1 && index != nextSetBitIndex {
- index++
- continue
- }
- var txGroup transactions.TxGroup
- txGroup.TxGroupHashes = txGroupHashes[:len(txns.Transactions)]
- for i, tx := range txns.Transactions {
- txGroup.TxGroupHashes[i] = crypto.HashObj(tx.Txn)
- }
- groupHash := crypto.HashObj(txGroup)
- for i := range txns.Transactions {
- txns.Transactions[i].Txn.Group = groupHash
- index++
- }
- if index > nextSetBitIndex {
- tStart += t + 1
- return nil
- }
- }
- tStart = len(txnGroups)
- return nil
- }
- // addGroupHashesFunc will be called for each set bit. Between set bits, all transactions
- // in groups of more than 1 transactions will have the hashes added.
- err = b.iterate(txnCount, txnCount, addGroupHashesFunc)
-
- if err == nil {
- // One more call to addGroupHashesFunc to cover all the remaining transactions in groups of
- // more than 1 transaction that were not added because no groups with one transaction are left.
- err = addGroupHashesFunc(txnCount+1, -1)
- }
- return
-}
-
-func (stub *txGroupsEncodingStub) reconstructSignedTransactions(signedTxns []transactions.SignedTxn, genesisID string, genesisHash crypto.Digest) error {
- err := stub.BitmaskSig.iterate(int(stub.TotalTransactionsCount), len(stub.Sig)/len(crypto.Signature{}), func(i int, _ int) error {
- return nextSlice(&stub.Sig, signedTxns[i].Sig[:], len(crypto.Signature{}))
- })
- if err != nil {
- return err
- }
-
- if err = stub.reconstructMsigs(signedTxns); err != nil {
- return fmt.Errorf("failed to msigs: %w", err)
- }
- if err = stub.reconstructLsigs(signedTxns); err != nil {
- return fmt.Errorf("failed to lsigs: %w", err)
- }
- err = stub.BitmaskAuthAddr.iterate(int(stub.TotalTransactionsCount), len(stub.AuthAddr)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.AuthAddr, signedTxns[i].AuthAddr[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
-
- return stub.reconstructTransactions(signedTxns, genesisID, genesisHash)
-}
-
-func (stub *txGroupsEncodingStub) reconstructMsigs(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskVersion.iterate(int(stub.TotalTransactionsCount), len(stub.Version), func(i int, index int) error {
- signedTxns[i].Msig.Version = stub.Version[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskThreshold.iterate(int(stub.TotalTransactionsCount), len(stub.Threshold), func(i int, index int) error {
- signedTxns[i].Msig.Threshold = stub.Threshold[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskSubsigs.iterate(int(stub.TotalTransactionsCount), len(stub.Subsigs), func(i int, index int) error {
- signedTxns[i].Msig.Subsigs = stub.Subsigs[index]
- return nil
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructLsigs(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskLogic.iterate(int(stub.TotalTransactionsCount), len(stub.Logic), func(i int, index int) error {
- signedTxns[i].Lsig.Logic = stub.Logic[index]
- // fetch sig/msig
- signedTxns[i].Lsig.Sig = signedTxns[i].Sig
- signedTxns[i].Sig = crypto.Signature{}
- signedTxns[i].Lsig.Msig = signedTxns[i].Msig
- signedTxns[i].Msig = crypto.MultisigSig{}
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskLogicArgs.iterate(int(stub.TotalTransactionsCount), len(stub.LogicArgs), func(i int, index int) error {
- signedTxns[i].Lsig.Args = stub.LogicArgs[index]
- return nil
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructTransactions(signedTxns []transactions.SignedTxn, genesisID string, genesisHash crypto.Digest) (err error) {
- err = stub.BitmaskTxType.iterate(int(stub.TotalTransactionsCount), len(stub.TxType)*2, func(i int, index int) error {
- b, err := getNibble(stub.TxType, index)
- if err != nil {
- return err
- }
- signedTxns[i].Txn.Type = ByteToTxType(b)
- return nil
- })
- for i := range signedTxns {
- if signedTxns[i].Txn.Type == "" {
- signedTxns[i].Txn.Type = ByteToTxType(stub.TxTypeOffset)
- }
- }
- if err != nil {
- return err
- }
-
- if err := stub.reconstructTxnHeader(signedTxns, genesisID, genesisHash); err != nil {
- return fmt.Errorf("failed to reconstructTxnHeader: %w", err)
- }
- if err := stub.reconstructKeyregTxnFields(signedTxns); err != nil {
- return fmt.Errorf("failed to reconstructKeyregTxnFields: %w", err)
- }
- if err := stub.reconstructPaymentTxnFields(signedTxns); err != nil {
- return fmt.Errorf("failed to reconstructPaymentTxnFields: %w", err)
- }
- if err := stub.reconstructAssetConfigTxnFields(signedTxns); err != nil {
- return fmt.Errorf("failed to reconstructAssetConfigTxnFields: %w", err)
- }
- if err := stub.reconstructAssetTransferTxnFields(signedTxns); err != nil {
- return fmt.Errorf("failed to reconstructAssetTransferTxnFields: %w", err)
- }
- if err := stub.reconstructAssetFreezeTxnFields(signedTxns); err != nil {
- return fmt.Errorf("failed to reconstructAssetFreezeTxnFields: %w", err)
- }
- if err := stub.reconstructApplicationCallTxnFields(signedTxns); err != nil {
- return fmt.Errorf("failed to reconstructApplicationCallTxnFields: %w", err)
- }
- if err := stub.reconstructCompactCertTxnFields(signedTxns); err != nil {
- return fmt.Errorf("failed to reconstructCompactCertTxnFields: %w", err)
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructTxnHeader(signedTxns []transactions.SignedTxn, genesisID string, genesisHash crypto.Digest) (err error) {
- err = stub.BitmaskSender.iterate(int(stub.TotalTransactionsCount), len(stub.Sender)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.Sender, signedTxns[i].Txn.Sender[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskFee.iterate(int(stub.TotalTransactionsCount), len(stub.Fee), func(i int, index int) error {
- signedTxns[i].Txn.Fee = stub.Fee[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskFirstValid.iterate(int(stub.TotalTransactionsCount), len(stub.FirstValid), func(i int, index int) error {
- signedTxns[i].Txn.FirstValid = stub.FirstValid[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskLastValid.iterate(int(stub.TotalTransactionsCount), len(stub.LastValid), func(i int, index int) error {
- signedTxns[i].Txn.LastValid = stub.LastValid[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskNote.iterate(int(stub.TotalTransactionsCount), len(stub.Note), func(i int, index int) error {
- signedTxns[i].Txn.Note = stub.Note[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskGenesisID.iterate(int(stub.TotalTransactionsCount), int(stub.TotalTransactionsCount), func(i int, index int) error {
- signedTxns[i].Txn.GenesisID = genesisID
- return nil
- })
- if err != nil {
- return err
- }
- for i := range signedTxns {
- signedTxns[i].Txn.GenesisHash = genesisHash
- }
- err = stub.BitmaskLease.iterate(int(stub.TotalTransactionsCount), len(stub.Lease)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.Lease, signedTxns[i].Txn.Lease[:], transactions.LeaseByteLength)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskRekeyTo.iterate(int(stub.TotalTransactionsCount), len(stub.RekeyTo)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.RekeyTo, signedTxns[i].Txn.RekeyTo[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructKeyregTxnFields(signedTxns []transactions.SignedTxn) (err error) {
- // should all have same number of elements
- if len(stub.VotePK)/len(crypto.OneTimeSignatureVerifier{}) != len(stub.VoteKeyDilution) || len(stub.SelectionPK)/len(crypto.VRFVerifier{}) != len(stub.VoteKeyDilution) {
- return errDataMissing
- }
- err = stub.BitmaskKeys.iterate(int(stub.TotalTransactionsCount), len(stub.VoteKeyDilution), func(i int, index int) error {
- signedTxns[i].Txn.VoteKeyDilution = stub.VoteKeyDilution[index]
- err := nextSlice(&stub.VotePK, signedTxns[i].Txn.VotePK[:], len(crypto.OneTimeSignatureVerifier{}))
- if err != nil {
- return err
- }
- return nextSlice(&stub.SelectionPK, signedTxns[i].Txn.SelectionPK[:], len(crypto.VRFVerifier{}))
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskVoteFirst.iterate(int(stub.TotalTransactionsCount), len(stub.VoteFirst), func(i int, index int) error {
- if index >= len(stub.VoteFirst) {
- return errDataMissing
- }
- signedTxns[i].Txn.VoteFirst = stub.VoteFirst[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskVoteLast.iterate(int(stub.TotalTransactionsCount), len(stub.VoteLast), func(i int, index int) error {
- if index >= len(stub.VoteLast) {
- return errDataMissing
- }
- signedTxns[i].Txn.VoteLast = stub.VoteLast[index]
- return nil
- })
- if err != nil {
- return err
- }
-
- err = stub.BitmaskNonparticipation.iterate(int(stub.TotalTransactionsCount), int(stub.TotalTransactionsCount), func(i int, index int) error {
- signedTxns[i].Txn.Nonparticipation = true
- return nil
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructPaymentTxnFields(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskReceiver.iterate(int(stub.TotalTransactionsCount), len(stub.Receiver)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.Receiver, signedTxns[i].Txn.Receiver[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskAmount.iterate(int(stub.TotalTransactionsCount), len(stub.Amount), func(i int, index int) error {
- signedTxns[i].Txn.Amount = stub.Amount[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskCloseRemainderTo.iterate(int(stub.TotalTransactionsCount), len(stub.CloseRemainderTo)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.CloseRemainderTo, signedTxns[i].Txn.CloseRemainderTo[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructAssetConfigTxnFields(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskConfigAsset.iterate(int(stub.TotalTransactionsCount), len(stub.ConfigAsset), func(i int, index int) error {
- signedTxns[i].Txn.ConfigAsset = stub.ConfigAsset[index]
- return nil
- })
- if err != nil {
- return err
- }
- return stub.reconstructAssetParams(signedTxns)
-}
-
-func (stub *txGroupsEncodingStub) reconstructAssetParams(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskTotal.iterate(int(stub.TotalTransactionsCount), len(stub.Total), func(i int, index int) error {
- signedTxns[i].Txn.AssetParams.Total = stub.Total[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskDecimals.iterate(int(stub.TotalTransactionsCount), len(stub.Decimals), func(i int, index int) error {
- signedTxns[i].Txn.AssetParams.Decimals = stub.Decimals[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskDefaultFrozen.iterate(int(stub.TotalTransactionsCount), int(stub.TotalTransactionsCount), func(i int, index int) error {
- signedTxns[i].Txn.AssetParams.DefaultFrozen = true
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskUnitName.iterate(int(stub.TotalTransactionsCount), len(stub.UnitName), func(i int, index int) error {
- signedTxns[i].Txn.AssetParams.UnitName = stub.UnitName[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskAssetName.iterate(int(stub.TotalTransactionsCount), len(stub.AssetName), func(i int, index int) error {
- signedTxns[i].Txn.AssetParams.AssetName = stub.AssetName[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskURL.iterate(int(stub.TotalTransactionsCount), len(stub.URL), func(i int, index int) error {
- signedTxns[i].Txn.AssetParams.URL = stub.URL[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskMetadataHash.iterate(int(stub.TotalTransactionsCount), len(stub.MetadataHash)/basics.MetadataHashLength, func(i int, index int) error {
- return nextSlice(&stub.MetadataHash, signedTxns[i].Txn.AssetParams.MetadataHash[:], basics.MetadataHashLength)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskManager.iterate(int(stub.TotalTransactionsCount), len(stub.Manager)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.Manager, signedTxns[i].Txn.AssetParams.Manager[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskReserve.iterate(int(stub.TotalTransactionsCount), len(stub.Reserve)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.Reserve, signedTxns[i].Txn.AssetParams.Reserve[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskFreeze.iterate(int(stub.TotalTransactionsCount), len(stub.Freeze)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.Freeze, signedTxns[i].Txn.AssetParams.Freeze[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskClawback.iterate(int(stub.TotalTransactionsCount), len(stub.Clawback)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.Clawback, signedTxns[i].Txn.AssetParams.Clawback[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructAssetTransferTxnFields(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskXferAsset.iterate(int(stub.TotalTransactionsCount), len(stub.XferAsset), func(i int, index int) error {
- signedTxns[i].Txn.XferAsset = stub.XferAsset[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskAssetAmount.iterate(int(stub.TotalTransactionsCount), len(stub.AssetAmount), func(i int, index int) error {
- signedTxns[i].Txn.AssetAmount = stub.AssetAmount[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskAssetSender.iterate(int(stub.TotalTransactionsCount), len(stub.AssetSender)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.AssetSender, signedTxns[i].Txn.AssetSender[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskAssetReceiver.iterate(int(stub.TotalTransactionsCount), len(stub.AssetReceiver)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.AssetReceiver, signedTxns[i].Txn.AssetReceiver[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskAssetCloseTo.iterate(int(stub.TotalTransactionsCount), len(stub.AssetCloseTo)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.AssetCloseTo, signedTxns[i].Txn.AssetCloseTo[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructAssetFreezeTxnFields(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskFreezeAccount.iterate(int(stub.TotalTransactionsCount), len(stub.FreezeAccount)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.FreezeAccount, signedTxns[i].Txn.FreezeAccount[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskFreezeAsset.iterate(int(stub.TotalTransactionsCount), len(stub.FreezeAsset), func(i int, index int) error {
- signedTxns[i].Txn.FreezeAsset = stub.FreezeAsset[index]
- return nil
- })
- if err != nil {
- return err
- }
-
- err = stub.BitmaskAssetFrozen.iterate(int(stub.TotalTransactionsCount), int(stub.TotalTransactionsCount), func(i int, index int) error {
- signedTxns[i].Txn.AssetFrozen = true
- return nil
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructApplicationCallTxnFields(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskApplicationID.iterate(int(stub.TotalTransactionsCount), len(stub.ApplicationID), func(i int, index int) error {
- signedTxns[i].Txn.ApplicationID = stub.ApplicationID[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskOnCompletion.iterate(int(stub.TotalTransactionsCount), len(stub.OnCompletion)*2, func(i int, index int) error {
- b, err := getNibble(stub.OnCompletion, index)
- if err != nil {
- return err
- }
- signedTxns[i].Txn.OnCompletion = transactions.OnCompletion(b)
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskApplicationArgs.iterate(int(stub.TotalTransactionsCount), len(stub.ApplicationArgs), func(i int, index int) error {
- signedTxns[i].Txn.ApplicationArgs = stub.ApplicationArgs[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskAccounts.iterate(int(stub.TotalTransactionsCount), len(stub.Accounts), func(i int, index int) error {
- signedTxns[i].Txn.Accounts = stub.Accounts[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskForeignApps.iterate(int(stub.TotalTransactionsCount), len(stub.ForeignApps), func(i int, index int) error {
- signedTxns[i].Txn.ForeignApps = stub.ForeignApps[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskForeignAssets.iterate(int(stub.TotalTransactionsCount), len(stub.ForeignAssets), func(i int, index int) error {
- signedTxns[i].Txn.ForeignAssets = stub.ForeignAssets[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskLocalNumUint.iterate(int(stub.TotalTransactionsCount), len(stub.LocalNumUint), func(i int, index int) error {
- signedTxns[i].Txn.LocalStateSchema.NumUint = stub.LocalNumUint[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskLocalNumByteSlice.iterate(int(stub.TotalTransactionsCount), len(stub.LocalNumByteSlice), func(i int, index int) error {
- signedTxns[i].Txn.LocalStateSchema.NumByteSlice = stub.LocalNumByteSlice[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskGlobalNumUint.iterate(int(stub.TotalTransactionsCount), len(stub.GlobalNumUint), func(i int, index int) error {
- signedTxns[i].Txn.GlobalStateSchema.NumUint = stub.GlobalNumUint[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskGlobalNumByteSlice.iterate(int(stub.TotalTransactionsCount), len(stub.GlobalNumByteSlice), func(i int, index int) error {
- signedTxns[i].Txn.GlobalStateSchema.NumByteSlice = stub.GlobalNumByteSlice[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskApprovalProgram.iterate(int(stub.TotalTransactionsCount), len(stub.ApprovalProgram), func(i int, index int) error {
- signedTxns[i].Txn.ApprovalProgram = stub.ApprovalProgram[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskClearStateProgram.iterate(int(stub.TotalTransactionsCount), len(stub.ClearStateProgram), func(i int, index int) error {
- signedTxns[i].Txn.ClearStateProgram = stub.ClearStateProgram[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskExtraProgramPages.iterate(int(stub.TotalTransactionsCount), len(stub.ExtraProgramPages), func(i int, index int) error {
- signedTxns[i].Txn.ExtraProgramPages = stub.ExtraProgramPages[index]
- return nil
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (stub *txGroupsEncodingStub) reconstructCompactCertTxnFields(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskCertRound.iterate(int(stub.TotalTransactionsCount), len(stub.CertRound), func(i int, index int) error {
- signedTxns[i].Txn.CertRound = stub.CertRound[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskCertType.iterate(int(stub.TotalTransactionsCount), len(stub.CertType), func(i int, index int) error {
- signedTxns[i].Txn.CertType = stub.CertType[index]
- return nil
- })
- if err != nil {
- return err
- }
- return stub.reconstructCert(signedTxns)
-}
-
-func (stub *txGroupsEncodingStub) reconstructCert(signedTxns []transactions.SignedTxn) (err error) {
- err = stub.BitmaskSigCommit.iterate(int(stub.TotalTransactionsCount), len(stub.SigCommit)/crypto.DigestSize, func(i int, index int) error {
- return nextSlice(&stub.SigCommit, signedTxns[i].Txn.Cert.SigCommit[:], crypto.DigestSize)
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskSignedWeight.iterate(int(stub.TotalTransactionsCount), len(stub.SignedWeight), func(i int, index int) error {
- signedTxns[i].Txn.Cert.SignedWeight = stub.SignedWeight[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskSigProofs.iterate(int(stub.TotalTransactionsCount), len(stub.SigProofs), func(i int, index int) error {
- signedTxns[i].Txn.Cert.SigProofs = stub.SigProofs[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskPartProofs.iterate(int(stub.TotalTransactionsCount), len(stub.PartProofs), func(i int, index int) error {
- signedTxns[i].Txn.Cert.PartProofs = stub.PartProofs[index]
- return nil
- })
- if err != nil {
- return err
- }
- err = stub.BitmaskReveals.iterate(int(stub.TotalTransactionsCount), len(stub.Reveals), func(i int, index int) error {
- signedTxns[i].Txn.Cert.Reveals = stub.Reveals[index]
- return nil
- })
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/txnsync/exchange.go b/txnsync/exchange.go
deleted file mode 100644
index 32b08fdb8..000000000
--- a/txnsync/exchange.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
-)
-
-const txnBlockMessageVersion = 1
-const maxBloomFilterSize = 100000
-const maxAcceptedMsgSeq = 64
-const maxEncodedTransactionGroupBytes = 10000000
-const maxProposalSize = 350000
-
-type transactionBlockMessage struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Version int32 `codec:"v"`
- Round basics.Round `codec:"r"`
- TxnBloomFilter encodedBloomFilter `codec:"b"`
- UpdatedRequestParams requestParams `codec:"p"`
- TransactionGroups packedTransactionGroups `codec:"g"`
- MsgSync timingParams `codec:"t"`
- RelayedProposal relayedProposal `codec:"rp"`
-}
-
-type encodedBloomFilter struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- BloomFilterType byte `codec:"t"`
- EncodingParams requestParams `codec:"p"`
- BloomFilter []byte `codec:"f,allocbound=maxBloomFilterSize"`
- ClearPrevious byte `codec:"c"`
-}
-
-type requestParams struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Offset byte `codec:"o"`
- Modulator byte `codec:"m"`
-}
-
-const (
- compressionFormatNone byte = iota
- compressionFormatDeflate
-)
-
-type packedTransactionGroups struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- Bytes []byte `codec:"g,allocbound=maxEncodedTransactionGroupBytes"`
- CompressionFormat byte `codec:"c"`
- LenDecompressedBytes uint64 `codec:"l"`
-}
-
-type timingParams struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"` //nolint:structcheck,unused
-
- RefTxnBlockMsgSeq uint64 `codec:"s"`
- ResponseElapsedTime uint64 `codec:"r"`
- AcceptedMsgSeq []uint64 `codec:"a,allocbound=maxAcceptedMsgSeq"`
- NextMsgMinDelay uint64 `codec:"m"`
-}
-
-const (
- noProposal byte = iota //nolint:deadcode,unused,varcheck
- transactionsForProposal //nolint:deadcode,unused,varcheck
-)
-
-type relayedProposal struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- RawBytes []byte `codec:"b,allocbound=maxProposalSize"`
- ExcludeProposal crypto.Digest `codec:"e"`
- Content byte `codec:"c"`
-}
diff --git a/txnsync/incoming.go b/txnsync/incoming.go
deleted file mode 100644
index 83ac416f1..000000000
--- a/txnsync/incoming.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "errors"
- "time"
-
- "github.com/algorand/go-algorand/data/pooldata"
-)
-
-var (
- errUnsupportedTransactionSyncMessageVersion = errors.New("unsupported transaction sync message version")
- errTransactionSyncIncomingMessageQueueFull = errors.New("transaction sync incoming message queue is full")
- errInvalidBloomFilter = errors.New("invalid bloom filter")
- errDecodingReceivedTransactionGroupsFailed = errors.New("failed to decode incoming transaction groups")
-)
-
-type incomingMessage struct {
- networkPeer interface{}
- message transactionBlockMessage
- sequenceNumber uint64
- peer *Peer
- encodedSize int // the byte length of the incoming network message
- bloomFilter *testableBloomFilter
- transactionGroups []pooldata.SignedTxGroup
- timeReceived int64
-}
-
-// incomingMessageHandler
-// note - this message is called by the network go-routine dispatch pool, and is not synchronized with the rest of the transaction synchronizer
-func (s *syncState) asyncIncomingMessageHandler(networkPeer interface{}, peer *Peer, message []byte, sequenceNumber uint64, receivedTimestamp int64) (err error) {
- // increase number of incoming messages metric.
- txsyncIncomingMessagesTotal.Inc(nil)
-
- // check the return value when we exit this function. if we fail, we increase the metric.
- defer func() {
- if err != nil {
- // increase number of unprocessed incoming messages metric.
- txsyncUnprocessedIncomingMessagesTotal.Inc(nil)
- }
- }()
-
- incomingMessage := incomingMessage{networkPeer: networkPeer, sequenceNumber: sequenceNumber, encodedSize: len(message), peer: peer, timeReceived: receivedTimestamp}
- _, err = incomingMessage.message.UnmarshalMsg(message)
- if err != nil {
- // if we received a message that we cannot parse, disconnect.
- s.log.Infof("received unparsable transaction sync message from peer. disconnecting from peer.")
- s.incomingMessagesQ.erase(peer, networkPeer)
- return err
- }
-
- if incomingMessage.message.Version != txnBlockMessageVersion {
- // we receive a message from a version that we don't support, disconnect.
- s.log.Infof("received unsupported transaction sync message version from peer (%d). disconnecting from peer.", incomingMessage.message.Version)
- s.incomingMessagesQ.erase(peer, networkPeer)
- return errUnsupportedTransactionSyncMessageVersion
- }
-
- // if the peer sent us a bloom filter, decode it
- if !incomingMessage.message.TxnBloomFilter.MsgIsZero() {
- bloomFilter, err := decodeBloomFilter(incomingMessage.message.TxnBloomFilter)
- if err != nil {
- s.log.Infof("Invalid bloom filter received from peer : %v", err)
- s.incomingMessagesQ.erase(peer, networkPeer)
- return errInvalidBloomFilter
- }
- incomingMessage.bloomFilter = bloomFilter
- // increase number of decoded bloom filters.
- txsyncDecodedBloomFiltersTotal.Inc(nil)
- }
-
- // if the peer sent us any transactions, decode these.
- incomingMessage.transactionGroups, err = decodeTransactionGroups(incomingMessage.message.TransactionGroups, s.genesisID, s.genesisHash)
- if err != nil {
- s.log.Infof("failed to decode received transactions groups: %v\n", err)
- s.incomingMessagesQ.erase(peer, networkPeer)
- return errDecodingReceivedTransactionGroupsFailed
- }
-
- if peer == nil {
- // if we don't have a peer, then we need to enqueue this task to be handled by the main loop since we want to ensure that
- // all the peer objects are created synchronously.
- enqueued := s.incomingMessagesQ.enqueue(incomingMessage)
- if !enqueued {
- // if we failed to enqueue, it means that the queue is full. Try to remove disconnected
- // peers from the queue before re-attempting.
- peers := s.node.GetPeers()
- if s.incomingMessagesQ.prunePeers(peers) {
- // if we were successful in removing at least a single peer, then try to add the entry again.
- enqueued = s.incomingMessagesQ.enqueue(incomingMessage)
- }
- if !enqueued {
- // if we can't enqueue that, return an error, which would disconnect the peer.
- // ( we have to disconnect, since otherwise, we would have no way to synchronize the sequence number)
- s.log.Infof("unable to enqueue incoming message from a peer without txsync allocated data; incoming messages queue is full. disconnecting from peer.")
- s.incomingMessagesQ.erase(peer, networkPeer)
- return errTransactionSyncIncomingMessageQueueFull
- }
-
- }
- return nil
- }
- // place the incoming message on the *peer* heap, allowing us to dequeue it in the order by which it was received by the network library.
- err = peer.incomingMessages.enqueue(incomingMessage)
- if err != nil {
- // if the incoming message queue for this peer is full, disconnect from this peer.
- s.log.Infof("unable to enqueue incoming message into peer incoming message backlog. disconnecting from peer.")
- s.incomingMessagesQ.erase(peer, networkPeer)
- return err
- }
-
- // (maybe) place the peer message on the main queue. This would get skipped if the peer is already on the queue.
- enqueued := s.incomingMessagesQ.enqueue(incomingMessage)
- if !enqueued {
- // if we failed to enqueue, it means that the queue is full. Try to remove disconnected
- // peers from the queue before re-attempting.
- peers := s.node.GetPeers()
- if s.incomingMessagesQ.prunePeers(peers) {
- // if we were successful in removing at least a single peer, then try to add the entry again.
- enqueued = s.incomingMessagesQ.enqueue(incomingMessage)
- }
- if !enqueued {
- // if we can't enqueue that, return an error, which would disconnect the peer.
- s.log.Infof("unable to enqueue incoming message from a peer with txsync allocated data; incoming messages queue is full. disconnecting from peer.")
- s.incomingMessagesQ.erase(peer, networkPeer)
- return errTransactionSyncIncomingMessageQueueFull
- }
- }
- return nil
-}
-
-func (s *syncState) evaluateIncomingMessage(message incomingMessage) {
- peer := message.peer
- if peer == nil {
- // check if a peer was created already for this network peer object.
- peerInfo := s.node.GetPeer(message.networkPeer)
- if peerInfo.NetworkPeer == nil {
- // the message.networkPeer isn't a valid unicast peer, so we can exit right here.
- return
- }
- if peerInfo.TxnSyncPeer == nil {
- // we couldn't really do much about this message previously, since we didn't have the peer.
- peer = makePeer(message.networkPeer, peerInfo.IsOutgoing, s.isRelay, &s.config, s.log, s.node.GetPeerLatency(message.networkPeer))
- // let the network peer object know about our peer
- s.node.UpdatePeers([]*Peer{peer}, []interface{}{message.networkPeer}, 0)
- } else {
- peer = peerInfo.TxnSyncPeer
- }
- message.peer = peer
- err := peer.incomingMessages.enqueue(message)
- if err != nil {
- // this is not really likely, since we won't saturate the peer heap right after creating it..
- return
- }
- }
-
- messageProcessed := false
- transactionPoolSize := 0
- totalAccumulatedTransactionsCount := 0 // the number of transactions that were added during the execution of this method
- transactionHandlerBacklogFull := false
-incomingMessageLoop:
- for {
- incomingMsg, seq, err := peer.incomingMessages.popSequence(peer.nextReceivedMessageSeq)
- switch err {
- case errHeapEmpty:
- // this is very likely, once we run out of consecutive messages.
- break incomingMessageLoop
- case errSequenceNumberMismatch:
- // if we receive a message which wasn't in-order, just let it go.
- s.log.Debugf("received message out of order; seq = %d, expecting seq = %d\n", seq, peer.nextReceivedMessageSeq)
- break incomingMessageLoop
- }
-
- // increase the message sequence number, since we're processing this message.
- peer.nextReceivedMessageSeq++
-
- // skip txnsync messages with proposalData for now
- if !incomingMsg.message.RelayedProposal.MsgIsZero() {
- continue
- }
-
- // update the round number if needed.
- if incomingMsg.message.Round > peer.lastRound {
- peer.lastRound = incomingMsg.message.Round
- } else if incomingMsg.message.Round < peer.lastRound {
- // peer sent us message for an older round, *after* a new round ?!
- continue
- }
-
- // if the peer sent us a bloom filter, store this.
- if incomingMsg.bloomFilter != nil {
- peer.addIncomingBloomFilter(incomingMsg.message.Round, incomingMsg.bloomFilter, s.round)
- }
-
- peer.updateRequestParams(incomingMsg.message.UpdatedRequestParams.Modulator, incomingMsg.message.UpdatedRequestParams.Offset)
- timeInQueue := time.Duration(0)
- if incomingMsg.timeReceived > 0 {
- timeInQueue = time.Since(time.Unix(0, incomingMsg.timeReceived))
- }
- peer.updateIncomingMessageTiming(incomingMsg.message.MsgSync, s.round, s.clock.Since(), timeInQueue, peer.cachedLatency, incomingMsg.encodedSize)
-
- // if the peer's round is more than a single round behind the local node, then we don't want to
- // try and load the transactions. The other peer should first catch up before getting transactions.
- if (peer.lastRound + 1) < s.round {
- if s.config.EnableVerbosedTransactionSyncLogging {
- s.log.Infof("Incoming Txsync #%d late round %d", seq, peer.lastRound)
- }
- continue
- }
-
- // add the received transaction groups to the peer's recentSentTransactions so that we won't be sending these back to the peer.
- peer.updateIncomingTransactionGroups(incomingMsg.transactionGroups)
-
- // before enqueuing more data to the transaction pool, make sure we flush the ack channel
- peer.dequeuePendingTransactionPoolAckMessages()
-
- // if we received at least a single transaction group, then forward it to the transaction handler.
- if len(incomingMsg.transactionGroups) > 0 {
- // get the number of transactions ( not transaction groups !! ) from the transaction groups slice.
- // this code is using the fact the we allocate all the transactions as a single array, and then slice
- // them for the different transaction groups. The transaction handler would re-allocate the transactions that
- // would be stored in the transaction pool.
- totalTransactionCount := cap(incomingMsg.transactionGroups[0].Transactions)
-
- // send the incoming transaction group to the node last, so that the txhandler could modify the underlaying array if needed.
- currentTransactionPoolSize := s.node.IncomingTransactionGroups(peer, peer.nextReceivedMessageSeq-1, incomingMsg.transactionGroups)
- // was the call reached the transaction handler queue ?
- if currentTransactionPoolSize >= 0 {
- // we want to store in transactionPoolSize only the first call to IncomingTransactionGroups:
- // when multiple IncomingTransactionGroups calls are made within this for-loop, we want to get the current transaction pool size,
- // plus an estimate for the optimistic size after all the transaction groups would get added. For that purpose, it would be sufficient
- // to get the transaction pool size once. The precise size of the transaction pool here is not critical - we use it only for the purpose
- // of calculating the beta number as well as figure if the transaction pool is full or not ( both of them are range-based ).
- if transactionPoolSize == 0 {
- transactionPoolSize = currentTransactionPoolSize
- }
- // add the transactions count to the accumulated count.
- totalAccumulatedTransactionsCount += totalTransactionCount
- } else {
- // no - we couldn't add this group since the transaction handler buffer backlog exceeded it's capacity.
- transactionHandlerBacklogFull = true
- }
- }
-
- s.log.incomingMessage(msgStats{seq, incomingMsg.message.Round, len(incomingMsg.transactionGroups), incomingMsg.message.UpdatedRequestParams, len(incomingMsg.message.TxnBloomFilter.BloomFilter), incomingMsg.message.MsgSync.NextMsgMinDelay, peer.networkAddress()})
- messageProcessed = true
- }
-
- // if we're a relay, this is an outgoing peer and we've processed a valid message,
- // then we want to respond right away as well as schedule bloom message.
- if messageProcessed && peer.isOutgoing && s.isRelay && peer.lastReceivedMessageNextMsgMinDelay != time.Duration(0) {
- peer.state = peerStateStartup
- // if we had another message coming from this peer previously, we need to ensure there are not scheduled tasks.
- s.scheduler.peerDuration(peer)
-
- s.scheduler.schedulePeer(peer, s.clock.Since())
- }
- if transactionPoolSize > 0 || transactionHandlerBacklogFull {
- s.onTransactionPoolChangedEvent(MakeTransactionPoolChangeEvent(transactionPoolSize+totalAccumulatedTransactionsCount, transactionHandlerBacklogFull))
- }
-}
diff --git a/txnsync/incomingMsgQ.go b/txnsync/incomingMsgQ.go
deleted file mode 100644
index aecb673bd..000000000
--- a/txnsync/incomingMsgQ.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "sync"
-
- "github.com/algorand/go-deadlock"
-)
-
-// queuedMsgEntry used as a helper struct to manage the manipulation of incoming
-// message queue.
-type queuedMsgEntry struct {
- msg incomingMessage
- next *queuedMsgEntry
- prev *queuedMsgEntry
-}
-
-type queuedMsgList struct {
- head *queuedMsgEntry
-}
-
-// incomingMessageQueue manages the global incoming message queue across all the incoming peers.
-type incomingMessageQueue struct {
- outboundPeerCh chan incomingMessage
- enqueuedPeersMap map[*Peer]*queuedMsgEntry
- messages queuedMsgList
- freelist queuedMsgList
- enqueuedPeersMu deadlock.Mutex
- enqueuedPeersCond *sync.Cond
- shutdownRequest chan struct{}
- shutdownConfirmed chan struct{}
- deletePeersCh chan interface{}
- peerlessCount int
-}
-
-// maxPeersCount defines the maximum number of supported peers that can have their messages waiting
-// in the incoming message queue at the same time. This number can be lower then the actual number of
-// connected peers, as it's used only for pending messages.
-const maxPeersCount = 2048
-
-// maxPeerlessCount is the number of messages that we've received that doesn't have a Peer object allocated
-// for them ( yet )
-const maxPeerlessCount = 512
-
-// makeIncomingMessageQueue creates an incomingMessageQueue object and initializes all the internal variables.
-func makeIncomingMessageQueue() *incomingMessageQueue {
- imq := &incomingMessageQueue{
- outboundPeerCh: make(chan incomingMessage),
- enqueuedPeersMap: make(map[*Peer]*queuedMsgEntry, maxPeersCount),
- shutdownRequest: make(chan struct{}, 1),
- shutdownConfirmed: make(chan struct{}, 1),
- deletePeersCh: make(chan interface{}),
- }
- imq.enqueuedPeersCond = sync.NewCond(&imq.enqueuedPeersMu)
- imq.freelist.initialize(maxPeersCount)
- go imq.messagePump()
- return imq
-}
-
-// dequeueHead removes the first head message from the linked list.
-func (ml *queuedMsgList) dequeueHead() (out *queuedMsgEntry) {
- if ml.head == nil {
- return nil
- }
- entry := ml.head
- out = entry
- if entry.next == entry {
- ml.head = nil
- return
- }
- entry.next.prev = entry.prev
- entry.prev.next = entry.next
- ml.head = entry.next
- out.next = out
- out.prev = out
- return
-}
-
-// initialize initializes a list to have msgCount entries.
-func (ml *queuedMsgList) initialize(msgCount int) {
- msgs := make([]queuedMsgEntry, msgCount)
- for i := 0; i < msgCount; i++ {
- msgs[i].next = &msgs[(i+1)%msgCount]
- msgs[i].prev = &msgs[(i+msgCount-1)%msgCount]
- }
- ml.head = &msgs[0]
-}
-
-// empty methods tests to see if the linked list is empty
-func (ml *queuedMsgList) empty() bool {
- return ml.head == nil
-}
-
-// remove removes the given msg from the linked list. The method
-// is written with the assumption that the given msg is known to be
-// part of the linked list.
-func (ml *queuedMsgList) remove(msg *queuedMsgEntry) {
- if msg.next == msg {
- ml.head = nil
- return
- }
- msg.prev.next = msg.next
- msg.next.prev = msg.prev
- if ml.head == msg {
- ml.head = msg.next
- }
- msg.prev = msg
- msg.next = msg
-}
-
-// filterRemove removes zero or more messages from the linked list, for which the given
-// removeFunc returns true. The removed linked list entries are returned as a linked list.
-func (ml *queuedMsgList) filterRemove(removeFunc func(*queuedMsgEntry) bool) *queuedMsgEntry {
- if ml.empty() {
- return nil
- }
- // do we have a single item ?
- if ml.head.next == ml.head {
- if removeFunc(ml.head) {
- out := ml.head
- ml.head = nil
- return out
- }
- return nil
- }
- current := ml.head
- last := ml.head.prev
- var letGo queuedMsgList
- for {
- next := current.next
- if removeFunc(current) {
- ml.remove(current)
- letGo.enqueueTail(current)
- }
- if current == last {
- break
- }
- current = next
- }
- return letGo.head
-}
-
-// enqueueTail adds to the current linked list another linked list whose head is msg.
-func (ml *queuedMsgList) enqueueTail(msg *queuedMsgEntry) {
- if ml.head == nil {
- ml.head = msg
- return
- } else if msg == nil {
- return
- }
- lastEntryOld := ml.head.prev
- lastEntryNew := msg.prev
- lastEntryOld.next = msg
- ml.head.prev = lastEntryNew
- msg.prev = lastEntryOld
- lastEntryNew.next = ml.head
-}
-
-// shutdown signals to the message pump to shut down and waits until the message pump goroutine
-// aborts.
-func (imq *incomingMessageQueue) shutdown() {
- imq.enqueuedPeersMu.Lock()
- close(imq.shutdownRequest)
- imq.enqueuedPeersCond.Signal()
- imq.enqueuedPeersMu.Unlock()
- <-imq.shutdownConfirmed
-}
-
-// messagePump is the incoming message queue message pump. It takes messages from the messages list
-// and attempt to write these to the outboundPeerCh.
-func (imq *incomingMessageQueue) messagePump() {
- defer close(imq.shutdownConfirmed)
- imq.enqueuedPeersMu.Lock()
- defer imq.enqueuedPeersMu.Unlock()
-
- for {
- // check if we need to shutdown.
- select {
- case <-imq.shutdownRequest:
- return
- default:
- }
-
- // do we have any item to enqueue ?
- if !imq.messages.empty() {
- msgEntry := imq.messages.dequeueHead()
- msg := msgEntry.msg
- imq.freelist.enqueueTail(msgEntry)
- if msg.peer != nil {
- delete(imq.enqueuedPeersMap, msg.peer)
- } else {
- imq.peerlessCount--
- }
- imq.enqueuedPeersMu.Unlock()
- writeOutboundMessage:
- select {
- case imq.outboundPeerCh <- msg:
- imq.enqueuedPeersMu.Lock()
- continue
- case <-imq.shutdownRequest:
- imq.enqueuedPeersMu.Lock()
- return
- // see if this msg need to be delivered or not.
- case droppedPeer := <-imq.deletePeersCh:
- if msg.networkPeer == droppedPeer {
- // we want to skip this message.
- imq.enqueuedPeersMu.Lock()
- continue
- }
- goto writeOutboundMessage
- }
- }
- imq.enqueuedPeersCond.Wait()
- }
-}
-
-// getIncomingMessageChannel returns the incoming messages channel, which would contain entries once
-// we have one ( or more ) pending incoming messages.
-func (imq *incomingMessageQueue) getIncomingMessageChannel() <-chan incomingMessage {
- return imq.outboundPeerCh
-}
-
-// enqueue places the given message on the queue, if and only if it's associated peer doesn't
-// appear on the incoming message queue already. In the case there is no peer, the message
-// would be placed on the queue as is.
-// The method returns false if the incoming message doesn't have it's peer on the queue and
-// the method has failed to place the message on the queue. True is returned otherwise.
-func (imq *incomingMessageQueue) enqueue(m incomingMessage) bool {
- imq.enqueuedPeersMu.Lock()
- defer imq.enqueuedPeersMu.Unlock()
- if m.peer != nil {
- if _, has := imq.enqueuedPeersMap[m.peer]; has {
- return true
- }
- } else {
- // do we have enough "room" for peerless messages ?
- if imq.peerlessCount >= maxPeerlessCount {
- return false
- }
- }
- // do we have enough room in the message queue for the new message ?
- if imq.freelist.empty() {
- // no - we don't have enough room in the circular buffer.
- return false
- }
- freeMsgEntry := imq.freelist.dequeueHead()
- freeMsgEntry.msg = m
- imq.messages.enqueueTail(freeMsgEntry)
- // if we successfully enqueued the message, set the enqueuedPeersMap so that we won't enqueue the same peer twice.
- if m.peer != nil {
- imq.enqueuedPeersMap[m.peer] = freeMsgEntry
- } else {
- imq.peerlessCount++
- }
- imq.enqueuedPeersCond.Signal()
- return true
-}
-
-// erase removes all the entries associated with the given network peer.
-// this method isn't very efficient, and should be used only in cases where
-// we disconnect from a peer and want to cleanup all the pending tasks associated
-// with that peer.
-func (imq *incomingMessageQueue) erase(peer *Peer, networkPeer interface{}) {
- imq.enqueuedPeersMu.Lock()
-
- var peerMsgEntry *queuedMsgEntry
- if peer == nil {
- // lookup for a Peer object.
- for peer, peerMsgEntry = range imq.enqueuedPeersMap {
- if peer.networkPeer != networkPeer {
- continue
- }
- break
- }
- } else {
- var has bool
- if peerMsgEntry, has = imq.enqueuedPeersMap[peer]; !has {
- // the peer object is not in the map.
- peer = nil
- }
- }
-
- if peer != nil {
- delete(imq.enqueuedPeersMap, peer)
- imq.messages.remove(peerMsgEntry)
- imq.freelist.enqueueTail(peerMsgEntry)
- imq.enqueuedPeersMu.Unlock()
- select {
- case imq.deletePeersCh <- networkPeer:
- default:
- }
- return
- }
-
- imq.removeMessageByNetworkPeer(networkPeer)
- imq.enqueuedPeersMu.Unlock()
- select {
- case imq.deletePeersCh <- networkPeer:
- default:
- }
-}
-
-// removeMessageByNetworkPeer removes the messages associated with the given network peer from the
-// queue.
-// note : the method expect that the enqueuedPeersMu lock would be taken.
-func (imq *incomingMessageQueue) removeMessageByNetworkPeer(networkPeer interface{}) {
- peerlessCount := 0
- removeByNetworkPeer := func(msg *queuedMsgEntry) bool {
- if msg.msg.networkPeer == networkPeer {
- if msg.msg.peer == nil {
- peerlessCount++
- }
- return true
- }
- return false
- }
- removeList := imq.messages.filterRemove(removeByNetworkPeer)
- imq.freelist.enqueueTail(removeList)
- imq.peerlessCount -= peerlessCount
-}
-
-// prunePeers removes from the enqueuedMessages queue all the entries that are not provided in the
-// given activePeers slice.
-func (imq *incomingMessageQueue) prunePeers(activePeers []PeerInfo) (peerRemoved bool) {
- activePeersMap := make(map[*Peer]bool)
- activeNetworkPeersMap := make(map[interface{}]bool)
- for _, activePeer := range activePeers {
- if activePeer.TxnSyncPeer != nil {
- activePeersMap[activePeer.TxnSyncPeer] = true
- }
- if activePeer.NetworkPeer != nil {
- activeNetworkPeersMap[activePeer.NetworkPeer] = true
- }
- }
- imq.enqueuedPeersMu.Lock()
- defer imq.enqueuedPeersMu.Unlock()
- peerlessCount := 0
- isPeerMissing := func(msg *queuedMsgEntry) bool {
- if msg.msg.peer != nil {
- if !activePeersMap[msg.msg.peer] {
- return true
- }
- }
- if !activeNetworkPeersMap[msg.msg.networkPeer] {
- if msg.msg.peer == nil {
- peerlessCount++
- }
- return true
- }
- return false
- }
- removeList := imq.messages.filterRemove(isPeerMissing)
- peerRemoved = removeList != nil
- imq.freelist.enqueueTail(removeList)
- imq.peerlessCount -= peerlessCount
- return
-}
diff --git a/txnsync/incomingMsgQ_test.go b/txnsync/incomingMsgQ_test.go
deleted file mode 100644
index 9ff1adf4b..000000000
--- a/txnsync/incomingMsgQ_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-// fillMessageQueue fills the message queue with the given message.
-func (imq *incomingMessageQueue) fillMessageQueue(msg incomingMessage) {
- imq.enqueuedPeersMu.Lock()
- for i := 0; i < maxPeersCount; i++ {
- msgEntry := imq.freelist.dequeueHead()
- msgEntry.msg = msg
- imq.messages.enqueueTail(msgEntry)
- }
- if msg.peer == nil {
- imq.peerlessCount += maxPeersCount
- }
- imq.enqueuedPeersCond.Signal()
- imq.enqueuedPeersMu.Unlock()
-
- // wait for a single message to be consumed by the message pump.
- for {
- imq.enqueuedPeersMu.Lock()
- if !imq.freelist.empty() {
- break
- }
- imq.enqueuedPeersMu.Unlock()
- time.Sleep(time.Millisecond)
- }
- for !imq.freelist.empty() {
- msgEntry := imq.freelist.dequeueHead()
- msgEntry.msg = msg
- imq.messages.enqueueTail(msgEntry)
- }
- imq.enqueuedPeersCond.Signal()
- imq.enqueuedPeersMu.Unlock()
-}
-
-// count counts teh number of messages in the list
-func (ml *queuedMsgList) count() int {
- first := ml.head
- cur := first
- count := 0
- for cur != nil {
- next := cur.next
- if next == first {
- next = nil
- }
- count++
- cur = next
- }
- return count
-}
-
-// validateLinking test to see the the entries in the list are correctly connected.
-func (ml *queuedMsgList) validateLinking(t *testing.T) {
- cur := ml.head
- if cur == nil {
- return
- }
- seen := make(map[*queuedMsgEntry]bool)
- list := make([]*queuedMsgEntry, 0)
- for {
- if seen[cur] {
- break
- }
- seen[cur] = true
- require.NotNil(t, cur.prev)
- require.NotNil(t, cur.next)
- list = append(list, cur)
- cur = cur.next
- }
- for i := range list {
- require.Equal(t, list[i], list[(i+len(list)-1)%len(list)].next)
- require.Equal(t, list[i], list[(i+1)%len(list)].prev)
- }
-}
-
-// TestMsgQCounts tests the message queue add/remove manipulations
-func TestMsgQCounts(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var list queuedMsgList
- list.initialize(7)
- list.validateLinking(t)
- require.Equal(t, 7, list.count())
- list.dequeueHead()
- list.validateLinking(t)
- require.Equal(t, 6, list.count())
- var anotherList queuedMsgList
- anotherList.initialize(4)
- require.Equal(t, 4, anotherList.count())
- list.enqueueTail(anotherList.head)
- list.validateLinking(t)
- require.Equal(t, 10, list.count())
-}
-
-// TestMsgQFiltering tests the message queue filtering
-func TestMsgQFiltering(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- item1 := &queuedMsgEntry{}
- item2 := &queuedMsgEntry{}
- item3 := &queuedMsgEntry{}
- item1.next = item1
- item1.prev = item1
- item2.next = item2
- item2.prev = item2
- item3.next = item3
- item3.prev = item3
-
- var list queuedMsgList
- list.enqueueTail(item1)
- list.enqueueTail(item2)
- list.enqueueTail(item3)
-
- // test removing head.
- removedItem1 := list.filterRemove(func(msg *queuedMsgEntry) bool {
- return msg == item1
- })
- require.Equal(t, item1, removedItem1)
- require.Equal(t, 2, list.count())
-
- // test removing tail
- removedItem3 := list.filterRemove(func(msg *queuedMsgEntry) bool {
- return msg == item3
- })
- require.Equal(t, item3, removedItem3)
- require.Equal(t, 1, list.count())
-
- // test removing last item
- removedItem2 := list.filterRemove(func(msg *queuedMsgEntry) bool {
- return msg == item2
- })
- require.Equal(t, item2, removedItem2)
- require.True(t, list.empty())
-}
diff --git a/txnsync/incoming_test.go b/txnsync/incoming_test.go
deleted file mode 100644
index 978a1c4ca..000000000
--- a/txnsync/incoming_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "errors"
- "fmt"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/msgp/msgp"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-type incomingLogger struct {
- logging.Logger
- lastLogged string
-}
-
-func (ml *incomingLogger) Debugf(format string, args ...interface{}) {
- ml.lastLogged = fmt.Sprintf(format, args...)
-}
-
-func (ml *incomingLogger) Infof(format string, args ...interface{}) {
- ml.lastLogged = fmt.Sprintf(format, args...)
-}
-
-func TestAsyncIncomingMessageHandlerAndErrors(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- message := transactionBlockMessage{Version: 1}
- messageBytes := message.MarshalMsg(nil)
- sequenceNumber := uint64(1)
- incLogger := incomingLogger{}
-
- cfg := config.GetDefaultLocal()
- mNodeConnector := &mockNodeConnector{transactionPoolSize: 3}
- s := syncState{
- log: wrapLogger(&incLogger, &cfg),
- node: mNodeConnector,
- clock: mNodeConnector.Clock(),
- incomingMessagesQ: makeIncomingMessageQueue(),
- }
-
- // expect UnmarshalMsg error
- messageBytes[0] = 0
- err := s.asyncIncomingMessageHandler(nil, nil, messageBytes, sequenceNumber, 0)
- msgpe := msgp.TypeError{}
- require.True(t, errors.As(err, &msgpe))
-
- // expect wrong version error
- message = transactionBlockMessage{Version: -3}
- messageBytes = message.MarshalMsg(nil)
- err = s.asyncIncomingMessageHandler(nil, nil, messageBytes, sequenceNumber, 0)
- require.Equal(t, errUnsupportedTransactionSyncMessageVersion, err)
-
- // expect error decoding bloomFilter
- message.Version = 1
- message.TxnBloomFilter.BloomFilterType = byte(multiHashBloomFilter)
- messageBytes = message.MarshalMsg(nil)
- err = s.asyncIncomingMessageHandler(nil, nil, messageBytes, sequenceNumber, 0)
- require.Equal(t, errInvalidBloomFilter, err)
-
- // error decoding transaction groups
- message.TxnBloomFilter.BloomFilterType = byte(xorBloomFilter32)
- bf, _ := filterFactoryXor32(1, &s)
- bf.Set([]byte("aoeu1234aoeu1234"))
- message.TxnBloomFilter.BloomFilter, err = bf.MarshalBinary()
- require.NoError(t, err)
- message.TransactionGroups = packedTransactionGroups{Bytes: []byte{1}}
- messageBytes = message.MarshalMsg(nil)
- err = s.asyncIncomingMessageHandler(nil, nil, messageBytes, sequenceNumber, 0)
- require.Equal(t, errDecodingReceivedTransactionGroupsFailed, err)
- s.incomingMessagesQ.shutdown()
-
- peer := Peer{networkPeer: &s}
-
- // error queue full
- message.TransactionGroups = packedTransactionGroups{}
- messageBytes = message.MarshalMsg(nil)
- s.incomingMessagesQ = makeIncomingMessageQueue()
- s.incomingMessagesQ.fillMessageQueue(incomingMessage{peer: &peer, networkPeer: &s.incomingMessagesQ})
- mNodeConnector.peers = append(mNodeConnector.peers, PeerInfo{TxnSyncPeer: &peer, NetworkPeer: &s.incomingMessagesQ})
- err = s.asyncIncomingMessageHandler(nil, nil, messageBytes, sequenceNumber, 0)
- require.Equal(t, errTransactionSyncIncomingMessageQueueFull, err)
- s.incomingMessagesQ.shutdown()
-
- // Success where peer == nil
- s.incomingMessagesQ = makeIncomingMessageQueue()
- err = s.asyncIncomingMessageHandler(nil, nil, messageBytes, sequenceNumber, 0)
- require.NoError(t, err)
- s.incomingMessagesQ.shutdown()
-
- // error when placing the peer message on the main queue (incomingMessages cannot accept messages)
- s.incomingMessagesQ = makeIncomingMessageQueue()
- s.incomingMessagesQ.fillMessageQueue(incomingMessage{peer: nil, networkPeer: &s})
- mNodeConnector.peers = append(mNodeConnector.peers, PeerInfo{NetworkPeer: &s})
-
- err = s.asyncIncomingMessageHandler(nil, &peer, messageBytes, sequenceNumber, 0)
- require.Equal(t, errTransactionSyncIncomingMessageQueueFull, err)
- s.incomingMessagesQ.shutdown()
-
- s.incomingMessagesQ = makeIncomingMessageQueue()
- err = nil
- // fill up the incoming message queue (one was already added)
- for x := 1; x <= messageOrderingHeapLimit; x++ {
- require.NoError(t, err)
- err = s.asyncIncomingMessageHandler(nil, &peer, messageBytes, sequenceNumber, 0)
- }
- require.Equal(t, errHeapReachedCapacity, err)
- s.incomingMessagesQ.shutdown()
-}
-
-func TestEvaluateIncomingMessagePart1(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- message := incomingMessage{}
- cfg := config.GetDefaultLocal()
- peer := &Peer{}
-
- incLogger := incomingLogger{}
-
- mNodeConnector := &mockNodeConnector{}
- mNodeConnector.peerInfo = PeerInfo{}
- s := syncState{
- node: mNodeConnector,
- log: wrapLogger(&incLogger, &cfg),
- clock: mNodeConnector.Clock()}
-
- // Test the cases inside the peer == nil condition
-
- // the message.networkPeer isn't a valid unicast peer
- s.evaluateIncomingMessage(message)
-
- // peer was already created
- mNodeConnector.peerInfo.NetworkPeer = peer
-
- s.evaluateIncomingMessage(message)
- // no TxnSyncPeer in peerInfo
- require.True(t, mNodeConnector.updatingPeers)
- mNodeConnector.updatingPeers = false
-
- s.incomingMessagesQ = makeIncomingMessageQueue()
- defer s.incomingMessagesQ.shutdown()
- message.peer = peer
- require.True(t, s.incomingMessagesQ.enqueue(message))
- mNodeConnector.peerInfo.TxnSyncPeer = peer
- peer.incomingMessages = messageOrderingHeap{}
- // TxnSyncPeer in peerInfo
- s.evaluateIncomingMessage(message)
- require.False(t, mNodeConnector.updatingPeers)
- <-s.incomingMessagesQ.getIncomingMessageChannel()
- _, found := s.incomingMessagesQ.enqueuedPeersMap[peer]
- require.False(t, found)
-
- // fill the heap with messageOrderingHeapLimit elements so that the incomingMessages enqueue fails
- message.networkPeer = &s
- message.peer = nil
- for x := 0; x < messageOrderingHeapLimit; x++ {
- err := peer.incomingMessages.enqueue(message)
- require.NoError(t, err)
- }
- mNodeConnector.peers = []PeerInfo{{TxnSyncPeer: peer, NetworkPeer: &s}}
- // TxnSyncPeer in peerInfo
- s.evaluateIncomingMessage(message)
- require.False(t, mNodeConnector.updatingPeers)
-}
-
-func TestEvaluateIncomingMessagePart2(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- cfg := config.GetDefaultLocal()
- cfg.EnableVerbosedTransactionSyncLogging = true
- peer := &Peer{}
-
- incLogger := incomingLogger{}
-
- mNodeConnector := &mockNodeConnector{transactionPoolSize: 3}
- mNodeConnector.peerInfo = PeerInfo{NetworkPeer: peer}
-
- s := syncState{
- node: mNodeConnector,
- log: wrapLogger(&incLogger, &cfg),
- clock: mNodeConnector.Clock()}
-
- // Test the branches in the for loop
-
- mNodeConnector.peerInfo.TxnSyncPeer = peer
- peer.incomingMessages = messageOrderingHeap{}
-
- // txnsync messages with proposalData
- err := peer.incomingMessages.enqueue(
- incomingMessage{
- sequenceNumber: 0,
- message: transactionBlockMessage{
- RelayedProposal: relayedProposal{Content: 10}}})
- require.NoError(t, err)
-
- // update the round number
- err = peer.incomingMessages.enqueue(
- incomingMessage{
- sequenceNumber: 1,
- message: transactionBlockMessage{Round: 4}})
- require.NoError(t, err)
-
- // peer sent a message for an older round, *after* a new round
- err = peer.incomingMessages.enqueue(
- incomingMessage{
- sequenceNumber: 2,
- message: transactionBlockMessage{Round: 2}})
- require.NoError(t, err)
-
- // peer sends a bloom filter
- err = peer.incomingMessages.enqueue(
- incomingMessage{
- sequenceNumber: 3,
- bloomFilter: &testableBloomFilter{encodingParams: requestParams{Offset: 8}},
- message: transactionBlockMessage{Round: 4}})
- require.NoError(t, err)
-
- // message with a transaction group
- err = peer.incomingMessages.enqueue(
- incomingMessage{
- sequenceNumber: 4,
- transactionGroups: []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{
- transactions.SignedTxn{}}}},
- message: transactionBlockMessage{Round: 4}})
- require.NoError(t, err)
- peer.recentSentTransactions = makeTransactionCache(5, 10, 20)
-
- // receive a message not in order
- s.evaluateIncomingMessage(incomingMessage{sequenceNumber: 11})
- require.Equal(t, "received message out of order; seq = 11, expecting seq = 5\n", incLogger.lastLogged)
- require.Equal(t, uint8(8), peer.recentIncomingBloomFilters[0].filter.encodingParams.Offset)
-
- // currentTransactionPoolSize is -1
- peer.incomingMessages = messageOrderingHeap{}
- mNodeConnector.transactionPoolSize = -1
- s.evaluateIncomingMessage(incomingMessage{
- sequenceNumber: 5,
- message: transactionBlockMessage{Round: 5},
- transactionGroups: []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{
- transactions.SignedTxn{}}}},
- })
- require.Equal(t, "Incoming Txsync #5 round 5 transactions 1 request [0/0] bloom 0 nextTS 0 from ''", incLogger.lastLogged)
-
-}
-
-func TestEvaluateIncomingMessagePart3(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- cfg := config.GetDefaultLocal()
- cfg.EnableVerbosedTransactionSyncLogging = true
- peer := &Peer{isOutgoing: true, lastReceivedMessageNextMsgMinDelay: time.Duration(3)}
-
- incLogger := incomingLogger{}
-
- mNodeConnector := &mockNodeConnector{}
- mNodeConnector.peerInfo = PeerInfo{NetworkPeer: peer}
- mNodeConnector.peerInfo.TxnSyncPeer = peer
-
- s := syncState{
- node: mNodeConnector,
- log: wrapLogger(&incLogger, &cfg),
- clock: mNodeConnector.Clock(),
- round: 1,
- config: cfg,
- isRelay: true,
- scheduler: makePeerScheduler(),
- }
-
- // the peer will be added to s.scheduler
- s.evaluateIncomingMessage(incomingMessage{
- sequenceNumber: 0,
- message: transactionBlockMessage{
- MsgSync: timingParams{
- NextMsgMinDelay: 3}}})
- require.Equal(t, 1, len(s.scheduler.peers))
-
- s.round = 3
- s.evaluateIncomingMessage(incomingMessage{
- sequenceNumber: 1,
- message: transactionBlockMessage{
- MsgSync: timingParams{
- NextMsgMinDelay: 3}}})
-
- require.Equal(t, "Incoming Txsync #1 late round 0", incLogger.lastLogged)
-}
-
-func TestEvaluateIncomingMessageAccumulatedTransactionsCount(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- cfg := config.GetDefaultLocal()
- cfg.EnableVerbosedTransactionSyncLogging = true
- peer := &Peer{}
- peer.recentSentTransactions = makeTransactionCache(5, 10, 20)
- incLogger := incomingLogger{}
-
- mNodeConnector := &mockNodeConnector{transactionPoolSize: 3}
- mNodeConnector.peerInfo = PeerInfo{NetworkPeer: peer}
-
- s := syncState{
- node: mNodeConnector,
- log: wrapLogger(&incLogger, &cfg),
- clock: mNodeConnector.Clock()}
-
- mNodeConnector.peerInfo.TxnSyncPeer = peer
- peer.incomingMessages = messageOrderingHeap{}
-
- genesisID := "gID"
- genesisHash := crypto.Hash([]byte("gh"))
- txnGroups := getTxnGroups(genesisHash, genesisID)
-
- // test with more than 200 transactions in the txnGroups
- for x := 0; x < 100; x++ {
- t := getTxnGroups(genesisHash, genesisID)
- txnGroups = append(txnGroups, t...)
- }
-
- ptg, err := s.encodeTransactionGroups(txnGroups, 1000000000)
- require.NoError(t, err)
- txGroups, err := decodeTransactionGroups(ptg, genesisID, genesisHash)
- require.NoError(t, err)
-
- s.evaluateIncomingMessage(incomingMessage{
- sequenceNumber: 0,
- message: transactionBlockMessage{Round: 5},
- transactionGroups: txGroups,
- })
- require.Equal(t, time.Duration(115586426), s.lastBeta)
-}
diff --git a/txnsync/interfaces.go b/txnsync/interfaces.go
deleted file mode 100644
index 77ac07163..000000000
--- a/txnsync/interfaces.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "time"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-//msgp:ignore eventType
-type eventType int
-
-const (
- transactionPoolChangedEvent eventType = 1
- newRoundEvent eventType = 2
-)
-
-// RoundSettings is used to communicate the transaction syncer setting for a specific round
-type RoundSettings struct {
- Round basics.Round
- FetchTransactions bool // for non-relays that has no participation keys, there is no need to request transactions
-}
-
-// Event is an external triggering event
-type Event struct {
- eventType
-
- transactionPoolSize int
- roundSettings RoundSettings
- transactionHandlerBacklogFull bool
-}
-
-// IncomingMessageHandler is the signature of the incoming message handler used by the transaction sync to receive network messages
-type IncomingMessageHandler func(networkPeer interface{}, peer *Peer, message []byte, sequenceNumber uint64, receivedTimestamp int64) error
-
-// SendMessageCallback define a message sent feedback for performing message tracking
-type SendMessageCallback func(enqueued bool, sequenceNumber uint64) error
-
-// PeerInfo describes a single peer returned by GetPeers or GetPeer
-type PeerInfo struct {
- TxnSyncPeer *Peer
- NetworkPeer interface{}
- IsOutgoing bool
-}
-
-// networkPeerAddress is a subset of the network package HTTPPeer and UnicastPeer interface that
-// provides feedback for the destination address. It's used for logging out packet's destination addresses.
-type networkPeerAddress interface {
- GetAddress() string
-}
-
-// NodeConnector is used by the transaction sync for communicating with components external to the txnsync package.
-type NodeConnector interface {
- Events() <-chan Event
- GetCurrentRoundSettings() RoundSettings // return the current round settings from the node
- Clock() timers.WallClock
- Random(uint64) uint64
- GetPeers() []PeerInfo
- GetPeer(interface{}) PeerInfo // get a single peer given a network peer opaque interface
- // UpdatePeers call is being made to inform the node that either a link need to be established
- // between the set of the txsyncPeers peers and the set of netPeers, or that the peersAverageDataExchangeRate
- // was recalculated and could potentially be updated.
- // The peersAverageDataExchangeRate passed in here is the average communication rate ( measured in bytes per second )
- // across all the connected peers.
- UpdatePeers(txsyncPeers []*Peer, netPeers []interface{}, peersAverageDataExchangeRate uint64)
- SendPeerMessage(netPeer interface{}, msg []byte, callback SendMessageCallback)
- GetPeerLatency(netPeer interface{}) time.Duration
- // GetPendingTransactionGroups is called by the transaction sync when it needs to look into the transaction
- // pool and get the updated set of pending transactions. The second returned argument is the latest locally originated
- // group counter within the given transaction groups list. If there is no group that is locally originated, the expected
- // value is InvalidSignedTxGroupCounter.
- GetPendingTransactionGroups() (txGroups []pooldata.SignedTxGroup, latestLocallyOriginatedGroupCounter uint64)
- // IncomingTransactionGroups is called by the transaction sync when transactions have been received and need
- // to be stored in the transaction pool. The method returns the number of transactions in the transaction
- // pool before the txGroups is applied. A negative value is returned if the provided txGroups could not be applied
- // to the transaction pool.
- IncomingTransactionGroups(peer *Peer, messageSeq uint64, txGroups []pooldata.SignedTxGroup) (transactionPoolSize int)
- NotifyMonitor() chan struct{}
-}
-
-// MakeTransactionPoolChangeEvent creates an event for when a txn pool size has changed.
-func MakeTransactionPoolChangeEvent(transactionPoolSize int, transactionHandlerBacklogFull bool) Event {
- return Event{
- eventType: transactionPoolChangedEvent,
- transactionPoolSize: transactionPoolSize,
- transactionHandlerBacklogFull: transactionHandlerBacklogFull,
- }
-}
-
-// MakeNewRoundEvent creates an event for when a new round starts
-func MakeNewRoundEvent(roundNumber basics.Round, fetchTransactions bool) Event {
- return Event{
- eventType: newRoundEvent,
- roundSettings: RoundSettings{
- Round: roundNumber,
- FetchTransactions: fetchTransactions,
- },
- }
-}
diff --git a/txnsync/logger.go b/txnsync/logger.go
deleted file mode 100644
index 802387168..000000000
--- a/txnsync/logger.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/logging"
-)
-
-// make a local alias of the type so that we can refer to it without '.'
-type algodlogger = logging.Logger
-
-type msgStats struct {
- sequenceNumber uint64
- round basics.Round
- transactions int
- offsetModulator requestParams
- bloomSize int
- nextMsgMinDelay uint64
- peerAddress string
-}
-
-type msgLogger interface {
- outgoingMessage(mstat msgStats)
- incomingMessage(mstat msgStats)
-}
-
-// Logger is go-algorand/logging.Logger with some private additions for txnsync
-type Logger interface {
- logging.Logger
- msgLogger
-}
-
-type basicMsgLogger struct {
- algodlogger
- config *config.Local
-}
-
-func wrapLogger(l logging.Logger, config *config.Local) Logger {
- if ll, ok := l.(Logger); ok {
- return ll
- }
- out := &basicMsgLogger{
- algodlogger: l,
- config: config,
- }
- return out
-}
-
-func (l *basicMsgLogger) logMessage(mstat msgStats, mode, tofrom string) {
- if !l.config.EnableVerbosedTransactionSyncLogging {
- return
- }
- l.Infof(
- "%s Txsync #%d round %d transactions %d request [%d/%d] bloom %d nextTS %d %s '%s'",
- mode,
- mstat.sequenceNumber,
- mstat.round,
- mstat.transactions,
- mstat.offsetModulator.Offset,
- mstat.offsetModulator.Modulator,
- mstat.bloomSize,
- mstat.nextMsgMinDelay,
- tofrom,
- mstat.peerAddress,
- )
-}
-func (l *basicMsgLogger) outgoingMessage(mstat msgStats) {
- l.logMessage(mstat, "Outgoing", "to")
-}
-func (l *basicMsgLogger) incomingMessage(mstat msgStats) {
- l.logMessage(mstat, "Incoming", "from")
-}
diff --git a/txnsync/mainloop.go b/txnsync/mainloop.go
deleted file mode 100644
index 138206be0..000000000
--- a/txnsync/mainloop.go
+++ /dev/null
@@ -1,445 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "context"
- "math"
- "sync"
- "time"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/util/bloom"
- "github.com/algorand/go-algorand/util/execpool"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-const (
- kickoffTime = 200 * time.Millisecond
- randomRange = 100 * time.Millisecond
- sendMessagesTime = 10 * time.Millisecond
-
- // transactionPoolLowWatermark is the low watermark for the transaction pool, relative
- // to the transaction pool size. When the number of transactions in the transaction pool
- // drops below this value, the transactionPoolFull flag would get cleared.
- transactionPoolLowWatermark = float32(0.8)
-
- // transactionPoolHighWatermark is the low watermark for the transaction pool, relative
- // to the transaction pool size. When the number of transactions in the transaction pool
- // grows beyond this value, the transactionPoolFull flag would get set.
- transactionPoolHighWatermark = float32(0.9)
-
- // betaGranularChangeThreshold defined the difference threshold for changing the beta value.
- // Changes to the beta value only takes effect once the difference is sufficiently big enough
- // comared to the current beta value.
- betaGranularChangeThreshold = 0.1
-)
-
-type syncState struct {
- service *Service
- log Logger
- node NodeConnector
- isRelay bool
- clock timers.WallClock
- config config.Local
- threadpool execpool.BacklogPool
-
- genesisID string
- genesisHash crypto.Digest
-
- // lastBeta is the last beta value that was calculated for this node
- lastBeta time.Duration
- round basics.Round
- fetchTransactions bool
- scheduler peerScheduler
- interruptablePeers []*Peer
- interruptablePeersMap map[*Peer]int // map a peer into the index of interruptablePeers
- incomingMessagesQ *incomingMessageQueue
- outgoingMessagesCallbackCh chan sentMessageMetadata
- nextOffsetRollingCh <-chan time.Time
- requestsOffset uint64
-
- // The lastBloomFilter allows us to share the same bloom filter across multiples messages,
- // and compute it only once. Since this bloom filter could contain many hashes ( especially on relays )
- // it's important to avoid recomputing it needlessly.
- lastBloomFilter bloomFilter
-
- // The profiler helps us monitor the transaction sync components execution time. When enabled, it would report these
- // to the telemetry.
- profiler *profiler
-
- // transactionPoolFull indicates whether the transaction pool is currently in "full" state or not. While the transaction
- // pool is full, a node would not ask any of the other peers for additional transactions.
- transactionPoolFull bool
-
- // messageSendWaitGroup coordinates the messages that are being sent to the network. Before aborting the mainloop, we want to make
- // sure there are no outbound messages that are waiting to be sent to the network ( i.e. that all the tasks that we enqueued to the
- // execution pool were completed ). This does not include the time where the message spent while waiting on the network queue itself.
- messageSendWaitGroup sync.WaitGroup
-
- xorBuilder bloom.XorBuilder
-}
-
-func (s *syncState) mainloop(serviceCtx context.Context, wg *sync.WaitGroup) {
- defer wg.Done()
- defer s.messageSendWaitGroup.Wait()
-
- // The following would allow the emulator to start the service in a "stopped" mode.
- s.node.NotifyMonitor()
-
- s.clock = s.node.Clock()
- s.incomingMessagesQ = makeIncomingMessageQueue()
- defer s.incomingMessagesQ.shutdown()
- s.outgoingMessagesCallbackCh = make(chan sentMessageMetadata, 1024)
- s.interruptablePeersMap = make(map[*Peer]int)
- s.scheduler.node = s.node
- s.lastBeta = beta(0)
- roundSettings := s.node.GetCurrentRoundSettings()
- s.onNewRoundEvent(MakeNewRoundEvent(roundSettings.Round, roundSettings.FetchTransactions))
-
- // create a profiler, and its profiling elements.
- s.profiler = makeProfiler(200*time.Millisecond, s.clock, s.log, 2000*time.Millisecond) // todo : make the time configurable.
- profIdle := s.profiler.getElement(profElementIdle)
- profTxChange := s.profiler.getElement(profElementTxChange)
- profNewRounnd := s.profiler.getElement(profElementNewRound)
- profPeerState := s.profiler.getElement(profElementPeerState)
- profIncomingMsg := s.profiler.getElement(profElementIncomingMsg)
- profOutgoingMsg := s.profiler.getElement(profElementOutgoingMsg)
- profNextOffset := s.profiler.getElement(profElementNextOffset)
-
- externalEvents := s.node.Events()
- var nextPeerStateCh <-chan time.Time
- for {
- nextPeerStateTime := s.scheduler.nextDuration()
- if nextPeerStateTime != time.Duration(0) {
- nextPeerStateCh = s.clock.TimeoutAt(nextPeerStateTime)
- } else {
- nextPeerStateCh = nil
- }
-
- select {
- case ent := <-externalEvents:
- switch ent.eventType {
- case transactionPoolChangedEvent:
- profTxChange.start()
- s.onTransactionPoolChangedEvent(ent)
- profTxChange.end()
- case newRoundEvent:
- profNewRounnd.start()
- s.onNewRoundEvent(ent)
- profNewRounnd.end()
- }
- continue
- case <-nextPeerStateCh:
- profPeerState.start()
- s.evaluatePeerStateChanges(nextPeerStateTime)
- profPeerState.end()
- continue
- case incomingMsg := <-s.incomingMessagesQ.getIncomingMessageChannel():
- profIncomingMsg.start()
- s.evaluateIncomingMessage(incomingMsg)
- profIncomingMsg.end()
- continue
- case msgSent := <-s.outgoingMessagesCallbackCh:
- profOutgoingMsg.start()
- s.evaluateOutgoingMessage(msgSent)
- profOutgoingMsg.end()
- continue
- case <-s.nextOffsetRollingCh:
- profNextOffset.start()
- s.rollOffsets()
- profNextOffset.end()
- continue
- case <-serviceCtx.Done():
- return
- default:
- }
-
- profIdle.start()
- select {
- case ent := <-externalEvents:
- profIdle.end()
- switch ent.eventType {
- case transactionPoolChangedEvent:
- profTxChange.start()
- s.onTransactionPoolChangedEvent(ent)
- profTxChange.end()
- case newRoundEvent:
- profNewRounnd.start()
- s.onNewRoundEvent(ent)
- profNewRounnd.end()
- }
- case <-nextPeerStateCh:
- profIdle.end()
- profPeerState.start()
- s.evaluatePeerStateChanges(nextPeerStateTime)
- profPeerState.end()
- case incomingMsg := <-s.incomingMessagesQ.getIncomingMessageChannel():
- profIdle.end()
- profIncomingMsg.start()
- s.evaluateIncomingMessage(incomingMsg)
- profIncomingMsg.end()
- case msgSent := <-s.outgoingMessagesCallbackCh:
- profIdle.end()
- profOutgoingMsg.start()
- s.evaluateOutgoingMessage(msgSent)
- profOutgoingMsg.end()
- case <-s.nextOffsetRollingCh:
- profIdle.end()
- profNextOffset.start()
- s.rollOffsets()
- profNextOffset.end()
- case <-serviceCtx.Done():
- profIdle.end()
- return
- case <-s.node.NotifyMonitor():
- profIdle.end()
- }
- }
-}
-
-func (s *syncState) onTransactionPoolChangedEvent(ent Event) {
- if ent.transactionHandlerBacklogFull {
- // if the transaction handler backlog is full, we don't want to receive any more transactions.
- // setting the transactionPoolFull here would notify other nodes that we don't want any more messages.
- s.transactionPoolFull = true
- } else if s.transactionPoolFull {
- // the transaction pool is currently full.
- if float32(ent.transactionPoolSize) < float32(s.config.TxPoolSize)*transactionPoolLowWatermark {
- s.transactionPoolFull = false
- }
- } else {
- if float32(ent.transactionPoolSize) > float32(s.config.TxPoolSize)*transactionPoolHighWatermark {
- s.transactionPoolFull = true
- }
- }
-
- newBeta := beta(ent.transactionPoolSize)
-
- // check if beta should be updated
- if !shouldUpdateBeta(s.lastBeta, newBeta, betaGranularChangeThreshold) {
- // no changes
- return
- }
- // yes, change beta as the number of transactions in the pool have changed dramatically since the last time.
- s.lastBeta = newBeta
-
- peers := make([]*Peer, 0, len(s.interruptablePeers))
- for _, peer := range s.interruptablePeers {
- if peer == nil {
- continue
- }
- peers = append(peers, peer)
- peer.state = peerStateHoldsoff
- }
-
- // reset the interruptablePeers array, since all it's members were made into holdsoff
- s.interruptablePeers = nil
- s.interruptablePeersMap = make(map[*Peer]int)
- deadlineMonitor := s.clock.DeadlineMonitorAt(s.clock.Since() + sendMessagesTime)
- s.sendMessageLoop(s.clock.Since(), deadlineMonitor, peers)
-
- currentTimeout := s.clock.Since()
- for _, peer := range peers {
- peerNext := s.scheduler.peerDuration(peer)
- if peerNext < currentTimeout {
- // shouldn't be, but let's reschedule it if this is the case.
- s.scheduler.schedulePeer(peer, currentTimeout+s.lastBeta)
- continue
- }
- // given that peerNext is after currentTimeout, find out what's the difference, and divide by the beta.
- betaCount := (peerNext - currentTimeout) / s.lastBeta
- peerNext = currentTimeout + s.lastBeta*betaCount
- s.scheduler.schedulePeer(peer, peerNext)
- }
-}
-
-// calculate the beta parameter, based on the transaction pool size.
-func beta(txPoolSize int) time.Duration {
- if txPoolSize < 200 {
- txPoolSize = 200
- } else if txPoolSize > 10000 {
- txPoolSize = 10000
- }
- beta := 1.0 / (2 * 3.6923 * math.Exp(float64(txPoolSize)*0.00026))
- return time.Duration(float64(time.Second) * beta)
-
-}
-
-func shouldUpdateBeta(currentBeta, newBeta time.Duration, betaGranularChangeThreshold float32) bool {
- // see if the newBeta is at least threshold percent smaller or bigger than the current one
- if float32(newBeta) >= (float32(currentBeta) * (1.0 + betaGranularChangeThreshold)) {
- return true
- }
- if float32(newBeta) <= (float32(currentBeta) * (1.0 - betaGranularChangeThreshold)) {
- return true
- }
- // no, it's not.
- return false
-}
-
-func (s *syncState) onNewRoundEvent(ent Event) {
- s.clock = s.clock.Zero().(timers.WallClock)
- peers := s.getPeers()
- newRoundPeers := peers
- if s.isRelay {
- // on relays, outgoing peers have a difference scheduling, which is based on the incoming message timing
- // rather then a periodic message transmission.
- newRoundPeers = incomingPeersOnly(newRoundPeers)
- }
- s.scheduler.scheduleNewRound(newRoundPeers)
- s.round = ent.roundSettings.Round
- s.fetchTransactions = ent.roundSettings.FetchTransactions
- if !s.isRelay {
- s.nextOffsetRollingCh = s.clock.TimeoutAt(kickoffTime + 2*s.lastBeta)
- }
- s.updatePeersLatency(peers)
- s.updatePeersRequestParams(peers)
-}
-
-func (s *syncState) evaluatePeerStateChanges(currentTimeout time.Duration) {
- peers := s.scheduler.getNextPeers()
- if len(peers) == 0 {
- return
- }
-
- sendMessagePeers := 0
- for _, peer := range peers {
- ops := peer.advancePeerState(currentTimeout, s.isRelay)
- if (ops & peerOpsSendMessage) == peerOpsSendMessage {
- peers[sendMessagePeers] = peer
- sendMessagePeers++
- }
- if (ops & peerOpsSetInterruptible) == peerOpsSetInterruptible {
- if _, has := s.interruptablePeersMap[peer]; !has {
- s.interruptablePeers = append(s.interruptablePeers, peer)
- s.interruptablePeersMap[peer] = len(s.interruptablePeers) - 1
- }
- }
- if (ops & peerOpsClearInterruptible) == peerOpsClearInterruptible {
- if idx, has := s.interruptablePeersMap[peer]; has {
- delete(s.interruptablePeersMap, peer)
- s.interruptablePeers[idx] = nil
- }
- }
- if (ops & peerOpsReschedule) == peerOpsReschedule {
- s.scheduler.schedulePeer(peer, currentTimeout+s.lastBeta)
- }
- }
-
- peers = peers[:sendMessagePeers]
- deadlineMonitor := s.clock.DeadlineMonitorAt(currentTimeout + sendMessagesTime)
- s.sendMessageLoop(currentTimeout, deadlineMonitor, peers)
-}
-
-// rollOffsets rolls the "base" offset for the peers offset selection. This method is only called
-// for non-relays.
-func (s *syncState) rollOffsets() {
- s.nextOffsetRollingCh = s.clock.TimeoutAt(s.clock.Since() + 2*s.lastBeta)
- s.requestsOffset++
-
- if !s.fetchTransactions {
- return
- }
-
- // iterate on the outgoing peers and see if we want to send them an update as needed.
- // note that because this function is only called for non-relays, then all the connections
- // are outgoing.
- peers := s.getPeers()
- s.updatePeersRequestParams(peers)
-
- // check when each of these peers is expected to send a message. we might want to promote a message to be sent earlier.
- currentTimeOffset := s.clock.Since()
- deadlineMonitor := s.clock.DeadlineMonitorAt(currentTimeOffset + sendMessagesTime)
-
- for _, peer := range peers {
- nextSchedule := s.scheduler.peerDuration(peer)
- if nextSchedule == 0 {
- // a new peer - ignore for now. This peer would get scheduled on the next new round.
- continue
- }
- if currentTimeOffset+sendMessagesTime > nextSchedule {
- // there was a message scheduled already in less than 20ms, so keep that one.
- s.scheduler.schedulePeer(peer, nextSchedule)
- continue
- }
-
- // otherwise, send a message to that peer. Note that we're passing the `nextSchedule-s.lastBeta` as the currentTime,
- // so that the time offset would be based on that one. ( i.e. effectively, it would retain the existing timing, and prevent
- // the peers from getting aligned )
- s.sendMessageLoop(nextSchedule-s.lastBeta, deadlineMonitor, []*Peer{peer})
- }
-}
-
-func (s *syncState) getPeers() (result []*Peer) {
- peersInfo := s.node.GetPeers()
- updatedNetworkPeers := []interface{}{}
- updatedNetworkPeersSync := []*Peer{}
-
- var averageDataExchangeRate uint64
-
- // some of the network peers might not have a sync peer, so we need to create one for these.
- for _, peerInfo := range peersInfo {
- if peerInfo.TxnSyncPeer == nil {
- syncPeer := makePeer(peerInfo.NetworkPeer, peerInfo.IsOutgoing, s.isRelay, &s.config, s.log, s.node.GetPeerLatency(peerInfo.NetworkPeer))
- peerInfo.TxnSyncPeer = syncPeer
- updatedNetworkPeers = append(updatedNetworkPeers, peerInfo.NetworkPeer)
- updatedNetworkPeersSync = append(updatedNetworkPeersSync, syncPeer)
- }
- result = append(result, peerInfo.TxnSyncPeer)
- averageDataExchangeRate += peerInfo.TxnSyncPeer.dataExchangeRate
- }
- if len(peersInfo) > 0 {
- averageDataExchangeRate /= uint64(len(peersInfo))
- }
-
- // if we have any update for the transaction sync connector, the send them via
- // a UpdatePeers call.
- if len(updatedNetworkPeers) > 0 || len(peersInfo) > 0 {
- s.node.UpdatePeers(updatedNetworkPeersSync, updatedNetworkPeers, averageDataExchangeRate)
- }
- return result
-}
-
-func (s *syncState) updatePeersRequestParams(peers []*Peer) {
- if s.transactionPoolFull {
- for _, peer := range peers {
- peer.setLocalRequestParams(0, 0)
- }
- return
- }
- if s.isRelay {
- for _, peer := range peers {
- peer.setLocalRequestParams(0, 1)
- }
- } else {
- if s.fetchTransactions {
- for i, peer := range peers {
- // on non-relay, ask for offset/modulator
- peer.setLocalRequestParams(uint64(i)+s.requestsOffset, uint64(len(peers)))
- }
- }
- }
-}
-
-func (s *syncState) updatePeersLatency(peers []*Peer) {
- for _, peer := range peers {
- peer.cachedLatency = s.node.GetPeerLatency(peer.networkPeer)
- }
-}
diff --git a/txnsync/mainloop_test.go b/txnsync/mainloop_test.go
deleted file mode 100644
index 5622636d8..000000000
--- a/txnsync/mainloop_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestBeta(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- beta0 := beta(0)
- beta10000 := beta(10000)
- require.GreaterOrEqual(t, int64(beta0), int64(100*time.Millisecond))
- require.LessOrEqual(t, int64(beta10000), int64(20*time.Millisecond))
- for i := 50; i < 20000; i += 50 {
- prev := beta(i - 50)
- cur := beta(i)
- require.LessOrEqualf(t, int64(cur), int64(prev), fmt.Sprintf("beta(%d) < beta(%d)", i, i-50))
- }
-
-}
-
-func TestShouldUpdateBeta(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- beta0 := beta(0)
- beta100 := beta(100)
- beta5000 := beta(5000)
- beta5100 := beta(5100)
- beta5900 := beta(5900)
- beta6000 := beta(6000)
- beta10000 := beta(10000)
- beta15000 := beta(15000)
-
- // new beta greater than betaGranularChangeThreshold times previous beta
- require.True(t, shouldUpdateBeta(beta0, beta10000, betaGranularChangeThreshold))
- require.True(t, shouldUpdateBeta(beta5000, beta6000, betaGranularChangeThreshold))
-
- //same beta values
- require.False(t, shouldUpdateBeta(beta0, beta100, betaGranularChangeThreshold))
- require.False(t, shouldUpdateBeta(beta10000, beta15000, betaGranularChangeThreshold))
-
- // new beta lesser than betaGranularChangeThreshold times previous beta
- require.True(t, shouldUpdateBeta(beta15000, beta0, betaGranularChangeThreshold))
- require.True(t, shouldUpdateBeta(beta6000, beta100, betaGranularChangeThreshold))
-
- // no change in beta is expected
- require.False(t, shouldUpdateBeta(beta5000, beta5100, betaGranularChangeThreshold))
- require.False(t, shouldUpdateBeta(beta6000, beta5900, betaGranularChangeThreshold))
-}
diff --git a/txnsync/metrics.go b/txnsync/metrics.go
deleted file mode 100644
index d2bcbdd9b..000000000
--- a/txnsync/metrics.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "github.com/algorand/go-algorand/util/metrics"
-)
-
-var txsyncIncomingMessagesTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_txsync_incoming_messages_total", Description: "total number of incoming transaction sync messages"})
-var txsyncUnprocessedIncomingMessagesTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_txsync_unprocessed_incoming_messages_total", Description: "total number of incoming transaction sync messages that were not processed"})
-var txsyncDecodedBloomFiltersTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_txsync_decoded_bloom_filters_total", Description: "total number of decoded bloom filters"})
-var txsyncCreatedPeersTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_txsync_created_peers_total", Description: "total number of created peers"})
-var txsyncOutgoingMessagesTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_txsync_outgoing_messages_total", Description: "total number of outgoing transaction sync messages"})
-var txsyncEncodedBloomFiltersTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_txsync_encoded_bloom_filters_total", Description: "total number of bloom filters encoded"})
diff --git a/txnsync/msgbuffers.go b/txnsync/msgbuffers.go
deleted file mode 100644
index a531f6da2..000000000
--- a/txnsync/msgbuffers.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "sync"
-
- "github.com/algorand/go-algorand/data/transactions"
-)
-
-const messageBufferDefaultInitialSize = 10240
-
-// msgBuffersPool holds temporary byte slice buffers used for encoding messages.
-var msgBuffersPool = sync.Pool{
- New: func() interface{} {
- return make([]byte, 0, messageBufferDefaultInitialSize)
- },
-}
-
-// GetEncodingBuf returns a byte slice that can be used for encoding a
-// temporary message. The byte slice has zero length but potentially
-// non-zero capacity. The caller gets full ownership of the byte slice,
-// but is encouraged to return it using releaseMessageBuffer().
-func getMessageBuffer() []byte {
- return msgBuffersPool.Get().([]byte)[:0]
-}
-
-// releaseMessageBuffer places a byte slice into the pool of temporary buffers
-// for encoding. The caller gives up ownership of the byte slice when
-// passing it to releaseMessageBuffer().
-func releaseMessageBuffer(s []byte) {
- msgBuffersPool.Put(s) //nolint:staticcheck
-}
-
-// txidSlicePool holds temporary byte slice buffers used for encoding messages.
-var txidSlicePool = sync.Pool{}
-
-// getTxIDSliceBuffer returns a slice that can be used for storing a
-// list of transaction IDs. The slice has zero length but potentially
-// non-zero capacity. The caller gets full ownership of the slice,
-// but is encouraged to return it using releaseTxIDSliceBuffer().
-func getTxIDSliceBuffer(minSize int) []transactions.Txid {
- alloc := txidSlicePool.Get()
- if alloc == nil {
- return make([]transactions.Txid, 0, minSize)
- }
- buf := alloc.([]transactions.Txid)[:0]
- if cap(buf) >= minSize {
- return buf
- }
- txidSlicePool.Put(alloc)
- return make([]transactions.Txid, 0, minSize)
-}
-
-// releaseTxIDSliceBuffer places a slice into the pool of buffers
-// for storage. The caller gives up ownership of the byte slice when
-// passing it to releaseMessageBuffer().
-func releaseTxIDSliceBuffer(s []transactions.Txid) {
- if cap(s) > 0 {
- txidSlicePool.Put(s) //nolint:staticcheck
- }
-}
diff --git a/txnsync/msgbuffers_test.go b/txnsync/msgbuffers_test.go
deleted file mode 100644
index f53a4bd0d..000000000
--- a/txnsync/msgbuffers_test.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-// A unique length that we can use to identify non-default allocated buffers
-var uniqueLength int = messageBufferDefaultInitialSize + 482
-var uniqueIdentifier int = 50
-
-// Stamp a byte buffer with a unique identifier, assumes a capacity of at least unique_length
-func stampBuffer(i int, buf *[]byte) {
- if cap(*buf) < uniqueLength {
- return
- }
-
- *buf = (*buf)[:cap(*buf)]
-
- for j := 0; j < i; j++ {
- (*buf)[uniqueLength-1-j] = byte(j)
- }
-
-}
-
-func validBuffer(i int, buf *[]byte) bool {
-
- if cap(*buf) != uniqueLength {
- return false
- }
-
- *buf = (*buf)[:cap(*buf)]
-
- for j := 0; j < i; j++ {
- if (*buf)[uniqueLength-1-j] != byte(j) {
- return false
- }
- }
-
- return true
-}
-
-// TestMessageBuffersPool tests that a buffer pool can be retrieved and has proper length/capacity properties
-func TestMessageBuffersPool(t *testing.T) {
-
- partitiontest.PartitionTest(t)
-
- foundBuffer := false
-
- for retryCount := 0; retryCount < 10; retryCount++ {
-
- // Let's put a bunch of uniquely identifiable buffers in the global pool
- for i := 0; i < 10; i++ {
-
- bytes := make([]byte, 0, uniqueLength)
- stampBuffer(uniqueIdentifier, &bytes)
-
- releaseMessageBuffer(bytes)
- }
-
- collector := [][]byte{}
-
- // Let's try to get at least one buffer that is uniquely identifiable over a period of time
- for i := 0; i < 10000; i++ {
- byte := getMessageBuffer()
-
- collector = append(collector, byte)
-
- if validBuffer(uniqueIdentifier, &byte) {
- foundBuffer = true
- break
- }
-
- time.Sleep(500 * time.Microsecond)
- }
-
- for _, b := range collector {
- releaseMessageBuffer(b)
- }
-
- if foundBuffer {
- // If we found a buffer, we passed the test
- break
- }
-
- // Otherwise, let's start all over again
- }
-
- require.True(t, foundBuffer)
-
-}
-
-// TestTxIDSlicePool tests that the transaction id pool can be retrieved and has proper length/capacity properties
-func TestTxIDSlicePool(t *testing.T) {
- partitiontest.PartitionTest(t)
- maxTestCount := 200
- for testCount := 0; testCount < maxTestCount; testCount++ {
- for i := 10; i < 100; i += 10 {
- txIDs := getTxIDSliceBuffer(i)
- require.Equal(t, 0, len(txIDs))
- require.GreaterOrEqual(t, cap(txIDs), i)
- releaseTxIDSliceBuffer(txIDs)
- }
-
- // Test that one of the previous buffers can be reused
- // We can assess this because all the previous buffers created
- // had a capacity greater than 10, so if one of these buffers
- // has a buffer size of at least 10 (when we asked for 5), we can
- // be assured that we have reused a previous buffer
- txIDs := getTxIDSliceBuffer(5)
- require.Equal(t, 0, len(txIDs))
- require.GreaterOrEqual(t, cap(txIDs), 5)
- if cap(txIDs) < 10 {
- // repeat this test again. it looks like the GC collected all the content
- // of the pool and forced us to allocate a new buffer.
- time.Sleep(10 * time.Millisecond)
- continue
- }
- releaseTxIDSliceBuffer(txIDs)
- return
- }
- require.FailNow(t, "failed to get a 5 entries buffer from slice pool")
-}
diff --git a/txnsync/msgorderingheap.go b/txnsync/msgorderingheap.go
deleted file mode 100644
index 69758216b..000000000
--- a/txnsync/msgorderingheap.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "container/heap"
- "errors"
-
- "github.com/algorand/go-deadlock"
-)
-
-var errHeapEmpty = errors.New("message ordering heap is empty")
-var errHeapReachedCapacity = errors.New("message ordering heap reached capacity")
-var errSequenceNumberMismatch = errors.New("sequence number mismatch")
-
-const messageOrderingHeapLimit = 128
-
-type messageHeapItem incomingMessage
-
-type messageOrderingHeap struct {
- mu deadlock.Mutex
- messages []messageHeapItem
-}
-
-// Push implements heap.Interface
-func (p *messageOrderingHeap) Push(x interface{}) {
- entry := x.(messageHeapItem)
- p.messages = append(p.messages, entry)
-}
-
-// Pop implements heap.Interface
-func (p *messageOrderingHeap) Pop() interface{} {
- end := len(p.messages) - 1
- res := p.messages[end]
- p.messages[end] = messageHeapItem{}
- p.messages = p.messages[0:end]
- return res
-}
-
-// Len implements heap.Interface
-func (p *messageOrderingHeap) Len() int {
- return len(p.messages)
-}
-
-// Swap implements heap.Interface
-func (p *messageOrderingHeap) Swap(i, j int) {
- p.messages[i], p.messages[j] = p.messages[j], p.messages[i]
-}
-
-// Less implements heap.Interface
-func (p *messageOrderingHeap) Less(i, j int) bool {
- return p.messages[i].sequenceNumber < p.messages[j].sequenceNumber
-}
-
-func (p *messageOrderingHeap) enqueue(msg incomingMessage) error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if len(p.messages) >= messageOrderingHeapLimit {
- return errHeapReachedCapacity
- }
- heap.Push(p, messageHeapItem(msg))
- return nil
-}
-
-func (p *messageOrderingHeap) popSequence(sequenceNumber uint64) (msg incomingMessage, heapSequenceNumber uint64, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if len(p.messages) == 0 {
- return incomingMessage{}, 0, errHeapEmpty
- }
- if p.messages[0].sequenceNumber != sequenceNumber {
- return incomingMessage{}, p.messages[0].sequenceNumber, errSequenceNumberMismatch
- }
- entry := heap.Pop(p).(messageHeapItem)
- return incomingMessage(entry), sequenceNumber, nil
-}
-
-func (p *messageOrderingHeap) pop() (msg incomingMessage, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if len(p.messages) == 0 {
- return incomingMessage{}, errHeapEmpty
- }
- entry := heap.Pop(p).(messageHeapItem)
- return incomingMessage(entry), nil
-}
diff --git a/txnsync/msgorderingheap_test.go b/txnsync/msgorderingheap_test.go
deleted file mode 100644
index cd1a352a5..000000000
--- a/txnsync/msgorderingheap_test.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "math/rand"
- "reflect"
- "sort"
- "sync"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestMessageOrderingHeap_PushPopSwapLess(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- heap := messageOrderingHeap{}
-
- msg1 := messageHeapItem{sequenceNumber: 1}
- msg2 := messageHeapItem{sequenceNumber: 2}
- msg3 := messageHeapItem{sequenceNumber: 3}
-
- a.Equal(len(heap.messages), 0)
- heap.Push(msg1)
- heap.Push(msg2)
- a.Equal(len(heap.messages), int(2))
- a.Equal(heap.Len(), int(2))
-
- a.True(heap.Less(0, 1))
-
- res := heap.Pop().(messageHeapItem)
- a.Equal(res.sequenceNumber, uint64(2))
- a.Equal(len(heap.messages), int(1))
- a.Equal(heap.Len(), int(1))
- a.Equal(heap.messages[0].sequenceNumber, uint64(1))
- heap.Push(msg2)
- heap.Push(msg3)
- heap.Swap(0, 1)
- a.Equal(heap.messages[0].sequenceNumber, uint64(2))
- a.Equal(heap.messages[1].sequenceNumber, uint64(1))
-
- a.False(heap.Less(0, 1))
-}
-
-func TestEnqueueHeapPop(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- heap := messageOrderingHeap{}
-
- for i := messageOrderingHeapLimit - 1; i >= 0; i-- {
- a.Nil(heap.enqueue(incomingMessage{sequenceNumber: uint64(i)}))
- }
-
- a.Equal(heap.Len(), int(messageOrderingHeapLimit))
- a.Equal(heap.enqueue(incomingMessage{}), errHeapReachedCapacity)
- a.Equal(heap.Len(), int(messageOrderingHeapLimit))
-
- for i := 0; i < messageOrderingHeapLimit; i++ {
- msg, err := heap.pop()
- a.Nil(err)
- a.Equal(msg.sequenceNumber, uint64(i))
- }
-
- _, err := heap.pop()
-
- a.Equal(heap.Len(), int(0))
- a.Equal(err, errHeapEmpty)
-
-}
-
-func TestPopSequence(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- heap := messageOrderingHeap{}
-
- _, _, err := heap.popSequence(0)
-
- a.Equal(err, errHeapEmpty)
- for i := messageOrderingHeapLimit - 1; i >= 0; i-- {
- a.Nil(heap.enqueue(incomingMessage{sequenceNumber: uint64(i)}))
- }
- a.Equal(heap.Len(), messageOrderingHeapLimit)
- _, heapSeqNum, err := heap.popSequence(3)
- a.Equal(heap.Len(), messageOrderingHeapLimit)
- a.Equal(heapSeqNum, uint64(0), errSequenceNumberMismatch)
- a.Error(err, errSequenceNumberMismatch)
-
- msg, heapSeqNum, err := heap.popSequence(0)
-
- a.NotNil(msg)
- a.Equal(heap.Len(), messageOrderingHeapLimit-1)
- a.Equal(msg.sequenceNumber, uint64(0))
- a.Equal(heapSeqNum, uint64(0))
- a.NoError(err)
-
-}
-
-func TestMultiThreaded(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- loopCount := 1000
- numThreads := 100
- itemsPerThread := 10
-
- totalItems := numThreads * itemsPerThread
-
- var (
- heap messageOrderingHeap
- startChan chan struct{}
- wg sync.WaitGroup
- )
-
- peers := []Peer{
- {},
- {},
- {},
- {},
- {},
- }
-
- genTxnGrp := func(value int) []pooldata.SignedTxGroup {
-
- if value%2 == 0 {
- return []pooldata.SignedTxGroup{
- {
- GroupTransactionID: transactions.Txid{byte(value % 255)},
- },
- }
- }
-
- return []pooldata.SignedTxGroup{
- {
- GroupTransactionID: transactions.Txid{byte(value % 255)},
- },
- {
- GroupTransactionID: transactions.Txid{byte(value + 1%255)},
- },
- }
- }
-
- encodeMsg := func(value int, peers []Peer) incomingMessage {
-
- rval := incomingMessage{
- sequenceNumber: uint64(value),
- peer: &peers[value%len(peers)],
- encodedSize: value + 874,
- transactionGroups: genTxnGrp(value),
- }
-
- return rval
- }
-
- validateMsg := func(message incomingMessage) bool {
- val := int(message.sequenceNumber)
-
- if message.peer != &peers[val%len(peers)] {
- return false
- }
-
- if message.encodedSize != val+874 {
- return false
- }
-
- if !reflect.DeepEqual(message.transactionGroups, genTxnGrp(val)) {
- return false
- }
-
- return true
-
- }
-
- fxn := func(values []int, heap *messageOrderingHeap, start chan struct{}, wg *sync.WaitGroup,
- enqueuedMtx *deadlock.Mutex, enqueuedList *[]int) {
- defer wg.Done()
- // Wait for the start
- <-start
-
- for _, value := range values {
- msg := encodeMsg(value, peers)
- err := heap.enqueue(msg)
-
- if err == nil {
- enqueuedMtx.Lock()
- *enqueuedList = append(*enqueuedList, value)
- enqueuedMtx.Unlock()
- }
- }
-
- }
-
- for i := 0; i < loopCount; i++ {
-
- var enqueuedList []int
- var enqueuedMtx deadlock.Mutex
-
- var masterList []int
-
- for j := 0; j < totalItems; j++ {
- masterList = append(masterList, j)
- }
-
- rand.Seed(time.Now().UnixNano())
- rand.Shuffle(len(masterList), func(i, j int) { masterList[i], masterList[j] = masterList[j], masterList[i] })
-
- heap = messageOrderingHeap{}
- startChan = make(chan struct{})
-
- currentIdx := 0
-
- for j := 0; j < numThreads; j++ {
- wg.Add(1)
-
- randomList := masterList[currentIdx : currentIdx+itemsPerThread]
- currentIdx = currentIdx + itemsPerThread
-
- go fxn(randomList, &heap, startChan, &wg, &enqueuedMtx, &enqueuedList)
- }
-
- // Tell all goroutines to go
- close(startChan)
-
- wg.Wait()
-
- a.Equal(heap.Len(), int(messageOrderingHeapLimit))
- a.Equal(heap.enqueue(incomingMessage{}), errHeapReachedCapacity)
- a.Equal(heap.Len(), int(messageOrderingHeapLimit))
-
- sort.Ints(enqueuedList)
-
- for _, val := range enqueuedList {
-
- msg, sequenceNumber, err := heap.popSequence(uint64(val))
- a.Nil(err)
- a.Equal(sequenceNumber, uint64(val))
- a.True(validateMsg(msg))
- }
-
- a.Equal(heap.Len(), int(0))
- }
-
-}
diff --git a/txnsync/msgp_gen.go b/txnsync/msgp_gen.go
deleted file mode 100644
index 1c1b51256..000000000
--- a/txnsync/msgp_gen.go
+++ /dev/null
@@ -1,35449 +0,0 @@
-package txnsync
-
-// Code generated by github.com/algorand/msgp DO NOT EDIT.
-
-import (
- "sort"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/compactcert"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/msgp/msgp"
-)
-
-// The following msgp objects are implemented in this file:
-// addresses
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// appIndices
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// applicationArgs
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// assetIndices
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// bitmask
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// certProofs
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// encodedApplicationCallTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedAssetConfigTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedAssetFreezeTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedAssetParams
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedAssetTransferTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedBloomFilter
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedCert
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedCompactCertTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedKeyregTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedLsigs
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedMsigs
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedPaymentTxnFields
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedSignedTxns
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedTxnHeaders
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedTxns
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// packedTransactionGroups
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// program
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// relayedProposal
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// requestParams
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// revealMap
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
-//
-// timingParams
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// transactionBlockMessage
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// txGroupsEncodingStub
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// txGroupsEncodingStubOld
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// txnGroups
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-
-// MarshalMsg implements msgp.Marshaler
-func (z addresses) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- if z == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len(z)))
- }
- for za0005 := range z {
- o = z[za0005].MarshalMsg(o)
- }
- return
-}
-
-func (_ addresses) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(addresses)
- if !ok {
- _, ok = (z).(*addresses)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *addresses) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = nil
- } else if (*z) != nil && cap((*z)) >= zb0002 {
- (*z) = (*z)[:zb0002]
- } else {
- (*z) = make(addresses, zb0002)
- }
- for zb0001 := range *z {
- bts, err = (*z)[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, zb0001)
- return
- }
- }
- o = bts
- return
-}
-
-func (_ *addresses) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*addresses)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z addresses) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize
- for za0005 := range z {
- s += z[za0005].Msgsize()
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z addresses) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z appIndices) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- if z == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len(z)))
- }
- for za0001 := range z {
- o = z[za0001].MarshalMsg(o)
- }
- return
-}
-
-func (_ appIndices) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(appIndices)
- if !ok {
- _, ok = (z).(*appIndices)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *appIndices) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = nil
- } else if (*z) != nil && cap((*z)) >= zb0002 {
- (*z) = (*z)[:zb0002]
- } else {
- (*z) = make(appIndices, zb0002)
- }
- for zb0001 := range *z {
- bts, err = (*z)[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, zb0001)
- return
- }
- }
- o = bts
- return
-}
-
-func (_ *appIndices) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*appIndices)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z appIndices) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize
- for za0001 := range z {
- s += z[za0001].Msgsize()
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z appIndices) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z applicationArgs) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- if z == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len(z)))
- }
- for za0001 := range z {
- o = msgp.AppendBytes(o, z[za0001])
- }
- return
-}
-
-func (_ applicationArgs) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(applicationArgs)
- if !ok {
- _, ok = (z).(*applicationArgs)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *applicationArgs) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = nil
- } else if (*z) != nil && cap((*z)) >= zb0002 {
- (*z) = (*z)[:zb0002]
- } else {
- (*z) = make(applicationArgs, zb0002)
- }
- for zb0001 := range *z {
- (*z)[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z)[zb0001])
- if err != nil {
- err = msgp.WrapError(err, zb0001)
- return
- }
- }
- o = bts
- return
-}
-
-func (_ *applicationArgs) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*applicationArgs)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z applicationArgs) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize
- for za0001 := range z {
- s += msgp.BytesPrefixSize + len(z[za0001])
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z applicationArgs) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z assetIndices) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- if z == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len(z)))
- }
- for za0001 := range z {
- o = z[za0001].MarshalMsg(o)
- }
- return
-}
-
-func (_ assetIndices) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(assetIndices)
- if !ok {
- _, ok = (z).(*assetIndices)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *assetIndices) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = nil
- } else if (*z) != nil && cap((*z)) >= zb0002 {
- (*z) = (*z)[:zb0002]
- } else {
- (*z) = make(assetIndices, zb0002)
- }
- for zb0001 := range *z {
- bts, err = (*z)[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, zb0001)
- return
- }
- }
- o = bts
- return
-}
-
-func (_ *assetIndices) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*assetIndices)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z assetIndices) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize
- for za0001 := range z {
- s += z[za0001].Msgsize()
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z assetIndices) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z bitmask) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendBytes(o, []byte(z))
- return
-}
-
-func (_ bitmask) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(bitmask)
- if !ok {
- _, ok = (z).(*bitmask)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *bitmask) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var zb0001 []byte
- var zb0002 int
- zb0002, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(maxBitmaskSize))
- return
- }
- zb0001, bts, err = msgp.ReadBytesBytes(bts, []byte((*z)))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- (*z) = bitmask(zb0001)
- }
- o = bts
- return
-}
-
-func (_ *bitmask) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*bitmask)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z bitmask) Msgsize() (s int) {
- s = msgp.BytesPrefixSize + len([]byte(z))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z bitmask) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z certProofs) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- if z == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len(z)))
- }
- for za0001 := range z {
- o = z[za0001].MarshalMsg(o)
- }
- return
-}
-
-func (_ certProofs) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(certProofs)
- if !ok {
- _, ok = (z).(*certProofs)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *certProofs) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = nil
- } else if (*z) != nil && cap((*z)) >= zb0002 {
- (*z) = (*z)[:zb0002]
- } else {
- (*z) = make(certProofs, zb0002)
- }
- for zb0001 := range *z {
- bts, err = (*z)[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, zb0001)
- return
- }
- }
- o = bts
- return
-}
-
-func (_ *certProofs) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*certProofs)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z certProofs) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize
- for za0001 := range z {
- s += z[za0001].Msgsize()
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z certProofs) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0017Len := uint32(26)
- var zb0017Mask uint32 /* 27 bits */
- if len((*z).ApplicationArgs) == 0 {
- zb0017Len--
- zb0017Mask |= 0x2
- }
- if len((*z).BitmaskApplicationArgs) == 0 {
- zb0017Len--
- zb0017Mask |= 0x4
- }
- if len((*z).OnCompletion) == 0 {
- zb0017Len--
- zb0017Mask |= 0x8
- }
- if len((*z).BitmaskOnCompletion) == 0 {
- zb0017Len--
- zb0017Mask |= 0x10
- }
- if len((*z).ApprovalProgram) == 0 {
- zb0017Len--
- zb0017Mask |= 0x20
- }
- if len((*z).BitmaskApprovalProgram) == 0 {
- zb0017Len--
- zb0017Mask |= 0x40
- }
- if len((*z).ForeignAssets) == 0 {
- zb0017Len--
- zb0017Mask |= 0x80
- }
- if len((*z).BitmaskForeignAssets) == 0 {
- zb0017Len--
- zb0017Mask |= 0x100
- }
- if len((*z).Accounts) == 0 {
- zb0017Len--
- zb0017Mask |= 0x200
- }
- if len((*z).BitmaskAccounts) == 0 {
- zb0017Len--
- zb0017Mask |= 0x400
- }
- if len((*z).ExtraProgramPages) == 0 {
- zb0017Len--
- zb0017Mask |= 0x800
- }
- if len((*z).BitmaskExtraProgramPages) == 0 {
- zb0017Len--
- zb0017Mask |= 0x1000
- }
- if len((*z).ForeignApps) == 0 {
- zb0017Len--
- zb0017Mask |= 0x2000
- }
- if len((*z).BitmaskForeignApps) == 0 {
- zb0017Len--
- zb0017Mask |= 0x4000
- }
- if len((*z).ApplicationID) == 0 {
- zb0017Len--
- zb0017Mask |= 0x8000
- }
- if len((*z).BitmaskApplicationID) == 0 {
- zb0017Len--
- zb0017Mask |= 0x10000
- }
- if len((*z).ClearStateProgram) == 0 {
- zb0017Len--
- zb0017Mask |= 0x20000
- }
- if len((*z).BitmaskClearStateProgram) == 0 {
- zb0017Len--
- zb0017Mask |= 0x40000
- }
- if len((*z).GlobalNumByteSlice) == 0 {
- zb0017Len--
- zb0017Mask |= 0x80000
- }
- if len((*z).BitmaskGlobalNumByteSlice) == 0 {
- zb0017Len--
- zb0017Mask |= 0x100000
- }
- if len((*z).GlobalNumUint) == 0 {
- zb0017Len--
- zb0017Mask |= 0x200000
- }
- if len((*z).BitmaskGlobalNumUint) == 0 {
- zb0017Len--
- zb0017Mask |= 0x400000
- }
- if len((*z).LocalNumByteSlice) == 0 {
- zb0017Len--
- zb0017Mask |= 0x800000
- }
- if len((*z).BitmaskLocalNumByteSlice) == 0 {
- zb0017Len--
- zb0017Mask |= 0x1000000
- }
- if len((*z).LocalNumUint) == 0 {
- zb0017Len--
- zb0017Mask |= 0x2000000
- }
- if len((*z).BitmaskLocalNumUint) == 0 {
- zb0017Len--
- zb0017Mask |= 0x4000000
- }
- // variable map header, size zb0017Len
- o = msgp.AppendMapHeader(o, zb0017Len)
- if zb0017Len != 0 {
- if (zb0017Mask & 0x2) == 0 { // if not empty
- // string "apaa"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
- if (*z).ApplicationArgs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ApplicationArgs)))
- }
- for zb0002 := range (*z).ApplicationArgs {
- if (*z).ApplicationArgs[zb0002] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ApplicationArgs[zb0002])))
- }
- for zb0003 := range (*z).ApplicationArgs[zb0002] {
- o = msgp.AppendBytes(o, (*z).ApplicationArgs[zb0002][zb0003])
- }
- }
- }
- if (zb0017Mask & 0x4) == 0 { // if not empty
- // string "apaabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskApplicationArgs))
- }
- if (zb0017Mask & 0x8) == 0 { // if not empty
- // string "apan"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
- o = msgp.AppendBytes(o, (*z).OnCompletion)
- }
- if (zb0017Mask & 0x10) == 0 { // if not empty
- // string "apanbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskOnCompletion))
- }
- if (zb0017Mask & 0x20) == 0 { // if not empty
- // string "apap"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
- if (*z).ApprovalProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ApprovalProgram)))
- }
- for zb0014 := range (*z).ApprovalProgram {
- o = msgp.AppendBytes(o, []byte((*z).ApprovalProgram[zb0014]))
- }
- }
- if (zb0017Mask & 0x40) == 0 { // if not empty
- // string "apapbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskApprovalProgram))
- }
- if (zb0017Mask & 0x80) == 0 { // if not empty
- // string "apas"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
- if (*z).ForeignAssets == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ForeignAssets)))
- }
- for zb0008 := range (*z).ForeignAssets {
- if (*z).ForeignAssets[zb0008] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ForeignAssets[zb0008])))
- }
- for zb0009 := range (*z).ForeignAssets[zb0008] {
- o = (*z).ForeignAssets[zb0008][zb0009].MarshalMsg(o)
- }
- }
- }
- if (zb0017Mask & 0x100) == 0 { // if not empty
- // string "apasbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskForeignAssets))
- }
- if (zb0017Mask & 0x200) == 0 { // if not empty
- // string "apat"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
- if (*z).Accounts == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Accounts)))
- }
- for zb0004 := range (*z).Accounts {
- if (*z).Accounts[zb0004] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Accounts[zb0004])))
- }
- for zb0005 := range (*z).Accounts[zb0004] {
- o = (*z).Accounts[zb0004][zb0005].MarshalMsg(o)
- }
- }
- }
- if (zb0017Mask & 0x400) == 0 { // if not empty
- // string "apatbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAccounts))
- }
- if (zb0017Mask & 0x800) == 0 { // if not empty
- // string "apep"
- o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
- if (*z).ExtraProgramPages == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ExtraProgramPages)))
- }
- for zb0016 := range (*z).ExtraProgramPages {
- o = msgp.AppendUint32(o, (*z).ExtraProgramPages[zb0016])
- }
- }
- if (zb0017Mask & 0x1000) == 0 { // if not empty
- // string "apepbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x65, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskExtraProgramPages))
- }
- if (zb0017Mask & 0x2000) == 0 { // if not empty
- // string "apfa"
- o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
- if (*z).ForeignApps == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ForeignApps)))
- }
- for zb0006 := range (*z).ForeignApps {
- if (*z).ForeignApps[zb0006] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ForeignApps[zb0006])))
- }
- for zb0007 := range (*z).ForeignApps[zb0006] {
- o = (*z).ForeignApps[zb0006][zb0007].MarshalMsg(o)
- }
- }
- }
- if (zb0017Mask & 0x4000) == 0 { // if not empty
- // string "apfabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x66, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskForeignApps))
- }
- if (zb0017Mask & 0x8000) == 0 { // if not empty
- // string "apid"
- o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
- if (*z).ApplicationID == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ApplicationID)))
- }
- for zb0001 := range (*z).ApplicationID {
- o = (*z).ApplicationID[zb0001].MarshalMsg(o)
- }
- }
- if (zb0017Mask & 0x10000) == 0 { // if not empty
- // string "apidbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskApplicationID))
- }
- if (zb0017Mask & 0x20000) == 0 { // if not empty
- // string "apsu"
- o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
- if (*z).ClearStateProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ClearStateProgram)))
- }
- for zb0015 := range (*z).ClearStateProgram {
- o = msgp.AppendBytes(o, []byte((*z).ClearStateProgram[zb0015]))
- }
- }
- if (zb0017Mask & 0x40000) == 0 { // if not empty
- // string "apsubm"
- o = append(o, 0xa6, 0x61, 0x70, 0x73, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskClearStateProgram))
- }
- if (zb0017Mask & 0x80000) == 0 { // if not empty
- // string "gnbs"
- o = append(o, 0xa4, 0x67, 0x6e, 0x62, 0x73)
- if (*z).GlobalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).GlobalNumByteSlice)))
- }
- for zb0013 := range (*z).GlobalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).GlobalNumByteSlice[zb0013])
- }
- }
- if (zb0017Mask & 0x100000) == 0 { // if not empty
- // string "gnbsbm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskGlobalNumByteSlice))
- }
- if (zb0017Mask & 0x200000) == 0 { // if not empty
- // string "gnui"
- o = append(o, 0xa4, 0x67, 0x6e, 0x75, 0x69)
- if (*z).GlobalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).GlobalNumUint)))
- }
- for zb0012 := range (*z).GlobalNumUint {
- o = msgp.AppendUint64(o, (*z).GlobalNumUint[zb0012])
- }
- }
- if (zb0017Mask & 0x400000) == 0 { // if not empty
- // string "gnuibm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskGlobalNumUint))
- }
- if (zb0017Mask & 0x800000) == 0 { // if not empty
- // string "lnbs"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x62, 0x73)
- if (*z).LocalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).LocalNumByteSlice)))
- }
- for zb0011 := range (*z).LocalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).LocalNumByteSlice[zb0011])
- }
- }
- if (zb0017Mask & 0x1000000) == 0 { // if not empty
- // string "lnbsbm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskLocalNumByteSlice))
- }
- if (zb0017Mask & 0x2000000) == 0 { // if not empty
- // string "lnui"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x75, 0x69)
- if (*z).LocalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).LocalNumUint)))
- }
- for zb0010 := range (*z).LocalNumUint {
- o = msgp.AppendUint64(o, (*z).LocalNumUint[zb0010])
- }
- }
- if (zb0017Mask & 0x4000000) == 0 { // if not empty
- // string "lnuibm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskLocalNumUint))
- }
- }
- return
-}
-
-func (_ *encodedApplicationCallTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedApplicationCallTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0017 > 0 {
- zb0017--
- var zb0019 int
- var zb0020 bool
- zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0019 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0020 {
- (*z).ApplicationID = nil
- } else if (*z).ApplicationID != nil && cap((*z).ApplicationID) >= zb0019 {
- (*z).ApplicationID = ((*z).ApplicationID)[:zb0019]
- } else {
- (*z).ApplicationID = make([]basics.AppIndex, zb0019)
- }
- for zb0001 := range (*z).ApplicationID {
- bts, err = (*z).ApplicationID[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID", zb0001)
- return
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0021 []byte
- var zb0022 int
- zb0022, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- if zb0022 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxBitmaskSize))
- return
- }
- zb0021, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- (*z).BitmaskApplicationID = bitmask(zb0021)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0023 int
- zb0023, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- if zb0023 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0024 []byte
- var zb0025 int
- zb0025, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- if zb0025 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(maxBitmaskSize))
- return
- }
- zb0024, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- (*z).BitmaskOnCompletion = bitmask(zb0024)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0026 int
- var zb0027 bool
- zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0026 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0027 {
- (*z).ApplicationArgs = nil
- } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0026 {
- (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0026]
- } else {
- (*z).ApplicationArgs = make([]applicationArgs, zb0026)
- }
- for zb0002 := range (*z).ApplicationArgs {
- var zb0028 int
- var zb0029 bool
- zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0002)
- return
- }
- if zb0028 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0002)
- return
- }
- if zb0029 {
- (*z).ApplicationArgs[zb0002] = nil
- } else if (*z).ApplicationArgs[zb0002] != nil && cap((*z).ApplicationArgs[zb0002]) >= zb0028 {
- (*z).ApplicationArgs[zb0002] = ((*z).ApplicationArgs[zb0002])[:zb0028]
- } else {
- (*z).ApplicationArgs[zb0002] = make(applicationArgs, zb0028)
- }
- for zb0003 := range (*z).ApplicationArgs[zb0002] {
- (*z).ApplicationArgs[zb0002][zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationArgs[zb0002][zb0003])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0002, zb0003)
- return
- }
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0030 []byte
- var zb0031 int
- zb0031, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- if zb0031 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0031), uint64(maxBitmaskSize))
- return
- }
- zb0030, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- (*z).BitmaskApplicationArgs = bitmask(zb0030)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0032 int
- var zb0033 bool
- zb0032, zb0033, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0032 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0032), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0033 {
- (*z).Accounts = nil
- } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0032 {
- (*z).Accounts = ((*z).Accounts)[:zb0032]
- } else {
- (*z).Accounts = make([]addresses, zb0032)
- }
- for zb0004 := range (*z).Accounts {
- var zb0034 int
- var zb0035 bool
- zb0034, zb0035, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0004)
- return
- }
- if zb0034 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0034), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0004)
- return
- }
- if zb0035 {
- (*z).Accounts[zb0004] = nil
- } else if (*z).Accounts[zb0004] != nil && cap((*z).Accounts[zb0004]) >= zb0034 {
- (*z).Accounts[zb0004] = ((*z).Accounts[zb0004])[:zb0034]
- } else {
- (*z).Accounts[zb0004] = make(addresses, zb0034)
- }
- for zb0005 := range (*z).Accounts[zb0004] {
- bts, err = (*z).Accounts[zb0004][zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0004, zb0005)
- return
- }
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0036 []byte
- var zb0037 int
- zb0037, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- if zb0037 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0037), uint64(maxBitmaskSize))
- return
- }
- zb0036, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- (*z).BitmaskAccounts = bitmask(zb0036)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0038 int
- var zb0039 bool
- zb0038, zb0039, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0038 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0038), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0039 {
- (*z).ForeignApps = nil
- } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0038 {
- (*z).ForeignApps = ((*z).ForeignApps)[:zb0038]
- } else {
- (*z).ForeignApps = make([]appIndices, zb0038)
- }
- for zb0006 := range (*z).ForeignApps {
- var zb0040 int
- var zb0041 bool
- zb0040, zb0041, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0006)
- return
- }
- if zb0040 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0040), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0006)
- return
- }
- if zb0041 {
- (*z).ForeignApps[zb0006] = nil
- } else if (*z).ForeignApps[zb0006] != nil && cap((*z).ForeignApps[zb0006]) >= zb0040 {
- (*z).ForeignApps[zb0006] = ((*z).ForeignApps[zb0006])[:zb0040]
- } else {
- (*z).ForeignApps[zb0006] = make(appIndices, zb0040)
- }
- for zb0007 := range (*z).ForeignApps[zb0006] {
- bts, err = (*z).ForeignApps[zb0006][zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0006, zb0007)
- return
- }
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0042 []byte
- var zb0043 int
- zb0043, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- if zb0043 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0043), uint64(maxBitmaskSize))
- return
- }
- zb0042, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- (*z).BitmaskForeignApps = bitmask(zb0042)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0044 int
- var zb0045 bool
- zb0044, zb0045, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0044 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0044), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0045 {
- (*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0044 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0044]
- } else {
- (*z).ForeignAssets = make([]assetIndices, zb0044)
- }
- for zb0008 := range (*z).ForeignAssets {
- var zb0046 int
- var zb0047 bool
- zb0046, zb0047, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0008)
- return
- }
- if zb0046 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0046), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0008)
- return
- }
- if zb0047 {
- (*z).ForeignAssets[zb0008] = nil
- } else if (*z).ForeignAssets[zb0008] != nil && cap((*z).ForeignAssets[zb0008]) >= zb0046 {
- (*z).ForeignAssets[zb0008] = ((*z).ForeignAssets[zb0008])[:zb0046]
- } else {
- (*z).ForeignAssets[zb0008] = make(assetIndices, zb0046)
- }
- for zb0009 := range (*z).ForeignAssets[zb0008] {
- bts, err = (*z).ForeignAssets[zb0008][zb0009].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0008, zb0009)
- return
- }
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0048 []byte
- var zb0049 int
- zb0049, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- if zb0049 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0049), uint64(maxBitmaskSize))
- return
- }
- zb0048, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- (*z).BitmaskForeignAssets = bitmask(zb0048)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0050 int
- var zb0051 bool
- zb0050, zb0051, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0050 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0050), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0051 {
- (*z).LocalNumUint = nil
- } else if (*z).LocalNumUint != nil && cap((*z).LocalNumUint) >= zb0050 {
- (*z).LocalNumUint = ((*z).LocalNumUint)[:zb0050]
- } else {
- (*z).LocalNumUint = make([]uint64, zb0050)
- }
- for zb0010 := range (*z).LocalNumUint {
- (*z).LocalNumUint[zb0010], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint", zb0010)
- return
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0052 []byte
- var zb0053 int
- zb0053, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- if zb0053 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0053), uint64(maxBitmaskSize))
- return
- }
- zb0052, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- (*z).BitmaskLocalNumUint = bitmask(zb0052)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0054 int
- var zb0055 bool
- zb0054, zb0055, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0054 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0055 {
- (*z).LocalNumByteSlice = nil
- } else if (*z).LocalNumByteSlice != nil && cap((*z).LocalNumByteSlice) >= zb0054 {
- (*z).LocalNumByteSlice = ((*z).LocalNumByteSlice)[:zb0054]
- } else {
- (*z).LocalNumByteSlice = make([]uint64, zb0054)
- }
- for zb0011 := range (*z).LocalNumByteSlice {
- (*z).LocalNumByteSlice[zb0011], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice", zb0011)
- return
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0056 []byte
- var zb0057 int
- zb0057, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- if zb0057 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0057), uint64(maxBitmaskSize))
- return
- }
- zb0056, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- (*z).BitmaskLocalNumByteSlice = bitmask(zb0056)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0058 int
- var zb0059 bool
- zb0058, zb0059, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0058 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0058), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0059 {
- (*z).GlobalNumUint = nil
- } else if (*z).GlobalNumUint != nil && cap((*z).GlobalNumUint) >= zb0058 {
- (*z).GlobalNumUint = ((*z).GlobalNumUint)[:zb0058]
- } else {
- (*z).GlobalNumUint = make([]uint64, zb0058)
- }
- for zb0012 := range (*z).GlobalNumUint {
- (*z).GlobalNumUint[zb0012], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint", zb0012)
- return
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0060 []byte
- var zb0061 int
- zb0061, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- if zb0061 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0061), uint64(maxBitmaskSize))
- return
- }
- zb0060, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- (*z).BitmaskGlobalNumUint = bitmask(zb0060)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0062 int
- var zb0063 bool
- zb0062, zb0063, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0062 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0062), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0063 {
- (*z).GlobalNumByteSlice = nil
- } else if (*z).GlobalNumByteSlice != nil && cap((*z).GlobalNumByteSlice) >= zb0062 {
- (*z).GlobalNumByteSlice = ((*z).GlobalNumByteSlice)[:zb0062]
- } else {
- (*z).GlobalNumByteSlice = make([]uint64, zb0062)
- }
- for zb0013 := range (*z).GlobalNumByteSlice {
- (*z).GlobalNumByteSlice[zb0013], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice", zb0013)
- return
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0064 []byte
- var zb0065 int
- zb0065, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0065 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0065), uint64(maxBitmaskSize))
- return
- }
- zb0064, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).BitmaskGlobalNumByteSlice = bitmask(zb0064)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0066 int
- var zb0067 bool
- zb0066, zb0067, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0066 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0066), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0067 {
- (*z).ApprovalProgram = nil
- } else if (*z).ApprovalProgram != nil && cap((*z).ApprovalProgram) >= zb0066 {
- (*z).ApprovalProgram = ((*z).ApprovalProgram)[:zb0066]
- } else {
- (*z).ApprovalProgram = make([]program, zb0066)
- }
- for zb0014 := range (*z).ApprovalProgram {
- {
- var zb0068 []byte
- var zb0069 int
- zb0069, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0014)
- return
- }
- if zb0069 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0069), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0068, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).ApprovalProgram[zb0014]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0014)
- return
- }
- (*z).ApprovalProgram[zb0014] = program(zb0068)
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0070 []byte
- var zb0071 int
- zb0071, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- if zb0071 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0071), uint64(maxBitmaskSize))
- return
- }
- zb0070, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- (*z).BitmaskApprovalProgram = bitmask(zb0070)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0072 int
- var zb0073 bool
- zb0072, zb0073, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0072 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0072), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0073 {
- (*z).ClearStateProgram = nil
- } else if (*z).ClearStateProgram != nil && cap((*z).ClearStateProgram) >= zb0072 {
- (*z).ClearStateProgram = ((*z).ClearStateProgram)[:zb0072]
- } else {
- (*z).ClearStateProgram = make([]program, zb0072)
- }
- for zb0015 := range (*z).ClearStateProgram {
- {
- var zb0074 []byte
- var zb0075 int
- zb0075, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0015)
- return
- }
- if zb0075 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0075), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0074, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).ClearStateProgram[zb0015]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0015)
- return
- }
- (*z).ClearStateProgram[zb0015] = program(zb0074)
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0076 []byte
- var zb0077 int
- zb0077, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- if zb0077 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0077), uint64(maxBitmaskSize))
- return
- }
- zb0076, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- (*z).BitmaskClearStateProgram = bitmask(zb0076)
- }
- }
- if zb0017 > 0 {
- zb0017--
- var zb0078 int
- var zb0079 bool
- zb0078, zb0079, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0078 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0078), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0079 {
- (*z).ExtraProgramPages = nil
- } else if (*z).ExtraProgramPages != nil && cap((*z).ExtraProgramPages) >= zb0078 {
- (*z).ExtraProgramPages = ((*z).ExtraProgramPages)[:zb0078]
- } else {
- (*z).ExtraProgramPages = make([]uint32, zb0078)
- }
- for zb0016 := range (*z).ExtraProgramPages {
- (*z).ExtraProgramPages[zb0016], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages", zb0016)
- return
- }
- }
- }
- if zb0017 > 0 {
- zb0017--
- {
- var zb0080 []byte
- var zb0081 int
- zb0081, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- if zb0081 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0081), uint64(maxBitmaskSize))
- return
- }
- zb0080, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- (*z).BitmaskExtraProgramPages = bitmask(zb0080)
- }
- }
- if zb0017 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0017)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0018 {
- (*z) = encodedApplicationCallTxnFields{}
- }
- for zb0017 > 0 {
- zb0017--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "apid":
- var zb0082 int
- var zb0083 bool
- zb0082, zb0083, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0082 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0082), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0083 {
- (*z).ApplicationID = nil
- } else if (*z).ApplicationID != nil && cap((*z).ApplicationID) >= zb0082 {
- (*z).ApplicationID = ((*z).ApplicationID)[:zb0082]
- } else {
- (*z).ApplicationID = make([]basics.AppIndex, zb0082)
- }
- for zb0001 := range (*z).ApplicationID {
- bts, err = (*z).ApplicationID[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID", zb0001)
- return
- }
- }
- case "apidbm":
- {
- var zb0084 []byte
- var zb0085 int
- zb0085, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- if zb0085 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0085), uint64(maxBitmaskSize))
- return
- }
- zb0084, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- (*z).BitmaskApplicationID = bitmask(zb0084)
- }
- case "apan":
- var zb0086 int
- zb0086, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- if zb0086 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0086), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- case "apanbm":
- {
- var zb0087 []byte
- var zb0088 int
- zb0088, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- if zb0088 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0088), uint64(maxBitmaskSize))
- return
- }
- zb0087, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- (*z).BitmaskOnCompletion = bitmask(zb0087)
- }
- case "apaa":
- var zb0089 int
- var zb0090 bool
- zb0089, zb0090, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0089 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0089), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0090 {
- (*z).ApplicationArgs = nil
- } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0089 {
- (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0089]
- } else {
- (*z).ApplicationArgs = make([]applicationArgs, zb0089)
- }
- for zb0002 := range (*z).ApplicationArgs {
- var zb0091 int
- var zb0092 bool
- zb0091, zb0092, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0002)
- return
- }
- if zb0091 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0091), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "ApplicationArgs", zb0002)
- return
- }
- if zb0092 {
- (*z).ApplicationArgs[zb0002] = nil
- } else if (*z).ApplicationArgs[zb0002] != nil && cap((*z).ApplicationArgs[zb0002]) >= zb0091 {
- (*z).ApplicationArgs[zb0002] = ((*z).ApplicationArgs[zb0002])[:zb0091]
- } else {
- (*z).ApplicationArgs[zb0002] = make(applicationArgs, zb0091)
- }
- for zb0003 := range (*z).ApplicationArgs[zb0002] {
- (*z).ApplicationArgs[zb0002][zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationArgs[zb0002][zb0003])
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0002, zb0003)
- return
- }
- }
- }
- case "apaabm":
- {
- var zb0093 []byte
- var zb0094 int
- zb0094, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- if zb0094 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0094), uint64(maxBitmaskSize))
- return
- }
- zb0093, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- (*z).BitmaskApplicationArgs = bitmask(zb0093)
- }
- case "apat":
- var zb0095 int
- var zb0096 bool
- zb0095, zb0096, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0095 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0095), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0096 {
- (*z).Accounts = nil
- } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0095 {
- (*z).Accounts = ((*z).Accounts)[:zb0095]
- } else {
- (*z).Accounts = make([]addresses, zb0095)
- }
- for zb0004 := range (*z).Accounts {
- var zb0097 int
- var zb0098 bool
- zb0097, zb0098, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0004)
- return
- }
- if zb0097 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0097), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "Accounts", zb0004)
- return
- }
- if zb0098 {
- (*z).Accounts[zb0004] = nil
- } else if (*z).Accounts[zb0004] != nil && cap((*z).Accounts[zb0004]) >= zb0097 {
- (*z).Accounts[zb0004] = ((*z).Accounts[zb0004])[:zb0097]
- } else {
- (*z).Accounts[zb0004] = make(addresses, zb0097)
- }
- for zb0005 := range (*z).Accounts[zb0004] {
- bts, err = (*z).Accounts[zb0004][zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0004, zb0005)
- return
- }
- }
- }
- case "apatbm":
- {
- var zb0099 []byte
- var zb0100 int
- zb0100, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- if zb0100 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0100), uint64(maxBitmaskSize))
- return
- }
- zb0099, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- (*z).BitmaskAccounts = bitmask(zb0099)
- }
- case "apfa":
- var zb0101 int
- var zb0102 bool
- zb0101, zb0102, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0101 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0101), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0102 {
- (*z).ForeignApps = nil
- } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0101 {
- (*z).ForeignApps = ((*z).ForeignApps)[:zb0101]
- } else {
- (*z).ForeignApps = make([]appIndices, zb0101)
- }
- for zb0006 := range (*z).ForeignApps {
- var zb0103 int
- var zb0104 bool
- zb0103, zb0104, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0006)
- return
- }
- if zb0103 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0103), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "ForeignApps", zb0006)
- return
- }
- if zb0104 {
- (*z).ForeignApps[zb0006] = nil
- } else if (*z).ForeignApps[zb0006] != nil && cap((*z).ForeignApps[zb0006]) >= zb0103 {
- (*z).ForeignApps[zb0006] = ((*z).ForeignApps[zb0006])[:zb0103]
- } else {
- (*z).ForeignApps[zb0006] = make(appIndices, zb0103)
- }
- for zb0007 := range (*z).ForeignApps[zb0006] {
- bts, err = (*z).ForeignApps[zb0006][zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0006, zb0007)
- return
- }
- }
- }
- case "apfabm":
- {
- var zb0105 []byte
- var zb0106 int
- zb0106, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- if zb0106 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0106), uint64(maxBitmaskSize))
- return
- }
- zb0105, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- (*z).BitmaskForeignApps = bitmask(zb0105)
- }
- case "apas":
- var zb0107 int
- var zb0108 bool
- zb0107, zb0108, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0107 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0107), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0108 {
- (*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0107 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0107]
- } else {
- (*z).ForeignAssets = make([]assetIndices, zb0107)
- }
- for zb0008 := range (*z).ForeignAssets {
- var zb0109 int
- var zb0110 bool
- zb0109, zb0110, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0008)
- return
- }
- if zb0109 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0109), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "ForeignAssets", zb0008)
- return
- }
- if zb0110 {
- (*z).ForeignAssets[zb0008] = nil
- } else if (*z).ForeignAssets[zb0008] != nil && cap((*z).ForeignAssets[zb0008]) >= zb0109 {
- (*z).ForeignAssets[zb0008] = ((*z).ForeignAssets[zb0008])[:zb0109]
- } else {
- (*z).ForeignAssets[zb0008] = make(assetIndices, zb0109)
- }
- for zb0009 := range (*z).ForeignAssets[zb0008] {
- bts, err = (*z).ForeignAssets[zb0008][zb0009].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0008, zb0009)
- return
- }
- }
- }
- case "apasbm":
- {
- var zb0111 []byte
- var zb0112 int
- zb0112, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- if zb0112 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0112), uint64(maxBitmaskSize))
- return
- }
- zb0111, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- (*z).BitmaskForeignAssets = bitmask(zb0111)
- }
- case "lnui":
- var zb0113 int
- var zb0114 bool
- zb0113, zb0114, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0113 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0113), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0114 {
- (*z).LocalNumUint = nil
- } else if (*z).LocalNumUint != nil && cap((*z).LocalNumUint) >= zb0113 {
- (*z).LocalNumUint = ((*z).LocalNumUint)[:zb0113]
- } else {
- (*z).LocalNumUint = make([]uint64, zb0113)
- }
- for zb0010 := range (*z).LocalNumUint {
- (*z).LocalNumUint[zb0010], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint", zb0010)
- return
- }
- }
- case "lnuibm":
- {
- var zb0115 []byte
- var zb0116 int
- zb0116, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- if zb0116 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0116), uint64(maxBitmaskSize))
- return
- }
- zb0115, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- (*z).BitmaskLocalNumUint = bitmask(zb0115)
- }
- case "lnbs":
- var zb0117 int
- var zb0118 bool
- zb0117, zb0118, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0117 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0117), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0118 {
- (*z).LocalNumByteSlice = nil
- } else if (*z).LocalNumByteSlice != nil && cap((*z).LocalNumByteSlice) >= zb0117 {
- (*z).LocalNumByteSlice = ((*z).LocalNumByteSlice)[:zb0117]
- } else {
- (*z).LocalNumByteSlice = make([]uint64, zb0117)
- }
- for zb0011 := range (*z).LocalNumByteSlice {
- (*z).LocalNumByteSlice[zb0011], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice", zb0011)
- return
- }
- }
- case "lnbsbm":
- {
- var zb0119 []byte
- var zb0120 int
- zb0120, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- if zb0120 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0120), uint64(maxBitmaskSize))
- return
- }
- zb0119, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- (*z).BitmaskLocalNumByteSlice = bitmask(zb0119)
- }
- case "gnui":
- var zb0121 int
- var zb0122 bool
- zb0121, zb0122, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0121 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0121), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0122 {
- (*z).GlobalNumUint = nil
- } else if (*z).GlobalNumUint != nil && cap((*z).GlobalNumUint) >= zb0121 {
- (*z).GlobalNumUint = ((*z).GlobalNumUint)[:zb0121]
- } else {
- (*z).GlobalNumUint = make([]uint64, zb0121)
- }
- for zb0012 := range (*z).GlobalNumUint {
- (*z).GlobalNumUint[zb0012], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint", zb0012)
- return
- }
- }
- case "gnuibm":
- {
- var zb0123 []byte
- var zb0124 int
- zb0124, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- if zb0124 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0124), uint64(maxBitmaskSize))
- return
- }
- zb0123, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- (*z).BitmaskGlobalNumUint = bitmask(zb0123)
- }
- case "gnbs":
- var zb0125 int
- var zb0126 bool
- zb0125, zb0126, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0125 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0125), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0126 {
- (*z).GlobalNumByteSlice = nil
- } else if (*z).GlobalNumByteSlice != nil && cap((*z).GlobalNumByteSlice) >= zb0125 {
- (*z).GlobalNumByteSlice = ((*z).GlobalNumByteSlice)[:zb0125]
- } else {
- (*z).GlobalNumByteSlice = make([]uint64, zb0125)
- }
- for zb0013 := range (*z).GlobalNumByteSlice {
- (*z).GlobalNumByteSlice[zb0013], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice", zb0013)
- return
- }
- }
- case "gnbsbm":
- {
- var zb0127 []byte
- var zb0128 int
- zb0128, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0128 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0128), uint64(maxBitmaskSize))
- return
- }
- zb0127, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).BitmaskGlobalNumByteSlice = bitmask(zb0127)
- }
- case "apap":
- var zb0129 int
- var zb0130 bool
- zb0129, zb0130, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0129 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0129), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0130 {
- (*z).ApprovalProgram = nil
- } else if (*z).ApprovalProgram != nil && cap((*z).ApprovalProgram) >= zb0129 {
- (*z).ApprovalProgram = ((*z).ApprovalProgram)[:zb0129]
- } else {
- (*z).ApprovalProgram = make([]program, zb0129)
- }
- for zb0014 := range (*z).ApprovalProgram {
- {
- var zb0131 []byte
- var zb0132 int
- zb0132, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0014)
- return
- }
- if zb0132 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0132), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0131, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).ApprovalProgram[zb0014]))
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0014)
- return
- }
- (*z).ApprovalProgram[zb0014] = program(zb0131)
- }
- }
- case "apapbm":
- {
- var zb0133 []byte
- var zb0134 int
- zb0134, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- if zb0134 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0134), uint64(maxBitmaskSize))
- return
- }
- zb0133, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- (*z).BitmaskApprovalProgram = bitmask(zb0133)
- }
- case "apsu":
- var zb0135 int
- var zb0136 bool
- zb0135, zb0136, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0135 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0135), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0136 {
- (*z).ClearStateProgram = nil
- } else if (*z).ClearStateProgram != nil && cap((*z).ClearStateProgram) >= zb0135 {
- (*z).ClearStateProgram = ((*z).ClearStateProgram)[:zb0135]
- } else {
- (*z).ClearStateProgram = make([]program, zb0135)
- }
- for zb0015 := range (*z).ClearStateProgram {
- {
- var zb0137 []byte
- var zb0138 int
- zb0138, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0015)
- return
- }
- if zb0138 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0138), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0137, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).ClearStateProgram[zb0015]))
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0015)
- return
- }
- (*z).ClearStateProgram[zb0015] = program(zb0137)
- }
- }
- case "apsubm":
- {
- var zb0139 []byte
- var zb0140 int
- zb0140, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- if zb0140 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0140), uint64(maxBitmaskSize))
- return
- }
- zb0139, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- (*z).BitmaskClearStateProgram = bitmask(zb0139)
- }
- case "apep":
- var zb0141 int
- var zb0142 bool
- zb0141, zb0142, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0141 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0141), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0142 {
- (*z).ExtraProgramPages = nil
- } else if (*z).ExtraProgramPages != nil && cap((*z).ExtraProgramPages) >= zb0141 {
- (*z).ExtraProgramPages = ((*z).ExtraProgramPages)[:zb0141]
- } else {
- (*z).ExtraProgramPages = make([]uint32, zb0141)
- }
- for zb0016 := range (*z).ExtraProgramPages {
- (*z).ExtraProgramPages[zb0016], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages", zb0016)
- return
- }
- }
- case "apepbm":
- {
- var zb0143 []byte
- var zb0144 int
- zb0144, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- if zb0144 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0144), uint64(maxBitmaskSize))
- return
- }
- zb0143, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- (*z).BitmaskExtraProgramPages = bitmask(zb0143)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedApplicationCallTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedApplicationCallTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedApplicationCallTxnFields) Msgsize() (s int) {
- s = 3 + 5 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).ApplicationID {
- s += (*z).ApplicationID[zb0001].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskApplicationID)) + 5 + msgp.BytesPrefixSize + len((*z).OnCompletion) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskOnCompletion)) + 5 + msgp.ArrayHeaderSize
- for zb0002 := range (*z).ApplicationArgs {
- s += msgp.ArrayHeaderSize
- for zb0003 := range (*z).ApplicationArgs[zb0002] {
- s += msgp.BytesPrefixSize + len((*z).ApplicationArgs[zb0002][zb0003])
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskApplicationArgs)) + 5 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).Accounts {
- s += msgp.ArrayHeaderSize
- for zb0005 := range (*z).Accounts[zb0004] {
- s += (*z).Accounts[zb0004][zb0005].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAccounts)) + 5 + msgp.ArrayHeaderSize
- for zb0006 := range (*z).ForeignApps {
- s += msgp.ArrayHeaderSize
- for zb0007 := range (*z).ForeignApps[zb0006] {
- s += (*z).ForeignApps[zb0006][zb0007].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskForeignApps)) + 5 + msgp.ArrayHeaderSize
- for zb0008 := range (*z).ForeignAssets {
- s += msgp.ArrayHeaderSize
- for zb0009 := range (*z).ForeignAssets[zb0008] {
- s += (*z).ForeignAssets[zb0008][zb0009].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskForeignAssets)) + 5 + msgp.ArrayHeaderSize + (len((*z).LocalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskLocalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).LocalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskLocalNumByteSlice)) + 5 + msgp.ArrayHeaderSize + (len((*z).GlobalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskGlobalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).GlobalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskGlobalNumByteSlice)) + 5 + msgp.ArrayHeaderSize
- for zb0014 := range (*z).ApprovalProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).ApprovalProgram[zb0014]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskApprovalProgram)) + 5 + msgp.ArrayHeaderSize
- for zb0015 := range (*z).ClearStateProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).ClearStateProgram[zb0015]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskClearStateProgram)) + 5 + msgp.ArrayHeaderSize + (len((*z).ExtraProgramPages) * (msgp.Uint32Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskExtraProgramPages))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedApplicationCallTxnFields) MsgIsZero() bool {
- return (len((*z).ApplicationID) == 0) && (len((*z).BitmaskApplicationID) == 0) && (len((*z).OnCompletion) == 0) && (len((*z).BitmaskOnCompletion) == 0) && (len((*z).ApplicationArgs) == 0) && (len((*z).BitmaskApplicationArgs) == 0) && (len((*z).Accounts) == 0) && (len((*z).BitmaskAccounts) == 0) && (len((*z).ForeignApps) == 0) && (len((*z).BitmaskForeignApps) == 0) && (len((*z).ForeignAssets) == 0) && (len((*z).BitmaskForeignAssets) == 0) && (len((*z).LocalNumUint) == 0) && (len((*z).BitmaskLocalNumUint) == 0) && (len((*z).LocalNumByteSlice) == 0) && (len((*z).BitmaskLocalNumByteSlice) == 0) && (len((*z).GlobalNumUint) == 0) && (len((*z).BitmaskGlobalNumUint) == 0) && (len((*z).GlobalNumByteSlice) == 0) && (len((*z).BitmaskGlobalNumByteSlice) == 0) && (len((*z).ApprovalProgram) == 0) && (len((*z).BitmaskApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && (len((*z).BitmaskClearStateProgram) == 0) && (len((*z).ExtraProgramPages) == 0) && (len((*z).BitmaskExtraProgramPages) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedAssetConfigTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0007Len := uint32(23)
- var zb0007Mask uint32 /* 25 bits */
- if len((*z).encodedAssetParams.MetadataHash) == 0 {
- zb0007Len--
- zb0007Mask |= 0x4
- }
- if len((*z).encodedAssetParams.BitmaskMetadataHash) == 0 {
- zb0007Len--
- zb0007Mask |= 0x8
- }
- if len((*z).encodedAssetParams.AssetName) == 0 {
- zb0007Len--
- zb0007Mask |= 0x10
- }
- if len((*z).encodedAssetParams.BitmaskAssetName) == 0 {
- zb0007Len--
- zb0007Mask |= 0x20
- }
- if len((*z).encodedAssetParams.URL) == 0 {
- zb0007Len--
- zb0007Mask |= 0x40
- }
- if len((*z).encodedAssetParams.BitmaskURL) == 0 {
- zb0007Len--
- zb0007Mask |= 0x80
- }
- if len((*z).encodedAssetParams.Clawback) == 0 {
- zb0007Len--
- zb0007Mask |= 0x100
- }
- if len((*z).ConfigAsset) == 0 {
- zb0007Len--
- zb0007Mask |= 0x200
- }
- if len((*z).BitmaskConfigAsset) == 0 {
- zb0007Len--
- zb0007Mask |= 0x400
- }
- if len((*z).encodedAssetParams.BitmaskClawback) == 0 {
- zb0007Len--
- zb0007Mask |= 0x800
- }
- if len((*z).encodedAssetParams.Decimals) == 0 {
- zb0007Len--
- zb0007Mask |= 0x1000
- }
- if len((*z).encodedAssetParams.BitmaskDecimals) == 0 {
- zb0007Len--
- zb0007Mask |= 0x2000
- }
- if len((*z).encodedAssetParams.BitmaskDefaultFrozen) == 0 {
- zb0007Len--
- zb0007Mask |= 0x4000
- }
- if len((*z).encodedAssetParams.Freeze) == 0 {
- zb0007Len--
- zb0007Mask |= 0x8000
- }
- if len((*z).encodedAssetParams.BitmaskFreeze) == 0 {
- zb0007Len--
- zb0007Mask |= 0x10000
- }
- if len((*z).encodedAssetParams.Manager) == 0 {
- zb0007Len--
- zb0007Mask |= 0x20000
- }
- if len((*z).encodedAssetParams.BitmaskManager) == 0 {
- zb0007Len--
- zb0007Mask |= 0x40000
- }
- if len((*z).encodedAssetParams.Reserve) == 0 {
- zb0007Len--
- zb0007Mask |= 0x80000
- }
- if len((*z).encodedAssetParams.BitmaskReserve) == 0 {
- zb0007Len--
- zb0007Mask |= 0x100000
- }
- if len((*z).encodedAssetParams.Total) == 0 {
- zb0007Len--
- zb0007Mask |= 0x200000
- }
- if len((*z).encodedAssetParams.BitmaskTotal) == 0 {
- zb0007Len--
- zb0007Mask |= 0x400000
- }
- if len((*z).encodedAssetParams.UnitName) == 0 {
- zb0007Len--
- zb0007Mask |= 0x800000
- }
- if len((*z).encodedAssetParams.BitmaskUnitName) == 0 {
- zb0007Len--
- zb0007Mask |= 0x1000000
- }
- // variable map header, size zb0007Len
- o = msgp.AppendMapHeader(o, zb0007Len)
- if zb0007Len != 0 {
- if (zb0007Mask & 0x4) == 0 { // if not empty
- // string "am"
- o = append(o, 0xa2, 0x61, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedAssetParams.MetadataHash)
- }
- if (zb0007Mask & 0x8) == 0 { // if not empty
- // string "ambm"
- o = append(o, 0xa4, 0x61, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskMetadataHash))
- }
- if (zb0007Mask & 0x10) == 0 { // if not empty
- // string "an"
- o = append(o, 0xa2, 0x61, 0x6e)
- if (*z).encodedAssetParams.AssetName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetParams.AssetName)))
- }
- for zb0005 := range (*z).encodedAssetParams.AssetName {
- o = msgp.AppendString(o, (*z).encodedAssetParams.AssetName[zb0005])
- }
- }
- if (zb0007Mask & 0x20) == 0 { // if not empty
- // string "anbm"
- o = append(o, 0xa4, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskAssetName))
- }
- if (zb0007Mask & 0x40) == 0 { // if not empty
- // string "au"
- o = append(o, 0xa2, 0x61, 0x75)
- if (*z).encodedAssetParams.URL == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetParams.URL)))
- }
- for zb0006 := range (*z).encodedAssetParams.URL {
- o = msgp.AppendString(o, (*z).encodedAssetParams.URL[zb0006])
- }
- }
- if (zb0007Mask & 0x80) == 0 { // if not empty
- // string "aubm"
- o = append(o, 0xa4, 0x61, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskURL))
- }
- if (zb0007Mask & 0x100) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedAssetParams.Clawback)
- }
- if (zb0007Mask & 0x200) == 0 { // if not empty
- // string "caid"
- o = append(o, 0xa4, 0x63, 0x61, 0x69, 0x64)
- if (*z).ConfigAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).ConfigAsset)))
- }
- for zb0001 := range (*z).ConfigAsset {
- o = (*z).ConfigAsset[zb0001].MarshalMsg(o)
- }
- }
- if (zb0007Mask & 0x400) == 0 { // if not empty
- // string "caidbm"
- o = append(o, 0xa6, 0x63, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskConfigAsset))
- }
- if (zb0007Mask & 0x800) == 0 { // if not empty
- // string "cbm"
- o = append(o, 0xa3, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskClawback))
- }
- if (zb0007Mask & 0x1000) == 0 { // if not empty
- // string "dc"
- o = append(o, 0xa2, 0x64, 0x63)
- if (*z).encodedAssetParams.Decimals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetParams.Decimals)))
- }
- for zb0003 := range (*z).encodedAssetParams.Decimals {
- o = msgp.AppendUint32(o, (*z).encodedAssetParams.Decimals[zb0003])
- }
- }
- if (zb0007Mask & 0x2000) == 0 { // if not empty
- // string "dcbm"
- o = append(o, 0xa4, 0x64, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskDecimals))
- }
- if (zb0007Mask & 0x4000) == 0 { // if not empty
- // string "dfbm"
- o = append(o, 0xa4, 0x64, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskDefaultFrozen))
- }
- if (zb0007Mask & 0x8000) == 0 { // if not empty
- // string "f"
- o = append(o, 0xa1, 0x66)
- o = msgp.AppendBytes(o, (*z).encodedAssetParams.Freeze)
- }
- if (zb0007Mask & 0x10000) == 0 { // if not empty
- // string "fbm"
- o = append(o, 0xa3, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskFreeze))
- }
- if (zb0007Mask & 0x20000) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedAssetParams.Manager)
- }
- if (zb0007Mask & 0x40000) == 0 { // if not empty
- // string "mbm"
- o = append(o, 0xa3, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskManager))
- }
- if (zb0007Mask & 0x80000) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- o = msgp.AppendBytes(o, (*z).encodedAssetParams.Reserve)
- }
- if (zb0007Mask & 0x100000) == 0 { // if not empty
- // string "rbm"
- o = append(o, 0xa3, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskReserve))
- }
- if (zb0007Mask & 0x200000) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- if (*z).encodedAssetParams.Total == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetParams.Total)))
- }
- for zb0002 := range (*z).encodedAssetParams.Total {
- o = msgp.AppendUint64(o, (*z).encodedAssetParams.Total[zb0002])
- }
- }
- if (zb0007Mask & 0x400000) == 0 { // if not empty
- // string "tbm"
- o = append(o, 0xa3, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskTotal))
- }
- if (zb0007Mask & 0x800000) == 0 { // if not empty
- // string "un"
- o = append(o, 0xa2, 0x75, 0x6e)
- if (*z).encodedAssetParams.UnitName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetParams.UnitName)))
- }
- for zb0004 := range (*z).encodedAssetParams.UnitName {
- o = msgp.AppendString(o, (*z).encodedAssetParams.UnitName[zb0004])
- }
- }
- if (zb0007Mask & 0x1000000) == 0 { // if not empty
- // string "unbm"
- o = append(o, 0xa4, 0x75, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetParams.BitmaskUnitName))
- }
- }
- return
-}
-
-func (_ *encodedAssetConfigTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetConfigTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedAssetConfigTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0007 > 0 {
- zb0007--
- var zb0009 int
- var zb0010 bool
- zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0009 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0009), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0010 {
- (*z).ConfigAsset = nil
- } else if (*z).ConfigAsset != nil && cap((*z).ConfigAsset) >= zb0009 {
- (*z).ConfigAsset = ((*z).ConfigAsset)[:zb0009]
- } else {
- (*z).ConfigAsset = make([]basics.AssetIndex, zb0009)
- }
- for zb0001 := range (*z).ConfigAsset {
- bts, err = (*z).ConfigAsset[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset", zb0001)
- return
- }
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0011 []byte
- var zb0012 int
- zb0012, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- if zb0012 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(maxBitmaskSize))
- return
- }
- zb0011, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- (*z).BitmaskConfigAsset = bitmask(zb0011)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0013 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0014 {
- (*z).encodedAssetParams.Total = nil
- } else if (*z).encodedAssetParams.Total != nil && cap((*z).encodedAssetParams.Total) >= zb0013 {
- (*z).encodedAssetParams.Total = ((*z).encodedAssetParams.Total)[:zb0013]
- } else {
- (*z).encodedAssetParams.Total = make([]uint64, zb0013)
- }
- for zb0002 := range (*z).encodedAssetParams.Total {
- (*z).encodedAssetParams.Total[zb0002], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total", zb0002)
- return
- }
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0015 []byte
- var zb0016 int
- zb0016, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- if zb0016 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(maxBitmaskSize))
- return
- }
- zb0015, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- (*z).encodedAssetParams.BitmaskTotal = bitmask(zb0015)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0017 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0018 {
- (*z).encodedAssetParams.Decimals = nil
- } else if (*z).encodedAssetParams.Decimals != nil && cap((*z).encodedAssetParams.Decimals) >= zb0017 {
- (*z).encodedAssetParams.Decimals = ((*z).encodedAssetParams.Decimals)[:zb0017]
- } else {
- (*z).encodedAssetParams.Decimals = make([]uint32, zb0017)
- }
- for zb0003 := range (*z).encodedAssetParams.Decimals {
- (*z).encodedAssetParams.Decimals[zb0003], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals", zb0003)
- return
- }
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0019 []byte
- var zb0020 int
- zb0020, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- if zb0020 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(maxBitmaskSize))
- return
- }
- zb0019, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- (*z).encodedAssetParams.BitmaskDecimals = bitmask(zb0019)
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0021 []byte
- var zb0022 int
- zb0022, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- if zb0022 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxBitmaskSize))
- return
- }
- zb0021, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0021)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0023 int
- var zb0024 bool
- zb0023, zb0024, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0023 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0024 {
- (*z).encodedAssetParams.UnitName = nil
- } else if (*z).encodedAssetParams.UnitName != nil && cap((*z).encodedAssetParams.UnitName) >= zb0023 {
- (*z).encodedAssetParams.UnitName = ((*z).encodedAssetParams.UnitName)[:zb0023]
- } else {
- (*z).encodedAssetParams.UnitName = make([]string, zb0023)
- }
- for zb0004 := range (*z).encodedAssetParams.UnitName {
- (*z).encodedAssetParams.UnitName[zb0004], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName", zb0004)
- return
- }
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0025 []byte
- var zb0026 int
- zb0026, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- if zb0026 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(maxBitmaskSize))
- return
- }
- zb0025, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- (*z).encodedAssetParams.BitmaskUnitName = bitmask(zb0025)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0027 int
- var zb0028 bool
- zb0027, zb0028, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0027 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0027), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0028 {
- (*z).encodedAssetParams.AssetName = nil
- } else if (*z).encodedAssetParams.AssetName != nil && cap((*z).encodedAssetParams.AssetName) >= zb0027 {
- (*z).encodedAssetParams.AssetName = ((*z).encodedAssetParams.AssetName)[:zb0027]
- } else {
- (*z).encodedAssetParams.AssetName = make([]string, zb0027)
- }
- for zb0005 := range (*z).encodedAssetParams.AssetName {
- (*z).encodedAssetParams.AssetName[zb0005], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName", zb0005)
- return
- }
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0029 []byte
- var zb0030 int
- zb0030, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- if zb0030 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(maxBitmaskSize))
- return
- }
- zb0029, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- (*z).encodedAssetParams.BitmaskAssetName = bitmask(zb0029)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0031 int
- var zb0032 bool
- zb0031, zb0032, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0031 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0031), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0032 {
- (*z).encodedAssetParams.URL = nil
- } else if (*z).encodedAssetParams.URL != nil && cap((*z).encodedAssetParams.URL) >= zb0031 {
- (*z).encodedAssetParams.URL = ((*z).encodedAssetParams.URL)[:zb0031]
- } else {
- (*z).encodedAssetParams.URL = make([]string, zb0031)
- }
- for zb0006 := range (*z).encodedAssetParams.URL {
- (*z).encodedAssetParams.URL[zb0006], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL", zb0006)
- return
- }
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0033 []byte
- var zb0034 int
- zb0034, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- if zb0034 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0034), uint64(maxBitmaskSize))
- return
- }
- zb0033, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- (*z).encodedAssetParams.BitmaskURL = bitmask(zb0033)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0035 int
- zb0035, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- if zb0035 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0035), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0036 []byte
- var zb0037 int
- zb0037, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- if zb0037 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0037), uint64(maxBitmaskSize))
- return
- }
- zb0036, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- (*z).encodedAssetParams.BitmaskMetadataHash = bitmask(zb0036)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0038 int
- zb0038, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- if zb0038 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0038), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0039 []byte
- var zb0040 int
- zb0040, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- if zb0040 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0040), uint64(maxBitmaskSize))
- return
- }
- zb0039, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- (*z).encodedAssetParams.BitmaskManager = bitmask(zb0039)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0041 int
- zb0041, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- if zb0041 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0041), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0042 []byte
- var zb0043 int
- zb0043, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- if zb0043 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0043), uint64(maxBitmaskSize))
- return
- }
- zb0042, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- (*z).encodedAssetParams.BitmaskReserve = bitmask(zb0042)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0044 int
- zb0044, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- if zb0044 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0044), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0045 []byte
- var zb0046 int
- zb0046, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- if zb0046 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0046), uint64(maxBitmaskSize))
- return
- }
- zb0045, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- (*z).encodedAssetParams.BitmaskFreeze = bitmask(zb0045)
- }
- }
- if zb0007 > 0 {
- zb0007--
- var zb0047 int
- zb0047, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- if zb0047 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0047), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- }
- if zb0007 > 0 {
- zb0007--
- {
- var zb0048 []byte
- var zb0049 int
- zb0049, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- if zb0049 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0049), uint64(maxBitmaskSize))
- return
- }
- zb0048, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- (*z).encodedAssetParams.BitmaskClawback = bitmask(zb0048)
- }
- }
- if zb0007 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0007)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0008 {
- (*z) = encodedAssetConfigTxnFields{}
- }
- for zb0007 > 0 {
- zb0007--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "caid":
- var zb0050 int
- var zb0051 bool
- zb0050, zb0051, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0050 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0050), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0051 {
- (*z).ConfigAsset = nil
- } else if (*z).ConfigAsset != nil && cap((*z).ConfigAsset) >= zb0050 {
- (*z).ConfigAsset = ((*z).ConfigAsset)[:zb0050]
- } else {
- (*z).ConfigAsset = make([]basics.AssetIndex, zb0050)
- }
- for zb0001 := range (*z).ConfigAsset {
- bts, err = (*z).ConfigAsset[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset", zb0001)
- return
- }
- }
- case "caidbm":
- {
- var zb0052 []byte
- var zb0053 int
- zb0053, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- if zb0053 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0053), uint64(maxBitmaskSize))
- return
- }
- zb0052, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- (*z).BitmaskConfigAsset = bitmask(zb0052)
- }
- case "t":
- var zb0054 int
- var zb0055 bool
- zb0054, zb0055, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0054 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0055 {
- (*z).encodedAssetParams.Total = nil
- } else if (*z).encodedAssetParams.Total != nil && cap((*z).encodedAssetParams.Total) >= zb0054 {
- (*z).encodedAssetParams.Total = ((*z).encodedAssetParams.Total)[:zb0054]
- } else {
- (*z).encodedAssetParams.Total = make([]uint64, zb0054)
- }
- for zb0002 := range (*z).encodedAssetParams.Total {
- (*z).encodedAssetParams.Total[zb0002], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total", zb0002)
- return
- }
- }
- case "tbm":
- {
- var zb0056 []byte
- var zb0057 int
- zb0057, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- if zb0057 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0057), uint64(maxBitmaskSize))
- return
- }
- zb0056, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- (*z).encodedAssetParams.BitmaskTotal = bitmask(zb0056)
- }
- case "dc":
- var zb0058 int
- var zb0059 bool
- zb0058, zb0059, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0058 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0058), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0059 {
- (*z).encodedAssetParams.Decimals = nil
- } else if (*z).encodedAssetParams.Decimals != nil && cap((*z).encodedAssetParams.Decimals) >= zb0058 {
- (*z).encodedAssetParams.Decimals = ((*z).encodedAssetParams.Decimals)[:zb0058]
- } else {
- (*z).encodedAssetParams.Decimals = make([]uint32, zb0058)
- }
- for zb0003 := range (*z).encodedAssetParams.Decimals {
- (*z).encodedAssetParams.Decimals[zb0003], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals", zb0003)
- return
- }
- }
- case "dcbm":
- {
- var zb0060 []byte
- var zb0061 int
- zb0061, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- if zb0061 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0061), uint64(maxBitmaskSize))
- return
- }
- zb0060, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- (*z).encodedAssetParams.BitmaskDecimals = bitmask(zb0060)
- }
- case "dfbm":
- {
- var zb0062 []byte
- var zb0063 int
- zb0063, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- if zb0063 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0063), uint64(maxBitmaskSize))
- return
- }
- zb0062, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0062)
- }
- case "un":
- var zb0064 int
- var zb0065 bool
- zb0064, zb0065, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0064 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0064), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0065 {
- (*z).encodedAssetParams.UnitName = nil
- } else if (*z).encodedAssetParams.UnitName != nil && cap((*z).encodedAssetParams.UnitName) >= zb0064 {
- (*z).encodedAssetParams.UnitName = ((*z).encodedAssetParams.UnitName)[:zb0064]
- } else {
- (*z).encodedAssetParams.UnitName = make([]string, zb0064)
- }
- for zb0004 := range (*z).encodedAssetParams.UnitName {
- (*z).encodedAssetParams.UnitName[zb0004], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName", zb0004)
- return
- }
- }
- case "unbm":
- {
- var zb0066 []byte
- var zb0067 int
- zb0067, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- if zb0067 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0067), uint64(maxBitmaskSize))
- return
- }
- zb0066, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- (*z).encodedAssetParams.BitmaskUnitName = bitmask(zb0066)
- }
- case "an":
- var zb0068 int
- var zb0069 bool
- zb0068, zb0069, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0068 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0068), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0069 {
- (*z).encodedAssetParams.AssetName = nil
- } else if (*z).encodedAssetParams.AssetName != nil && cap((*z).encodedAssetParams.AssetName) >= zb0068 {
- (*z).encodedAssetParams.AssetName = ((*z).encodedAssetParams.AssetName)[:zb0068]
- } else {
- (*z).encodedAssetParams.AssetName = make([]string, zb0068)
- }
- for zb0005 := range (*z).encodedAssetParams.AssetName {
- (*z).encodedAssetParams.AssetName[zb0005], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName", zb0005)
- return
- }
- }
- case "anbm":
- {
- var zb0070 []byte
- var zb0071 int
- zb0071, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- if zb0071 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0071), uint64(maxBitmaskSize))
- return
- }
- zb0070, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- (*z).encodedAssetParams.BitmaskAssetName = bitmask(zb0070)
- }
- case "au":
- var zb0072 int
- var zb0073 bool
- zb0072, zb0073, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0072 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0072), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0073 {
- (*z).encodedAssetParams.URL = nil
- } else if (*z).encodedAssetParams.URL != nil && cap((*z).encodedAssetParams.URL) >= zb0072 {
- (*z).encodedAssetParams.URL = ((*z).encodedAssetParams.URL)[:zb0072]
- } else {
- (*z).encodedAssetParams.URL = make([]string, zb0072)
- }
- for zb0006 := range (*z).encodedAssetParams.URL {
- (*z).encodedAssetParams.URL[zb0006], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL", zb0006)
- return
- }
- }
- case "aubm":
- {
- var zb0074 []byte
- var zb0075 int
- zb0075, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- if zb0075 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0075), uint64(maxBitmaskSize))
- return
- }
- zb0074, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- (*z).encodedAssetParams.BitmaskURL = bitmask(zb0074)
- }
- case "am":
- var zb0076 int
- zb0076, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- if zb0076 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0076), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- case "ambm":
- {
- var zb0077 []byte
- var zb0078 int
- zb0078, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- if zb0078 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0078), uint64(maxBitmaskSize))
- return
- }
- zb0077, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- (*z).encodedAssetParams.BitmaskMetadataHash = bitmask(zb0077)
- }
- case "m":
- var zb0079 int
- zb0079, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- if zb0079 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0079), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- case "mbm":
- {
- var zb0080 []byte
- var zb0081 int
- zb0081, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- if zb0081 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0081), uint64(maxBitmaskSize))
- return
- }
- zb0080, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- (*z).encodedAssetParams.BitmaskManager = bitmask(zb0080)
- }
- case "r":
- var zb0082 int
- zb0082, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- if zb0082 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0082), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- case "rbm":
- {
- var zb0083 []byte
- var zb0084 int
- zb0084, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- if zb0084 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0084), uint64(maxBitmaskSize))
- return
- }
- zb0083, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- (*z).encodedAssetParams.BitmaskReserve = bitmask(zb0083)
- }
- case "f":
- var zb0085 int
- zb0085, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- if zb0085 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0085), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- case "fbm":
- {
- var zb0086 []byte
- var zb0087 int
- zb0087, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- if zb0087 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0087), uint64(maxBitmaskSize))
- return
- }
- zb0086, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- (*z).encodedAssetParams.BitmaskFreeze = bitmask(zb0086)
- }
- case "c":
- var zb0088 int
- zb0088, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- if zb0088 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0088), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- case "cbm":
- {
- var zb0089 []byte
- var zb0090 int
- zb0090, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- if zb0090 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0090), uint64(maxBitmaskSize))
- return
- }
- zb0089, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- (*z).encodedAssetParams.BitmaskClawback = bitmask(zb0089)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedAssetConfigTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetConfigTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedAssetConfigTxnFields) Msgsize() (s int) {
- s = 3 + 5 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).ConfigAsset {
- s += (*z).ConfigAsset[zb0001].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskConfigAsset)) + 2 + msgp.ArrayHeaderSize + (len((*z).encodedAssetParams.Total) * (msgp.Uint64Size)) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskTotal)) + 3 + msgp.ArrayHeaderSize + (len((*z).encodedAssetParams.Decimals) * (msgp.Uint32Size)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskDecimals)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskDefaultFrozen)) + 3 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).encodedAssetParams.UnitName {
- s += msgp.StringPrefixSize + len((*z).encodedAssetParams.UnitName[zb0004])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskUnitName)) + 3 + msgp.ArrayHeaderSize
- for zb0005 := range (*z).encodedAssetParams.AssetName {
- s += msgp.StringPrefixSize + len((*z).encodedAssetParams.AssetName[zb0005])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskAssetName)) + 3 + msgp.ArrayHeaderSize
- for zb0006 := range (*z).encodedAssetParams.URL {
- s += msgp.StringPrefixSize + len((*z).encodedAssetParams.URL[zb0006])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskURL)) + 3 + msgp.BytesPrefixSize + len((*z).encodedAssetParams.MetadataHash) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskMetadataHash)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetParams.Manager) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskManager)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetParams.Reserve) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskReserve)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetParams.Freeze) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskFreeze)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetParams.Clawback) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetParams.BitmaskClawback))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedAssetConfigTxnFields) MsgIsZero() bool {
- return (len((*z).ConfigAsset) == 0) && (len((*z).BitmaskConfigAsset) == 0) && (len((*z).encodedAssetParams.Total) == 0) && (len((*z).encodedAssetParams.BitmaskTotal) == 0) && (len((*z).encodedAssetParams.Decimals) == 0) && (len((*z).encodedAssetParams.BitmaskDecimals) == 0) && (len((*z).encodedAssetParams.BitmaskDefaultFrozen) == 0) && (len((*z).encodedAssetParams.UnitName) == 0) && (len((*z).encodedAssetParams.BitmaskUnitName) == 0) && (len((*z).encodedAssetParams.AssetName) == 0) && (len((*z).encodedAssetParams.BitmaskAssetName) == 0) && (len((*z).encodedAssetParams.URL) == 0) && (len((*z).encodedAssetParams.BitmaskURL) == 0) && (len((*z).encodedAssetParams.MetadataHash) == 0) && (len((*z).encodedAssetParams.BitmaskMetadataHash) == 0) && (len((*z).encodedAssetParams.Manager) == 0) && (len((*z).encodedAssetParams.BitmaskManager) == 0) && (len((*z).encodedAssetParams.Reserve) == 0) && (len((*z).encodedAssetParams.BitmaskReserve) == 0) && (len((*z).encodedAssetParams.Freeze) == 0) && (len((*z).encodedAssetParams.BitmaskFreeze) == 0) && (len((*z).encodedAssetParams.Clawback) == 0) && (len((*z).encodedAssetParams.BitmaskClawback) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedAssetFreezeTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0002Len := uint32(5)
- var zb0002Mask uint8 /* 6 bits */
- if len((*z).BitmaskAssetFrozen) == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
- }
- if len((*z).FreezeAccount) == 0 {
- zb0002Len--
- zb0002Mask |= 0x4
- }
- if len((*z).BitmaskFreezeAccount) == 0 {
- zb0002Len--
- zb0002Mask |= 0x8
- }
- if len((*z).FreezeAsset) == 0 {
- zb0002Len--
- zb0002Mask |= 0x10
- }
- if len((*z).BitmaskFreezeAsset) == 0 {
- zb0002Len--
- zb0002Mask |= 0x20
- }
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if zb0002Len != 0 {
- if (zb0002Mask & 0x2) == 0 { // if not empty
- // string "afrzbm"
- o = append(o, 0xa6, 0x61, 0x66, 0x72, 0x7a, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAssetFrozen))
- }
- if (zb0002Mask & 0x4) == 0 { // if not empty
- // string "fadd"
- o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64)
- o = msgp.AppendBytes(o, (*z).FreezeAccount)
- }
- if (zb0002Mask & 0x8) == 0 { // if not empty
- // string "faddbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x64, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskFreezeAccount))
- }
- if (zb0002Mask & 0x10) == 0 { // if not empty
- // string "faid"
- o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64)
- if (*z).FreezeAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).FreezeAsset)))
- }
- for zb0001 := range (*z).FreezeAsset {
- o = (*z).FreezeAsset[zb0001].MarshalMsg(o)
- }
- }
- if (zb0002Mask & 0x20) == 0 { // if not empty
- // string "faidbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskFreezeAsset))
- }
- }
- return
-}
-
-func (_ *encodedAssetFreezeTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetFreezeTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedAssetFreezeTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > 0 {
- zb0002--
- var zb0004 int
- zb0004, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- if zb0004 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(maxAddressBytes))
- return
- }
- (*z).FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- }
- if zb0002 > 0 {
- zb0002--
- {
- var zb0005 []byte
- var zb0006 int
- zb0006, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- if zb0006 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(maxBitmaskSize))
- return
- }
- zb0005, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- (*z).BitmaskFreezeAccount = bitmask(zb0005)
- }
- }
- if zb0002 > 0 {
- zb0002--
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0007 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0008 {
- (*z).FreezeAsset = nil
- } else if (*z).FreezeAsset != nil && cap((*z).FreezeAsset) >= zb0007 {
- (*z).FreezeAsset = ((*z).FreezeAsset)[:zb0007]
- } else {
- (*z).FreezeAsset = make([]basics.AssetIndex, zb0007)
- }
- for zb0001 := range (*z).FreezeAsset {
- bts, err = (*z).FreezeAsset[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset", zb0001)
- return
- }
- }
- }
- if zb0002 > 0 {
- zb0002--
- {
- var zb0009 []byte
- var zb0010 int
- zb0010, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- if zb0010 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(maxBitmaskSize))
- return
- }
- zb0009, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- (*z).BitmaskFreezeAsset = bitmask(zb0009)
- }
- }
- if zb0002 > 0 {
- zb0002--
- {
- var zb0011 []byte
- var zb0012 int
- zb0012, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- if zb0012 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(maxBitmaskSize))
- return
- }
- zb0011, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- (*z).BitmaskAssetFrozen = bitmask(zb0011)
- }
- }
- if zb0002 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0002)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = encodedAssetFreezeTxnFields{}
- }
- for zb0002 > 0 {
- zb0002--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "fadd":
- var zb0013 int
- zb0013, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- if zb0013 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(maxAddressBytes))
- return
- }
- (*z).FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- case "faddbm":
- {
- var zb0014 []byte
- var zb0015 int
- zb0015, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- if zb0015 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(maxBitmaskSize))
- return
- }
- zb0014, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- (*z).BitmaskFreezeAccount = bitmask(zb0014)
- }
- case "faid":
- var zb0016 int
- var zb0017 bool
- zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0016 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0017 {
- (*z).FreezeAsset = nil
- } else if (*z).FreezeAsset != nil && cap((*z).FreezeAsset) >= zb0016 {
- (*z).FreezeAsset = ((*z).FreezeAsset)[:zb0016]
- } else {
- (*z).FreezeAsset = make([]basics.AssetIndex, zb0016)
- }
- for zb0001 := range (*z).FreezeAsset {
- bts, err = (*z).FreezeAsset[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset", zb0001)
- return
- }
- }
- case "faidbm":
- {
- var zb0018 []byte
- var zb0019 int
- zb0019, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- if zb0019 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(maxBitmaskSize))
- return
- }
- zb0018, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- (*z).BitmaskFreezeAsset = bitmask(zb0018)
- }
- case "afrzbm":
- {
- var zb0020 []byte
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- if zb0021 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(maxBitmaskSize))
- return
- }
- zb0020, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- (*z).BitmaskAssetFrozen = bitmask(zb0020)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedAssetFreezeTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetFreezeTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedAssetFreezeTxnFields) Msgsize() (s int) {
- s = 1 + 5 + msgp.BytesPrefixSize + len((*z).FreezeAccount) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskFreezeAccount)) + 5 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).FreezeAsset {
- s += (*z).FreezeAsset[zb0001].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskFreezeAsset)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAssetFrozen))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedAssetFreezeTxnFields) MsgIsZero() bool {
- return (len((*z).FreezeAccount) == 0) && (len((*z).BitmaskFreezeAccount) == 0) && (len((*z).FreezeAsset) == 0) && (len((*z).BitmaskFreezeAsset) == 0) && (len((*z).BitmaskAssetFrozen) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedAssetParams) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0006Len := uint32(21)
- var zb0006Mask uint32 /* 22 bits */
- if len((*z).MetadataHash) == 0 {
- zb0006Len--
- zb0006Mask |= 0x2
- }
- if len((*z).BitmaskMetadataHash) == 0 {
- zb0006Len--
- zb0006Mask |= 0x4
- }
- if len((*z).AssetName) == 0 {
- zb0006Len--
- zb0006Mask |= 0x8
- }
- if len((*z).BitmaskAssetName) == 0 {
- zb0006Len--
- zb0006Mask |= 0x10
- }
- if len((*z).URL) == 0 {
- zb0006Len--
- zb0006Mask |= 0x20
- }
- if len((*z).BitmaskURL) == 0 {
- zb0006Len--
- zb0006Mask |= 0x40
- }
- if len((*z).Clawback) == 0 {
- zb0006Len--
- zb0006Mask |= 0x80
- }
- if len((*z).BitmaskClawback) == 0 {
- zb0006Len--
- zb0006Mask |= 0x100
- }
- if len((*z).Decimals) == 0 {
- zb0006Len--
- zb0006Mask |= 0x200
- }
- if len((*z).BitmaskDecimals) == 0 {
- zb0006Len--
- zb0006Mask |= 0x400
- }
- if len((*z).BitmaskDefaultFrozen) == 0 {
- zb0006Len--
- zb0006Mask |= 0x800
- }
- if len((*z).Freeze) == 0 {
- zb0006Len--
- zb0006Mask |= 0x1000
- }
- if len((*z).BitmaskFreeze) == 0 {
- zb0006Len--
- zb0006Mask |= 0x2000
- }
- if len((*z).Manager) == 0 {
- zb0006Len--
- zb0006Mask |= 0x4000
- }
- if len((*z).BitmaskManager) == 0 {
- zb0006Len--
- zb0006Mask |= 0x8000
- }
- if len((*z).Reserve) == 0 {
- zb0006Len--
- zb0006Mask |= 0x10000
- }
- if len((*z).BitmaskReserve) == 0 {
- zb0006Len--
- zb0006Mask |= 0x20000
- }
- if len((*z).Total) == 0 {
- zb0006Len--
- zb0006Mask |= 0x40000
- }
- if len((*z).BitmaskTotal) == 0 {
- zb0006Len--
- zb0006Mask |= 0x80000
- }
- if len((*z).UnitName) == 0 {
- zb0006Len--
- zb0006Mask |= 0x100000
- }
- if len((*z).BitmaskUnitName) == 0 {
- zb0006Len--
- zb0006Mask |= 0x200000
- }
- // variable map header, size zb0006Len
- o = msgp.AppendMapHeader(o, zb0006Len)
- if zb0006Len != 0 {
- if (zb0006Mask & 0x2) == 0 { // if not empty
- // string "am"
- o = append(o, 0xa2, 0x61, 0x6d)
- o = msgp.AppendBytes(o, (*z).MetadataHash)
- }
- if (zb0006Mask & 0x4) == 0 { // if not empty
- // string "ambm"
- o = append(o, 0xa4, 0x61, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskMetadataHash))
- }
- if (zb0006Mask & 0x8) == 0 { // if not empty
- // string "an"
- o = append(o, 0xa2, 0x61, 0x6e)
- if (*z).AssetName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).AssetName)))
- }
- for zb0004 := range (*z).AssetName {
- o = msgp.AppendString(o, (*z).AssetName[zb0004])
- }
- }
- if (zb0006Mask & 0x10) == 0 { // if not empty
- // string "anbm"
- o = append(o, 0xa4, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAssetName))
- }
- if (zb0006Mask & 0x20) == 0 { // if not empty
- // string "au"
- o = append(o, 0xa2, 0x61, 0x75)
- if (*z).URL == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).URL)))
- }
- for zb0005 := range (*z).URL {
- o = msgp.AppendString(o, (*z).URL[zb0005])
- }
- }
- if (zb0006Mask & 0x40) == 0 { // if not empty
- // string "aubm"
- o = append(o, 0xa4, 0x61, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskURL))
- }
- if (zb0006Mask & 0x80) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendBytes(o, (*z).Clawback)
- }
- if (zb0006Mask & 0x100) == 0 { // if not empty
- // string "cbm"
- o = append(o, 0xa3, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskClawback))
- }
- if (zb0006Mask & 0x200) == 0 { // if not empty
- // string "dc"
- o = append(o, 0xa2, 0x64, 0x63)
- if (*z).Decimals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Decimals)))
- }
- for zb0002 := range (*z).Decimals {
- o = msgp.AppendUint32(o, (*z).Decimals[zb0002])
- }
- }
- if (zb0006Mask & 0x400) == 0 { // if not empty
- // string "dcbm"
- o = append(o, 0xa4, 0x64, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskDecimals))
- }
- if (zb0006Mask & 0x800) == 0 { // if not empty
- // string "dfbm"
- o = append(o, 0xa4, 0x64, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskDefaultFrozen))
- }
- if (zb0006Mask & 0x1000) == 0 { // if not empty
- // string "f"
- o = append(o, 0xa1, 0x66)
- o = msgp.AppendBytes(o, (*z).Freeze)
- }
- if (zb0006Mask & 0x2000) == 0 { // if not empty
- // string "fbm"
- o = append(o, 0xa3, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskFreeze))
- }
- if (zb0006Mask & 0x4000) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendBytes(o, (*z).Manager)
- }
- if (zb0006Mask & 0x8000) == 0 { // if not empty
- // string "mbm"
- o = append(o, 0xa3, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskManager))
- }
- if (zb0006Mask & 0x10000) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- o = msgp.AppendBytes(o, (*z).Reserve)
- }
- if (zb0006Mask & 0x20000) == 0 { // if not empty
- // string "rbm"
- o = append(o, 0xa3, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskReserve))
- }
- if (zb0006Mask & 0x40000) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- if (*z).Total == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Total)))
- }
- for zb0001 := range (*z).Total {
- o = msgp.AppendUint64(o, (*z).Total[zb0001])
- }
- }
- if (zb0006Mask & 0x80000) == 0 { // if not empty
- // string "tbm"
- o = append(o, 0xa3, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskTotal))
- }
- if (zb0006Mask & 0x100000) == 0 { // if not empty
- // string "un"
- o = append(o, 0xa2, 0x75, 0x6e)
- if (*z).UnitName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).UnitName)))
- }
- for zb0003 := range (*z).UnitName {
- o = msgp.AppendString(o, (*z).UnitName[zb0003])
- }
- }
- if (zb0006Mask & 0x200000) == 0 { // if not empty
- // string "unbm"
- o = append(o, 0xa4, 0x75, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskUnitName))
- }
- }
- return
-}
-
-func (_ *encodedAssetParams) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetParams)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedAssetParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0006 > 0 {
- zb0006--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0008 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0009 {
- (*z).Total = nil
- } else if (*z).Total != nil && cap((*z).Total) >= zb0008 {
- (*z).Total = ((*z).Total)[:zb0008]
- } else {
- (*z).Total = make([]uint64, zb0008)
- }
- for zb0001 := range (*z).Total {
- (*z).Total[zb0001], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total", zb0001)
- return
- }
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0010 []byte
- var zb0011 int
- zb0011, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- if zb0011 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(maxBitmaskSize))
- return
- }
- zb0010, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- (*z).BitmaskTotal = bitmask(zb0010)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0012 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0013 {
- (*z).Decimals = nil
- } else if (*z).Decimals != nil && cap((*z).Decimals) >= zb0012 {
- (*z).Decimals = ((*z).Decimals)[:zb0012]
- } else {
- (*z).Decimals = make([]uint32, zb0012)
- }
- for zb0002 := range (*z).Decimals {
- (*z).Decimals[zb0002], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals", zb0002)
- return
- }
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0014 []byte
- var zb0015 int
- zb0015, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- if zb0015 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(maxBitmaskSize))
- return
- }
- zb0014, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- (*z).BitmaskDecimals = bitmask(zb0014)
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0016 []byte
- var zb0017 int
- zb0017, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- if zb0017 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxBitmaskSize))
- return
- }
- zb0016, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- (*z).BitmaskDefaultFrozen = bitmask(zb0016)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0018 int
- var zb0019 bool
- zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0018 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0018), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0019 {
- (*z).UnitName = nil
- } else if (*z).UnitName != nil && cap((*z).UnitName) >= zb0018 {
- (*z).UnitName = ((*z).UnitName)[:zb0018]
- } else {
- (*z).UnitName = make([]string, zb0018)
- }
- for zb0003 := range (*z).UnitName {
- (*z).UnitName[zb0003], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName", zb0003)
- return
- }
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0020 []byte
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- if zb0021 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(maxBitmaskSize))
- return
- }
- zb0020, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- (*z).BitmaskUnitName = bitmask(zb0020)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0022 int
- var zb0023 bool
- zb0022, zb0023, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0022 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0023 {
- (*z).AssetName = nil
- } else if (*z).AssetName != nil && cap((*z).AssetName) >= zb0022 {
- (*z).AssetName = ((*z).AssetName)[:zb0022]
- } else {
- (*z).AssetName = make([]string, zb0022)
- }
- for zb0004 := range (*z).AssetName {
- (*z).AssetName[zb0004], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName", zb0004)
- return
- }
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0024 []byte
- var zb0025 int
- zb0025, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- if zb0025 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(maxBitmaskSize))
- return
- }
- zb0024, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- (*z).BitmaskAssetName = bitmask(zb0024)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0026 int
- var zb0027 bool
- zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0026 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0027 {
- (*z).URL = nil
- } else if (*z).URL != nil && cap((*z).URL) >= zb0026 {
- (*z).URL = ((*z).URL)[:zb0026]
- } else {
- (*z).URL = make([]string, zb0026)
- }
- for zb0005 := range (*z).URL {
- (*z).URL[zb0005], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL", zb0005)
- return
- }
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0028 []byte
- var zb0029 int
- zb0029, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- if zb0029 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0029), uint64(maxBitmaskSize))
- return
- }
- zb0028, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- (*z).BitmaskURL = bitmask(zb0028)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0030 int
- zb0030, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- if zb0030 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(maxAddressBytes))
- return
- }
- (*z).MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0031 []byte
- var zb0032 int
- zb0032, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- if zb0032 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0032), uint64(maxBitmaskSize))
- return
- }
- zb0031, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- (*z).BitmaskMetadataHash = bitmask(zb0031)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0033 int
- zb0033, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- if zb0033 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0033), uint64(maxAddressBytes))
- return
- }
- (*z).Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).Manager)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0034 []byte
- var zb0035 int
- zb0035, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- if zb0035 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0035), uint64(maxBitmaskSize))
- return
- }
- zb0034, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- (*z).BitmaskManager = bitmask(zb0034)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0036 int
- zb0036, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- if zb0036 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0036), uint64(maxAddressBytes))
- return
- }
- (*z).Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).Reserve)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0037 []byte
- var zb0038 int
- zb0038, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- if zb0038 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0038), uint64(maxBitmaskSize))
- return
- }
- zb0037, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- (*z).BitmaskReserve = bitmask(zb0037)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0039 int
- zb0039, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- if zb0039 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0039), uint64(maxAddressBytes))
- return
- }
- (*z).Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).Freeze)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0040 []byte
- var zb0041 int
- zb0041, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- if zb0041 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0041), uint64(maxBitmaskSize))
- return
- }
- zb0040, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- (*z).BitmaskFreeze = bitmask(zb0040)
- }
- }
- if zb0006 > 0 {
- zb0006--
- var zb0042 int
- zb0042, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- if zb0042 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0042), uint64(maxAddressBytes))
- return
- }
- (*z).Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).Clawback)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- }
- if zb0006 > 0 {
- zb0006--
- {
- var zb0043 []byte
- var zb0044 int
- zb0044, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- if zb0044 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0044), uint64(maxBitmaskSize))
- return
- }
- zb0043, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- (*z).BitmaskClawback = bitmask(zb0043)
- }
- }
- if zb0006 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0006)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0007 {
- (*z) = encodedAssetParams{}
- }
- for zb0006 > 0 {
- zb0006--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "t":
- var zb0045 int
- var zb0046 bool
- zb0045, zb0046, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0045 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0045), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0046 {
- (*z).Total = nil
- } else if (*z).Total != nil && cap((*z).Total) >= zb0045 {
- (*z).Total = ((*z).Total)[:zb0045]
- } else {
- (*z).Total = make([]uint64, zb0045)
- }
- for zb0001 := range (*z).Total {
- (*z).Total[zb0001], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total", zb0001)
- return
- }
- }
- case "tbm":
- {
- var zb0047 []byte
- var zb0048 int
- zb0048, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- if zb0048 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0048), uint64(maxBitmaskSize))
- return
- }
- zb0047, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- (*z).BitmaskTotal = bitmask(zb0047)
- }
- case "dc":
- var zb0049 int
- var zb0050 bool
- zb0049, zb0050, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0049 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0049), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0050 {
- (*z).Decimals = nil
- } else if (*z).Decimals != nil && cap((*z).Decimals) >= zb0049 {
- (*z).Decimals = ((*z).Decimals)[:zb0049]
- } else {
- (*z).Decimals = make([]uint32, zb0049)
- }
- for zb0002 := range (*z).Decimals {
- (*z).Decimals[zb0002], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals", zb0002)
- return
- }
- }
- case "dcbm":
- {
- var zb0051 []byte
- var zb0052 int
- zb0052, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- if zb0052 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0052), uint64(maxBitmaskSize))
- return
- }
- zb0051, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- (*z).BitmaskDecimals = bitmask(zb0051)
- }
- case "dfbm":
- {
- var zb0053 []byte
- var zb0054 int
- zb0054, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- if zb0054 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(maxBitmaskSize))
- return
- }
- zb0053, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- (*z).BitmaskDefaultFrozen = bitmask(zb0053)
- }
- case "un":
- var zb0055 int
- var zb0056 bool
- zb0055, zb0056, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0055 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0055), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0056 {
- (*z).UnitName = nil
- } else if (*z).UnitName != nil && cap((*z).UnitName) >= zb0055 {
- (*z).UnitName = ((*z).UnitName)[:zb0055]
- } else {
- (*z).UnitName = make([]string, zb0055)
- }
- for zb0003 := range (*z).UnitName {
- (*z).UnitName[zb0003], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName", zb0003)
- return
- }
- }
- case "unbm":
- {
- var zb0057 []byte
- var zb0058 int
- zb0058, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- if zb0058 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0058), uint64(maxBitmaskSize))
- return
- }
- zb0057, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- (*z).BitmaskUnitName = bitmask(zb0057)
- }
- case "an":
- var zb0059 int
- var zb0060 bool
- zb0059, zb0060, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0059 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0059), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0060 {
- (*z).AssetName = nil
- } else if (*z).AssetName != nil && cap((*z).AssetName) >= zb0059 {
- (*z).AssetName = ((*z).AssetName)[:zb0059]
- } else {
- (*z).AssetName = make([]string, zb0059)
- }
- for zb0004 := range (*z).AssetName {
- (*z).AssetName[zb0004], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName", zb0004)
- return
- }
- }
- case "anbm":
- {
- var zb0061 []byte
- var zb0062 int
- zb0062, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- if zb0062 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0062), uint64(maxBitmaskSize))
- return
- }
- zb0061, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- (*z).BitmaskAssetName = bitmask(zb0061)
- }
- case "au":
- var zb0063 int
- var zb0064 bool
- zb0063, zb0064, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0063 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0063), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0064 {
- (*z).URL = nil
- } else if (*z).URL != nil && cap((*z).URL) >= zb0063 {
- (*z).URL = ((*z).URL)[:zb0063]
- } else {
- (*z).URL = make([]string, zb0063)
- }
- for zb0005 := range (*z).URL {
- (*z).URL[zb0005], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL", zb0005)
- return
- }
- }
- case "aubm":
- {
- var zb0065 []byte
- var zb0066 int
- zb0066, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- if zb0066 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0066), uint64(maxBitmaskSize))
- return
- }
- zb0065, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- (*z).BitmaskURL = bitmask(zb0065)
- }
- case "am":
- var zb0067 int
- zb0067, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- if zb0067 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0067), uint64(maxAddressBytes))
- return
- }
- (*z).MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- case "ambm":
- {
- var zb0068 []byte
- var zb0069 int
- zb0069, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- if zb0069 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0069), uint64(maxBitmaskSize))
- return
- }
- zb0068, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- (*z).BitmaskMetadataHash = bitmask(zb0068)
- }
- case "m":
- var zb0070 int
- zb0070, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- if zb0070 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0070), uint64(maxAddressBytes))
- return
- }
- (*z).Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).Manager)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- case "mbm":
- {
- var zb0071 []byte
- var zb0072 int
- zb0072, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- if zb0072 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0072), uint64(maxBitmaskSize))
- return
- }
- zb0071, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- (*z).BitmaskManager = bitmask(zb0071)
- }
- case "r":
- var zb0073 int
- zb0073, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- if zb0073 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0073), uint64(maxAddressBytes))
- return
- }
- (*z).Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).Reserve)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- case "rbm":
- {
- var zb0074 []byte
- var zb0075 int
- zb0075, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- if zb0075 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0075), uint64(maxBitmaskSize))
- return
- }
- zb0074, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- (*z).BitmaskReserve = bitmask(zb0074)
- }
- case "f":
- var zb0076 int
- zb0076, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- if zb0076 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0076), uint64(maxAddressBytes))
- return
- }
- (*z).Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).Freeze)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- case "fbm":
- {
- var zb0077 []byte
- var zb0078 int
- zb0078, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- if zb0078 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0078), uint64(maxBitmaskSize))
- return
- }
- zb0077, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- (*z).BitmaskFreeze = bitmask(zb0077)
- }
- case "c":
- var zb0079 int
- zb0079, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- if zb0079 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0079), uint64(maxAddressBytes))
- return
- }
- (*z).Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).Clawback)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- case "cbm":
- {
- var zb0080 []byte
- var zb0081 int
- zb0081, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- if zb0081 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0081), uint64(maxBitmaskSize))
- return
- }
- zb0080, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- (*z).BitmaskClawback = bitmask(zb0080)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedAssetParams) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetParams)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedAssetParams) Msgsize() (s int) {
- s = 3 + 2 + msgp.ArrayHeaderSize + (len((*z).Total) * (msgp.Uint64Size)) + 4 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskTotal)) + 3 + msgp.ArrayHeaderSize + (len((*z).Decimals) * (msgp.Uint32Size)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskDecimals)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskDefaultFrozen)) + 3 + msgp.ArrayHeaderSize
- for zb0003 := range (*z).UnitName {
- s += msgp.StringPrefixSize + len((*z).UnitName[zb0003])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskUnitName)) + 3 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).AssetName {
- s += msgp.StringPrefixSize + len((*z).AssetName[zb0004])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAssetName)) + 3 + msgp.ArrayHeaderSize
- for zb0005 := range (*z).URL {
- s += msgp.StringPrefixSize + len((*z).URL[zb0005])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskURL)) + 3 + msgp.BytesPrefixSize + len((*z).MetadataHash) + 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskMetadataHash)) + 2 + msgp.BytesPrefixSize + len((*z).Manager) + 4 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskManager)) + 2 + msgp.BytesPrefixSize + len((*z).Reserve) + 4 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskReserve)) + 2 + msgp.BytesPrefixSize + len((*z).Freeze) + 4 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskFreeze)) + 2 + msgp.BytesPrefixSize + len((*z).Clawback) + 4 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskClawback))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedAssetParams) MsgIsZero() bool {
- return (len((*z).Total) == 0) && (len((*z).BitmaskTotal) == 0) && (len((*z).Decimals) == 0) && (len((*z).BitmaskDecimals) == 0) && (len((*z).BitmaskDefaultFrozen) == 0) && (len((*z).UnitName) == 0) && (len((*z).BitmaskUnitName) == 0) && (len((*z).AssetName) == 0) && (len((*z).BitmaskAssetName) == 0) && (len((*z).URL) == 0) && (len((*z).BitmaskURL) == 0) && (len((*z).MetadataHash) == 0) && (len((*z).BitmaskMetadataHash) == 0) && (len((*z).Manager) == 0) && (len((*z).BitmaskManager) == 0) && (len((*z).Reserve) == 0) && (len((*z).BitmaskReserve) == 0) && (len((*z).Freeze) == 0) && (len((*z).BitmaskFreeze) == 0) && (len((*z).Clawback) == 0) && (len((*z).BitmaskClawback) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedAssetTransferTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0003Len := uint32(10)
- var zb0003Mask uint16 /* 11 bits */
- if len((*z).AssetAmount) == 0 {
- zb0003Len--
- zb0003Mask |= 0x2
- }
- if len((*z).BitmaskAssetAmount) == 0 {
- zb0003Len--
- zb0003Mask |= 0x4
- }
- if len((*z).AssetCloseTo) == 0 {
- zb0003Len--
- zb0003Mask |= 0x8
- }
- if len((*z).BitmaskAssetCloseTo) == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
- }
- if len((*z).AssetReceiver) == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
- }
- if len((*z).BitmaskAssetReceiver) == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
- }
- if len((*z).AssetSender) == 0 {
- zb0003Len--
- zb0003Mask |= 0x80
- }
- if len((*z).BitmaskAssetSender) == 0 {
- zb0003Len--
- zb0003Mask |= 0x100
- }
- if len((*z).XferAsset) == 0 {
- zb0003Len--
- zb0003Mask |= 0x200
- }
- if len((*z).BitmaskXferAsset) == 0 {
- zb0003Len--
- zb0003Mask |= 0x400
- }
- // variable map header, size zb0003Len
- o = append(o, 0x80|uint8(zb0003Len))
- if zb0003Len != 0 {
- if (zb0003Mask & 0x2) == 0 { // if not empty
- // string "aamt"
- o = append(o, 0xa4, 0x61, 0x61, 0x6d, 0x74)
- if (*z).AssetAmount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).AssetAmount)))
- }
- for zb0002 := range (*z).AssetAmount {
- o = msgp.AppendUint64(o, (*z).AssetAmount[zb0002])
- }
- }
- if (zb0003Mask & 0x4) == 0 { // if not empty
- // string "aamtbm"
- o = append(o, 0xa6, 0x61, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAssetAmount))
- }
- if (zb0003Mask & 0x8) == 0 { // if not empty
- // string "aclose"
- o = append(o, 0xa6, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).AssetCloseTo)
- }
- if (zb0003Mask & 0x10) == 0 { // if not empty
- // string "aclosebm"
- o = append(o, 0xa8, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAssetCloseTo))
- }
- if (zb0003Mask & 0x20) == 0 { // if not empty
- // string "arcv"
- o = append(o, 0xa4, 0x61, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).AssetReceiver)
- }
- if (zb0003Mask & 0x40) == 0 { // if not empty
- // string "arcvbm"
- o = append(o, 0xa6, 0x61, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAssetReceiver))
- }
- if (zb0003Mask & 0x80) == 0 { // if not empty
- // string "asnd"
- o = append(o, 0xa4, 0x61, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).AssetSender)
- }
- if (zb0003Mask & 0x100) == 0 { // if not empty
- // string "asndbm"
- o = append(o, 0xa6, 0x61, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAssetSender))
- }
- if (zb0003Mask & 0x200) == 0 { // if not empty
- // string "xaid"
- o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64)
- if (*z).XferAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).XferAsset)))
- }
- for zb0001 := range (*z).XferAsset {
- o = (*z).XferAsset[zb0001].MarshalMsg(o)
- }
- }
- if (zb0003Mask & 0x400) == 0 { // if not empty
- // string "xaidbm"
- o = append(o, 0xa6, 0x78, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskXferAsset))
- }
- }
- return
-}
-
-func (_ *encodedAssetTransferTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetTransferTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedAssetTransferTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0005 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0006 {
- (*z).XferAsset = nil
- } else if (*z).XferAsset != nil && cap((*z).XferAsset) >= zb0005 {
- (*z).XferAsset = ((*z).XferAsset)[:zb0005]
- } else {
- (*z).XferAsset = make([]basics.AssetIndex, zb0005)
- }
- for zb0001 := range (*z).XferAsset {
- bts, err = (*z).XferAsset[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset", zb0001)
- return
- }
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0007 []byte
- var zb0008 int
- zb0008, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- if zb0008 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(maxBitmaskSize))
- return
- }
- zb0007, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- (*z).BitmaskXferAsset = bitmask(zb0007)
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0009 int
- var zb0010 bool
- zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0009 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0009), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0010 {
- (*z).AssetAmount = nil
- } else if (*z).AssetAmount != nil && cap((*z).AssetAmount) >= zb0009 {
- (*z).AssetAmount = ((*z).AssetAmount)[:zb0009]
- } else {
- (*z).AssetAmount = make([]uint64, zb0009)
- }
- for zb0002 := range (*z).AssetAmount {
- (*z).AssetAmount[zb0002], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount", zb0002)
- return
- }
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0011 []byte
- var zb0012 int
- zb0012, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- if zb0012 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(maxBitmaskSize))
- return
- }
- zb0011, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- (*z).BitmaskAssetAmount = bitmask(zb0011)
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0013 int
- zb0013, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- if zb0013 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(maxAddressBytes))
- return
- }
- (*z).AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0014 []byte
- var zb0015 int
- zb0015, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- if zb0015 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(maxBitmaskSize))
- return
- }
- zb0014, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- (*z).BitmaskAssetSender = bitmask(zb0014)
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0016 int
- zb0016, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- if zb0016 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(maxAddressBytes))
- return
- }
- (*z).AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0017 []byte
- var zb0018 int
- zb0018, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- if zb0018 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0018), uint64(maxBitmaskSize))
- return
- }
- zb0017, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- (*z).BitmaskAssetReceiver = bitmask(zb0017)
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0019 int
- zb0019, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- if zb0019 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(maxAddressBytes))
- return
- }
- (*z).AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0020 []byte
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- if zb0021 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(maxBitmaskSize))
- return
- }
- zb0020, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- (*z).BitmaskAssetCloseTo = bitmask(zb0020)
- }
- }
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0004 {
- (*z) = encodedAssetTransferTxnFields{}
- }
- for zb0003 > 0 {
- zb0003--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "xaid":
- var zb0022 int
- var zb0023 bool
- zb0022, zb0023, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0022 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0023 {
- (*z).XferAsset = nil
- } else if (*z).XferAsset != nil && cap((*z).XferAsset) >= zb0022 {
- (*z).XferAsset = ((*z).XferAsset)[:zb0022]
- } else {
- (*z).XferAsset = make([]basics.AssetIndex, zb0022)
- }
- for zb0001 := range (*z).XferAsset {
- bts, err = (*z).XferAsset[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset", zb0001)
- return
- }
- }
- case "xaidbm":
- {
- var zb0024 []byte
- var zb0025 int
- zb0025, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- if zb0025 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(maxBitmaskSize))
- return
- }
- zb0024, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- (*z).BitmaskXferAsset = bitmask(zb0024)
- }
- case "aamt":
- var zb0026 int
- var zb0027 bool
- zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0026 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0027 {
- (*z).AssetAmount = nil
- } else if (*z).AssetAmount != nil && cap((*z).AssetAmount) >= zb0026 {
- (*z).AssetAmount = ((*z).AssetAmount)[:zb0026]
- } else {
- (*z).AssetAmount = make([]uint64, zb0026)
- }
- for zb0002 := range (*z).AssetAmount {
- (*z).AssetAmount[zb0002], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount", zb0002)
- return
- }
- }
- case "aamtbm":
- {
- var zb0028 []byte
- var zb0029 int
- zb0029, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- if zb0029 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0029), uint64(maxBitmaskSize))
- return
- }
- zb0028, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- (*z).BitmaskAssetAmount = bitmask(zb0028)
- }
- case "asnd":
- var zb0030 int
- zb0030, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- if zb0030 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(maxAddressBytes))
- return
- }
- (*z).AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- case "asndbm":
- {
- var zb0031 []byte
- var zb0032 int
- zb0032, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- if zb0032 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0032), uint64(maxBitmaskSize))
- return
- }
- zb0031, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- (*z).BitmaskAssetSender = bitmask(zb0031)
- }
- case "arcv":
- var zb0033 int
- zb0033, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- if zb0033 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0033), uint64(maxAddressBytes))
- return
- }
- (*z).AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- case "arcvbm":
- {
- var zb0034 []byte
- var zb0035 int
- zb0035, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- if zb0035 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0035), uint64(maxBitmaskSize))
- return
- }
- zb0034, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- (*z).BitmaskAssetReceiver = bitmask(zb0034)
- }
- case "aclose":
- var zb0036 int
- zb0036, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- if zb0036 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0036), uint64(maxAddressBytes))
- return
- }
- (*z).AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- case "aclosebm":
- {
- var zb0037 []byte
- var zb0038 int
- zb0038, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- if zb0038 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0038), uint64(maxBitmaskSize))
- return
- }
- zb0037, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- (*z).BitmaskAssetCloseTo = bitmask(zb0037)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedAssetTransferTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedAssetTransferTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedAssetTransferTxnFields) Msgsize() (s int) {
- s = 1 + 5 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).XferAsset {
- s += (*z).XferAsset[zb0001].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskXferAsset)) + 5 + msgp.ArrayHeaderSize + (len((*z).AssetAmount) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAssetAmount)) + 5 + msgp.BytesPrefixSize + len((*z).AssetSender) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAssetSender)) + 5 + msgp.BytesPrefixSize + len((*z).AssetReceiver) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAssetReceiver)) + 7 + msgp.BytesPrefixSize + len((*z).AssetCloseTo) + 9 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAssetCloseTo))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedAssetTransferTxnFields) MsgIsZero() bool {
- return (len((*z).XferAsset) == 0) && (len((*z).BitmaskXferAsset) == 0) && (len((*z).AssetAmount) == 0) && (len((*z).BitmaskAssetAmount) == 0) && (len((*z).AssetSender) == 0) && (len((*z).BitmaskAssetSender) == 0) && (len((*z).AssetReceiver) == 0) && (len((*z).BitmaskAssetReceiver) == 0) && (len((*z).AssetCloseTo) == 0) && (len((*z).BitmaskAssetCloseTo) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedBloomFilter) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(4)
- var zb0001Mask uint8 /* 5 bits */
- if (*z).ClearPrevious == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if len((*z).BloomFilter) == 0 {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if ((*z).EncodingParams.Offset == 0) && ((*z).EncodingParams.Modulator == 0) {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- if (*z).BloomFilterType == 0 {
- zb0001Len--
- zb0001Mask |= 0x10
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendByte(o, (*z).ClearPrevious)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "f"
- o = append(o, 0xa1, 0x66)
- o = msgp.AppendBytes(o, (*z).BloomFilter)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "p"
- o = append(o, 0xa1, 0x70)
- // omitempty: check for empty values
- zb0002Len := uint32(2)
- var zb0002Mask uint8 /* 3 bits */
- if (*z).EncodingParams.Modulator == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
- }
- if (*z).EncodingParams.Offset == 0 {
- zb0002Len--
- zb0002Mask |= 0x4
- }
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if (zb0002Mask & 0x2) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendByte(o, (*z).EncodingParams.Modulator)
- }
- if (zb0002Mask & 0x4) == 0 { // if not empty
- // string "o"
- o = append(o, 0xa1, 0x6f)
- o = msgp.AppendByte(o, (*z).EncodingParams.Offset)
- }
- }
- if (zb0001Mask & 0x10) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- o = msgp.AppendByte(o, (*z).BloomFilterType)
- }
- }
- return
-}
-
-func (_ *encodedBloomFilter) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedBloomFilter)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedBloomFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- (*z).BloomFilterType, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BloomFilterType")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams")
- return
- }
- if zb0003 > 0 {
- zb0003--
- (*z).EncodingParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams", "struct-from-array", "Offset")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- (*z).EncodingParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams", "struct-from-array", "Modulator")
- return
- }
- }
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams", "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams")
- return
- }
- if zb0004 {
- (*z).EncodingParams = requestParams{}
- }
- for zb0003 > 0 {
- zb0003--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams")
- return
- }
- switch string(field) {
- case "o":
- (*z).EncodingParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams", "Offset")
- return
- }
- case "m":
- (*z).EncodingParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams", "Modulator")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "EncodingParams")
- return
- }
- }
- }
- }
- }
- if zb0001 > 0 {
- zb0001--
- var zb0005 int
- zb0005, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BloomFilter")
- return
- }
- if zb0005 > maxBloomFilterSize {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(maxBloomFilterSize))
- return
- }
- (*z).BloomFilter, bts, err = msgp.ReadBytesBytes(bts, (*z).BloomFilter)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BloomFilter")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).ClearPrevious, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearPrevious")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = encodedBloomFilter{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "t":
- (*z).BloomFilterType, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "BloomFilterType")
- return
- }
- case "p":
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams")
- return
- }
- if zb0006 > 0 {
- zb0006--
- (*z).EncodingParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams", "struct-from-array", "Offset")
- return
- }
- }
- if zb0006 > 0 {
- zb0006--
- (*z).EncodingParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams", "struct-from-array", "Modulator")
- return
- }
- }
- if zb0006 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0006)
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams", "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams")
- return
- }
- if zb0007 {
- (*z).EncodingParams = requestParams{}
- }
- for zb0006 > 0 {
- zb0006--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams")
- return
- }
- switch string(field) {
- case "o":
- (*z).EncodingParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams", "Offset")
- return
- }
- case "m":
- (*z).EncodingParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams", "Modulator")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "EncodingParams")
- return
- }
- }
- }
- }
- case "f":
- var zb0008 int
- zb0008, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BloomFilter")
- return
- }
- if zb0008 > maxBloomFilterSize {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(maxBloomFilterSize))
- return
- }
- (*z).BloomFilter, bts, err = msgp.ReadBytesBytes(bts, (*z).BloomFilter)
- if err != nil {
- err = msgp.WrapError(err, "BloomFilter")
- return
- }
- case "c":
- (*z).ClearPrevious, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearPrevious")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedBloomFilter) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedBloomFilter)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedBloomFilter) Msgsize() (s int) {
- s = 1 + 2 + msgp.ByteSize + 2 + 1 + 2 + msgp.ByteSize + 2 + msgp.ByteSize + 2 + msgp.BytesPrefixSize + len((*z).BloomFilter) + 2 + msgp.ByteSize
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedBloomFilter) MsgIsZero() bool {
- return ((*z).BloomFilterType == 0) && (((*z).EncodingParams.Offset == 0) && ((*z).EncodingParams.Modulator == 0)) && (len((*z).BloomFilter) == 0) && ((*z).ClearPrevious == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedCert) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0009Len := uint32(10)
- var zb0009Mask uint16 /* 11 bits */
- if len((*z).PartProofs) == 0 {
- zb0009Len--
- zb0009Mask |= 0x2
- }
- if len((*z).BitmaskPartProofs) == 0 {
- zb0009Len--
- zb0009Mask |= 0x4
- }
- if len((*z).SigProofs) == 0 {
- zb0009Len--
- zb0009Mask |= 0x8
- }
- if len((*z).BitmaskSigProofs) == 0 {
- zb0009Len--
- zb0009Mask |= 0x10
- }
- if len((*z).SigCommit) == 0 {
- zb0009Len--
- zb0009Mask |= 0x20
- }
- if len((*z).BitmaskSigCommit) == 0 {
- zb0009Len--
- zb0009Mask |= 0x40
- }
- if len((*z).Reveals) == 0 {
- zb0009Len--
- zb0009Mask |= 0x80
- }
- if len((*z).BitmaskReveals) == 0 {
- zb0009Len--
- zb0009Mask |= 0x100
- }
- if len((*z).SignedWeight) == 0 {
- zb0009Len--
- zb0009Mask |= 0x200
- }
- if len((*z).BitmaskSignedWeight) == 0 {
- zb0009Len--
- zb0009Mask |= 0x400
- }
- // variable map header, size zb0009Len
- o = append(o, 0x80|uint8(zb0009Len))
- if zb0009Len != 0 {
- if (zb0009Mask & 0x2) == 0 { // if not empty
- // string "certP"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x50)
- if (*z).PartProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).PartProofs)))
- }
- for zb0004 := range (*z).PartProofs {
- if (*z).PartProofs[zb0004] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).PartProofs[zb0004])))
- }
- for zb0005 := range (*z).PartProofs[zb0004] {
- o = (*z).PartProofs[zb0004][zb0005].MarshalMsg(o)
- }
- }
- }
- if (zb0009Mask & 0x4) == 0 { // if not empty
- // string "certPbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x50, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskPartProofs))
- }
- if (zb0009Mask & 0x8) == 0 { // if not empty
- // string "certS"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x53)
- if (*z).SigProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).SigProofs)))
- }
- for zb0002 := range (*z).SigProofs {
- if (*z).SigProofs[zb0002] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).SigProofs[zb0002])))
- }
- for zb0003 := range (*z).SigProofs[zb0002] {
- o = (*z).SigProofs[zb0002][zb0003].MarshalMsg(o)
- }
- }
- }
- if (zb0009Mask & 0x10) == 0 { // if not empty
- // string "certSbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x53, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskSigProofs))
- }
- if (zb0009Mask & 0x20) == 0 { // if not empty
- // string "certc"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x63)
- o = msgp.AppendBytes(o, (*z).SigCommit)
- }
- if (zb0009Mask & 0x40) == 0 { // if not empty
- // string "certcbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskSigCommit))
- }
- if (zb0009Mask & 0x80) == 0 { // if not empty
- // string "certr"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x72)
- if (*z).Reveals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Reveals)))
- }
- for zb0006 := range (*z).Reveals {
- if (*z).Reveals[zb0006] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).Reveals[zb0006])))
- }
- zb0007_keys := make([]uint64, 0, len((*z).Reveals[zb0006]))
- for zb0007 := range (*z).Reveals[zb0006] {
- zb0007_keys = append(zb0007_keys, zb0007)
- }
- sort.Sort(SortUint64(zb0007_keys))
- for _, zb0007 := range zb0007_keys {
- zb0008 := (*z).Reveals[zb0006][zb0007]
- _ = zb0008
- o = msgp.AppendUint64(o, zb0007)
- o = zb0008.MarshalMsg(o)
- }
- }
- }
- if (zb0009Mask & 0x100) == 0 { // if not empty
- // string "certrbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskReveals))
- }
- if (zb0009Mask & 0x200) == 0 { // if not empty
- // string "certw"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x77)
- if (*z).SignedWeight == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).SignedWeight)))
- }
- for zb0001 := range (*z).SignedWeight {
- o = msgp.AppendUint64(o, (*z).SignedWeight[zb0001])
- }
- }
- if (zb0009Mask & 0x400) == 0 { // if not empty
- // string "certwbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x77, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskSignedWeight))
- }
- }
- return
-}
-
-func (_ *encodedCert) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedCert)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedCert) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0009 int
- var zb0010 bool
- zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0009 > 0 {
- zb0009--
- var zb0011 int
- zb0011, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- if zb0011 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(maxAddressBytes))
- return
- }
- (*z).SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- }
- if zb0009 > 0 {
- zb0009--
- {
- var zb0012 []byte
- var zb0013 int
- zb0013, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- if zb0013 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(maxBitmaskSize))
- return
- }
- zb0012, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- (*z).BitmaskSigCommit = bitmask(zb0012)
- }
- }
- if zb0009 > 0 {
- zb0009--
- var zb0014 int
- var zb0015 bool
- zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0014 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0015 {
- (*z).SignedWeight = nil
- } else if (*z).SignedWeight != nil && cap((*z).SignedWeight) >= zb0014 {
- (*z).SignedWeight = ((*z).SignedWeight)[:zb0014]
- } else {
- (*z).SignedWeight = make([]uint64, zb0014)
- }
- for zb0001 := range (*z).SignedWeight {
- (*z).SignedWeight[zb0001], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight", zb0001)
- return
- }
- }
- }
- if zb0009 > 0 {
- zb0009--
- {
- var zb0016 []byte
- var zb0017 int
- zb0017, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- if zb0017 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxBitmaskSize))
- return
- }
- zb0016, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- (*z).BitmaskSignedWeight = bitmask(zb0016)
- }
- }
- if zb0009 > 0 {
- zb0009--
- var zb0018 int
- var zb0019 bool
- zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0018 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0018), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0019 {
- (*z).SigProofs = nil
- } else if (*z).SigProofs != nil && cap((*z).SigProofs) >= zb0018 {
- (*z).SigProofs = ((*z).SigProofs)[:zb0018]
- } else {
- (*z).SigProofs = make([]certProofs, zb0018)
- }
- for zb0002 := range (*z).SigProofs {
- var zb0020 int
- var zb0021 bool
- zb0020, zb0021, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0002)
- return
- }
- if zb0020 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0002)
- return
- }
- if zb0021 {
- (*z).SigProofs[zb0002] = nil
- } else if (*z).SigProofs[zb0002] != nil && cap((*z).SigProofs[zb0002]) >= zb0020 {
- (*z).SigProofs[zb0002] = ((*z).SigProofs[zb0002])[:zb0020]
- } else {
- (*z).SigProofs[zb0002] = make(certProofs, zb0020)
- }
- for zb0003 := range (*z).SigProofs[zb0002] {
- bts, err = (*z).SigProofs[zb0002][zb0003].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0002, zb0003)
- return
- }
- }
- }
- }
- if zb0009 > 0 {
- zb0009--
- {
- var zb0022 []byte
- var zb0023 int
- zb0023, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- if zb0023 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxBitmaskSize))
- return
- }
- zb0022, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- (*z).BitmaskSigProofs = bitmask(zb0022)
- }
- }
- if zb0009 > 0 {
- zb0009--
- var zb0024 int
- var zb0025 bool
- zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0024 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0025 {
- (*z).PartProofs = nil
- } else if (*z).PartProofs != nil && cap((*z).PartProofs) >= zb0024 {
- (*z).PartProofs = ((*z).PartProofs)[:zb0024]
- } else {
- (*z).PartProofs = make([]certProofs, zb0024)
- }
- for zb0004 := range (*z).PartProofs {
- var zb0026 int
- var zb0027 bool
- zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0004)
- return
- }
- if zb0026 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0004)
- return
- }
- if zb0027 {
- (*z).PartProofs[zb0004] = nil
- } else if (*z).PartProofs[zb0004] != nil && cap((*z).PartProofs[zb0004]) >= zb0026 {
- (*z).PartProofs[zb0004] = ((*z).PartProofs[zb0004])[:zb0026]
- } else {
- (*z).PartProofs[zb0004] = make(certProofs, zb0026)
- }
- for zb0005 := range (*z).PartProofs[zb0004] {
- bts, err = (*z).PartProofs[zb0004][zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0004, zb0005)
- return
- }
- }
- }
- }
- if zb0009 > 0 {
- zb0009--
- {
- var zb0028 []byte
- var zb0029 int
- zb0029, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- if zb0029 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0029), uint64(maxBitmaskSize))
- return
- }
- zb0028, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- (*z).BitmaskPartProofs = bitmask(zb0028)
- }
- }
- if zb0009 > 0 {
- zb0009--
- var zb0030 int
- var zb0031 bool
- zb0030, zb0031, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0030 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0031 {
- (*z).Reveals = nil
- } else if (*z).Reveals != nil && cap((*z).Reveals) >= zb0030 {
- (*z).Reveals = ((*z).Reveals)[:zb0030]
- } else {
- (*z).Reveals = make([]revealMap, zb0030)
- }
- for zb0006 := range (*z).Reveals {
- var zb0032 int
- var zb0033 bool
- zb0032, zb0033, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0006)
- return
- }
- if zb0032 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0032), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0006)
- return
- }
- if zb0033 {
- (*z).Reveals[zb0006] = nil
- } else if (*z).Reveals[zb0006] == nil {
- (*z).Reveals[zb0006] = make(revealMap, zb0032)
- }
- for zb0032 > 0 {
- var zb0007 uint64
- var zb0008 compactcert.Reveal
- zb0032--
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0006)
- return
- }
- bts, err = zb0008.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0006, zb0007)
- return
- }
- (*z).Reveals[zb0006][zb0007] = zb0008
- }
- }
- }
- if zb0009 > 0 {
- zb0009--
- {
- var zb0034 []byte
- var zb0035 int
- zb0035, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- if zb0035 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0035), uint64(maxBitmaskSize))
- return
- }
- zb0034, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- (*z).BitmaskReveals = bitmask(zb0034)
- }
- }
- if zb0009 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0009)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0010 {
- (*z) = encodedCert{}
- }
- for zb0009 > 0 {
- zb0009--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "certc":
- var zb0036 int
- zb0036, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- if zb0036 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0036), uint64(maxAddressBytes))
- return
- }
- (*z).SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- case "certcbm":
- {
- var zb0037 []byte
- var zb0038 int
- zb0038, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- if zb0038 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0038), uint64(maxBitmaskSize))
- return
- }
- zb0037, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- (*z).BitmaskSigCommit = bitmask(zb0037)
- }
- case "certw":
- var zb0039 int
- var zb0040 bool
- zb0039, zb0040, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0039 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0039), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0040 {
- (*z).SignedWeight = nil
- } else if (*z).SignedWeight != nil && cap((*z).SignedWeight) >= zb0039 {
- (*z).SignedWeight = ((*z).SignedWeight)[:zb0039]
- } else {
- (*z).SignedWeight = make([]uint64, zb0039)
- }
- for zb0001 := range (*z).SignedWeight {
- (*z).SignedWeight[zb0001], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight", zb0001)
- return
- }
- }
- case "certwbm":
- {
- var zb0041 []byte
- var zb0042 int
- zb0042, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- if zb0042 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0042), uint64(maxBitmaskSize))
- return
- }
- zb0041, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- (*z).BitmaskSignedWeight = bitmask(zb0041)
- }
- case "certS":
- var zb0043 int
- var zb0044 bool
- zb0043, zb0044, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0043 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0043), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0044 {
- (*z).SigProofs = nil
- } else if (*z).SigProofs != nil && cap((*z).SigProofs) >= zb0043 {
- (*z).SigProofs = ((*z).SigProofs)[:zb0043]
- } else {
- (*z).SigProofs = make([]certProofs, zb0043)
- }
- for zb0002 := range (*z).SigProofs {
- var zb0045 int
- var zb0046 bool
- zb0045, zb0046, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0002)
- return
- }
- if zb0045 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0045), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "SigProofs", zb0002)
- return
- }
- if zb0046 {
- (*z).SigProofs[zb0002] = nil
- } else if (*z).SigProofs[zb0002] != nil && cap((*z).SigProofs[zb0002]) >= zb0045 {
- (*z).SigProofs[zb0002] = ((*z).SigProofs[zb0002])[:zb0045]
- } else {
- (*z).SigProofs[zb0002] = make(certProofs, zb0045)
- }
- for zb0003 := range (*z).SigProofs[zb0002] {
- bts, err = (*z).SigProofs[zb0002][zb0003].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0002, zb0003)
- return
- }
- }
- }
- case "certSbm":
- {
- var zb0047 []byte
- var zb0048 int
- zb0048, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- if zb0048 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0048), uint64(maxBitmaskSize))
- return
- }
- zb0047, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- (*z).BitmaskSigProofs = bitmask(zb0047)
- }
- case "certP":
- var zb0049 int
- var zb0050 bool
- zb0049, zb0050, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0049 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0049), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0050 {
- (*z).PartProofs = nil
- } else if (*z).PartProofs != nil && cap((*z).PartProofs) >= zb0049 {
- (*z).PartProofs = ((*z).PartProofs)[:zb0049]
- } else {
- (*z).PartProofs = make([]certProofs, zb0049)
- }
- for zb0004 := range (*z).PartProofs {
- var zb0051 int
- var zb0052 bool
- zb0051, zb0052, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0004)
- return
- }
- if zb0051 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0051), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "PartProofs", zb0004)
- return
- }
- if zb0052 {
- (*z).PartProofs[zb0004] = nil
- } else if (*z).PartProofs[zb0004] != nil && cap((*z).PartProofs[zb0004]) >= zb0051 {
- (*z).PartProofs[zb0004] = ((*z).PartProofs[zb0004])[:zb0051]
- } else {
- (*z).PartProofs[zb0004] = make(certProofs, zb0051)
- }
- for zb0005 := range (*z).PartProofs[zb0004] {
- bts, err = (*z).PartProofs[zb0004][zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0004, zb0005)
- return
- }
- }
- }
- case "certPbm":
- {
- var zb0053 []byte
- var zb0054 int
- zb0054, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- if zb0054 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(maxBitmaskSize))
- return
- }
- zb0053, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- (*z).BitmaskPartProofs = bitmask(zb0053)
- }
- case "certr":
- var zb0055 int
- var zb0056 bool
- zb0055, zb0056, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0055 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0055), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0056 {
- (*z).Reveals = nil
- } else if (*z).Reveals != nil && cap((*z).Reveals) >= zb0055 {
- (*z).Reveals = ((*z).Reveals)[:zb0055]
- } else {
- (*z).Reveals = make([]revealMap, zb0055)
- }
- for zb0006 := range (*z).Reveals {
- var zb0057 int
- var zb0058 bool
- zb0057, zb0058, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0006)
- return
- }
- if zb0057 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0057), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "Reveals", zb0006)
- return
- }
- if zb0058 {
- (*z).Reveals[zb0006] = nil
- } else if (*z).Reveals[zb0006] == nil {
- (*z).Reveals[zb0006] = make(revealMap, zb0057)
- }
- for zb0057 > 0 {
- var zb0007 uint64
- var zb0008 compactcert.Reveal
- zb0057--
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0006)
- return
- }
- bts, err = zb0008.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0006, zb0007)
- return
- }
- (*z).Reveals[zb0006][zb0007] = zb0008
- }
- }
- case "certrbm":
- {
- var zb0059 []byte
- var zb0060 int
- zb0060, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- if zb0060 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0060), uint64(maxBitmaskSize))
- return
- }
- zb0059, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- (*z).BitmaskReveals = bitmask(zb0059)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedCert) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedCert)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedCert) Msgsize() (s int) {
- s = 1 + 6 + msgp.BytesPrefixSize + len((*z).SigCommit) + 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskSigCommit)) + 6 + msgp.ArrayHeaderSize + (len((*z).SignedWeight) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskSignedWeight)) + 6 + msgp.ArrayHeaderSize
- for zb0002 := range (*z).SigProofs {
- s += msgp.ArrayHeaderSize
- for zb0003 := range (*z).SigProofs[zb0002] {
- s += (*z).SigProofs[zb0002][zb0003].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskSigProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).PartProofs {
- s += msgp.ArrayHeaderSize
- for zb0005 := range (*z).PartProofs[zb0004] {
- s += (*z).PartProofs[zb0004][zb0005].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskPartProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0006 := range (*z).Reveals {
- s += msgp.MapHeaderSize
- if (*z).Reveals[zb0006] != nil {
- for zb0007, zb0008 := range (*z).Reveals[zb0006] {
- _ = zb0007
- _ = zb0008
- s += 0 + msgp.Uint64Size + zb0008.Msgsize()
- }
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskReveals))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedCert) MsgIsZero() bool {
- return (len((*z).SigCommit) == 0) && (len((*z).BitmaskSigCommit) == 0) && (len((*z).SignedWeight) == 0) && (len((*z).BitmaskSignedWeight) == 0) && (len((*z).SigProofs) == 0) && (len((*z).BitmaskSigProofs) == 0) && (len((*z).PartProofs) == 0) && (len((*z).BitmaskPartProofs) == 0) && (len((*z).Reveals) == 0) && (len((*z).BitmaskReveals) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedCompactCertTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0011Len := uint32(14)
- var zb0011Mask uint16 /* 16 bits */
- if len((*z).encodedCert.PartProofs) == 0 {
- zb0011Len--
- zb0011Mask |= 0x4
- }
- if len((*z).encodedCert.BitmaskPartProofs) == 0 {
- zb0011Len--
- zb0011Mask |= 0x8
- }
- if len((*z).encodedCert.SigProofs) == 0 {
- zb0011Len--
- zb0011Mask |= 0x10
- }
- if len((*z).encodedCert.BitmaskSigProofs) == 0 {
- zb0011Len--
- zb0011Mask |= 0x20
- }
- if len((*z).encodedCert.SigCommit) == 0 {
- zb0011Len--
- zb0011Mask |= 0x40
- }
- if len((*z).encodedCert.BitmaskSigCommit) == 0 {
- zb0011Len--
- zb0011Mask |= 0x80
- }
- if len((*z).encodedCert.Reveals) == 0 {
- zb0011Len--
- zb0011Mask |= 0x100
- }
- if len((*z).encodedCert.BitmaskReveals) == 0 {
- zb0011Len--
- zb0011Mask |= 0x200
- }
- if len((*z).CertRound) == 0 {
- zb0011Len--
- zb0011Mask |= 0x400
- }
- if len((*z).BitmaskCertRound) == 0 {
- zb0011Len--
- zb0011Mask |= 0x800
- }
- if len((*z).CertType) == 0 {
- zb0011Len--
- zb0011Mask |= 0x1000
- }
- if len((*z).BitmaskCertType) == 0 {
- zb0011Len--
- zb0011Mask |= 0x2000
- }
- if len((*z).encodedCert.SignedWeight) == 0 {
- zb0011Len--
- zb0011Mask |= 0x4000
- }
- if len((*z).encodedCert.BitmaskSignedWeight) == 0 {
- zb0011Len--
- zb0011Mask |= 0x8000
- }
- // variable map header, size zb0011Len
- o = append(o, 0x80|uint8(zb0011Len))
- if zb0011Len != 0 {
- if (zb0011Mask & 0x4) == 0 { // if not empty
- // string "certP"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x50)
- if (*z).encodedCert.PartProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCert.PartProofs)))
- }
- for zb0006 := range (*z).encodedCert.PartProofs {
- if (*z).encodedCert.PartProofs[zb0006] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCert.PartProofs[zb0006])))
- }
- for zb0007 := range (*z).encodedCert.PartProofs[zb0006] {
- o = (*z).encodedCert.PartProofs[zb0006][zb0007].MarshalMsg(o)
- }
- }
- }
- if (zb0011Mask & 0x8) == 0 { // if not empty
- // string "certPbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x50, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCert.BitmaskPartProofs))
- }
- if (zb0011Mask & 0x10) == 0 { // if not empty
- // string "certS"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x53)
- if (*z).encodedCert.SigProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCert.SigProofs)))
- }
- for zb0004 := range (*z).encodedCert.SigProofs {
- if (*z).encodedCert.SigProofs[zb0004] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCert.SigProofs[zb0004])))
- }
- for zb0005 := range (*z).encodedCert.SigProofs[zb0004] {
- o = (*z).encodedCert.SigProofs[zb0004][zb0005].MarshalMsg(o)
- }
- }
- }
- if (zb0011Mask & 0x20) == 0 { // if not empty
- // string "certSbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x53, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCert.BitmaskSigProofs))
- }
- if (zb0011Mask & 0x40) == 0 { // if not empty
- // string "certc"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedCert.SigCommit)
- }
- if (zb0011Mask & 0x80) == 0 { // if not empty
- // string "certcbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCert.BitmaskSigCommit))
- }
- if (zb0011Mask & 0x100) == 0 { // if not empty
- // string "certr"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x72)
- if (*z).encodedCert.Reveals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCert.Reveals)))
- }
- for zb0008 := range (*z).encodedCert.Reveals {
- if (*z).encodedCert.Reveals[zb0008] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).encodedCert.Reveals[zb0008])))
- }
- zb0009_keys := make([]uint64, 0, len((*z).encodedCert.Reveals[zb0008]))
- for zb0009 := range (*z).encodedCert.Reveals[zb0008] {
- zb0009_keys = append(zb0009_keys, zb0009)
- }
- sort.Sort(SortUint64(zb0009_keys))
- for _, zb0009 := range zb0009_keys {
- zb0010 := (*z).encodedCert.Reveals[zb0008][zb0009]
- _ = zb0010
- o = msgp.AppendUint64(o, zb0009)
- o = zb0010.MarshalMsg(o)
- }
- }
- }
- if (zb0011Mask & 0x200) == 0 { // if not empty
- // string "certrbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCert.BitmaskReveals))
- }
- if (zb0011Mask & 0x400) == 0 { // if not empty
- // string "certrnd"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64)
- if (*z).CertRound == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).CertRound)))
- }
- for zb0001 := range (*z).CertRound {
- o = (*z).CertRound[zb0001].MarshalMsg(o)
- }
- }
- if (zb0011Mask & 0x800) == 0 { // if not empty
- // string "certrndbm"
- o = append(o, 0xa9, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskCertRound))
- }
- if (zb0011Mask & 0x1000) == 0 { // if not empty
- // string "certtype"
- o = append(o, 0xa8, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65)
- if (*z).CertType == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).CertType)))
- }
- for zb0002 := range (*z).CertType {
- o = (*z).CertType[zb0002].MarshalMsg(o)
- }
- }
- if (zb0011Mask & 0x2000) == 0 { // if not empty
- // string "certtypebm"
- o = append(o, 0xaa, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskCertType))
- }
- if (zb0011Mask & 0x4000) == 0 { // if not empty
- // string "certw"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x77)
- if (*z).encodedCert.SignedWeight == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCert.SignedWeight)))
- }
- for zb0003 := range (*z).encodedCert.SignedWeight {
- o = msgp.AppendUint64(o, (*z).encodedCert.SignedWeight[zb0003])
- }
- }
- if (zb0011Mask & 0x8000) == 0 { // if not empty
- // string "certwbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x77, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCert.BitmaskSignedWeight))
- }
- }
- return
-}
-
-func (_ *encodedCompactCertTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedCompactCertTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedCompactCertTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0011 > 0 {
- zb0011--
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0013 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0014 {
- (*z).CertRound = nil
- } else if (*z).CertRound != nil && cap((*z).CertRound) >= zb0013 {
- (*z).CertRound = ((*z).CertRound)[:zb0013]
- } else {
- (*z).CertRound = make([]basics.Round, zb0013)
- }
- for zb0001 := range (*z).CertRound {
- bts, err = (*z).CertRound[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound", zb0001)
- return
- }
- }
- }
- if zb0011 > 0 {
- zb0011--
- {
- var zb0015 []byte
- var zb0016 int
- zb0016, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- if zb0016 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(maxBitmaskSize))
- return
- }
- zb0015, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- (*z).BitmaskCertRound = bitmask(zb0015)
- }
- }
- if zb0011 > 0 {
- zb0011--
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0017 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0018 {
- (*z).CertType = nil
- } else if (*z).CertType != nil && cap((*z).CertType) >= zb0017 {
- (*z).CertType = ((*z).CertType)[:zb0017]
- } else {
- (*z).CertType = make([]protocol.CompactCertType, zb0017)
- }
- for zb0002 := range (*z).CertType {
- bts, err = (*z).CertType[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType", zb0002)
- return
- }
- }
- }
- if zb0011 > 0 {
- zb0011--
- {
- var zb0019 []byte
- var zb0020 int
- zb0020, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- if zb0020 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(maxBitmaskSize))
- return
- }
- zb0019, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- (*z).BitmaskCertType = bitmask(zb0019)
- }
- }
- if zb0011 > 0 {
- zb0011--
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- if zb0021 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(maxAddressBytes))
- return
- }
- (*z).encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- }
- if zb0011 > 0 {
- zb0011--
- {
- var zb0022 []byte
- var zb0023 int
- zb0023, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- if zb0023 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxBitmaskSize))
- return
- }
- zb0022, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- (*z).encodedCert.BitmaskSigCommit = bitmask(zb0022)
- }
- }
- if zb0011 > 0 {
- zb0011--
- var zb0024 int
- var zb0025 bool
- zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0024 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0025 {
- (*z).encodedCert.SignedWeight = nil
- } else if (*z).encodedCert.SignedWeight != nil && cap((*z).encodedCert.SignedWeight) >= zb0024 {
- (*z).encodedCert.SignedWeight = ((*z).encodedCert.SignedWeight)[:zb0024]
- } else {
- (*z).encodedCert.SignedWeight = make([]uint64, zb0024)
- }
- for zb0003 := range (*z).encodedCert.SignedWeight {
- (*z).encodedCert.SignedWeight[zb0003], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight", zb0003)
- return
- }
- }
- }
- if zb0011 > 0 {
- zb0011--
- {
- var zb0026 []byte
- var zb0027 int
- zb0027, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- if zb0027 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0027), uint64(maxBitmaskSize))
- return
- }
- zb0026, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- (*z).encodedCert.BitmaskSignedWeight = bitmask(zb0026)
- }
- }
- if zb0011 > 0 {
- zb0011--
- var zb0028 int
- var zb0029 bool
- zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0028 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0029 {
- (*z).encodedCert.SigProofs = nil
- } else if (*z).encodedCert.SigProofs != nil && cap((*z).encodedCert.SigProofs) >= zb0028 {
- (*z).encodedCert.SigProofs = ((*z).encodedCert.SigProofs)[:zb0028]
- } else {
- (*z).encodedCert.SigProofs = make([]certProofs, zb0028)
- }
- for zb0004 := range (*z).encodedCert.SigProofs {
- var zb0030 int
- var zb0031 bool
- zb0030, zb0031, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0004)
- return
- }
- if zb0030 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0004)
- return
- }
- if zb0031 {
- (*z).encodedCert.SigProofs[zb0004] = nil
- } else if (*z).encodedCert.SigProofs[zb0004] != nil && cap((*z).encodedCert.SigProofs[zb0004]) >= zb0030 {
- (*z).encodedCert.SigProofs[zb0004] = ((*z).encodedCert.SigProofs[zb0004])[:zb0030]
- } else {
- (*z).encodedCert.SigProofs[zb0004] = make(certProofs, zb0030)
- }
- for zb0005 := range (*z).encodedCert.SigProofs[zb0004] {
- bts, err = (*z).encodedCert.SigProofs[zb0004][zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0004, zb0005)
- return
- }
- }
- }
- }
- if zb0011 > 0 {
- zb0011--
- {
- var zb0032 []byte
- var zb0033 int
- zb0033, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- if zb0033 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0033), uint64(maxBitmaskSize))
- return
- }
- zb0032, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- (*z).encodedCert.BitmaskSigProofs = bitmask(zb0032)
- }
- }
- if zb0011 > 0 {
- zb0011--
- var zb0034 int
- var zb0035 bool
- zb0034, zb0035, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0034 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0034), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0035 {
- (*z).encodedCert.PartProofs = nil
- } else if (*z).encodedCert.PartProofs != nil && cap((*z).encodedCert.PartProofs) >= zb0034 {
- (*z).encodedCert.PartProofs = ((*z).encodedCert.PartProofs)[:zb0034]
- } else {
- (*z).encodedCert.PartProofs = make([]certProofs, zb0034)
- }
- for zb0006 := range (*z).encodedCert.PartProofs {
- var zb0036 int
- var zb0037 bool
- zb0036, zb0037, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0006)
- return
- }
- if zb0036 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0036), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0006)
- return
- }
- if zb0037 {
- (*z).encodedCert.PartProofs[zb0006] = nil
- } else if (*z).encodedCert.PartProofs[zb0006] != nil && cap((*z).encodedCert.PartProofs[zb0006]) >= zb0036 {
- (*z).encodedCert.PartProofs[zb0006] = ((*z).encodedCert.PartProofs[zb0006])[:zb0036]
- } else {
- (*z).encodedCert.PartProofs[zb0006] = make(certProofs, zb0036)
- }
- for zb0007 := range (*z).encodedCert.PartProofs[zb0006] {
- bts, err = (*z).encodedCert.PartProofs[zb0006][zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0006, zb0007)
- return
- }
- }
- }
- }
- if zb0011 > 0 {
- zb0011--
- {
- var zb0038 []byte
- var zb0039 int
- zb0039, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- if zb0039 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0039), uint64(maxBitmaskSize))
- return
- }
- zb0038, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- (*z).encodedCert.BitmaskPartProofs = bitmask(zb0038)
- }
- }
- if zb0011 > 0 {
- zb0011--
- var zb0040 int
- var zb0041 bool
- zb0040, zb0041, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0040 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0040), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0041 {
- (*z).encodedCert.Reveals = nil
- } else if (*z).encodedCert.Reveals != nil && cap((*z).encodedCert.Reveals) >= zb0040 {
- (*z).encodedCert.Reveals = ((*z).encodedCert.Reveals)[:zb0040]
- } else {
- (*z).encodedCert.Reveals = make([]revealMap, zb0040)
- }
- for zb0008 := range (*z).encodedCert.Reveals {
- var zb0042 int
- var zb0043 bool
- zb0042, zb0043, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0008)
- return
- }
- if zb0042 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0042), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0008)
- return
- }
- if zb0043 {
- (*z).encodedCert.Reveals[zb0008] = nil
- } else if (*z).encodedCert.Reveals[zb0008] == nil {
- (*z).encodedCert.Reveals[zb0008] = make(revealMap, zb0042)
- }
- for zb0042 > 0 {
- var zb0009 uint64
- var zb0010 compactcert.Reveal
- zb0042--
- zb0009, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0008)
- return
- }
- bts, err = zb0010.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0008, zb0009)
- return
- }
- (*z).encodedCert.Reveals[zb0008][zb0009] = zb0010
- }
- }
- }
- if zb0011 > 0 {
- zb0011--
- {
- var zb0044 []byte
- var zb0045 int
- zb0045, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- if zb0045 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0045), uint64(maxBitmaskSize))
- return
- }
- zb0044, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- (*z).encodedCert.BitmaskReveals = bitmask(zb0044)
- }
- }
- if zb0011 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0011)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0012 {
- (*z) = encodedCompactCertTxnFields{}
- }
- for zb0011 > 0 {
- zb0011--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "certrnd":
- var zb0046 int
- var zb0047 bool
- zb0046, zb0047, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0046 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0046), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0047 {
- (*z).CertRound = nil
- } else if (*z).CertRound != nil && cap((*z).CertRound) >= zb0046 {
- (*z).CertRound = ((*z).CertRound)[:zb0046]
- } else {
- (*z).CertRound = make([]basics.Round, zb0046)
- }
- for zb0001 := range (*z).CertRound {
- bts, err = (*z).CertRound[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound", zb0001)
- return
- }
- }
- case "certrndbm":
- {
- var zb0048 []byte
- var zb0049 int
- zb0049, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- if zb0049 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0049), uint64(maxBitmaskSize))
- return
- }
- zb0048, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- (*z).BitmaskCertRound = bitmask(zb0048)
- }
- case "certtype":
- var zb0050 int
- var zb0051 bool
- zb0050, zb0051, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0050 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0050), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0051 {
- (*z).CertType = nil
- } else if (*z).CertType != nil && cap((*z).CertType) >= zb0050 {
- (*z).CertType = ((*z).CertType)[:zb0050]
- } else {
- (*z).CertType = make([]protocol.CompactCertType, zb0050)
- }
- for zb0002 := range (*z).CertType {
- bts, err = (*z).CertType[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType", zb0002)
- return
- }
- }
- case "certtypebm":
- {
- var zb0052 []byte
- var zb0053 int
- zb0053, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- if zb0053 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0053), uint64(maxBitmaskSize))
- return
- }
- zb0052, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- (*z).BitmaskCertType = bitmask(zb0052)
- }
- case "certc":
- var zb0054 int
- zb0054, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- if zb0054 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(maxAddressBytes))
- return
- }
- (*z).encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- case "certcbm":
- {
- var zb0055 []byte
- var zb0056 int
- zb0056, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- if zb0056 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0056), uint64(maxBitmaskSize))
- return
- }
- zb0055, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- (*z).encodedCert.BitmaskSigCommit = bitmask(zb0055)
- }
- case "certw":
- var zb0057 int
- var zb0058 bool
- zb0057, zb0058, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0057 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0057), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0058 {
- (*z).encodedCert.SignedWeight = nil
- } else if (*z).encodedCert.SignedWeight != nil && cap((*z).encodedCert.SignedWeight) >= zb0057 {
- (*z).encodedCert.SignedWeight = ((*z).encodedCert.SignedWeight)[:zb0057]
- } else {
- (*z).encodedCert.SignedWeight = make([]uint64, zb0057)
- }
- for zb0003 := range (*z).encodedCert.SignedWeight {
- (*z).encodedCert.SignedWeight[zb0003], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight", zb0003)
- return
- }
- }
- case "certwbm":
- {
- var zb0059 []byte
- var zb0060 int
- zb0060, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- if zb0060 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0060), uint64(maxBitmaskSize))
- return
- }
- zb0059, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- (*z).encodedCert.BitmaskSignedWeight = bitmask(zb0059)
- }
- case "certS":
- var zb0061 int
- var zb0062 bool
- zb0061, zb0062, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0061 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0061), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0062 {
- (*z).encodedCert.SigProofs = nil
- } else if (*z).encodedCert.SigProofs != nil && cap((*z).encodedCert.SigProofs) >= zb0061 {
- (*z).encodedCert.SigProofs = ((*z).encodedCert.SigProofs)[:zb0061]
- } else {
- (*z).encodedCert.SigProofs = make([]certProofs, zb0061)
- }
- for zb0004 := range (*z).encodedCert.SigProofs {
- var zb0063 int
- var zb0064 bool
- zb0063, zb0064, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0004)
- return
- }
- if zb0063 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0063), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "SigProofs", zb0004)
- return
- }
- if zb0064 {
- (*z).encodedCert.SigProofs[zb0004] = nil
- } else if (*z).encodedCert.SigProofs[zb0004] != nil && cap((*z).encodedCert.SigProofs[zb0004]) >= zb0063 {
- (*z).encodedCert.SigProofs[zb0004] = ((*z).encodedCert.SigProofs[zb0004])[:zb0063]
- } else {
- (*z).encodedCert.SigProofs[zb0004] = make(certProofs, zb0063)
- }
- for zb0005 := range (*z).encodedCert.SigProofs[zb0004] {
- bts, err = (*z).encodedCert.SigProofs[zb0004][zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0004, zb0005)
- return
- }
- }
- }
- case "certSbm":
- {
- var zb0065 []byte
- var zb0066 int
- zb0066, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- if zb0066 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0066), uint64(maxBitmaskSize))
- return
- }
- zb0065, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- (*z).encodedCert.BitmaskSigProofs = bitmask(zb0065)
- }
- case "certP":
- var zb0067 int
- var zb0068 bool
- zb0067, zb0068, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0067 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0067), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0068 {
- (*z).encodedCert.PartProofs = nil
- } else if (*z).encodedCert.PartProofs != nil && cap((*z).encodedCert.PartProofs) >= zb0067 {
- (*z).encodedCert.PartProofs = ((*z).encodedCert.PartProofs)[:zb0067]
- } else {
- (*z).encodedCert.PartProofs = make([]certProofs, zb0067)
- }
- for zb0006 := range (*z).encodedCert.PartProofs {
- var zb0069 int
- var zb0070 bool
- zb0069, zb0070, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0006)
- return
- }
- if zb0069 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0069), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "PartProofs", zb0006)
- return
- }
- if zb0070 {
- (*z).encodedCert.PartProofs[zb0006] = nil
- } else if (*z).encodedCert.PartProofs[zb0006] != nil && cap((*z).encodedCert.PartProofs[zb0006]) >= zb0069 {
- (*z).encodedCert.PartProofs[zb0006] = ((*z).encodedCert.PartProofs[zb0006])[:zb0069]
- } else {
- (*z).encodedCert.PartProofs[zb0006] = make(certProofs, zb0069)
- }
- for zb0007 := range (*z).encodedCert.PartProofs[zb0006] {
- bts, err = (*z).encodedCert.PartProofs[zb0006][zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0006, zb0007)
- return
- }
- }
- }
- case "certPbm":
- {
- var zb0071 []byte
- var zb0072 int
- zb0072, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- if zb0072 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0072), uint64(maxBitmaskSize))
- return
- }
- zb0071, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- (*z).encodedCert.BitmaskPartProofs = bitmask(zb0071)
- }
- case "certr":
- var zb0073 int
- var zb0074 bool
- zb0073, zb0074, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0073 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0073), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0074 {
- (*z).encodedCert.Reveals = nil
- } else if (*z).encodedCert.Reveals != nil && cap((*z).encodedCert.Reveals) >= zb0073 {
- (*z).encodedCert.Reveals = ((*z).encodedCert.Reveals)[:zb0073]
- } else {
- (*z).encodedCert.Reveals = make([]revealMap, zb0073)
- }
- for zb0008 := range (*z).encodedCert.Reveals {
- var zb0075 int
- var zb0076 bool
- zb0075, zb0076, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0008)
- return
- }
- if zb0075 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0075), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "Reveals", zb0008)
- return
- }
- if zb0076 {
- (*z).encodedCert.Reveals[zb0008] = nil
- } else if (*z).encodedCert.Reveals[zb0008] == nil {
- (*z).encodedCert.Reveals[zb0008] = make(revealMap, zb0075)
- }
- for zb0075 > 0 {
- var zb0009 uint64
- var zb0010 compactcert.Reveal
- zb0075--
- zb0009, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0008)
- return
- }
- bts, err = zb0010.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0008, zb0009)
- return
- }
- (*z).encodedCert.Reveals[zb0008][zb0009] = zb0010
- }
- }
- case "certrbm":
- {
- var zb0077 []byte
- var zb0078 int
- zb0078, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- if zb0078 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0078), uint64(maxBitmaskSize))
- return
- }
- zb0077, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- (*z).encodedCert.BitmaskReveals = bitmask(zb0077)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedCompactCertTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedCompactCertTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedCompactCertTxnFields) Msgsize() (s int) {
- s = 1 + 8 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).CertRound {
- s += (*z).CertRound[zb0001].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskCertRound)) + 9 + msgp.ArrayHeaderSize
- for zb0002 := range (*z).CertType {
- s += (*z).CertType[zb0002].Msgsize()
- }
- s += 11 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskCertType)) + 6 + msgp.BytesPrefixSize + len((*z).encodedCert.SigCommit) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCert.BitmaskSigCommit)) + 6 + msgp.ArrayHeaderSize + (len((*z).encodedCert.SignedWeight) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCert.BitmaskSignedWeight)) + 6 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).encodedCert.SigProofs {
- s += msgp.ArrayHeaderSize
- for zb0005 := range (*z).encodedCert.SigProofs[zb0004] {
- s += (*z).encodedCert.SigProofs[zb0004][zb0005].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCert.BitmaskSigProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0006 := range (*z).encodedCert.PartProofs {
- s += msgp.ArrayHeaderSize
- for zb0007 := range (*z).encodedCert.PartProofs[zb0006] {
- s += (*z).encodedCert.PartProofs[zb0006][zb0007].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCert.BitmaskPartProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0008 := range (*z).encodedCert.Reveals {
- s += msgp.MapHeaderSize
- if (*z).encodedCert.Reveals[zb0008] != nil {
- for zb0009, zb0010 := range (*z).encodedCert.Reveals[zb0008] {
- _ = zb0009
- _ = zb0010
- s += 0 + msgp.Uint64Size + zb0010.Msgsize()
- }
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCert.BitmaskReveals))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedCompactCertTxnFields) MsgIsZero() bool {
- return (len((*z).CertRound) == 0) && (len((*z).BitmaskCertRound) == 0) && (len((*z).CertType) == 0) && (len((*z).BitmaskCertType) == 0) && (len((*z).encodedCert.SigCommit) == 0) && (len((*z).encodedCert.BitmaskSigCommit) == 0) && (len((*z).encodedCert.SignedWeight) == 0) && (len((*z).encodedCert.BitmaskSignedWeight) == 0) && (len((*z).encodedCert.SigProofs) == 0) && (len((*z).encodedCert.BitmaskSigProofs) == 0) && (len((*z).encodedCert.PartProofs) == 0) && (len((*z).encodedCert.BitmaskPartProofs) == 0) && (len((*z).encodedCert.Reveals) == 0) && (len((*z).encodedCert.BitmaskReveals) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedKeyregTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0004Len := uint32(9)
- var zb0004Mask uint16 /* 10 bits */
- if len((*z).BitmaskNonparticipation) == 0 {
- zb0004Len--
- zb0004Mask |= 0x2
- }
- if len((*z).SelectionPK) == 0 {
- zb0004Len--
- zb0004Mask |= 0x4
- }
- if len((*z).VoteFirst) == 0 {
- zb0004Len--
- zb0004Mask |= 0x8
- }
- if len((*z).BitmaskVoteFirst) == 0 {
- zb0004Len--
- zb0004Mask |= 0x10
- }
- if len((*z).BitmaskKeys) == 0 {
- zb0004Len--
- zb0004Mask |= 0x20
- }
- if len((*z).VoteKeyDilution) == 0 {
- zb0004Len--
- zb0004Mask |= 0x40
- }
- if len((*z).VotePK) == 0 {
- zb0004Len--
- zb0004Mask |= 0x80
- }
- if len((*z).VoteLast) == 0 {
- zb0004Len--
- zb0004Mask |= 0x100
- }
- if len((*z).BitmaskVoteLast) == 0 {
- zb0004Len--
- zb0004Mask |= 0x200
- }
- // variable map header, size zb0004Len
- o = append(o, 0x80|uint8(zb0004Len))
- if zb0004Len != 0 {
- if (zb0004Mask & 0x2) == 0 { // if not empty
- // string "nonpartbm"
- o = append(o, 0xa9, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskNonparticipation))
- }
- if (zb0004Mask & 0x4) == 0 { // if not empty
- // string "selkey"
- o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).SelectionPK)
- }
- if (zb0004Mask & 0x8) == 0 { // if not empty
- // string "votefst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74)
- if (*z).VoteFirst == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).VoteFirst)))
- }
- for zb0001 := range (*z).VoteFirst {
- o = (*z).VoteFirst[zb0001].MarshalMsg(o)
- }
- }
- if (zb0004Mask & 0x10) == 0 { // if not empty
- // string "votefstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskVoteFirst))
- }
- if (zb0004Mask & 0x20) == 0 { // if not empty
- // string "votekbm"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskKeys))
- }
- if (zb0004Mask & 0x40) == 0 { // if not empty
- // string "votekd"
- o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64)
- if (*z).VoteKeyDilution == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).VoteKeyDilution)))
- }
- for zb0003 := range (*z).VoteKeyDilution {
- o = msgp.AppendUint64(o, (*z).VoteKeyDilution[zb0003])
- }
- }
- if (zb0004Mask & 0x80) == 0 { // if not empty
- // string "votekey"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).VotePK)
- }
- if (zb0004Mask & 0x100) == 0 { // if not empty
- // string "votelst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74)
- if (*z).VoteLast == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).VoteLast)))
- }
- for zb0002 := range (*z).VoteLast {
- o = (*z).VoteLast[zb0002].MarshalMsg(o)
- }
- }
- if (zb0004Mask & 0x200) == 0 { // if not empty
- // string "votelstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskVoteLast))
- }
- }
- return
-}
-
-func (_ *encodedKeyregTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedKeyregTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedKeyregTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0004 int
- var zb0005 bool
- zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0004 > 0 {
- zb0004--
- var zb0006 int
- zb0006, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- if zb0006 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(maxAddressBytes))
- return
- }
- (*z).VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).VotePK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- }
- if zb0004 > 0 {
- zb0004--
- var zb0007 int
- zb0007, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- if zb0007 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(maxAddressBytes))
- return
- }
- (*z).SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- }
- if zb0004 > 0 {
- zb0004--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0008 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0009 {
- (*z).VoteFirst = nil
- } else if (*z).VoteFirst != nil && cap((*z).VoteFirst) >= zb0008 {
- (*z).VoteFirst = ((*z).VoteFirst)[:zb0008]
- } else {
- (*z).VoteFirst = make([]basics.Round, zb0008)
- }
- for zb0001 := range (*z).VoteFirst {
- bts, err = (*z).VoteFirst[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst", zb0001)
- return
- }
- }
- }
- if zb0004 > 0 {
- zb0004--
- {
- var zb0010 []byte
- var zb0011 int
- zb0011, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- if zb0011 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(maxBitmaskSize))
- return
- }
- zb0010, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- (*z).BitmaskVoteFirst = bitmask(zb0010)
- }
- }
- if zb0004 > 0 {
- zb0004--
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0012 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0013 {
- (*z).VoteLast = nil
- } else if (*z).VoteLast != nil && cap((*z).VoteLast) >= zb0012 {
- (*z).VoteLast = ((*z).VoteLast)[:zb0012]
- } else {
- (*z).VoteLast = make([]basics.Round, zb0012)
- }
- for zb0002 := range (*z).VoteLast {
- bts, err = (*z).VoteLast[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast", zb0002)
- return
- }
- }
- }
- if zb0004 > 0 {
- zb0004--
- {
- var zb0014 []byte
- var zb0015 int
- zb0015, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- if zb0015 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(maxBitmaskSize))
- return
- }
- zb0014, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- (*z).BitmaskVoteLast = bitmask(zb0014)
- }
- }
- if zb0004 > 0 {
- zb0004--
- var zb0016 int
- var zb0017 bool
- zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0016 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0017 {
- (*z).VoteKeyDilution = nil
- } else if (*z).VoteKeyDilution != nil && cap((*z).VoteKeyDilution) >= zb0016 {
- (*z).VoteKeyDilution = ((*z).VoteKeyDilution)[:zb0016]
- } else {
- (*z).VoteKeyDilution = make([]uint64, zb0016)
- }
- for zb0003 := range (*z).VoteKeyDilution {
- (*z).VoteKeyDilution[zb0003], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution", zb0003)
- return
- }
- }
- }
- if zb0004 > 0 {
- zb0004--
- {
- var zb0018 []byte
- var zb0019 int
- zb0019, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- if zb0019 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(maxBitmaskSize))
- return
- }
- zb0018, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- (*z).BitmaskKeys = bitmask(zb0018)
- }
- }
- if zb0004 > 0 {
- zb0004--
- {
- var zb0020 []byte
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- if zb0021 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(maxBitmaskSize))
- return
- }
- zb0020, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- (*z).BitmaskNonparticipation = bitmask(zb0020)
- }
- }
- if zb0004 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0004)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0005 {
- (*z) = encodedKeyregTxnFields{}
- }
- for zb0004 > 0 {
- zb0004--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "votekey":
- var zb0022 int
- zb0022, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- if zb0022 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxAddressBytes))
- return
- }
- (*z).VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).VotePK)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- case "selkey":
- var zb0023 int
- zb0023, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- if zb0023 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxAddressBytes))
- return
- }
- (*z).SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- case "votefst":
- var zb0024 int
- var zb0025 bool
- zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0024 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0025 {
- (*z).VoteFirst = nil
- } else if (*z).VoteFirst != nil && cap((*z).VoteFirst) >= zb0024 {
- (*z).VoteFirst = ((*z).VoteFirst)[:zb0024]
- } else {
- (*z).VoteFirst = make([]basics.Round, zb0024)
- }
- for zb0001 := range (*z).VoteFirst {
- bts, err = (*z).VoteFirst[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst", zb0001)
- return
- }
- }
- case "votefstbm":
- {
- var zb0026 []byte
- var zb0027 int
- zb0027, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- if zb0027 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0027), uint64(maxBitmaskSize))
- return
- }
- zb0026, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- (*z).BitmaskVoteFirst = bitmask(zb0026)
- }
- case "votelst":
- var zb0028 int
- var zb0029 bool
- zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0028 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0029 {
- (*z).VoteLast = nil
- } else if (*z).VoteLast != nil && cap((*z).VoteLast) >= zb0028 {
- (*z).VoteLast = ((*z).VoteLast)[:zb0028]
- } else {
- (*z).VoteLast = make([]basics.Round, zb0028)
- }
- for zb0002 := range (*z).VoteLast {
- bts, err = (*z).VoteLast[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast", zb0002)
- return
- }
- }
- case "votelstbm":
- {
- var zb0030 []byte
- var zb0031 int
- zb0031, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- if zb0031 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0031), uint64(maxBitmaskSize))
- return
- }
- zb0030, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- (*z).BitmaskVoteLast = bitmask(zb0030)
- }
- case "votekd":
- var zb0032 int
- var zb0033 bool
- zb0032, zb0033, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0032 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0032), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0033 {
- (*z).VoteKeyDilution = nil
- } else if (*z).VoteKeyDilution != nil && cap((*z).VoteKeyDilution) >= zb0032 {
- (*z).VoteKeyDilution = ((*z).VoteKeyDilution)[:zb0032]
- } else {
- (*z).VoteKeyDilution = make([]uint64, zb0032)
- }
- for zb0003 := range (*z).VoteKeyDilution {
- (*z).VoteKeyDilution[zb0003], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution", zb0003)
- return
- }
- }
- case "votekbm":
- {
- var zb0034 []byte
- var zb0035 int
- zb0035, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- if zb0035 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0035), uint64(maxBitmaskSize))
- return
- }
- zb0034, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- (*z).BitmaskKeys = bitmask(zb0034)
- }
- case "nonpartbm":
- {
- var zb0036 []byte
- var zb0037 int
- zb0037, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- if zb0037 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0037), uint64(maxBitmaskSize))
- return
- }
- zb0036, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- (*z).BitmaskNonparticipation = bitmask(zb0036)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedKeyregTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedKeyregTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedKeyregTxnFields) Msgsize() (s int) {
- s = 1 + 8 + msgp.BytesPrefixSize + len((*z).VotePK) + 7 + msgp.BytesPrefixSize + len((*z).SelectionPK) + 8 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).VoteFirst {
- s += (*z).VoteFirst[zb0001].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskVoteFirst)) + 8 + msgp.ArrayHeaderSize
- for zb0002 := range (*z).VoteLast {
- s += (*z).VoteLast[zb0002].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskVoteLast)) + 7 + msgp.ArrayHeaderSize + (len((*z).VoteKeyDilution) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskKeys)) + 10 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskNonparticipation))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedKeyregTxnFields) MsgIsZero() bool {
- return (len((*z).VotePK) == 0) && (len((*z).SelectionPK) == 0) && (len((*z).VoteFirst) == 0) && (len((*z).BitmaskVoteFirst) == 0) && (len((*z).VoteLast) == 0) && (len((*z).BitmaskVoteLast) == 0) && (len((*z).VoteKeyDilution) == 0) && (len((*z).BitmaskKeys) == 0) && (len((*z).BitmaskNonparticipation) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedLsigs) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0004Len := uint32(4)
- var zb0004Mask uint8 /* 5 bits */
- if len((*z).LogicArgs) == 0 {
- zb0004Len--
- zb0004Mask |= 0x2
- }
- if len((*z).BitmaskLogicArgs) == 0 {
- zb0004Len--
- zb0004Mask |= 0x4
- }
- if len((*z).Logic) == 0 {
- zb0004Len--
- zb0004Mask |= 0x8
- }
- if len((*z).BitmaskLogic) == 0 {
- zb0004Len--
- zb0004Mask |= 0x10
- }
- // variable map header, size zb0004Len
- o = append(o, 0x80|uint8(zb0004Len))
- if zb0004Len != 0 {
- if (zb0004Mask & 0x2) == 0 { // if not empty
- // string "lsigarg"
- o = append(o, 0xa7, 0x6c, 0x73, 0x69, 0x67, 0x61, 0x72, 0x67)
- if (*z).LogicArgs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).LogicArgs)))
- }
- for zb0002 := range (*z).LogicArgs {
- if (*z).LogicArgs[zb0002] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).LogicArgs[zb0002])))
- }
- for zb0003 := range (*z).LogicArgs[zb0002] {
- o = msgp.AppendBytes(o, (*z).LogicArgs[zb0002][zb0003])
- }
- }
- }
- if (zb0004Mask & 0x4) == 0 { // if not empty
- // string "lsigargbm"
- o = append(o, 0xa9, 0x6c, 0x73, 0x69, 0x67, 0x61, 0x72, 0x67, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskLogicArgs))
- }
- if (zb0004Mask & 0x8) == 0 { // if not empty
- // string "lsigl"
- o = append(o, 0xa5, 0x6c, 0x73, 0x69, 0x67, 0x6c)
- if (*z).Logic == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Logic)))
- }
- for zb0001 := range (*z).Logic {
- o = msgp.AppendBytes(o, (*z).Logic[zb0001])
- }
- }
- if (zb0004Mask & 0x10) == 0 { // if not empty
- // string "lsiglbm"
- o = append(o, 0xa7, 0x6c, 0x73, 0x69, 0x67, 0x6c, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskLogic))
- }
- }
- return
-}
-
-func (_ *encodedLsigs) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedLsigs)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedLsigs) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0004 int
- var zb0005 bool
- zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0004 > 0 {
- zb0004--
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic")
- return
- }
- if zb0006 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Logic")
- return
- }
- if zb0007 {
- (*z).Logic = nil
- } else if (*z).Logic != nil && cap((*z).Logic) >= zb0006 {
- (*z).Logic = ((*z).Logic)[:zb0006]
- } else {
- (*z).Logic = make([][]byte, zb0006)
- }
- for zb0001 := range (*z).Logic {
- var zb0008 int
- zb0008, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic", zb0001)
- return
- }
- if zb0008 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).Logic[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z).Logic[zb0001])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic", zb0001)
- return
- }
- }
- }
- if zb0004 > 0 {
- zb0004--
- {
- var zb0009 []byte
- var zb0010 int
- zb0010, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogic")
- return
- }
- if zb0010 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(maxBitmaskSize))
- return
- }
- zb0009, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLogic))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogic")
- return
- }
- (*z).BitmaskLogic = bitmask(zb0009)
- }
- }
- if zb0004 > 0 {
- zb0004--
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs")
- return
- }
- if zb0011 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs")
- return
- }
- if zb0012 {
- (*z).LogicArgs = nil
- } else if (*z).LogicArgs != nil && cap((*z).LogicArgs) >= zb0011 {
- (*z).LogicArgs = ((*z).LogicArgs)[:zb0011]
- } else {
- (*z).LogicArgs = make([][][]byte, zb0011)
- }
- for zb0002 := range (*z).LogicArgs {
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0002)
- return
- }
- if zb0013 > transactions.EvalMaxArgs {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(transactions.EvalMaxArgs))
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0002)
- return
- }
- if zb0014 {
- (*z).LogicArgs[zb0002] = nil
- } else if (*z).LogicArgs[zb0002] != nil && cap((*z).LogicArgs[zb0002]) >= zb0013 {
- (*z).LogicArgs[zb0002] = ((*z).LogicArgs[zb0002])[:zb0013]
- } else {
- (*z).LogicArgs[zb0002] = make([][]byte, zb0013)
- }
- for zb0003 := range (*z).LogicArgs[zb0002] {
- var zb0015 int
- zb0015, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0002, zb0003)
- return
- }
- if zb0015 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).LogicArgs[zb0002][zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).LogicArgs[zb0002][zb0003])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0002, zb0003)
- return
- }
- }
- }
- }
- if zb0004 > 0 {
- zb0004--
- {
- var zb0016 []byte
- var zb0017 int
- zb0017, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogicArgs")
- return
- }
- if zb0017 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxBitmaskSize))
- return
- }
- zb0016, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLogicArgs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogicArgs")
- return
- }
- (*z).BitmaskLogicArgs = bitmask(zb0016)
- }
- }
- if zb0004 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0004)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0005 {
- (*z) = encodedLsigs{}
- }
- for zb0004 > 0 {
- zb0004--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "lsigl":
- var zb0018 int
- var zb0019 bool
- zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Logic")
- return
- }
- if zb0018 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0018), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Logic")
- return
- }
- if zb0019 {
- (*z).Logic = nil
- } else if (*z).Logic != nil && cap((*z).Logic) >= zb0018 {
- (*z).Logic = ((*z).Logic)[:zb0018]
- } else {
- (*z).Logic = make([][]byte, zb0018)
- }
- for zb0001 := range (*z).Logic {
- var zb0020 int
- zb0020, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Logic", zb0001)
- return
- }
- if zb0020 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).Logic[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z).Logic[zb0001])
- if err != nil {
- err = msgp.WrapError(err, "Logic", zb0001)
- return
- }
- }
- case "lsiglbm":
- {
- var zb0021 []byte
- var zb0022 int
- zb0022, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogic")
- return
- }
- if zb0022 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxBitmaskSize))
- return
- }
- zb0021, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLogic))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogic")
- return
- }
- (*z).BitmaskLogic = bitmask(zb0021)
- }
- case "lsigarg":
- var zb0023 int
- var zb0024 bool
- zb0023, zb0024, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs")
- return
- }
- if zb0023 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LogicArgs")
- return
- }
- if zb0024 {
- (*z).LogicArgs = nil
- } else if (*z).LogicArgs != nil && cap((*z).LogicArgs) >= zb0023 {
- (*z).LogicArgs = ((*z).LogicArgs)[:zb0023]
- } else {
- (*z).LogicArgs = make([][][]byte, zb0023)
- }
- for zb0002 := range (*z).LogicArgs {
- var zb0025 int
- var zb0026 bool
- zb0025, zb0026, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0002)
- return
- }
- if zb0025 > transactions.EvalMaxArgs {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(transactions.EvalMaxArgs))
- err = msgp.WrapError(err, "LogicArgs", zb0002)
- return
- }
- if zb0026 {
- (*z).LogicArgs[zb0002] = nil
- } else if (*z).LogicArgs[zb0002] != nil && cap((*z).LogicArgs[zb0002]) >= zb0025 {
- (*z).LogicArgs[zb0002] = ((*z).LogicArgs[zb0002])[:zb0025]
- } else {
- (*z).LogicArgs[zb0002] = make([][]byte, zb0025)
- }
- for zb0003 := range (*z).LogicArgs[zb0002] {
- var zb0027 int
- zb0027, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0002, zb0003)
- return
- }
- if zb0027 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0027), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).LogicArgs[zb0002][zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).LogicArgs[zb0002][zb0003])
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0002, zb0003)
- return
- }
- }
- }
- case "lsigargbm":
- {
- var zb0028 []byte
- var zb0029 int
- zb0029, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogicArgs")
- return
- }
- if zb0029 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0029), uint64(maxBitmaskSize))
- return
- }
- zb0028, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLogicArgs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogicArgs")
- return
- }
- (*z).BitmaskLogicArgs = bitmask(zb0028)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedLsigs) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedLsigs)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedLsigs) Msgsize() (s int) {
- s = 1 + 6 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).Logic {
- s += msgp.BytesPrefixSize + len((*z).Logic[zb0001])
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskLogic)) + 8 + msgp.ArrayHeaderSize
- for zb0002 := range (*z).LogicArgs {
- s += msgp.ArrayHeaderSize
- for zb0003 := range (*z).LogicArgs[zb0002] {
- s += msgp.BytesPrefixSize + len((*z).LogicArgs[zb0002][zb0003])
- }
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskLogicArgs))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedLsigs) MsgIsZero() bool {
- return (len((*z).Logic) == 0) && (len((*z).BitmaskLogic) == 0) && (len((*z).LogicArgs) == 0) && (len((*z).BitmaskLogicArgs) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedMsigs) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0003Len := uint32(6)
- var zb0003Mask uint8 /* 7 bits */
- if len((*z).Threshold) == 0 {
- zb0003Len--
- zb0003Mask |= 0x2
- }
- if len((*z).BitmaskThreshold) == 0 {
- zb0003Len--
- zb0003Mask |= 0x4
- }
- if len((*z).Version) == 0 {
- zb0003Len--
- zb0003Mask |= 0x8
- }
- if len((*z).BitmaskVersion) == 0 {
- zb0003Len--
- zb0003Mask |= 0x10
- }
- if len((*z).Subsigs) == 0 {
- zb0003Len--
- zb0003Mask |= 0x20
- }
- if len((*z).BitmaskSubsigs) == 0 {
- zb0003Len--
- zb0003Mask |= 0x40
- }
- // variable map header, size zb0003Len
- o = append(o, 0x80|uint8(zb0003Len))
- if zb0003Len != 0 {
- if (zb0003Mask & 0x2) == 0 { // if not empty
- // string "msigthr"
- o = append(o, 0xa7, 0x6d, 0x73, 0x69, 0x67, 0x74, 0x68, 0x72)
- o = msgp.AppendBytes(o, (*z).Threshold)
- }
- if (zb0003Mask & 0x4) == 0 { // if not empty
- // string "msigthrbm"
- o = append(o, 0xa9, 0x6d, 0x73, 0x69, 0x67, 0x74, 0x68, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskThreshold))
- }
- if (zb0003Mask & 0x8) == 0 { // if not empty
- // string "msigv"
- o = append(o, 0xa5, 0x6d, 0x73, 0x69, 0x67, 0x76)
- o = msgp.AppendBytes(o, (*z).Version)
- }
- if (zb0003Mask & 0x10) == 0 { // if not empty
- // string "msigvbm"
- o = append(o, 0xa7, 0x6d, 0x73, 0x69, 0x67, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskVersion))
- }
- if (zb0003Mask & 0x20) == 0 { // if not empty
- // string "subsig"
- o = append(o, 0xa6, 0x73, 0x75, 0x62, 0x73, 0x69, 0x67)
- if (*z).Subsigs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Subsigs)))
- }
- for zb0001 := range (*z).Subsigs {
- if (*z).Subsigs[zb0001] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Subsigs[zb0001])))
- }
- for zb0002 := range (*z).Subsigs[zb0001] {
- o = (*z).Subsigs[zb0001][zb0002].MarshalMsg(o)
- }
- }
- }
- if (zb0003Mask & 0x40) == 0 { // if not empty
- // string "subsigsbm"
- o = append(o, 0xa9, 0x73, 0x75, 0x62, 0x73, 0x69, 0x67, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskSubsigs))
- }
- }
- return
-}
-
-func (_ *encodedMsigs) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedMsigs)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedMsigs) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- zb0005, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Version")
- return
- }
- if zb0005 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).Version, bts, err = msgp.ReadBytesBytes(bts, (*z).Version)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Version")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0006 []byte
- var zb0007 int
- zb0007, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVersion")
- return
- }
- if zb0007 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(maxBitmaskSize))
- return
- }
- zb0006, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskVersion))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVersion")
- return
- }
- (*z).BitmaskVersion = bitmask(zb0006)
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0008 int
- zb0008, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Threshold")
- return
- }
- if zb0008 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).Threshold, bts, err = msgp.ReadBytesBytes(bts, (*z).Threshold)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Threshold")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0009 []byte
- var zb0010 int
- zb0010, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskThreshold")
- return
- }
- if zb0010 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(maxBitmaskSize))
- return
- }
- zb0009, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskThreshold))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskThreshold")
- return
- }
- (*z).BitmaskThreshold = bitmask(zb0009)
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs")
- return
- }
- if zb0011 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Subsigs")
- return
- }
- if zb0012 {
- (*z).Subsigs = nil
- } else if (*z).Subsigs != nil && cap((*z).Subsigs) >= zb0011 {
- (*z).Subsigs = ((*z).Subsigs)[:zb0011]
- } else {
- (*z).Subsigs = make([][]crypto.MultisigSubsig, zb0011)
- }
- for zb0001 := range (*z).Subsigs {
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001)
- return
- }
- if zb0013 > crypto.MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(crypto.MaxMultisig))
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001)
- return
- }
- if zb0014 {
- (*z).Subsigs[zb0001] = nil
- } else if (*z).Subsigs[zb0001] != nil && cap((*z).Subsigs[zb0001]) >= zb0013 {
- (*z).Subsigs[zb0001] = ((*z).Subsigs[zb0001])[:zb0013]
- } else {
- (*z).Subsigs[zb0001] = make([]crypto.MultisigSubsig, zb0013)
- }
- for zb0002 := range (*z).Subsigs[zb0001] {
- bts, err = (*z).Subsigs[zb0001][zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001, zb0002)
- return
- }
- }
- }
- }
- if zb0003 > 0 {
- zb0003--
- {
- var zb0015 []byte
- var zb0016 int
- zb0016, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSubsigs")
- return
- }
- if zb0016 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(maxBitmaskSize))
- return
- }
- zb0015, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSubsigs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSubsigs")
- return
- }
- (*z).BitmaskSubsigs = bitmask(zb0015)
- }
- }
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0004 {
- (*z) = encodedMsigs{}
- }
- for zb0003 > 0 {
- zb0003--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "msigv":
- var zb0017 int
- zb0017, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Version")
- return
- }
- if zb0017 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).Version, bts, err = msgp.ReadBytesBytes(bts, (*z).Version)
- if err != nil {
- err = msgp.WrapError(err, "Version")
- return
- }
- case "msigvbm":
- {
- var zb0018 []byte
- var zb0019 int
- zb0019, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVersion")
- return
- }
- if zb0019 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(maxBitmaskSize))
- return
- }
- zb0018, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskVersion))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVersion")
- return
- }
- (*z).BitmaskVersion = bitmask(zb0018)
- }
- case "msigthr":
- var zb0020 int
- zb0020, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Threshold")
- return
- }
- if zb0020 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).Threshold, bts, err = msgp.ReadBytesBytes(bts, (*z).Threshold)
- if err != nil {
- err = msgp.WrapError(err, "Threshold")
- return
- }
- case "msigthrbm":
- {
- var zb0021 []byte
- var zb0022 int
- zb0022, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskThreshold")
- return
- }
- if zb0022 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxBitmaskSize))
- return
- }
- zb0021, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskThreshold))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskThreshold")
- return
- }
- (*z).BitmaskThreshold = bitmask(zb0021)
- }
- case "subsig":
- var zb0023 int
- var zb0024 bool
- zb0023, zb0024, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs")
- return
- }
- if zb0023 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Subsigs")
- return
- }
- if zb0024 {
- (*z).Subsigs = nil
- } else if (*z).Subsigs != nil && cap((*z).Subsigs) >= zb0023 {
- (*z).Subsigs = ((*z).Subsigs)[:zb0023]
- } else {
- (*z).Subsigs = make([][]crypto.MultisigSubsig, zb0023)
- }
- for zb0001 := range (*z).Subsigs {
- var zb0025 int
- var zb0026 bool
- zb0025, zb0026, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs", zb0001)
- return
- }
- if zb0025 > crypto.MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(crypto.MaxMultisig))
- err = msgp.WrapError(err, "Subsigs", zb0001)
- return
- }
- if zb0026 {
- (*z).Subsigs[zb0001] = nil
- } else if (*z).Subsigs[zb0001] != nil && cap((*z).Subsigs[zb0001]) >= zb0025 {
- (*z).Subsigs[zb0001] = ((*z).Subsigs[zb0001])[:zb0025]
- } else {
- (*z).Subsigs[zb0001] = make([]crypto.MultisigSubsig, zb0025)
- }
- for zb0002 := range (*z).Subsigs[zb0001] {
- bts, err = (*z).Subsigs[zb0001][zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs", zb0001, zb0002)
- return
- }
- }
- }
- case "subsigsbm":
- {
- var zb0027 []byte
- var zb0028 int
- zb0028, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSubsigs")
- return
- }
- if zb0028 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(maxBitmaskSize))
- return
- }
- zb0027, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSubsigs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSubsigs")
- return
- }
- (*z).BitmaskSubsigs = bitmask(zb0027)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedMsigs) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedMsigs)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedMsigs) Msgsize() (s int) {
- s = 1 + 6 + msgp.BytesPrefixSize + len((*z).Version) + 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskVersion)) + 8 + msgp.BytesPrefixSize + len((*z).Threshold) + 10 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskThreshold)) + 7 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).Subsigs {
- s += msgp.ArrayHeaderSize
- for zb0002 := range (*z).Subsigs[zb0001] {
- s += (*z).Subsigs[zb0001][zb0002].Msgsize()
- }
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskSubsigs))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedMsigs) MsgIsZero() bool {
- return (len((*z).Version) == 0) && (len((*z).BitmaskVersion) == 0) && (len((*z).Threshold) == 0) && (len((*z).BitmaskThreshold) == 0) && (len((*z).Subsigs) == 0) && (len((*z).BitmaskSubsigs) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedPaymentTxnFields) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0002Len := uint32(6)
- var zb0002Mask uint8 /* 7 bits */
- if len((*z).Amount) == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
- }
- if len((*z).BitmaskAmount) == 0 {
- zb0002Len--
- zb0002Mask |= 0x4
- }
- if len((*z).CloseRemainderTo) == 0 {
- zb0002Len--
- zb0002Mask |= 0x8
- }
- if len((*z).BitmaskCloseRemainderTo) == 0 {
- zb0002Len--
- zb0002Mask |= 0x10
- }
- if len((*z).Receiver) == 0 {
- zb0002Len--
- zb0002Mask |= 0x20
- }
- if len((*z).BitmaskReceiver) == 0 {
- zb0002Len--
- zb0002Mask |= 0x40
- }
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if zb0002Len != 0 {
- if (zb0002Mask & 0x2) == 0 { // if not empty
- // string "amt"
- o = append(o, 0xa3, 0x61, 0x6d, 0x74)
- if (*z).Amount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Amount)))
- }
- for zb0001 := range (*z).Amount {
- o = (*z).Amount[zb0001].MarshalMsg(o)
- }
- }
- if (zb0002Mask & 0x4) == 0 { // if not empty
- // string "amtbm"
- o = append(o, 0xa5, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAmount))
- }
- if (zb0002Mask & 0x8) == 0 { // if not empty
- // string "close"
- o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).CloseRemainderTo)
- }
- if (zb0002Mask & 0x10) == 0 { // if not empty
- // string "closebm"
- o = append(o, 0xa7, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskCloseRemainderTo))
- }
- if (zb0002Mask & 0x20) == 0 { // if not empty
- // string "rcv"
- o = append(o, 0xa3, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).Receiver)
- }
- if (zb0002Mask & 0x40) == 0 { // if not empty
- // string "rcvbm"
- o = append(o, 0xa5, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskReceiver))
- }
- }
- return
-}
-
-func (_ *encodedPaymentTxnFields) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedPaymentTxnFields)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedPaymentTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > 0 {
- zb0002--
- var zb0004 int
- zb0004, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- if zb0004 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(maxAddressBytes))
- return
- }
- (*z).Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).Receiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- }
- if zb0002 > 0 {
- zb0002--
- {
- var zb0005 []byte
- var zb0006 int
- zb0006, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- if zb0006 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(maxBitmaskSize))
- return
- }
- zb0005, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- (*z).BitmaskReceiver = bitmask(zb0005)
- }
- }
- if zb0002 > 0 {
- zb0002--
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0007 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0008 {
- (*z).Amount = nil
- } else if (*z).Amount != nil && cap((*z).Amount) >= zb0007 {
- (*z).Amount = ((*z).Amount)[:zb0007]
- } else {
- (*z).Amount = make([]basics.MicroAlgos, zb0007)
- }
- for zb0001 := range (*z).Amount {
- bts, err = (*z).Amount[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount", zb0001)
- return
- }
- }
- }
- if zb0002 > 0 {
- zb0002--
- {
- var zb0009 []byte
- var zb0010 int
- zb0010, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- if zb0010 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(maxBitmaskSize))
- return
- }
- zb0009, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- (*z).BitmaskAmount = bitmask(zb0009)
- }
- }
- if zb0002 > 0 {
- zb0002--
- var zb0011 int
- zb0011, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- if zb0011 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(maxAddressBytes))
- return
- }
- (*z).CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- }
- if zb0002 > 0 {
- zb0002--
- {
- var zb0012 []byte
- var zb0013 int
- zb0013, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- if zb0013 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(maxBitmaskSize))
- return
- }
- zb0012, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- (*z).BitmaskCloseRemainderTo = bitmask(zb0012)
- }
- }
- if zb0002 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0002)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = encodedPaymentTxnFields{}
- }
- for zb0002 > 0 {
- zb0002--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "rcv":
- var zb0014 int
- zb0014, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- if zb0014 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(maxAddressBytes))
- return
- }
- (*z).Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).Receiver)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- case "rcvbm":
- {
- var zb0015 []byte
- var zb0016 int
- zb0016, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- if zb0016 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(maxBitmaskSize))
- return
- }
- zb0015, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- (*z).BitmaskReceiver = bitmask(zb0015)
- }
- case "amt":
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0017 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0018 {
- (*z).Amount = nil
- } else if (*z).Amount != nil && cap((*z).Amount) >= zb0017 {
- (*z).Amount = ((*z).Amount)[:zb0017]
- } else {
- (*z).Amount = make([]basics.MicroAlgos, zb0017)
- }
- for zb0001 := range (*z).Amount {
- bts, err = (*z).Amount[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount", zb0001)
- return
- }
- }
- case "amtbm":
- {
- var zb0019 []byte
- var zb0020 int
- zb0020, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- if zb0020 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(maxBitmaskSize))
- return
- }
- zb0019, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- (*z).BitmaskAmount = bitmask(zb0019)
- }
- case "close":
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- if zb0021 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(maxAddressBytes))
- return
- }
- (*z).CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- case "closebm":
- {
- var zb0022 []byte
- var zb0023 int
- zb0023, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- if zb0023 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(maxBitmaskSize))
- return
- }
- zb0022, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- (*z).BitmaskCloseRemainderTo = bitmask(zb0022)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedPaymentTxnFields) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedPaymentTxnFields)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedPaymentTxnFields) Msgsize() (s int) {
- s = 1 + 4 + msgp.BytesPrefixSize + len((*z).Receiver) + 6 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskReceiver)) + 4 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).Amount {
- s += (*z).Amount[zb0001].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAmount)) + 6 + msgp.BytesPrefixSize + len((*z).CloseRemainderTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskCloseRemainderTo))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedPaymentTxnFields) MsgIsZero() bool {
- return (len((*z).Receiver) == 0) && (len((*z).BitmaskReceiver) == 0) && (len((*z).Amount) == 0) && (len((*z).BitmaskAmount) == 0) && (len((*z).CloseRemainderTo) == 0) && (len((*z).BitmaskCloseRemainderTo) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedSignedTxns) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0049Len := uint32(126)
- var zb0049Mask [3]uint64 /* 140 bits */
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000
- }
- if len((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000
- }
- if len((*z).encodedTxns.encodedPaymentTxnFields.Amount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000
- }
- if len((*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000000000
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000000000
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000000000
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000000000
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.CertRound) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.CertType) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000000000000
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2
- }
- if len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4
- }
- if len((*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8
- }
- if len((*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100
- }
- if len((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200
- }
- if len((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400
- }
- if len((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800
- }
- if len((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.Fee) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskFee) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.FirstValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskGroup) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000
- }
- if len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000
- }
- if len((*z).encodedLsigs.LogicArgs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000000
- }
- if len((*z).encodedLsigs.BitmaskLogicArgs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000000
- }
- if len((*z).encodedLsigs.Logic) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000000
- }
- if len((*z).encodedLsigs.BitmaskLogic) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.LastValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.Lease) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskLease) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000000
- }
- if len((*z).encodedMsigs.Threshold) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000000
- }
- if len((*z).encodedMsigs.BitmaskThreshold) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000000
- }
- if len((*z).encodedMsigs.Version) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000000000
- }
- if len((*z).encodedMsigs.BitmaskVersion) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000000000
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.Note) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskNote) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000000000
- }
- if len((*z).encodedTxns.encodedPaymentTxnFields.Receiver) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000000000
- }
- if len((*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.RekeyTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000000000
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.SelectionPK) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000000000
- }
- if len((*z).AuthAddr) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000000000000
- }
- if len((*z).BitmaskAuthAddr) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000000000000
- }
- if len((*z).Sig) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000000000000
- }
- if len((*z).BitmaskSig) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000000000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.Sender) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000000000000
- }
- if len((*z).encodedTxns.encodedTxnHeaders.BitmaskSender) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000000000000
- }
- if len((*z).encodedMsigs.Subsigs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000000000000
- }
- if len((*z).encodedMsigs.BitmaskSubsigs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000000000000
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000000000000
- }
- if len((*z).encodedTxns.TxType) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000000000000
- }
- if len((*z).encodedTxns.BitmaskTxType) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000000000000
- }
- if (*z).encodedTxns.TxTypeOffset == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x1
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x2
- }
- if len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x4
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.VoteFirst) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x8
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x10
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x20
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x40
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.VotePK) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x80
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.VoteLast) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x100
- }
- if len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x200
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x400
- }
- if len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x800
- }
- // variable map header, size zb0049Len
- o = msgp.AppendMapHeader(o, zb0049Len)
- if zb0049Len != 0 {
- if (zb0049Mask[0] & 0x4000) == 0 { // if not empty
- // string "aamt"
- o = append(o, 0xa4, 0x61, 0x61, 0x6d, 0x74)
- if (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount)))
- }
- for zb0021 := range (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount[zb0021])
- }
- }
- if (zb0049Mask[0] & 0x8000) == 0 { // if not empty
- // string "aamtbm"
- o = append(o, 0xa6, 0x61, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount))
- }
- if (zb0049Mask[0] & 0x10000) == 0 { // if not empty
- // string "aclose"
- o = append(o, 0xa6, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo)
- }
- if (zb0049Mask[0] & 0x20000) == 0 { // if not empty
- // string "aclosebm"
- o = append(o, 0xa8, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- }
- if (zb0049Mask[0] & 0x40000) == 0 { // if not empty
- // string "afrzbm"
- o = append(o, 0xa6, 0x61, 0x66, 0x72, 0x7a, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- }
- if (zb0049Mask[0] & 0x80000) == 0 { // if not empty
- // string "am"
- o = append(o, 0xa2, 0x61, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- }
- if (zb0049Mask[0] & 0x100000) == 0 { // if not empty
- // string "ambm"
- o = append(o, 0xa4, 0x61, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- }
- if (zb0049Mask[0] & 0x200000) == 0 { // if not empty
- // string "amt"
- o = append(o, 0xa3, 0x61, 0x6d, 0x74)
- if (*z).encodedTxns.encodedPaymentTxnFields.Amount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedPaymentTxnFields.Amount)))
- }
- for zb0013 := range (*z).encodedTxns.encodedPaymentTxnFields.Amount {
- o = (*z).encodedTxns.encodedPaymentTxnFields.Amount[zb0013].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x400000) == 0 { // if not empty
- // string "amtbm"
- o = append(o, 0xa5, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount))
- }
- if (zb0049Mask[0] & 0x800000) == 0 { // if not empty
- // string "an"
- o = append(o, 0xa2, 0x61, 0x6e)
- if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName)))
- }
- for zb0018 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- o = msgp.AppendString(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018])
- }
- }
- if (zb0049Mask[0] & 0x1000000) == 0 { // if not empty
- // string "anbm"
- o = append(o, 0xa4, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- }
- if (zb0049Mask[0] & 0x2000000) == 0 { // if not empty
- // string "apaa"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs)))
- }
- for zb0024 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024])))
- }
- for zb0025 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- }
- }
- }
- if (zb0049Mask[0] & 0x4000000) == 0 { // if not empty
- // string "apaabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- }
- if (zb0049Mask[0] & 0x8000000) == 0 { // if not empty
- // string "apan"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion)
- }
- if (zb0049Mask[0] & 0x10000000) == 0 { // if not empty
- // string "apanbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion))
- }
- if (zb0049Mask[0] & 0x20000000) == 0 { // if not empty
- // string "apap"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram)))
- }
- for zb0036 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- }
- }
- if (zb0049Mask[0] & 0x40000000) == 0 { // if not empty
- // string "apapbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- }
- if (zb0049Mask[0] & 0x80000000) == 0 { // if not empty
- // string "apas"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets)))
- }
- for zb0030 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030])))
- }
- for zb0031 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- o = (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x100000000) == 0 { // if not empty
- // string "apasbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets))
- }
- if (zb0049Mask[0] & 0x200000000) == 0 { // if not empty
- // string "apat"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts)))
- }
- for zb0026 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts {
- if (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026])))
- }
- for zb0027 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- o = (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x400000000) == 0 { // if not empty
- // string "apatbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts))
- }
- if (zb0049Mask[0] & 0x800000000) == 0 { // if not empty
- // string "apep"
- o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages)))
- }
- for zb0038 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages {
- o = msgp.AppendUint32(o, (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages[zb0038])
- }
- }
- if (zb0049Mask[0] & 0x1000000000) == 0 { // if not empty
- // string "apepbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x65, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- }
- if (zb0049Mask[0] & 0x2000000000) == 0 { // if not empty
- // string "apfa"
- o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps)))
- }
- for zb0028 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028])))
- }
- for zb0029 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- o = (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x4000000000) == 0 { // if not empty
- // string "apfabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x66, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps))
- }
- if (zb0049Mask[0] & 0x8000000000) == 0 { // if not empty
- // string "apid"
- o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID)))
- }
- for zb0023 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- o = (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x10000000000) == 0 { // if not empty
- // string "apidbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID))
- }
- if (zb0049Mask[0] & 0x20000000000) == 0 { // if not empty
- // string "apsu"
- o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram)))
- }
- for zb0037 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- }
- }
- if (zb0049Mask[0] & 0x40000000000) == 0 { // if not empty
- // string "apsubm"
- o = append(o, 0xa6, 0x61, 0x70, 0x73, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- }
- if (zb0049Mask[0] & 0x80000000000) == 0 { // if not empty
- // string "arcv"
- o = append(o, 0xa4, 0x61, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver)
- }
- if (zb0049Mask[0] & 0x100000000000) == 0 { // if not empty
- // string "arcvbm"
- o = append(o, 0xa6, 0x61, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- }
- if (zb0049Mask[0] & 0x200000000000) == 0 { // if not empty
- // string "asnd"
- o = append(o, 0xa4, 0x61, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender)
- }
- if (zb0049Mask[0] & 0x400000000000) == 0 { // if not empty
- // string "asndbm"
- o = append(o, 0xa6, 0x61, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender))
- }
- if (zb0049Mask[0] & 0x800000000000) == 0 { // if not empty
- // string "au"
- o = append(o, 0xa2, 0x61, 0x75)
- if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL)))
- }
- for zb0019 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- o = msgp.AppendString(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019])
- }
- }
- if (zb0049Mask[0] & 0x1000000000000) == 0 { // if not empty
- // string "aubm"
- o = append(o, 0xa4, 0x61, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- }
- if (zb0049Mask[0] & 0x2000000000000) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- }
- if (zb0049Mask[0] & 0x4000000000000) == 0 { // if not empty
- // string "caid"
- o = append(o, 0xa4, 0x63, 0x61, 0x69, 0x64)
- if (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset)))
- }
- for zb0014 := range (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- o = (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x8000000000000) == 0 { // if not empty
- // string "caidbm"
- o = append(o, 0xa6, 0x63, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset))
- }
- if (zb0049Mask[0] & 0x10000000000000) == 0 { // if not empty
- // string "cbm"
- o = append(o, 0xa3, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- }
- if (zb0049Mask[0] & 0x20000000000000) == 0 { // if not empty
- // string "certP"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x50)
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs)))
- }
- for zb0044 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044])))
- }
- for zb0045 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- o = (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x40000000000000) == 0 { // if not empty
- // string "certPbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x50, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- }
- if (zb0049Mask[0] & 0x80000000000000) == 0 { // if not empty
- // string "certS"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x53)
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs)))
- }
- for zb0042 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042])))
- }
- for zb0043 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- o = (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x100000000000000) == 0 { // if not empty
- // string "certSbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x53, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- }
- if (zb0049Mask[0] & 0x200000000000000) == 0 { // if not empty
- // string "certc"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit)
- }
- if (zb0049Mask[0] & 0x400000000000000) == 0 { // if not empty
- // string "certcbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- }
- if (zb0049Mask[0] & 0x800000000000000) == 0 { // if not empty
- // string "certr"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x72)
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals)))
- }
- for zb0046 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046])))
- }
- zb0047_keys := make([]uint64, 0, len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046]))
- for zb0047 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] {
- zb0047_keys = append(zb0047_keys, zb0047)
- }
- sort.Sort(SortUint64(zb0047_keys))
- for _, zb0047 := range zb0047_keys {
- zb0048 := (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046][zb0047]
- _ = zb0048
- o = msgp.AppendUint64(o, zb0047)
- o = zb0048.MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x1000000000000000) == 0 { // if not empty
- // string "certrbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- }
- if (zb0049Mask[0] & 0x2000000000000000) == 0 { // if not empty
- // string "certrnd"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64)
- if (*z).encodedTxns.encodedCompactCertTxnFields.CertRound == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.CertRound)))
- }
- for zb0039 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertRound {
- o = (*z).encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x4000000000000000) == 0 { // if not empty
- // string "certrndbm"
- o = append(o, 0xa9, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound))
- }
- if (zb0049Mask[0] & 0x8000000000000000) == 0 { // if not empty
- // string "certtype"
- o = append(o, 0xa8, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65)
- if (*z).encodedTxns.encodedCompactCertTxnFields.CertType == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.CertType)))
- }
- for zb0040 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertType {
- o = (*z).encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x1) == 0 { // if not empty
- // string "certtypebm"
- o = append(o, 0xaa, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType))
- }
- if (zb0049Mask[1] & 0x2) == 0 { // if not empty
- // string "certw"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x77)
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight)))
- }
- for zb0041 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0041])
- }
- }
- if (zb0049Mask[1] & 0x4) == 0 { // if not empty
- // string "certwbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x77, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- }
- if (zb0049Mask[1] & 0x8) == 0 { // if not empty
- // string "close"
- o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo)
- }
- if (zb0049Mask[1] & 0x10) == 0 { // if not empty
- // string "closebm"
- o = append(o, 0xa7, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- }
- if (zb0049Mask[1] & 0x20) == 0 { // if not empty
- // string "dc"
- o = append(o, 0xa2, 0x64, 0x63)
- if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals)))
- }
- for zb0016 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- o = msgp.AppendUint32(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0016])
- }
- }
- if (zb0049Mask[1] & 0x40) == 0 { // if not empty
- // string "dcbm"
- o = append(o, 0xa4, 0x64, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- }
- if (zb0049Mask[1] & 0x80) == 0 { // if not empty
- // string "dfbm"
- o = append(o, 0xa4, 0x64, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- }
- if (zb0049Mask[1] & 0x100) == 0 { // if not empty
- // string "f"
- o = append(o, 0xa1, 0x66)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- }
- if (zb0049Mask[1] & 0x200) == 0 { // if not empty
- // string "fadd"
- o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount)
- }
- if (zb0049Mask[1] & 0x400) == 0 { // if not empty
- // string "faddbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x64, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- }
- if (zb0049Mask[1] & 0x800) == 0 { // if not empty
- // string "faid"
- o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64)
- if (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset)))
- }
- for zb0022 := range (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- o = (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x1000) == 0 { // if not empty
- // string "faidbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- }
- if (zb0049Mask[1] & 0x2000) == 0 { // if not empty
- // string "fbm"
- o = append(o, 0xa3, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- }
- if (zb0049Mask[1] & 0x4000) == 0 { // if not empty
- // string "fee"
- o = append(o, 0xa3, 0x66, 0x65, 0x65)
- if (*z).encodedTxns.encodedTxnHeaders.Fee == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedTxnHeaders.Fee)))
- }
- for zb0006 := range (*z).encodedTxns.encodedTxnHeaders.Fee {
- o = (*z).encodedTxns.encodedTxnHeaders.Fee[zb0006].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x8000) == 0 { // if not empty
- // string "feebm"
- o = append(o, 0xa5, 0x66, 0x65, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFee))
- }
- if (zb0049Mask[1] & 0x10000) == 0 { // if not empty
- // string "fv"
- o = append(o, 0xa2, 0x66, 0x76)
- if (*z).encodedTxns.encodedTxnHeaders.FirstValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedTxnHeaders.FirstValid)))
- }
- for zb0007 := range (*z).encodedTxns.encodedTxnHeaders.FirstValid {
- o = (*z).encodedTxns.encodedTxnHeaders.FirstValid[zb0007].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x20000) == 0 { // if not empty
- // string "fvbm"
- o = append(o, 0xa4, 0x66, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid))
- }
- if (zb0049Mask[1] & 0x40000) == 0 { // if not empty
- // string "genbm"
- o = append(o, 0xa5, 0x67, 0x65, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID))
- }
- if (zb0049Mask[1] & 0x80000) == 0 { // if not empty
- // string "gnbs"
- o = append(o, 0xa4, 0x67, 0x6e, 0x62, 0x73)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice)))
- }
- for zb0035 := range (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0035])
- }
- }
- if (zb0049Mask[1] & 0x100000) == 0 { // if not empty
- // string "gnbsbm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- }
- if (zb0049Mask[1] & 0x200000) == 0 { // if not empty
- // string "gnui"
- o = append(o, 0xa4, 0x67, 0x6e, 0x75, 0x69)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint)))
- }
- for zb0034 := range (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint[zb0034])
- }
- }
- if (zb0049Mask[1] & 0x400000) == 0 { // if not empty
- // string "gnuibm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- }
- if (zb0049Mask[1] & 0x800000) == 0 { // if not empty
- // string "grpbm"
- o = append(o, 0xa5, 0x67, 0x72, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGroup))
- }
- if (zb0049Mask[1] & 0x1000000) == 0 { // if not empty
- // string "lnbs"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x62, 0x73)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice)))
- }
- for zb0033 := range (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice[zb0033])
- }
- }
- if (zb0049Mask[1] & 0x2000000) == 0 { // if not empty
- // string "lnbsbm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- }
- if (zb0049Mask[1] & 0x4000000) == 0 { // if not empty
- // string "lnui"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x75, 0x69)
- if (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint)))
- }
- for zb0032 := range (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint[zb0032])
- }
- }
- if (zb0049Mask[1] & 0x8000000) == 0 { // if not empty
- // string "lnuibm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- }
- if (zb0049Mask[1] & 0x10000000) == 0 { // if not empty
- // string "lsigarg"
- o = append(o, 0xa7, 0x6c, 0x73, 0x69, 0x67, 0x61, 0x72, 0x67)
- if (*z).encodedLsigs.LogicArgs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedLsigs.LogicArgs)))
- }
- for zb0004 := range (*z).encodedLsigs.LogicArgs {
- if (*z).encodedLsigs.LogicArgs[zb0004] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedLsigs.LogicArgs[zb0004])))
- }
- for zb0005 := range (*z).encodedLsigs.LogicArgs[zb0004] {
- o = msgp.AppendBytes(o, (*z).encodedLsigs.LogicArgs[zb0004][zb0005])
- }
- }
- }
- if (zb0049Mask[1] & 0x20000000) == 0 { // if not empty
- // string "lsigargbm"
- o = append(o, 0xa9, 0x6c, 0x73, 0x69, 0x67, 0x61, 0x72, 0x67, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedLsigs.BitmaskLogicArgs))
- }
- if (zb0049Mask[1] & 0x40000000) == 0 { // if not empty
- // string "lsigl"
- o = append(o, 0xa5, 0x6c, 0x73, 0x69, 0x67, 0x6c)
- if (*z).encodedLsigs.Logic == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedLsigs.Logic)))
- }
- for zb0003 := range (*z).encodedLsigs.Logic {
- o = msgp.AppendBytes(o, (*z).encodedLsigs.Logic[zb0003])
- }
- }
- if (zb0049Mask[1] & 0x80000000) == 0 { // if not empty
- // string "lsiglbm"
- o = append(o, 0xa7, 0x6c, 0x73, 0x69, 0x67, 0x6c, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedLsigs.BitmaskLogic))
- }
- if (zb0049Mask[1] & 0x100000000) == 0 { // if not empty
- // string "lv"
- o = append(o, 0xa2, 0x6c, 0x76)
- if (*z).encodedTxns.encodedTxnHeaders.LastValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedTxnHeaders.LastValid)))
- }
- for zb0008 := range (*z).encodedTxns.encodedTxnHeaders.LastValid {
- o = (*z).encodedTxns.encodedTxnHeaders.LastValid[zb0008].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x200000000) == 0 { // if not empty
- // string "lvbm"
- o = append(o, 0xa4, 0x6c, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid))
- }
- if (zb0049Mask[1] & 0x400000000) == 0 { // if not empty
- // string "lx"
- o = append(o, 0xa2, 0x6c, 0x78)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedTxnHeaders.Lease)
- }
- if (zb0049Mask[1] & 0x800000000) == 0 { // if not empty
- // string "lxbm"
- o = append(o, 0xa4, 0x6c, 0x78, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLease))
- }
- if (zb0049Mask[1] & 0x1000000000) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- }
- if (zb0049Mask[1] & 0x2000000000) == 0 { // if not empty
- // string "mbm"
- o = append(o, 0xa3, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- }
- if (zb0049Mask[1] & 0x4000000000) == 0 { // if not empty
- // string "msigthr"
- o = append(o, 0xa7, 0x6d, 0x73, 0x69, 0x67, 0x74, 0x68, 0x72)
- o = msgp.AppendBytes(o, (*z).encodedMsigs.Threshold)
- }
- if (zb0049Mask[1] & 0x8000000000) == 0 { // if not empty
- // string "msigthrbm"
- o = append(o, 0xa9, 0x6d, 0x73, 0x69, 0x67, 0x74, 0x68, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedMsigs.BitmaskThreshold))
- }
- if (zb0049Mask[1] & 0x10000000000) == 0 { // if not empty
- // string "msigv"
- o = append(o, 0xa5, 0x6d, 0x73, 0x69, 0x67, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedMsigs.Version)
- }
- if (zb0049Mask[1] & 0x20000000000) == 0 { // if not empty
- // string "msigvbm"
- o = append(o, 0xa7, 0x6d, 0x73, 0x69, 0x67, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedMsigs.BitmaskVersion))
- }
- if (zb0049Mask[1] & 0x40000000000) == 0 { // if not empty
- // string "nonpartbm"
- o = append(o, 0xa9, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation))
- }
- if (zb0049Mask[1] & 0x80000000000) == 0 { // if not empty
- // string "note"
- o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65)
- if (*z).encodedTxns.encodedTxnHeaders.Note == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedTxnHeaders.Note)))
- }
- for zb0009 := range (*z).encodedTxns.encodedTxnHeaders.Note {
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedTxnHeaders.Note[zb0009])
- }
- }
- if (zb0049Mask[1] & 0x100000000000) == 0 { // if not empty
- // string "notebm"
- o = append(o, 0xa6, 0x6e, 0x6f, 0x74, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskNote))
- }
- if (zb0049Mask[1] & 0x200000000000) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- }
- if (zb0049Mask[1] & 0x400000000000) == 0 { // if not empty
- // string "rbm"
- o = append(o, 0xa3, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- }
- if (zb0049Mask[1] & 0x800000000000) == 0 { // if not empty
- // string "rcv"
- o = append(o, 0xa3, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedPaymentTxnFields.Receiver)
- }
- if (zb0049Mask[1] & 0x1000000000000) == 0 { // if not empty
- // string "rcvbm"
- o = append(o, 0xa5, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver))
- }
- if (zb0049Mask[1] & 0x2000000000000) == 0 { // if not empty
- // string "rekey"
- o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedTxnHeaders.RekeyTo)
- }
- if (zb0049Mask[1] & 0x4000000000000) == 0 { // if not empty
- // string "rekeybm"
- o = append(o, 0xa7, 0x72, 0x65, 0x6b, 0x65, 0x79, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo))
- }
- if (zb0049Mask[1] & 0x8000000000000) == 0 { // if not empty
- // string "selkey"
- o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedKeyregTxnFields.SelectionPK)
- }
- if (zb0049Mask[1] & 0x10000000000000) == 0 { // if not empty
- // string "sgnr"
- o = append(o, 0xa4, 0x73, 0x67, 0x6e, 0x72)
- o = msgp.AppendBytes(o, (*z).AuthAddr)
- }
- if (zb0049Mask[1] & 0x20000000000000) == 0 { // if not empty
- // string "sgnrbm"
- o = append(o, 0xa6, 0x73, 0x67, 0x6e, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskAuthAddr))
- }
- if (zb0049Mask[1] & 0x40000000000000) == 0 { // if not empty
- // string "sig"
- o = append(o, 0xa3, 0x73, 0x69, 0x67)
- o = msgp.AppendBytes(o, (*z).Sig)
- }
- if (zb0049Mask[1] & 0x80000000000000) == 0 { // if not empty
- // string "sigbm"
- o = append(o, 0xa5, 0x73, 0x69, 0x67, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskSig))
- }
- if (zb0049Mask[1] & 0x100000000000000) == 0 { // if not empty
- // string "snd"
- o = append(o, 0xa3, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedTxnHeaders.Sender)
- }
- if (zb0049Mask[1] & 0x200000000000000) == 0 { // if not empty
- // string "sndbm"
- o = append(o, 0xa5, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskSender))
- }
- if (zb0049Mask[1] & 0x400000000000000) == 0 { // if not empty
- // string "subsig"
- o = append(o, 0xa6, 0x73, 0x75, 0x62, 0x73, 0x69, 0x67)
- if (*z).encodedMsigs.Subsigs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedMsigs.Subsigs)))
- }
- for zb0001 := range (*z).encodedMsigs.Subsigs {
- if (*z).encodedMsigs.Subsigs[zb0001] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedMsigs.Subsigs[zb0001])))
- }
- for zb0002 := range (*z).encodedMsigs.Subsigs[zb0001] {
- o = (*z).encodedMsigs.Subsigs[zb0001][zb0002].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[1] & 0x800000000000000) == 0 { // if not empty
- // string "subsigsbm"
- o = append(o, 0xa9, 0x73, 0x75, 0x62, 0x73, 0x69, 0x67, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedMsigs.BitmaskSubsigs))
- }
- if (zb0049Mask[1] & 0x1000000000000000) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total)))
- }
- for zb0015 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0015])
- }
- }
- if (zb0049Mask[1] & 0x2000000000000000) == 0 { // if not empty
- // string "tbm"
- o = append(o, 0xa3, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- }
- if (zb0049Mask[1] & 0x4000000000000000) == 0 { // if not empty
- // string "type"
- o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedTxns.TxType)
- }
- if (zb0049Mask[1] & 0x8000000000000000) == 0 { // if not empty
- // string "typebm"
- o = append(o, 0xa6, 0x74, 0x79, 0x70, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.BitmaskTxType))
- }
- if (zb0049Mask[2] & 0x1) == 0 { // if not empty
- // string "typeo"
- o = append(o, 0xa5, 0x74, 0x79, 0x70, 0x65, 0x6f)
- o = msgp.AppendByte(o, (*z).encodedTxns.TxTypeOffset)
- }
- if (zb0049Mask[2] & 0x2) == 0 { // if not empty
- // string "un"
- o = append(o, 0xa2, 0x75, 0x6e)
- if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName)))
- }
- for zb0017 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- o = msgp.AppendString(o, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017])
- }
- }
- if (zb0049Mask[2] & 0x4) == 0 { // if not empty
- // string "unbm"
- o = append(o, 0xa4, 0x75, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- }
- if (zb0049Mask[2] & 0x8) == 0 { // if not empty
- // string "votefst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74)
- if (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedKeyregTxnFields.VoteFirst)))
- }
- for zb0010 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst {
- o = (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].MarshalMsg(o)
- }
- }
- if (zb0049Mask[2] & 0x10) == 0 { // if not empty
- // string "votefstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst))
- }
- if (zb0049Mask[2] & 0x20) == 0 { // if not empty
- // string "votekbm"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys))
- }
- if (zb0049Mask[2] & 0x40) == 0 { // if not empty
- // string "votekd"
- o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64)
- if (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution)))
- }
- for zb0012 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution {
- o = msgp.AppendUint64(o, (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution[zb0012])
- }
- }
- if (zb0049Mask[2] & 0x80) == 0 { // if not empty
- // string "votekey"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedTxns.encodedKeyregTxnFields.VotePK)
- }
- if (zb0049Mask[2] & 0x100) == 0 { // if not empty
- // string "votelst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74)
- if (*z).encodedTxns.encodedKeyregTxnFields.VoteLast == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedKeyregTxnFields.VoteLast)))
- }
- for zb0011 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteLast {
- o = (*z).encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].MarshalMsg(o)
- }
- }
- if (zb0049Mask[2] & 0x200) == 0 { // if not empty
- // string "votelstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast))
- }
- if (zb0049Mask[2] & 0x400) == 0 { // if not empty
- // string "xaid"
- o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64)
- if (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset)))
- }
- for zb0020 := range (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- o = (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].MarshalMsg(o)
- }
- }
- if (zb0049Mask[2] & 0x800) == 0 { // if not empty
- // string "xaidbm"
- o = append(o, 0xa6, 0x78, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset))
- }
- }
- return
-}
-
-func (_ *encodedSignedTxns) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedSignedTxns)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedSignedTxns) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0049 int
- var zb0050 bool
- zb0049, zb0050, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0049, zb0050, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0049 > 0 {
- zb0049--
- var zb0051 int
- zb0051, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sig")
- return
- }
- if zb0051 > maxSignatureBytes {
- err = msgp.ErrOverflow(uint64(zb0051), uint64(maxSignatureBytes))
- return
- }
- (*z).Sig, bts, err = msgp.ReadBytesBytes(bts, (*z).Sig)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sig")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0052 []byte
- var zb0053 int
- zb0053, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSig")
- return
- }
- if zb0053 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0053), uint64(maxBitmaskSize))
- return
- }
- zb0052, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSig))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSig")
- return
- }
- (*z).BitmaskSig = bitmask(zb0052)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0054 int
- zb0054, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Version")
- return
- }
- if zb0054 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedMsigs.Version, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedMsigs.Version)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Version")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0055 []byte
- var zb0056 int
- zb0056, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVersion")
- return
- }
- if zb0056 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0056), uint64(maxBitmaskSize))
- return
- }
- zb0055, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedMsigs.BitmaskVersion))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVersion")
- return
- }
- (*z).encodedMsigs.BitmaskVersion = bitmask(zb0055)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0057 int
- zb0057, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Threshold")
- return
- }
- if zb0057 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0057), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedMsigs.Threshold, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedMsigs.Threshold)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Threshold")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0058 []byte
- var zb0059 int
- zb0059, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskThreshold")
- return
- }
- if zb0059 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0059), uint64(maxBitmaskSize))
- return
- }
- zb0058, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedMsigs.BitmaskThreshold))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskThreshold")
- return
- }
- (*z).encodedMsigs.BitmaskThreshold = bitmask(zb0058)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0060 int
- var zb0061 bool
- zb0060, zb0061, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs")
- return
- }
- if zb0060 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0060), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Subsigs")
- return
- }
- if zb0061 {
- (*z).encodedMsigs.Subsigs = nil
- } else if (*z).encodedMsigs.Subsigs != nil && cap((*z).encodedMsigs.Subsigs) >= zb0060 {
- (*z).encodedMsigs.Subsigs = ((*z).encodedMsigs.Subsigs)[:zb0060]
- } else {
- (*z).encodedMsigs.Subsigs = make([][]crypto.MultisigSubsig, zb0060)
- }
- for zb0001 := range (*z).encodedMsigs.Subsigs {
- var zb0062 int
- var zb0063 bool
- zb0062, zb0063, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001)
- return
- }
- if zb0062 > crypto.MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0062), uint64(crypto.MaxMultisig))
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001)
- return
- }
- if zb0063 {
- (*z).encodedMsigs.Subsigs[zb0001] = nil
- } else if (*z).encodedMsigs.Subsigs[zb0001] != nil && cap((*z).encodedMsigs.Subsigs[zb0001]) >= zb0062 {
- (*z).encodedMsigs.Subsigs[zb0001] = ((*z).encodedMsigs.Subsigs[zb0001])[:zb0062]
- } else {
- (*z).encodedMsigs.Subsigs[zb0001] = make([]crypto.MultisigSubsig, zb0062)
- }
- for zb0002 := range (*z).encodedMsigs.Subsigs[zb0001] {
- bts, err = (*z).encodedMsigs.Subsigs[zb0001][zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001, zb0002)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0064 []byte
- var zb0065 int
- zb0065, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSubsigs")
- return
- }
- if zb0065 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0065), uint64(maxBitmaskSize))
- return
- }
- zb0064, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedMsigs.BitmaskSubsigs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSubsigs")
- return
- }
- (*z).encodedMsigs.BitmaskSubsigs = bitmask(zb0064)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0066 int
- var zb0067 bool
- zb0066, zb0067, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic")
- return
- }
- if zb0066 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0066), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Logic")
- return
- }
- if zb0067 {
- (*z).encodedLsigs.Logic = nil
- } else if (*z).encodedLsigs.Logic != nil && cap((*z).encodedLsigs.Logic) >= zb0066 {
- (*z).encodedLsigs.Logic = ((*z).encodedLsigs.Logic)[:zb0066]
- } else {
- (*z).encodedLsigs.Logic = make([][]byte, zb0066)
- }
- for zb0003 := range (*z).encodedLsigs.Logic {
- var zb0068 int
- zb0068, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic", zb0003)
- return
- }
- if zb0068 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0068), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedLsigs.Logic[zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedLsigs.Logic[zb0003])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic", zb0003)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0069 []byte
- var zb0070 int
- zb0070, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogic")
- return
- }
- if zb0070 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0070), uint64(maxBitmaskSize))
- return
- }
- zb0069, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedLsigs.BitmaskLogic))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogic")
- return
- }
- (*z).encodedLsigs.BitmaskLogic = bitmask(zb0069)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0071 int
- var zb0072 bool
- zb0071, zb0072, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs")
- return
- }
- if zb0071 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0071), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs")
- return
- }
- if zb0072 {
- (*z).encodedLsigs.LogicArgs = nil
- } else if (*z).encodedLsigs.LogicArgs != nil && cap((*z).encodedLsigs.LogicArgs) >= zb0071 {
- (*z).encodedLsigs.LogicArgs = ((*z).encodedLsigs.LogicArgs)[:zb0071]
- } else {
- (*z).encodedLsigs.LogicArgs = make([][][]byte, zb0071)
- }
- for zb0004 := range (*z).encodedLsigs.LogicArgs {
- var zb0073 int
- var zb0074 bool
- zb0073, zb0074, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004)
- return
- }
- if zb0073 > transactions.EvalMaxArgs {
- err = msgp.ErrOverflow(uint64(zb0073), uint64(transactions.EvalMaxArgs))
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004)
- return
- }
- if zb0074 {
- (*z).encodedLsigs.LogicArgs[zb0004] = nil
- } else if (*z).encodedLsigs.LogicArgs[zb0004] != nil && cap((*z).encodedLsigs.LogicArgs[zb0004]) >= zb0073 {
- (*z).encodedLsigs.LogicArgs[zb0004] = ((*z).encodedLsigs.LogicArgs[zb0004])[:zb0073]
- } else {
- (*z).encodedLsigs.LogicArgs[zb0004] = make([][]byte, zb0073)
- }
- for zb0005 := range (*z).encodedLsigs.LogicArgs[zb0004] {
- var zb0075 int
- zb0075, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004, zb0005)
- return
- }
- if zb0075 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0075), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedLsigs.LogicArgs[zb0004][zb0005], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedLsigs.LogicArgs[zb0004][zb0005])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004, zb0005)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0076 []byte
- var zb0077 int
- zb0077, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogicArgs")
- return
- }
- if zb0077 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0077), uint64(maxBitmaskSize))
- return
- }
- zb0076, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedLsigs.BitmaskLogicArgs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogicArgs")
- return
- }
- (*z).encodedLsigs.BitmaskLogicArgs = bitmask(zb0076)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0078 int
- zb0078, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AuthAddr")
- return
- }
- if zb0078 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0078), uint64(maxAddressBytes))
- return
- }
- (*z).AuthAddr, bts, err = msgp.ReadBytesBytes(bts, (*z).AuthAddr)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AuthAddr")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0079 []byte
- var zb0080 int
- zb0080, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAuthAddr")
- return
- }
- if zb0080 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0080), uint64(maxBitmaskSize))
- return
- }
- zb0079, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAuthAddr))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAuthAddr")
- return
- }
- (*z).BitmaskAuthAddr = bitmask(zb0079)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0081 int
- zb0081, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxType")
- return
- }
- if zb0081 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0081), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedTxns.TxType, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.TxType)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxType")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0082 []byte
- var zb0083 int
- zb0083, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTxType")
- return
- }
- if zb0083 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0083), uint64(maxBitmaskSize))
- return
- }
- zb0082, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.BitmaskTxType))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTxType")
- return
- }
- (*z).encodedTxns.BitmaskTxType = bitmask(zb0082)
- }
- }
- if zb0049 > 0 {
- zb0049--
- (*z).encodedTxns.TxTypeOffset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxTypeOffset")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0084 int
- zb0084, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- if zb0084 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0084), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.Sender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0085 []byte
- var zb0086 int
- zb0086, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- if zb0086 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0086), uint64(maxBitmaskSize))
- return
- }
- zb0085, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskSender = bitmask(zb0085)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0087 int
- var zb0088 bool
- zb0087, zb0088, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0087 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0087), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0088 {
- (*z).encodedTxns.encodedTxnHeaders.Fee = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.Fee != nil && cap((*z).encodedTxns.encodedTxnHeaders.Fee) >= zb0087 {
- (*z).encodedTxns.encodedTxnHeaders.Fee = ((*z).encodedTxns.encodedTxnHeaders.Fee)[:zb0087]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.Fee = make([]basics.MicroAlgos, zb0087)
- }
- for zb0006 := range (*z).encodedTxns.encodedTxnHeaders.Fee {
- bts, err = (*z).encodedTxns.encodedTxnHeaders.Fee[zb0006].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee", zb0006)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0089 []byte
- var zb0090 int
- zb0090, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- if zb0090 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0090), uint64(maxBitmaskSize))
- return
- }
- zb0089, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskFee = bitmask(zb0089)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0091 int
- var zb0092 bool
- zb0091, zb0092, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0091 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0091), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0092 {
- (*z).encodedTxns.encodedTxnHeaders.FirstValid = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.FirstValid != nil && cap((*z).encodedTxns.encodedTxnHeaders.FirstValid) >= zb0091 {
- (*z).encodedTxns.encodedTxnHeaders.FirstValid = ((*z).encodedTxns.encodedTxnHeaders.FirstValid)[:zb0091]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.FirstValid = make([]basics.Round, zb0091)
- }
- for zb0007 := range (*z).encodedTxns.encodedTxnHeaders.FirstValid {
- bts, err = (*z).encodedTxns.encodedTxnHeaders.FirstValid[zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid", zb0007)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0093 []byte
- var zb0094 int
- zb0094, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- if zb0094 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0094), uint64(maxBitmaskSize))
- return
- }
- zb0093, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid = bitmask(zb0093)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0095 int
- var zb0096 bool
- zb0095, zb0096, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0095 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0095), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0096 {
- (*z).encodedTxns.encodedTxnHeaders.LastValid = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.LastValid != nil && cap((*z).encodedTxns.encodedTxnHeaders.LastValid) >= zb0095 {
- (*z).encodedTxns.encodedTxnHeaders.LastValid = ((*z).encodedTxns.encodedTxnHeaders.LastValid)[:zb0095]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.LastValid = make([]basics.Round, zb0095)
- }
- for zb0008 := range (*z).encodedTxns.encodedTxnHeaders.LastValid {
- bts, err = (*z).encodedTxns.encodedTxnHeaders.LastValid[zb0008].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid", zb0008)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0097 []byte
- var zb0098 int
- zb0098, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- if zb0098 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0098), uint64(maxBitmaskSize))
- return
- }
- zb0097, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid = bitmask(zb0097)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0099 int
- var zb0100 bool
- zb0099, zb0100, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0099 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0099), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0100 {
- (*z).encodedTxns.encodedTxnHeaders.Note = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.Note != nil && cap((*z).encodedTxns.encodedTxnHeaders.Note) >= zb0099 {
- (*z).encodedTxns.encodedTxnHeaders.Note = ((*z).encodedTxns.encodedTxnHeaders.Note)[:zb0099]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.Note = make([][]byte, zb0099)
- }
- for zb0009 := range (*z).encodedTxns.encodedTxnHeaders.Note {
- var zb0101 int
- zb0101, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0009)
- return
- }
- if zb0101 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0101), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.Note[zb0009], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.Note[zb0009])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0009)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0102 []byte
- var zb0103 int
- zb0103, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- if zb0103 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0103), uint64(maxBitmaskSize))
- return
- }
- zb0102, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskNote = bitmask(zb0102)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0104 []byte
- var zb0105 int
- zb0105, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- if zb0105 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0105), uint64(maxBitmaskSize))
- return
- }
- zb0104, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID = bitmask(zb0104)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0106 []byte
- var zb0107 int
- zb0107, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- if zb0107 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0107), uint64(maxBitmaskSize))
- return
- }
- zb0106, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskGroup = bitmask(zb0106)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0108 int
- zb0108, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- if zb0108 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0108), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.Lease)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0109 []byte
- var zb0110 int
- zb0110, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- if zb0110 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0110), uint64(maxBitmaskSize))
- return
- }
- zb0109, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskLease = bitmask(zb0109)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0111 int
- zb0111, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- if zb0111 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0111), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0112 []byte
- var zb0113 int
- zb0113, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- if zb0113 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0113), uint64(maxBitmaskSize))
- return
- }
- zb0112, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo = bitmask(zb0112)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0114 int
- zb0114, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- if zb0114 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0114), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedKeyregTxnFields.VotePK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0115 int
- zb0115, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- if zb0115 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0115), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedKeyregTxnFields.SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0116 int
- var zb0117 bool
- zb0116, zb0117, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0116 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0116), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0117 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst = nil
- } else if (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst != nil && cap((*z).encodedTxns.encodedKeyregTxnFields.VoteFirst) >= zb0116 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst = ((*z).encodedTxns.encodedKeyregTxnFields.VoteFirst)[:zb0116]
- } else {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst = make([]basics.Round, zb0116)
- }
- for zb0010 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst {
- bts, err = (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst", zb0010)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0118 []byte
- var zb0119 int
- zb0119, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- if zb0119 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0119), uint64(maxBitmaskSize))
- return
- }
- zb0118, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst = bitmask(zb0118)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0120 int
- var zb0121 bool
- zb0120, zb0121, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0120 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0120), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0121 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteLast = nil
- } else if (*z).encodedTxns.encodedKeyregTxnFields.VoteLast != nil && cap((*z).encodedTxns.encodedKeyregTxnFields.VoteLast) >= zb0120 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteLast = ((*z).encodedTxns.encodedKeyregTxnFields.VoteLast)[:zb0120]
- } else {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteLast = make([]basics.Round, zb0120)
- }
- for zb0011 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteLast {
- bts, err = (*z).encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast", zb0011)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0122 []byte
- var zb0123 int
- zb0123, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- if zb0123 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0123), uint64(maxBitmaskSize))
- return
- }
- zb0122, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast = bitmask(zb0122)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0124 int
- var zb0125 bool
- zb0124, zb0125, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0124 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0124), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0125 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = nil
- } else if (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution != nil && cap((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) >= zb0124 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = ((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution)[:zb0124]
- } else {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = make([]uint64, zb0124)
- }
- for zb0012 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution[zb0012], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution", zb0012)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0126 []byte
- var zb0127 int
- zb0127, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- if zb0127 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0127), uint64(maxBitmaskSize))
- return
- }
- zb0126, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys = bitmask(zb0126)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0128 []byte
- var zb0129 int
- zb0129, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- if zb0129 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0129), uint64(maxBitmaskSize))
- return
- }
- zb0128, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation = bitmask(zb0128)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0130 int
- zb0130, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- if zb0130 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0130), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedPaymentTxnFields.Receiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0131 []byte
- var zb0132 int
- zb0132, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- if zb0132 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0132), uint64(maxBitmaskSize))
- return
- }
- zb0131, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver = bitmask(zb0131)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0133 int
- var zb0134 bool
- zb0133, zb0134, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0133 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0133), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0134 {
- (*z).encodedTxns.encodedPaymentTxnFields.Amount = nil
- } else if (*z).encodedTxns.encodedPaymentTxnFields.Amount != nil && cap((*z).encodedTxns.encodedPaymentTxnFields.Amount) >= zb0133 {
- (*z).encodedTxns.encodedPaymentTxnFields.Amount = ((*z).encodedTxns.encodedPaymentTxnFields.Amount)[:zb0133]
- } else {
- (*z).encodedTxns.encodedPaymentTxnFields.Amount = make([]basics.MicroAlgos, zb0133)
- }
- for zb0013 := range (*z).encodedTxns.encodedPaymentTxnFields.Amount {
- bts, err = (*z).encodedTxns.encodedPaymentTxnFields.Amount[zb0013].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount", zb0013)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0135 []byte
- var zb0136 int
- zb0136, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- if zb0136 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0136), uint64(maxBitmaskSize))
- return
- }
- zb0135, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount = bitmask(zb0135)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0137 int
- zb0137, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- if zb0137 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0137), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0138 []byte
- var zb0139 int
- zb0139, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- if zb0139 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0139), uint64(maxBitmaskSize))
- return
- }
- zb0138, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo = bitmask(zb0138)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0140 int
- var zb0141 bool
- zb0140, zb0141, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0140 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0140), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0141 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) >= zb0140 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = ((*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset)[:zb0140]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = make([]basics.AssetIndex, zb0140)
- }
- for zb0014 := range (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- bts, err = (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset", zb0014)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0142 []byte
- var zb0143 int
- zb0143, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- if zb0143 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0143), uint64(maxBitmaskSize))
- return
- }
- zb0142, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset = bitmask(zb0142)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0144 int
- var zb0145 bool
- zb0144, zb0145, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0144 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0144), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0145 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) >= zb0144 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total)[:zb0144]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = make([]uint64, zb0144)
- }
- for zb0015 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0015], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total", zb0015)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0146 []byte
- var zb0147 int
- zb0147, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- if zb0147 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0147), uint64(maxBitmaskSize))
- return
- }
- zb0146, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal = bitmask(zb0146)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0148 int
- var zb0149 bool
- zb0148, zb0149, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0148 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0148), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0149 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) >= zb0148 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals)[:zb0148]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = make([]uint32, zb0148)
- }
- for zb0016 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0016], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals", zb0016)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0150 []byte
- var zb0151 int
- zb0151, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- if zb0151 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0151), uint64(maxBitmaskSize))
- return
- }
- zb0150, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals = bitmask(zb0150)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0152 []byte
- var zb0153 int
- zb0153, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- if zb0153 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0153), uint64(maxBitmaskSize))
- return
- }
- zb0152, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0152)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0154 int
- var zb0155 bool
- zb0154, zb0155, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0154 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0154), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0155 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) >= zb0154 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName)[:zb0154]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = make([]string, zb0154)
- }
- for zb0017 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName", zb0017)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0156 []byte
- var zb0157 int
- zb0157, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- if zb0157 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0157), uint64(maxBitmaskSize))
- return
- }
- zb0156, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName = bitmask(zb0156)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0158 int
- var zb0159 bool
- zb0158, zb0159, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0158 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0158), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0159 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) >= zb0158 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName)[:zb0158]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = make([]string, zb0158)
- }
- for zb0018 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName", zb0018)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0160 []byte
- var zb0161 int
- zb0161, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- if zb0161 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0161), uint64(maxBitmaskSize))
- return
- }
- zb0160, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName = bitmask(zb0160)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0162 int
- var zb0163 bool
- zb0162, zb0163, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0162 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0162), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0163 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) >= zb0162 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL)[:zb0162]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = make([]string, zb0162)
- }
- for zb0019 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL", zb0019)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0164 []byte
- var zb0165 int
- zb0165, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- if zb0165 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0165), uint64(maxBitmaskSize))
- return
- }
- zb0164, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL = bitmask(zb0164)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0166 int
- zb0166, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- if zb0166 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0166), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0167 []byte
- var zb0168 int
- zb0168, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- if zb0168 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0168), uint64(maxBitmaskSize))
- return
- }
- zb0167, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash = bitmask(zb0167)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0169 int
- zb0169, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- if zb0169 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0169), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0170 []byte
- var zb0171 int
- zb0171, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- if zb0171 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0171), uint64(maxBitmaskSize))
- return
- }
- zb0170, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager = bitmask(zb0170)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0172 int
- zb0172, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- if zb0172 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0172), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0173 []byte
- var zb0174 int
- zb0174, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- if zb0174 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0174), uint64(maxBitmaskSize))
- return
- }
- zb0173, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve = bitmask(zb0173)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0175 int
- zb0175, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- if zb0175 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0175), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0176 []byte
- var zb0177 int
- zb0177, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- if zb0177 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0177), uint64(maxBitmaskSize))
- return
- }
- zb0176, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze = bitmask(zb0176)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0178 int
- zb0178, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- if zb0178 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0178), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0179 []byte
- var zb0180 int
- zb0180, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- if zb0180 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0180), uint64(maxBitmaskSize))
- return
- }
- zb0179, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback = bitmask(zb0179)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0181 int
- var zb0182 bool
- zb0181, zb0182, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0181 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0181), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0182 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset = nil
- } else if (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset != nil && cap((*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset) >= zb0181 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset = ((*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset)[:zb0181]
- } else {
- (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset = make([]basics.AssetIndex, zb0181)
- }
- for zb0020 := range (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- bts, err = (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset", zb0020)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0183 []byte
- var zb0184 int
- zb0184, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- if zb0184 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0184), uint64(maxBitmaskSize))
- return
- }
- zb0183, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset = bitmask(zb0183)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0185 int
- var zb0186 bool
- zb0185, zb0186, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0185 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0185), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0186 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount = nil
- } else if (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount != nil && cap((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount) >= zb0185 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount = ((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount)[:zb0185]
- } else {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount = make([]uint64, zb0185)
- }
- for zb0021 := range (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount[zb0021], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount", zb0021)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0187 []byte
- var zb0188 int
- zb0188, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- if zb0188 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0188), uint64(maxBitmaskSize))
- return
- }
- zb0187, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount = bitmask(zb0187)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0189 int
- zb0189, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- if zb0189 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0189), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0190 []byte
- var zb0191 int
- zb0191, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- if zb0191 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0191), uint64(maxBitmaskSize))
- return
- }
- zb0190, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender = bitmask(zb0190)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0192 int
- zb0192, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- if zb0192 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0192), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0193 []byte
- var zb0194 int
- zb0194, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- if zb0194 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0194), uint64(maxBitmaskSize))
- return
- }
- zb0193, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver = bitmask(zb0193)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0195 int
- zb0195, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- if zb0195 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0195), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0196 []byte
- var zb0197 int
- zb0197, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- if zb0197 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0197), uint64(maxBitmaskSize))
- return
- }
- zb0196, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo = bitmask(zb0196)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0198 int
- zb0198, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- if zb0198 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0198), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0199 []byte
- var zb0200 int
- zb0200, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- if zb0200 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0200), uint64(maxBitmaskSize))
- return
- }
- zb0199, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount = bitmask(zb0199)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0201 int
- var zb0202 bool
- zb0201, zb0202, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0201 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0201), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0202 {
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = nil
- } else if (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset != nil && cap((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) >= zb0201 {
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = ((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset)[:zb0201]
- } else {
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = make([]basics.AssetIndex, zb0201)
- }
- for zb0022 := range (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- bts, err = (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset", zb0022)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0203 []byte
- var zb0204 int
- zb0204, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- if zb0204 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0204), uint64(maxBitmaskSize))
- return
- }
- zb0203, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset = bitmask(zb0203)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0205 []byte
- var zb0206 int
- zb0206, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- if zb0206 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0206), uint64(maxBitmaskSize))
- return
- }
- zb0205, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen = bitmask(zb0205)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0207 int
- var zb0208 bool
- zb0207, zb0208, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0207 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0207), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0208 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID) >= zb0207 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID)[:zb0207]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID = make([]basics.AppIndex, zb0207)
- }
- for zb0023 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID", zb0023)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0209 []byte
- var zb0210 int
- zb0210, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- if zb0210 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0210), uint64(maxBitmaskSize))
- return
- }
- zb0209, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID = bitmask(zb0209)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0211 int
- zb0211, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- if zb0211 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0211), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0212 []byte
- var zb0213 int
- zb0213, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- if zb0213 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0213), uint64(maxBitmaskSize))
- return
- }
- zb0212, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion = bitmask(zb0212)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0214 int
- var zb0215 bool
- zb0214, zb0215, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0214 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0214), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0215 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) >= zb0214 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs)[:zb0214]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = make([]applicationArgs, zb0214)
- }
- for zb0024 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- var zb0216 int
- var zb0217 bool
- zb0216, zb0217, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0024)
- return
- }
- if zb0216 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0216), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0024)
- return
- }
- if zb0217 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024]) >= zb0216 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024])[:zb0216]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = make(applicationArgs, zb0216)
- }
- for zb0025 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0024, zb0025)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0218 []byte
- var zb0219 int
- zb0219, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- if zb0219 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0219), uint64(maxBitmaskSize))
- return
- }
- zb0218, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs = bitmask(zb0218)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0220 int
- var zb0221 bool
- zb0220, zb0221, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0220 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0220), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0221 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts) >= zb0220 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts = ((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts)[:zb0220]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts = make([]addresses, zb0220)
- }
- for zb0026 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts {
- var zb0222 int
- var zb0223 bool
- zb0222, zb0223, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0026)
- return
- }
- if zb0222 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0222), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0026)
- return
- }
- if zb0223 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026]) >= zb0222 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = ((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026])[:zb0222]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = make(addresses, zb0222)
- }
- for zb0027 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0026, zb0027)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0224 []byte
- var zb0225 int
- zb0225, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- if zb0225 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0225), uint64(maxBitmaskSize))
- return
- }
- zb0224, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts = bitmask(zb0224)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0226 int
- var zb0227 bool
- zb0226, zb0227, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0226 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0226), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0227 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps) >= zb0226 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps)[:zb0226]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps = make([]appIndices, zb0226)
- }
- for zb0028 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- var zb0228 int
- var zb0229 bool
- zb0228, zb0229, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0028)
- return
- }
- if zb0228 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0228), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0028)
- return
- }
- if zb0229 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028]) >= zb0228 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028])[:zb0228]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = make(appIndices, zb0228)
- }
- for zb0029 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0028, zb0029)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0230 []byte
- var zb0231 int
- zb0231, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- if zb0231 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0231), uint64(maxBitmaskSize))
- return
- }
- zb0230, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps = bitmask(zb0230)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0232 int
- var zb0233 bool
- zb0232, zb0233, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0232 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0232), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0233 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) >= zb0232 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets)[:zb0232]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = make([]assetIndices, zb0232)
- }
- for zb0030 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- var zb0234 int
- var zb0235 bool
- zb0234, zb0235, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0030)
- return
- }
- if zb0234 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0234), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0030)
- return
- }
- if zb0235 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030]) >= zb0234 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030])[:zb0234]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = make(assetIndices, zb0234)
- }
- for zb0031 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0030, zb0031)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0236 []byte
- var zb0237 int
- zb0237, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- if zb0237 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0237), uint64(maxBitmaskSize))
- return
- }
- zb0236, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets = bitmask(zb0236)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0238 int
- var zb0239 bool
- zb0238, zb0239, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0238 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0238), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0239 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) >= zb0238 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = ((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint)[:zb0238]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = make([]uint64, zb0238)
- }
- for zb0032 := range (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint[zb0032], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint", zb0032)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0240 []byte
- var zb0241 int
- zb0241, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- if zb0241 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0241), uint64(maxBitmaskSize))
- return
- }
- zb0240, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint = bitmask(zb0240)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0242 int
- var zb0243 bool
- zb0242, zb0243, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0242 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0242), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0243 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) >= zb0242 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = ((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice)[:zb0242]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = make([]uint64, zb0242)
- }
- for zb0033 := range (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice[zb0033], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice", zb0033)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0244 []byte
- var zb0245 int
- zb0245, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- if zb0245 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0245), uint64(maxBitmaskSize))
- return
- }
- zb0244, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice = bitmask(zb0244)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0246 int
- var zb0247 bool
- zb0246, zb0247, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0246 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0246), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0247 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) >= zb0246 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = ((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint)[:zb0246]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = make([]uint64, zb0246)
- }
- for zb0034 := range (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint[zb0034], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint", zb0034)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0248 []byte
- var zb0249 int
- zb0249, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- if zb0249 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0249), uint64(maxBitmaskSize))
- return
- }
- zb0248, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint = bitmask(zb0248)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0250 int
- var zb0251 bool
- zb0250, zb0251, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0250 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0250), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0251 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) >= zb0250 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = ((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice)[:zb0250]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = make([]uint64, zb0250)
- }
- for zb0035 := range (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0035], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice", zb0035)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0252 []byte
- var zb0253 int
- zb0253, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0253 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0253), uint64(maxBitmaskSize))
- return
- }
- zb0252, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice = bitmask(zb0252)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0254 int
- var zb0255 bool
- zb0254, zb0255, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0254 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0254), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0255 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) >= zb0254 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram)[:zb0254]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = make([]program, zb0254)
- }
- for zb0036 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- {
- var zb0256 []byte
- var zb0257 int
- zb0257, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0036)
- return
- }
- if zb0257 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0257), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0256, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0036)
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036] = program(zb0256)
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0258 []byte
- var zb0259 int
- zb0259, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- if zb0259 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0259), uint64(maxBitmaskSize))
- return
- }
- zb0258, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram = bitmask(zb0258)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0260 int
- var zb0261 bool
- zb0260, zb0261, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0260 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0260), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0261 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) >= zb0260 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = ((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram)[:zb0260]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = make([]program, zb0260)
- }
- for zb0037 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- {
- var zb0262 []byte
- var zb0263 int
- zb0263, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0037)
- return
- }
- if zb0263 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0263), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0262, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0037)
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037] = program(zb0262)
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0264 []byte
- var zb0265 int
- zb0265, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- if zb0265 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0265), uint64(maxBitmaskSize))
- return
- }
- zb0264, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram = bitmask(zb0264)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0266 int
- var zb0267 bool
- zb0266, zb0267, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0266 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0266), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0267 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) >= zb0266 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = ((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages)[:zb0266]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = make([]uint32, zb0266)
- }
- for zb0038 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages[zb0038], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages", zb0038)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0268 []byte
- var zb0269 int
- zb0269, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- if zb0269 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0269), uint64(maxBitmaskSize))
- return
- }
- zb0268, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages = bitmask(zb0268)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0270 int
- var zb0271 bool
- zb0270, zb0271, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0270 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0270), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0271 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertRound = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.CertRound != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.CertRound) >= zb0270 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertRound = ((*z).encodedTxns.encodedCompactCertTxnFields.CertRound)[:zb0270]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertRound = make([]basics.Round, zb0270)
- }
- for zb0039 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertRound {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound", zb0039)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0272 []byte
- var zb0273 int
- zb0273, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- if zb0273 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0273), uint64(maxBitmaskSize))
- return
- }
- zb0272, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound = bitmask(zb0272)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0274 int
- var zb0275 bool
- zb0274, zb0275, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0274 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0274), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0275 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertType = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.CertType != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.CertType) >= zb0274 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertType = ((*z).encodedTxns.encodedCompactCertTxnFields.CertType)[:zb0274]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertType = make([]protocol.CompactCertType, zb0274)
- }
- for zb0040 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertType {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType", zb0040)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0276 []byte
- var zb0277 int
- zb0277, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- if zb0277 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0277), uint64(maxBitmaskSize))
- return
- }
- zb0276, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType = bitmask(zb0276)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0278 int
- zb0278, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- if zb0278 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0278), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0279 []byte
- var zb0280 int
- zb0280, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- if zb0280 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0280), uint64(maxBitmaskSize))
- return
- }
- zb0279, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit = bitmask(zb0279)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0281 int
- var zb0282 bool
- zb0281, zb0282, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0281 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0281), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0282 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) >= zb0281 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight)[:zb0281]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = make([]uint64, zb0281)
- }
- for zb0041 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0041], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight", zb0041)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0283 []byte
- var zb0284 int
- zb0284, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- if zb0284 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0284), uint64(maxBitmaskSize))
- return
- }
- zb0283, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight = bitmask(zb0283)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0285 int
- var zb0286 bool
- zb0285, zb0286, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0285 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0285), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0286 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) >= zb0285 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs)[:zb0285]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = make([]certProofs, zb0285)
- }
- for zb0042 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- var zb0287 int
- var zb0288 bool
- zb0287, zb0288, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0042)
- return
- }
- if zb0287 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0287), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0042)
- return
- }
- if zb0288 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042]) >= zb0287 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042])[:zb0287]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = make(certProofs, zb0287)
- }
- for zb0043 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0042, zb0043)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0289 []byte
- var zb0290 int
- zb0290, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- if zb0290 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0290), uint64(maxBitmaskSize))
- return
- }
- zb0289, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs = bitmask(zb0289)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0291 int
- var zb0292 bool
- zb0291, zb0292, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0291 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0291), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0292 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) >= zb0291 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs)[:zb0291]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = make([]certProofs, zb0291)
- }
- for zb0044 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- var zb0293 int
- var zb0294 bool
- zb0293, zb0294, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0044)
- return
- }
- if zb0293 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0293), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0044)
- return
- }
- if zb0294 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044]) >= zb0293 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044])[:zb0293]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = make(certProofs, zb0293)
- }
- for zb0045 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0044, zb0045)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0295 []byte
- var zb0296 int
- zb0296, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- if zb0296 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0296), uint64(maxBitmaskSize))
- return
- }
- zb0295, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs = bitmask(zb0295)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0297 int
- var zb0298 bool
- zb0297, zb0298, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0297 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0297), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0298 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) >= zb0297 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals)[:zb0297]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = make([]revealMap, zb0297)
- }
- for zb0046 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- var zb0299 int
- var zb0300 bool
- zb0299, zb0300, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046)
- return
- }
- if zb0299 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0299), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046)
- return
- }
- if zb0300 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] == nil {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = make(revealMap, zb0299)
- }
- for zb0299 > 0 {
- var zb0047 uint64
- var zb0048 compactcert.Reveal
- zb0299--
- zb0047, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046)
- return
- }
- bts, err = zb0048.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046, zb0047)
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046][zb0047] = zb0048
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0301 []byte
- var zb0302 int
- zb0302, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- if zb0302 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0302), uint64(maxBitmaskSize))
- return
- }
- zb0301, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals = bitmask(zb0301)
- }
- }
- if zb0049 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0049)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0050 {
- (*z) = encodedSignedTxns{}
- }
- for zb0049 > 0 {
- zb0049--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "sig":
- var zb0303 int
- zb0303, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Sig")
- return
- }
- if zb0303 > maxSignatureBytes {
- err = msgp.ErrOverflow(uint64(zb0303), uint64(maxSignatureBytes))
- return
- }
- (*z).Sig, bts, err = msgp.ReadBytesBytes(bts, (*z).Sig)
- if err != nil {
- err = msgp.WrapError(err, "Sig")
- return
- }
- case "sigbm":
- {
- var zb0304 []byte
- var zb0305 int
- zb0305, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSig")
- return
- }
- if zb0305 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0305), uint64(maxBitmaskSize))
- return
- }
- zb0304, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSig))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSig")
- return
- }
- (*z).BitmaskSig = bitmask(zb0304)
- }
- case "msigv":
- var zb0306 int
- zb0306, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Version")
- return
- }
- if zb0306 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0306), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedMsigs.Version, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedMsigs.Version)
- if err != nil {
- err = msgp.WrapError(err, "Version")
- return
- }
- case "msigvbm":
- {
- var zb0307 []byte
- var zb0308 int
- zb0308, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVersion")
- return
- }
- if zb0308 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0308), uint64(maxBitmaskSize))
- return
- }
- zb0307, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedMsigs.BitmaskVersion))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVersion")
- return
- }
- (*z).encodedMsigs.BitmaskVersion = bitmask(zb0307)
- }
- case "msigthr":
- var zb0309 int
- zb0309, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Threshold")
- return
- }
- if zb0309 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0309), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedMsigs.Threshold, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedMsigs.Threshold)
- if err != nil {
- err = msgp.WrapError(err, "Threshold")
- return
- }
- case "msigthrbm":
- {
- var zb0310 []byte
- var zb0311 int
- zb0311, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskThreshold")
- return
- }
- if zb0311 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0311), uint64(maxBitmaskSize))
- return
- }
- zb0310, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedMsigs.BitmaskThreshold))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskThreshold")
- return
- }
- (*z).encodedMsigs.BitmaskThreshold = bitmask(zb0310)
- }
- case "subsig":
- var zb0312 int
- var zb0313 bool
- zb0312, zb0313, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs")
- return
- }
- if zb0312 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0312), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Subsigs")
- return
- }
- if zb0313 {
- (*z).encodedMsigs.Subsigs = nil
- } else if (*z).encodedMsigs.Subsigs != nil && cap((*z).encodedMsigs.Subsigs) >= zb0312 {
- (*z).encodedMsigs.Subsigs = ((*z).encodedMsigs.Subsigs)[:zb0312]
- } else {
- (*z).encodedMsigs.Subsigs = make([][]crypto.MultisigSubsig, zb0312)
- }
- for zb0001 := range (*z).encodedMsigs.Subsigs {
- var zb0314 int
- var zb0315 bool
- zb0314, zb0315, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs", zb0001)
- return
- }
- if zb0314 > crypto.MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0314), uint64(crypto.MaxMultisig))
- err = msgp.WrapError(err, "Subsigs", zb0001)
- return
- }
- if zb0315 {
- (*z).encodedMsigs.Subsigs[zb0001] = nil
- } else if (*z).encodedMsigs.Subsigs[zb0001] != nil && cap((*z).encodedMsigs.Subsigs[zb0001]) >= zb0314 {
- (*z).encodedMsigs.Subsigs[zb0001] = ((*z).encodedMsigs.Subsigs[zb0001])[:zb0314]
- } else {
- (*z).encodedMsigs.Subsigs[zb0001] = make([]crypto.MultisigSubsig, zb0314)
- }
- for zb0002 := range (*z).encodedMsigs.Subsigs[zb0001] {
- bts, err = (*z).encodedMsigs.Subsigs[zb0001][zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs", zb0001, zb0002)
- return
- }
- }
- }
- case "subsigsbm":
- {
- var zb0316 []byte
- var zb0317 int
- zb0317, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSubsigs")
- return
- }
- if zb0317 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0317), uint64(maxBitmaskSize))
- return
- }
- zb0316, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedMsigs.BitmaskSubsigs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSubsigs")
- return
- }
- (*z).encodedMsigs.BitmaskSubsigs = bitmask(zb0316)
- }
- case "lsigl":
- var zb0318 int
- var zb0319 bool
- zb0318, zb0319, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Logic")
- return
- }
- if zb0318 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0318), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Logic")
- return
- }
- if zb0319 {
- (*z).encodedLsigs.Logic = nil
- } else if (*z).encodedLsigs.Logic != nil && cap((*z).encodedLsigs.Logic) >= zb0318 {
- (*z).encodedLsigs.Logic = ((*z).encodedLsigs.Logic)[:zb0318]
- } else {
- (*z).encodedLsigs.Logic = make([][]byte, zb0318)
- }
- for zb0003 := range (*z).encodedLsigs.Logic {
- var zb0320 int
- zb0320, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Logic", zb0003)
- return
- }
- if zb0320 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0320), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedLsigs.Logic[zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedLsigs.Logic[zb0003])
- if err != nil {
- err = msgp.WrapError(err, "Logic", zb0003)
- return
- }
- }
- case "lsiglbm":
- {
- var zb0321 []byte
- var zb0322 int
- zb0322, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogic")
- return
- }
- if zb0322 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0322), uint64(maxBitmaskSize))
- return
- }
- zb0321, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedLsigs.BitmaskLogic))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogic")
- return
- }
- (*z).encodedLsigs.BitmaskLogic = bitmask(zb0321)
- }
- case "lsigarg":
- var zb0323 int
- var zb0324 bool
- zb0323, zb0324, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs")
- return
- }
- if zb0323 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0323), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LogicArgs")
- return
- }
- if zb0324 {
- (*z).encodedLsigs.LogicArgs = nil
- } else if (*z).encodedLsigs.LogicArgs != nil && cap((*z).encodedLsigs.LogicArgs) >= zb0323 {
- (*z).encodedLsigs.LogicArgs = ((*z).encodedLsigs.LogicArgs)[:zb0323]
- } else {
- (*z).encodedLsigs.LogicArgs = make([][][]byte, zb0323)
- }
- for zb0004 := range (*z).encodedLsigs.LogicArgs {
- var zb0325 int
- var zb0326 bool
- zb0325, zb0326, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0004)
- return
- }
- if zb0325 > transactions.EvalMaxArgs {
- err = msgp.ErrOverflow(uint64(zb0325), uint64(transactions.EvalMaxArgs))
- err = msgp.WrapError(err, "LogicArgs", zb0004)
- return
- }
- if zb0326 {
- (*z).encodedLsigs.LogicArgs[zb0004] = nil
- } else if (*z).encodedLsigs.LogicArgs[zb0004] != nil && cap((*z).encodedLsigs.LogicArgs[zb0004]) >= zb0325 {
- (*z).encodedLsigs.LogicArgs[zb0004] = ((*z).encodedLsigs.LogicArgs[zb0004])[:zb0325]
- } else {
- (*z).encodedLsigs.LogicArgs[zb0004] = make([][]byte, zb0325)
- }
- for zb0005 := range (*z).encodedLsigs.LogicArgs[zb0004] {
- var zb0327 int
- zb0327, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0004, zb0005)
- return
- }
- if zb0327 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0327), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedLsigs.LogicArgs[zb0004][zb0005], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedLsigs.LogicArgs[zb0004][zb0005])
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0004, zb0005)
- return
- }
- }
- }
- case "lsigargbm":
- {
- var zb0328 []byte
- var zb0329 int
- zb0329, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogicArgs")
- return
- }
- if zb0329 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0329), uint64(maxBitmaskSize))
- return
- }
- zb0328, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedLsigs.BitmaskLogicArgs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogicArgs")
- return
- }
- (*z).encodedLsigs.BitmaskLogicArgs = bitmask(zb0328)
- }
- case "sgnr":
- var zb0330 int
- zb0330, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AuthAddr")
- return
- }
- if zb0330 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0330), uint64(maxAddressBytes))
- return
- }
- (*z).AuthAddr, bts, err = msgp.ReadBytesBytes(bts, (*z).AuthAddr)
- if err != nil {
- err = msgp.WrapError(err, "AuthAddr")
- return
- }
- case "sgnrbm":
- {
- var zb0331 []byte
- var zb0332 int
- zb0332, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAuthAddr")
- return
- }
- if zb0332 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0332), uint64(maxBitmaskSize))
- return
- }
- zb0331, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskAuthAddr))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAuthAddr")
- return
- }
- (*z).BitmaskAuthAddr = bitmask(zb0331)
- }
- case "type":
- var zb0333 int
- zb0333, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxType")
- return
- }
- if zb0333 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0333), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedTxns.TxType, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.TxType)
- if err != nil {
- err = msgp.WrapError(err, "TxType")
- return
- }
- case "typebm":
- {
- var zb0334 []byte
- var zb0335 int
- zb0335, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTxType")
- return
- }
- if zb0335 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0335), uint64(maxBitmaskSize))
- return
- }
- zb0334, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.BitmaskTxType))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTxType")
- return
- }
- (*z).encodedTxns.BitmaskTxType = bitmask(zb0334)
- }
- case "typeo":
- (*z).encodedTxns.TxTypeOffset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxTypeOffset")
- return
- }
- case "snd":
- var zb0336 int
- zb0336, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- if zb0336 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0336), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.Sender)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- case "sndbm":
- {
- var zb0337 []byte
- var zb0338 int
- zb0338, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- if zb0338 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0338), uint64(maxBitmaskSize))
- return
- }
- zb0337, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskSender = bitmask(zb0337)
- }
- case "fee":
- var zb0339 int
- var zb0340 bool
- zb0339, zb0340, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0339 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0339), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0340 {
- (*z).encodedTxns.encodedTxnHeaders.Fee = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.Fee != nil && cap((*z).encodedTxns.encodedTxnHeaders.Fee) >= zb0339 {
- (*z).encodedTxns.encodedTxnHeaders.Fee = ((*z).encodedTxns.encodedTxnHeaders.Fee)[:zb0339]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.Fee = make([]basics.MicroAlgos, zb0339)
- }
- for zb0006 := range (*z).encodedTxns.encodedTxnHeaders.Fee {
- bts, err = (*z).encodedTxns.encodedTxnHeaders.Fee[zb0006].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee", zb0006)
- return
- }
- }
- case "feebm":
- {
- var zb0341 []byte
- var zb0342 int
- zb0342, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- if zb0342 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0342), uint64(maxBitmaskSize))
- return
- }
- zb0341, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskFee = bitmask(zb0341)
- }
- case "fv":
- var zb0343 int
- var zb0344 bool
- zb0343, zb0344, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0343 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0343), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0344 {
- (*z).encodedTxns.encodedTxnHeaders.FirstValid = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.FirstValid != nil && cap((*z).encodedTxns.encodedTxnHeaders.FirstValid) >= zb0343 {
- (*z).encodedTxns.encodedTxnHeaders.FirstValid = ((*z).encodedTxns.encodedTxnHeaders.FirstValid)[:zb0343]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.FirstValid = make([]basics.Round, zb0343)
- }
- for zb0007 := range (*z).encodedTxns.encodedTxnHeaders.FirstValid {
- bts, err = (*z).encodedTxns.encodedTxnHeaders.FirstValid[zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid", zb0007)
- return
- }
- }
- case "fvbm":
- {
- var zb0345 []byte
- var zb0346 int
- zb0346, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- if zb0346 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0346), uint64(maxBitmaskSize))
- return
- }
- zb0345, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid = bitmask(zb0345)
- }
- case "lv":
- var zb0347 int
- var zb0348 bool
- zb0347, zb0348, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0347 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0347), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0348 {
- (*z).encodedTxns.encodedTxnHeaders.LastValid = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.LastValid != nil && cap((*z).encodedTxns.encodedTxnHeaders.LastValid) >= zb0347 {
- (*z).encodedTxns.encodedTxnHeaders.LastValid = ((*z).encodedTxns.encodedTxnHeaders.LastValid)[:zb0347]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.LastValid = make([]basics.Round, zb0347)
- }
- for zb0008 := range (*z).encodedTxns.encodedTxnHeaders.LastValid {
- bts, err = (*z).encodedTxns.encodedTxnHeaders.LastValid[zb0008].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid", zb0008)
- return
- }
- }
- case "lvbm":
- {
- var zb0349 []byte
- var zb0350 int
- zb0350, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- if zb0350 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0350), uint64(maxBitmaskSize))
- return
- }
- zb0349, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid = bitmask(zb0349)
- }
- case "note":
- var zb0351 int
- var zb0352 bool
- zb0351, zb0352, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0351 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0351), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0352 {
- (*z).encodedTxns.encodedTxnHeaders.Note = nil
- } else if (*z).encodedTxns.encodedTxnHeaders.Note != nil && cap((*z).encodedTxns.encodedTxnHeaders.Note) >= zb0351 {
- (*z).encodedTxns.encodedTxnHeaders.Note = ((*z).encodedTxns.encodedTxnHeaders.Note)[:zb0351]
- } else {
- (*z).encodedTxns.encodedTxnHeaders.Note = make([][]byte, zb0351)
- }
- for zb0009 := range (*z).encodedTxns.encodedTxnHeaders.Note {
- var zb0353 int
- zb0353, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0009)
- return
- }
- if zb0353 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0353), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.Note[zb0009], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.Note[zb0009])
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0009)
- return
- }
- }
- case "notebm":
- {
- var zb0354 []byte
- var zb0355 int
- zb0355, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- if zb0355 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0355), uint64(maxBitmaskSize))
- return
- }
- zb0354, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskNote = bitmask(zb0354)
- }
- case "genbm":
- {
- var zb0356 []byte
- var zb0357 int
- zb0357, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- if zb0357 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0357), uint64(maxBitmaskSize))
- return
- }
- zb0356, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID = bitmask(zb0356)
- }
- case "grpbm":
- {
- var zb0358 []byte
- var zb0359 int
- zb0359, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- if zb0359 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0359), uint64(maxBitmaskSize))
- return
- }
- zb0358, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskGroup = bitmask(zb0358)
- }
- case "lx":
- var zb0360 int
- zb0360, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- if zb0360 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0360), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.Lease)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- case "lxbm":
- {
- var zb0361 []byte
- var zb0362 int
- zb0362, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- if zb0362 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0362), uint64(maxBitmaskSize))
- return
- }
- zb0361, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskLease = bitmask(zb0361)
- }
- case "rekey":
- var zb0363 int
- zb0363, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- if zb0363 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0363), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedTxnHeaders.RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- case "rekeybm":
- {
- var zb0364 []byte
- var zb0365 int
- zb0365, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- if zb0365 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0365), uint64(maxBitmaskSize))
- return
- }
- zb0364, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- (*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo = bitmask(zb0364)
- }
- case "votekey":
- var zb0366 int
- zb0366, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- if zb0366 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0366), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedKeyregTxnFields.VotePK)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- case "selkey":
- var zb0367 int
- zb0367, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- if zb0367 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0367), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedKeyregTxnFields.SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- case "votefst":
- var zb0368 int
- var zb0369 bool
- zb0368, zb0369, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0368 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0368), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0369 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst = nil
- } else if (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst != nil && cap((*z).encodedTxns.encodedKeyregTxnFields.VoteFirst) >= zb0368 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst = ((*z).encodedTxns.encodedKeyregTxnFields.VoteFirst)[:zb0368]
- } else {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst = make([]basics.Round, zb0368)
- }
- for zb0010 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst {
- bts, err = (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst", zb0010)
- return
- }
- }
- case "votefstbm":
- {
- var zb0370 []byte
- var zb0371 int
- zb0371, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- if zb0371 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0371), uint64(maxBitmaskSize))
- return
- }
- zb0370, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst = bitmask(zb0370)
- }
- case "votelst":
- var zb0372 int
- var zb0373 bool
- zb0372, zb0373, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0372 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0372), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0373 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteLast = nil
- } else if (*z).encodedTxns.encodedKeyregTxnFields.VoteLast != nil && cap((*z).encodedTxns.encodedKeyregTxnFields.VoteLast) >= zb0372 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteLast = ((*z).encodedTxns.encodedKeyregTxnFields.VoteLast)[:zb0372]
- } else {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteLast = make([]basics.Round, zb0372)
- }
- for zb0011 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteLast {
- bts, err = (*z).encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast", zb0011)
- return
- }
- }
- case "votelstbm":
- {
- var zb0374 []byte
- var zb0375 int
- zb0375, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- if zb0375 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0375), uint64(maxBitmaskSize))
- return
- }
- zb0374, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast = bitmask(zb0374)
- }
- case "votekd":
- var zb0376 int
- var zb0377 bool
- zb0376, zb0377, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0376 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0376), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0377 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = nil
- } else if (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution != nil && cap((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) >= zb0376 {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = ((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution)[:zb0376]
- } else {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = make([]uint64, zb0376)
- }
- for zb0012 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution {
- (*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution[zb0012], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution", zb0012)
- return
- }
- }
- case "votekbm":
- {
- var zb0378 []byte
- var zb0379 int
- zb0379, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- if zb0379 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0379), uint64(maxBitmaskSize))
- return
- }
- zb0378, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys = bitmask(zb0378)
- }
- case "nonpartbm":
- {
- var zb0380 []byte
- var zb0381 int
- zb0381, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- if zb0381 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0381), uint64(maxBitmaskSize))
- return
- }
- zb0380, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- (*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation = bitmask(zb0380)
- }
- case "rcv":
- var zb0382 int
- zb0382, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- if zb0382 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0382), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedPaymentTxnFields.Receiver)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- case "rcvbm":
- {
- var zb0383 []byte
- var zb0384 int
- zb0384, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- if zb0384 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0384), uint64(maxBitmaskSize))
- return
- }
- zb0383, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver = bitmask(zb0383)
- }
- case "amt":
- var zb0385 int
- var zb0386 bool
- zb0385, zb0386, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0385 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0385), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0386 {
- (*z).encodedTxns.encodedPaymentTxnFields.Amount = nil
- } else if (*z).encodedTxns.encodedPaymentTxnFields.Amount != nil && cap((*z).encodedTxns.encodedPaymentTxnFields.Amount) >= zb0385 {
- (*z).encodedTxns.encodedPaymentTxnFields.Amount = ((*z).encodedTxns.encodedPaymentTxnFields.Amount)[:zb0385]
- } else {
- (*z).encodedTxns.encodedPaymentTxnFields.Amount = make([]basics.MicroAlgos, zb0385)
- }
- for zb0013 := range (*z).encodedTxns.encodedPaymentTxnFields.Amount {
- bts, err = (*z).encodedTxns.encodedPaymentTxnFields.Amount[zb0013].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount", zb0013)
- return
- }
- }
- case "amtbm":
- {
- var zb0387 []byte
- var zb0388 int
- zb0388, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- if zb0388 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0388), uint64(maxBitmaskSize))
- return
- }
- zb0387, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount = bitmask(zb0387)
- }
- case "close":
- var zb0389 int
- zb0389, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- if zb0389 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0389), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- case "closebm":
- {
- var zb0390 []byte
- var zb0391 int
- zb0391, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- if zb0391 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0391), uint64(maxBitmaskSize))
- return
- }
- zb0390, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- (*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo = bitmask(zb0390)
- }
- case "caid":
- var zb0392 int
- var zb0393 bool
- zb0392, zb0393, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0392 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0392), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0393 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) >= zb0392 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = ((*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset)[:zb0392]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = make([]basics.AssetIndex, zb0392)
- }
- for zb0014 := range (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- bts, err = (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset", zb0014)
- return
- }
- }
- case "caidbm":
- {
- var zb0394 []byte
- var zb0395 int
- zb0395, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- if zb0395 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0395), uint64(maxBitmaskSize))
- return
- }
- zb0394, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset = bitmask(zb0394)
- }
- case "t":
- var zb0396 int
- var zb0397 bool
- zb0396, zb0397, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0396 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0396), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0397 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) >= zb0396 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total)[:zb0396]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = make([]uint64, zb0396)
- }
- for zb0015 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0015], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total", zb0015)
- return
- }
- }
- case "tbm":
- {
- var zb0398 []byte
- var zb0399 int
- zb0399, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- if zb0399 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0399), uint64(maxBitmaskSize))
- return
- }
- zb0398, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal = bitmask(zb0398)
- }
- case "dc":
- var zb0400 int
- var zb0401 bool
- zb0400, zb0401, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0400 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0400), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0401 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) >= zb0400 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals)[:zb0400]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = make([]uint32, zb0400)
- }
- for zb0016 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0016], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals", zb0016)
- return
- }
- }
- case "dcbm":
- {
- var zb0402 []byte
- var zb0403 int
- zb0403, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- if zb0403 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0403), uint64(maxBitmaskSize))
- return
- }
- zb0402, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals = bitmask(zb0402)
- }
- case "dfbm":
- {
- var zb0404 []byte
- var zb0405 int
- zb0405, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- if zb0405 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0405), uint64(maxBitmaskSize))
- return
- }
- zb0404, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0404)
- }
- case "un":
- var zb0406 int
- var zb0407 bool
- zb0406, zb0407, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0406 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0406), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0407 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) >= zb0406 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName)[:zb0406]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = make([]string, zb0406)
- }
- for zb0017 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName", zb0017)
- return
- }
- }
- case "unbm":
- {
- var zb0408 []byte
- var zb0409 int
- zb0409, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- if zb0409 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0409), uint64(maxBitmaskSize))
- return
- }
- zb0408, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName = bitmask(zb0408)
- }
- case "an":
- var zb0410 int
- var zb0411 bool
- zb0410, zb0411, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0410 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0410), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0411 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) >= zb0410 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName)[:zb0410]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = make([]string, zb0410)
- }
- for zb0018 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName", zb0018)
- return
- }
- }
- case "anbm":
- {
- var zb0412 []byte
- var zb0413 int
- zb0413, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- if zb0413 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0413), uint64(maxBitmaskSize))
- return
- }
- zb0412, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName = bitmask(zb0412)
- }
- case "au":
- var zb0414 int
- var zb0415 bool
- zb0414, zb0415, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0414 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0414), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0415 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = nil
- } else if (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL != nil && cap((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) >= zb0414 {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = ((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL)[:zb0414]
- } else {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = make([]string, zb0414)
- }
- for zb0019 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL", zb0019)
- return
- }
- }
- case "aubm":
- {
- var zb0416 []byte
- var zb0417 int
- zb0417, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- if zb0417 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0417), uint64(maxBitmaskSize))
- return
- }
- zb0416, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL = bitmask(zb0416)
- }
- case "am":
- var zb0418 int
- zb0418, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- if zb0418 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0418), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- case "ambm":
- {
- var zb0419 []byte
- var zb0420 int
- zb0420, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- if zb0420 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0420), uint64(maxBitmaskSize))
- return
- }
- zb0419, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash = bitmask(zb0419)
- }
- case "m":
- var zb0421 int
- zb0421, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- if zb0421 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0421), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- case "mbm":
- {
- var zb0422 []byte
- var zb0423 int
- zb0423, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- if zb0423 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0423), uint64(maxBitmaskSize))
- return
- }
- zb0422, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager = bitmask(zb0422)
- }
- case "r":
- var zb0424 int
- zb0424, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- if zb0424 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0424), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- case "rbm":
- {
- var zb0425 []byte
- var zb0426 int
- zb0426, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- if zb0426 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0426), uint64(maxBitmaskSize))
- return
- }
- zb0425, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve = bitmask(zb0425)
- }
- case "f":
- var zb0427 int
- zb0427, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- if zb0427 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0427), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- case "fbm":
- {
- var zb0428 []byte
- var zb0429 int
- zb0429, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- if zb0429 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0429), uint64(maxBitmaskSize))
- return
- }
- zb0428, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze = bitmask(zb0428)
- }
- case "c":
- var zb0430 int
- zb0430, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- if zb0430 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0430), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- case "cbm":
- {
- var zb0431 []byte
- var zb0432 int
- zb0432, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- if zb0432 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0432), uint64(maxBitmaskSize))
- return
- }
- zb0431, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback = bitmask(zb0431)
- }
- case "xaid":
- var zb0433 int
- var zb0434 bool
- zb0433, zb0434, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0433 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0433), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0434 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset = nil
- } else if (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset != nil && cap((*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset) >= zb0433 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset = ((*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset)[:zb0433]
- } else {
- (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset = make([]basics.AssetIndex, zb0433)
- }
- for zb0020 := range (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- bts, err = (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset", zb0020)
- return
- }
- }
- case "xaidbm":
- {
- var zb0435 []byte
- var zb0436 int
- zb0436, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- if zb0436 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0436), uint64(maxBitmaskSize))
- return
- }
- zb0435, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset = bitmask(zb0435)
- }
- case "aamt":
- var zb0437 int
- var zb0438 bool
- zb0437, zb0438, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0437 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0437), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0438 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount = nil
- } else if (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount != nil && cap((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount) >= zb0437 {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount = ((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount)[:zb0437]
- } else {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount = make([]uint64, zb0437)
- }
- for zb0021 := range (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount {
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount[zb0021], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount", zb0021)
- return
- }
- }
- case "aamtbm":
- {
- var zb0439 []byte
- var zb0440 int
- zb0440, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- if zb0440 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0440), uint64(maxBitmaskSize))
- return
- }
- zb0439, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount = bitmask(zb0439)
- }
- case "asnd":
- var zb0441 int
- zb0441, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- if zb0441 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0441), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- case "asndbm":
- {
- var zb0442 []byte
- var zb0443 int
- zb0443, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- if zb0443 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0443), uint64(maxBitmaskSize))
- return
- }
- zb0442, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender = bitmask(zb0442)
- }
- case "arcv":
- var zb0444 int
- zb0444, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- if zb0444 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0444), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- case "arcvbm":
- {
- var zb0445 []byte
- var zb0446 int
- zb0446, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- if zb0446 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0446), uint64(maxBitmaskSize))
- return
- }
- zb0445, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver = bitmask(zb0445)
- }
- case "aclose":
- var zb0447 int
- zb0447, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- if zb0447 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0447), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- case "aclosebm":
- {
- var zb0448 []byte
- var zb0449 int
- zb0449, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- if zb0449 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0449), uint64(maxBitmaskSize))
- return
- }
- zb0448, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- (*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo = bitmask(zb0448)
- }
- case "fadd":
- var zb0450 int
- zb0450, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- if zb0450 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0450), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- case "faddbm":
- {
- var zb0451 []byte
- var zb0452 int
- zb0452, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- if zb0452 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0452), uint64(maxBitmaskSize))
- return
- }
- zb0451, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount = bitmask(zb0451)
- }
- case "faid":
- var zb0453 int
- var zb0454 bool
- zb0453, zb0454, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0453 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0453), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0454 {
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = nil
- } else if (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset != nil && cap((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) >= zb0453 {
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = ((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset)[:zb0453]
- } else {
- (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = make([]basics.AssetIndex, zb0453)
- }
- for zb0022 := range (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- bts, err = (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset", zb0022)
- return
- }
- }
- case "faidbm":
- {
- var zb0455 []byte
- var zb0456 int
- zb0456, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- if zb0456 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0456), uint64(maxBitmaskSize))
- return
- }
- zb0455, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset = bitmask(zb0455)
- }
- case "afrzbm":
- {
- var zb0457 []byte
- var zb0458 int
- zb0458, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- if zb0458 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0458), uint64(maxBitmaskSize))
- return
- }
- zb0457, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- (*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen = bitmask(zb0457)
- }
- case "apid":
- var zb0459 int
- var zb0460 bool
- zb0459, zb0460, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0459 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0459), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0460 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID) >= zb0459 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID)[:zb0459]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID = make([]basics.AppIndex, zb0459)
- }
- for zb0023 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID", zb0023)
- return
- }
- }
- case "apidbm":
- {
- var zb0461 []byte
- var zb0462 int
- zb0462, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- if zb0462 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0462), uint64(maxBitmaskSize))
- return
- }
- zb0461, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID = bitmask(zb0461)
- }
- case "apan":
- var zb0463 int
- zb0463, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- if zb0463 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0463), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- case "apanbm":
- {
- var zb0464 []byte
- var zb0465 int
- zb0465, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- if zb0465 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0465), uint64(maxBitmaskSize))
- return
- }
- zb0464, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion = bitmask(zb0464)
- }
- case "apaa":
- var zb0466 int
- var zb0467 bool
- zb0466, zb0467, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0466 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0466), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0467 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) >= zb0466 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs)[:zb0466]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = make([]applicationArgs, zb0466)
- }
- for zb0024 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- var zb0468 int
- var zb0469 bool
- zb0468, zb0469, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0024)
- return
- }
- if zb0468 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0468), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "ApplicationArgs", zb0024)
- return
- }
- if zb0469 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024]) >= zb0468 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024])[:zb0468]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = make(applicationArgs, zb0468)
- }
- for zb0025 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0024, zb0025)
- return
- }
- }
- }
- case "apaabm":
- {
- var zb0470 []byte
- var zb0471 int
- zb0471, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- if zb0471 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0471), uint64(maxBitmaskSize))
- return
- }
- zb0470, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs = bitmask(zb0470)
- }
- case "apat":
- var zb0472 int
- var zb0473 bool
- zb0472, zb0473, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0472 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0472), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0473 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts) >= zb0472 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts = ((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts)[:zb0472]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts = make([]addresses, zb0472)
- }
- for zb0026 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts {
- var zb0474 int
- var zb0475 bool
- zb0474, zb0475, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0026)
- return
- }
- if zb0474 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0474), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "Accounts", zb0026)
- return
- }
- if zb0475 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026]) >= zb0474 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = ((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026])[:zb0474]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = make(addresses, zb0474)
- }
- for zb0027 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0026, zb0027)
- return
- }
- }
- }
- case "apatbm":
- {
- var zb0476 []byte
- var zb0477 int
- zb0477, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- if zb0477 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0477), uint64(maxBitmaskSize))
- return
- }
- zb0476, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts = bitmask(zb0476)
- }
- case "apfa":
- var zb0478 int
- var zb0479 bool
- zb0478, zb0479, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0478 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0478), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0479 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps) >= zb0478 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps)[:zb0478]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps = make([]appIndices, zb0478)
- }
- for zb0028 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- var zb0480 int
- var zb0481 bool
- zb0480, zb0481, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0028)
- return
- }
- if zb0480 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0480), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "ForeignApps", zb0028)
- return
- }
- if zb0481 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028]) >= zb0480 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028])[:zb0480]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = make(appIndices, zb0480)
- }
- for zb0029 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0028, zb0029)
- return
- }
- }
- }
- case "apfabm":
- {
- var zb0482 []byte
- var zb0483 int
- zb0483, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- if zb0483 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0483), uint64(maxBitmaskSize))
- return
- }
- zb0482, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps = bitmask(zb0482)
- }
- case "apas":
- var zb0484 int
- var zb0485 bool
- zb0484, zb0485, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0484 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0484), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0485 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) >= zb0484 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets)[:zb0484]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = make([]assetIndices, zb0484)
- }
- for zb0030 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- var zb0486 int
- var zb0487 bool
- zb0486, zb0487, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0030)
- return
- }
- if zb0486 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0486), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "ForeignAssets", zb0030)
- return
- }
- if zb0487 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030]) >= zb0486 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = ((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030])[:zb0486]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = make(assetIndices, zb0486)
- }
- for zb0031 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- bts, err = (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0030, zb0031)
- return
- }
- }
- }
- case "apasbm":
- {
- var zb0488 []byte
- var zb0489 int
- zb0489, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- if zb0489 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0489), uint64(maxBitmaskSize))
- return
- }
- zb0488, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets = bitmask(zb0488)
- }
- case "lnui":
- var zb0490 int
- var zb0491 bool
- zb0490, zb0491, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0490 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0490), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0491 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) >= zb0490 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = ((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint)[:zb0490]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = make([]uint64, zb0490)
- }
- for zb0032 := range (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint[zb0032], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint", zb0032)
- return
- }
- }
- case "lnuibm":
- {
- var zb0492 []byte
- var zb0493 int
- zb0493, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- if zb0493 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0493), uint64(maxBitmaskSize))
- return
- }
- zb0492, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint = bitmask(zb0492)
- }
- case "lnbs":
- var zb0494 int
- var zb0495 bool
- zb0494, zb0495, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0494 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0494), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0495 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) >= zb0494 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = ((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice)[:zb0494]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = make([]uint64, zb0494)
- }
- for zb0033 := range (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice {
- (*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice[zb0033], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice", zb0033)
- return
- }
- }
- case "lnbsbm":
- {
- var zb0496 []byte
- var zb0497 int
- zb0497, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- if zb0497 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0497), uint64(maxBitmaskSize))
- return
- }
- zb0496, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice = bitmask(zb0496)
- }
- case "gnui":
- var zb0498 int
- var zb0499 bool
- zb0498, zb0499, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0498 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0498), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0499 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) >= zb0498 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = ((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint)[:zb0498]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = make([]uint64, zb0498)
- }
- for zb0034 := range (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint[zb0034], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint", zb0034)
- return
- }
- }
- case "gnuibm":
- {
- var zb0500 []byte
- var zb0501 int
- zb0501, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- if zb0501 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0501), uint64(maxBitmaskSize))
- return
- }
- zb0500, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint = bitmask(zb0500)
- }
- case "gnbs":
- var zb0502 int
- var zb0503 bool
- zb0502, zb0503, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0502 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0502), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0503 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) >= zb0502 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = ((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice)[:zb0502]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = make([]uint64, zb0502)
- }
- for zb0035 := range (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice {
- (*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0035], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice", zb0035)
- return
- }
- }
- case "gnbsbm":
- {
- var zb0504 []byte
- var zb0505 int
- zb0505, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0505 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0505), uint64(maxBitmaskSize))
- return
- }
- zb0504, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice = bitmask(zb0504)
- }
- case "apap":
- var zb0506 int
- var zb0507 bool
- zb0506, zb0507, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0506 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0506), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0507 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) >= zb0506 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = ((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram)[:zb0506]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = make([]program, zb0506)
- }
- for zb0036 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- {
- var zb0508 []byte
- var zb0509 int
- zb0509, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0036)
- return
- }
- if zb0509 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0509), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0508, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0036)
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036] = program(zb0508)
- }
- }
- case "apapbm":
- {
- var zb0510 []byte
- var zb0511 int
- zb0511, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- if zb0511 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0511), uint64(maxBitmaskSize))
- return
- }
- zb0510, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram = bitmask(zb0510)
- }
- case "apsu":
- var zb0512 int
- var zb0513 bool
- zb0512, zb0513, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0512 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0512), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0513 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) >= zb0512 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = ((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram)[:zb0512]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = make([]program, zb0512)
- }
- for zb0037 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- {
- var zb0514 []byte
- var zb0515 int
- zb0515, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0037)
- return
- }
- if zb0515 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0515), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0514, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0037)
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037] = program(zb0514)
- }
- }
- case "apsubm":
- {
- var zb0516 []byte
- var zb0517 int
- zb0517, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- if zb0517 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0517), uint64(maxBitmaskSize))
- return
- }
- zb0516, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram = bitmask(zb0516)
- }
- case "apep":
- var zb0518 int
- var zb0519 bool
- zb0518, zb0519, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0518 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0518), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0519 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = nil
- } else if (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages != nil && cap((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) >= zb0518 {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = ((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages)[:zb0518]
- } else {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = make([]uint32, zb0518)
- }
- for zb0038 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages {
- (*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages[zb0038], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages", zb0038)
- return
- }
- }
- case "apepbm":
- {
- var zb0520 []byte
- var zb0521 int
- zb0521, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- if zb0521 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0521), uint64(maxBitmaskSize))
- return
- }
- zb0520, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- (*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages = bitmask(zb0520)
- }
- case "certrnd":
- var zb0522 int
- var zb0523 bool
- zb0522, zb0523, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0522 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0522), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0523 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertRound = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.CertRound != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.CertRound) >= zb0522 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertRound = ((*z).encodedTxns.encodedCompactCertTxnFields.CertRound)[:zb0522]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertRound = make([]basics.Round, zb0522)
- }
- for zb0039 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertRound {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound", zb0039)
- return
- }
- }
- case "certrndbm":
- {
- var zb0524 []byte
- var zb0525 int
- zb0525, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- if zb0525 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0525), uint64(maxBitmaskSize))
- return
- }
- zb0524, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound = bitmask(zb0524)
- }
- case "certtype":
- var zb0526 int
- var zb0527 bool
- zb0526, zb0527, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0526 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0526), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0527 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertType = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.CertType != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.CertType) >= zb0526 {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertType = ((*z).encodedTxns.encodedCompactCertTxnFields.CertType)[:zb0526]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.CertType = make([]protocol.CompactCertType, zb0526)
- }
- for zb0040 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertType {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType", zb0040)
- return
- }
- }
- case "certtypebm":
- {
- var zb0528 []byte
- var zb0529 int
- zb0529, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- if zb0529 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0529), uint64(maxBitmaskSize))
- return
- }
- zb0528, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType = bitmask(zb0528)
- }
- case "certc":
- var zb0530 int
- zb0530, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- if zb0530 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0530), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- case "certcbm":
- {
- var zb0531 []byte
- var zb0532 int
- zb0532, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- if zb0532 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0532), uint64(maxBitmaskSize))
- return
- }
- zb0531, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit = bitmask(zb0531)
- }
- case "certw":
- var zb0533 int
- var zb0534 bool
- zb0533, zb0534, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0533 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0533), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0534 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) >= zb0533 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight)[:zb0533]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = make([]uint64, zb0533)
- }
- for zb0041 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0041], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight", zb0041)
- return
- }
- }
- case "certwbm":
- {
- var zb0535 []byte
- var zb0536 int
- zb0536, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- if zb0536 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0536), uint64(maxBitmaskSize))
- return
- }
- zb0535, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight = bitmask(zb0535)
- }
- case "certS":
- var zb0537 int
- var zb0538 bool
- zb0537, zb0538, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0537 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0537), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0538 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) >= zb0537 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs)[:zb0537]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = make([]certProofs, zb0537)
- }
- for zb0042 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- var zb0539 int
- var zb0540 bool
- zb0539, zb0540, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0042)
- return
- }
- if zb0539 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0539), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "SigProofs", zb0042)
- return
- }
- if zb0540 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042]) >= zb0539 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042])[:zb0539]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = make(certProofs, zb0539)
- }
- for zb0043 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0042, zb0043)
- return
- }
- }
- }
- case "certSbm":
- {
- var zb0541 []byte
- var zb0542 int
- zb0542, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- if zb0542 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0542), uint64(maxBitmaskSize))
- return
- }
- zb0541, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs = bitmask(zb0541)
- }
- case "certP":
- var zb0543 int
- var zb0544 bool
- zb0543, zb0544, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0543 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0543), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0544 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) >= zb0543 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs)[:zb0543]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = make([]certProofs, zb0543)
- }
- for zb0044 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- var zb0545 int
- var zb0546 bool
- zb0545, zb0546, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0044)
- return
- }
- if zb0545 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0545), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "PartProofs", zb0044)
- return
- }
- if zb0546 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044]) >= zb0545 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044])[:zb0545]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = make(certProofs, zb0545)
- }
- for zb0045 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- bts, err = (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0044, zb0045)
- return
- }
- }
- }
- case "certPbm":
- {
- var zb0547 []byte
- var zb0548 int
- zb0548, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- if zb0548 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0548), uint64(maxBitmaskSize))
- return
- }
- zb0547, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs = bitmask(zb0547)
- }
- case "certr":
- var zb0549 int
- var zb0550 bool
- zb0549, zb0550, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0549 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0549), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0550 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals != nil && cap((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) >= zb0549 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = ((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals)[:zb0549]
- } else {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = make([]revealMap, zb0549)
- }
- for zb0046 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- var zb0551 int
- var zb0552 bool
- zb0551, zb0552, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0046)
- return
- }
- if zb0551 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0551), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "Reveals", zb0046)
- return
- }
- if zb0552 {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = nil
- } else if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] == nil {
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = make(revealMap, zb0551)
- }
- for zb0551 > 0 {
- var zb0047 uint64
- var zb0048 compactcert.Reveal
- zb0551--
- zb0047, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0046)
- return
- }
- bts, err = zb0048.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0046, zb0047)
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046][zb0047] = zb0048
- }
- }
- case "certrbm":
- {
- var zb0553 []byte
- var zb0554 int
- zb0554, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- if zb0554 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0554), uint64(maxBitmaskSize))
- return
- }
- zb0553, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals = bitmask(zb0553)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedSignedTxns) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedSignedTxns)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedSignedTxns) Msgsize() (s int) {
- s = 3 + 4 + msgp.BytesPrefixSize + len((*z).Sig) + 6 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskSig)) + 6 + msgp.BytesPrefixSize + len((*z).encodedMsigs.Version) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedMsigs.BitmaskVersion)) + 8 + msgp.BytesPrefixSize + len((*z).encodedMsigs.Threshold) + 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedMsigs.BitmaskThreshold)) + 7 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).encodedMsigs.Subsigs {
- s += msgp.ArrayHeaderSize
- for zb0002 := range (*z).encodedMsigs.Subsigs[zb0001] {
- s += (*z).encodedMsigs.Subsigs[zb0001][zb0002].Msgsize()
- }
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedMsigs.BitmaskSubsigs)) + 6 + msgp.ArrayHeaderSize
- for zb0003 := range (*z).encodedLsigs.Logic {
- s += msgp.BytesPrefixSize + len((*z).encodedLsigs.Logic[zb0003])
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedLsigs.BitmaskLogic)) + 8 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).encodedLsigs.LogicArgs {
- s += msgp.ArrayHeaderSize
- for zb0005 := range (*z).encodedLsigs.LogicArgs[zb0004] {
- s += msgp.BytesPrefixSize + len((*z).encodedLsigs.LogicArgs[zb0004][zb0005])
- }
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedLsigs.BitmaskLogicArgs)) + 5 + msgp.BytesPrefixSize + len((*z).AuthAddr) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskAuthAddr)) + 5 + msgp.BytesPrefixSize + len((*z).encodedTxns.TxType) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.BitmaskTxType)) + 6 + msgp.ByteSize + 4 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedTxnHeaders.Sender) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskSender)) + 4 + msgp.ArrayHeaderSize
- for zb0006 := range (*z).encodedTxns.encodedTxnHeaders.Fee {
- s += (*z).encodedTxns.encodedTxnHeaders.Fee[zb0006].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFee)) + 3 + msgp.ArrayHeaderSize
- for zb0007 := range (*z).encodedTxns.encodedTxnHeaders.FirstValid {
- s += (*z).encodedTxns.encodedTxnHeaders.FirstValid[zb0007].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid)) + 3 + msgp.ArrayHeaderSize
- for zb0008 := range (*z).encodedTxns.encodedTxnHeaders.LastValid {
- s += (*z).encodedTxns.encodedTxnHeaders.LastValid[zb0008].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid)) + 5 + msgp.ArrayHeaderSize
- for zb0009 := range (*z).encodedTxns.encodedTxnHeaders.Note {
- s += msgp.BytesPrefixSize + len((*z).encodedTxns.encodedTxnHeaders.Note[zb0009])
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskNote)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskGroup)) + 3 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedTxnHeaders.Lease) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskLease)) + 6 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedTxnHeaders.RekeyTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo)) + 8 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedKeyregTxnFields.VotePK) + 7 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedKeyregTxnFields.SelectionPK) + 8 + msgp.ArrayHeaderSize
- for zb0010 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst {
- s += (*z).encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst)) + 8 + msgp.ArrayHeaderSize
- for zb0011 := range (*z).encodedTxns.encodedKeyregTxnFields.VoteLast {
- s += (*z).encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast)) + 7 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys)) + 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation)) + 4 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedPaymentTxnFields.Receiver) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver)) + 4 + msgp.ArrayHeaderSize
- for zb0013 := range (*z).encodedTxns.encodedPaymentTxnFields.Amount {
- s += (*z).encodedTxns.encodedPaymentTxnFields.Amount[zb0013].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount)) + 6 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo)) + 5 + msgp.ArrayHeaderSize
- for zb0014 := range (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- s += (*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset)) + 2 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) * (msgp.Uint64Size)) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal)) + 3 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) * (msgp.Uint32Size)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen)) + 3 + msgp.ArrayHeaderSize
- for zb0017 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- s += msgp.StringPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName)) + 3 + msgp.ArrayHeaderSize
- for zb0018 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- s += msgp.StringPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName)) + 3 + msgp.ArrayHeaderSize
- for zb0019 := range (*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- s += msgp.StringPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL)) + 3 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash)) + 2 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager)) + 2 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve)) + 2 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze)) + 2 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback)) + 5 + msgp.ArrayHeaderSize
- for zb0020 := range (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- s += (*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount)) + 5 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender)) + 5 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver)) + 7 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo) + 9 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo)) + 5 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount)) + 5 + msgp.ArrayHeaderSize
- for zb0022 := range (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- s += (*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen)) + 5 + msgp.ArrayHeaderSize
- for zb0023 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- s += (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID)) + 5 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion)) + 5 + msgp.ArrayHeaderSize
- for zb0024 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- s += msgp.ArrayHeaderSize
- for zb0025 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- s += msgp.BytesPrefixSize + len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs)) + 5 + msgp.ArrayHeaderSize
- for zb0026 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts {
- s += msgp.ArrayHeaderSize
- for zb0027 := range (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- s += (*z).encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts)) + 5 + msgp.ArrayHeaderSize
- for zb0028 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- s += msgp.ArrayHeaderSize
- for zb0029 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- s += (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps)) + 5 + msgp.ArrayHeaderSize
- for zb0030 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- s += msgp.ArrayHeaderSize
- for zb0031 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- s += (*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice)) + 5 + msgp.ArrayHeaderSize
- for zb0036 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram)) + 5 + msgp.ArrayHeaderSize
- for zb0037 := range (*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) * (msgp.Uint32Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages)) + 8 + msgp.ArrayHeaderSize
- for zb0039 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertRound {
- s += (*z).encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound)) + 9 + msgp.ArrayHeaderSize
- for zb0040 := range (*z).encodedTxns.encodedCompactCertTxnFields.CertType {
- s += (*z).encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].Msgsize()
- }
- s += 11 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType)) + 6 + msgp.BytesPrefixSize + len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit)) + 6 + msgp.ArrayHeaderSize + (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight)) + 6 + msgp.ArrayHeaderSize
- for zb0042 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- s += msgp.ArrayHeaderSize
- for zb0043 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- s += (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0044 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- s += msgp.ArrayHeaderSize
- for zb0045 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- s += (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0046 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- s += msgp.MapHeaderSize
- if (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] != nil {
- for zb0047, zb0048 := range (*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] {
- _ = zb0047
- _ = zb0048
- s += 0 + msgp.Uint64Size + zb0048.Msgsize()
- }
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedSignedTxns) MsgIsZero() bool {
- return (len((*z).Sig) == 0) && (len((*z).BitmaskSig) == 0) && (len((*z).encodedMsigs.Version) == 0) && (len((*z).encodedMsigs.BitmaskVersion) == 0) && (len((*z).encodedMsigs.Threshold) == 0) && (len((*z).encodedMsigs.BitmaskThreshold) == 0) && (len((*z).encodedMsigs.Subsigs) == 0) && (len((*z).encodedMsigs.BitmaskSubsigs) == 0) && (len((*z).encodedLsigs.Logic) == 0) && (len((*z).encodedLsigs.BitmaskLogic) == 0) && (len((*z).encodedLsigs.LogicArgs) == 0) && (len((*z).encodedLsigs.BitmaskLogicArgs) == 0) && (len((*z).AuthAddr) == 0) && (len((*z).BitmaskAuthAddr) == 0) && (len((*z).encodedTxns.TxType) == 0) && (len((*z).encodedTxns.BitmaskTxType) == 0) && ((*z).encodedTxns.TxTypeOffset == 0) && (len((*z).encodedTxns.encodedTxnHeaders.Sender) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskSender) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.Fee) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskFee) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.FirstValid) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskFirstValid) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.LastValid) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskLastValid) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.Note) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskNote) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskGenesisID) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskGroup) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.Lease) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskLease) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.RekeyTo) == 0) && (len((*z).encodedTxns.encodedTxnHeaders.BitmaskRekeyTo) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.VotePK) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.SelectionPK) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.VoteFirst) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.VoteLast) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskKeys) == 0) && (len((*z).encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation) == 0) && (len((*z).encodedTxns.encodedPaymentTxnFields.Receiver) == 0) && (len((*z).encodedTxns.encodedPaymentTxnFields.BitmaskReceiver) == 0) && (len((*z).encodedTxns.encodedPaymentTxnFields.Amount) == 0) && (len((*z).encodedTxns.encodedPaymentTxnFields.BitmaskAmount) == 0) && (len((*z).encodedTxns.encodedPaymentTxnFields.CloseRemainderTo) == 0) && (len((*z).encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback) == 0) && (len((*z).encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.XferAsset) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetAmount) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetSender) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetReceiver) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo) == 0) && (len((*z).encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo) == 0) && (len((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount) == 0) && (len((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount) == 0) && (len((*z).encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) == 0) && (len((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset) == 0) && (len((*z).encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationID) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.OnCompletion) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.Accounts) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) == 0) && (len((*z).encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.CertRound) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.CertType) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.BitmaskCertType) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) == 0) && (len((*z).encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedTxnHeaders) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0005Len := uint32(16)
- var zb0005Mask uint32 /* 17 bits */
- if len((*z).Fee) == 0 {
- zb0005Len--
- zb0005Mask |= 0x2
- }
- if len((*z).BitmaskFee) == 0 {
- zb0005Len--
- zb0005Mask |= 0x4
- }
- if len((*z).FirstValid) == 0 {
- zb0005Len--
- zb0005Mask |= 0x8
- }
- if len((*z).BitmaskFirstValid) == 0 {
- zb0005Len--
- zb0005Mask |= 0x10
- }
- if len((*z).BitmaskGenesisID) == 0 {
- zb0005Len--
- zb0005Mask |= 0x20
- }
- if len((*z).BitmaskGroup) == 0 {
- zb0005Len--
- zb0005Mask |= 0x40
- }
- if len((*z).LastValid) == 0 {
- zb0005Len--
- zb0005Mask |= 0x80
- }
- if len((*z).BitmaskLastValid) == 0 {
- zb0005Len--
- zb0005Mask |= 0x100
- }
- if len((*z).Lease) == 0 {
- zb0005Len--
- zb0005Mask |= 0x200
- }
- if len((*z).BitmaskLease) == 0 {
- zb0005Len--
- zb0005Mask |= 0x400
- }
- if len((*z).Note) == 0 {
- zb0005Len--
- zb0005Mask |= 0x800
- }
- if len((*z).BitmaskNote) == 0 {
- zb0005Len--
- zb0005Mask |= 0x1000
- }
- if len((*z).RekeyTo) == 0 {
- zb0005Len--
- zb0005Mask |= 0x2000
- }
- if len((*z).BitmaskRekeyTo) == 0 {
- zb0005Len--
- zb0005Mask |= 0x4000
- }
- if len((*z).Sender) == 0 {
- zb0005Len--
- zb0005Mask |= 0x8000
- }
- if len((*z).BitmaskSender) == 0 {
- zb0005Len--
- zb0005Mask |= 0x10000
- }
- // variable map header, size zb0005Len
- o = msgp.AppendMapHeader(o, zb0005Len)
- if zb0005Len != 0 {
- if (zb0005Mask & 0x2) == 0 { // if not empty
- // string "fee"
- o = append(o, 0xa3, 0x66, 0x65, 0x65)
- if (*z).Fee == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Fee)))
- }
- for zb0001 := range (*z).Fee {
- o = (*z).Fee[zb0001].MarshalMsg(o)
- }
- }
- if (zb0005Mask & 0x4) == 0 { // if not empty
- // string "feebm"
- o = append(o, 0xa5, 0x66, 0x65, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskFee))
- }
- if (zb0005Mask & 0x8) == 0 { // if not empty
- // string "fv"
- o = append(o, 0xa2, 0x66, 0x76)
- if (*z).FirstValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).FirstValid)))
- }
- for zb0002 := range (*z).FirstValid {
- o = (*z).FirstValid[zb0002].MarshalMsg(o)
- }
- }
- if (zb0005Mask & 0x10) == 0 { // if not empty
- // string "fvbm"
- o = append(o, 0xa4, 0x66, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskFirstValid))
- }
- if (zb0005Mask & 0x20) == 0 { // if not empty
- // string "genbm"
- o = append(o, 0xa5, 0x67, 0x65, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskGenesisID))
- }
- if (zb0005Mask & 0x40) == 0 { // if not empty
- // string "grpbm"
- o = append(o, 0xa5, 0x67, 0x72, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskGroup))
- }
- if (zb0005Mask & 0x80) == 0 { // if not empty
- // string "lv"
- o = append(o, 0xa2, 0x6c, 0x76)
- if (*z).LastValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).LastValid)))
- }
- for zb0003 := range (*z).LastValid {
- o = (*z).LastValid[zb0003].MarshalMsg(o)
- }
- }
- if (zb0005Mask & 0x100) == 0 { // if not empty
- // string "lvbm"
- o = append(o, 0xa4, 0x6c, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskLastValid))
- }
- if (zb0005Mask & 0x200) == 0 { // if not empty
- // string "lx"
- o = append(o, 0xa2, 0x6c, 0x78)
- o = msgp.AppendBytes(o, (*z).Lease)
- }
- if (zb0005Mask & 0x400) == 0 { // if not empty
- // string "lxbm"
- o = append(o, 0xa4, 0x6c, 0x78, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskLease))
- }
- if (zb0005Mask & 0x800) == 0 { // if not empty
- // string "note"
- o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65)
- if (*z).Note == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).Note)))
- }
- for zb0004 := range (*z).Note {
- o = msgp.AppendBytes(o, (*z).Note[zb0004])
- }
- }
- if (zb0005Mask & 0x1000) == 0 { // if not empty
- // string "notebm"
- o = append(o, 0xa6, 0x6e, 0x6f, 0x74, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskNote))
- }
- if (zb0005Mask & 0x2000) == 0 { // if not empty
- // string "rekey"
- o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).RekeyTo)
- }
- if (zb0005Mask & 0x4000) == 0 { // if not empty
- // string "rekeybm"
- o = append(o, 0xa7, 0x72, 0x65, 0x6b, 0x65, 0x79, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskRekeyTo))
- }
- if (zb0005Mask & 0x8000) == 0 { // if not empty
- // string "snd"
- o = append(o, 0xa3, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).Sender)
- }
- if (zb0005Mask & 0x10000) == 0 { // if not empty
- // string "sndbm"
- o = append(o, 0xa5, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskSender))
- }
- }
- return
-}
-
-func (_ *encodedTxnHeaders) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedTxnHeaders)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedTxnHeaders) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0005 > 0 {
- zb0005--
- var zb0007 int
- zb0007, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- if zb0007 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(maxAddressBytes))
- return
- }
- (*z).Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).Sender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0008 []byte
- var zb0009 int
- zb0009, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- if zb0009 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0009), uint64(maxBitmaskSize))
- return
- }
- zb0008, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- (*z).BitmaskSender = bitmask(zb0008)
- }
- }
- if zb0005 > 0 {
- zb0005--
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0010 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0011 {
- (*z).Fee = nil
- } else if (*z).Fee != nil && cap((*z).Fee) >= zb0010 {
- (*z).Fee = ((*z).Fee)[:zb0010]
- } else {
- (*z).Fee = make([]basics.MicroAlgos, zb0010)
- }
- for zb0001 := range (*z).Fee {
- bts, err = (*z).Fee[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee", zb0001)
- return
- }
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0012 []byte
- var zb0013 int
- zb0013, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- if zb0013 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(maxBitmaskSize))
- return
- }
- zb0012, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- (*z).BitmaskFee = bitmask(zb0012)
- }
- }
- if zb0005 > 0 {
- zb0005--
- var zb0014 int
- var zb0015 bool
- zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0014 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0015 {
- (*z).FirstValid = nil
- } else if (*z).FirstValid != nil && cap((*z).FirstValid) >= zb0014 {
- (*z).FirstValid = ((*z).FirstValid)[:zb0014]
- } else {
- (*z).FirstValid = make([]basics.Round, zb0014)
- }
- for zb0002 := range (*z).FirstValid {
- bts, err = (*z).FirstValid[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid", zb0002)
- return
- }
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0016 []byte
- var zb0017 int
- zb0017, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- if zb0017 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(maxBitmaskSize))
- return
- }
- zb0016, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- (*z).BitmaskFirstValid = bitmask(zb0016)
- }
- }
- if zb0005 > 0 {
- zb0005--
- var zb0018 int
- var zb0019 bool
- zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0018 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0018), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0019 {
- (*z).LastValid = nil
- } else if (*z).LastValid != nil && cap((*z).LastValid) >= zb0018 {
- (*z).LastValid = ((*z).LastValid)[:zb0018]
- } else {
- (*z).LastValid = make([]basics.Round, zb0018)
- }
- for zb0003 := range (*z).LastValid {
- bts, err = (*z).LastValid[zb0003].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid", zb0003)
- return
- }
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0020 []byte
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- if zb0021 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(maxBitmaskSize))
- return
- }
- zb0020, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- (*z).BitmaskLastValid = bitmask(zb0020)
- }
- }
- if zb0005 > 0 {
- zb0005--
- var zb0022 int
- var zb0023 bool
- zb0022, zb0023, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0022 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0023 {
- (*z).Note = nil
- } else if (*z).Note != nil && cap((*z).Note) >= zb0022 {
- (*z).Note = ((*z).Note)[:zb0022]
- } else {
- (*z).Note = make([][]byte, zb0022)
- }
- for zb0004 := range (*z).Note {
- var zb0024 int
- zb0024, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0004)
- return
- }
- if zb0024 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).Note[zb0004], bts, err = msgp.ReadBytesBytes(bts, (*z).Note[zb0004])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0004)
- return
- }
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0025 []byte
- var zb0026 int
- zb0026, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- if zb0026 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(maxBitmaskSize))
- return
- }
- zb0025, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- (*z).BitmaskNote = bitmask(zb0025)
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0027 []byte
- var zb0028 int
- zb0028, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- if zb0028 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(maxBitmaskSize))
- return
- }
- zb0027, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- (*z).BitmaskGenesisID = bitmask(zb0027)
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0029 []byte
- var zb0030 int
- zb0030, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- if zb0030 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(maxBitmaskSize))
- return
- }
- zb0029, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- (*z).BitmaskGroup = bitmask(zb0029)
- }
- }
- if zb0005 > 0 {
- zb0005--
- var zb0031 int
- zb0031, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- if zb0031 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0031), uint64(maxAddressBytes))
- return
- }
- (*z).Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).Lease)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0032 []byte
- var zb0033 int
- zb0033, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- if zb0033 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0033), uint64(maxBitmaskSize))
- return
- }
- zb0032, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- (*z).BitmaskLease = bitmask(zb0032)
- }
- }
- if zb0005 > 0 {
- zb0005--
- var zb0034 int
- zb0034, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- if zb0034 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0034), uint64(maxAddressBytes))
- return
- }
- (*z).RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- }
- if zb0005 > 0 {
- zb0005--
- {
- var zb0035 []byte
- var zb0036 int
- zb0036, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- if zb0036 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0036), uint64(maxBitmaskSize))
- return
- }
- zb0035, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- (*z).BitmaskRekeyTo = bitmask(zb0035)
- }
- }
- if zb0005 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0005)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0006 {
- (*z) = encodedTxnHeaders{}
- }
- for zb0005 > 0 {
- zb0005--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "snd":
- var zb0037 int
- zb0037, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- if zb0037 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0037), uint64(maxAddressBytes))
- return
- }
- (*z).Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).Sender)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- case "sndbm":
- {
- var zb0038 []byte
- var zb0039 int
- zb0039, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- if zb0039 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0039), uint64(maxBitmaskSize))
- return
- }
- zb0038, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- (*z).BitmaskSender = bitmask(zb0038)
- }
- case "fee":
- var zb0040 int
- var zb0041 bool
- zb0040, zb0041, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0040 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0040), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0041 {
- (*z).Fee = nil
- } else if (*z).Fee != nil && cap((*z).Fee) >= zb0040 {
- (*z).Fee = ((*z).Fee)[:zb0040]
- } else {
- (*z).Fee = make([]basics.MicroAlgos, zb0040)
- }
- for zb0001 := range (*z).Fee {
- bts, err = (*z).Fee[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee", zb0001)
- return
- }
- }
- case "feebm":
- {
- var zb0042 []byte
- var zb0043 int
- zb0043, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- if zb0043 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0043), uint64(maxBitmaskSize))
- return
- }
- zb0042, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- (*z).BitmaskFee = bitmask(zb0042)
- }
- case "fv":
- var zb0044 int
- var zb0045 bool
- zb0044, zb0045, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0044 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0044), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0045 {
- (*z).FirstValid = nil
- } else if (*z).FirstValid != nil && cap((*z).FirstValid) >= zb0044 {
- (*z).FirstValid = ((*z).FirstValid)[:zb0044]
- } else {
- (*z).FirstValid = make([]basics.Round, zb0044)
- }
- for zb0002 := range (*z).FirstValid {
- bts, err = (*z).FirstValid[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid", zb0002)
- return
- }
- }
- case "fvbm":
- {
- var zb0046 []byte
- var zb0047 int
- zb0047, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- if zb0047 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0047), uint64(maxBitmaskSize))
- return
- }
- zb0046, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- (*z).BitmaskFirstValid = bitmask(zb0046)
- }
- case "lv":
- var zb0048 int
- var zb0049 bool
- zb0048, zb0049, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0048 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0048), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0049 {
- (*z).LastValid = nil
- } else if (*z).LastValid != nil && cap((*z).LastValid) >= zb0048 {
- (*z).LastValid = ((*z).LastValid)[:zb0048]
- } else {
- (*z).LastValid = make([]basics.Round, zb0048)
- }
- for zb0003 := range (*z).LastValid {
- bts, err = (*z).LastValid[zb0003].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid", zb0003)
- return
- }
- }
- case "lvbm":
- {
- var zb0050 []byte
- var zb0051 int
- zb0051, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- if zb0051 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0051), uint64(maxBitmaskSize))
- return
- }
- zb0050, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- (*z).BitmaskLastValid = bitmask(zb0050)
- }
- case "note":
- var zb0052 int
- var zb0053 bool
- zb0052, zb0053, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0052 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0052), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0053 {
- (*z).Note = nil
- } else if (*z).Note != nil && cap((*z).Note) >= zb0052 {
- (*z).Note = ((*z).Note)[:zb0052]
- } else {
- (*z).Note = make([][]byte, zb0052)
- }
- for zb0004 := range (*z).Note {
- var zb0054 int
- zb0054, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0004)
- return
- }
- if zb0054 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).Note[zb0004], bts, err = msgp.ReadBytesBytes(bts, (*z).Note[zb0004])
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0004)
- return
- }
- }
- case "notebm":
- {
- var zb0055 []byte
- var zb0056 int
- zb0056, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- if zb0056 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0056), uint64(maxBitmaskSize))
- return
- }
- zb0055, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- (*z).BitmaskNote = bitmask(zb0055)
- }
- case "genbm":
- {
- var zb0057 []byte
- var zb0058 int
- zb0058, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- if zb0058 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0058), uint64(maxBitmaskSize))
- return
- }
- zb0057, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- (*z).BitmaskGenesisID = bitmask(zb0057)
- }
- case "grpbm":
- {
- var zb0059 []byte
- var zb0060 int
- zb0060, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- if zb0060 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0060), uint64(maxBitmaskSize))
- return
- }
- zb0059, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- (*z).BitmaskGroup = bitmask(zb0059)
- }
- case "lx":
- var zb0061 int
- zb0061, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- if zb0061 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0061), uint64(maxAddressBytes))
- return
- }
- (*z).Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).Lease)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- case "lxbm":
- {
- var zb0062 []byte
- var zb0063 int
- zb0063, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- if zb0063 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0063), uint64(maxBitmaskSize))
- return
- }
- zb0062, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- (*z).BitmaskLease = bitmask(zb0062)
- }
- case "rekey":
- var zb0064 int
- zb0064, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- if zb0064 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0064), uint64(maxAddressBytes))
- return
- }
- (*z).RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- case "rekeybm":
- {
- var zb0065 []byte
- var zb0066 int
- zb0066, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- if zb0066 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0066), uint64(maxBitmaskSize))
- return
- }
- zb0065, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- (*z).BitmaskRekeyTo = bitmask(zb0065)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedTxnHeaders) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedTxnHeaders)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedTxnHeaders) Msgsize() (s int) {
- s = 3 + 4 + msgp.BytesPrefixSize + len((*z).Sender) + 6 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskSender)) + 4 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).Fee {
- s += (*z).Fee[zb0001].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskFee)) + 3 + msgp.ArrayHeaderSize
- for zb0002 := range (*z).FirstValid {
- s += (*z).FirstValid[zb0002].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskFirstValid)) + 3 + msgp.ArrayHeaderSize
- for zb0003 := range (*z).LastValid {
- s += (*z).LastValid[zb0003].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskLastValid)) + 5 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).Note {
- s += msgp.BytesPrefixSize + len((*z).Note[zb0004])
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskNote)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskGenesisID)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskGroup)) + 3 + msgp.BytesPrefixSize + len((*z).Lease) + 5 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskLease)) + 6 + msgp.BytesPrefixSize + len((*z).RekeyTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskRekeyTo))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedTxnHeaders) MsgIsZero() bool {
- return (len((*z).Sender) == 0) && (len((*z).BitmaskSender) == 0) && (len((*z).Fee) == 0) && (len((*z).BitmaskFee) == 0) && (len((*z).FirstValid) == 0) && (len((*z).BitmaskFirstValid) == 0) && (len((*z).LastValid) == 0) && (len((*z).BitmaskLastValid) == 0) && (len((*z).Note) == 0) && (len((*z).BitmaskNote) == 0) && (len((*z).BitmaskGenesisID) == 0) && (len((*z).BitmaskGroup) == 0) && (len((*z).Lease) == 0) && (len((*z).BitmaskLease) == 0) && (len((*z).RekeyTo) == 0) && (len((*z).BitmaskRekeyTo) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedTxns) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0044Len := uint32(112)
- var zb0044Mask [2]uint64 /* 123 bits */
- if len((*z).encodedAssetTransferTxnFields.AssetAmount) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x800
- }
- if len((*z).encodedAssetTransferTxnFields.BitmaskAssetAmount) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x1000
- }
- if len((*z).encodedAssetTransferTxnFields.AssetCloseTo) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x2000
- }
- if len((*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x4000
- }
- if len((*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x8000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x10000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x20000
- }
- if len((*z).encodedPaymentTxnFields.Amount) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x40000
- }
- if len((*z).encodedPaymentTxnFields.BitmaskAmount) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x80000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x100000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x200000
- }
- if len((*z).encodedApplicationCallTxnFields.ApplicationArgs) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x400000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x800000
- }
- if len((*z).encodedApplicationCallTxnFields.OnCompletion) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x1000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskOnCompletion) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x2000000
- }
- if len((*z).encodedApplicationCallTxnFields.ApprovalProgram) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x4000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x8000000
- }
- if len((*z).encodedApplicationCallTxnFields.ForeignAssets) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x10000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskForeignAssets) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x20000000
- }
- if len((*z).encodedApplicationCallTxnFields.Accounts) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x40000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskAccounts) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x80000000
- }
- if len((*z).encodedApplicationCallTxnFields.ExtraProgramPages) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x100000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x200000000
- }
- if len((*z).encodedApplicationCallTxnFields.ForeignApps) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x400000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskForeignApps) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x800000000
- }
- if len((*z).encodedApplicationCallTxnFields.ApplicationID) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x1000000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskApplicationID) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x2000000000
- }
- if len((*z).encodedApplicationCallTxnFields.ClearStateProgram) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x4000000000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x8000000000
- }
- if len((*z).encodedAssetTransferTxnFields.AssetReceiver) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x10000000000
- }
- if len((*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x20000000000
- }
- if len((*z).encodedAssetTransferTxnFields.AssetSender) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x40000000000
- }
- if len((*z).encodedAssetTransferTxnFields.BitmaskAssetSender) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x80000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x100000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x200000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x400000000000
- }
- if len((*z).encodedAssetConfigTxnFields.ConfigAsset) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x800000000000
- }
- if len((*z).encodedAssetConfigTxnFields.BitmaskConfigAsset) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x1000000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x2000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.PartProofs) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x4000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x8000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.SigProofs) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x10000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x20000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.SigCommit) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x40000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x80000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.Reveals) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x100000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x200000000000000
- }
- if len((*z).encodedCompactCertTxnFields.CertRound) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x400000000000000
- }
- if len((*z).encodedCompactCertTxnFields.BitmaskCertRound) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x800000000000000
- }
- if len((*z).encodedCompactCertTxnFields.CertType) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x1000000000000000
- }
- if len((*z).encodedCompactCertTxnFields.BitmaskCertType) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x2000000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x4000000000000000
- }
- if len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight) == 0 {
- zb0044Len--
- zb0044Mask[0] |= 0x8000000000000000
- }
- if len((*z).encodedPaymentTxnFields.CloseRemainderTo) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x1
- }
- if len((*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x2
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x4
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x8
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x10
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x20
- }
- if len((*z).encodedAssetFreezeTxnFields.FreezeAccount) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x40
- }
- if len((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x80
- }
- if len((*z).encodedAssetFreezeTxnFields.FreezeAsset) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x100
- }
- if len((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x200
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x400
- }
- if len((*z).encodedTxnHeaders.Fee) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x800
- }
- if len((*z).encodedTxnHeaders.BitmaskFee) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x1000
- }
- if len((*z).encodedTxnHeaders.FirstValid) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x2000
- }
- if len((*z).encodedTxnHeaders.BitmaskFirstValid) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x4000
- }
- if len((*z).encodedTxnHeaders.BitmaskGenesisID) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x8000
- }
- if len((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x10000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x20000
- }
- if len((*z).encodedApplicationCallTxnFields.GlobalNumUint) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x40000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x80000
- }
- if len((*z).encodedTxnHeaders.BitmaskGroup) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x100000
- }
- if len((*z).encodedApplicationCallTxnFields.LocalNumByteSlice) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x200000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x400000
- }
- if len((*z).encodedApplicationCallTxnFields.LocalNumUint) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x800000
- }
- if len((*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x1000000
- }
- if len((*z).encodedTxnHeaders.LastValid) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x2000000
- }
- if len((*z).encodedTxnHeaders.BitmaskLastValid) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x4000000
- }
- if len((*z).encodedTxnHeaders.Lease) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x8000000
- }
- if len((*z).encodedTxnHeaders.BitmaskLease) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x10000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x20000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x40000000
- }
- if len((*z).encodedKeyregTxnFields.BitmaskNonparticipation) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x80000000
- }
- if len((*z).encodedTxnHeaders.Note) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x100000000
- }
- if len((*z).encodedTxnHeaders.BitmaskNote) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x200000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x400000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x800000000
- }
- if len((*z).encodedPaymentTxnFields.Receiver) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x1000000000
- }
- if len((*z).encodedPaymentTxnFields.BitmaskReceiver) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x2000000000
- }
- if len((*z).encodedTxnHeaders.RekeyTo) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x4000000000
- }
- if len((*z).encodedTxnHeaders.BitmaskRekeyTo) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x8000000000
- }
- if len((*z).encodedKeyregTxnFields.SelectionPK) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x10000000000
- }
- if len((*z).encodedTxnHeaders.Sender) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x20000000000
- }
- if len((*z).encodedTxnHeaders.BitmaskSender) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x40000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x80000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x100000000000
- }
- if len((*z).TxType) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x200000000000
- }
- if len((*z).BitmaskTxType) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x400000000000
- }
- if (*z).TxTypeOffset == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x800000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x1000000000000
- }
- if len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x2000000000000
- }
- if len((*z).encodedKeyregTxnFields.VoteFirst) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x4000000000000
- }
- if len((*z).encodedKeyregTxnFields.BitmaskVoteFirst) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x8000000000000
- }
- if len((*z).encodedKeyregTxnFields.BitmaskKeys) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x10000000000000
- }
- if len((*z).encodedKeyregTxnFields.VoteKeyDilution) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x20000000000000
- }
- if len((*z).encodedKeyregTxnFields.VotePK) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x40000000000000
- }
- if len((*z).encodedKeyregTxnFields.VoteLast) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x80000000000000
- }
- if len((*z).encodedKeyregTxnFields.BitmaskVoteLast) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x100000000000000
- }
- if len((*z).encodedAssetTransferTxnFields.XferAsset) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x200000000000000
- }
- if len((*z).encodedAssetTransferTxnFields.BitmaskXferAsset) == 0 {
- zb0044Len--
- zb0044Mask[1] |= 0x400000000000000
- }
- // variable map header, size zb0044Len
- o = msgp.AppendMapHeader(o, zb0044Len)
- if zb0044Len != 0 {
- if (zb0044Mask[0] & 0x800) == 0 { // if not empty
- // string "aamt"
- o = append(o, 0xa4, 0x61, 0x61, 0x6d, 0x74)
- if (*z).encodedAssetTransferTxnFields.AssetAmount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetTransferTxnFields.AssetAmount)))
- }
- for zb0016 := range (*z).encodedAssetTransferTxnFields.AssetAmount {
- o = msgp.AppendUint64(o, (*z).encodedAssetTransferTxnFields.AssetAmount[zb0016])
- }
- }
- if (zb0044Mask[0] & 0x1000) == 0 { // if not empty
- // string "aamtbm"
- o = append(o, 0xa6, 0x61, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetAmount))
- }
- if (zb0044Mask[0] & 0x2000) == 0 { // if not empty
- // string "aclose"
- o = append(o, 0xa6, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedAssetTransferTxnFields.AssetCloseTo)
- }
- if (zb0044Mask[0] & 0x4000) == 0 { // if not empty
- // string "aclosebm"
- o = append(o, 0xa8, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- }
- if (zb0044Mask[0] & 0x8000) == 0 { // if not empty
- // string "afrzbm"
- o = append(o, 0xa6, 0x61, 0x66, 0x72, 0x7a, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- }
- if (zb0044Mask[0] & 0x10000) == 0 { // if not empty
- // string "am"
- o = append(o, 0xa2, 0x61, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- }
- if (zb0044Mask[0] & 0x20000) == 0 { // if not empty
- // string "ambm"
- o = append(o, 0xa4, 0x61, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- }
- if (zb0044Mask[0] & 0x40000) == 0 { // if not empty
- // string "amt"
- o = append(o, 0xa3, 0x61, 0x6d, 0x74)
- if (*z).encodedPaymentTxnFields.Amount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedPaymentTxnFields.Amount)))
- }
- for zb0008 := range (*z).encodedPaymentTxnFields.Amount {
- o = (*z).encodedPaymentTxnFields.Amount[zb0008].MarshalMsg(o)
- }
- }
- if (zb0044Mask[0] & 0x80000) == 0 { // if not empty
- // string "amtbm"
- o = append(o, 0xa5, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedPaymentTxnFields.BitmaskAmount))
- }
- if (zb0044Mask[0] & 0x100000) == 0 { // if not empty
- // string "an"
- o = append(o, 0xa2, 0x61, 0x6e)
- if (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName)))
- }
- for zb0013 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- o = msgp.AppendString(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0013])
- }
- }
- if (zb0044Mask[0] & 0x200000) == 0 { // if not empty
- // string "anbm"
- o = append(o, 0xa4, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- }
- if (zb0044Mask[0] & 0x400000) == 0 { // if not empty
- // string "apaa"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
- if (*z).encodedApplicationCallTxnFields.ApplicationArgs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ApplicationArgs)))
- }
- for zb0019 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs {
- if (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019])))
- }
- for zb0020 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] {
- o = msgp.AppendBytes(o, (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019][zb0020])
- }
- }
- }
- if (zb0044Mask[0] & 0x800000) == 0 { // if not empty
- // string "apaabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- }
- if (zb0044Mask[0] & 0x1000000) == 0 { // if not empty
- // string "apan"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
- o = msgp.AppendBytes(o, (*z).encodedApplicationCallTxnFields.OnCompletion)
- }
- if (zb0044Mask[0] & 0x2000000) == 0 { // if not empty
- // string "apanbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskOnCompletion))
- }
- if (zb0044Mask[0] & 0x4000000) == 0 { // if not empty
- // string "apap"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
- if (*z).encodedApplicationCallTxnFields.ApprovalProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ApprovalProgram)))
- }
- for zb0031 := range (*z).encodedApplicationCallTxnFields.ApprovalProgram {
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.ApprovalProgram[zb0031]))
- }
- }
- if (zb0044Mask[0] & 0x8000000) == 0 { // if not empty
- // string "apapbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- }
- if (zb0044Mask[0] & 0x10000000) == 0 { // if not empty
- // string "apas"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
- if (*z).encodedApplicationCallTxnFields.ForeignAssets == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ForeignAssets)))
- }
- for zb0025 := range (*z).encodedApplicationCallTxnFields.ForeignAssets {
- if (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025])))
- }
- for zb0026 := range (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] {
- o = (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025][zb0026].MarshalMsg(o)
- }
- }
- }
- if (zb0044Mask[0] & 0x20000000) == 0 { // if not empty
- // string "apasbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskForeignAssets))
- }
- if (zb0044Mask[0] & 0x40000000) == 0 { // if not empty
- // string "apat"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
- if (*z).encodedApplicationCallTxnFields.Accounts == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.Accounts)))
- }
- for zb0021 := range (*z).encodedApplicationCallTxnFields.Accounts {
- if (*z).encodedApplicationCallTxnFields.Accounts[zb0021] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.Accounts[zb0021])))
- }
- for zb0022 := range (*z).encodedApplicationCallTxnFields.Accounts[zb0021] {
- o = (*z).encodedApplicationCallTxnFields.Accounts[zb0021][zb0022].MarshalMsg(o)
- }
- }
- }
- if (zb0044Mask[0] & 0x80000000) == 0 { // if not empty
- // string "apatbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskAccounts))
- }
- if (zb0044Mask[0] & 0x100000000) == 0 { // if not empty
- // string "apep"
- o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
- if (*z).encodedApplicationCallTxnFields.ExtraProgramPages == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ExtraProgramPages)))
- }
- for zb0033 := range (*z).encodedApplicationCallTxnFields.ExtraProgramPages {
- o = msgp.AppendUint32(o, (*z).encodedApplicationCallTxnFields.ExtraProgramPages[zb0033])
- }
- }
- if (zb0044Mask[0] & 0x200000000) == 0 { // if not empty
- // string "apepbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x65, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- }
- if (zb0044Mask[0] & 0x400000000) == 0 { // if not empty
- // string "apfa"
- o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
- if (*z).encodedApplicationCallTxnFields.ForeignApps == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ForeignApps)))
- }
- for zb0023 := range (*z).encodedApplicationCallTxnFields.ForeignApps {
- if (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ForeignApps[zb0023])))
- }
- for zb0024 := range (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] {
- o = (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023][zb0024].MarshalMsg(o)
- }
- }
- }
- if (zb0044Mask[0] & 0x800000000) == 0 { // if not empty
- // string "apfabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x66, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskForeignApps))
- }
- if (zb0044Mask[0] & 0x1000000000) == 0 { // if not empty
- // string "apid"
- o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
- if (*z).encodedApplicationCallTxnFields.ApplicationID == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ApplicationID)))
- }
- for zb0018 := range (*z).encodedApplicationCallTxnFields.ApplicationID {
- o = (*z).encodedApplicationCallTxnFields.ApplicationID[zb0018].MarshalMsg(o)
- }
- }
- if (zb0044Mask[0] & 0x2000000000) == 0 { // if not empty
- // string "apidbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationID))
- }
- if (zb0044Mask[0] & 0x4000000000) == 0 { // if not empty
- // string "apsu"
- o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
- if (*z).encodedApplicationCallTxnFields.ClearStateProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.ClearStateProgram)))
- }
- for zb0032 := range (*z).encodedApplicationCallTxnFields.ClearStateProgram {
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.ClearStateProgram[zb0032]))
- }
- }
- if (zb0044Mask[0] & 0x8000000000) == 0 { // if not empty
- // string "apsubm"
- o = append(o, 0xa6, 0x61, 0x70, 0x73, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- }
- if (zb0044Mask[0] & 0x10000000000) == 0 { // if not empty
- // string "arcv"
- o = append(o, 0xa4, 0x61, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedAssetTransferTxnFields.AssetReceiver)
- }
- if (zb0044Mask[0] & 0x20000000000) == 0 { // if not empty
- // string "arcvbm"
- o = append(o, 0xa6, 0x61, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- }
- if (zb0044Mask[0] & 0x40000000000) == 0 { // if not empty
- // string "asnd"
- o = append(o, 0xa4, 0x61, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedAssetTransferTxnFields.AssetSender)
- }
- if (zb0044Mask[0] & 0x80000000000) == 0 { // if not empty
- // string "asndbm"
- o = append(o, 0xa6, 0x61, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetSender))
- }
- if (zb0044Mask[0] & 0x100000000000) == 0 { // if not empty
- // string "au"
- o = append(o, 0xa2, 0x61, 0x75)
- if (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL)))
- }
- for zb0014 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL {
- o = msgp.AppendString(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0014])
- }
- }
- if (zb0044Mask[0] & 0x200000000000) == 0 { // if not empty
- // string "aubm"
- o = append(o, 0xa4, 0x61, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- }
- if (zb0044Mask[0] & 0x400000000000) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- }
- if (zb0044Mask[0] & 0x800000000000) == 0 { // if not empty
- // string "caid"
- o = append(o, 0xa4, 0x63, 0x61, 0x69, 0x64)
- if (*z).encodedAssetConfigTxnFields.ConfigAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetConfigTxnFields.ConfigAsset)))
- }
- for zb0009 := range (*z).encodedAssetConfigTxnFields.ConfigAsset {
- o = (*z).encodedAssetConfigTxnFields.ConfigAsset[zb0009].MarshalMsg(o)
- }
- }
- if (zb0044Mask[0] & 0x1000000000000) == 0 { // if not empty
- // string "caidbm"
- o = append(o, 0xa6, 0x63, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.BitmaskConfigAsset))
- }
- if (zb0044Mask[0] & 0x2000000000000) == 0 { // if not empty
- // string "cbm"
- o = append(o, 0xa3, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- }
- if (zb0044Mask[0] & 0x4000000000000) == 0 { // if not empty
- // string "certP"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x50)
- if (*z).encodedCompactCertTxnFields.encodedCert.PartProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.encodedCert.PartProofs)))
- }
- for zb0039 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs {
- if (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039])))
- }
- for zb0040 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] {
- o = (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039][zb0040].MarshalMsg(o)
- }
- }
- }
- if (zb0044Mask[0] & 0x8000000000000) == 0 { // if not empty
- // string "certPbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x50, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- }
- if (zb0044Mask[0] & 0x10000000000000) == 0 { // if not empty
- // string "certS"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x53)
- if (*z).encodedCompactCertTxnFields.encodedCert.SigProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.encodedCert.SigProofs)))
- }
- for zb0037 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs {
- if (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037])))
- }
- for zb0038 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] {
- o = (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037][zb0038].MarshalMsg(o)
- }
- }
- }
- if (zb0044Mask[0] & 0x20000000000000) == 0 { // if not empty
- // string "certSbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x53, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- }
- if (zb0044Mask[0] & 0x40000000000000) == 0 { // if not empty
- // string "certc"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedCompactCertTxnFields.encodedCert.SigCommit)
- }
- if (zb0044Mask[0] & 0x80000000000000) == 0 { // if not empty
- // string "certcbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- }
- if (zb0044Mask[0] & 0x100000000000000) == 0 { // if not empty
- // string "certr"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x72)
- if (*z).encodedCompactCertTxnFields.encodedCert.Reveals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.encodedCert.Reveals)))
- }
- for zb0041 := range (*z).encodedCompactCertTxnFields.encodedCert.Reveals {
- if (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041])))
- }
- zb0042_keys := make([]uint64, 0, len((*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041]))
- for zb0042 := range (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] {
- zb0042_keys = append(zb0042_keys, zb0042)
- }
- sort.Sort(SortUint64(zb0042_keys))
- for _, zb0042 := range zb0042_keys {
- zb0043 := (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041][zb0042]
- _ = zb0043
- o = msgp.AppendUint64(o, zb0042)
- o = zb0043.MarshalMsg(o)
- }
- }
- }
- if (zb0044Mask[0] & 0x200000000000000) == 0 { // if not empty
- // string "certrbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- }
- if (zb0044Mask[0] & 0x400000000000000) == 0 { // if not empty
- // string "certrnd"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64)
- if (*z).encodedCompactCertTxnFields.CertRound == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.CertRound)))
- }
- for zb0034 := range (*z).encodedCompactCertTxnFields.CertRound {
- o = (*z).encodedCompactCertTxnFields.CertRound[zb0034].MarshalMsg(o)
- }
- }
- if (zb0044Mask[0] & 0x800000000000000) == 0 { // if not empty
- // string "certrndbm"
- o = append(o, 0xa9, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCompactCertTxnFields.BitmaskCertRound))
- }
- if (zb0044Mask[0] & 0x1000000000000000) == 0 { // if not empty
- // string "certtype"
- o = append(o, 0xa8, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65)
- if (*z).encodedCompactCertTxnFields.CertType == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.CertType)))
- }
- for zb0035 := range (*z).encodedCompactCertTxnFields.CertType {
- o = (*z).encodedCompactCertTxnFields.CertType[zb0035].MarshalMsg(o)
- }
- }
- if (zb0044Mask[0] & 0x2000000000000000) == 0 { // if not empty
- // string "certtypebm"
- o = append(o, 0xaa, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCompactCertTxnFields.BitmaskCertType))
- }
- if (zb0044Mask[0] & 0x4000000000000000) == 0 { // if not empty
- // string "certw"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x77)
- if (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight)))
- }
- for zb0036 := range (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight {
- o = msgp.AppendUint64(o, (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0036])
- }
- }
- if (zb0044Mask[0] & 0x8000000000000000) == 0 { // if not empty
- // string "certwbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x77, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- }
- if (zb0044Mask[1] & 0x1) == 0 { // if not empty
- // string "close"
- o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedPaymentTxnFields.CloseRemainderTo)
- }
- if (zb0044Mask[1] & 0x2) == 0 { // if not empty
- // string "closebm"
- o = append(o, 0xa7, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- }
- if (zb0044Mask[1] & 0x4) == 0 { // if not empty
- // string "dc"
- o = append(o, 0xa2, 0x64, 0x63)
- if (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals)))
- }
- for zb0011 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- o = msgp.AppendUint32(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0011])
- }
- }
- if (zb0044Mask[1] & 0x8) == 0 { // if not empty
- // string "dcbm"
- o = append(o, 0xa4, 0x64, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- }
- if (zb0044Mask[1] & 0x10) == 0 { // if not empty
- // string "dfbm"
- o = append(o, 0xa4, 0x64, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- }
- if (zb0044Mask[1] & 0x20) == 0 { // if not empty
- // string "f"
- o = append(o, 0xa1, 0x66)
- o = msgp.AppendBytes(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- }
- if (zb0044Mask[1] & 0x40) == 0 { // if not empty
- // string "fadd"
- o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedAssetFreezeTxnFields.FreezeAccount)
- }
- if (zb0044Mask[1] & 0x80) == 0 { // if not empty
- // string "faddbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x64, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- }
- if (zb0044Mask[1] & 0x100) == 0 { // if not empty
- // string "faid"
- o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64)
- if (*z).encodedAssetFreezeTxnFields.FreezeAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetFreezeTxnFields.FreezeAsset)))
- }
- for zb0017 := range (*z).encodedAssetFreezeTxnFields.FreezeAsset {
- o = (*z).encodedAssetFreezeTxnFields.FreezeAsset[zb0017].MarshalMsg(o)
- }
- }
- if (zb0044Mask[1] & 0x200) == 0 { // if not empty
- // string "faidbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- }
- if (zb0044Mask[1] & 0x400) == 0 { // if not empty
- // string "fbm"
- o = append(o, 0xa3, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- }
- if (zb0044Mask[1] & 0x800) == 0 { // if not empty
- // string "fee"
- o = append(o, 0xa3, 0x66, 0x65, 0x65)
- if (*z).encodedTxnHeaders.Fee == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxnHeaders.Fee)))
- }
- for zb0001 := range (*z).encodedTxnHeaders.Fee {
- o = (*z).encodedTxnHeaders.Fee[zb0001].MarshalMsg(o)
- }
- }
- if (zb0044Mask[1] & 0x1000) == 0 { // if not empty
- // string "feebm"
- o = append(o, 0xa5, 0x66, 0x65, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskFee))
- }
- if (zb0044Mask[1] & 0x2000) == 0 { // if not empty
- // string "fv"
- o = append(o, 0xa2, 0x66, 0x76)
- if (*z).encodedTxnHeaders.FirstValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxnHeaders.FirstValid)))
- }
- for zb0002 := range (*z).encodedTxnHeaders.FirstValid {
- o = (*z).encodedTxnHeaders.FirstValid[zb0002].MarshalMsg(o)
- }
- }
- if (zb0044Mask[1] & 0x4000) == 0 { // if not empty
- // string "fvbm"
- o = append(o, 0xa4, 0x66, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskFirstValid))
- }
- if (zb0044Mask[1] & 0x8000) == 0 { // if not empty
- // string "genbm"
- o = append(o, 0xa5, 0x67, 0x65, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskGenesisID))
- }
- if (zb0044Mask[1] & 0x10000) == 0 { // if not empty
- // string "gnbs"
- o = append(o, 0xa4, 0x67, 0x6e, 0x62, 0x73)
- if (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice)))
- }
- for zb0030 := range (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0030])
- }
- }
- if (zb0044Mask[1] & 0x20000) == 0 { // if not empty
- // string "gnbsbm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- }
- if (zb0044Mask[1] & 0x40000) == 0 { // if not empty
- // string "gnui"
- o = append(o, 0xa4, 0x67, 0x6e, 0x75, 0x69)
- if (*z).encodedApplicationCallTxnFields.GlobalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.GlobalNumUint)))
- }
- for zb0029 := range (*z).encodedApplicationCallTxnFields.GlobalNumUint {
- o = msgp.AppendUint64(o, (*z).encodedApplicationCallTxnFields.GlobalNumUint[zb0029])
- }
- }
- if (zb0044Mask[1] & 0x80000) == 0 { // if not empty
- // string "gnuibm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- }
- if (zb0044Mask[1] & 0x100000) == 0 { // if not empty
- // string "grpbm"
- o = append(o, 0xa5, 0x67, 0x72, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskGroup))
- }
- if (zb0044Mask[1] & 0x200000) == 0 { // if not empty
- // string "lnbs"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x62, 0x73)
- if (*z).encodedApplicationCallTxnFields.LocalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.LocalNumByteSlice)))
- }
- for zb0028 := range (*z).encodedApplicationCallTxnFields.LocalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).encodedApplicationCallTxnFields.LocalNumByteSlice[zb0028])
- }
- }
- if (zb0044Mask[1] & 0x400000) == 0 { // if not empty
- // string "lnbsbm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- }
- if (zb0044Mask[1] & 0x800000) == 0 { // if not empty
- // string "lnui"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x75, 0x69)
- if (*z).encodedApplicationCallTxnFields.LocalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedApplicationCallTxnFields.LocalNumUint)))
- }
- for zb0027 := range (*z).encodedApplicationCallTxnFields.LocalNumUint {
- o = msgp.AppendUint64(o, (*z).encodedApplicationCallTxnFields.LocalNumUint[zb0027])
- }
- }
- if (zb0044Mask[1] & 0x1000000) == 0 { // if not empty
- // string "lnuibm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- }
- if (zb0044Mask[1] & 0x2000000) == 0 { // if not empty
- // string "lv"
- o = append(o, 0xa2, 0x6c, 0x76)
- if (*z).encodedTxnHeaders.LastValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxnHeaders.LastValid)))
- }
- for zb0003 := range (*z).encodedTxnHeaders.LastValid {
- o = (*z).encodedTxnHeaders.LastValid[zb0003].MarshalMsg(o)
- }
- }
- if (zb0044Mask[1] & 0x4000000) == 0 { // if not empty
- // string "lvbm"
- o = append(o, 0xa4, 0x6c, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskLastValid))
- }
- if (zb0044Mask[1] & 0x8000000) == 0 { // if not empty
- // string "lx"
- o = append(o, 0xa2, 0x6c, 0x78)
- o = msgp.AppendBytes(o, (*z).encodedTxnHeaders.Lease)
- }
- if (zb0044Mask[1] & 0x10000000) == 0 { // if not empty
- // string "lxbm"
- o = append(o, 0xa4, 0x6c, 0x78, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskLease))
- }
- if (zb0044Mask[1] & 0x20000000) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- }
- if (zb0044Mask[1] & 0x40000000) == 0 { // if not empty
- // string "mbm"
- o = append(o, 0xa3, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- }
- if (zb0044Mask[1] & 0x80000000) == 0 { // if not empty
- // string "nonpartbm"
- o = append(o, 0xa9, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedKeyregTxnFields.BitmaskNonparticipation))
- }
- if (zb0044Mask[1] & 0x100000000) == 0 { // if not empty
- // string "note"
- o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65)
- if (*z).encodedTxnHeaders.Note == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedTxnHeaders.Note)))
- }
- for zb0004 := range (*z).encodedTxnHeaders.Note {
- o = msgp.AppendBytes(o, (*z).encodedTxnHeaders.Note[zb0004])
- }
- }
- if (zb0044Mask[1] & 0x200000000) == 0 { // if not empty
- // string "notebm"
- o = append(o, 0xa6, 0x6e, 0x6f, 0x74, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskNote))
- }
- if (zb0044Mask[1] & 0x400000000) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- o = msgp.AppendBytes(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- }
- if (zb0044Mask[1] & 0x800000000) == 0 { // if not empty
- // string "rbm"
- o = append(o, 0xa3, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- }
- if (zb0044Mask[1] & 0x1000000000) == 0 { // if not empty
- // string "rcv"
- o = append(o, 0xa3, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedPaymentTxnFields.Receiver)
- }
- if (zb0044Mask[1] & 0x2000000000) == 0 { // if not empty
- // string "rcvbm"
- o = append(o, 0xa5, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedPaymentTxnFields.BitmaskReceiver))
- }
- if (zb0044Mask[1] & 0x4000000000) == 0 { // if not empty
- // string "rekey"
- o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedTxnHeaders.RekeyTo)
- }
- if (zb0044Mask[1] & 0x8000000000) == 0 { // if not empty
- // string "rekeybm"
- o = append(o, 0xa7, 0x72, 0x65, 0x6b, 0x65, 0x79, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskRekeyTo))
- }
- if (zb0044Mask[1] & 0x10000000000) == 0 { // if not empty
- // string "selkey"
- o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedKeyregTxnFields.SelectionPK)
- }
- if (zb0044Mask[1] & 0x20000000000) == 0 { // if not empty
- // string "snd"
- o = append(o, 0xa3, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedTxnHeaders.Sender)
- }
- if (zb0044Mask[1] & 0x40000000000) == 0 { // if not empty
- // string "sndbm"
- o = append(o, 0xa5, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedTxnHeaders.BitmaskSender))
- }
- if (zb0044Mask[1] & 0x80000000000) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- if (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total)))
- }
- for zb0010 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total {
- o = msgp.AppendUint64(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0010])
- }
- }
- if (zb0044Mask[1] & 0x100000000000) == 0 { // if not empty
- // string "tbm"
- o = append(o, 0xa3, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- }
- if (zb0044Mask[1] & 0x200000000000) == 0 { // if not empty
- // string "type"
- o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
- o = msgp.AppendBytes(o, (*z).TxType)
- }
- if (zb0044Mask[1] & 0x400000000000) == 0 { // if not empty
- // string "typebm"
- o = append(o, 0xa6, 0x74, 0x79, 0x70, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).BitmaskTxType))
- }
- if (zb0044Mask[1] & 0x800000000000) == 0 { // if not empty
- // string "typeo"
- o = append(o, 0xa5, 0x74, 0x79, 0x70, 0x65, 0x6f)
- o = msgp.AppendByte(o, (*z).TxTypeOffset)
- }
- if (zb0044Mask[1] & 0x1000000000000) == 0 { // if not empty
- // string "un"
- o = append(o, 0xa2, 0x75, 0x6e)
- if (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName)))
- }
- for zb0012 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- o = msgp.AppendString(o, (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0012])
- }
- }
- if (zb0044Mask[1] & 0x2000000000000) == 0 { // if not empty
- // string "unbm"
- o = append(o, 0xa4, 0x75, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- }
- if (zb0044Mask[1] & 0x4000000000000) == 0 { // if not empty
- // string "votefst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74)
- if (*z).encodedKeyregTxnFields.VoteFirst == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedKeyregTxnFields.VoteFirst)))
- }
- for zb0005 := range (*z).encodedKeyregTxnFields.VoteFirst {
- o = (*z).encodedKeyregTxnFields.VoteFirst[zb0005].MarshalMsg(o)
- }
- }
- if (zb0044Mask[1] & 0x8000000000000) == 0 { // if not empty
- // string "votefstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedKeyregTxnFields.BitmaskVoteFirst))
- }
- if (zb0044Mask[1] & 0x10000000000000) == 0 { // if not empty
- // string "votekbm"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedKeyregTxnFields.BitmaskKeys))
- }
- if (zb0044Mask[1] & 0x20000000000000) == 0 { // if not empty
- // string "votekd"
- o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64)
- if (*z).encodedKeyregTxnFields.VoteKeyDilution == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedKeyregTxnFields.VoteKeyDilution)))
- }
- for zb0007 := range (*z).encodedKeyregTxnFields.VoteKeyDilution {
- o = msgp.AppendUint64(o, (*z).encodedKeyregTxnFields.VoteKeyDilution[zb0007])
- }
- }
- if (zb0044Mask[1] & 0x40000000000000) == 0 { // if not empty
- // string "votekey"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedKeyregTxnFields.VotePK)
- }
- if (zb0044Mask[1] & 0x80000000000000) == 0 { // if not empty
- // string "votelst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74)
- if (*z).encodedKeyregTxnFields.VoteLast == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedKeyregTxnFields.VoteLast)))
- }
- for zb0006 := range (*z).encodedKeyregTxnFields.VoteLast {
- o = (*z).encodedKeyregTxnFields.VoteLast[zb0006].MarshalMsg(o)
- }
- }
- if (zb0044Mask[1] & 0x100000000000000) == 0 { // if not empty
- // string "votelstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedKeyregTxnFields.BitmaskVoteLast))
- }
- if (zb0044Mask[1] & 0x200000000000000) == 0 { // if not empty
- // string "xaid"
- o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64)
- if (*z).encodedAssetTransferTxnFields.XferAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedAssetTransferTxnFields.XferAsset)))
- }
- for zb0015 := range (*z).encodedAssetTransferTxnFields.XferAsset {
- o = (*z).encodedAssetTransferTxnFields.XferAsset[zb0015].MarshalMsg(o)
- }
- }
- if (zb0044Mask[1] & 0x400000000000000) == 0 { // if not empty
- // string "xaidbm"
- o = append(o, 0xa6, 0x78, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedAssetTransferTxnFields.BitmaskXferAsset))
- }
- }
- return
-}
-
-func (_ *encodedTxns) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedTxns)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedTxns) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0044 int
- var zb0045 bool
- zb0044, zb0045, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0044, zb0045, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0044 > 0 {
- zb0044--
- var zb0046 int
- zb0046, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxType")
- return
- }
- if zb0046 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0046), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).TxType, bts, err = msgp.ReadBytesBytes(bts, (*z).TxType)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxType")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0047 []byte
- var zb0048 int
- zb0048, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTxType")
- return
- }
- if zb0048 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0048), uint64(maxBitmaskSize))
- return
- }
- zb0047, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskTxType))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTxType")
- return
- }
- (*z).BitmaskTxType = bitmask(zb0047)
- }
- }
- if zb0044 > 0 {
- zb0044--
- (*z).TxTypeOffset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxTypeOffset")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0049 int
- zb0049, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- if zb0049 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0049), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxnHeaders.Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.Sender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0050 []byte
- var zb0051 int
- zb0051, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- if zb0051 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0051), uint64(maxBitmaskSize))
- return
- }
- zb0050, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- (*z).encodedTxnHeaders.BitmaskSender = bitmask(zb0050)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0052 int
- var zb0053 bool
- zb0052, zb0053, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0052 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0052), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0053 {
- (*z).encodedTxnHeaders.Fee = nil
- } else if (*z).encodedTxnHeaders.Fee != nil && cap((*z).encodedTxnHeaders.Fee) >= zb0052 {
- (*z).encodedTxnHeaders.Fee = ((*z).encodedTxnHeaders.Fee)[:zb0052]
- } else {
- (*z).encodedTxnHeaders.Fee = make([]basics.MicroAlgos, zb0052)
- }
- for zb0001 := range (*z).encodedTxnHeaders.Fee {
- bts, err = (*z).encodedTxnHeaders.Fee[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee", zb0001)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0054 []byte
- var zb0055 int
- zb0055, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- if zb0055 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0055), uint64(maxBitmaskSize))
- return
- }
- zb0054, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- (*z).encodedTxnHeaders.BitmaskFee = bitmask(zb0054)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0056 int
- var zb0057 bool
- zb0056, zb0057, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0056 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0056), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0057 {
- (*z).encodedTxnHeaders.FirstValid = nil
- } else if (*z).encodedTxnHeaders.FirstValid != nil && cap((*z).encodedTxnHeaders.FirstValid) >= zb0056 {
- (*z).encodedTxnHeaders.FirstValid = ((*z).encodedTxnHeaders.FirstValid)[:zb0056]
- } else {
- (*z).encodedTxnHeaders.FirstValid = make([]basics.Round, zb0056)
- }
- for zb0002 := range (*z).encodedTxnHeaders.FirstValid {
- bts, err = (*z).encodedTxnHeaders.FirstValid[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid", zb0002)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0058 []byte
- var zb0059 int
- zb0059, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- if zb0059 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0059), uint64(maxBitmaskSize))
- return
- }
- zb0058, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- (*z).encodedTxnHeaders.BitmaskFirstValid = bitmask(zb0058)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0060 int
- var zb0061 bool
- zb0060, zb0061, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0060 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0060), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0061 {
- (*z).encodedTxnHeaders.LastValid = nil
- } else if (*z).encodedTxnHeaders.LastValid != nil && cap((*z).encodedTxnHeaders.LastValid) >= zb0060 {
- (*z).encodedTxnHeaders.LastValid = ((*z).encodedTxnHeaders.LastValid)[:zb0060]
- } else {
- (*z).encodedTxnHeaders.LastValid = make([]basics.Round, zb0060)
- }
- for zb0003 := range (*z).encodedTxnHeaders.LastValid {
- bts, err = (*z).encodedTxnHeaders.LastValid[zb0003].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid", zb0003)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0062 []byte
- var zb0063 int
- zb0063, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- if zb0063 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0063), uint64(maxBitmaskSize))
- return
- }
- zb0062, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- (*z).encodedTxnHeaders.BitmaskLastValid = bitmask(zb0062)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0064 int
- var zb0065 bool
- zb0064, zb0065, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0064 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0064), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0065 {
- (*z).encodedTxnHeaders.Note = nil
- } else if (*z).encodedTxnHeaders.Note != nil && cap((*z).encodedTxnHeaders.Note) >= zb0064 {
- (*z).encodedTxnHeaders.Note = ((*z).encodedTxnHeaders.Note)[:zb0064]
- } else {
- (*z).encodedTxnHeaders.Note = make([][]byte, zb0064)
- }
- for zb0004 := range (*z).encodedTxnHeaders.Note {
- var zb0066 int
- zb0066, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0004)
- return
- }
- if zb0066 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0066), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).encodedTxnHeaders.Note[zb0004], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.Note[zb0004])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0004)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0067 []byte
- var zb0068 int
- zb0068, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- if zb0068 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0068), uint64(maxBitmaskSize))
- return
- }
- zb0067, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- (*z).encodedTxnHeaders.BitmaskNote = bitmask(zb0067)
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0069 []byte
- var zb0070 int
- zb0070, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- if zb0070 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0070), uint64(maxBitmaskSize))
- return
- }
- zb0069, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- (*z).encodedTxnHeaders.BitmaskGenesisID = bitmask(zb0069)
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0071 []byte
- var zb0072 int
- zb0072, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- if zb0072 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0072), uint64(maxBitmaskSize))
- return
- }
- zb0071, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- (*z).encodedTxnHeaders.BitmaskGroup = bitmask(zb0071)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0073 int
- zb0073, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- if zb0073 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0073), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxnHeaders.Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.Lease)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0074 []byte
- var zb0075 int
- zb0075, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- if zb0075 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0075), uint64(maxBitmaskSize))
- return
- }
- zb0074, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- (*z).encodedTxnHeaders.BitmaskLease = bitmask(zb0074)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0076 int
- zb0076, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- if zb0076 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0076), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxnHeaders.RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0077 []byte
- var zb0078 int
- zb0078, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- if zb0078 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0078), uint64(maxBitmaskSize))
- return
- }
- zb0077, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- (*z).encodedTxnHeaders.BitmaskRekeyTo = bitmask(zb0077)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0079 int
- zb0079, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- if zb0079 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0079), uint64(maxAddressBytes))
- return
- }
- (*z).encodedKeyregTxnFields.VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedKeyregTxnFields.VotePK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0080 int
- zb0080, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- if zb0080 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0080), uint64(maxAddressBytes))
- return
- }
- (*z).encodedKeyregTxnFields.SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedKeyregTxnFields.SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0081 int
- var zb0082 bool
- zb0081, zb0082, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0081 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0081), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0082 {
- (*z).encodedKeyregTxnFields.VoteFirst = nil
- } else if (*z).encodedKeyregTxnFields.VoteFirst != nil && cap((*z).encodedKeyregTxnFields.VoteFirst) >= zb0081 {
- (*z).encodedKeyregTxnFields.VoteFirst = ((*z).encodedKeyregTxnFields.VoteFirst)[:zb0081]
- } else {
- (*z).encodedKeyregTxnFields.VoteFirst = make([]basics.Round, zb0081)
- }
- for zb0005 := range (*z).encodedKeyregTxnFields.VoteFirst {
- bts, err = (*z).encodedKeyregTxnFields.VoteFirst[zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst", zb0005)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0083 []byte
- var zb0084 int
- zb0084, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- if zb0084 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0084), uint64(maxBitmaskSize))
- return
- }
- zb0083, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskVoteFirst = bitmask(zb0083)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0085 int
- var zb0086 bool
- zb0085, zb0086, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0085 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0085), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0086 {
- (*z).encodedKeyregTxnFields.VoteLast = nil
- } else if (*z).encodedKeyregTxnFields.VoteLast != nil && cap((*z).encodedKeyregTxnFields.VoteLast) >= zb0085 {
- (*z).encodedKeyregTxnFields.VoteLast = ((*z).encodedKeyregTxnFields.VoteLast)[:zb0085]
- } else {
- (*z).encodedKeyregTxnFields.VoteLast = make([]basics.Round, zb0085)
- }
- for zb0006 := range (*z).encodedKeyregTxnFields.VoteLast {
- bts, err = (*z).encodedKeyregTxnFields.VoteLast[zb0006].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast", zb0006)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0087 []byte
- var zb0088 int
- zb0088, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- if zb0088 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0088), uint64(maxBitmaskSize))
- return
- }
- zb0087, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskVoteLast = bitmask(zb0087)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0089 int
- var zb0090 bool
- zb0089, zb0090, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0089 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0089), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0090 {
- (*z).encodedKeyregTxnFields.VoteKeyDilution = nil
- } else if (*z).encodedKeyregTxnFields.VoteKeyDilution != nil && cap((*z).encodedKeyregTxnFields.VoteKeyDilution) >= zb0089 {
- (*z).encodedKeyregTxnFields.VoteKeyDilution = ((*z).encodedKeyregTxnFields.VoteKeyDilution)[:zb0089]
- } else {
- (*z).encodedKeyregTxnFields.VoteKeyDilution = make([]uint64, zb0089)
- }
- for zb0007 := range (*z).encodedKeyregTxnFields.VoteKeyDilution {
- (*z).encodedKeyregTxnFields.VoteKeyDilution[zb0007], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution", zb0007)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0091 []byte
- var zb0092 int
- zb0092, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- if zb0092 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0092), uint64(maxBitmaskSize))
- return
- }
- zb0091, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskKeys = bitmask(zb0091)
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0093 []byte
- var zb0094 int
- zb0094, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- if zb0094 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0094), uint64(maxBitmaskSize))
- return
- }
- zb0093, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskNonparticipation = bitmask(zb0093)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0095 int
- zb0095, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- if zb0095 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0095), uint64(maxAddressBytes))
- return
- }
- (*z).encodedPaymentTxnFields.Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedPaymentTxnFields.Receiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0096 []byte
- var zb0097 int
- zb0097, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- if zb0097 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0097), uint64(maxBitmaskSize))
- return
- }
- zb0096, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedPaymentTxnFields.BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- (*z).encodedPaymentTxnFields.BitmaskReceiver = bitmask(zb0096)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0098 int
- var zb0099 bool
- zb0098, zb0099, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0098 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0098), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0099 {
- (*z).encodedPaymentTxnFields.Amount = nil
- } else if (*z).encodedPaymentTxnFields.Amount != nil && cap((*z).encodedPaymentTxnFields.Amount) >= zb0098 {
- (*z).encodedPaymentTxnFields.Amount = ((*z).encodedPaymentTxnFields.Amount)[:zb0098]
- } else {
- (*z).encodedPaymentTxnFields.Amount = make([]basics.MicroAlgos, zb0098)
- }
- for zb0008 := range (*z).encodedPaymentTxnFields.Amount {
- bts, err = (*z).encodedPaymentTxnFields.Amount[zb0008].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount", zb0008)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0100 []byte
- var zb0101 int
- zb0101, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- if zb0101 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0101), uint64(maxBitmaskSize))
- return
- }
- zb0100, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedPaymentTxnFields.BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- (*z).encodedPaymentTxnFields.BitmaskAmount = bitmask(zb0100)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0102 int
- zb0102, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- if zb0102 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0102), uint64(maxAddressBytes))
- return
- }
- (*z).encodedPaymentTxnFields.CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedPaymentTxnFields.CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0103 []byte
- var zb0104 int
- zb0104, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- if zb0104 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0104), uint64(maxBitmaskSize))
- return
- }
- zb0103, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- (*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo = bitmask(zb0103)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0105 int
- var zb0106 bool
- zb0105, zb0106, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0105 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0105), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0106 {
- (*z).encodedAssetConfigTxnFields.ConfigAsset = nil
- } else if (*z).encodedAssetConfigTxnFields.ConfigAsset != nil && cap((*z).encodedAssetConfigTxnFields.ConfigAsset) >= zb0105 {
- (*z).encodedAssetConfigTxnFields.ConfigAsset = ((*z).encodedAssetConfigTxnFields.ConfigAsset)[:zb0105]
- } else {
- (*z).encodedAssetConfigTxnFields.ConfigAsset = make([]basics.AssetIndex, zb0105)
- }
- for zb0009 := range (*z).encodedAssetConfigTxnFields.ConfigAsset {
- bts, err = (*z).encodedAssetConfigTxnFields.ConfigAsset[zb0009].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset", zb0009)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0107 []byte
- var zb0108 int
- zb0108, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- if zb0108 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0108), uint64(maxBitmaskSize))
- return
- }
- zb0107, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- (*z).encodedAssetConfigTxnFields.BitmaskConfigAsset = bitmask(zb0107)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0109 int
- var zb0110 bool
- zb0109, zb0110, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0109 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0109), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0110 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total) >= zb0109 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total)[:zb0109]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total = make([]uint64, zb0109)
- }
- for zb0010 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0010], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total", zb0010)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0111 []byte
- var zb0112 int
- zb0112, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- if zb0112 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0112), uint64(maxBitmaskSize))
- return
- }
- zb0111, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal = bitmask(zb0111)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0113 int
- var zb0114 bool
- zb0113, zb0114, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0113 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0113), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0114 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals) >= zb0113 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals)[:zb0113]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals = make([]uint32, zb0113)
- }
- for zb0011 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0011], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals", zb0011)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0115 []byte
- var zb0116 int
- zb0116, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- if zb0116 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0116), uint64(maxBitmaskSize))
- return
- }
- zb0115, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals = bitmask(zb0115)
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0117 []byte
- var zb0118 int
- zb0118, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- if zb0118 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0118), uint64(maxBitmaskSize))
- return
- }
- zb0117, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0117)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0119 int
- var zb0120 bool
- zb0119, zb0120, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0119 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0119), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0120 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName) >= zb0119 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName)[:zb0119]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName = make([]string, zb0119)
- }
- for zb0012 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0012], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName", zb0012)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0121 []byte
- var zb0122 int
- zb0122, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- if zb0122 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0122), uint64(maxBitmaskSize))
- return
- }
- zb0121, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName = bitmask(zb0121)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0123 int
- var zb0124 bool
- zb0123, zb0124, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0123 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0123), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0124 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName) >= zb0123 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName)[:zb0123]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName = make([]string, zb0123)
- }
- for zb0013 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0013], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName", zb0013)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0125 []byte
- var zb0126 int
- zb0126, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- if zb0126 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0126), uint64(maxBitmaskSize))
- return
- }
- zb0125, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName = bitmask(zb0125)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0127 int
- var zb0128 bool
- zb0127, zb0128, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0127 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0127), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0128 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL) >= zb0127 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL)[:zb0127]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL = make([]string, zb0127)
- }
- for zb0014 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0014], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL", zb0014)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0129 []byte
- var zb0130 int
- zb0130, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- if zb0130 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0130), uint64(maxBitmaskSize))
- return
- }
- zb0129, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL = bitmask(zb0129)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0131 int
- zb0131, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- if zb0131 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0131), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0132 []byte
- var zb0133 int
- zb0133, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- if zb0133 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0133), uint64(maxBitmaskSize))
- return
- }
- zb0132, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash = bitmask(zb0132)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0134 int
- zb0134, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- if zb0134 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0134), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0135 []byte
- var zb0136 int
- zb0136, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- if zb0136 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0136), uint64(maxBitmaskSize))
- return
- }
- zb0135, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager = bitmask(zb0135)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0137 int
- zb0137, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- if zb0137 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0137), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0138 []byte
- var zb0139 int
- zb0139, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- if zb0139 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0139), uint64(maxBitmaskSize))
- return
- }
- zb0138, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve = bitmask(zb0138)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0140 int
- zb0140, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- if zb0140 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0140), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0141 []byte
- var zb0142 int
- zb0142, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- if zb0142 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0142), uint64(maxBitmaskSize))
- return
- }
- zb0141, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze = bitmask(zb0141)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0143 int
- zb0143, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- if zb0143 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0143), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0144 []byte
- var zb0145 int
- zb0145, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- if zb0145 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0145), uint64(maxBitmaskSize))
- return
- }
- zb0144, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback = bitmask(zb0144)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0146 int
- var zb0147 bool
- zb0146, zb0147, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0146 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0146), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0147 {
- (*z).encodedAssetTransferTxnFields.XferAsset = nil
- } else if (*z).encodedAssetTransferTxnFields.XferAsset != nil && cap((*z).encodedAssetTransferTxnFields.XferAsset) >= zb0146 {
- (*z).encodedAssetTransferTxnFields.XferAsset = ((*z).encodedAssetTransferTxnFields.XferAsset)[:zb0146]
- } else {
- (*z).encodedAssetTransferTxnFields.XferAsset = make([]basics.AssetIndex, zb0146)
- }
- for zb0015 := range (*z).encodedAssetTransferTxnFields.XferAsset {
- bts, err = (*z).encodedAssetTransferTxnFields.XferAsset[zb0015].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset", zb0015)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0148 []byte
- var zb0149 int
- zb0149, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- if zb0149 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0149), uint64(maxBitmaskSize))
- return
- }
- zb0148, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskXferAsset = bitmask(zb0148)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0150 int
- var zb0151 bool
- zb0150, zb0151, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0150 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0150), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0151 {
- (*z).encodedAssetTransferTxnFields.AssetAmount = nil
- } else if (*z).encodedAssetTransferTxnFields.AssetAmount != nil && cap((*z).encodedAssetTransferTxnFields.AssetAmount) >= zb0150 {
- (*z).encodedAssetTransferTxnFields.AssetAmount = ((*z).encodedAssetTransferTxnFields.AssetAmount)[:zb0150]
- } else {
- (*z).encodedAssetTransferTxnFields.AssetAmount = make([]uint64, zb0150)
- }
- for zb0016 := range (*z).encodedAssetTransferTxnFields.AssetAmount {
- (*z).encodedAssetTransferTxnFields.AssetAmount[zb0016], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount", zb0016)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0152 []byte
- var zb0153 int
- zb0153, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- if zb0153 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0153), uint64(maxBitmaskSize))
- return
- }
- zb0152, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetAmount = bitmask(zb0152)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0154 int
- zb0154, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- if zb0154 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0154), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetTransferTxnFields.AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetTransferTxnFields.AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0155 []byte
- var zb0156 int
- zb0156, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- if zb0156 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0156), uint64(maxBitmaskSize))
- return
- }
- zb0155, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetSender = bitmask(zb0155)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0157 int
- zb0157, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- if zb0157 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0157), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetTransferTxnFields.AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetTransferTxnFields.AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0158 []byte
- var zb0159 int
- zb0159, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- if zb0159 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0159), uint64(maxBitmaskSize))
- return
- }
- zb0158, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver = bitmask(zb0158)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0160 int
- zb0160, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- if zb0160 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0160), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetTransferTxnFields.AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetTransferTxnFields.AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0161 []byte
- var zb0162 int
- zb0162, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- if zb0162 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0162), uint64(maxBitmaskSize))
- return
- }
- zb0161, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo = bitmask(zb0161)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0163 int
- zb0163, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- if zb0163 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0163), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetFreezeTxnFields.FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetFreezeTxnFields.FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0164 []byte
- var zb0165 int
- zb0165, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- if zb0165 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0165), uint64(maxBitmaskSize))
- return
- }
- zb0164, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- (*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount = bitmask(zb0164)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0166 int
- var zb0167 bool
- zb0166, zb0167, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0166 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0166), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0167 {
- (*z).encodedAssetFreezeTxnFields.FreezeAsset = nil
- } else if (*z).encodedAssetFreezeTxnFields.FreezeAsset != nil && cap((*z).encodedAssetFreezeTxnFields.FreezeAsset) >= zb0166 {
- (*z).encodedAssetFreezeTxnFields.FreezeAsset = ((*z).encodedAssetFreezeTxnFields.FreezeAsset)[:zb0166]
- } else {
- (*z).encodedAssetFreezeTxnFields.FreezeAsset = make([]basics.AssetIndex, zb0166)
- }
- for zb0017 := range (*z).encodedAssetFreezeTxnFields.FreezeAsset {
- bts, err = (*z).encodedAssetFreezeTxnFields.FreezeAsset[zb0017].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset", zb0017)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0168 []byte
- var zb0169 int
- zb0169, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- if zb0169 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0169), uint64(maxBitmaskSize))
- return
- }
- zb0168, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- (*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset = bitmask(zb0168)
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0170 []byte
- var zb0171 int
- zb0171, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- if zb0171 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0171), uint64(maxBitmaskSize))
- return
- }
- zb0170, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- (*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen = bitmask(zb0170)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0172 int
- var zb0173 bool
- zb0172, zb0173, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0172 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0172), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0173 {
- (*z).encodedApplicationCallTxnFields.ApplicationID = nil
- } else if (*z).encodedApplicationCallTxnFields.ApplicationID != nil && cap((*z).encodedApplicationCallTxnFields.ApplicationID) >= zb0172 {
- (*z).encodedApplicationCallTxnFields.ApplicationID = ((*z).encodedApplicationCallTxnFields.ApplicationID)[:zb0172]
- } else {
- (*z).encodedApplicationCallTxnFields.ApplicationID = make([]basics.AppIndex, zb0172)
- }
- for zb0018 := range (*z).encodedApplicationCallTxnFields.ApplicationID {
- bts, err = (*z).encodedApplicationCallTxnFields.ApplicationID[zb0018].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID", zb0018)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0174 []byte
- var zb0175 int
- zb0175, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- if zb0175 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0175), uint64(maxBitmaskSize))
- return
- }
- zb0174, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskApplicationID = bitmask(zb0174)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0176 int
- zb0176, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- if zb0176 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0176), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedApplicationCallTxnFields.OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedApplicationCallTxnFields.OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0177 []byte
- var zb0178 int
- zb0178, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- if zb0178 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0178), uint64(maxBitmaskSize))
- return
- }
- zb0177, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskOnCompletion = bitmask(zb0177)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0179 int
- var zb0180 bool
- zb0179, zb0180, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0179 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0179), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0180 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).encodedApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).encodedApplicationCallTxnFields.ApplicationArgs) >= zb0179 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs = ((*z).encodedApplicationCallTxnFields.ApplicationArgs)[:zb0179]
- } else {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs = make([]applicationArgs, zb0179)
- }
- for zb0019 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs {
- var zb0181 int
- var zb0182 bool
- zb0181, zb0182, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0019)
- return
- }
- if zb0181 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0181), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0019)
- return
- }
- if zb0182 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] = nil
- } else if (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] != nil && cap((*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019]) >= zb0181 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] = ((*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019])[:zb0181]
- } else {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] = make(applicationArgs, zb0181)
- }
- for zb0020 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019][zb0020], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019][zb0020])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0019, zb0020)
- return
- }
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0183 []byte
- var zb0184 int
- zb0184, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- if zb0184 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0184), uint64(maxBitmaskSize))
- return
- }
- zb0183, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs = bitmask(zb0183)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0185 int
- var zb0186 bool
- zb0185, zb0186, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0185 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0185), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0186 {
- (*z).encodedApplicationCallTxnFields.Accounts = nil
- } else if (*z).encodedApplicationCallTxnFields.Accounts != nil && cap((*z).encodedApplicationCallTxnFields.Accounts) >= zb0185 {
- (*z).encodedApplicationCallTxnFields.Accounts = ((*z).encodedApplicationCallTxnFields.Accounts)[:zb0185]
- } else {
- (*z).encodedApplicationCallTxnFields.Accounts = make([]addresses, zb0185)
- }
- for zb0021 := range (*z).encodedApplicationCallTxnFields.Accounts {
- var zb0187 int
- var zb0188 bool
- zb0187, zb0188, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0021)
- return
- }
- if zb0187 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0187), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0021)
- return
- }
- if zb0188 {
- (*z).encodedApplicationCallTxnFields.Accounts[zb0021] = nil
- } else if (*z).encodedApplicationCallTxnFields.Accounts[zb0021] != nil && cap((*z).encodedApplicationCallTxnFields.Accounts[zb0021]) >= zb0187 {
- (*z).encodedApplicationCallTxnFields.Accounts[zb0021] = ((*z).encodedApplicationCallTxnFields.Accounts[zb0021])[:zb0187]
- } else {
- (*z).encodedApplicationCallTxnFields.Accounts[zb0021] = make(addresses, zb0187)
- }
- for zb0022 := range (*z).encodedApplicationCallTxnFields.Accounts[zb0021] {
- bts, err = (*z).encodedApplicationCallTxnFields.Accounts[zb0021][zb0022].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0021, zb0022)
- return
- }
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0189 []byte
- var zb0190 int
- zb0190, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- if zb0190 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0190), uint64(maxBitmaskSize))
- return
- }
- zb0189, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskAccounts = bitmask(zb0189)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0191 int
- var zb0192 bool
- zb0191, zb0192, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0191 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0191), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0192 {
- (*z).encodedApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignApps != nil && cap((*z).encodedApplicationCallTxnFields.ForeignApps) >= zb0191 {
- (*z).encodedApplicationCallTxnFields.ForeignApps = ((*z).encodedApplicationCallTxnFields.ForeignApps)[:zb0191]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignApps = make([]appIndices, zb0191)
- }
- for zb0023 := range (*z).encodedApplicationCallTxnFields.ForeignApps {
- var zb0193 int
- var zb0194 bool
- zb0193, zb0194, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0023)
- return
- }
- if zb0193 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0193), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0023)
- return
- }
- if zb0194 {
- (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] != nil && cap((*z).encodedApplicationCallTxnFields.ForeignApps[zb0023]) >= zb0193 {
- (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] = ((*z).encodedApplicationCallTxnFields.ForeignApps[zb0023])[:zb0193]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] = make(appIndices, zb0193)
- }
- for zb0024 := range (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] {
- bts, err = (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023][zb0024].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0023, zb0024)
- return
- }
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0195 []byte
- var zb0196 int
- zb0196, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- if zb0196 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0196), uint64(maxBitmaskSize))
- return
- }
- zb0195, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskForeignApps = bitmask(zb0195)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0197 int
- var zb0198 bool
- zb0197, zb0198, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0197 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0197), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0198 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignAssets != nil && cap((*z).encodedApplicationCallTxnFields.ForeignAssets) >= zb0197 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets = ((*z).encodedApplicationCallTxnFields.ForeignAssets)[:zb0197]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignAssets = make([]assetIndices, zb0197)
- }
- for zb0025 := range (*z).encodedApplicationCallTxnFields.ForeignAssets {
- var zb0199 int
- var zb0200 bool
- zb0199, zb0200, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0025)
- return
- }
- if zb0199 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0199), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0025)
- return
- }
- if zb0200 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] != nil && cap((*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025]) >= zb0199 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] = ((*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025])[:zb0199]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] = make(assetIndices, zb0199)
- }
- for zb0026 := range (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] {
- bts, err = (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025][zb0026].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0025, zb0026)
- return
- }
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0201 []byte
- var zb0202 int
- zb0202, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- if zb0202 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0202), uint64(maxBitmaskSize))
- return
- }
- zb0201, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskForeignAssets = bitmask(zb0201)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0203 int
- var zb0204 bool
- zb0203, zb0204, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0203 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0203), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0204 {
- (*z).encodedApplicationCallTxnFields.LocalNumUint = nil
- } else if (*z).encodedApplicationCallTxnFields.LocalNumUint != nil && cap((*z).encodedApplicationCallTxnFields.LocalNumUint) >= zb0203 {
- (*z).encodedApplicationCallTxnFields.LocalNumUint = ((*z).encodedApplicationCallTxnFields.LocalNumUint)[:zb0203]
- } else {
- (*z).encodedApplicationCallTxnFields.LocalNumUint = make([]uint64, zb0203)
- }
- for zb0027 := range (*z).encodedApplicationCallTxnFields.LocalNumUint {
- (*z).encodedApplicationCallTxnFields.LocalNumUint[zb0027], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint", zb0027)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0205 []byte
- var zb0206 int
- zb0206, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- if zb0206 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0206), uint64(maxBitmaskSize))
- return
- }
- zb0205, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint = bitmask(zb0205)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0207 int
- var zb0208 bool
- zb0207, zb0208, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0207 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0207), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0208 {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice = nil
- } else if (*z).encodedApplicationCallTxnFields.LocalNumByteSlice != nil && cap((*z).encodedApplicationCallTxnFields.LocalNumByteSlice) >= zb0207 {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice = ((*z).encodedApplicationCallTxnFields.LocalNumByteSlice)[:zb0207]
- } else {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice = make([]uint64, zb0207)
- }
- for zb0028 := range (*z).encodedApplicationCallTxnFields.LocalNumByteSlice {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice[zb0028], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice", zb0028)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0209 []byte
- var zb0210 int
- zb0210, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- if zb0210 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0210), uint64(maxBitmaskSize))
- return
- }
- zb0209, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice = bitmask(zb0209)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0211 int
- var zb0212 bool
- zb0211, zb0212, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0211 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0211), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0212 {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint = nil
- } else if (*z).encodedApplicationCallTxnFields.GlobalNumUint != nil && cap((*z).encodedApplicationCallTxnFields.GlobalNumUint) >= zb0211 {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint = ((*z).encodedApplicationCallTxnFields.GlobalNumUint)[:zb0211]
- } else {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint = make([]uint64, zb0211)
- }
- for zb0029 := range (*z).encodedApplicationCallTxnFields.GlobalNumUint {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint[zb0029], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint", zb0029)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0213 []byte
- var zb0214 int
- zb0214, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- if zb0214 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0214), uint64(maxBitmaskSize))
- return
- }
- zb0213, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint = bitmask(zb0213)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0215 int
- var zb0216 bool
- zb0215, zb0216, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0215 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0215), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0216 {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice = nil
- } else if (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice != nil && cap((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice) >= zb0215 {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice = ((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice)[:zb0215]
- } else {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice = make([]uint64, zb0215)
- }
- for zb0030 := range (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0030], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice", zb0030)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0217 []byte
- var zb0218 int
- zb0218, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0218 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0218), uint64(maxBitmaskSize))
- return
- }
- zb0217, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice = bitmask(zb0217)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0219 int
- var zb0220 bool
- zb0219, zb0220, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0219 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0219), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0220 {
- (*z).encodedApplicationCallTxnFields.ApprovalProgram = nil
- } else if (*z).encodedApplicationCallTxnFields.ApprovalProgram != nil && cap((*z).encodedApplicationCallTxnFields.ApprovalProgram) >= zb0219 {
- (*z).encodedApplicationCallTxnFields.ApprovalProgram = ((*z).encodedApplicationCallTxnFields.ApprovalProgram)[:zb0219]
- } else {
- (*z).encodedApplicationCallTxnFields.ApprovalProgram = make([]program, zb0219)
- }
- for zb0031 := range (*z).encodedApplicationCallTxnFields.ApprovalProgram {
- {
- var zb0221 []byte
- var zb0222 int
- zb0222, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0031)
- return
- }
- if zb0222 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0222), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0221, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.ApprovalProgram[zb0031]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0031)
- return
- }
- (*z).encodedApplicationCallTxnFields.ApprovalProgram[zb0031] = program(zb0221)
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0223 []byte
- var zb0224 int
- zb0224, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- if zb0224 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0224), uint64(maxBitmaskSize))
- return
- }
- zb0223, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram = bitmask(zb0223)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0225 int
- var zb0226 bool
- zb0225, zb0226, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0225 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0225), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0226 {
- (*z).encodedApplicationCallTxnFields.ClearStateProgram = nil
- } else if (*z).encodedApplicationCallTxnFields.ClearStateProgram != nil && cap((*z).encodedApplicationCallTxnFields.ClearStateProgram) >= zb0225 {
- (*z).encodedApplicationCallTxnFields.ClearStateProgram = ((*z).encodedApplicationCallTxnFields.ClearStateProgram)[:zb0225]
- } else {
- (*z).encodedApplicationCallTxnFields.ClearStateProgram = make([]program, zb0225)
- }
- for zb0032 := range (*z).encodedApplicationCallTxnFields.ClearStateProgram {
- {
- var zb0227 []byte
- var zb0228 int
- zb0228, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0032)
- return
- }
- if zb0228 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0228), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0227, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.ClearStateProgram[zb0032]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0032)
- return
- }
- (*z).encodedApplicationCallTxnFields.ClearStateProgram[zb0032] = program(zb0227)
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0229 []byte
- var zb0230 int
- zb0230, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- if zb0230 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0230), uint64(maxBitmaskSize))
- return
- }
- zb0229, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram = bitmask(zb0229)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0231 int
- var zb0232 bool
- zb0231, zb0232, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0231 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0231), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0232 {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages = nil
- } else if (*z).encodedApplicationCallTxnFields.ExtraProgramPages != nil && cap((*z).encodedApplicationCallTxnFields.ExtraProgramPages) >= zb0231 {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages = ((*z).encodedApplicationCallTxnFields.ExtraProgramPages)[:zb0231]
- } else {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages = make([]uint32, zb0231)
- }
- for zb0033 := range (*z).encodedApplicationCallTxnFields.ExtraProgramPages {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages[zb0033], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages", zb0033)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0233 []byte
- var zb0234 int
- zb0234, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- if zb0234 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0234), uint64(maxBitmaskSize))
- return
- }
- zb0233, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages = bitmask(zb0233)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0235 int
- var zb0236 bool
- zb0235, zb0236, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0235 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0235), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0236 {
- (*z).encodedCompactCertTxnFields.CertRound = nil
- } else if (*z).encodedCompactCertTxnFields.CertRound != nil && cap((*z).encodedCompactCertTxnFields.CertRound) >= zb0235 {
- (*z).encodedCompactCertTxnFields.CertRound = ((*z).encodedCompactCertTxnFields.CertRound)[:zb0235]
- } else {
- (*z).encodedCompactCertTxnFields.CertRound = make([]basics.Round, zb0235)
- }
- for zb0034 := range (*z).encodedCompactCertTxnFields.CertRound {
- bts, err = (*z).encodedCompactCertTxnFields.CertRound[zb0034].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound", zb0034)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0237 []byte
- var zb0238 int
- zb0238, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- if zb0238 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0238), uint64(maxBitmaskSize))
- return
- }
- zb0237, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- (*z).encodedCompactCertTxnFields.BitmaskCertRound = bitmask(zb0237)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0239 int
- var zb0240 bool
- zb0239, zb0240, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0239 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0239), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0240 {
- (*z).encodedCompactCertTxnFields.CertType = nil
- } else if (*z).encodedCompactCertTxnFields.CertType != nil && cap((*z).encodedCompactCertTxnFields.CertType) >= zb0239 {
- (*z).encodedCompactCertTxnFields.CertType = ((*z).encodedCompactCertTxnFields.CertType)[:zb0239]
- } else {
- (*z).encodedCompactCertTxnFields.CertType = make([]protocol.CompactCertType, zb0239)
- }
- for zb0035 := range (*z).encodedCompactCertTxnFields.CertType {
- bts, err = (*z).encodedCompactCertTxnFields.CertType[zb0035].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType", zb0035)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0241 []byte
- var zb0242 int
- zb0242, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- if zb0242 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0242), uint64(maxBitmaskSize))
- return
- }
- zb0241, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- (*z).encodedCompactCertTxnFields.BitmaskCertType = bitmask(zb0241)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0243 int
- zb0243, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- if zb0243 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0243), uint64(maxAddressBytes))
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedCompactCertTxnFields.encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0244 []byte
- var zb0245 int
- zb0245, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- if zb0245 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0245), uint64(maxBitmaskSize))
- return
- }
- zb0244, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit = bitmask(zb0244)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0246 int
- var zb0247 bool
- zb0246, zb0247, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0246 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0246), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0247 {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight) >= zb0246 {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight = ((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight)[:zb0246]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight = make([]uint64, zb0246)
- }
- for zb0036 := range (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0036], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight", zb0036)
- return
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0248 []byte
- var zb0249 int
- zb0249, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- if zb0249 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0249), uint64(maxBitmaskSize))
- return
- }
- zb0248, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight = bitmask(zb0248)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0250 int
- var zb0251 bool
- zb0250, zb0251, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0250 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0250), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0251 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.SigProofs != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.SigProofs) >= zb0250 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs = ((*z).encodedCompactCertTxnFields.encodedCert.SigProofs)[:zb0250]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs = make([]certProofs, zb0250)
- }
- for zb0037 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs {
- var zb0252 int
- var zb0253 bool
- zb0252, zb0253, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0037)
- return
- }
- if zb0252 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0252), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0037)
- return
- }
- if zb0253 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037]) >= zb0252 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] = ((*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037])[:zb0252]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] = make(certProofs, zb0252)
- }
- for zb0038 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] {
- bts, err = (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037][zb0038].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0037, zb0038)
- return
- }
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0254 []byte
- var zb0255 int
- zb0255, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- if zb0255 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0255), uint64(maxBitmaskSize))
- return
- }
- zb0254, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs = bitmask(zb0254)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0256 int
- var zb0257 bool
- zb0256, zb0257, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0256 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0256), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0257 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.PartProofs != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.PartProofs) >= zb0256 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs = ((*z).encodedCompactCertTxnFields.encodedCert.PartProofs)[:zb0256]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs = make([]certProofs, zb0256)
- }
- for zb0039 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs {
- var zb0258 int
- var zb0259 bool
- zb0258, zb0259, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0039)
- return
- }
- if zb0258 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0258), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0039)
- return
- }
- if zb0259 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039]) >= zb0258 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] = ((*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039])[:zb0258]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] = make(certProofs, zb0258)
- }
- for zb0040 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] {
- bts, err = (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039][zb0040].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0039, zb0040)
- return
- }
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0260 []byte
- var zb0261 int
- zb0261, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- if zb0261 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0261), uint64(maxBitmaskSize))
- return
- }
- zb0260, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs = bitmask(zb0260)
- }
- }
- if zb0044 > 0 {
- zb0044--
- var zb0262 int
- var zb0263 bool
- zb0262, zb0263, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0262 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0262), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0263 {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.Reveals != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.Reveals) >= zb0262 {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals = ((*z).encodedCompactCertTxnFields.encodedCert.Reveals)[:zb0262]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals = make([]revealMap, zb0262)
- }
- for zb0041 := range (*z).encodedCompactCertTxnFields.encodedCert.Reveals {
- var zb0264 int
- var zb0265 bool
- zb0264, zb0265, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0041)
- return
- }
- if zb0264 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0264), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0041)
- return
- }
- if zb0265 {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] == nil {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] = make(revealMap, zb0264)
- }
- for zb0264 > 0 {
- var zb0042 uint64
- var zb0043 compactcert.Reveal
- zb0264--
- zb0042, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0041)
- return
- }
- bts, err = zb0043.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0041, zb0042)
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041][zb0042] = zb0043
- }
- }
- }
- if zb0044 > 0 {
- zb0044--
- {
- var zb0266 []byte
- var zb0267 int
- zb0267, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- if zb0267 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0267), uint64(maxBitmaskSize))
- return
- }
- zb0266, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals = bitmask(zb0266)
- }
- }
- if zb0044 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0044)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0045 {
- (*z) = encodedTxns{}
- }
- for zb0044 > 0 {
- zb0044--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "type":
- var zb0268 int
- zb0268, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxType")
- return
- }
- if zb0268 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0268), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).TxType, bts, err = msgp.ReadBytesBytes(bts, (*z).TxType)
- if err != nil {
- err = msgp.WrapError(err, "TxType")
- return
- }
- case "typebm":
- {
- var zb0269 []byte
- var zb0270 int
- zb0270, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTxType")
- return
- }
- if zb0270 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0270), uint64(maxBitmaskSize))
- return
- }
- zb0269, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).BitmaskTxType))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTxType")
- return
- }
- (*z).BitmaskTxType = bitmask(zb0269)
- }
- case "typeo":
- (*z).TxTypeOffset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxTypeOffset")
- return
- }
- case "snd":
- var zb0271 int
- zb0271, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- if zb0271 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0271), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxnHeaders.Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.Sender)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- case "sndbm":
- {
- var zb0272 []byte
- var zb0273 int
- zb0273, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- if zb0273 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0273), uint64(maxBitmaskSize))
- return
- }
- zb0272, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- (*z).encodedTxnHeaders.BitmaskSender = bitmask(zb0272)
- }
- case "fee":
- var zb0274 int
- var zb0275 bool
- zb0274, zb0275, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0274 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0274), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0275 {
- (*z).encodedTxnHeaders.Fee = nil
- } else if (*z).encodedTxnHeaders.Fee != nil && cap((*z).encodedTxnHeaders.Fee) >= zb0274 {
- (*z).encodedTxnHeaders.Fee = ((*z).encodedTxnHeaders.Fee)[:zb0274]
- } else {
- (*z).encodedTxnHeaders.Fee = make([]basics.MicroAlgos, zb0274)
- }
- for zb0001 := range (*z).encodedTxnHeaders.Fee {
- bts, err = (*z).encodedTxnHeaders.Fee[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee", zb0001)
- return
- }
- }
- case "feebm":
- {
- var zb0276 []byte
- var zb0277 int
- zb0277, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- if zb0277 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0277), uint64(maxBitmaskSize))
- return
- }
- zb0276, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- (*z).encodedTxnHeaders.BitmaskFee = bitmask(zb0276)
- }
- case "fv":
- var zb0278 int
- var zb0279 bool
- zb0278, zb0279, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0278 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0278), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0279 {
- (*z).encodedTxnHeaders.FirstValid = nil
- } else if (*z).encodedTxnHeaders.FirstValid != nil && cap((*z).encodedTxnHeaders.FirstValid) >= zb0278 {
- (*z).encodedTxnHeaders.FirstValid = ((*z).encodedTxnHeaders.FirstValid)[:zb0278]
- } else {
- (*z).encodedTxnHeaders.FirstValid = make([]basics.Round, zb0278)
- }
- for zb0002 := range (*z).encodedTxnHeaders.FirstValid {
- bts, err = (*z).encodedTxnHeaders.FirstValid[zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid", zb0002)
- return
- }
- }
- case "fvbm":
- {
- var zb0280 []byte
- var zb0281 int
- zb0281, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- if zb0281 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0281), uint64(maxBitmaskSize))
- return
- }
- zb0280, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- (*z).encodedTxnHeaders.BitmaskFirstValid = bitmask(zb0280)
- }
- case "lv":
- var zb0282 int
- var zb0283 bool
- zb0282, zb0283, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0282 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0282), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0283 {
- (*z).encodedTxnHeaders.LastValid = nil
- } else if (*z).encodedTxnHeaders.LastValid != nil && cap((*z).encodedTxnHeaders.LastValid) >= zb0282 {
- (*z).encodedTxnHeaders.LastValid = ((*z).encodedTxnHeaders.LastValid)[:zb0282]
- } else {
- (*z).encodedTxnHeaders.LastValid = make([]basics.Round, zb0282)
- }
- for zb0003 := range (*z).encodedTxnHeaders.LastValid {
- bts, err = (*z).encodedTxnHeaders.LastValid[zb0003].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid", zb0003)
- return
- }
- }
- case "lvbm":
- {
- var zb0284 []byte
- var zb0285 int
- zb0285, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- if zb0285 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0285), uint64(maxBitmaskSize))
- return
- }
- zb0284, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- (*z).encodedTxnHeaders.BitmaskLastValid = bitmask(zb0284)
- }
- case "note":
- var zb0286 int
- var zb0287 bool
- zb0286, zb0287, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0286 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0286), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0287 {
- (*z).encodedTxnHeaders.Note = nil
- } else if (*z).encodedTxnHeaders.Note != nil && cap((*z).encodedTxnHeaders.Note) >= zb0286 {
- (*z).encodedTxnHeaders.Note = ((*z).encodedTxnHeaders.Note)[:zb0286]
- } else {
- (*z).encodedTxnHeaders.Note = make([][]byte, zb0286)
- }
- for zb0004 := range (*z).encodedTxnHeaders.Note {
- var zb0288 int
- zb0288, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0004)
- return
- }
- if zb0288 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0288), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).encodedTxnHeaders.Note[zb0004], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.Note[zb0004])
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0004)
- return
- }
- }
- case "notebm":
- {
- var zb0289 []byte
- var zb0290 int
- zb0290, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- if zb0290 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0290), uint64(maxBitmaskSize))
- return
- }
- zb0289, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- (*z).encodedTxnHeaders.BitmaskNote = bitmask(zb0289)
- }
- case "genbm":
- {
- var zb0291 []byte
- var zb0292 int
- zb0292, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- if zb0292 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0292), uint64(maxBitmaskSize))
- return
- }
- zb0291, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- (*z).encodedTxnHeaders.BitmaskGenesisID = bitmask(zb0291)
- }
- case "grpbm":
- {
- var zb0293 []byte
- var zb0294 int
- zb0294, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- if zb0294 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0294), uint64(maxBitmaskSize))
- return
- }
- zb0293, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- (*z).encodedTxnHeaders.BitmaskGroup = bitmask(zb0293)
- }
- case "lx":
- var zb0295 int
- zb0295, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- if zb0295 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0295), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxnHeaders.Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.Lease)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- case "lxbm":
- {
- var zb0296 []byte
- var zb0297 int
- zb0297, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- if zb0297 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0297), uint64(maxBitmaskSize))
- return
- }
- zb0296, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- (*z).encodedTxnHeaders.BitmaskLease = bitmask(zb0296)
- }
- case "rekey":
- var zb0298 int
- zb0298, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- if zb0298 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0298), uint64(maxAddressBytes))
- return
- }
- (*z).encodedTxnHeaders.RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedTxnHeaders.RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- case "rekeybm":
- {
- var zb0299 []byte
- var zb0300 int
- zb0300, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- if zb0300 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0300), uint64(maxBitmaskSize))
- return
- }
- zb0299, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedTxnHeaders.BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- (*z).encodedTxnHeaders.BitmaskRekeyTo = bitmask(zb0299)
- }
- case "votekey":
- var zb0301 int
- zb0301, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- if zb0301 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0301), uint64(maxAddressBytes))
- return
- }
- (*z).encodedKeyregTxnFields.VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedKeyregTxnFields.VotePK)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- case "selkey":
- var zb0302 int
- zb0302, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- if zb0302 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0302), uint64(maxAddressBytes))
- return
- }
- (*z).encodedKeyregTxnFields.SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedKeyregTxnFields.SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- case "votefst":
- var zb0303 int
- var zb0304 bool
- zb0303, zb0304, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0303 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0303), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0304 {
- (*z).encodedKeyregTxnFields.VoteFirst = nil
- } else if (*z).encodedKeyregTxnFields.VoteFirst != nil && cap((*z).encodedKeyregTxnFields.VoteFirst) >= zb0303 {
- (*z).encodedKeyregTxnFields.VoteFirst = ((*z).encodedKeyregTxnFields.VoteFirst)[:zb0303]
- } else {
- (*z).encodedKeyregTxnFields.VoteFirst = make([]basics.Round, zb0303)
- }
- for zb0005 := range (*z).encodedKeyregTxnFields.VoteFirst {
- bts, err = (*z).encodedKeyregTxnFields.VoteFirst[zb0005].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst", zb0005)
- return
- }
- }
- case "votefstbm":
- {
- var zb0305 []byte
- var zb0306 int
- zb0306, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- if zb0306 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0306), uint64(maxBitmaskSize))
- return
- }
- zb0305, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskVoteFirst = bitmask(zb0305)
- }
- case "votelst":
- var zb0307 int
- var zb0308 bool
- zb0307, zb0308, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0307 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0307), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0308 {
- (*z).encodedKeyregTxnFields.VoteLast = nil
- } else if (*z).encodedKeyregTxnFields.VoteLast != nil && cap((*z).encodedKeyregTxnFields.VoteLast) >= zb0307 {
- (*z).encodedKeyregTxnFields.VoteLast = ((*z).encodedKeyregTxnFields.VoteLast)[:zb0307]
- } else {
- (*z).encodedKeyregTxnFields.VoteLast = make([]basics.Round, zb0307)
- }
- for zb0006 := range (*z).encodedKeyregTxnFields.VoteLast {
- bts, err = (*z).encodedKeyregTxnFields.VoteLast[zb0006].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast", zb0006)
- return
- }
- }
- case "votelstbm":
- {
- var zb0309 []byte
- var zb0310 int
- zb0310, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- if zb0310 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0310), uint64(maxBitmaskSize))
- return
- }
- zb0309, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskVoteLast = bitmask(zb0309)
- }
- case "votekd":
- var zb0311 int
- var zb0312 bool
- zb0311, zb0312, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0311 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0311), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0312 {
- (*z).encodedKeyregTxnFields.VoteKeyDilution = nil
- } else if (*z).encodedKeyregTxnFields.VoteKeyDilution != nil && cap((*z).encodedKeyregTxnFields.VoteKeyDilution) >= zb0311 {
- (*z).encodedKeyregTxnFields.VoteKeyDilution = ((*z).encodedKeyregTxnFields.VoteKeyDilution)[:zb0311]
- } else {
- (*z).encodedKeyregTxnFields.VoteKeyDilution = make([]uint64, zb0311)
- }
- for zb0007 := range (*z).encodedKeyregTxnFields.VoteKeyDilution {
- (*z).encodedKeyregTxnFields.VoteKeyDilution[zb0007], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution", zb0007)
- return
- }
- }
- case "votekbm":
- {
- var zb0313 []byte
- var zb0314 int
- zb0314, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- if zb0314 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0314), uint64(maxBitmaskSize))
- return
- }
- zb0313, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskKeys = bitmask(zb0313)
- }
- case "nonpartbm":
- {
- var zb0315 []byte
- var zb0316 int
- zb0316, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- if zb0316 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0316), uint64(maxBitmaskSize))
- return
- }
- zb0315, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedKeyregTxnFields.BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- (*z).encodedKeyregTxnFields.BitmaskNonparticipation = bitmask(zb0315)
- }
- case "rcv":
- var zb0317 int
- zb0317, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- if zb0317 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0317), uint64(maxAddressBytes))
- return
- }
- (*z).encodedPaymentTxnFields.Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedPaymentTxnFields.Receiver)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- case "rcvbm":
- {
- var zb0318 []byte
- var zb0319 int
- zb0319, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- if zb0319 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0319), uint64(maxBitmaskSize))
- return
- }
- zb0318, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedPaymentTxnFields.BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- (*z).encodedPaymentTxnFields.BitmaskReceiver = bitmask(zb0318)
- }
- case "amt":
- var zb0320 int
- var zb0321 bool
- zb0320, zb0321, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0320 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0320), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0321 {
- (*z).encodedPaymentTxnFields.Amount = nil
- } else if (*z).encodedPaymentTxnFields.Amount != nil && cap((*z).encodedPaymentTxnFields.Amount) >= zb0320 {
- (*z).encodedPaymentTxnFields.Amount = ((*z).encodedPaymentTxnFields.Amount)[:zb0320]
- } else {
- (*z).encodedPaymentTxnFields.Amount = make([]basics.MicroAlgos, zb0320)
- }
- for zb0008 := range (*z).encodedPaymentTxnFields.Amount {
- bts, err = (*z).encodedPaymentTxnFields.Amount[zb0008].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount", zb0008)
- return
- }
- }
- case "amtbm":
- {
- var zb0322 []byte
- var zb0323 int
- zb0323, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- if zb0323 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0323), uint64(maxBitmaskSize))
- return
- }
- zb0322, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedPaymentTxnFields.BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- (*z).encodedPaymentTxnFields.BitmaskAmount = bitmask(zb0322)
- }
- case "close":
- var zb0324 int
- zb0324, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- if zb0324 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0324), uint64(maxAddressBytes))
- return
- }
- (*z).encodedPaymentTxnFields.CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedPaymentTxnFields.CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- case "closebm":
- {
- var zb0325 []byte
- var zb0326 int
- zb0326, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- if zb0326 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0326), uint64(maxBitmaskSize))
- return
- }
- zb0325, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- (*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo = bitmask(zb0325)
- }
- case "caid":
- var zb0327 int
- var zb0328 bool
- zb0327, zb0328, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0327 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0327), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0328 {
- (*z).encodedAssetConfigTxnFields.ConfigAsset = nil
- } else if (*z).encodedAssetConfigTxnFields.ConfigAsset != nil && cap((*z).encodedAssetConfigTxnFields.ConfigAsset) >= zb0327 {
- (*z).encodedAssetConfigTxnFields.ConfigAsset = ((*z).encodedAssetConfigTxnFields.ConfigAsset)[:zb0327]
- } else {
- (*z).encodedAssetConfigTxnFields.ConfigAsset = make([]basics.AssetIndex, zb0327)
- }
- for zb0009 := range (*z).encodedAssetConfigTxnFields.ConfigAsset {
- bts, err = (*z).encodedAssetConfigTxnFields.ConfigAsset[zb0009].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset", zb0009)
- return
- }
- }
- case "caidbm":
- {
- var zb0329 []byte
- var zb0330 int
- zb0330, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- if zb0330 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0330), uint64(maxBitmaskSize))
- return
- }
- zb0329, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- (*z).encodedAssetConfigTxnFields.BitmaskConfigAsset = bitmask(zb0329)
- }
- case "t":
- var zb0331 int
- var zb0332 bool
- zb0331, zb0332, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0331 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0331), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0332 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total) >= zb0331 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total)[:zb0331]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total = make([]uint64, zb0331)
- }
- for zb0010 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0010], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total", zb0010)
- return
- }
- }
- case "tbm":
- {
- var zb0333 []byte
- var zb0334 int
- zb0334, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- if zb0334 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0334), uint64(maxBitmaskSize))
- return
- }
- zb0333, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal = bitmask(zb0333)
- }
- case "dc":
- var zb0335 int
- var zb0336 bool
- zb0335, zb0336, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0335 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0335), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0336 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals) >= zb0335 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals)[:zb0335]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals = make([]uint32, zb0335)
- }
- for zb0011 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0011], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals", zb0011)
- return
- }
- }
- case "dcbm":
- {
- var zb0337 []byte
- var zb0338 int
- zb0338, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- if zb0338 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0338), uint64(maxBitmaskSize))
- return
- }
- zb0337, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals = bitmask(zb0337)
- }
- case "dfbm":
- {
- var zb0339 []byte
- var zb0340 int
- zb0340, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- if zb0340 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0340), uint64(maxBitmaskSize))
- return
- }
- zb0339, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0339)
- }
- case "un":
- var zb0341 int
- var zb0342 bool
- zb0341, zb0342, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0341 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0341), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0342 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName) >= zb0341 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName)[:zb0341]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName = make([]string, zb0341)
- }
- for zb0012 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0012], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName", zb0012)
- return
- }
- }
- case "unbm":
- {
- var zb0343 []byte
- var zb0344 int
- zb0344, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- if zb0344 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0344), uint64(maxBitmaskSize))
- return
- }
- zb0343, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName = bitmask(zb0343)
- }
- case "an":
- var zb0345 int
- var zb0346 bool
- zb0345, zb0346, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0345 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0345), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0346 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName) >= zb0345 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName)[:zb0345]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName = make([]string, zb0345)
- }
- for zb0013 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0013], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName", zb0013)
- return
- }
- }
- case "anbm":
- {
- var zb0347 []byte
- var zb0348 int
- zb0348, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- if zb0348 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0348), uint64(maxBitmaskSize))
- return
- }
- zb0347, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName = bitmask(zb0347)
- }
- case "au":
- var zb0349 int
- var zb0350 bool
- zb0349, zb0350, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0349 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0349), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0350 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL = nil
- } else if (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL != nil && cap((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL) >= zb0349 {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL = ((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL)[:zb0349]
- } else {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL = make([]string, zb0349)
- }
- for zb0014 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL {
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0014], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL", zb0014)
- return
- }
- }
- case "aubm":
- {
- var zb0351 []byte
- var zb0352 int
- zb0352, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- if zb0352 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0352), uint64(maxBitmaskSize))
- return
- }
- zb0351, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL = bitmask(zb0351)
- }
- case "am":
- var zb0353 int
- zb0353, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- if zb0353 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0353), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- case "ambm":
- {
- var zb0354 []byte
- var zb0355 int
- zb0355, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- if zb0355 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0355), uint64(maxBitmaskSize))
- return
- }
- zb0354, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash = bitmask(zb0354)
- }
- case "m":
- var zb0356 int
- zb0356, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- if zb0356 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0356), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- case "mbm":
- {
- var zb0357 []byte
- var zb0358 int
- zb0358, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- if zb0358 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0358), uint64(maxBitmaskSize))
- return
- }
- zb0357, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager = bitmask(zb0357)
- }
- case "r":
- var zb0359 int
- zb0359, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- if zb0359 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0359), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- case "rbm":
- {
- var zb0360 []byte
- var zb0361 int
- zb0361, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- if zb0361 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0361), uint64(maxBitmaskSize))
- return
- }
- zb0360, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve = bitmask(zb0360)
- }
- case "f":
- var zb0362 int
- zb0362, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- if zb0362 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0362), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- case "fbm":
- {
- var zb0363 []byte
- var zb0364 int
- zb0364, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- if zb0364 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0364), uint64(maxBitmaskSize))
- return
- }
- zb0363, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze = bitmask(zb0363)
- }
- case "c":
- var zb0365 int
- zb0365, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- if zb0365 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0365), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- case "cbm":
- {
- var zb0366 []byte
- var zb0367 int
- zb0367, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- if zb0367 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0367), uint64(maxBitmaskSize))
- return
- }
- zb0366, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- (*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback = bitmask(zb0366)
- }
- case "xaid":
- var zb0368 int
- var zb0369 bool
- zb0368, zb0369, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0368 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0368), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0369 {
- (*z).encodedAssetTransferTxnFields.XferAsset = nil
- } else if (*z).encodedAssetTransferTxnFields.XferAsset != nil && cap((*z).encodedAssetTransferTxnFields.XferAsset) >= zb0368 {
- (*z).encodedAssetTransferTxnFields.XferAsset = ((*z).encodedAssetTransferTxnFields.XferAsset)[:zb0368]
- } else {
- (*z).encodedAssetTransferTxnFields.XferAsset = make([]basics.AssetIndex, zb0368)
- }
- for zb0015 := range (*z).encodedAssetTransferTxnFields.XferAsset {
- bts, err = (*z).encodedAssetTransferTxnFields.XferAsset[zb0015].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset", zb0015)
- return
- }
- }
- case "xaidbm":
- {
- var zb0370 []byte
- var zb0371 int
- zb0371, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- if zb0371 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0371), uint64(maxBitmaskSize))
- return
- }
- zb0370, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskXferAsset = bitmask(zb0370)
- }
- case "aamt":
- var zb0372 int
- var zb0373 bool
- zb0372, zb0373, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0372 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0372), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0373 {
- (*z).encodedAssetTransferTxnFields.AssetAmount = nil
- } else if (*z).encodedAssetTransferTxnFields.AssetAmount != nil && cap((*z).encodedAssetTransferTxnFields.AssetAmount) >= zb0372 {
- (*z).encodedAssetTransferTxnFields.AssetAmount = ((*z).encodedAssetTransferTxnFields.AssetAmount)[:zb0372]
- } else {
- (*z).encodedAssetTransferTxnFields.AssetAmount = make([]uint64, zb0372)
- }
- for zb0016 := range (*z).encodedAssetTransferTxnFields.AssetAmount {
- (*z).encodedAssetTransferTxnFields.AssetAmount[zb0016], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount", zb0016)
- return
- }
- }
- case "aamtbm":
- {
- var zb0374 []byte
- var zb0375 int
- zb0375, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- if zb0375 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0375), uint64(maxBitmaskSize))
- return
- }
- zb0374, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetAmount = bitmask(zb0374)
- }
- case "asnd":
- var zb0376 int
- zb0376, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- if zb0376 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0376), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetTransferTxnFields.AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetTransferTxnFields.AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- case "asndbm":
- {
- var zb0377 []byte
- var zb0378 int
- zb0378, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- if zb0378 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0378), uint64(maxBitmaskSize))
- return
- }
- zb0377, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetSender = bitmask(zb0377)
- }
- case "arcv":
- var zb0379 int
- zb0379, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- if zb0379 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0379), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetTransferTxnFields.AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetTransferTxnFields.AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- case "arcvbm":
- {
- var zb0380 []byte
- var zb0381 int
- zb0381, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- if zb0381 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0381), uint64(maxBitmaskSize))
- return
- }
- zb0380, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver = bitmask(zb0380)
- }
- case "aclose":
- var zb0382 int
- zb0382, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- if zb0382 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0382), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetTransferTxnFields.AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetTransferTxnFields.AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- case "aclosebm":
- {
- var zb0383 []byte
- var zb0384 int
- zb0384, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- if zb0384 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0384), uint64(maxBitmaskSize))
- return
- }
- zb0383, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- (*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo = bitmask(zb0383)
- }
- case "fadd":
- var zb0385 int
- zb0385, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- if zb0385 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0385), uint64(maxAddressBytes))
- return
- }
- (*z).encodedAssetFreezeTxnFields.FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedAssetFreezeTxnFields.FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- case "faddbm":
- {
- var zb0386 []byte
- var zb0387 int
- zb0387, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- if zb0387 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0387), uint64(maxBitmaskSize))
- return
- }
- zb0386, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- (*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount = bitmask(zb0386)
- }
- case "faid":
- var zb0388 int
- var zb0389 bool
- zb0388, zb0389, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0388 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0388), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0389 {
- (*z).encodedAssetFreezeTxnFields.FreezeAsset = nil
- } else if (*z).encodedAssetFreezeTxnFields.FreezeAsset != nil && cap((*z).encodedAssetFreezeTxnFields.FreezeAsset) >= zb0388 {
- (*z).encodedAssetFreezeTxnFields.FreezeAsset = ((*z).encodedAssetFreezeTxnFields.FreezeAsset)[:zb0388]
- } else {
- (*z).encodedAssetFreezeTxnFields.FreezeAsset = make([]basics.AssetIndex, zb0388)
- }
- for zb0017 := range (*z).encodedAssetFreezeTxnFields.FreezeAsset {
- bts, err = (*z).encodedAssetFreezeTxnFields.FreezeAsset[zb0017].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset", zb0017)
- return
- }
- }
- case "faidbm":
- {
- var zb0390 []byte
- var zb0391 int
- zb0391, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- if zb0391 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0391), uint64(maxBitmaskSize))
- return
- }
- zb0390, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- (*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset = bitmask(zb0390)
- }
- case "afrzbm":
- {
- var zb0392 []byte
- var zb0393 int
- zb0393, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- if zb0393 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0393), uint64(maxBitmaskSize))
- return
- }
- zb0392, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- (*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen = bitmask(zb0392)
- }
- case "apid":
- var zb0394 int
- var zb0395 bool
- zb0394, zb0395, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0394 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0394), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0395 {
- (*z).encodedApplicationCallTxnFields.ApplicationID = nil
- } else if (*z).encodedApplicationCallTxnFields.ApplicationID != nil && cap((*z).encodedApplicationCallTxnFields.ApplicationID) >= zb0394 {
- (*z).encodedApplicationCallTxnFields.ApplicationID = ((*z).encodedApplicationCallTxnFields.ApplicationID)[:zb0394]
- } else {
- (*z).encodedApplicationCallTxnFields.ApplicationID = make([]basics.AppIndex, zb0394)
- }
- for zb0018 := range (*z).encodedApplicationCallTxnFields.ApplicationID {
- bts, err = (*z).encodedApplicationCallTxnFields.ApplicationID[zb0018].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID", zb0018)
- return
- }
- }
- case "apidbm":
- {
- var zb0396 []byte
- var zb0397 int
- zb0397, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- if zb0397 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0397), uint64(maxBitmaskSize))
- return
- }
- zb0396, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskApplicationID = bitmask(zb0396)
- }
- case "apan":
- var zb0398 int
- zb0398, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- if zb0398 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0398), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedApplicationCallTxnFields.OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedApplicationCallTxnFields.OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- case "apanbm":
- {
- var zb0399 []byte
- var zb0400 int
- zb0400, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- if zb0400 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0400), uint64(maxBitmaskSize))
- return
- }
- zb0399, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskOnCompletion = bitmask(zb0399)
- }
- case "apaa":
- var zb0401 int
- var zb0402 bool
- zb0401, zb0402, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0401 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0401), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0402 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).encodedApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).encodedApplicationCallTxnFields.ApplicationArgs) >= zb0401 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs = ((*z).encodedApplicationCallTxnFields.ApplicationArgs)[:zb0401]
- } else {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs = make([]applicationArgs, zb0401)
- }
- for zb0019 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs {
- var zb0403 int
- var zb0404 bool
- zb0403, zb0404, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0019)
- return
- }
- if zb0403 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0403), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "ApplicationArgs", zb0019)
- return
- }
- if zb0404 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] = nil
- } else if (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] != nil && cap((*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019]) >= zb0403 {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] = ((*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019])[:zb0403]
- } else {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] = make(applicationArgs, zb0403)
- }
- for zb0020 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] {
- (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019][zb0020], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019][zb0020])
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0019, zb0020)
- return
- }
- }
- }
- case "apaabm":
- {
- var zb0405 []byte
- var zb0406 int
- zb0406, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- if zb0406 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0406), uint64(maxBitmaskSize))
- return
- }
- zb0405, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs = bitmask(zb0405)
- }
- case "apat":
- var zb0407 int
- var zb0408 bool
- zb0407, zb0408, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0407 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0407), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0408 {
- (*z).encodedApplicationCallTxnFields.Accounts = nil
- } else if (*z).encodedApplicationCallTxnFields.Accounts != nil && cap((*z).encodedApplicationCallTxnFields.Accounts) >= zb0407 {
- (*z).encodedApplicationCallTxnFields.Accounts = ((*z).encodedApplicationCallTxnFields.Accounts)[:zb0407]
- } else {
- (*z).encodedApplicationCallTxnFields.Accounts = make([]addresses, zb0407)
- }
- for zb0021 := range (*z).encodedApplicationCallTxnFields.Accounts {
- var zb0409 int
- var zb0410 bool
- zb0409, zb0410, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0021)
- return
- }
- if zb0409 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0409), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "Accounts", zb0021)
- return
- }
- if zb0410 {
- (*z).encodedApplicationCallTxnFields.Accounts[zb0021] = nil
- } else if (*z).encodedApplicationCallTxnFields.Accounts[zb0021] != nil && cap((*z).encodedApplicationCallTxnFields.Accounts[zb0021]) >= zb0409 {
- (*z).encodedApplicationCallTxnFields.Accounts[zb0021] = ((*z).encodedApplicationCallTxnFields.Accounts[zb0021])[:zb0409]
- } else {
- (*z).encodedApplicationCallTxnFields.Accounts[zb0021] = make(addresses, zb0409)
- }
- for zb0022 := range (*z).encodedApplicationCallTxnFields.Accounts[zb0021] {
- bts, err = (*z).encodedApplicationCallTxnFields.Accounts[zb0021][zb0022].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0021, zb0022)
- return
- }
- }
- }
- case "apatbm":
- {
- var zb0411 []byte
- var zb0412 int
- zb0412, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- if zb0412 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0412), uint64(maxBitmaskSize))
- return
- }
- zb0411, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskAccounts = bitmask(zb0411)
- }
- case "apfa":
- var zb0413 int
- var zb0414 bool
- zb0413, zb0414, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0413 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0413), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0414 {
- (*z).encodedApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignApps != nil && cap((*z).encodedApplicationCallTxnFields.ForeignApps) >= zb0413 {
- (*z).encodedApplicationCallTxnFields.ForeignApps = ((*z).encodedApplicationCallTxnFields.ForeignApps)[:zb0413]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignApps = make([]appIndices, zb0413)
- }
- for zb0023 := range (*z).encodedApplicationCallTxnFields.ForeignApps {
- var zb0415 int
- var zb0416 bool
- zb0415, zb0416, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0023)
- return
- }
- if zb0415 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0415), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "ForeignApps", zb0023)
- return
- }
- if zb0416 {
- (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] != nil && cap((*z).encodedApplicationCallTxnFields.ForeignApps[zb0023]) >= zb0415 {
- (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] = ((*z).encodedApplicationCallTxnFields.ForeignApps[zb0023])[:zb0415]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] = make(appIndices, zb0415)
- }
- for zb0024 := range (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] {
- bts, err = (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023][zb0024].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0023, zb0024)
- return
- }
- }
- }
- case "apfabm":
- {
- var zb0417 []byte
- var zb0418 int
- zb0418, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- if zb0418 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0418), uint64(maxBitmaskSize))
- return
- }
- zb0417, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskForeignApps = bitmask(zb0417)
- }
- case "apas":
- var zb0419 int
- var zb0420 bool
- zb0419, zb0420, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0419 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0419), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0420 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignAssets != nil && cap((*z).encodedApplicationCallTxnFields.ForeignAssets) >= zb0419 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets = ((*z).encodedApplicationCallTxnFields.ForeignAssets)[:zb0419]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignAssets = make([]assetIndices, zb0419)
- }
- for zb0025 := range (*z).encodedApplicationCallTxnFields.ForeignAssets {
- var zb0421 int
- var zb0422 bool
- zb0421, zb0422, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0025)
- return
- }
- if zb0421 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0421), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "ForeignAssets", zb0025)
- return
- }
- if zb0422 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] = nil
- } else if (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] != nil && cap((*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025]) >= zb0421 {
- (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] = ((*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025])[:zb0421]
- } else {
- (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] = make(assetIndices, zb0421)
- }
- for zb0026 := range (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] {
- bts, err = (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025][zb0026].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0025, zb0026)
- return
- }
- }
- }
- case "apasbm":
- {
- var zb0423 []byte
- var zb0424 int
- zb0424, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- if zb0424 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0424), uint64(maxBitmaskSize))
- return
- }
- zb0423, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskForeignAssets = bitmask(zb0423)
- }
- case "lnui":
- var zb0425 int
- var zb0426 bool
- zb0425, zb0426, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0425 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0425), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0426 {
- (*z).encodedApplicationCallTxnFields.LocalNumUint = nil
- } else if (*z).encodedApplicationCallTxnFields.LocalNumUint != nil && cap((*z).encodedApplicationCallTxnFields.LocalNumUint) >= zb0425 {
- (*z).encodedApplicationCallTxnFields.LocalNumUint = ((*z).encodedApplicationCallTxnFields.LocalNumUint)[:zb0425]
- } else {
- (*z).encodedApplicationCallTxnFields.LocalNumUint = make([]uint64, zb0425)
- }
- for zb0027 := range (*z).encodedApplicationCallTxnFields.LocalNumUint {
- (*z).encodedApplicationCallTxnFields.LocalNumUint[zb0027], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint", zb0027)
- return
- }
- }
- case "lnuibm":
- {
- var zb0427 []byte
- var zb0428 int
- zb0428, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- if zb0428 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0428), uint64(maxBitmaskSize))
- return
- }
- zb0427, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint = bitmask(zb0427)
- }
- case "lnbs":
- var zb0429 int
- var zb0430 bool
- zb0429, zb0430, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0429 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0429), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0430 {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice = nil
- } else if (*z).encodedApplicationCallTxnFields.LocalNumByteSlice != nil && cap((*z).encodedApplicationCallTxnFields.LocalNumByteSlice) >= zb0429 {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice = ((*z).encodedApplicationCallTxnFields.LocalNumByteSlice)[:zb0429]
- } else {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice = make([]uint64, zb0429)
- }
- for zb0028 := range (*z).encodedApplicationCallTxnFields.LocalNumByteSlice {
- (*z).encodedApplicationCallTxnFields.LocalNumByteSlice[zb0028], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice", zb0028)
- return
- }
- }
- case "lnbsbm":
- {
- var zb0431 []byte
- var zb0432 int
- zb0432, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- if zb0432 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0432), uint64(maxBitmaskSize))
- return
- }
- zb0431, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice = bitmask(zb0431)
- }
- case "gnui":
- var zb0433 int
- var zb0434 bool
- zb0433, zb0434, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0433 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0433), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0434 {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint = nil
- } else if (*z).encodedApplicationCallTxnFields.GlobalNumUint != nil && cap((*z).encodedApplicationCallTxnFields.GlobalNumUint) >= zb0433 {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint = ((*z).encodedApplicationCallTxnFields.GlobalNumUint)[:zb0433]
- } else {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint = make([]uint64, zb0433)
- }
- for zb0029 := range (*z).encodedApplicationCallTxnFields.GlobalNumUint {
- (*z).encodedApplicationCallTxnFields.GlobalNumUint[zb0029], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint", zb0029)
- return
- }
- }
- case "gnuibm":
- {
- var zb0435 []byte
- var zb0436 int
- zb0436, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- if zb0436 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0436), uint64(maxBitmaskSize))
- return
- }
- zb0435, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint = bitmask(zb0435)
- }
- case "gnbs":
- var zb0437 int
- var zb0438 bool
- zb0437, zb0438, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0437 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0437), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0438 {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice = nil
- } else if (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice != nil && cap((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice) >= zb0437 {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice = ((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice)[:zb0437]
- } else {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice = make([]uint64, zb0437)
- }
- for zb0030 := range (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice {
- (*z).encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0030], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice", zb0030)
- return
- }
- }
- case "gnbsbm":
- {
- var zb0439 []byte
- var zb0440 int
- zb0440, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0440 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0440), uint64(maxBitmaskSize))
- return
- }
- zb0439, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice = bitmask(zb0439)
- }
- case "apap":
- var zb0441 int
- var zb0442 bool
- zb0441, zb0442, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0441 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0441), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0442 {
- (*z).encodedApplicationCallTxnFields.ApprovalProgram = nil
- } else if (*z).encodedApplicationCallTxnFields.ApprovalProgram != nil && cap((*z).encodedApplicationCallTxnFields.ApprovalProgram) >= zb0441 {
- (*z).encodedApplicationCallTxnFields.ApprovalProgram = ((*z).encodedApplicationCallTxnFields.ApprovalProgram)[:zb0441]
- } else {
- (*z).encodedApplicationCallTxnFields.ApprovalProgram = make([]program, zb0441)
- }
- for zb0031 := range (*z).encodedApplicationCallTxnFields.ApprovalProgram {
- {
- var zb0443 []byte
- var zb0444 int
- zb0444, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0031)
- return
- }
- if zb0444 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0444), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0443, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.ApprovalProgram[zb0031]))
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0031)
- return
- }
- (*z).encodedApplicationCallTxnFields.ApprovalProgram[zb0031] = program(zb0443)
- }
- }
- case "apapbm":
- {
- var zb0445 []byte
- var zb0446 int
- zb0446, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- if zb0446 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0446), uint64(maxBitmaskSize))
- return
- }
- zb0445, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram = bitmask(zb0445)
- }
- case "apsu":
- var zb0447 int
- var zb0448 bool
- zb0447, zb0448, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0447 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0447), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0448 {
- (*z).encodedApplicationCallTxnFields.ClearStateProgram = nil
- } else if (*z).encodedApplicationCallTxnFields.ClearStateProgram != nil && cap((*z).encodedApplicationCallTxnFields.ClearStateProgram) >= zb0447 {
- (*z).encodedApplicationCallTxnFields.ClearStateProgram = ((*z).encodedApplicationCallTxnFields.ClearStateProgram)[:zb0447]
- } else {
- (*z).encodedApplicationCallTxnFields.ClearStateProgram = make([]program, zb0447)
- }
- for zb0032 := range (*z).encodedApplicationCallTxnFields.ClearStateProgram {
- {
- var zb0449 []byte
- var zb0450 int
- zb0450, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0032)
- return
- }
- if zb0450 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0450), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0449, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.ClearStateProgram[zb0032]))
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0032)
- return
- }
- (*z).encodedApplicationCallTxnFields.ClearStateProgram[zb0032] = program(zb0449)
- }
- }
- case "apsubm":
- {
- var zb0451 []byte
- var zb0452 int
- zb0452, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- if zb0452 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0452), uint64(maxBitmaskSize))
- return
- }
- zb0451, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram = bitmask(zb0451)
- }
- case "apep":
- var zb0453 int
- var zb0454 bool
- zb0453, zb0454, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0453 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0453), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0454 {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages = nil
- } else if (*z).encodedApplicationCallTxnFields.ExtraProgramPages != nil && cap((*z).encodedApplicationCallTxnFields.ExtraProgramPages) >= zb0453 {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages = ((*z).encodedApplicationCallTxnFields.ExtraProgramPages)[:zb0453]
- } else {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages = make([]uint32, zb0453)
- }
- for zb0033 := range (*z).encodedApplicationCallTxnFields.ExtraProgramPages {
- (*z).encodedApplicationCallTxnFields.ExtraProgramPages[zb0033], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages", zb0033)
- return
- }
- }
- case "apepbm":
- {
- var zb0455 []byte
- var zb0456 int
- zb0456, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- if zb0456 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0456), uint64(maxBitmaskSize))
- return
- }
- zb0455, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- (*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages = bitmask(zb0455)
- }
- case "certrnd":
- var zb0457 int
- var zb0458 bool
- zb0457, zb0458, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0457 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0457), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0458 {
- (*z).encodedCompactCertTxnFields.CertRound = nil
- } else if (*z).encodedCompactCertTxnFields.CertRound != nil && cap((*z).encodedCompactCertTxnFields.CertRound) >= zb0457 {
- (*z).encodedCompactCertTxnFields.CertRound = ((*z).encodedCompactCertTxnFields.CertRound)[:zb0457]
- } else {
- (*z).encodedCompactCertTxnFields.CertRound = make([]basics.Round, zb0457)
- }
- for zb0034 := range (*z).encodedCompactCertTxnFields.CertRound {
- bts, err = (*z).encodedCompactCertTxnFields.CertRound[zb0034].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound", zb0034)
- return
- }
- }
- case "certrndbm":
- {
- var zb0459 []byte
- var zb0460 int
- zb0460, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- if zb0460 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0460), uint64(maxBitmaskSize))
- return
- }
- zb0459, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- (*z).encodedCompactCertTxnFields.BitmaskCertRound = bitmask(zb0459)
- }
- case "certtype":
- var zb0461 int
- var zb0462 bool
- zb0461, zb0462, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0461 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0461), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0462 {
- (*z).encodedCompactCertTxnFields.CertType = nil
- } else if (*z).encodedCompactCertTxnFields.CertType != nil && cap((*z).encodedCompactCertTxnFields.CertType) >= zb0461 {
- (*z).encodedCompactCertTxnFields.CertType = ((*z).encodedCompactCertTxnFields.CertType)[:zb0461]
- } else {
- (*z).encodedCompactCertTxnFields.CertType = make([]protocol.CompactCertType, zb0461)
- }
- for zb0035 := range (*z).encodedCompactCertTxnFields.CertType {
- bts, err = (*z).encodedCompactCertTxnFields.CertType[zb0035].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType", zb0035)
- return
- }
- }
- case "certtypebm":
- {
- var zb0463 []byte
- var zb0464 int
- zb0464, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- if zb0464 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0464), uint64(maxBitmaskSize))
- return
- }
- zb0463, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- (*z).encodedCompactCertTxnFields.BitmaskCertType = bitmask(zb0463)
- }
- case "certc":
- var zb0465 int
- zb0465, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- if zb0465 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0465), uint64(maxAddressBytes))
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedCompactCertTxnFields.encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- case "certcbm":
- {
- var zb0466 []byte
- var zb0467 int
- zb0467, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- if zb0467 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0467), uint64(maxBitmaskSize))
- return
- }
- zb0466, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit = bitmask(zb0466)
- }
- case "certw":
- var zb0468 int
- var zb0469 bool
- zb0468, zb0469, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0468 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0468), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0469 {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight) >= zb0468 {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight = ((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight)[:zb0468]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight = make([]uint64, zb0468)
- }
- for zb0036 := range (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight {
- (*z).encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0036], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight", zb0036)
- return
- }
- }
- case "certwbm":
- {
- var zb0470 []byte
- var zb0471 int
- zb0471, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- if zb0471 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0471), uint64(maxBitmaskSize))
- return
- }
- zb0470, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight = bitmask(zb0470)
- }
- case "certS":
- var zb0472 int
- var zb0473 bool
- zb0472, zb0473, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0472 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0472), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0473 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.SigProofs != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.SigProofs) >= zb0472 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs = ((*z).encodedCompactCertTxnFields.encodedCert.SigProofs)[:zb0472]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs = make([]certProofs, zb0472)
- }
- for zb0037 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs {
- var zb0474 int
- var zb0475 bool
- zb0474, zb0475, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0037)
- return
- }
- if zb0474 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0474), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "SigProofs", zb0037)
- return
- }
- if zb0475 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037]) >= zb0474 {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] = ((*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037])[:zb0474]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] = make(certProofs, zb0474)
- }
- for zb0038 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] {
- bts, err = (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037][zb0038].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0037, zb0038)
- return
- }
- }
- }
- case "certSbm":
- {
- var zb0476 []byte
- var zb0477 int
- zb0477, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- if zb0477 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0477), uint64(maxBitmaskSize))
- return
- }
- zb0476, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs = bitmask(zb0476)
- }
- case "certP":
- var zb0478 int
- var zb0479 bool
- zb0478, zb0479, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0478 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0478), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0479 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.PartProofs != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.PartProofs) >= zb0478 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs = ((*z).encodedCompactCertTxnFields.encodedCert.PartProofs)[:zb0478]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs = make([]certProofs, zb0478)
- }
- for zb0039 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs {
- var zb0480 int
- var zb0481 bool
- zb0480, zb0481, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0039)
- return
- }
- if zb0480 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0480), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "PartProofs", zb0039)
- return
- }
- if zb0481 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039]) >= zb0480 {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] = ((*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039])[:zb0480]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] = make(certProofs, zb0480)
- }
- for zb0040 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] {
- bts, err = (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039][zb0040].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0039, zb0040)
- return
- }
- }
- }
- case "certPbm":
- {
- var zb0482 []byte
- var zb0483 int
- zb0483, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- if zb0483 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0483), uint64(maxBitmaskSize))
- return
- }
- zb0482, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs = bitmask(zb0482)
- }
- case "certr":
- var zb0484 int
- var zb0485 bool
- zb0484, zb0485, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0484 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0484), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0485 {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.Reveals != nil && cap((*z).encodedCompactCertTxnFields.encodedCert.Reveals) >= zb0484 {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals = ((*z).encodedCompactCertTxnFields.encodedCert.Reveals)[:zb0484]
- } else {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals = make([]revealMap, zb0484)
- }
- for zb0041 := range (*z).encodedCompactCertTxnFields.encodedCert.Reveals {
- var zb0486 int
- var zb0487 bool
- zb0486, zb0487, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0041)
- return
- }
- if zb0486 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0486), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "Reveals", zb0041)
- return
- }
- if zb0487 {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] = nil
- } else if (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] == nil {
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] = make(revealMap, zb0486)
- }
- for zb0486 > 0 {
- var zb0042 uint64
- var zb0043 compactcert.Reveal
- zb0486--
- zb0042, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0041)
- return
- }
- bts, err = zb0043.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0041, zb0042)
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041][zb0042] = zb0043
- }
- }
- case "certrbm":
- {
- var zb0488 []byte
- var zb0489 int
- zb0489, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- if zb0489 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0489), uint64(maxBitmaskSize))
- return
- }
- zb0488, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- (*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals = bitmask(zb0488)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedTxns) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedTxns)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedTxns) Msgsize() (s int) {
- s = 3 + 5 + msgp.BytesPrefixSize + len((*z).TxType) + 7 + msgp.BytesPrefixSize + len([]byte((*z).BitmaskTxType)) + 6 + msgp.ByteSize + 4 + msgp.BytesPrefixSize + len((*z).encodedTxnHeaders.Sender) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskSender)) + 4 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).encodedTxnHeaders.Fee {
- s += (*z).encodedTxnHeaders.Fee[zb0001].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskFee)) + 3 + msgp.ArrayHeaderSize
- for zb0002 := range (*z).encodedTxnHeaders.FirstValid {
- s += (*z).encodedTxnHeaders.FirstValid[zb0002].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskFirstValid)) + 3 + msgp.ArrayHeaderSize
- for zb0003 := range (*z).encodedTxnHeaders.LastValid {
- s += (*z).encodedTxnHeaders.LastValid[zb0003].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskLastValid)) + 5 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).encodedTxnHeaders.Note {
- s += msgp.BytesPrefixSize + len((*z).encodedTxnHeaders.Note[zb0004])
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskNote)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskGenesisID)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskGroup)) + 3 + msgp.BytesPrefixSize + len((*z).encodedTxnHeaders.Lease) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskLease)) + 6 + msgp.BytesPrefixSize + len((*z).encodedTxnHeaders.RekeyTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedTxnHeaders.BitmaskRekeyTo)) + 8 + msgp.BytesPrefixSize + len((*z).encodedKeyregTxnFields.VotePK) + 7 + msgp.BytesPrefixSize + len((*z).encodedKeyregTxnFields.SelectionPK) + 8 + msgp.ArrayHeaderSize
- for zb0005 := range (*z).encodedKeyregTxnFields.VoteFirst {
- s += (*z).encodedKeyregTxnFields.VoteFirst[zb0005].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedKeyregTxnFields.BitmaskVoteFirst)) + 8 + msgp.ArrayHeaderSize
- for zb0006 := range (*z).encodedKeyregTxnFields.VoteLast {
- s += (*z).encodedKeyregTxnFields.VoteLast[zb0006].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedKeyregTxnFields.BitmaskVoteLast)) + 7 + msgp.ArrayHeaderSize + (len((*z).encodedKeyregTxnFields.VoteKeyDilution) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedKeyregTxnFields.BitmaskKeys)) + 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedKeyregTxnFields.BitmaskNonparticipation)) + 4 + msgp.BytesPrefixSize + len((*z).encodedPaymentTxnFields.Receiver) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedPaymentTxnFields.BitmaskReceiver)) + 4 + msgp.ArrayHeaderSize
- for zb0008 := range (*z).encodedPaymentTxnFields.Amount {
- s += (*z).encodedPaymentTxnFields.Amount[zb0008].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedPaymentTxnFields.BitmaskAmount)) + 6 + msgp.BytesPrefixSize + len((*z).encodedPaymentTxnFields.CloseRemainderTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo)) + 5 + msgp.ArrayHeaderSize
- for zb0009 := range (*z).encodedAssetConfigTxnFields.ConfigAsset {
- s += (*z).encodedAssetConfigTxnFields.ConfigAsset[zb0009].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.BitmaskConfigAsset)) + 2 + msgp.ArrayHeaderSize + (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total) * (msgp.Uint64Size)) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal)) + 3 + msgp.ArrayHeaderSize + (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals) * (msgp.Uint32Size)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen)) + 3 + msgp.ArrayHeaderSize
- for zb0012 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- s += msgp.StringPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0012])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName)) + 3 + msgp.ArrayHeaderSize
- for zb0013 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- s += msgp.StringPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0013])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName)) + 3 + msgp.ArrayHeaderSize
- for zb0014 := range (*z).encodedAssetConfigTxnFields.encodedAssetParams.URL {
- s += msgp.StringPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0014])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL)) + 3 + msgp.BytesPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze)) + 2 + msgp.BytesPrefixSize + len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback)) + 5 + msgp.ArrayHeaderSize
- for zb0015 := range (*z).encodedAssetTransferTxnFields.XferAsset {
- s += (*z).encodedAssetTransferTxnFields.XferAsset[zb0015].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetTransferTxnFields.BitmaskXferAsset)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedAssetTransferTxnFields.AssetAmount) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetTransferTxnFields.BitmaskAssetAmount)) + 5 + msgp.BytesPrefixSize + len((*z).encodedAssetTransferTxnFields.AssetSender) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetTransferTxnFields.BitmaskAssetSender)) + 5 + msgp.BytesPrefixSize + len((*z).encodedAssetTransferTxnFields.AssetReceiver) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver)) + 7 + msgp.BytesPrefixSize + len((*z).encodedAssetTransferTxnFields.AssetCloseTo) + 9 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo)) + 5 + msgp.BytesPrefixSize + len((*z).encodedAssetFreezeTxnFields.FreezeAccount) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount)) + 5 + msgp.ArrayHeaderSize
- for zb0017 := range (*z).encodedAssetFreezeTxnFields.FreezeAsset {
- s += (*z).encodedAssetFreezeTxnFields.FreezeAsset[zb0017].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen)) + 5 + msgp.ArrayHeaderSize
- for zb0018 := range (*z).encodedApplicationCallTxnFields.ApplicationID {
- s += (*z).encodedApplicationCallTxnFields.ApplicationID[zb0018].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationID)) + 5 + msgp.BytesPrefixSize + len((*z).encodedApplicationCallTxnFields.OnCompletion) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskOnCompletion)) + 5 + msgp.ArrayHeaderSize
- for zb0019 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs {
- s += msgp.ArrayHeaderSize
- for zb0020 := range (*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019] {
- s += msgp.BytesPrefixSize + len((*z).encodedApplicationCallTxnFields.ApplicationArgs[zb0019][zb0020])
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs)) + 5 + msgp.ArrayHeaderSize
- for zb0021 := range (*z).encodedApplicationCallTxnFields.Accounts {
- s += msgp.ArrayHeaderSize
- for zb0022 := range (*z).encodedApplicationCallTxnFields.Accounts[zb0021] {
- s += (*z).encodedApplicationCallTxnFields.Accounts[zb0021][zb0022].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskAccounts)) + 5 + msgp.ArrayHeaderSize
- for zb0023 := range (*z).encodedApplicationCallTxnFields.ForeignApps {
- s += msgp.ArrayHeaderSize
- for zb0024 := range (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023] {
- s += (*z).encodedApplicationCallTxnFields.ForeignApps[zb0023][zb0024].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskForeignApps)) + 5 + msgp.ArrayHeaderSize
- for zb0025 := range (*z).encodedApplicationCallTxnFields.ForeignAssets {
- s += msgp.ArrayHeaderSize
- for zb0026 := range (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025] {
- s += (*z).encodedApplicationCallTxnFields.ForeignAssets[zb0025][zb0026].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskForeignAssets)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedApplicationCallTxnFields.LocalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedApplicationCallTxnFields.LocalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedApplicationCallTxnFields.GlobalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice)) + 5 + msgp.ArrayHeaderSize
- for zb0031 := range (*z).encodedApplicationCallTxnFields.ApprovalProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.ApprovalProgram[zb0031]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram)) + 5 + msgp.ArrayHeaderSize
- for zb0032 := range (*z).encodedApplicationCallTxnFields.ClearStateProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.ClearStateProgram[zb0032]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedApplicationCallTxnFields.ExtraProgramPages) * (msgp.Uint32Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages)) + 8 + msgp.ArrayHeaderSize
- for zb0034 := range (*z).encodedCompactCertTxnFields.CertRound {
- s += (*z).encodedCompactCertTxnFields.CertRound[zb0034].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedCompactCertTxnFields.BitmaskCertRound)) + 9 + msgp.ArrayHeaderSize
- for zb0035 := range (*z).encodedCompactCertTxnFields.CertType {
- s += (*z).encodedCompactCertTxnFields.CertType[zb0035].Msgsize()
- }
- s += 11 + msgp.BytesPrefixSize + len([]byte((*z).encodedCompactCertTxnFields.BitmaskCertType)) + 6 + msgp.BytesPrefixSize + len((*z).encodedCompactCertTxnFields.encodedCert.SigCommit) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit)) + 6 + msgp.ArrayHeaderSize + (len((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight)) + 6 + msgp.ArrayHeaderSize
- for zb0037 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs {
- s += msgp.ArrayHeaderSize
- for zb0038 := range (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037] {
- s += (*z).encodedCompactCertTxnFields.encodedCert.SigProofs[zb0037][zb0038].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0039 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs {
- s += msgp.ArrayHeaderSize
- for zb0040 := range (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039] {
- s += (*z).encodedCompactCertTxnFields.encodedCert.PartProofs[zb0039][zb0040].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0041 := range (*z).encodedCompactCertTxnFields.encodedCert.Reveals {
- s += msgp.MapHeaderSize
- if (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] != nil {
- for zb0042, zb0043 := range (*z).encodedCompactCertTxnFields.encodedCert.Reveals[zb0041] {
- _ = zb0042
- _ = zb0043
- s += 0 + msgp.Uint64Size + zb0043.Msgsize()
- }
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedTxns) MsgIsZero() bool {
- return (len((*z).TxType) == 0) && (len((*z).BitmaskTxType) == 0) && ((*z).TxTypeOffset == 0) && (len((*z).encodedTxnHeaders.Sender) == 0) && (len((*z).encodedTxnHeaders.BitmaskSender) == 0) && (len((*z).encodedTxnHeaders.Fee) == 0) && (len((*z).encodedTxnHeaders.BitmaskFee) == 0) && (len((*z).encodedTxnHeaders.FirstValid) == 0) && (len((*z).encodedTxnHeaders.BitmaskFirstValid) == 0) && (len((*z).encodedTxnHeaders.LastValid) == 0) && (len((*z).encodedTxnHeaders.BitmaskLastValid) == 0) && (len((*z).encodedTxnHeaders.Note) == 0) && (len((*z).encodedTxnHeaders.BitmaskNote) == 0) && (len((*z).encodedTxnHeaders.BitmaskGenesisID) == 0) && (len((*z).encodedTxnHeaders.BitmaskGroup) == 0) && (len((*z).encodedTxnHeaders.Lease) == 0) && (len((*z).encodedTxnHeaders.BitmaskLease) == 0) && (len((*z).encodedTxnHeaders.RekeyTo) == 0) && (len((*z).encodedTxnHeaders.BitmaskRekeyTo) == 0) && (len((*z).encodedKeyregTxnFields.VotePK) == 0) && (len((*z).encodedKeyregTxnFields.SelectionPK) == 0) && (len((*z).encodedKeyregTxnFields.VoteFirst) == 0) && (len((*z).encodedKeyregTxnFields.BitmaskVoteFirst) == 0) && (len((*z).encodedKeyregTxnFields.VoteLast) == 0) && (len((*z).encodedKeyregTxnFields.BitmaskVoteLast) == 0) && (len((*z).encodedKeyregTxnFields.VoteKeyDilution) == 0) && (len((*z).encodedKeyregTxnFields.BitmaskKeys) == 0) && (len((*z).encodedKeyregTxnFields.BitmaskNonparticipation) == 0) && (len((*z).encodedPaymentTxnFields.Receiver) == 0) && (len((*z).encodedPaymentTxnFields.BitmaskReceiver) == 0) && (len((*z).encodedPaymentTxnFields.Amount) == 0) && (len((*z).encodedPaymentTxnFields.BitmaskAmount) == 0) && (len((*z).encodedPaymentTxnFields.CloseRemainderTo) == 0) && (len((*z).encodedPaymentTxnFields.BitmaskCloseRemainderTo) == 0) && (len((*z).encodedAssetConfigTxnFields.ConfigAsset) == 0) && (len((*z).encodedAssetConfigTxnFields.BitmaskConfigAsset) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Total) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Decimals) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.UnitName) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.AssetName) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.URL) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Manager) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Reserve) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Freeze) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.Clawback) == 0) && (len((*z).encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback) == 0) && (len((*z).encodedAssetTransferTxnFields.XferAsset) == 0) && (len((*z).encodedAssetTransferTxnFields.BitmaskXferAsset) == 0) && (len((*z).encodedAssetTransferTxnFields.AssetAmount) == 0) && (len((*z).encodedAssetTransferTxnFields.BitmaskAssetAmount) == 0) && (len((*z).encodedAssetTransferTxnFields.AssetSender) == 0) && (len((*z).encodedAssetTransferTxnFields.BitmaskAssetSender) == 0) && (len((*z).encodedAssetTransferTxnFields.AssetReceiver) == 0) && (len((*z).encodedAssetTransferTxnFields.BitmaskAssetReceiver) == 0) && (len((*z).encodedAssetTransferTxnFields.AssetCloseTo) == 0) && (len((*z).encodedAssetTransferTxnFields.BitmaskAssetCloseTo) == 0) && (len((*z).encodedAssetFreezeTxnFields.FreezeAccount) == 0) && (len((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAccount) == 0) && (len((*z).encodedAssetFreezeTxnFields.FreezeAsset) == 0) && (len((*z).encodedAssetFreezeTxnFields.BitmaskFreezeAsset) == 0) && (len((*z).encodedAssetFreezeTxnFields.BitmaskAssetFrozen) == 0) && (len((*z).encodedApplicationCallTxnFields.ApplicationID) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskApplicationID) == 0) && (len((*z).encodedApplicationCallTxnFields.OnCompletion) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskOnCompletion) == 0) && (len((*z).encodedApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskApplicationArgs) == 0) && (len((*z).encodedApplicationCallTxnFields.Accounts) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskAccounts) == 0) && (len((*z).encodedApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskForeignApps) == 0) && (len((*z).encodedApplicationCallTxnFields.ForeignAssets) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskForeignAssets) == 0) && (len((*z).encodedApplicationCallTxnFields.LocalNumUint) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskLocalNumUint) == 0) && (len((*z).encodedApplicationCallTxnFields.LocalNumByteSlice) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice) == 0) && (len((*z).encodedApplicationCallTxnFields.GlobalNumUint) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumUint) == 0) && (len((*z).encodedApplicationCallTxnFields.GlobalNumByteSlice) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice) == 0) && (len((*z).encodedApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskApprovalProgram) == 0) && (len((*z).encodedApplicationCallTxnFields.ClearStateProgram) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskClearStateProgram) == 0) && (len((*z).encodedApplicationCallTxnFields.ExtraProgramPages) == 0) && (len((*z).encodedApplicationCallTxnFields.BitmaskExtraProgramPages) == 0) && (len((*z).encodedCompactCertTxnFields.CertRound) == 0) && (len((*z).encodedCompactCertTxnFields.BitmaskCertRound) == 0) && (len((*z).encodedCompactCertTxnFields.CertType) == 0) && (len((*z).encodedCompactCertTxnFields.BitmaskCertType) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.SigCommit) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.SignedWeight) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.SigProofs) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.PartProofs) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.Reveals) == 0) && (len((*z).encodedCompactCertTxnFields.encodedCert.BitmaskReveals) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *packedTransactionGroups) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(3)
- var zb0001Mask uint8 /* 4 bits */
- if (*z).CompressionFormat == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if len((*z).Bytes) == 0 {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if (*z).LenDecompressedBytes == 0 {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendByte(o, (*z).CompressionFormat)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "g"
- o = append(o, 0xa1, 0x67)
- o = msgp.AppendBytes(o, (*z).Bytes)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "l"
- o = append(o, 0xa1, 0x6c)
- o = msgp.AppendUint64(o, (*z).LenDecompressedBytes)
- }
- }
- return
-}
-
-func (_ *packedTransactionGroups) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*packedTransactionGroups)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *packedTransactionGroups) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- var zb0003 int
- zb0003, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Bytes")
- return
- }
- if zb0003 > maxEncodedTransactionGroupBytes {
- err = msgp.ErrOverflow(uint64(zb0003), uint64(maxEncodedTransactionGroupBytes))
- return
- }
- (*z).Bytes, bts, err = msgp.ReadBytesBytes(bts, (*z).Bytes)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Bytes")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).CompressionFormat, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CompressionFormat")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).LenDecompressedBytes, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LenDecompressedBytes")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = packedTransactionGroups{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "g":
- var zb0004 int
- zb0004, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Bytes")
- return
- }
- if zb0004 > maxEncodedTransactionGroupBytes {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(maxEncodedTransactionGroupBytes))
- return
- }
- (*z).Bytes, bts, err = msgp.ReadBytesBytes(bts, (*z).Bytes)
- if err != nil {
- err = msgp.WrapError(err, "Bytes")
- return
- }
- case "c":
- (*z).CompressionFormat, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CompressionFormat")
- return
- }
- case "l":
- (*z).LenDecompressedBytes, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LenDecompressedBytes")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *packedTransactionGroups) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*packedTransactionGroups)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *packedTransactionGroups) Msgsize() (s int) {
- s = 1 + 2 + msgp.BytesPrefixSize + len((*z).Bytes) + 2 + msgp.ByteSize + 2 + msgp.Uint64Size
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *packedTransactionGroups) MsgIsZero() bool {
- return (len((*z).Bytes) == 0) && ((*z).CompressionFormat == 0) && ((*z).LenDecompressedBytes == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z program) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendBytes(o, []byte(z))
- return
-}
-
-func (_ program) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(program)
- if !ok {
- _, ok = (z).(*program)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *program) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var zb0001 []byte
- var zb0002 int
- zb0002, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0002), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0001, bts, err = msgp.ReadBytesBytes(bts, []byte((*z)))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- (*z) = program(zb0001)
- }
- o = bts
- return
-}
-
-func (_ *program) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*program)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z program) Msgsize() (s int) {
- s = msgp.BytesPrefixSize + len([]byte(z))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z program) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *relayedProposal) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(3)
- var zb0001Mask uint8 /* 4 bits */
- if len((*z).RawBytes) == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).Content == 0 {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if (*z).ExcludeProposal.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "b"
- o = append(o, 0xa1, 0x62)
- o = msgp.AppendBytes(o, (*z).RawBytes)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendByte(o, (*z).Content)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "e"
- o = append(o, 0xa1, 0x65)
- o = (*z).ExcludeProposal.MarshalMsg(o)
- }
- }
- return
-}
-
-func (_ *relayedProposal) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*relayedProposal)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *relayedProposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- var zb0003 int
- zb0003, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RawBytes")
- return
- }
- if zb0003 > maxProposalSize {
- err = msgp.ErrOverflow(uint64(zb0003), uint64(maxProposalSize))
- return
- }
- (*z).RawBytes, bts, err = msgp.ReadBytesBytes(bts, (*z).RawBytes)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RawBytes")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).ExcludeProposal.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExcludeProposal")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).Content, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Content")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = relayedProposal{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "b":
- var zb0004 int
- zb0004, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "RawBytes")
- return
- }
- if zb0004 > maxProposalSize {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(maxProposalSize))
- return
- }
- (*z).RawBytes, bts, err = msgp.ReadBytesBytes(bts, (*z).RawBytes)
- if err != nil {
- err = msgp.WrapError(err, "RawBytes")
- return
- }
- case "e":
- bts, err = (*z).ExcludeProposal.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExcludeProposal")
- return
- }
- case "c":
- (*z).Content, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Content")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *relayedProposal) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*relayedProposal)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *relayedProposal) Msgsize() (s int) {
- s = 1 + 2 + msgp.BytesPrefixSize + len((*z).RawBytes) + 2 + (*z).ExcludeProposal.Msgsize() + 2 + msgp.ByteSize
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *relayedProposal) MsgIsZero() bool {
- return (len((*z).RawBytes) == 0) && ((*z).ExcludeProposal.MsgIsZero()) && ((*z).Content == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *requestParams) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(2)
- var zb0001Mask uint8 /* 3 bits */
- if (*z).Modulator == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).Offset == 0 {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendByte(o, (*z).Modulator)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "o"
- o = append(o, 0xa1, 0x6f)
- o = msgp.AppendByte(o, (*z).Offset)
- }
- }
- return
-}
-
-func (_ *requestParams) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*requestParams)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *requestParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- (*z).Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Offset")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- (*z).Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Modulator")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = requestParams{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "o":
- (*z).Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Offset")
- return
- }
- case "m":
- (*z).Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Modulator")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *requestParams) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*requestParams)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *requestParams) Msgsize() (s int) {
- s = 1 + 2 + msgp.ByteSize + 2 + msgp.ByteSize
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *requestParams) MsgIsZero() bool {
- return ((*z).Offset == 0) && ((*z).Modulator == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z revealMap) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- if z == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len(z)))
- }
- za0002_keys := make([]uint64, 0, len(z))
- for za0002 := range z {
- za0002_keys = append(za0002_keys, za0002)
- }
- sort.Sort(SortUint64(za0002_keys))
- for _, za0002 := range za0002_keys {
- za0003 := z[za0002]
- _ = za0003
- o = msgp.AppendUint64(o, za0002)
- o = za0003.MarshalMsg(o)
- }
- return
-}
-
-func (_ revealMap) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(revealMap)
- if !ok {
- _, ok = (z).(*revealMap)
- }
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *revealMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0003), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err)
- return
- }
- if zb0004 {
- (*z) = nil
- } else if (*z) == nil {
- (*z) = make(revealMap, zb0003)
- }
- for zb0003 > 0 {
- var zb0001 uint64
- var zb0002 compactcert.Reveal
- zb0003--
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- bts, err = zb0002.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, zb0001)
- return
- }
- (*z)[zb0001] = zb0002
- }
- o = bts
- return
-}
-
-func (_ *revealMap) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*revealMap)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z revealMap) Msgsize() (s int) {
- s = msgp.MapHeaderSize
- if z != nil {
- for za0002, za0003 := range z {
- _ = za0002
- _ = za0003
- s += 0 + msgp.Uint64Size + za0003.Msgsize()
- }
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z revealMap) MsgIsZero() bool {
- return len(z) == 0
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *timingParams) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0002Len := uint32(4)
- var zb0002Mask uint8 /* 5 bits */
- if len((*z).AcceptedMsgSeq) == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
- }
- if (*z).NextMsgMinDelay == 0 {
- zb0002Len--
- zb0002Mask |= 0x4
- }
- if (*z).ResponseElapsedTime == 0 {
- zb0002Len--
- zb0002Mask |= 0x8
- }
- if (*z).RefTxnBlockMsgSeq == 0 {
- zb0002Len--
- zb0002Mask |= 0x10
- }
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if zb0002Len != 0 {
- if (zb0002Mask & 0x2) == 0 { // if not empty
- // string "a"
- o = append(o, 0xa1, 0x61)
- if (*z).AcceptedMsgSeq == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).AcceptedMsgSeq)))
- }
- for zb0001 := range (*z).AcceptedMsgSeq {
- o = msgp.AppendUint64(o, (*z).AcceptedMsgSeq[zb0001])
- }
- }
- if (zb0002Mask & 0x4) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendUint64(o, (*z).NextMsgMinDelay)
- }
- if (zb0002Mask & 0x8) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- o = msgp.AppendUint64(o, (*z).ResponseElapsedTime)
- }
- if (zb0002Mask & 0x10) == 0 { // if not empty
- // string "s"
- o = append(o, 0xa1, 0x73)
- o = msgp.AppendUint64(o, (*z).RefTxnBlockMsgSeq)
- }
- }
- return
-}
-
-func (_ *timingParams) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*timingParams)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *timingParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > 0 {
- zb0002--
- (*z).RefTxnBlockMsgSeq, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RefTxnBlockMsgSeq")
- return
- }
- }
- if zb0002 > 0 {
- zb0002--
- (*z).ResponseElapsedTime, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ResponseElapsedTime")
- return
- }
- }
- if zb0002 > 0 {
- zb0002--
- var zb0004 int
- var zb0005 bool
- zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AcceptedMsgSeq")
- return
- }
- if zb0004 > maxAcceptedMsgSeq {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(maxAcceptedMsgSeq))
- err = msgp.WrapError(err, "struct-from-array", "AcceptedMsgSeq")
- return
- }
- if zb0005 {
- (*z).AcceptedMsgSeq = nil
- } else if (*z).AcceptedMsgSeq != nil && cap((*z).AcceptedMsgSeq) >= zb0004 {
- (*z).AcceptedMsgSeq = ((*z).AcceptedMsgSeq)[:zb0004]
- } else {
- (*z).AcceptedMsgSeq = make([]uint64, zb0004)
- }
- for zb0001 := range (*z).AcceptedMsgSeq {
- (*z).AcceptedMsgSeq[zb0001], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AcceptedMsgSeq", zb0001)
- return
- }
- }
- }
- if zb0002 > 0 {
- zb0002--
- (*z).NextMsgMinDelay, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "NextMsgMinDelay")
- return
- }
- }
- if zb0002 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0002)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = timingParams{}
- }
- for zb0002 > 0 {
- zb0002--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "s":
- (*z).RefTxnBlockMsgSeq, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "RefTxnBlockMsgSeq")
- return
- }
- case "r":
- (*z).ResponseElapsedTime, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ResponseElapsedTime")
- return
- }
- case "a":
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AcceptedMsgSeq")
- return
- }
- if zb0006 > maxAcceptedMsgSeq {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(maxAcceptedMsgSeq))
- err = msgp.WrapError(err, "AcceptedMsgSeq")
- return
- }
- if zb0007 {
- (*z).AcceptedMsgSeq = nil
- } else if (*z).AcceptedMsgSeq != nil && cap((*z).AcceptedMsgSeq) >= zb0006 {
- (*z).AcceptedMsgSeq = ((*z).AcceptedMsgSeq)[:zb0006]
- } else {
- (*z).AcceptedMsgSeq = make([]uint64, zb0006)
- }
- for zb0001 := range (*z).AcceptedMsgSeq {
- (*z).AcceptedMsgSeq[zb0001], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AcceptedMsgSeq", zb0001)
- return
- }
- }
- case "m":
- (*z).NextMsgMinDelay, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "NextMsgMinDelay")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *timingParams) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*timingParams)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *timingParams) Msgsize() (s int) {
- s = 1 + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.ArrayHeaderSize + (len((*z).AcceptedMsgSeq) * (msgp.Uint64Size)) + 2 + msgp.Uint64Size
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *timingParams) MsgIsZero() bool {
- return ((*z).RefTxnBlockMsgSeq == 0) && ((*z).ResponseElapsedTime == 0) && (len((*z).AcceptedMsgSeq) == 0) && ((*z).NextMsgMinDelay == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *transactionBlockMessage) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(7)
- var zb0001Mask uint8 /* 8 bits */
- if (*z).TxnBloomFilter.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).TransactionGroups.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- if ((*z).UpdatedRequestParams.Offset == 0) && ((*z).UpdatedRequestParams.Modulator == 0) {
- zb0001Len--
- zb0001Mask |= 0x8
- }
- if (*z).Round.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x10
- }
- if (*z).RelayedProposal.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x20
- }
- if (*z).MsgSync.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x40
- }
- if (*z).Version == 0 {
- zb0001Len--
- zb0001Mask |= 0x80
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "b"
- o = append(o, 0xa1, 0x62)
- o = (*z).TxnBloomFilter.MarshalMsg(o)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "g"
- o = append(o, 0xa1, 0x67)
- o = (*z).TransactionGroups.MarshalMsg(o)
- }
- if (zb0001Mask & 0x8) == 0 { // if not empty
- // string "p"
- o = append(o, 0xa1, 0x70)
- // omitempty: check for empty values
- zb0002Len := uint32(2)
- var zb0002Mask uint8 /* 3 bits */
- if (*z).UpdatedRequestParams.Modulator == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
- }
- if (*z).UpdatedRequestParams.Offset == 0 {
- zb0002Len--
- zb0002Mask |= 0x4
- }
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if (zb0002Mask & 0x2) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendByte(o, (*z).UpdatedRequestParams.Modulator)
- }
- if (zb0002Mask & 0x4) == 0 { // if not empty
- // string "o"
- o = append(o, 0xa1, 0x6f)
- o = msgp.AppendByte(o, (*z).UpdatedRequestParams.Offset)
- }
- }
- if (zb0001Mask & 0x10) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- o = (*z).Round.MarshalMsg(o)
- }
- if (zb0001Mask & 0x20) == 0 { // if not empty
- // string "rp"
- o = append(o, 0xa2, 0x72, 0x70)
- o = (*z).RelayedProposal.MarshalMsg(o)
- }
- if (zb0001Mask & 0x40) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- o = (*z).MsgSync.MarshalMsg(o)
- }
- if (zb0001Mask & 0x80) == 0 { // if not empty
- // string "v"
- o = append(o, 0xa1, 0x76)
- o = msgp.AppendInt32(o, (*z).Version)
- }
- }
- return
-}
-
-func (_ *transactionBlockMessage) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*transactionBlockMessage)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *transactionBlockMessage) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- (*z).Version, bts, err = msgp.ReadInt32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Version")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Round.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Round")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).TxnBloomFilter.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxnBloomFilter")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams")
- return
- }
- if zb0003 > 0 {
- zb0003--
- (*z).UpdatedRequestParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams", "struct-from-array", "Offset")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- (*z).UpdatedRequestParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams", "struct-from-array", "Modulator")
- return
- }
- }
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams", "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams")
- return
- }
- if zb0004 {
- (*z).UpdatedRequestParams = requestParams{}
- }
- for zb0003 > 0 {
- zb0003--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams")
- return
- }
- switch string(field) {
- case "o":
- (*z).UpdatedRequestParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams", "Offset")
- return
- }
- case "m":
- (*z).UpdatedRequestParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams", "Modulator")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UpdatedRequestParams")
- return
- }
- }
- }
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).TransactionGroups.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TransactionGroups")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).MsgSync.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MsgSync")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).RelayedProposal.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RelayedProposal")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = transactionBlockMessage{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "v":
- (*z).Version, bts, err = msgp.ReadInt32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Version")
- return
- }
- case "r":
- bts, err = (*z).Round.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Round")
- return
- }
- case "b":
- bts, err = (*z).TxnBloomFilter.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxnBloomFilter")
- return
- }
- case "p":
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams")
- return
- }
- if zb0005 > 0 {
- zb0005--
- (*z).UpdatedRequestParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams", "struct-from-array", "Offset")
- return
- }
- }
- if zb0005 > 0 {
- zb0005--
- (*z).UpdatedRequestParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams", "struct-from-array", "Modulator")
- return
- }
- }
- if zb0005 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0005)
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams", "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams")
- return
- }
- if zb0006 {
- (*z).UpdatedRequestParams = requestParams{}
- }
- for zb0005 > 0 {
- zb0005--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams")
- return
- }
- switch string(field) {
- case "o":
- (*z).UpdatedRequestParams.Offset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams", "Offset")
- return
- }
- case "m":
- (*z).UpdatedRequestParams.Modulator, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams", "Modulator")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "UpdatedRequestParams")
- return
- }
- }
- }
- }
- case "g":
- bts, err = (*z).TransactionGroups.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "TransactionGroups")
- return
- }
- case "t":
- bts, err = (*z).MsgSync.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "MsgSync")
- return
- }
- case "rp":
- bts, err = (*z).RelayedProposal.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "RelayedProposal")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *transactionBlockMessage) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*transactionBlockMessage)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *transactionBlockMessage) Msgsize() (s int) {
- s = 1 + 2 + msgp.Int32Size + 2 + (*z).Round.Msgsize() + 2 + (*z).TxnBloomFilter.Msgsize() + 2 + 1 + 2 + msgp.ByteSize + 2 + msgp.ByteSize + 2 + (*z).TransactionGroups.Msgsize() + 2 + (*z).MsgSync.Msgsize() + 3 + (*z).RelayedProposal.Msgsize()
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *transactionBlockMessage) MsgIsZero() bool {
- return ((*z).Version == 0) && ((*z).Round.MsgIsZero()) && ((*z).TxnBloomFilter.MsgIsZero()) && (((*z).UpdatedRequestParams.Offset == 0) && ((*z).UpdatedRequestParams.Modulator == 0)) && ((*z).TransactionGroups.MsgIsZero()) && ((*z).MsgSync.MsgIsZero()) && ((*z).RelayedProposal.MsgIsZero())
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *txGroupsEncodingStub) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0049Len := uint32(129)
- var zb0049Mask [3]uint64 /* 144 bits */
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x10000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x20000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x40000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x80000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x100000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x200000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x400000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x800000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x1000000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x2000000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x4000000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound) == 0 {
- zb0049Len--
- zb0049Mask[0] |= 0x8000000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000000
- }
- if len((*z).encodedSignedTxns.encodedLsigs.LogicArgs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000000
- }
- if len((*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000000
- }
- if len((*z).encodedSignedTxns.encodedLsigs.Logic) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000000
- }
- if len((*z).encodedSignedTxns.encodedLsigs.BitmaskLogic) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000000
- }
- if len((*z).encodedSignedTxns.encodedMsigs.Threshold) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000000
- }
- if len((*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000000000
- }
- if len((*z).encodedSignedTxns.encodedMsigs.Version) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000000000
- }
- if len((*z).encodedSignedTxns.encodedMsigs.BitmaskVersion) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x10000000000000
- }
- if len((*z).encodedSignedTxns.AuthAddr) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x20000000000000
- }
- if len((*z).encodedSignedTxns.BitmaskAuthAddr) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x40000000000000
- }
- if len((*z).encodedSignedTxns.Sig) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x80000000000000
- }
- if len((*z).encodedSignedTxns.BitmaskSig) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x100000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x200000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x400000000000000
- }
- if len((*z).encodedSignedTxns.encodedMsigs.Subsigs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x800000000000000
- }
- if len((*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x1000000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x2000000000000000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal) == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x4000000000000000
- }
- if (*z).TransactionGroupCount == 0 {
- zb0049Len--
- zb0049Mask[1] |= 0x8000000000000000
- }
- if len((*z).TransactionGroupSizes) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x1
- }
- if (*z).TotalTransactionsCount == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x2
- }
- if len((*z).encodedSignedTxns.encodedTxns.TxType) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x4
- }
- if len((*z).encodedSignedTxns.encodedTxns.BitmaskTxType) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x8
- }
- if (*z).encodedSignedTxns.encodedTxns.TxTypeOffset == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x10
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x20
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x40
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x80
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x100
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x200
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x400
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x800
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x1000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x2000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x4000
- }
- if len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset) == 0 {
- zb0049Len--
- zb0049Mask[2] |= 0x8000
- }
- // variable map header, size zb0049Len
- o = msgp.AppendMapHeader(o, zb0049Len)
- if zb0049Len != 0 {
- if (zb0049Mask[0] & 0x8000) == 0 { // if not empty
- // string "aamt"
- o = append(o, 0xa4, 0x61, 0x61, 0x6d, 0x74)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount)))
- }
- for zb0021 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount[zb0021])
- }
- }
- if (zb0049Mask[0] & 0x10000) == 0 { // if not empty
- // string "aamtbm"
- o = append(o, 0xa6, 0x61, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount))
- }
- if (zb0049Mask[0] & 0x20000) == 0 { // if not empty
- // string "aclose"
- o = append(o, 0xa6, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo)
- }
- if (zb0049Mask[0] & 0x40000) == 0 { // if not empty
- // string "aclosebm"
- o = append(o, 0xa8, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- }
- if (zb0049Mask[0] & 0x80000) == 0 { // if not empty
- // string "afrzbm"
- o = append(o, 0xa6, 0x61, 0x66, 0x72, 0x7a, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- }
- if (zb0049Mask[0] & 0x100000) == 0 { // if not empty
- // string "am"
- o = append(o, 0xa2, 0x61, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- }
- if (zb0049Mask[0] & 0x200000) == 0 { // if not empty
- // string "ambm"
- o = append(o, 0xa4, 0x61, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- }
- if (zb0049Mask[0] & 0x400000) == 0 { // if not empty
- // string "amt"
- o = append(o, 0xa3, 0x61, 0x6d, 0x74)
- if (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount)))
- }
- for zb0013 := range (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount {
- o = (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount[zb0013].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x800000) == 0 { // if not empty
- // string "amtbm"
- o = append(o, 0xa5, 0x61, 0x6d, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount))
- }
- if (zb0049Mask[0] & 0x1000000) == 0 { // if not empty
- // string "an"
- o = append(o, 0xa2, 0x61, 0x6e)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName)))
- }
- for zb0018 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- o = msgp.AppendString(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018])
- }
- }
- if (zb0049Mask[0] & 0x2000000) == 0 { // if not empty
- // string "anbm"
- o = append(o, 0xa4, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- }
- if (zb0049Mask[0] & 0x4000000) == 0 { // if not empty
- // string "apaa"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs)))
- }
- for zb0024 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024])))
- }
- for zb0025 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- }
- }
- }
- if (zb0049Mask[0] & 0x8000000) == 0 { // if not empty
- // string "apaabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- }
- if (zb0049Mask[0] & 0x10000000) == 0 { // if not empty
- // string "apan"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion)
- }
- if (zb0049Mask[0] & 0x20000000) == 0 { // if not empty
- // string "apanbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion))
- }
- if (zb0049Mask[0] & 0x40000000) == 0 { // if not empty
- // string "apap"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram)))
- }
- for zb0036 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- }
- }
- if (zb0049Mask[0] & 0x80000000) == 0 { // if not empty
- // string "apapbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- }
- if (zb0049Mask[0] & 0x100000000) == 0 { // if not empty
- // string "apas"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets)))
- }
- for zb0030 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030])))
- }
- for zb0031 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- o = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x200000000) == 0 { // if not empty
- // string "apasbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets))
- }
- if (zb0049Mask[0] & 0x400000000) == 0 { // if not empty
- // string "apat"
- o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts)))
- }
- for zb0026 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts {
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026])))
- }
- for zb0027 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- o = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x800000000) == 0 { // if not empty
- // string "apatbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x61, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts))
- }
- if (zb0049Mask[0] & 0x1000000000) == 0 { // if not empty
- // string "apep"
- o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages)))
- }
- for zb0038 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages {
- o = msgp.AppendUint32(o, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages[zb0038])
- }
- }
- if (zb0049Mask[0] & 0x2000000000) == 0 { // if not empty
- // string "apepbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x65, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- }
- if (zb0049Mask[0] & 0x4000000000) == 0 { // if not empty
- // string "apfa"
- o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps)))
- }
- for zb0028 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028])))
- }
- for zb0029 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- o = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x8000000000) == 0 { // if not empty
- // string "apfabm"
- o = append(o, 0xa6, 0x61, 0x70, 0x66, 0x61, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps))
- }
- if (zb0049Mask[0] & 0x10000000000) == 0 { // if not empty
- // string "apid"
- o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID)))
- }
- for zb0023 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- o = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x20000000000) == 0 { // if not empty
- // string "apidbm"
- o = append(o, 0xa6, 0x61, 0x70, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID))
- }
- if (zb0049Mask[0] & 0x40000000000) == 0 { // if not empty
- // string "apsu"
- o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram)))
- }
- for zb0037 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- }
- }
- if (zb0049Mask[0] & 0x80000000000) == 0 { // if not empty
- // string "apsubm"
- o = append(o, 0xa6, 0x61, 0x70, 0x73, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- }
- if (zb0049Mask[0] & 0x100000000000) == 0 { // if not empty
- // string "arcv"
- o = append(o, 0xa4, 0x61, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver)
- }
- if (zb0049Mask[0] & 0x200000000000) == 0 { // if not empty
- // string "arcvbm"
- o = append(o, 0xa6, 0x61, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- }
- if (zb0049Mask[0] & 0x400000000000) == 0 { // if not empty
- // string "asnd"
- o = append(o, 0xa4, 0x61, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender)
- }
- if (zb0049Mask[0] & 0x800000000000) == 0 { // if not empty
- // string "asndbm"
- o = append(o, 0xa6, 0x61, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender))
- }
- if (zb0049Mask[0] & 0x1000000000000) == 0 { // if not empty
- // string "au"
- o = append(o, 0xa2, 0x61, 0x75)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL)))
- }
- for zb0019 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- o = msgp.AppendString(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019])
- }
- }
- if (zb0049Mask[0] & 0x2000000000000) == 0 { // if not empty
- // string "aubm"
- o = append(o, 0xa4, 0x61, 0x75, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- }
- if (zb0049Mask[0] & 0x4000000000000) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- }
- if (zb0049Mask[0] & 0x8000000000000) == 0 { // if not empty
- // string "caid"
- o = append(o, 0xa4, 0x63, 0x61, 0x69, 0x64)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset)))
- }
- for zb0014 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- o = (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x10000000000000) == 0 { // if not empty
- // string "caidbm"
- o = append(o, 0xa6, 0x63, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset))
- }
- if (zb0049Mask[0] & 0x20000000000000) == 0 { // if not empty
- // string "cbm"
- o = append(o, 0xa3, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- }
- if (zb0049Mask[0] & 0x40000000000000) == 0 { // if not empty
- // string "certP"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x50)
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs)))
- }
- for zb0044 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044])))
- }
- for zb0045 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- o = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x80000000000000) == 0 { // if not empty
- // string "certPbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x50, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- }
- if (zb0049Mask[0] & 0x100000000000000) == 0 { // if not empty
- // string "certS"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x53)
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs)))
- }
- for zb0042 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042])))
- }
- for zb0043 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- o = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x200000000000000) == 0 { // if not empty
- // string "certSbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x53, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- }
- if (zb0049Mask[0] & 0x400000000000000) == 0 { // if not empty
- // string "certc"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x63)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit)
- }
- if (zb0049Mask[0] & 0x800000000000000) == 0 { // if not empty
- // string "certcbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- }
- if (zb0049Mask[0] & 0x1000000000000000) == 0 { // if not empty
- // string "certr"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x72)
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals)))
- }
- for zb0046 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046])))
- }
- zb0047_keys := make([]uint64, 0, len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046]))
- for zb0047 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] {
- zb0047_keys = append(zb0047_keys, zb0047)
- }
- sort.Sort(SortUint64(zb0047_keys))
- for _, zb0047 := range zb0047_keys {
- zb0048 := (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046][zb0047]
- _ = zb0048
- o = msgp.AppendUint64(o, zb0047)
- o = zb0048.MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[0] & 0x2000000000000000) == 0 { // if not empty
- // string "certrbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- }
- if (zb0049Mask[0] & 0x4000000000000000) == 0 { // if not empty
- // string "certrnd"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64)
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound)))
- }
- for zb0039 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound {
- o = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].MarshalMsg(o)
- }
- }
- if (zb0049Mask[0] & 0x8000000000000000) == 0 { // if not empty
- // string "certrndbm"
- o = append(o, 0xa9, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound))
- }
- if (zb0049Mask[1] & 0x1) == 0 { // if not empty
- // string "certtype"
- o = append(o, 0xa8, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65)
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType)))
- }
- for zb0040 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType {
- o = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x2) == 0 { // if not empty
- // string "certtypebm"
- o = append(o, 0xaa, 0x63, 0x65, 0x72, 0x74, 0x74, 0x79, 0x70, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType))
- }
- if (zb0049Mask[1] & 0x4) == 0 { // if not empty
- // string "certw"
- o = append(o, 0xa5, 0x63, 0x65, 0x72, 0x74, 0x77)
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight)))
- }
- for zb0041 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0041])
- }
- }
- if (zb0049Mask[1] & 0x8) == 0 { // if not empty
- // string "certwbm"
- o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x77, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- }
- if (zb0049Mask[1] & 0x10) == 0 { // if not empty
- // string "close"
- o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo)
- }
- if (zb0049Mask[1] & 0x20) == 0 { // if not empty
- // string "closebm"
- o = append(o, 0xa7, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- }
- if (zb0049Mask[1] & 0x40) == 0 { // if not empty
- // string "dc"
- o = append(o, 0xa2, 0x64, 0x63)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals)))
- }
- for zb0016 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- o = msgp.AppendUint32(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0016])
- }
- }
- if (zb0049Mask[1] & 0x80) == 0 { // if not empty
- // string "dcbm"
- o = append(o, 0xa4, 0x64, 0x63, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- }
- if (zb0049Mask[1] & 0x100) == 0 { // if not empty
- // string "dfbm"
- o = append(o, 0xa4, 0x64, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- }
- if (zb0049Mask[1] & 0x200) == 0 { // if not empty
- // string "f"
- o = append(o, 0xa1, 0x66)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- }
- if (zb0049Mask[1] & 0x400) == 0 { // if not empty
- // string "fadd"
- o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount)
- }
- if (zb0049Mask[1] & 0x800) == 0 { // if not empty
- // string "faddbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x64, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- }
- if (zb0049Mask[1] & 0x1000) == 0 { // if not empty
- // string "faid"
- o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset)))
- }
- for zb0022 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- o = (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x2000) == 0 { // if not empty
- // string "faidbm"
- o = append(o, 0xa6, 0x66, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- }
- if (zb0049Mask[1] & 0x4000) == 0 { // if not empty
- // string "fbm"
- o = append(o, 0xa3, 0x66, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- }
- if (zb0049Mask[1] & 0x8000) == 0 { // if not empty
- // string "fee"
- o = append(o, 0xa3, 0x66, 0x65, 0x65)
- if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee)))
- }
- for zb0006 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee {
- o = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee[zb0006].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x10000) == 0 { // if not empty
- // string "feebm"
- o = append(o, 0xa5, 0x66, 0x65, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee))
- }
- if (zb0049Mask[1] & 0x20000) == 0 { // if not empty
- // string "fv"
- o = append(o, 0xa2, 0x66, 0x76)
- if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid)))
- }
- for zb0007 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid {
- o = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid[zb0007].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x40000) == 0 { // if not empty
- // string "fvbm"
- o = append(o, 0xa4, 0x66, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid))
- }
- if (zb0049Mask[1] & 0x80000) == 0 { // if not empty
- // string "genbm"
- o = append(o, 0xa5, 0x67, 0x65, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID))
- }
- if (zb0049Mask[1] & 0x100000) == 0 { // if not empty
- // string "gnbs"
- o = append(o, 0xa4, 0x67, 0x6e, 0x62, 0x73)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice)))
- }
- for zb0035 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0035])
- }
- }
- if (zb0049Mask[1] & 0x200000) == 0 { // if not empty
- // string "gnbsbm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- }
- if (zb0049Mask[1] & 0x400000) == 0 { // if not empty
- // string "gnui"
- o = append(o, 0xa4, 0x67, 0x6e, 0x75, 0x69)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint)))
- }
- for zb0034 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint[zb0034])
- }
- }
- if (zb0049Mask[1] & 0x800000) == 0 { // if not empty
- // string "gnuibm"
- o = append(o, 0xa6, 0x67, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- }
- if (zb0049Mask[1] & 0x1000000) == 0 { // if not empty
- // string "grpbm"
- o = append(o, 0xa5, 0x67, 0x72, 0x70, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup))
- }
- if (zb0049Mask[1] & 0x2000000) == 0 { // if not empty
- // string "lnbs"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x62, 0x73)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice)))
- }
- for zb0033 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice[zb0033])
- }
- }
- if (zb0049Mask[1] & 0x4000000) == 0 { // if not empty
- // string "lnbsbm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x62, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- }
- if (zb0049Mask[1] & 0x8000000) == 0 { // if not empty
- // string "lnui"
- o = append(o, 0xa4, 0x6c, 0x6e, 0x75, 0x69)
- if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint)))
- }
- for zb0032 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint[zb0032])
- }
- }
- if (zb0049Mask[1] & 0x10000000) == 0 { // if not empty
- // string "lnuibm"
- o = append(o, 0xa6, 0x6c, 0x6e, 0x75, 0x69, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- }
- if (zb0049Mask[1] & 0x20000000) == 0 { // if not empty
- // string "lsigarg"
- o = append(o, 0xa7, 0x6c, 0x73, 0x69, 0x67, 0x61, 0x72, 0x67)
- if (*z).encodedSignedTxns.encodedLsigs.LogicArgs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedLsigs.LogicArgs)))
- }
- for zb0004 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs {
- if (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004])))
- }
- for zb0005 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] {
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004][zb0005])
- }
- }
- }
- if (zb0049Mask[1] & 0x40000000) == 0 { // if not empty
- // string "lsigargbm"
- o = append(o, 0xa9, 0x6c, 0x73, 0x69, 0x67, 0x61, 0x72, 0x67, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs))
- }
- if (zb0049Mask[1] & 0x80000000) == 0 { // if not empty
- // string "lsigl"
- o = append(o, 0xa5, 0x6c, 0x73, 0x69, 0x67, 0x6c)
- if (*z).encodedSignedTxns.encodedLsigs.Logic == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedLsigs.Logic)))
- }
- for zb0003 := range (*z).encodedSignedTxns.encodedLsigs.Logic {
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedLsigs.Logic[zb0003])
- }
- }
- if (zb0049Mask[1] & 0x100000000) == 0 { // if not empty
- // string "lsiglbm"
- o = append(o, 0xa7, 0x6c, 0x73, 0x69, 0x67, 0x6c, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogic))
- }
- if (zb0049Mask[1] & 0x200000000) == 0 { // if not empty
- // string "lv"
- o = append(o, 0xa2, 0x6c, 0x76)
- if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid)))
- }
- for zb0008 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid {
- o = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid[zb0008].MarshalMsg(o)
- }
- }
- if (zb0049Mask[1] & 0x400000000) == 0 { // if not empty
- // string "lvbm"
- o = append(o, 0xa4, 0x6c, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid))
- }
- if (zb0049Mask[1] & 0x800000000) == 0 { // if not empty
- // string "lx"
- o = append(o, 0xa2, 0x6c, 0x78)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease)
- }
- if (zb0049Mask[1] & 0x1000000000) == 0 { // if not empty
- // string "lxbm"
- o = append(o, 0xa4, 0x6c, 0x78, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease))
- }
- if (zb0049Mask[1] & 0x2000000000) == 0 { // if not empty
- // string "m"
- o = append(o, 0xa1, 0x6d)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- }
- if (zb0049Mask[1] & 0x4000000000) == 0 { // if not empty
- // string "mbm"
- o = append(o, 0xa3, 0x6d, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- }
- if (zb0049Mask[1] & 0x8000000000) == 0 { // if not empty
- // string "msigthr"
- o = append(o, 0xa7, 0x6d, 0x73, 0x69, 0x67, 0x74, 0x68, 0x72)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedMsigs.Threshold)
- }
- if (zb0049Mask[1] & 0x10000000000) == 0 { // if not empty
- // string "msigthrbm"
- o = append(o, 0xa9, 0x6d, 0x73, 0x69, 0x67, 0x74, 0x68, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold))
- }
- if (zb0049Mask[1] & 0x20000000000) == 0 { // if not empty
- // string "msigv"
- o = append(o, 0xa5, 0x6d, 0x73, 0x69, 0x67, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedMsigs.Version)
- }
- if (zb0049Mask[1] & 0x40000000000) == 0 { // if not empty
- // string "msigvbm"
- o = append(o, 0xa7, 0x6d, 0x73, 0x69, 0x67, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskVersion))
- }
- if (zb0049Mask[1] & 0x80000000000) == 0 { // if not empty
- // string "nonpartbm"
- o = append(o, 0xa9, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation))
- }
- if (zb0049Mask[1] & 0x100000000000) == 0 { // if not empty
- // string "note"
- o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65)
- if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note)))
- }
- for zb0009 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note {
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note[zb0009])
- }
- }
- if (zb0049Mask[1] & 0x200000000000) == 0 { // if not empty
- // string "notebm"
- o = append(o, 0xa6, 0x6e, 0x6f, 0x74, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote))
- }
- if (zb0049Mask[1] & 0x400000000000) == 0 { // if not empty
- // string "r"
- o = append(o, 0xa1, 0x72)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- }
- if (zb0049Mask[1] & 0x800000000000) == 0 { // if not empty
- // string "rbm"
- o = append(o, 0xa3, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- }
- if (zb0049Mask[1] & 0x1000000000000) == 0 { // if not empty
- // string "rcv"
- o = append(o, 0xa3, 0x72, 0x63, 0x76)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver)
- }
- if (zb0049Mask[1] & 0x2000000000000) == 0 { // if not empty
- // string "rcvbm"
- o = append(o, 0xa5, 0x72, 0x63, 0x76, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver))
- }
- if (zb0049Mask[1] & 0x4000000000000) == 0 { // if not empty
- // string "rekey"
- o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo)
- }
- if (zb0049Mask[1] & 0x8000000000000) == 0 { // if not empty
- // string "rekeybm"
- o = append(o, 0xa7, 0x72, 0x65, 0x6b, 0x65, 0x79, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo))
- }
- if (zb0049Mask[1] & 0x10000000000000) == 0 { // if not empty
- // string "selkey"
- o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK)
- }
- if (zb0049Mask[1] & 0x20000000000000) == 0 { // if not empty
- // string "sgnr"
- o = append(o, 0xa4, 0x73, 0x67, 0x6e, 0x72)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.AuthAddr)
- }
- if (zb0049Mask[1] & 0x40000000000000) == 0 { // if not empty
- // string "sgnrbm"
- o = append(o, 0xa6, 0x73, 0x67, 0x6e, 0x72, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.BitmaskAuthAddr))
- }
- if (zb0049Mask[1] & 0x80000000000000) == 0 { // if not empty
- // string "sig"
- o = append(o, 0xa3, 0x73, 0x69, 0x67)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.Sig)
- }
- if (zb0049Mask[1] & 0x100000000000000) == 0 { // if not empty
- // string "sigbm"
- o = append(o, 0xa5, 0x73, 0x69, 0x67, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.BitmaskSig))
- }
- if (zb0049Mask[1] & 0x200000000000000) == 0 { // if not empty
- // string "snd"
- o = append(o, 0xa3, 0x73, 0x6e, 0x64)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender)
- }
- if (zb0049Mask[1] & 0x400000000000000) == 0 { // if not empty
- // string "sndbm"
- o = append(o, 0xa5, 0x73, 0x6e, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender))
- }
- if (zb0049Mask[1] & 0x800000000000000) == 0 { // if not empty
- // string "subsig"
- o = append(o, 0xa6, 0x73, 0x75, 0x62, 0x73, 0x69, 0x67)
- if (*z).encodedSignedTxns.encodedMsigs.Subsigs == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedMsigs.Subsigs)))
- }
- for zb0001 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs {
- if (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001])))
- }
- for zb0002 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] {
- o = (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001][zb0002].MarshalMsg(o)
- }
- }
- }
- if (zb0049Mask[1] & 0x1000000000000000) == 0 { // if not empty
- // string "subsigsbm"
- o = append(o, 0xa9, 0x73, 0x75, 0x62, 0x73, 0x69, 0x67, 0x73, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs))
- }
- if (zb0049Mask[1] & 0x2000000000000000) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total)))
- }
- for zb0015 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0015])
- }
- }
- if (zb0049Mask[1] & 0x4000000000000000) == 0 { // if not empty
- // string "tbm"
- o = append(o, 0xa3, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- }
- if (zb0049Mask[1] & 0x8000000000000000) == 0 { // if not empty
- // string "tgc"
- o = append(o, 0xa3, 0x74, 0x67, 0x63)
- o = msgp.AppendUint64(o, (*z).TransactionGroupCount)
- }
- if (zb0049Mask[2] & 0x1) == 0 { // if not empty
- // string "tgs"
- o = append(o, 0xa3, 0x74, 0x67, 0x73)
- o = msgp.AppendBytes(o, (*z).TransactionGroupSizes)
- }
- if (zb0049Mask[2] & 0x2) == 0 { // if not empty
- // string "ttc"
- o = append(o, 0xa3, 0x74, 0x74, 0x63)
- o = msgp.AppendUint64(o, (*z).TotalTransactionsCount)
- }
- if (zb0049Mask[2] & 0x4) == 0 { // if not empty
- // string "type"
- o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.TxType)
- }
- if (zb0049Mask[2] & 0x8) == 0 { // if not empty
- // string "typebm"
- o = append(o, 0xa6, 0x74, 0x79, 0x70, 0x65, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.BitmaskTxType))
- }
- if (zb0049Mask[2] & 0x10) == 0 { // if not empty
- // string "typeo"
- o = append(o, 0xa5, 0x74, 0x79, 0x70, 0x65, 0x6f)
- o = msgp.AppendByte(o, (*z).encodedSignedTxns.encodedTxns.TxTypeOffset)
- }
- if (zb0049Mask[2] & 0x20) == 0 { // if not empty
- // string "un"
- o = append(o, 0xa2, 0x75, 0x6e)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName)))
- }
- for zb0017 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- o = msgp.AppendString(o, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017])
- }
- }
- if (zb0049Mask[2] & 0x40) == 0 { // if not empty
- // string "unbm"
- o = append(o, 0xa4, 0x75, 0x6e, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- }
- if (zb0049Mask[2] & 0x80) == 0 { // if not empty
- // string "votefst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74)
- if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst)))
- }
- for zb0010 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst {
- o = (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].MarshalMsg(o)
- }
- }
- if (zb0049Mask[2] & 0x100) == 0 { // if not empty
- // string "votefstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst))
- }
- if (zb0049Mask[2] & 0x200) == 0 { // if not empty
- // string "votekbm"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys))
- }
- if (zb0049Mask[2] & 0x400) == 0 { // if not empty
- // string "votekd"
- o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64)
- if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution)))
- }
- for zb0012 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution {
- o = msgp.AppendUint64(o, (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution[zb0012])
- }
- }
- if (zb0049Mask[2] & 0x800) == 0 { // if not empty
- // string "votekey"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79)
- o = msgp.AppendBytes(o, (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK)
- }
- if (zb0049Mask[2] & 0x1000) == 0 { // if not empty
- // string "votelst"
- o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74)
- if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast)))
- }
- for zb0011 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast {
- o = (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].MarshalMsg(o)
- }
- }
- if (zb0049Mask[2] & 0x2000) == 0 { // if not empty
- // string "votelstbm"
- o = append(o, 0xa9, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast))
- }
- if (zb0049Mask[2] & 0x4000) == 0 { // if not empty
- // string "xaid"
- o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64)
- if (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset)))
- }
- for zb0020 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- o = (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].MarshalMsg(o)
- }
- }
- if (zb0049Mask[2] & 0x8000) == 0 { // if not empty
- // string "xaidbm"
- o = append(o, 0xa6, 0x78, 0x61, 0x69, 0x64, 0x62, 0x6d)
- o = msgp.AppendBytes(o, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset))
- }
- }
- return
-}
-
-func (_ *txGroupsEncodingStub) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*txGroupsEncodingStub)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *txGroupsEncodingStub) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0049 int
- var zb0050 bool
- zb0049, zb0050, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0049, zb0050, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0049 > 0 {
- zb0049--
- (*z).TotalTransactionsCount, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TotalTransactionsCount")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- (*z).TransactionGroupCount, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TransactionGroupCount")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0051 int
- zb0051, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TransactionGroupSizes")
- return
- }
- if zb0051 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0051), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).TransactionGroupSizes, bts, err = msgp.ReadBytesBytes(bts, (*z).TransactionGroupSizes)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TransactionGroupSizes")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0052 int
- zb0052, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sig")
- return
- }
- if zb0052 > maxSignatureBytes {
- err = msgp.ErrOverflow(uint64(zb0052), uint64(maxSignatureBytes))
- return
- }
- (*z).encodedSignedTxns.Sig, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.Sig)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sig")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0053 []byte
- var zb0054 int
- zb0054, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSig")
- return
- }
- if zb0054 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0054), uint64(maxBitmaskSize))
- return
- }
- zb0053, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.BitmaskSig))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSig")
- return
- }
- (*z).encodedSignedTxns.BitmaskSig = bitmask(zb0053)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0055 int
- zb0055, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Version")
- return
- }
- if zb0055 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0055), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.Version, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedMsigs.Version)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Version")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0056 []byte
- var zb0057 int
- zb0057, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVersion")
- return
- }
- if zb0057 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0057), uint64(maxBitmaskSize))
- return
- }
- zb0056, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskVersion))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVersion")
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.BitmaskVersion = bitmask(zb0056)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0058 int
- zb0058, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Threshold")
- return
- }
- if zb0058 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0058), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.Threshold, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedMsigs.Threshold)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Threshold")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0059 []byte
- var zb0060 int
- zb0060, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskThreshold")
- return
- }
- if zb0060 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0060), uint64(maxBitmaskSize))
- return
- }
- zb0059, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskThreshold")
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold = bitmask(zb0059)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0061 int
- var zb0062 bool
- zb0061, zb0062, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs")
- return
- }
- if zb0061 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0061), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Subsigs")
- return
- }
- if zb0062 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs = nil
- } else if (*z).encodedSignedTxns.encodedMsigs.Subsigs != nil && cap((*z).encodedSignedTxns.encodedMsigs.Subsigs) >= zb0061 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs = ((*z).encodedSignedTxns.encodedMsigs.Subsigs)[:zb0061]
- } else {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs = make([][]crypto.MultisigSubsig, zb0061)
- }
- for zb0001 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs {
- var zb0063 int
- var zb0064 bool
- zb0063, zb0064, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001)
- return
- }
- if zb0063 > crypto.MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0063), uint64(crypto.MaxMultisig))
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001)
- return
- }
- if zb0064 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] = nil
- } else if (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] != nil && cap((*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001]) >= zb0063 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] = ((*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001])[:zb0063]
- } else {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] = make([]crypto.MultisigSubsig, zb0063)
- }
- for zb0002 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] {
- bts, err = (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001][zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Subsigs", zb0001, zb0002)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0065 []byte
- var zb0066 int
- zb0066, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSubsigs")
- return
- }
- if zb0066 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0066), uint64(maxBitmaskSize))
- return
- }
- zb0065, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSubsigs")
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs = bitmask(zb0065)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0067 int
- var zb0068 bool
- zb0067, zb0068, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic")
- return
- }
- if zb0067 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0067), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Logic")
- return
- }
- if zb0068 {
- (*z).encodedSignedTxns.encodedLsigs.Logic = nil
- } else if (*z).encodedSignedTxns.encodedLsigs.Logic != nil && cap((*z).encodedSignedTxns.encodedLsigs.Logic) >= zb0067 {
- (*z).encodedSignedTxns.encodedLsigs.Logic = ((*z).encodedSignedTxns.encodedLsigs.Logic)[:zb0067]
- } else {
- (*z).encodedSignedTxns.encodedLsigs.Logic = make([][]byte, zb0067)
- }
- for zb0003 := range (*z).encodedSignedTxns.encodedLsigs.Logic {
- var zb0069 int
- zb0069, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic", zb0003)
- return
- }
- if zb0069 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0069), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.Logic[zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedLsigs.Logic[zb0003])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logic", zb0003)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0070 []byte
- var zb0071 int
- zb0071, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogic")
- return
- }
- if zb0071 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0071), uint64(maxBitmaskSize))
- return
- }
- zb0070, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogic))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogic")
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.BitmaskLogic = bitmask(zb0070)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0072 int
- var zb0073 bool
- zb0072, zb0073, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs")
- return
- }
- if zb0072 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0072), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs")
- return
- }
- if zb0073 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs = nil
- } else if (*z).encodedSignedTxns.encodedLsigs.LogicArgs != nil && cap((*z).encodedSignedTxns.encodedLsigs.LogicArgs) >= zb0072 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs = ((*z).encodedSignedTxns.encodedLsigs.LogicArgs)[:zb0072]
- } else {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs = make([][][]byte, zb0072)
- }
- for zb0004 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs {
- var zb0074 int
- var zb0075 bool
- zb0074, zb0075, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004)
- return
- }
- if zb0074 > transactions.EvalMaxArgs {
- err = msgp.ErrOverflow(uint64(zb0074), uint64(transactions.EvalMaxArgs))
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004)
- return
- }
- if zb0075 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] = nil
- } else if (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] != nil && cap((*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004]) >= zb0074 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] = ((*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004])[:zb0074]
- } else {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] = make([][]byte, zb0074)
- }
- for zb0005 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] {
- var zb0076 int
- zb0076, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004, zb0005)
- return
- }
- if zb0076 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0076), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004][zb0005], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004][zb0005])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LogicArgs", zb0004, zb0005)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0077 []byte
- var zb0078 int
- zb0078, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogicArgs")
- return
- }
- if zb0078 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0078), uint64(maxBitmaskSize))
- return
- }
- zb0077, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLogicArgs")
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs = bitmask(zb0077)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0079 int
- zb0079, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AuthAddr")
- return
- }
- if zb0079 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0079), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.AuthAddr, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.AuthAddr)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AuthAddr")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0080 []byte
- var zb0081 int
- zb0081, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAuthAddr")
- return
- }
- if zb0081 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0081), uint64(maxBitmaskSize))
- return
- }
- zb0080, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.BitmaskAuthAddr))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAuthAddr")
- return
- }
- (*z).encodedSignedTxns.BitmaskAuthAddr = bitmask(zb0080)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0082 int
- zb0082, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxType")
- return
- }
- if zb0082 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0082), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.TxType, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.TxType)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxType")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0083 []byte
- var zb0084 int
- zb0084, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTxType")
- return
- }
- if zb0084 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0084), uint64(maxBitmaskSize))
- return
- }
- zb0083, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.BitmaskTxType))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTxType")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.BitmaskTxType = bitmask(zb0083)
- }
- }
- if zb0049 > 0 {
- zb0049--
- (*z).encodedSignedTxns.encodedTxns.TxTypeOffset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxTypeOffset")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0085 int
- zb0085, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- if zb0085 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0085), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Sender")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0086 []byte
- var zb0087 int
- zb0087, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- if zb0087 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0087), uint64(maxBitmaskSize))
- return
- }
- zb0086, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSender")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender = bitmask(zb0086)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0088 int
- var zb0089 bool
- zb0088, zb0089, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0088 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0088), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Fee")
- return
- }
- if zb0089 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee) >= zb0088 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee)[:zb0088]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee = make([]basics.MicroAlgos, zb0088)
- }
- for zb0006 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee[zb0006].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Fee", zb0006)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0090 []byte
- var zb0091 int
- zb0091, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- if zb0091 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0091), uint64(maxBitmaskSize))
- return
- }
- zb0090, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFee")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee = bitmask(zb0090)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0092 int
- var zb0093 bool
- zb0092, zb0093, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0092 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0092), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FirstValid")
- return
- }
- if zb0093 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid) >= zb0092 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid)[:zb0092]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid = make([]basics.Round, zb0092)
- }
- for zb0007 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid[zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FirstValid", zb0007)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0094 []byte
- var zb0095 int
- zb0095, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- if zb0095 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0095), uint64(maxBitmaskSize))
- return
- }
- zb0094, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFirstValid")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid = bitmask(zb0094)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0096 int
- var zb0097 bool
- zb0096, zb0097, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0096 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0096), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LastValid")
- return
- }
- if zb0097 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid) >= zb0096 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid)[:zb0096]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid = make([]basics.Round, zb0096)
- }
- for zb0008 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid[zb0008].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LastValid", zb0008)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0098 []byte
- var zb0099 int
- zb0099, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- if zb0099 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0099), uint64(maxBitmaskSize))
- return
- }
- zb0098, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLastValid")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid = bitmask(zb0098)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0100 int
- var zb0101 bool
- zb0100, zb0101, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0100 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0100), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Note")
- return
- }
- if zb0101 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note) >= zb0100 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note)[:zb0100]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note = make([][]byte, zb0100)
- }
- for zb0009 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note {
- var zb0102 int
- zb0102, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0009)
- return
- }
- if zb0102 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0102), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note[zb0009], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note[zb0009])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Note", zb0009)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0103 []byte
- var zb0104 int
- zb0104, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- if zb0104 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0104), uint64(maxBitmaskSize))
- return
- }
- zb0103, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNote")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote = bitmask(zb0103)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0105 []byte
- var zb0106 int
- zb0106, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- if zb0106 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0106), uint64(maxBitmaskSize))
- return
- }
- zb0105, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGenesisID")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID = bitmask(zb0105)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0107 []byte
- var zb0108 int
- zb0108, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- if zb0108 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0108), uint64(maxBitmaskSize))
- return
- }
- zb0107, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGroup")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup = bitmask(zb0107)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0109 int
- zb0109, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- if zb0109 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0109), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Lease")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0110 []byte
- var zb0111 int
- zb0111, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- if zb0111 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0111), uint64(maxBitmaskSize))
- return
- }
- zb0110, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLease")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease = bitmask(zb0110)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0112 int
- zb0112, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- if zb0112 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0112), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0113 []byte
- var zb0114 int
- zb0114, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- if zb0114 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0114), uint64(maxBitmaskSize))
- return
- }
- zb0113, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskRekeyTo")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo = bitmask(zb0113)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0115 int
- zb0115, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- if zb0115 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0115), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VotePK")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0116 int
- zb0116, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- if zb0116 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0116), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0117 int
- var zb0118 bool
- zb0117, zb0118, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0117 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0117), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
- return
- }
- if zb0118 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst) >= zb0117 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst = ((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst)[:zb0117]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst = make([]basics.Round, zb0117)
- }
- for zb0010 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteFirst", zb0010)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0119 []byte
- var zb0120 int
- zb0120, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- if zb0120 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0120), uint64(maxBitmaskSize))
- return
- }
- zb0119, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteFirst")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst = bitmask(zb0119)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0121 int
- var zb0122 bool
- zb0121, zb0122, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0121 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0121), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteLast")
- return
- }
- if zb0122 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast) >= zb0121 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast = ((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast)[:zb0121]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast = make([]basics.Round, zb0121)
- }
- for zb0011 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteLast", zb0011)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0123 []byte
- var zb0124 int
- zb0124, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- if zb0124 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0124), uint64(maxBitmaskSize))
- return
- }
- zb0123, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskVoteLast")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast = bitmask(zb0123)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0125 int
- var zb0126 bool
- zb0125, zb0126, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0125 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0125), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
- return
- }
- if zb0126 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) >= zb0125 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = ((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution)[:zb0125]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = make([]uint64, zb0125)
- }
- for zb0012 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution[zb0012], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution", zb0012)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0127 []byte
- var zb0128 int
- zb0128, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- if zb0128 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0128), uint64(maxBitmaskSize))
- return
- }
- zb0127, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskKeys")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys = bitmask(zb0127)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0129 []byte
- var zb0130 int
- zb0130, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- if zb0130 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0130), uint64(maxBitmaskSize))
- return
- }
- zb0129, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskNonparticipation")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation = bitmask(zb0129)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0131 int
- zb0131, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- if zb0131 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0131), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Receiver")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0132 []byte
- var zb0133 int
- zb0133, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- if zb0133 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0133), uint64(maxBitmaskSize))
- return
- }
- zb0132, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReceiver")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver = bitmask(zb0132)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0134 int
- var zb0135 bool
- zb0134, zb0135, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0134 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0134), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Amount")
- return
- }
- if zb0135 {
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount) >= zb0134 {
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount = ((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount)[:zb0134]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount = make([]basics.MicroAlgos, zb0134)
- }
- for zb0013 := range (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount[zb0013].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Amount", zb0013)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0136 []byte
- var zb0137 int
- zb0137, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- if zb0137 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0137), uint64(maxBitmaskSize))
- return
- }
- zb0136, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAmount")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount = bitmask(zb0136)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0138 int
- zb0138, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- if zb0138 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0138), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0139 []byte
- var zb0140 int
- zb0140, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- if zb0140 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0140), uint64(maxBitmaskSize))
- return
- }
- zb0139, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCloseRemainderTo")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo = bitmask(zb0139)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0141 int
- var zb0142 bool
- zb0141, zb0142, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0141 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0141), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
- return
- }
- if zb0142 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) >= zb0141 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset)[:zb0141]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = make([]basics.AssetIndex, zb0141)
- }
- for zb0014 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ConfigAsset", zb0014)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0143 []byte
- var zb0144 int
- zb0144, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- if zb0144 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0144), uint64(maxBitmaskSize))
- return
- }
- zb0143, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskConfigAsset")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset = bitmask(zb0143)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0145 int
- var zb0146 bool
- zb0145, zb0146, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0145 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0145), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Total")
- return
- }
- if zb0146 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) >= zb0145 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total)[:zb0145]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = make([]uint64, zb0145)
- }
- for zb0015 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0015], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Total", zb0015)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0147 []byte
- var zb0148 int
- zb0148, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- if zb0148 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0148), uint64(maxBitmaskSize))
- return
- }
- zb0147, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskTotal")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal = bitmask(zb0147)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0149 int
- var zb0150 bool
- zb0149, zb0150, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0149 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0149), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Decimals")
- return
- }
- if zb0150 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) >= zb0149 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals)[:zb0149]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = make([]uint32, zb0149)
- }
- for zb0016 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0016], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Decimals", zb0016)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0151 []byte
- var zb0152 int
- zb0152, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- if zb0152 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0152), uint64(maxBitmaskSize))
- return
- }
- zb0151, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDecimals")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals = bitmask(zb0151)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0153 []byte
- var zb0154 int
- zb0154, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- if zb0154 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0154), uint64(maxBitmaskSize))
- return
- }
- zb0153, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0153)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0155 int
- var zb0156 bool
- zb0155, zb0156, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0155 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0155), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "UnitName")
- return
- }
- if zb0156 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) >= zb0155 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName)[:zb0155]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = make([]string, zb0155)
- }
- for zb0017 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "UnitName", zb0017)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0157 []byte
- var zb0158 int
- zb0158, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- if zb0158 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0158), uint64(maxBitmaskSize))
- return
- }
- zb0157, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskUnitName")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName = bitmask(zb0157)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0159 int
- var zb0160 bool
- zb0159, zb0160, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0159 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0159), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetName")
- return
- }
- if zb0160 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) >= zb0159 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName)[:zb0159]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = make([]string, zb0159)
- }
- for zb0018 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetName", zb0018)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0161 []byte
- var zb0162 int
- zb0162, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- if zb0162 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0162), uint64(maxBitmaskSize))
- return
- }
- zb0161, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetName")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName = bitmask(zb0161)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0163 int
- var zb0164 bool
- zb0163, zb0164, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0163 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0163), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "URL")
- return
- }
- if zb0164 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) >= zb0163 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL)[:zb0163]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = make([]string, zb0163)
- }
- for zb0019 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "URL", zb0019)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0165 []byte
- var zb0166 int
- zb0166, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- if zb0166 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0166), uint64(maxBitmaskSize))
- return
- }
- zb0165, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskURL")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL = bitmask(zb0165)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0167 int
- zb0167, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- if zb0167 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0167), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0168 []byte
- var zb0169 int
- zb0169, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- if zb0169 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0169), uint64(maxBitmaskSize))
- return
- }
- zb0168, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskMetadataHash")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash = bitmask(zb0168)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0170 int
- zb0170, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- if zb0170 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0170), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Manager")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0171 []byte
- var zb0172 int
- zb0172, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- if zb0172 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0172), uint64(maxBitmaskSize))
- return
- }
- zb0171, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskManager")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager = bitmask(zb0171)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0173 int
- zb0173, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- if zb0173 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0173), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reserve")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0174 []byte
- var zb0175 int
- zb0175, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- if zb0175 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0175), uint64(maxBitmaskSize))
- return
- }
- zb0174, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReserve")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve = bitmask(zb0174)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0176 int
- zb0176, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- if zb0176 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0176), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Freeze")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0177 []byte
- var zb0178 int
- zb0178, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- if zb0178 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0178), uint64(maxBitmaskSize))
- return
- }
- zb0177, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreeze")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze = bitmask(zb0177)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0179 int
- zb0179, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- if zb0179 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0179), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Clawback")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0180 []byte
- var zb0181 int
- zb0181, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- if zb0181 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0181), uint64(maxBitmaskSize))
- return
- }
- zb0180, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClawback")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback = bitmask(zb0180)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0182 int
- var zb0183 bool
- zb0182, zb0183, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0182 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0182), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "XferAsset")
- return
- }
- if zb0183 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset) >= zb0182 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset = ((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset)[:zb0182]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset = make([]basics.AssetIndex, zb0182)
- }
- for zb0020 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "XferAsset", zb0020)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0184 []byte
- var zb0185 int
- zb0185, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- if zb0185 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0185), uint64(maxBitmaskSize))
- return
- }
- zb0184, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskXferAsset")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset = bitmask(zb0184)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0186 int
- var zb0187 bool
- zb0186, zb0187, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0186 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0186), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
- return
- }
- if zb0187 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount) >= zb0186 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount = ((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount)[:zb0186]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount = make([]uint64, zb0186)
- }
- for zb0021 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount[zb0021], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetAmount", zb0021)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0188 []byte
- var zb0189 int
- zb0189, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- if zb0189 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0189), uint64(maxBitmaskSize))
- return
- }
- zb0188, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetAmount")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount = bitmask(zb0188)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0190 int
- zb0190, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- if zb0190 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0190), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetSender")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0191 []byte
- var zb0192 int
- zb0192, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- if zb0192 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0192), uint64(maxBitmaskSize))
- return
- }
- zb0191, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetSender")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender = bitmask(zb0191)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0193 int
- zb0193, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- if zb0193 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0193), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0194 []byte
- var zb0195 int
- zb0195, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- if zb0195 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0195), uint64(maxBitmaskSize))
- return
- }
- zb0194, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetReceiver")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver = bitmask(zb0194)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0196 int
- zb0196, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- if zb0196 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0196), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0197 []byte
- var zb0198 int
- zb0198, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- if zb0198 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0198), uint64(maxBitmaskSize))
- return
- }
- zb0197, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetCloseTo")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo = bitmask(zb0197)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0199 int
- zb0199, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- if zb0199 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0199), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0200 []byte
- var zb0201 int
- zb0201, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- if zb0201 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0201), uint64(maxBitmaskSize))
- return
- }
- zb0200, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAccount")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount = bitmask(zb0200)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0202 int
- var zb0203 bool
- zb0202, zb0203, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0202 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0202), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
- return
- }
- if zb0203 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) >= zb0202 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = ((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset)[:zb0202]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = make([]basics.AssetIndex, zb0202)
- }
- for zb0022 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "FreezeAsset", zb0022)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0204 []byte
- var zb0205 int
- zb0205, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- if zb0205 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0205), uint64(maxBitmaskSize))
- return
- }
- zb0204, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskFreezeAsset")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset = bitmask(zb0204)
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0206 []byte
- var zb0207 int
- zb0207, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- if zb0207 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0207), uint64(maxBitmaskSize))
- return
- }
- zb0206, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAssetFrozen")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen = bitmask(zb0206)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0208 int
- var zb0209 bool
- zb0208, zb0209, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0208 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0208), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
- return
- }
- if zb0209 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID) >= zb0208 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID)[:zb0208]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID = make([]basics.AppIndex, zb0208)
- }
- for zb0023 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationID", zb0023)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0210 []byte
- var zb0211 int
- zb0211, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- if zb0211 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0211), uint64(maxBitmaskSize))
- return
- }
- zb0210, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationID")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID = bitmask(zb0210)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0212 int
- zb0212, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- if zb0212 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0212), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0213 []byte
- var zb0214 int
- zb0214, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- if zb0214 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0214), uint64(maxBitmaskSize))
- return
- }
- zb0213, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskOnCompletion")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion = bitmask(zb0213)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0215 int
- var zb0216 bool
- zb0215, zb0216, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0215 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0215), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
- return
- }
- if zb0216 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) >= zb0215 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs)[:zb0215]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = make([]applicationArgs, zb0215)
- }
- for zb0024 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- var zb0217 int
- var zb0218 bool
- zb0217, zb0218, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0024)
- return
- }
- if zb0217 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0217), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0024)
- return
- }
- if zb0218 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024]) >= zb0217 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024])[:zb0217]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = make(applicationArgs, zb0217)
- }
- for zb0025 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs", zb0024, zb0025)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0219 []byte
- var zb0220 int
- zb0220, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- if zb0220 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0220), uint64(maxBitmaskSize))
- return
- }
- zb0219, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApplicationArgs")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs = bitmask(zb0219)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0221 int
- var zb0222 bool
- zb0221, zb0222, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0221 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0221), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Accounts")
- return
- }
- if zb0222 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts) >= zb0221 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts)[:zb0221]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts = make([]addresses, zb0221)
- }
- for zb0026 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts {
- var zb0223 int
- var zb0224 bool
- zb0223, zb0224, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0026)
- return
- }
- if zb0223 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0223), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0026)
- return
- }
- if zb0224 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026]) >= zb0223 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026])[:zb0223]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = make(addresses, zb0223)
- }
- for zb0027 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Accounts", zb0026, zb0027)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0225 []byte
- var zb0226 int
- zb0226, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- if zb0226 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0226), uint64(maxBitmaskSize))
- return
- }
- zb0225, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskAccounts")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts = bitmask(zb0225)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0227 int
- var zb0228 bool
- zb0227, zb0228, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0227 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0227), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
- return
- }
- if zb0228 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps) >= zb0227 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps)[:zb0227]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps = make([]appIndices, zb0227)
- }
- for zb0028 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- var zb0229 int
- var zb0230 bool
- zb0229, zb0230, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0028)
- return
- }
- if zb0229 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0229), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0028)
- return
- }
- if zb0230 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028]) >= zb0229 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028])[:zb0229]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = make(appIndices, zb0229)
- }
- for zb0029 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignApps", zb0028, zb0029)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0231 []byte
- var zb0232 int
- zb0232, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- if zb0232 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0232), uint64(maxBitmaskSize))
- return
- }
- zb0231, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignApps")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps = bitmask(zb0231)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0233 int
- var zb0234 bool
- zb0233, zb0234, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0233 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0233), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
- return
- }
- if zb0234 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) >= zb0233 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets)[:zb0233]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = make([]assetIndices, zb0233)
- }
- for zb0030 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- var zb0235 int
- var zb0236 bool
- zb0235, zb0236, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0030)
- return
- }
- if zb0235 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0235), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0030)
- return
- }
- if zb0236 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030]) >= zb0235 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030])[:zb0235]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = make(assetIndices, zb0235)
- }
- for zb0031 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0030, zb0031)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0237 []byte
- var zb0238 int
- zb0238, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- if zb0238 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0238), uint64(maxBitmaskSize))
- return
- }
- zb0237, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskForeignAssets")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets = bitmask(zb0237)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0239 int
- var zb0240 bool
- zb0239, zb0240, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0239 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0239), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint")
- return
- }
- if zb0240 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) >= zb0239 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint)[:zb0239]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = make([]uint64, zb0239)
- }
- for zb0032 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint[zb0032], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumUint", zb0032)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0241 []byte
- var zb0242 int
- zb0242, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- if zb0242 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0242), uint64(maxBitmaskSize))
- return
- }
- zb0241, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumUint")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint = bitmask(zb0241)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0243 int
- var zb0244 bool
- zb0243, zb0244, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0243 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0243), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice")
- return
- }
- if zb0244 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) >= zb0243 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice)[:zb0243]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = make([]uint64, zb0243)
- }
- for zb0033 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice[zb0033], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "LocalNumByteSlice", zb0033)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0245 []byte
- var zb0246 int
- zb0246, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- if zb0246 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0246), uint64(maxBitmaskSize))
- return
- }
- zb0245, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskLocalNumByteSlice")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice = bitmask(zb0245)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0247 int
- var zb0248 bool
- zb0247, zb0248, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0247 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0247), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint")
- return
- }
- if zb0248 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) >= zb0247 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint)[:zb0247]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = make([]uint64, zb0247)
- }
- for zb0034 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint[zb0034], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumUint", zb0034)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0249 []byte
- var zb0250 int
- zb0250, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- if zb0250 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0250), uint64(maxBitmaskSize))
- return
- }
- zb0249, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumUint")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint = bitmask(zb0249)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0251 int
- var zb0252 bool
- zb0251, zb0252, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0251 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0251), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice")
- return
- }
- if zb0252 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) >= zb0251 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice)[:zb0251]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = make([]uint64, zb0251)
- }
- for zb0035 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0035], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "GlobalNumByteSlice", zb0035)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0253 []byte
- var zb0254 int
- zb0254, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0254 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0254), uint64(maxBitmaskSize))
- return
- }
- zb0253, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice = bitmask(zb0253)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0255 int
- var zb0256 bool
- zb0255, zb0256, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0255 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0255), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
- return
- }
- if zb0256 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) >= zb0255 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram)[:zb0255]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = make([]program, zb0255)
- }
- for zb0036 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- {
- var zb0257 []byte
- var zb0258 int
- zb0258, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0036)
- return
- }
- if zb0258 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0258), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0257, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram", zb0036)
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036] = program(zb0257)
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0259 []byte
- var zb0260 int
- zb0260, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- if zb0260 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0260), uint64(maxBitmaskSize))
- return
- }
- zb0259, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskApprovalProgram")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram = bitmask(zb0259)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0261 int
- var zb0262 bool
- zb0261, zb0262, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0261 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0261), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
- return
- }
- if zb0262 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) >= zb0261 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram)[:zb0261]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = make([]program, zb0261)
- }
- for zb0037 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- {
- var zb0263 []byte
- var zb0264 int
- zb0264, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0037)
- return
- }
- if zb0264 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0264), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0263, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram", zb0037)
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037] = program(zb0263)
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0265 []byte
- var zb0266 int
- zb0266, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- if zb0266 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0266), uint64(maxBitmaskSize))
- return
- }
- zb0265, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskClearStateProgram")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram = bitmask(zb0265)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0267 int
- var zb0268 bool
- zb0267, zb0268, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0267 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0267), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
- return
- }
- if zb0268 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) >= zb0267 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages)[:zb0267]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = make([]uint32, zb0267)
- }
- for zb0038 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages[zb0038], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages", zb0038)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0269 []byte
- var zb0270 int
- zb0270, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- if zb0270 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0270), uint64(maxBitmaskSize))
- return
- }
- zb0269, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskExtraProgramPages")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages = bitmask(zb0269)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0271 int
- var zb0272 bool
- zb0271, zb0272, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0271 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0271), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertRound")
- return
- }
- if zb0272 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound) >= zb0271 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound)[:zb0271]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound = make([]basics.Round, zb0271)
- }
- for zb0039 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertRound", zb0039)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0273 []byte
- var zb0274 int
- zb0274, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- if zb0274 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0274), uint64(maxBitmaskSize))
- return
- }
- zb0273, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertRound")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound = bitmask(zb0273)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0275 int
- var zb0276 bool
- zb0275, zb0276, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0275 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0275), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "CertType")
- return
- }
- if zb0276 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType) >= zb0275 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType)[:zb0275]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType = make([]protocol.CompactCertType, zb0275)
- }
- for zb0040 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "CertType", zb0040)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0277 []byte
- var zb0278 int
- zb0278, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- if zb0278 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0278), uint64(maxBitmaskSize))
- return
- }
- zb0277, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskCertType")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType = bitmask(zb0277)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0279 int
- zb0279, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- if zb0279 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0279), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigCommit")
- return
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0280 []byte
- var zb0281 int
- zb0281, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- if zb0281 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0281), uint64(maxBitmaskSize))
- return
- }
- zb0280, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigCommit")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit = bitmask(zb0280)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0282 int
- var zb0283 bool
- zb0282, zb0283, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0282 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0282), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight")
- return
- }
- if zb0283 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) >= zb0282 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight)[:zb0282]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = make([]uint64, zb0282)
- }
- for zb0041 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0041], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SignedWeight", zb0041)
- return
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0284 []byte
- var zb0285 int
- zb0285, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- if zb0285 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0285), uint64(maxBitmaskSize))
- return
- }
- zb0284, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSignedWeight")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight = bitmask(zb0284)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0286 int
- var zb0287 bool
- zb0286, zb0287, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0286 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0286), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs")
- return
- }
- if zb0287 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) >= zb0286 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs)[:zb0286]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = make([]certProofs, zb0286)
- }
- for zb0042 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- var zb0288 int
- var zb0289 bool
- zb0288, zb0289, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0042)
- return
- }
- if zb0288 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0288), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0042)
- return
- }
- if zb0289 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042]) >= zb0288 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042])[:zb0288]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = make(certProofs, zb0288)
- }
- for zb0043 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0042, zb0043)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0290 []byte
- var zb0291 int
- zb0291, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- if zb0291 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0291), uint64(maxBitmaskSize))
- return
- }
- zb0290, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskSigProofs")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs = bitmask(zb0290)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0292 int
- var zb0293 bool
- zb0292, zb0293, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0292 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0292), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs")
- return
- }
- if zb0293 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) >= zb0292 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs)[:zb0292]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = make([]certProofs, zb0292)
- }
- for zb0044 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- var zb0294 int
- var zb0295 bool
- zb0294, zb0295, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0044)
- return
- }
- if zb0294 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0294), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0044)
- return
- }
- if zb0295 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044]) >= zb0294 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044])[:zb0294]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = make(certProofs, zb0294)
- }
- for zb0045 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0044, zb0045)
- return
- }
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0296 []byte
- var zb0297 int
- zb0297, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- if zb0297 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0297), uint64(maxBitmaskSize))
- return
- }
- zb0296, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskPartProofs")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs = bitmask(zb0296)
- }
- }
- if zb0049 > 0 {
- zb0049--
- var zb0298 int
- var zb0299 bool
- zb0298, zb0299, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0298 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0298), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "Reveals")
- return
- }
- if zb0299 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) >= zb0298 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals)[:zb0298]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = make([]revealMap, zb0298)
- }
- for zb0046 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- var zb0300 int
- var zb0301 bool
- zb0300, zb0301, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046)
- return
- }
- if zb0300 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0300), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046)
- return
- }
- if zb0301 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] == nil {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = make(revealMap, zb0300)
- }
- for zb0300 > 0 {
- var zb0047 uint64
- var zb0048 compactcert.Reveal
- zb0300--
- zb0047, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046)
- return
- }
- bts, err = zb0048.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0046, zb0047)
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046][zb0047] = zb0048
- }
- }
- }
- if zb0049 > 0 {
- zb0049--
- {
- var zb0302 []byte
- var zb0303 int
- zb0303, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- if zb0303 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0303), uint64(maxBitmaskSize))
- return
- }
- zb0302, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "BitmaskReveals")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals = bitmask(zb0302)
- }
- }
- if zb0049 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0049)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0050 {
- (*z) = txGroupsEncodingStub{}
- }
- for zb0049 > 0 {
- zb0049--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "ttc":
- (*z).TotalTransactionsCount, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "TotalTransactionsCount")
- return
- }
- case "tgc":
- (*z).TransactionGroupCount, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "TransactionGroupCount")
- return
- }
- case "tgs":
- var zb0304 int
- zb0304, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "TransactionGroupSizes")
- return
- }
- if zb0304 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0304), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).TransactionGroupSizes, bts, err = msgp.ReadBytesBytes(bts, (*z).TransactionGroupSizes)
- if err != nil {
- err = msgp.WrapError(err, "TransactionGroupSizes")
- return
- }
- case "sig":
- var zb0305 int
- zb0305, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Sig")
- return
- }
- if zb0305 > maxSignatureBytes {
- err = msgp.ErrOverflow(uint64(zb0305), uint64(maxSignatureBytes))
- return
- }
- (*z).encodedSignedTxns.Sig, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.Sig)
- if err != nil {
- err = msgp.WrapError(err, "Sig")
- return
- }
- case "sigbm":
- {
- var zb0306 []byte
- var zb0307 int
- zb0307, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSig")
- return
- }
- if zb0307 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0307), uint64(maxBitmaskSize))
- return
- }
- zb0306, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.BitmaskSig))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSig")
- return
- }
- (*z).encodedSignedTxns.BitmaskSig = bitmask(zb0306)
- }
- case "msigv":
- var zb0308 int
- zb0308, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Version")
- return
- }
- if zb0308 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0308), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.Version, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedMsigs.Version)
- if err != nil {
- err = msgp.WrapError(err, "Version")
- return
- }
- case "msigvbm":
- {
- var zb0309 []byte
- var zb0310 int
- zb0310, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVersion")
- return
- }
- if zb0310 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0310), uint64(maxBitmaskSize))
- return
- }
- zb0309, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskVersion))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVersion")
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.BitmaskVersion = bitmask(zb0309)
- }
- case "msigthr":
- var zb0311 int
- zb0311, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Threshold")
- return
- }
- if zb0311 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0311), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.Threshold, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedMsigs.Threshold)
- if err != nil {
- err = msgp.WrapError(err, "Threshold")
- return
- }
- case "msigthrbm":
- {
- var zb0312 []byte
- var zb0313 int
- zb0313, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskThreshold")
- return
- }
- if zb0313 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0313), uint64(maxBitmaskSize))
- return
- }
- zb0312, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskThreshold")
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold = bitmask(zb0312)
- }
- case "subsig":
- var zb0314 int
- var zb0315 bool
- zb0314, zb0315, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs")
- return
- }
- if zb0314 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0314), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Subsigs")
- return
- }
- if zb0315 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs = nil
- } else if (*z).encodedSignedTxns.encodedMsigs.Subsigs != nil && cap((*z).encodedSignedTxns.encodedMsigs.Subsigs) >= zb0314 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs = ((*z).encodedSignedTxns.encodedMsigs.Subsigs)[:zb0314]
- } else {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs = make([][]crypto.MultisigSubsig, zb0314)
- }
- for zb0001 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs {
- var zb0316 int
- var zb0317 bool
- zb0316, zb0317, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs", zb0001)
- return
- }
- if zb0316 > crypto.MaxMultisig {
- err = msgp.ErrOverflow(uint64(zb0316), uint64(crypto.MaxMultisig))
- err = msgp.WrapError(err, "Subsigs", zb0001)
- return
- }
- if zb0317 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] = nil
- } else if (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] != nil && cap((*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001]) >= zb0316 {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] = ((*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001])[:zb0316]
- } else {
- (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] = make([]crypto.MultisigSubsig, zb0316)
- }
- for zb0002 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] {
- bts, err = (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001][zb0002].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Subsigs", zb0001, zb0002)
- return
- }
- }
- }
- case "subsigsbm":
- {
- var zb0318 []byte
- var zb0319 int
- zb0319, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSubsigs")
- return
- }
- if zb0319 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0319), uint64(maxBitmaskSize))
- return
- }
- zb0318, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSubsigs")
- return
- }
- (*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs = bitmask(zb0318)
- }
- case "lsigl":
- var zb0320 int
- var zb0321 bool
- zb0320, zb0321, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Logic")
- return
- }
- if zb0320 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0320), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Logic")
- return
- }
- if zb0321 {
- (*z).encodedSignedTxns.encodedLsigs.Logic = nil
- } else if (*z).encodedSignedTxns.encodedLsigs.Logic != nil && cap((*z).encodedSignedTxns.encodedLsigs.Logic) >= zb0320 {
- (*z).encodedSignedTxns.encodedLsigs.Logic = ((*z).encodedSignedTxns.encodedLsigs.Logic)[:zb0320]
- } else {
- (*z).encodedSignedTxns.encodedLsigs.Logic = make([][]byte, zb0320)
- }
- for zb0003 := range (*z).encodedSignedTxns.encodedLsigs.Logic {
- var zb0322 int
- zb0322, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Logic", zb0003)
- return
- }
- if zb0322 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0322), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.Logic[zb0003], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedLsigs.Logic[zb0003])
- if err != nil {
- err = msgp.WrapError(err, "Logic", zb0003)
- return
- }
- }
- case "lsiglbm":
- {
- var zb0323 []byte
- var zb0324 int
- zb0324, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogic")
- return
- }
- if zb0324 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0324), uint64(maxBitmaskSize))
- return
- }
- zb0323, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogic))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogic")
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.BitmaskLogic = bitmask(zb0323)
- }
- case "lsigarg":
- var zb0325 int
- var zb0326 bool
- zb0325, zb0326, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs")
- return
- }
- if zb0325 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0325), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LogicArgs")
- return
- }
- if zb0326 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs = nil
- } else if (*z).encodedSignedTxns.encodedLsigs.LogicArgs != nil && cap((*z).encodedSignedTxns.encodedLsigs.LogicArgs) >= zb0325 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs = ((*z).encodedSignedTxns.encodedLsigs.LogicArgs)[:zb0325]
- } else {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs = make([][][]byte, zb0325)
- }
- for zb0004 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs {
- var zb0327 int
- var zb0328 bool
- zb0327, zb0328, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0004)
- return
- }
- if zb0327 > transactions.EvalMaxArgs {
- err = msgp.ErrOverflow(uint64(zb0327), uint64(transactions.EvalMaxArgs))
- err = msgp.WrapError(err, "LogicArgs", zb0004)
- return
- }
- if zb0328 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] = nil
- } else if (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] != nil && cap((*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004]) >= zb0327 {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] = ((*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004])[:zb0327]
- } else {
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] = make([][]byte, zb0327)
- }
- for zb0005 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] {
- var zb0329 int
- zb0329, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0004, zb0005)
- return
- }
- if zb0329 > config.MaxLogicSigMaxSize {
- err = msgp.ErrOverflow(uint64(zb0329), uint64(config.MaxLogicSigMaxSize))
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004][zb0005], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004][zb0005])
- if err != nil {
- err = msgp.WrapError(err, "LogicArgs", zb0004, zb0005)
- return
- }
- }
- }
- case "lsigargbm":
- {
- var zb0330 []byte
- var zb0331 int
- zb0331, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogicArgs")
- return
- }
- if zb0331 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0331), uint64(maxBitmaskSize))
- return
- }
- zb0330, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLogicArgs")
- return
- }
- (*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs = bitmask(zb0330)
- }
- case "sgnr":
- var zb0332 int
- zb0332, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AuthAddr")
- return
- }
- if zb0332 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0332), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.AuthAddr, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.AuthAddr)
- if err != nil {
- err = msgp.WrapError(err, "AuthAddr")
- return
- }
- case "sgnrbm":
- {
- var zb0333 []byte
- var zb0334 int
- zb0334, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAuthAddr")
- return
- }
- if zb0334 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0334), uint64(maxBitmaskSize))
- return
- }
- zb0333, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.BitmaskAuthAddr))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAuthAddr")
- return
- }
- (*z).encodedSignedTxns.BitmaskAuthAddr = bitmask(zb0333)
- }
- case "type":
- var zb0335 int
- zb0335, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxType")
- return
- }
- if zb0335 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0335), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.TxType, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.TxType)
- if err != nil {
- err = msgp.WrapError(err, "TxType")
- return
- }
- case "typebm":
- {
- var zb0336 []byte
- var zb0337 int
- zb0337, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTxType")
- return
- }
- if zb0337 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0337), uint64(maxBitmaskSize))
- return
- }
- zb0336, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.BitmaskTxType))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTxType")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.BitmaskTxType = bitmask(zb0336)
- }
- case "typeo":
- (*z).encodedSignedTxns.encodedTxns.TxTypeOffset, bts, err = msgp.ReadByteBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxTypeOffset")
- return
- }
- case "snd":
- var zb0338 int
- zb0338, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- if zb0338 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0338), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender)
- if err != nil {
- err = msgp.WrapError(err, "Sender")
- return
- }
- case "sndbm":
- {
- var zb0339 []byte
- var zb0340 int
- zb0340, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- if zb0340 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0340), uint64(maxBitmaskSize))
- return
- }
- zb0339, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSender")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender = bitmask(zb0339)
- }
- case "fee":
- var zb0341 int
- var zb0342 bool
- zb0341, zb0342, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0341 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0341), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Fee")
- return
- }
- if zb0342 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee) >= zb0341 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee)[:zb0341]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee = make([]basics.MicroAlgos, zb0341)
- }
- for zb0006 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee[zb0006].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Fee", zb0006)
- return
- }
- }
- case "feebm":
- {
- var zb0343 []byte
- var zb0344 int
- zb0344, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- if zb0344 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0344), uint64(maxBitmaskSize))
- return
- }
- zb0343, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFee")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee = bitmask(zb0343)
- }
- case "fv":
- var zb0345 int
- var zb0346 bool
- zb0345, zb0346, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0345 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0345), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FirstValid")
- return
- }
- if zb0346 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid) >= zb0345 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid)[:zb0345]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid = make([]basics.Round, zb0345)
- }
- for zb0007 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid[zb0007].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FirstValid", zb0007)
- return
- }
- }
- case "fvbm":
- {
- var zb0347 []byte
- var zb0348 int
- zb0348, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- if zb0348 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0348), uint64(maxBitmaskSize))
- return
- }
- zb0347, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFirstValid")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid = bitmask(zb0347)
- }
- case "lv":
- var zb0349 int
- var zb0350 bool
- zb0349, zb0350, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0349 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0349), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LastValid")
- return
- }
- if zb0350 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid) >= zb0349 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid)[:zb0349]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid = make([]basics.Round, zb0349)
- }
- for zb0008 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid[zb0008].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "LastValid", zb0008)
- return
- }
- }
- case "lvbm":
- {
- var zb0351 []byte
- var zb0352 int
- zb0352, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- if zb0352 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0352), uint64(maxBitmaskSize))
- return
- }
- zb0351, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLastValid")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid = bitmask(zb0351)
- }
- case "note":
- var zb0353 int
- var zb0354 bool
- zb0353, zb0354, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0353 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0353), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Note")
- return
- }
- if zb0354 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note) >= zb0353 {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note = ((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note)[:zb0353]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note = make([][]byte, zb0353)
- }
- for zb0009 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note {
- var zb0355 int
- zb0355, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0009)
- return
- }
- if zb0355 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0355), uint64(config.MaxTxnNoteBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note[zb0009], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note[zb0009])
- if err != nil {
- err = msgp.WrapError(err, "Note", zb0009)
- return
- }
- }
- case "notebm":
- {
- var zb0356 []byte
- var zb0357 int
- zb0357, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- if zb0357 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0357), uint64(maxBitmaskSize))
- return
- }
- zb0356, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNote")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote = bitmask(zb0356)
- }
- case "genbm":
- {
- var zb0358 []byte
- var zb0359 int
- zb0359, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- if zb0359 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0359), uint64(maxBitmaskSize))
- return
- }
- zb0358, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGenesisID")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID = bitmask(zb0358)
- }
- case "grpbm":
- {
- var zb0360 []byte
- var zb0361 int
- zb0361, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- if zb0361 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0361), uint64(maxBitmaskSize))
- return
- }
- zb0360, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGroup")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup = bitmask(zb0360)
- }
- case "lx":
- var zb0362 int
- zb0362, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- if zb0362 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0362), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease)
- if err != nil {
- err = msgp.WrapError(err, "Lease")
- return
- }
- case "lxbm":
- {
- var zb0363 []byte
- var zb0364 int
- zb0364, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- if zb0364 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0364), uint64(maxBitmaskSize))
- return
- }
- zb0363, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLease")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease = bitmask(zb0363)
- }
- case "rekey":
- var zb0365 int
- zb0365, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- if zb0365 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0365), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo)
- if err != nil {
- err = msgp.WrapError(err, "RekeyTo")
- return
- }
- case "rekeybm":
- {
- var zb0366 []byte
- var zb0367 int
- zb0367, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- if zb0367 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0367), uint64(maxBitmaskSize))
- return
- }
- zb0366, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskRekeyTo")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo = bitmask(zb0366)
- }
- case "votekey":
- var zb0368 int
- zb0368, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- if zb0368 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0368), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK)
- if err != nil {
- err = msgp.WrapError(err, "VotePK")
- return
- }
- case "selkey":
- var zb0369 int
- zb0369, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- if zb0369 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0369), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK)
- if err != nil {
- err = msgp.WrapError(err, "SelectionPK")
- return
- }
- case "votefst":
- var zb0370 int
- var zb0371 bool
- zb0370, zb0371, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0370 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0370), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteFirst")
- return
- }
- if zb0371 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst) >= zb0370 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst = ((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst)[:zb0370]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst = make([]basics.Round, zb0370)
- }
- for zb0010 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteFirst", zb0010)
- return
- }
- }
- case "votefstbm":
- {
- var zb0372 []byte
- var zb0373 int
- zb0373, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- if zb0373 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0373), uint64(maxBitmaskSize))
- return
- }
- zb0372, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteFirst")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst = bitmask(zb0372)
- }
- case "votelst":
- var zb0374 int
- var zb0375 bool
- zb0374, zb0375, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0374 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0374), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteLast")
- return
- }
- if zb0375 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast) >= zb0374 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast = ((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast)[:zb0374]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast = make([]basics.Round, zb0374)
- }
- for zb0011 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteLast", zb0011)
- return
- }
- }
- case "votelstbm":
- {
- var zb0376 []byte
- var zb0377 int
- zb0377, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- if zb0377 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0377), uint64(maxBitmaskSize))
- return
- }
- zb0376, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskVoteLast")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast = bitmask(zb0376)
- }
- case "votekd":
- var zb0378 int
- var zb0379 bool
- zb0378, zb0379, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0378 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0378), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "VoteKeyDilution")
- return
- }
- if zb0379 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) >= zb0378 {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = ((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution)[:zb0378]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution = make([]uint64, zb0378)
- }
- for zb0012 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution {
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution[zb0012], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "VoteKeyDilution", zb0012)
- return
- }
- }
- case "votekbm":
- {
- var zb0380 []byte
- var zb0381 int
- zb0381, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- if zb0381 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0381), uint64(maxBitmaskSize))
- return
- }
- zb0380, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskKeys")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys = bitmask(zb0380)
- }
- case "nonpartbm":
- {
- var zb0382 []byte
- var zb0383 int
- zb0383, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- if zb0383 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0383), uint64(maxBitmaskSize))
- return
- }
- zb0382, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskNonparticipation")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation = bitmask(zb0382)
- }
- case "rcv":
- var zb0384 int
- zb0384, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- if zb0384 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0384), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver)
- if err != nil {
- err = msgp.WrapError(err, "Receiver")
- return
- }
- case "rcvbm":
- {
- var zb0385 []byte
- var zb0386 int
- zb0386, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- if zb0386 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0386), uint64(maxBitmaskSize))
- return
- }
- zb0385, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReceiver")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver = bitmask(zb0385)
- }
- case "amt":
- var zb0387 int
- var zb0388 bool
- zb0387, zb0388, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0387 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0387), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Amount")
- return
- }
- if zb0388 {
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount) >= zb0387 {
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount = ((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount)[:zb0387]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount = make([]basics.MicroAlgos, zb0387)
- }
- for zb0013 := range (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount[zb0013].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Amount", zb0013)
- return
- }
- }
- case "amtbm":
- {
- var zb0389 []byte
- var zb0390 int
- zb0390, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- if zb0390 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0390), uint64(maxBitmaskSize))
- return
- }
- zb0389, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAmount")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount = bitmask(zb0389)
- }
- case "close":
- var zb0391 int
- zb0391, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- if zb0391 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0391), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo)
- if err != nil {
- err = msgp.WrapError(err, "CloseRemainderTo")
- return
- }
- case "closebm":
- {
- var zb0392 []byte
- var zb0393 int
- zb0393, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- if zb0393 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0393), uint64(maxBitmaskSize))
- return
- }
- zb0392, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCloseRemainderTo")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo = bitmask(zb0392)
- }
- case "caid":
- var zb0394 int
- var zb0395 bool
- zb0394, zb0395, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0394 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0394), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ConfigAsset")
- return
- }
- if zb0395 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) >= zb0394 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset)[:zb0394]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset = make([]basics.AssetIndex, zb0394)
- }
- for zb0014 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ConfigAsset", zb0014)
- return
- }
- }
- case "caidbm":
- {
- var zb0396 []byte
- var zb0397 int
- zb0397, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- if zb0397 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0397), uint64(maxBitmaskSize))
- return
- }
- zb0396, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskConfigAsset")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset = bitmask(zb0396)
- }
- case "t":
- var zb0398 int
- var zb0399 bool
- zb0398, zb0399, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0398 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0398), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Total")
- return
- }
- if zb0399 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) >= zb0398 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total)[:zb0398]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total = make([]uint64, zb0398)
- }
- for zb0015 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total[zb0015], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Total", zb0015)
- return
- }
- }
- case "tbm":
- {
- var zb0400 []byte
- var zb0401 int
- zb0401, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- if zb0401 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0401), uint64(maxBitmaskSize))
- return
- }
- zb0400, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskTotal")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal = bitmask(zb0400)
- }
- case "dc":
- var zb0402 int
- var zb0403 bool
- zb0402, zb0403, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0402 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0402), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Decimals")
- return
- }
- if zb0403 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) >= zb0402 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals)[:zb0402]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals = make([]uint32, zb0402)
- }
- for zb0016 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals[zb0016], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Decimals", zb0016)
- return
- }
- }
- case "dcbm":
- {
- var zb0404 []byte
- var zb0405 int
- zb0405, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- if zb0405 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0405), uint64(maxBitmaskSize))
- return
- }
- zb0404, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDecimals")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals = bitmask(zb0404)
- }
- case "dfbm":
- {
- var zb0406 []byte
- var zb0407 int
- zb0407, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- if zb0407 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0407), uint64(maxBitmaskSize))
- return
- }
- zb0406, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskDefaultFrozen")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen = bitmask(zb0406)
- }
- case "un":
- var zb0408 int
- var zb0409 bool
- zb0408, zb0409, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0408 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0408), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "UnitName")
- return
- }
- if zb0409 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) >= zb0408 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName)[:zb0408]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName = make([]string, zb0408)
- }
- for zb0017 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "UnitName", zb0017)
- return
- }
- }
- case "unbm":
- {
- var zb0410 []byte
- var zb0411 int
- zb0411, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- if zb0411 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0411), uint64(maxBitmaskSize))
- return
- }
- zb0410, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskUnitName")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName = bitmask(zb0410)
- }
- case "an":
- var zb0412 int
- var zb0413 bool
- zb0412, zb0413, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0412 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0412), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetName")
- return
- }
- if zb0413 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) >= zb0412 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName)[:zb0412]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName = make([]string, zb0412)
- }
- for zb0018 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetName", zb0018)
- return
- }
- }
- case "anbm":
- {
- var zb0414 []byte
- var zb0415 int
- zb0415, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- if zb0415 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0415), uint64(maxBitmaskSize))
- return
- }
- zb0414, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetName")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName = bitmask(zb0414)
- }
- case "au":
- var zb0416 int
- var zb0417 bool
- zb0416, zb0417, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0416 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0416), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "URL")
- return
- }
- if zb0417 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) >= zb0416 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = ((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL)[:zb0416]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL = make([]string, zb0416)
- }
- for zb0019 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019], bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "URL", zb0019)
- return
- }
- }
- case "aubm":
- {
- var zb0418 []byte
- var zb0419 int
- zb0419, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- if zb0419 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0419), uint64(maxBitmaskSize))
- return
- }
- zb0418, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskURL")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL = bitmask(zb0418)
- }
- case "am":
- var zb0420 int
- zb0420, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- if zb0420 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0420), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash)
- if err != nil {
- err = msgp.WrapError(err, "MetadataHash")
- return
- }
- case "ambm":
- {
- var zb0421 []byte
- var zb0422 int
- zb0422, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- if zb0422 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0422), uint64(maxBitmaskSize))
- return
- }
- zb0421, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskMetadataHash")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash = bitmask(zb0421)
- }
- case "m":
- var zb0423 int
- zb0423, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- if zb0423 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0423), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager)
- if err != nil {
- err = msgp.WrapError(err, "Manager")
- return
- }
- case "mbm":
- {
- var zb0424 []byte
- var zb0425 int
- zb0425, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- if zb0425 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0425), uint64(maxBitmaskSize))
- return
- }
- zb0424, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskManager")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager = bitmask(zb0424)
- }
- case "r":
- var zb0426 int
- zb0426, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- if zb0426 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0426), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve)
- if err != nil {
- err = msgp.WrapError(err, "Reserve")
- return
- }
- case "rbm":
- {
- var zb0427 []byte
- var zb0428 int
- zb0428, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- if zb0428 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0428), uint64(maxBitmaskSize))
- return
- }
- zb0427, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReserve")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve = bitmask(zb0427)
- }
- case "f":
- var zb0429 int
- zb0429, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- if zb0429 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0429), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze)
- if err != nil {
- err = msgp.WrapError(err, "Freeze")
- return
- }
- case "fbm":
- {
- var zb0430 []byte
- var zb0431 int
- zb0431, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- if zb0431 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0431), uint64(maxBitmaskSize))
- return
- }
- zb0430, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreeze")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze = bitmask(zb0430)
- }
- case "c":
- var zb0432 int
- zb0432, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- if zb0432 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0432), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback)
- if err != nil {
- err = msgp.WrapError(err, "Clawback")
- return
- }
- case "cbm":
- {
- var zb0433 []byte
- var zb0434 int
- zb0434, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- if zb0434 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0434), uint64(maxBitmaskSize))
- return
- }
- zb0433, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClawback")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback = bitmask(zb0433)
- }
- case "xaid":
- var zb0435 int
- var zb0436 bool
- zb0435, zb0436, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0435 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0435), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "XferAsset")
- return
- }
- if zb0436 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset) >= zb0435 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset = ((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset)[:zb0435]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset = make([]basics.AssetIndex, zb0435)
- }
- for zb0020 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "XferAsset", zb0020)
- return
- }
- }
- case "xaidbm":
- {
- var zb0437 []byte
- var zb0438 int
- zb0438, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- if zb0438 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0438), uint64(maxBitmaskSize))
- return
- }
- zb0437, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskXferAsset")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset = bitmask(zb0437)
- }
- case "aamt":
- var zb0439 int
- var zb0440 bool
- zb0439, zb0440, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0439 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0439), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "AssetAmount")
- return
- }
- if zb0440 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount) >= zb0439 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount = ((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount)[:zb0439]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount = make([]uint64, zb0439)
- }
- for zb0021 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount[zb0021], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetAmount", zb0021)
- return
- }
- }
- case "aamtbm":
- {
- var zb0441 []byte
- var zb0442 int
- zb0442, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- if zb0442 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0442), uint64(maxBitmaskSize))
- return
- }
- zb0441, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetAmount")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount = bitmask(zb0441)
- }
- case "asnd":
- var zb0443 int
- zb0443, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- if zb0443 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0443), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender)
- if err != nil {
- err = msgp.WrapError(err, "AssetSender")
- return
- }
- case "asndbm":
- {
- var zb0444 []byte
- var zb0445 int
- zb0445, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- if zb0445 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0445), uint64(maxBitmaskSize))
- return
- }
- zb0444, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetSender")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender = bitmask(zb0444)
- }
- case "arcv":
- var zb0446 int
- zb0446, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- if zb0446 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0446), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver)
- if err != nil {
- err = msgp.WrapError(err, "AssetReceiver")
- return
- }
- case "arcvbm":
- {
- var zb0447 []byte
- var zb0448 int
- zb0448, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- if zb0448 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0448), uint64(maxBitmaskSize))
- return
- }
- zb0447, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetReceiver")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver = bitmask(zb0447)
- }
- case "aclose":
- var zb0449 int
- zb0449, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- if zb0449 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0449), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo)
- if err != nil {
- err = msgp.WrapError(err, "AssetCloseTo")
- return
- }
- case "aclosebm":
- {
- var zb0450 []byte
- var zb0451 int
- zb0451, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- if zb0451 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0451), uint64(maxBitmaskSize))
- return
- }
- zb0450, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetCloseTo")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo = bitmask(zb0450)
- }
- case "fadd":
- var zb0452 int
- zb0452, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- if zb0452 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0452), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAccount")
- return
- }
- case "faddbm":
- {
- var zb0453 []byte
- var zb0454 int
- zb0454, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- if zb0454 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0454), uint64(maxBitmaskSize))
- return
- }
- zb0453, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAccount")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount = bitmask(zb0453)
- }
- case "faid":
- var zb0455 int
- var zb0456 bool
- zb0455, zb0456, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0455 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0455), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "FreezeAsset")
- return
- }
- if zb0456 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) >= zb0455 {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = ((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset)[:zb0455]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset = make([]basics.AssetIndex, zb0455)
- }
- for zb0022 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "FreezeAsset", zb0022)
- return
- }
- }
- case "faidbm":
- {
- var zb0457 []byte
- var zb0458 int
- zb0458, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- if zb0458 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0458), uint64(maxBitmaskSize))
- return
- }
- zb0457, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskFreezeAsset")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset = bitmask(zb0457)
- }
- case "afrzbm":
- {
- var zb0459 []byte
- var zb0460 int
- zb0460, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- if zb0460 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0460), uint64(maxBitmaskSize))
- return
- }
- zb0459, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAssetFrozen")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen = bitmask(zb0459)
- }
- case "apid":
- var zb0461 int
- var zb0462 bool
- zb0461, zb0462, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0461 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0461), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationID")
- return
- }
- if zb0462 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID) >= zb0461 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID)[:zb0461]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID = make([]basics.AppIndex, zb0461)
- }
- for zb0023 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationID", zb0023)
- return
- }
- }
- case "apidbm":
- {
- var zb0463 []byte
- var zb0464 int
- zb0464, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- if zb0464 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0464), uint64(maxBitmaskSize))
- return
- }
- zb0463, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationID")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID = bitmask(zb0463)
- }
- case "apan":
- var zb0465 int
- zb0465, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- if zb0465 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0465), uint64(maxEncodedTransactionGroups))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion)
- if err != nil {
- err = msgp.WrapError(err, "OnCompletion")
- return
- }
- case "apanbm":
- {
- var zb0466 []byte
- var zb0467 int
- zb0467, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- if zb0467 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0467), uint64(maxBitmaskSize))
- return
- }
- zb0466, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskOnCompletion")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion = bitmask(zb0466)
- }
- case "apaa":
- var zb0468 int
- var zb0469 bool
- zb0468, zb0469, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0468 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0468), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApplicationArgs")
- return
- }
- if zb0469 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) >= zb0468 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs)[:zb0468]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs = make([]applicationArgs, zb0468)
- }
- for zb0024 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- var zb0470 int
- var zb0471 bool
- zb0470, zb0471, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0024)
- return
- }
- if zb0470 > transactions.EncodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0470), uint64(transactions.EncodedMaxApplicationArgs))
- err = msgp.WrapError(err, "ApplicationArgs", zb0024)
- return
- }
- if zb0471 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024]) >= zb0470 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024])[:zb0470]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] = make(applicationArgs, zb0470)
- }
- for zb0025 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025], bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- if err != nil {
- err = msgp.WrapError(err, "ApplicationArgs", zb0024, zb0025)
- return
- }
- }
- }
- case "apaabm":
- {
- var zb0472 []byte
- var zb0473 int
- zb0473, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- if zb0473 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0473), uint64(maxBitmaskSize))
- return
- }
- zb0472, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApplicationArgs")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs = bitmask(zb0472)
- }
- case "apat":
- var zb0474 int
- var zb0475 bool
- zb0474, zb0475, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0474 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0474), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Accounts")
- return
- }
- if zb0475 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts) >= zb0474 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts)[:zb0474]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts = make([]addresses, zb0474)
- }
- for zb0026 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts {
- var zb0476 int
- var zb0477 bool
- zb0476, zb0477, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0026)
- return
- }
- if zb0476 > transactions.EncodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0476), uint64(transactions.EncodedMaxAccounts))
- err = msgp.WrapError(err, "Accounts", zb0026)
- return
- }
- if zb0477 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026]) >= zb0476 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026])[:zb0476]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] = make(addresses, zb0476)
- }
- for zb0027 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Accounts", zb0026, zb0027)
- return
- }
- }
- }
- case "apatbm":
- {
- var zb0478 []byte
- var zb0479 int
- zb0479, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- if zb0479 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0479), uint64(maxBitmaskSize))
- return
- }
- zb0478, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskAccounts")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts = bitmask(zb0478)
- }
- case "apfa":
- var zb0480 int
- var zb0481 bool
- zb0480, zb0481, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0480 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0480), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignApps")
- return
- }
- if zb0481 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps) >= zb0480 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps)[:zb0480]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps = make([]appIndices, zb0480)
- }
- for zb0028 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- var zb0482 int
- var zb0483 bool
- zb0482, zb0483, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0028)
- return
- }
- if zb0482 > transactions.EncodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0482), uint64(transactions.EncodedMaxForeignApps))
- err = msgp.WrapError(err, "ForeignApps", zb0028)
- return
- }
- if zb0483 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028]) >= zb0482 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028])[:zb0482]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] = make(appIndices, zb0482)
- }
- for zb0029 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignApps", zb0028, zb0029)
- return
- }
- }
- }
- case "apfabm":
- {
- var zb0484 []byte
- var zb0485 int
- zb0485, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- if zb0485 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0485), uint64(maxBitmaskSize))
- return
- }
- zb0484, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignApps")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps = bitmask(zb0484)
- }
- case "apas":
- var zb0486 int
- var zb0487 bool
- zb0486, zb0487, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0486 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0486), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ForeignAssets")
- return
- }
- if zb0487 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) >= zb0486 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets)[:zb0486]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets = make([]assetIndices, zb0486)
- }
- for zb0030 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- var zb0488 int
- var zb0489 bool
- zb0488, zb0489, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0030)
- return
- }
- if zb0488 > transactions.EncodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0488), uint64(transactions.EncodedMaxForeignAssets))
- err = msgp.WrapError(err, "ForeignAssets", zb0030)
- return
- }
- if zb0489 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030]) >= zb0488 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030])[:zb0488]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] = make(assetIndices, zb0488)
- }
- for zb0031 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0030, zb0031)
- return
- }
- }
- }
- case "apasbm":
- {
- var zb0490 []byte
- var zb0491 int
- zb0491, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- if zb0491 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0491), uint64(maxBitmaskSize))
- return
- }
- zb0490, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskForeignAssets")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets = bitmask(zb0490)
- }
- case "lnui":
- var zb0492 int
- var zb0493 bool
- zb0492, zb0493, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0492 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0492), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumUint")
- return
- }
- if zb0493 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) >= zb0492 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint)[:zb0492]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint = make([]uint64, zb0492)
- }
- for zb0032 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint[zb0032], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumUint", zb0032)
- return
- }
- }
- case "lnuibm":
- {
- var zb0494 []byte
- var zb0495 int
- zb0495, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- if zb0495 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0495), uint64(maxBitmaskSize))
- return
- }
- zb0494, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumUint")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint = bitmask(zb0494)
- }
- case "lnbs":
- var zb0496 int
- var zb0497 bool
- zb0496, zb0497, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0496 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0496), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "LocalNumByteSlice")
- return
- }
- if zb0497 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) >= zb0496 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice)[:zb0496]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice = make([]uint64, zb0496)
- }
- for zb0033 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice[zb0033], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "LocalNumByteSlice", zb0033)
- return
- }
- }
- case "lnbsbm":
- {
- var zb0498 []byte
- var zb0499 int
- zb0499, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- if zb0499 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0499), uint64(maxBitmaskSize))
- return
- }
- zb0498, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskLocalNumByteSlice")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice = bitmask(zb0498)
- }
- case "gnui":
- var zb0500 int
- var zb0501 bool
- zb0500, zb0501, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0500 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0500), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumUint")
- return
- }
- if zb0501 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) >= zb0500 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint)[:zb0500]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint = make([]uint64, zb0500)
- }
- for zb0034 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint[zb0034], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumUint", zb0034)
- return
- }
- }
- case "gnuibm":
- {
- var zb0502 []byte
- var zb0503 int
- zb0503, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- if zb0503 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0503), uint64(maxBitmaskSize))
- return
- }
- zb0502, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumUint")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint = bitmask(zb0502)
- }
- case "gnbs":
- var zb0504 int
- var zb0505 bool
- zb0504, zb0505, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0504 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0504), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "GlobalNumByteSlice")
- return
- }
- if zb0505 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) >= zb0504 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice)[:zb0504]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice = make([]uint64, zb0504)
- }
- for zb0035 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice[zb0035], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "GlobalNumByteSlice", zb0035)
- return
- }
- }
- case "gnbsbm":
- {
- var zb0506 []byte
- var zb0507 int
- zb0507, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- if zb0507 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0507), uint64(maxBitmaskSize))
- return
- }
- zb0506, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskGlobalNumByteSlice")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice = bitmask(zb0506)
- }
- case "apap":
- var zb0508 int
- var zb0509 bool
- zb0508, zb0509, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0508 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0508), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ApprovalProgram")
- return
- }
- if zb0509 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) >= zb0508 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram)[:zb0508]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram = make([]program, zb0508)
- }
- for zb0036 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- {
- var zb0510 []byte
- var zb0511 int
- zb0511, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0036)
- return
- }
- if zb0511 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0511), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0510, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- if err != nil {
- err = msgp.WrapError(err, "ApprovalProgram", zb0036)
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036] = program(zb0510)
- }
- }
- case "apapbm":
- {
- var zb0512 []byte
- var zb0513 int
- zb0513, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- if zb0513 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0513), uint64(maxBitmaskSize))
- return
- }
- zb0512, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskApprovalProgram")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram = bitmask(zb0512)
- }
- case "apsu":
- var zb0514 int
- var zb0515 bool
- zb0514, zb0515, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0514 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0514), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ClearStateProgram")
- return
- }
- if zb0515 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) >= zb0514 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram)[:zb0514]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram = make([]program, zb0514)
- }
- for zb0037 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- {
- var zb0516 []byte
- var zb0517 int
- zb0517, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0037)
- return
- }
- if zb0517 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0517), uint64(config.MaxAvailableAppProgramLen))
- return
- }
- zb0516, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- if err != nil {
- err = msgp.WrapError(err, "ClearStateProgram", zb0037)
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037] = program(zb0516)
- }
- }
- case "apsubm":
- {
- var zb0518 []byte
- var zb0519 int
- zb0519, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- if zb0519 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0519), uint64(maxBitmaskSize))
- return
- }
- zb0518, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskClearStateProgram")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram = bitmask(zb0518)
- }
- case "apep":
- var zb0520 int
- var zb0521 bool
- zb0520, zb0521, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0520 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0520), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "ExtraProgramPages")
- return
- }
- if zb0521 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) >= zb0520 {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = ((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages)[:zb0520]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages = make([]uint32, zb0520)
- }
- for zb0038 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages {
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages[zb0038], bts, err = msgp.ReadUint32Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExtraProgramPages", zb0038)
- return
- }
- }
- case "apepbm":
- {
- var zb0522 []byte
- var zb0523 int
- zb0523, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- if zb0523 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0523), uint64(maxBitmaskSize))
- return
- }
- zb0522, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskExtraProgramPages")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages = bitmask(zb0522)
- }
- case "certrnd":
- var zb0524 int
- var zb0525 bool
- zb0524, zb0525, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0524 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0524), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertRound")
- return
- }
- if zb0525 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound) >= zb0524 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound)[:zb0524]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound = make([]basics.Round, zb0524)
- }
- for zb0039 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertRound", zb0039)
- return
- }
- }
- case "certrndbm":
- {
- var zb0526 []byte
- var zb0527 int
- zb0527, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- if zb0527 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0527), uint64(maxBitmaskSize))
- return
- }
- zb0526, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertRound")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound = bitmask(zb0526)
- }
- case "certtype":
- var zb0528 int
- var zb0529 bool
- zb0528, zb0529, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0528 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0528), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "CertType")
- return
- }
- if zb0529 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType) >= zb0528 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType)[:zb0528]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType = make([]protocol.CompactCertType, zb0528)
- }
- for zb0040 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "CertType", zb0040)
- return
- }
- }
- case "certtypebm":
- {
- var zb0530 []byte
- var zb0531 int
- zb0531, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- if zb0531 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0531), uint64(maxBitmaskSize))
- return
- }
- zb0530, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskCertType")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType = bitmask(zb0530)
- }
- case "certc":
- var zb0532 int
- zb0532, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- if zb0532 > maxAddressBytes {
- err = msgp.ErrOverflow(uint64(zb0532), uint64(maxAddressBytes))
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit, bts, err = msgp.ReadBytesBytes(bts, (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit)
- if err != nil {
- err = msgp.WrapError(err, "SigCommit")
- return
- }
- case "certcbm":
- {
- var zb0533 []byte
- var zb0534 int
- zb0534, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- if zb0534 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0534), uint64(maxBitmaskSize))
- return
- }
- zb0533, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigCommit")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit = bitmask(zb0533)
- }
- case "certw":
- var zb0535 int
- var zb0536 bool
- zb0535, zb0536, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0535 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0535), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SignedWeight")
- return
- }
- if zb0536 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) >= zb0535 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight)[:zb0535]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight = make([]uint64, zb0535)
- }
- for zb0041 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight[zb0041], bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SignedWeight", zb0041)
- return
- }
- }
- case "certwbm":
- {
- var zb0537 []byte
- var zb0538 int
- zb0538, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- if zb0538 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0538), uint64(maxBitmaskSize))
- return
- }
- zb0537, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSignedWeight")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight = bitmask(zb0537)
- }
- case "certS":
- var zb0539 int
- var zb0540 bool
- zb0539, zb0540, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0539 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0539), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "SigProofs")
- return
- }
- if zb0540 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) >= zb0539 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs)[:zb0539]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs = make([]certProofs, zb0539)
- }
- for zb0042 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- var zb0541 int
- var zb0542 bool
- zb0541, zb0542, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0042)
- return
- }
- if zb0541 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0541), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "SigProofs", zb0042)
- return
- }
- if zb0542 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042]) >= zb0541 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042])[:zb0541]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] = make(certProofs, zb0541)
- }
- for zb0043 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "SigProofs", zb0042, zb0043)
- return
- }
- }
- }
- case "certSbm":
- {
- var zb0543 []byte
- var zb0544 int
- zb0544, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- if zb0544 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0544), uint64(maxBitmaskSize))
- return
- }
- zb0543, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskSigProofs")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs = bitmask(zb0543)
- }
- case "certP":
- var zb0545 int
- var zb0546 bool
- zb0545, zb0546, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0545 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0545), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "PartProofs")
- return
- }
- if zb0546 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) >= zb0545 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs)[:zb0545]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs = make([]certProofs, zb0545)
- }
- for zb0044 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- var zb0547 int
- var zb0548 bool
- zb0547, zb0548, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0044)
- return
- }
- if zb0547 > compactcert.MaxProofDigests {
- err = msgp.ErrOverflow(uint64(zb0547), uint64(compactcert.MaxProofDigests))
- err = msgp.WrapError(err, "PartProofs", zb0044)
- return
- }
- if zb0548 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044]) >= zb0547 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044])[:zb0547]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] = make(certProofs, zb0547)
- }
- for zb0045 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- bts, err = (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartProofs", zb0044, zb0045)
- return
- }
- }
- }
- case "certPbm":
- {
- var zb0549 []byte
- var zb0550 int
- zb0550, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- if zb0550 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0550), uint64(maxBitmaskSize))
- return
- }
- zb0549, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskPartProofs")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs = bitmask(zb0549)
- }
- case "certr":
- var zb0551 int
- var zb0552 bool
- zb0551, zb0552, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0551 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0551), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "Reveals")
- return
- }
- if zb0552 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals != nil && cap((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) >= zb0551 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = ((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals)[:zb0551]
- } else {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals = make([]revealMap, zb0551)
- }
- for zb0046 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- var zb0553 int
- var zb0554 bool
- zb0553, zb0554, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0046)
- return
- }
- if zb0553 > compactcert.MaxReveals {
- err = msgp.ErrOverflow(uint64(zb0553), uint64(compactcert.MaxReveals))
- err = msgp.WrapError(err, "Reveals", zb0046)
- return
- }
- if zb0554 {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = nil
- } else if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] == nil {
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] = make(revealMap, zb0553)
- }
- for zb0553 > 0 {
- var zb0047 uint64
- var zb0048 compactcert.Reveal
- zb0553--
- zb0047, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0046)
- return
- }
- bts, err = zb0048.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Reveals", zb0046, zb0047)
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046][zb0047] = zb0048
- }
- }
- case "certrbm":
- {
- var zb0555 []byte
- var zb0556 int
- zb0556, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- if zb0556 > maxBitmaskSize {
- err = msgp.ErrOverflow(uint64(zb0556), uint64(maxBitmaskSize))
- return
- }
- zb0555, bts, err = msgp.ReadBytesBytes(bts, []byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- if err != nil {
- err = msgp.WrapError(err, "BitmaskReveals")
- return
- }
- (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals = bitmask(zb0555)
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *txGroupsEncodingStub) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*txGroupsEncodingStub)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *txGroupsEncodingStub) Msgsize() (s int) {
- s = 3 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len((*z).TransactionGroupSizes) + 4 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.Sig) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.BitmaskSig)) + 6 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedMsigs.Version) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedMsigs.BitmaskVersion)) + 8 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedMsigs.Threshold) + 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold)) + 7 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs {
- s += msgp.ArrayHeaderSize
- for zb0002 := range (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001] {
- s += (*z).encodedSignedTxns.encodedMsigs.Subsigs[zb0001][zb0002].Msgsize()
- }
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs)) + 6 + msgp.ArrayHeaderSize
- for zb0003 := range (*z).encodedSignedTxns.encodedLsigs.Logic {
- s += msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedLsigs.Logic[zb0003])
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogic)) + 8 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs {
- s += msgp.ArrayHeaderSize
- for zb0005 := range (*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004] {
- s += msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedLsigs.LogicArgs[zb0004][zb0005])
- }
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs)) + 5 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.AuthAddr) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.BitmaskAuthAddr)) + 5 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.TxType) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.BitmaskTxType)) + 6 + msgp.ByteSize + 4 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender)) + 4 + msgp.ArrayHeaderSize
- for zb0006 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee {
- s += (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee[zb0006].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee)) + 3 + msgp.ArrayHeaderSize
- for zb0007 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid {
- s += (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid[zb0007].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid)) + 3 + msgp.ArrayHeaderSize
- for zb0008 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid {
- s += (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid[zb0008].Msgsize()
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid)) + 5 + msgp.ArrayHeaderSize
- for zb0009 := range (*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note {
- s += msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note[zb0009])
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID)) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup)) + 3 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease)) + 6 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo)) + 8 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK) + 7 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK) + 8 + msgp.ArrayHeaderSize
- for zb0010 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst {
- s += (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst[zb0010].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst)) + 8 + msgp.ArrayHeaderSize
- for zb0011 := range (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast {
- s += (*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast[zb0011].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast)) + 7 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys)) + 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation)) + 4 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver) + 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver)) + 4 + msgp.ArrayHeaderSize
- for zb0013 := range (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount {
- s += (*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount[zb0013].Msgsize()
- }
- s += 6 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount)) + 6 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo)) + 5 + msgp.ArrayHeaderSize
- for zb0014 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset {
- s += (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset[zb0014].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset)) + 2 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) * (msgp.Uint64Size)) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal)) + 3 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) * (msgp.Uint32Size)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals)) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen)) + 3 + msgp.ArrayHeaderSize
- for zb0017 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName {
- s += msgp.StringPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName[zb0017])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName)) + 3 + msgp.ArrayHeaderSize
- for zb0018 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName {
- s += msgp.StringPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName[zb0018])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName)) + 3 + msgp.ArrayHeaderSize
- for zb0019 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL {
- s += msgp.StringPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL[zb0019])
- }
- s += 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL)) + 3 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) + 5 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash)) + 2 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager)) + 2 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve)) + 2 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze)) + 2 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback) + 4 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback)) + 5 + msgp.ArrayHeaderSize
- for zb0020 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset {
- s += (*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset[zb0020].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount)) + 5 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender)) + 5 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver)) + 7 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo) + 9 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo)) + 5 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount)) + 5 + msgp.ArrayHeaderSize
- for zb0022 := range (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset {
- s += (*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset[zb0022].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen)) + 5 + msgp.ArrayHeaderSize
- for zb0023 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID {
- s += (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID[zb0023].Msgsize()
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID)) + 5 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion)) + 5 + msgp.ArrayHeaderSize
- for zb0024 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs {
- s += msgp.ArrayHeaderSize
- for zb0025 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024] {
- s += msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs[zb0024][zb0025])
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs)) + 5 + msgp.ArrayHeaderSize
- for zb0026 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts {
- s += msgp.ArrayHeaderSize
- for zb0027 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026] {
- s += (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts[zb0026][zb0027].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts)) + 5 + msgp.ArrayHeaderSize
- for zb0028 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps {
- s += msgp.ArrayHeaderSize
- for zb0029 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028] {
- s += (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps[zb0028][zb0029].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps)) + 5 + msgp.ArrayHeaderSize
- for zb0030 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets {
- s += msgp.ArrayHeaderSize
- for zb0031 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030] {
- s += (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets[zb0030][zb0031].Msgsize()
- }
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) * (msgp.Uint64Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice)) + 5 + msgp.ArrayHeaderSize
- for zb0036 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram[zb0036]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram)) + 5 + msgp.ArrayHeaderSize
- for zb0037 := range (*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram {
- s += msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram[zb0037]))
- }
- s += 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram)) + 5 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) * (msgp.Uint32Size)) + 7 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages)) + 8 + msgp.ArrayHeaderSize
- for zb0039 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound {
- s += (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound[zb0039].Msgsize()
- }
- s += 10 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound)) + 9 + msgp.ArrayHeaderSize
- for zb0040 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType {
- s += (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType[zb0040].Msgsize()
- }
- s += 11 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType)) + 6 + msgp.BytesPrefixSize + len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit)) + 6 + msgp.ArrayHeaderSize + (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) * (msgp.Uint64Size)) + 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight)) + 6 + msgp.ArrayHeaderSize
- for zb0042 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs {
- s += msgp.ArrayHeaderSize
- for zb0043 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042] {
- s += (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs[zb0042][zb0043].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0044 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs {
- s += msgp.ArrayHeaderSize
- for zb0045 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044] {
- s += (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs[zb0044][zb0045].Msgsize()
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs)) + 6 + msgp.ArrayHeaderSize
- for zb0046 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals {
- s += msgp.MapHeaderSize
- if (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] != nil {
- for zb0047, zb0048 := range (*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals[zb0046] {
- _ = zb0047
- _ = zb0048
- s += 0 + msgp.Uint64Size + zb0048.Msgsize()
- }
- }
- }
- s += 8 + msgp.BytesPrefixSize + len([]byte((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals))
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *txGroupsEncodingStub) MsgIsZero() bool {
- return ((*z).TotalTransactionsCount == 0) && ((*z).TransactionGroupCount == 0) && (len((*z).TransactionGroupSizes) == 0) && (len((*z).encodedSignedTxns.Sig) == 0) && (len((*z).encodedSignedTxns.BitmaskSig) == 0) && (len((*z).encodedSignedTxns.encodedMsigs.Version) == 0) && (len((*z).encodedSignedTxns.encodedMsigs.BitmaskVersion) == 0) && (len((*z).encodedSignedTxns.encodedMsigs.Threshold) == 0) && (len((*z).encodedSignedTxns.encodedMsigs.BitmaskThreshold) == 0) && (len((*z).encodedSignedTxns.encodedMsigs.Subsigs) == 0) && (len((*z).encodedSignedTxns.encodedMsigs.BitmaskSubsigs) == 0) && (len((*z).encodedSignedTxns.encodedLsigs.Logic) == 0) && (len((*z).encodedSignedTxns.encodedLsigs.BitmaskLogic) == 0) && (len((*z).encodedSignedTxns.encodedLsigs.LogicArgs) == 0) && (len((*z).encodedSignedTxns.encodedLsigs.BitmaskLogicArgs) == 0) && (len((*z).encodedSignedTxns.AuthAddr) == 0) && (len((*z).encodedSignedTxns.BitmaskAuthAddr) == 0) && (len((*z).encodedSignedTxns.encodedTxns.TxType) == 0) && (len((*z).encodedSignedTxns.encodedTxns.BitmaskTxType) == 0) && ((*z).encodedSignedTxns.encodedTxns.TxTypeOffset == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Sender) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskSender) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Fee) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFee) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.FirstValid) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskFirstValid) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.LastValid) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLastValid) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Note) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskNote) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGenesisID) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskGroup) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.Lease) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskLease) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.RekeyTo) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedTxnHeaders.BitmaskRekeyTo) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VotePK) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.SelectionPK) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteFirst) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteFirst) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteLast) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskVoteLast) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.VoteKeyDilution) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskKeys) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedKeyregTxnFields.BitmaskNonparticipation) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Receiver) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskReceiver) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.Amount) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskAmount) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.CloseRemainderTo) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedPaymentTxnFields.BitmaskCloseRemainderTo) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.ConfigAsset) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.BitmaskConfigAsset) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Total) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskTotal) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Decimals) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDecimals) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskDefaultFrozen) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.UnitName) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskUnitName) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.AssetName) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskAssetName) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.URL) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskURL) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.MetadataHash) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskMetadataHash) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Manager) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskManager) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Reserve) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskReserve) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Freeze) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskFreeze) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.Clawback) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetConfigTxnFields.encodedAssetParams.BitmaskClawback) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.XferAsset) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskXferAsset) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetAmount) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetAmount) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetSender) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetSender) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetReceiver) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetReceiver) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.AssetCloseTo) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetTransferTxnFields.BitmaskAssetCloseTo) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAccount) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAccount) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.FreezeAsset) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskFreezeAsset) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedAssetFreezeTxnFields.BitmaskAssetFrozen) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationID) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationID) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.OnCompletion) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskOnCompletion) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApplicationArgs) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.Accounts) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskAccounts) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignApps) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ForeignAssets) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskForeignAssets) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumUint) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumUint) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.LocalNumByteSlice) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskLocalNumByteSlice) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumUint) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumUint) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.GlobalNumByteSlice) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskGlobalNumByteSlice) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskApprovalProgram) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ClearStateProgram) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskClearStateProgram) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.ExtraProgramPages) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedApplicationCallTxnFields.BitmaskExtraProgramPages) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertRound) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertRound) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.CertType) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.BitmaskCertType) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigCommit) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigCommit) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SignedWeight) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSignedWeight) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.SigProofs) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskSigProofs) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.PartProofs) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskPartProofs) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.Reveals) == 0) && (len((*z).encodedSignedTxns.encodedTxns.encodedCompactCertTxnFields.encodedCert.BitmaskReveals) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *txGroupsEncodingStubOld) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0002Len := uint32(1)
- var zb0002Mask uint8 /* 2 bits */
- if len((*z).TxnGroups) == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
- }
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if zb0002Len != 0 {
- if (zb0002Mask & 0x2) == 0 { // if not empty
- // string "t"
- o = append(o, 0xa1, 0x74)
- if (*z).TxnGroups == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).TxnGroups)))
- }
- for zb0001 := range (*z).TxnGroups {
- o = (*z).TxnGroups[zb0001].MarshalMsg(o)
- }
- }
- }
- return
-}
-
-func (_ *txGroupsEncodingStubOld) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*txGroupsEncodingStubOld)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *txGroupsEncodingStubOld) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 > 0 {
- zb0002--
- var zb0004 int
- var zb0005 bool
- zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxnGroups")
- return
- }
- if zb0004 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "struct-from-array", "TxnGroups")
- return
- }
- if zb0005 {
- (*z).TxnGroups = nil
- } else if (*z).TxnGroups != nil && cap((*z).TxnGroups) >= zb0004 {
- (*z).TxnGroups = ((*z).TxnGroups)[:zb0004]
- } else {
- (*z).TxnGroups = make([]txnGroups, zb0004)
- }
- for zb0001 := range (*z).TxnGroups {
- bts, err = (*z).TxnGroups[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "TxnGroups", zb0001)
- return
- }
- }
- }
- if zb0002 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0002)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 {
- (*z) = txGroupsEncodingStubOld{}
- }
- for zb0002 > 0 {
- zb0002--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "t":
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxnGroups")
- return
- }
- if zb0006 > maxEncodedTransactionGroups {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(maxEncodedTransactionGroups))
- err = msgp.WrapError(err, "TxnGroups")
- return
- }
- if zb0007 {
- (*z).TxnGroups = nil
- } else if (*z).TxnGroups != nil && cap((*z).TxnGroups) >= zb0006 {
- (*z).TxnGroups = ((*z).TxnGroups)[:zb0006]
- } else {
- (*z).TxnGroups = make([]txnGroups, zb0006)
- }
- for zb0001 := range (*z).TxnGroups {
- bts, err = (*z).TxnGroups[zb0001].UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "TxnGroups", zb0001)
- return
- }
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *txGroupsEncodingStubOld) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*txGroupsEncodingStubOld)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *txGroupsEncodingStubOld) Msgsize() (s int) {
- s = 1 + 2 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).TxnGroups {
- s += (*z).TxnGroups[zb0001].Msgsize()
- }
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *txGroupsEncodingStubOld) MsgIsZero() bool {
- return (len((*z).TxnGroups) == 0)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *txnGroups) MarshalMsg(b []byte) []byte {
- return ((*(pooldata.SignedTxnSlice))(z)).MarshalMsg(b)
-}
-func (_ *txnGroups) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*txnGroups)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *txnGroups) UnmarshalMsg(bts []byte) ([]byte, error) {
- return ((*(pooldata.SignedTxnSlice))(z)).UnmarshalMsg(bts)
-}
-func (_ *txnGroups) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*txnGroups)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *txnGroups) Msgsize() int {
- return ((*(pooldata.SignedTxnSlice))(z)).Msgsize()
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *txnGroups) MsgIsZero() bool {
- return ((*(pooldata.SignedTxnSlice))(z)).MsgIsZero()
-}
diff --git a/txnsync/msgp_gen_test.go b/txnsync/msgp_gen_test.go
deleted file mode 100644
index 95b7d2451..000000000
--- a/txnsync/msgp_gen_test.go
+++ /dev/null
@@ -1,1693 +0,0 @@
-// +build !skip_msgp_testing
-
-package txnsync
-
-// Code generated by github.com/algorand/msgp DO NOT EDIT.
-
-import (
- "testing"
-
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/msgp/msgp"
-)
-
-func TestMarshalUnmarshaladdresses(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := addresses{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingaddresses(t *testing.T) {
- protocol.RunEncodingTest(t, &addresses{})
-}
-
-func BenchmarkMarshalMsgaddresses(b *testing.B) {
- v := addresses{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgaddresses(b *testing.B) {
- v := addresses{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshaladdresses(b *testing.B) {
- v := addresses{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalappIndices(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := appIndices{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingappIndices(t *testing.T) {
- protocol.RunEncodingTest(t, &appIndices{})
-}
-
-func BenchmarkMarshalMsgappIndices(b *testing.B) {
- v := appIndices{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgappIndices(b *testing.B) {
- v := appIndices{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalappIndices(b *testing.B) {
- v := appIndices{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalapplicationArgs(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := applicationArgs{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingapplicationArgs(t *testing.T) {
- protocol.RunEncodingTest(t, &applicationArgs{})
-}
-
-func BenchmarkMarshalMsgapplicationArgs(b *testing.B) {
- v := applicationArgs{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgapplicationArgs(b *testing.B) {
- v := applicationArgs{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalapplicationArgs(b *testing.B) {
- v := applicationArgs{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalassetIndices(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := assetIndices{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingassetIndices(t *testing.T) {
- protocol.RunEncodingTest(t, &assetIndices{})
-}
-
-func BenchmarkMarshalMsgassetIndices(b *testing.B) {
- v := assetIndices{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgassetIndices(b *testing.B) {
- v := assetIndices{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalassetIndices(b *testing.B) {
- v := assetIndices{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalcertProofs(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := certProofs{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingcertProofs(t *testing.T) {
- protocol.RunEncodingTest(t, &certProofs{})
-}
-
-func BenchmarkMarshalMsgcertProofs(b *testing.B) {
- v := certProofs{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgcertProofs(b *testing.B) {
- v := certProofs{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalcertProofs(b *testing.B) {
- v := certProofs{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedApplicationCallTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedApplicationCallTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedApplicationCallTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedApplicationCallTxnFields{})
-}
-
-func BenchmarkMarshalMsgencodedApplicationCallTxnFields(b *testing.B) {
- v := encodedApplicationCallTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedApplicationCallTxnFields(b *testing.B) {
- v := encodedApplicationCallTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedApplicationCallTxnFields(b *testing.B) {
- v := encodedApplicationCallTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedAssetConfigTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedAssetConfigTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedAssetConfigTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedAssetConfigTxnFields{})
-}
-
-func BenchmarkMarshalMsgencodedAssetConfigTxnFields(b *testing.B) {
- v := encodedAssetConfigTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedAssetConfigTxnFields(b *testing.B) {
- v := encodedAssetConfigTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedAssetConfigTxnFields(b *testing.B) {
- v := encodedAssetConfigTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedAssetFreezeTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedAssetFreezeTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedAssetFreezeTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedAssetFreezeTxnFields{})
-}
-
-func BenchmarkMarshalMsgencodedAssetFreezeTxnFields(b *testing.B) {
- v := encodedAssetFreezeTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedAssetFreezeTxnFields(b *testing.B) {
- v := encodedAssetFreezeTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedAssetFreezeTxnFields(b *testing.B) {
- v := encodedAssetFreezeTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedAssetParams(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedAssetParams{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedAssetParams(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedAssetParams{})
-}
-
-func BenchmarkMarshalMsgencodedAssetParams(b *testing.B) {
- v := encodedAssetParams{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedAssetParams(b *testing.B) {
- v := encodedAssetParams{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedAssetParams(b *testing.B) {
- v := encodedAssetParams{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedAssetTransferTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedAssetTransferTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedAssetTransferTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedAssetTransferTxnFields{})
-}
-
-func BenchmarkMarshalMsgencodedAssetTransferTxnFields(b *testing.B) {
- v := encodedAssetTransferTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedAssetTransferTxnFields(b *testing.B) {
- v := encodedAssetTransferTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedAssetTransferTxnFields(b *testing.B) {
- v := encodedAssetTransferTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedBloomFilter(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedBloomFilter{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedBloomFilter(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedBloomFilter{})
-}
-
-func BenchmarkMarshalMsgencodedBloomFilter(b *testing.B) {
- v := encodedBloomFilter{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedBloomFilter(b *testing.B) {
- v := encodedBloomFilter{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedBloomFilter(b *testing.B) {
- v := encodedBloomFilter{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedCert(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedCert{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedCert(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedCert{})
-}
-
-func BenchmarkMarshalMsgencodedCert(b *testing.B) {
- v := encodedCert{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedCert(b *testing.B) {
- v := encodedCert{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedCert(b *testing.B) {
- v := encodedCert{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedCompactCertTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedCompactCertTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedCompactCertTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedCompactCertTxnFields{})
-}
-
-func BenchmarkMarshalMsgencodedCompactCertTxnFields(b *testing.B) {
- v := encodedCompactCertTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedCompactCertTxnFields(b *testing.B) {
- v := encodedCompactCertTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedCompactCertTxnFields(b *testing.B) {
- v := encodedCompactCertTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedKeyregTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedKeyregTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedKeyregTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedKeyregTxnFields{})
-}
-
-func BenchmarkMarshalMsgencodedKeyregTxnFields(b *testing.B) {
- v := encodedKeyregTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedKeyregTxnFields(b *testing.B) {
- v := encodedKeyregTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedKeyregTxnFields(b *testing.B) {
- v := encodedKeyregTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedLsigs(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedLsigs{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedLsigs(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedLsigs{})
-}
-
-func BenchmarkMarshalMsgencodedLsigs(b *testing.B) {
- v := encodedLsigs{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedLsigs(b *testing.B) {
- v := encodedLsigs{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedLsigs(b *testing.B) {
- v := encodedLsigs{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedMsigs(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedMsigs{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedMsigs(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedMsigs{})
-}
-
-func BenchmarkMarshalMsgencodedMsigs(b *testing.B) {
- v := encodedMsigs{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedMsigs(b *testing.B) {
- v := encodedMsigs{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedMsigs(b *testing.B) {
- v := encodedMsigs{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedPaymentTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedPaymentTxnFields{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedPaymentTxnFields(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedPaymentTxnFields{})
-}
-
-func BenchmarkMarshalMsgencodedPaymentTxnFields(b *testing.B) {
- v := encodedPaymentTxnFields{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedPaymentTxnFields(b *testing.B) {
- v := encodedPaymentTxnFields{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedPaymentTxnFields(b *testing.B) {
- v := encodedPaymentTxnFields{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedSignedTxns(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedSignedTxns{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedSignedTxns(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedSignedTxns{})
-}
-
-func BenchmarkMarshalMsgencodedSignedTxns(b *testing.B) {
- v := encodedSignedTxns{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedSignedTxns(b *testing.B) {
- v := encodedSignedTxns{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedSignedTxns(b *testing.B) {
- v := encodedSignedTxns{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedTxnHeaders(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedTxnHeaders{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedTxnHeaders(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedTxnHeaders{})
-}
-
-func BenchmarkMarshalMsgencodedTxnHeaders(b *testing.B) {
- v := encodedTxnHeaders{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedTxnHeaders(b *testing.B) {
- v := encodedTxnHeaders{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedTxnHeaders(b *testing.B) {
- v := encodedTxnHeaders{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedTxns(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedTxns{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedTxns(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedTxns{})
-}
-
-func BenchmarkMarshalMsgencodedTxns(b *testing.B) {
- v := encodedTxns{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedTxns(b *testing.B) {
- v := encodedTxns{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedTxns(b *testing.B) {
- v := encodedTxns{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalpackedTransactionGroups(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := packedTransactionGroups{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingpackedTransactionGroups(t *testing.T) {
- protocol.RunEncodingTest(t, &packedTransactionGroups{})
-}
-
-func BenchmarkMarshalMsgpackedTransactionGroups(b *testing.B) {
- v := packedTransactionGroups{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgpackedTransactionGroups(b *testing.B) {
- v := packedTransactionGroups{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalpackedTransactionGroups(b *testing.B) {
- v := packedTransactionGroups{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalrelayedProposal(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := relayedProposal{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingrelayedProposal(t *testing.T) {
- protocol.RunEncodingTest(t, &relayedProposal{})
-}
-
-func BenchmarkMarshalMsgrelayedProposal(b *testing.B) {
- v := relayedProposal{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgrelayedProposal(b *testing.B) {
- v := relayedProposal{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalrelayedProposal(b *testing.B) {
- v := relayedProposal{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalrequestParams(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := requestParams{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingrequestParams(t *testing.T) {
- protocol.RunEncodingTest(t, &requestParams{})
-}
-
-func BenchmarkMarshalMsgrequestParams(b *testing.B) {
- v := requestParams{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgrequestParams(b *testing.B) {
- v := requestParams{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalrequestParams(b *testing.B) {
- v := requestParams{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalrevealMap(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := revealMap{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingrevealMap(t *testing.T) {
- protocol.RunEncodingTest(t, &revealMap{})
-}
-
-func BenchmarkMarshalMsgrevealMap(b *testing.B) {
- v := revealMap{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgrevealMap(b *testing.B) {
- v := revealMap{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalrevealMap(b *testing.B) {
- v := revealMap{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshaltimingParams(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := timingParams{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingtimingParams(t *testing.T) {
- protocol.RunEncodingTest(t, &timingParams{})
-}
-
-func BenchmarkMarshalMsgtimingParams(b *testing.B) {
- v := timingParams{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgtimingParams(b *testing.B) {
- v := timingParams{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshaltimingParams(b *testing.B) {
- v := timingParams{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshaltransactionBlockMessage(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := transactionBlockMessage{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingtransactionBlockMessage(t *testing.T) {
- protocol.RunEncodingTest(t, &transactionBlockMessage{})
-}
-
-func BenchmarkMarshalMsgtransactionBlockMessage(b *testing.B) {
- v := transactionBlockMessage{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgtransactionBlockMessage(b *testing.B) {
- v := transactionBlockMessage{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshaltransactionBlockMessage(b *testing.B) {
- v := transactionBlockMessage{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshaltxGroupsEncodingStub(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := txGroupsEncodingStub{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingtxGroupsEncodingStub(t *testing.T) {
- protocol.RunEncodingTest(t, &txGroupsEncodingStub{})
-}
-
-func BenchmarkMarshalMsgtxGroupsEncodingStub(b *testing.B) {
- v := txGroupsEncodingStub{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgtxGroupsEncodingStub(b *testing.B) {
- v := txGroupsEncodingStub{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshaltxGroupsEncodingStub(b *testing.B) {
- v := txGroupsEncodingStub{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshaltxGroupsEncodingStubOld(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := txGroupsEncodingStubOld{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingtxGroupsEncodingStubOld(t *testing.T) {
- protocol.RunEncodingTest(t, &txGroupsEncodingStubOld{})
-}
-
-func BenchmarkMarshalMsgtxGroupsEncodingStubOld(b *testing.B) {
- v := txGroupsEncodingStubOld{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgtxGroupsEncodingStubOld(b *testing.B) {
- v := txGroupsEncodingStubOld{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshaltxGroupsEncodingStubOld(b *testing.B) {
- v := txGroupsEncodingStubOld{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/txnsync/outgoing.go b/txnsync/outgoing.go
deleted file mode 100644
index fa18bd353..000000000
--- a/txnsync/outgoing.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "context"
- "errors"
- "sort"
- "time"
-
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-const messageTimeWindow = 20 * time.Millisecond
-
-var errTransactionSyncOutgoingMessageQueueFull = errors.New("transaction sync outgoing message queue is full")
-var errTransactionSyncOutgoingMessageSendFailed = errors.New("transaction sync failed to send message")
-
-// sentMessageMetadata is the message metadata for a message that is being sent. It includes some extra
-// pieces of information about the message itself, used for tracking the "content" of the message beyond
-// the point where it's being encoded.
-type sentMessageMetadata struct {
- encodedMessageSize int
- sentTransactionsIDs []transactions.Txid
- message *transactionBlockMessage
- peer *Peer
- sentTimestamp time.Duration
- sequenceNumber uint64
- partialMessage bool
- transactionGroups []pooldata.SignedTxGroup
- projectedSequenceNumber uint64
-}
-
-// messageAsyncEncoder structure encapsulates the encoding and sending of a given message to the network. The encoding
-// could be a lengthy operation which does't need to be blocking the main loop. Moving the actual encoding into an
-// execution pool thread frees up the main loop, allowing smoother operation.
-type messageAsyncEncoder struct {
- state *syncState
- messageData sentMessageMetadata
- roundClock timers.WallClock
- lastReceivedMessageTimestamp time.Duration
- peerDataExchangeRate uint64
- // sentMessagesCh is a copy of the outgoingMessagesCallbackCh in the syncState object. We want to create a copy of
- // the channel so that in case of a txnsync restart ( i.e. fast catchup ), we can still generate a new channel
- // without triggering a data race. The alternative is to block the txnsync.Shutdown() until we receive the feedback
- // from the network library, but that could be susceptible to undesired network disconnections.
- sentMessagesCh chan sentMessageMetadata
-}
-
-// asyncMessageSent called via the network package to inform the txsync that a message was enqueued, and the associated sequence number.
-func (encoder *messageAsyncEncoder) asyncMessageSent(enqueued bool, sequenceNumber uint64) error {
- if !enqueued {
- encoder.state.log.Infof("unable to send message to peer. disconnecting from peer.")
- encoder.state.incomingMessagesQ.erase(encoder.messageData.peer, encoder.messageData.peer.networkPeer)
- return errTransactionSyncOutgoingMessageSendFailed
- }
- // record the sequence number here, so that we can store that later on.
- encoder.messageData.sequenceNumber = sequenceNumber
-
- select {
- case encoder.sentMessagesCh <- encoder.messageData:
- return nil
- default:
- // if we can't place it on the channel, return an error so that the node could disconnect from this peer.
- encoder.state.log.Infof("unable to enqueue outgoing message confirmation; outgoingMessagesCallbackCh is full. disconnecting from peer.")
- encoder.state.incomingMessagesQ.erase(encoder.messageData.peer, encoder.messageData.peer.networkPeer)
- return errTransactionSyncOutgoingMessageQueueFull
- }
-}
-
-// asyncEncodeAndSend encodes transaction groups and sends peer message asynchronously
-func (encoder *messageAsyncEncoder) asyncEncodeAndSend(interface{}) interface{} {
- defer encoder.state.messageSendWaitGroup.Done()
-
- var err error
- if len(encoder.messageData.transactionGroups) > 0 {
- encoder.messageData.message.TransactionGroups, err = encoder.state.encodeTransactionGroups(encoder.messageData.transactionGroups, encoder.peerDataExchangeRate)
- if err != nil {
- encoder.state.log.Warnf("unable to encode transaction groups : %v", err)
- }
- encoder.messageData.transactionGroups = nil // clear out to allow GC to reclaim
- }
-
- if encoder.lastReceivedMessageTimestamp >= 0 {
- // adding a nanosecond to the elapsed time is meaningless for the data rate calculation, but would ensure that
- // the ResponseElapsedTime field has a clear distinction between "being set" vs. "not being set"
- encoder.messageData.message.MsgSync.ResponseElapsedTime = uint64((encoder.roundClock.Since() - encoder.lastReceivedMessageTimestamp).Nanoseconds())
- }
-
- encodedMessage := encoder.messageData.message.MarshalMsg(getMessageBuffer())
- encoder.messageData.encodedMessageSize = len(encodedMessage)
- // now that the message is ready, we can discard the encoded transaction group slice to allow the GC to collect it.
- releaseEncodedTransactionGroups(encoder.messageData.message.TransactionGroups.Bytes)
- // record the timestamp here, before sending the raw bytes to the network :
- // the time we spend on the network package might include the network processing time, which
- // we want to make sure we avoid.
- encoder.messageData.sentTimestamp = encoder.roundClock.Since()
-
- encoder.state.node.SendPeerMessage(encoder.messageData.peer.networkPeer, encodedMessage, encoder.asyncMessageSent)
- releaseMessageBuffer(encodedMessage)
-
- encoder.messageData.message.TransactionGroups.Bytes = nil
- // increase the metric for total messages sent.
- txsyncOutgoingMessagesTotal.Inc(nil)
- return nil
-}
-
-// enqueue add the given message encoding task to the execution pool, and increase the waitgroup as needed.
-func (encoder *messageAsyncEncoder) enqueue() {
- encoder.state.messageSendWaitGroup.Add(1)
- if err := encoder.state.threadpool.EnqueueBacklog(context.Background(), encoder.asyncEncodeAndSend, nil, nil); err != nil {
- encoder.state.messageSendWaitGroup.Done()
- }
-}
-
-// pendingTransactionGroupsSnapshot is used to represent a snapshot of a pending transaction groups along with the latestLocallyOriginatedGroupCounter value.
-// The goal is to ensure we're "capturing" this only once per `sendMessageLoop` call. In order to do so, we allocate that structure on the stack, and passing
-// a pointer to that structure downstream.
-type pendingTransactionGroupsSnapshot struct {
- pendingTransactionsGroups []pooldata.SignedTxGroup
- latestLocallyOriginatedGroupCounter uint64
-}
-
-func (s *syncState) sendMessageLoop(currentTime time.Duration, deadline timers.DeadlineMonitor, peers []*Peer) {
- if len(peers) == 0 {
- // no peers - no messages that need to be sent.
- return
- }
- var pendingTransactions pendingTransactionGroupsSnapshot
- profGetTxnsGroups := s.profiler.getElement(profElementGetTxnsGroups)
- profAssembleMessage := s.profiler.getElement(profElementAssembleMessage)
- var assembledBloomFilter bloomFilter
- profGetTxnsGroups.start()
- pendingTransactions.pendingTransactionsGroups, pendingTransactions.latestLocallyOriginatedGroupCounter = s.node.GetPendingTransactionGroups()
- profGetTxnsGroups.end()
- for _, peer := range peers {
- msgEncoder := &messageAsyncEncoder{state: s, roundClock: s.clock, peerDataExchangeRate: peer.dataExchangeRate, sentMessagesCh: s.outgoingMessagesCallbackCh}
- profAssembleMessage.start()
- msgEncoder.messageData, assembledBloomFilter, msgEncoder.lastReceivedMessageTimestamp = s.assemblePeerMessage(peer, &pendingTransactions)
- profAssembleMessage.end()
- isPartialMessage := msgEncoder.messageData.partialMessage
- // The message that we've just encoded is expected to be sent out with the next sequence number.
- // However, since the enqueue method is using the execution pool, there is a remote chance that we
- // would "garble" the message ordering. That's not a huge issue, but we need to be able to tell that
- // so we can have accurate elapsed time measurements for the data exchange rate calculations.
- msgEncoder.messageData.projectedSequenceNumber = peer.lastSentMessageSequenceNumber + 1
- msgEncoder.enqueue()
-
- // update the bloom filter right here, since we want to make sure the peer contains the
- // correct sent bloom filter, regardless of the message sending timing. If and when we
- // generate the next message, we need to ensure that we're aware of this bloom filter, since
- // it would affect whether we re-generate another bloom filter or not.
- peer.updateSentBoomFilter(assembledBloomFilter, s.round)
-
- scheduleOffset, ops := peer.getNextScheduleOffset(s.isRelay, s.lastBeta, isPartialMessage, currentTime)
- if (ops & peerOpsSetInterruptible) == peerOpsSetInterruptible {
- if _, has := s.interruptablePeersMap[peer]; !has {
- s.interruptablePeers = append(s.interruptablePeers, peer)
- s.interruptablePeersMap[peer] = len(s.interruptablePeers) - 1
- }
- }
- if (ops & peerOpsClearInterruptible) == peerOpsClearInterruptible {
- if idx, has := s.interruptablePeersMap[peer]; has {
- delete(s.interruptablePeersMap, peer)
- s.interruptablePeers[idx] = nil
- }
- }
- if (ops & peerOpsReschedule) == peerOpsReschedule {
- s.scheduler.schedulePeer(peer, currentTime+scheduleOffset)
- }
-
- if deadline.Expired() {
- // we ran out of time sending messages, stop sending any more messages.
- break
- }
- }
-}
-
-func (s *syncState) assemblePeerMessage(peer *Peer, pendingTransactions *pendingTransactionGroupsSnapshot) (metaMessage sentMessageMetadata, assembledBloomFilter bloomFilter, lastReceivedMessageTimestamp time.Duration) {
- metaMessage = sentMessageMetadata{
- peer: peer,
- message: &transactionBlockMessage{
- Version: txnBlockMessageVersion,
- Round: s.round,
- },
- }
-
- bloomFilterSize := 0
-
- msgOps := peer.getMessageConstructionOps(s.isRelay, s.fetchTransactions)
-
- if msgOps&messageConstUpdateRequestParams == messageConstUpdateRequestParams {
- // update the UpdatedRequestParams
- offset, modulator := peer.getLocalRequestParams()
- metaMessage.message.UpdatedRequestParams.Modulator = modulator
- if modulator > 0 {
- // for relays, the modulator is always one, which means the following would always be zero.
- metaMessage.message.UpdatedRequestParams.Offset = byte(uint64(offset) % uint64(modulator))
- }
- }
-
- if (msgOps&messageConstBloomFilter == messageConstBloomFilter) && len(pendingTransactions.pendingTransactionsGroups) > 0 {
- var lastBloomFilter *bloomFilter
- var excludeTransactions *transactionCache
- // for relays, where we send a full bloom filter to everyone, we want to coordinate that with a single
- // copy of the bloom filter, to prevent re-creation.
- if s.isRelay {
- lastBloomFilter = &s.lastBloomFilter
- } else {
- // for peers, we want to make sure we don't regenerate the same bloom filter as before.
- lastBloomFilter = &peer.lastSentBloomFilter
-
- // for non-relays, we want to be more picky and send bloom filter that excludes the transactions that were send from that relay
- // ( since the relay already knows that it sent us these transactions ). we cannot do the same for relay->relay since it would
- // conflict with the bloom filters being calculated only once.
- excludeTransactions = peer.recentSentTransactions
- }
- filterTxns := pendingTransactions.pendingTransactionsGroups
- minGroupCounter, lastGroupRound := peer.sentFilterParams.nextFilterGroup(metaMessage.message.UpdatedRequestParams)
- if lastGroupRound != s.round {
- minGroupCounter = 0
- }
- if minGroupCounter > 0 {
- mgi := sort.Search(
- len(filterTxns),
- func(i int) bool {
- return filterTxns[i].GroupCounter >= minGroupCounter
- },
- )
- if mgi >= len(filterTxns) {
- goto notxns
- }
- filterTxns = filterTxns[mgi:]
- }
- profMakeBloomFilter := s.profiler.getElement(profElementMakeBloomFilter)
- profMakeBloomFilter.start()
- // generate a bloom filter that matches the requests params.
- assembledBloomFilter = s.makeBloomFilter(metaMessage.message.UpdatedRequestParams, filterTxns, excludeTransactions, lastBloomFilter)
- // we check here to see if the bloom filter we need happen to be the same as the one that was previously sent to the peer.
- // ( note that we check here againt the peer, whereas the hint to makeBloomFilter could be the cached one for the relay )
- if !assembledBloomFilter.sameParams(peer.lastSentBloomFilter) && assembledBloomFilter.encodedLength > 0 {
- if lastGroupRound != s.round {
- assembledBloomFilter.encoded.ClearPrevious = 1
- }
- metaMessage.message.TxnBloomFilter = assembledBloomFilter.encoded
- bloomFilterSize = assembledBloomFilter.encodedLength
- }
- profMakeBloomFilter.end()
- if s.isRelay {
- s.lastBloomFilter = assembledBloomFilter
- }
- }
-notxns:
-
- if msgOps&messageConstTransactions == messageConstTransactions {
- transactionGroups := pendingTransactions.pendingTransactionsGroups
- if !s.isRelay {
- // on non-relay, we need to filter out the non-locally originated messages since we don't want
- // non-relays to send transaction that they received via the transaction sync back.
- transactionGroups = s.locallyGeneratedTransactions(pendingTransactions)
- }
-
- profTxnsSelection := s.profiler.getElement(profElementTxnsSelection)
- profTxnsSelection.start()
- metaMessage.transactionGroups, metaMessage.sentTransactionsIDs, metaMessage.partialMessage = peer.selectPendingTransactions(transactionGroups, messageTimeWindow, s.round, bloomFilterSize)
- profTxnsSelection.end()
-
- // clear the last sent bloom filter on the end of a series of partial messages.
- // this would ensure we generate a new bloom filter every beta, which is needed
- // in order to avoid the bloom filter inherent false positive rate.
- if !metaMessage.partialMessage {
- peer.lastSentBloomFilter = bloomFilter{}
- }
- }
-
- metaMessage.message.MsgSync.RefTxnBlockMsgSeq = peer.nextReceivedMessageSeq - 1
- // signify that timestamp is not set
- lastReceivedMessageTimestamp = time.Duration(-1)
- if peer.lastReceivedMessageTimestamp != 0 && peer.lastReceivedMessageLocalRound == s.round {
- lastReceivedMessageTimestamp = peer.lastReceivedMessageTimestamp
- // reset the lastReceivedMessageTimestamp so that we won't be using that again on a subsequent outgoing message.
- peer.lastReceivedMessageTimestamp = 0
- }
-
- // use the messages seq number that we've accepted so far, and let the other peer
- // know about them. The getAcceptedMessages would delete the returned list from the peer's storage before
- // returning.
- metaMessage.message.MsgSync.AcceptedMsgSeq = peer.getAcceptedMessages()
-
- if msgOps&messageConstNextMinDelay == messageConstNextMinDelay {
- metaMessage.message.MsgSync.NextMsgMinDelay = uint64(s.lastBeta.Nanoseconds()) * 2
- }
- return
-}
-
-func (s *syncState) evaluateOutgoingMessage(msgData sentMessageMetadata) {
- timestamp := msgData.sentTimestamp
- // test to see if our message got re-ordered between the time we placed it on the execution pool queue and the time
- // we received it back from the network:
- if msgData.sequenceNumber != msgData.projectedSequenceNumber {
- // yes, the order was changed. In this case, we will set the timestamp to zero. This would allow the
- // incoming message handler to identify that we shouldn't use this timestamp for calculating the data exchange rate.
- timestamp = 0
- }
- msgData.peer.updateMessageSent(msgData.message, msgData.sentTransactionsIDs, timestamp, msgData.sequenceNumber, msgData.encodedMessageSize)
- s.log.outgoingMessage(msgStats{msgData.sequenceNumber, msgData.message.Round, len(msgData.sentTransactionsIDs), msgData.message.UpdatedRequestParams, len(msgData.message.TxnBloomFilter.BloomFilter), msgData.message.MsgSync.NextMsgMinDelay, msgData.peer.networkAddress()})
-}
-
-// locallyGeneratedTransactions return a subset of the given transactionGroups array by filtering out transactions that are not locally generated.
-func (s *syncState) locallyGeneratedTransactions(pendingTransactions *pendingTransactionGroupsSnapshot) (result []pooldata.SignedTxGroup) {
- if pendingTransactions.latestLocallyOriginatedGroupCounter == pooldata.InvalidSignedTxGroupCounter || len(pendingTransactions.pendingTransactionsGroups) == 0 {
- return []pooldata.SignedTxGroup{}
- }
- n := sort.Search(len(pendingTransactions.pendingTransactionsGroups), func(i int) bool {
- return pendingTransactions.pendingTransactionsGroups[i].GroupCounter >= pendingTransactions.latestLocallyOriginatedGroupCounter
- })
- if n == len(pendingTransactions.pendingTransactionsGroups) {
- n--
- }
- result = make([]pooldata.SignedTxGroup, n+1)
-
- count := 0
- for i := 0; i <= n; i++ {
- txnGroup := pendingTransactions.pendingTransactionsGroups[i]
- if !txnGroup.LocallyOriginated {
- continue
- }
- result[count] = txnGroup
- count++
- }
- return result[:count]
-}
diff --git a/txnsync/outgoing_test.go b/txnsync/outgoing_test.go
deleted file mode 100644
index 3136161ef..000000000
--- a/txnsync/outgoing_test.go
+++ /dev/null
@@ -1,615 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "context"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/execpool"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-type mockAsyncLogger struct {
- logging.Logger
- warnCalled *bool
-}
-
-func (m mockAsyncLogger) outgoingMessage(mstat msgStats) {
-}
-
-func (m mockAsyncLogger) incomingMessage(mstat msgStats) {
-}
-
-func (m mockAsyncLogger) Infof(string, ...interface{}) {}
-
-func (m mockAsyncLogger) Warnf(string, ...interface{}) {
- if m.warnCalled != nil {
- *m.warnCalled = true
- }
-}
-
-func TestAsyncMessageSent(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var s syncState
- s.clock = timers.MakeMonotonicClock(time.Now())
- s.log = mockAsyncLogger{}
- s.incomingMessagesQ = makeIncomingMessageQueue()
- defer s.incomingMessagesQ.shutdown()
-
- asyncEncoder := messageAsyncEncoder{
- state: &s,
- messageData: sentMessageMetadata{
- message: &transactionBlockMessage{
- Version: txnBlockMessageVersion,
- Round: 1,
- },
- peer: &Peer{},
- },
- roundClock: timers.MakeMonotonicClock(time.Now()),
- sentMessagesCh: s.outgoingMessagesCallbackCh,
- }
-
- oldTimestamp := asyncEncoder.messageData.sentTimestamp
- a.Equal(asyncEncoder.asyncMessageSent(false, 0), errTransactionSyncOutgoingMessageSendFailed)
- err := asyncEncoder.asyncMessageSent(true, 1337)
- a.Equal(err, errTransactionSyncOutgoingMessageQueueFull)
- a.Equal(asyncEncoder.messageData.sentTimestamp, oldTimestamp)
- a.Equal(asyncEncoder.messageData.sequenceNumber, uint64(1337))
-
- // Make this buffered for now so we catch the select statement
- asyncEncoder.sentMessagesCh = make(chan sentMessageMetadata, 1)
-
- err = asyncEncoder.asyncMessageSent(true, 1337)
- a.Nil(err)
- a.Equal(1, len(asyncEncoder.sentMessagesCh))
-}
-
-type mockAsyncNodeConnector struct {
- NodeConnector
- called *bool
- largeTxnGroup bool
-}
-
-func (m mockAsyncNodeConnector) Random(rng uint64) uint64 {
- // We need to be deterministic in our "randomness" for the tests
- return 42
-}
-
-func (m mockAsyncNodeConnector) SendPeerMessage(netPeer interface{}, msg []byte, callback SendMessageCallback) {
- *m.called = true
-}
-
-func (m mockAsyncNodeConnector) GetPendingTransactionGroups() (txGroups []pooldata.SignedTxGroup, latestLocallyOriginatedGroupCounter uint64) {
- if m.largeTxnGroup {
- rval := []pooldata.SignedTxGroup{}
- for i := 0; i < 100000; i++ {
- // Because we use this with non-relay nodes, the syncState will
- // use the locallyGeneratedTransactions() function.
- // To make sure we fill the values appropriately, we are going to
- // set every value here to be locally originated
- // Additionally, we want the encoded length to be 1000 (or something rather large)
- // to make sure that we can attain partial messages (see TestSendMessageLoop test)
- rval = append(rval, pooldata.SignedTxGroup{EncodedLength: 1000, LocallyOriginated: true})
- }
-
- return rval, 1
- }
- return []pooldata.SignedTxGroup{}, 1
-}
-
-// TestAsyncEncodeAndSendErr Tests response when encodeTransactionGroups doesn't return an error
-func TestAsyncEncodeAndSendNonErr(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var s syncState
- s.clock = timers.MakeMonotonicClock(time.Now())
- warnCalled := false
- s.log = mockAsyncLogger{warnCalled: &warnCalled}
- sendPeerMessageCalled := false
- s.node = mockAsyncNodeConnector{called: &sendPeerMessageCalled}
- s.messageSendWaitGroup = sync.WaitGroup{}
-
- txnGrps := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{
- transactions.SignedTxn{
- Txn: transactions.Transaction{
- Type: protocol.AssetConfigTx,
- },
- },
- },
- },
- }
-
- asyncEncoder := messageAsyncEncoder{
- state: &s,
- messageData: sentMessageMetadata{
- message: &transactionBlockMessage{
- Version: txnBlockMessageVersion,
- Round: 1,
- },
- transactionGroups: txnGrps,
- peer: &Peer{},
- },
- roundClock: timers.MakeMonotonicClock(time.Now()),
- }
-
- asyncEncoder.state.messageSendWaitGroup.Add(1)
-
- err := asyncEncoder.asyncEncodeAndSend(nil)
-
- a.Nil(err)
- a.False(warnCalled)
- a.True(sendPeerMessageCalled)
- a.Nil(asyncEncoder.messageData.transactionGroups)
-}
-
-// TestAsyncEncodeAndSendErr Tests response when encodeTransactionGroups returns an error
-func TestAsyncEncodeAndSendErr(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var s syncState
- s.clock = timers.MakeMonotonicClock(time.Now())
- warnCalled := false
- s.log = mockAsyncLogger{warnCalled: &warnCalled}
- sendPeerMessageCalled := false
- s.node = mockAsyncNodeConnector{called: &sendPeerMessageCalled}
- s.messageSendWaitGroup = sync.WaitGroup{}
-
- txnGrps := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{
- transactions.SignedTxn{
- Txn: transactions.Transaction{
- Type: protocol.UnknownTx,
- },
- },
- },
- },
- }
-
- asyncEncoder := messageAsyncEncoder{
- state: &s,
- messageData: sentMessageMetadata{
- message: &transactionBlockMessage{
- Version: txnBlockMessageVersion,
- Round: 1,
- },
- transactionGroups: txnGrps,
- peer: &Peer{},
- },
- roundClock: timers.MakeMonotonicClock(time.Now()),
- }
-
- asyncEncoder.state.messageSendWaitGroup.Add(1)
-
- err := asyncEncoder.asyncEncodeAndSend(nil)
-
- a.Nil(err)
- a.True(warnCalled)
- a.True(sendPeerMessageCalled)
-
-}
-
-// TestAsyncEncodeAndSend Tests that SendPeerMessage is called in the node connector
-func TestAsyncEncodeAndSend(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var s syncState
- s.clock = timers.MakeMonotonicClock(time.Now())
- s.log = mockAsyncLogger{}
- sendPeerMessageCalled := false
- s.node = mockAsyncNodeConnector{called: &sendPeerMessageCalled}
- s.messageSendWaitGroup = sync.WaitGroup{}
-
- asyncEncoder := messageAsyncEncoder{
- state: &s,
- messageData: sentMessageMetadata{
- message: &transactionBlockMessage{
- Version: txnBlockMessageVersion,
- Round: 1,
- },
- peer: &Peer{},
- },
- roundClock: timers.MakeMonotonicClock(time.Now()),
- }
-
- asyncEncoder.state.messageSendWaitGroup.Add(1)
-
- err := asyncEncoder.asyncEncodeAndSend(nil)
- a.Nil(err)
- a.True(sendPeerMessageCalled)
- a.NotZero(asyncEncoder.messageData.sentTimestamp)
-
-}
-
-// TestAssemblePeerMessage_messageConstBloomFilter Tests assemblePeerMessage with messageConstBloomFilter msgOps
-func TestAssemblePeerMessage_messageConstBloomFilter(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- s := syncState{
- node: mockAsyncNodeConnector{},
- clock: timers.MakeMonotonicClock(time.Now()),
- }
-
- s.profiler = makeProfiler(1*time.Millisecond, s.clock, s.log, 1*time.Millisecond)
-
- peer := Peer{}
-
- pendingTransactions := pendingTransactionGroupsSnapshot{
- pendingTransactionsGroups: []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{},
- },
- }
-
- peer.setLocalRequestParams(111, 222)
- peer.lastReceivedMessageTimestamp = 100
- peer.lastReceivedMessageLocalRound = s.round
-
- expectedFilter := s.makeBloomFilter(requestParams{Offset: 111, Modulator: 222}, pendingTransactions.pendingTransactionsGroups, nil, &s.lastBloomFilter)
-
- s.isRelay = true
- peer.isOutgoing = true
- peer.state = peerStateLateBloom
-
- metaMessage, _, responseTime := s.assemblePeerMessage(&peer, &pendingTransactions)
-
- a.Equal(metaMessage.message.UpdatedRequestParams.Modulator, byte(222))
- a.Equal(metaMessage.message.UpdatedRequestParams.Offset, byte(111))
- a.Equal(metaMessage.peer, &peer)
- a.Equal(metaMessage.message.Version, int32(txnBlockMessageVersion))
- a.Equal(metaMessage.message.Round, s.round)
- a.True(responseTime >= 0)
- a.Equal(s.lastBloomFilter, expectedFilter)
-}
-
-// TestAssemblePeerMessage_messageConstBloomFilterNonRelay Tests assemblePeerMessage with messageConstBloomFilter msgOps in a non-relay scenario
-func TestAssemblePeerMessage_messageConstBloomFilterNonRelay(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- s := syncState{
- node: mockAsyncNodeConnector{largeTxnGroup: true},
- clock: timers.MakeMonotonicClock(time.Now()),
- }
-
- s.profiler = makeProfiler(1*time.Millisecond, s.clock, s.log, 1*time.Millisecond)
-
- peer := Peer{}
-
- pendingTransactions := pendingTransactionGroupsSnapshot{
- pendingTransactionsGroups: []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{},
- },
- }
-
- peer.setLocalRequestParams(111, 222)
- peer.lastReceivedMessageTimestamp = 100
- peer.lastReceivedMessageLocalRound = s.round
-
- expectedFilter := s.makeBloomFilter(requestParams{Offset: 111, Modulator: 222}, pendingTransactions.pendingTransactionsGroups, nil, &s.lastBloomFilter)
-
- s.isRelay = false
- s.fetchTransactions = true
- peer.isOutgoing = true
- peer.state = peerStateLateBloom
-
- metaMessage, _, responseTime := s.assemblePeerMessage(&peer, &pendingTransactions)
-
- a.Equal(metaMessage.message.UpdatedRequestParams.Modulator, byte(222))
- a.Equal(metaMessage.message.UpdatedRequestParams.Offset, byte(111))
- a.Equal(metaMessage.peer, &peer)
- a.Equal(metaMessage.message.Version, int32(txnBlockMessageVersion))
- a.Equal(metaMessage.message.Round, s.round)
- a.True(responseTime >= 0)
- a.NotEqual(s.lastBloomFilter, expectedFilter)
-}
-
-// TestAssemblePeerMessage_messageConstNextMinDelay_messageConstUpdateRequestParams Tests assemblePeerMessage with messageConstNextMinDelay | messageConstUpdateRequestParams msgOps
-func TestAssemblePeerMessage_messageConstNextMinDelay_messageConstUpdateRequestParams(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- s := syncState{clock: timers.MakeMonotonicClock(time.Now())}
-
- s.profiler = makeProfiler(1*time.Millisecond, s.clock, s.log, 1*time.Millisecond)
-
- peer := Peer{}
-
- pendingTransactions := pendingTransactionGroupsSnapshot{}
-
- peer.setLocalRequestParams(111, 222)
- peer.lastReceivedMessageTimestamp = 100
- peer.lastReceivedMessageLocalRound = s.round
-
- s.isRelay = true
- s.lastBeta = 123 * time.Nanosecond
-
- metaMessage, _, responseTime := s.assemblePeerMessage(&peer, &pendingTransactions)
-
- a.Equal(metaMessage.message.UpdatedRequestParams.Modulator, byte(222))
- a.Equal(metaMessage.message.UpdatedRequestParams.Offset, byte(111))
- a.Equal(metaMessage.peer, &peer)
- a.Equal(metaMessage.message.Version, int32(txnBlockMessageVersion))
- a.Equal(metaMessage.message.Round, s.round)
- a.True(responseTime >= 0)
- a.Equal(metaMessage.message.MsgSync.NextMsgMinDelay, uint64(s.lastBeta.Nanoseconds())*2)
-
-}
-
-// TestAssemblePeerMessage_messageConstTransactions Tests assemblePeerMessage messageConstTransactions msgOps
-func TestAssemblePeerMessage_messageConstTransactions(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- s := syncState{clock: timers.MakeMonotonicClock(time.Now())}
-
- s.profiler = makeProfiler(1*time.Millisecond, s.clock, s.log, 1*time.Millisecond)
-
- peer := Peer{}
-
- pendingTransactions := pendingTransactionGroupsSnapshot{
- latestLocallyOriginatedGroupCounter: 1,
- pendingTransactionsGroups: []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- LocallyOriginated: true,
- EncodedLength: 2,
- },
- },
- }
-
- peer.setLocalRequestParams(111, 222)
- peer.lastReceivedMessageTimestamp = 100
- peer.lastReceivedMessageLocalRound = s.round
- peer.requestedTransactionsModulator = 2
- peer.recentSentTransactions = makeTransactionCache(5, 10, 20)
-
- s.isRelay = false
- peer.isOutgoing = true
- peer.state = peerStateHoldsoff
-
- metaMessage, _, _ := s.assemblePeerMessage(&peer, &pendingTransactions)
-
- a.Equal(len(metaMessage.transactionGroups), 1)
- a.True(reflect.DeepEqual(metaMessage.transactionGroups[0], pendingTransactions.pendingTransactionsGroups[0]))
-
-}
-
-// TestLocallyGeneratedTransactions Separately tests that generating transactions are being
-// correctly made given a signed transaction group array.
-func TestLocallyGeneratedTransactions(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- pendingTransactions := &pendingTransactionGroupsSnapshot{}
-
- s := syncState{}
-
- pendingTransactions.latestLocallyOriginatedGroupCounter = 1
-
- a.Equal(s.locallyGeneratedTransactions(pendingTransactions), []pooldata.SignedTxGroup{})
-
- pendingTransactions.pendingTransactionsGroups = []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- LocallyOriginated: true,
- EncodedLength: 2,
- },
- pooldata.SignedTxGroup{
- LocallyOriginated: false,
- EncodedLength: 1,
- },
- pooldata.SignedTxGroup{
- LocallyOriginated: true,
- EncodedLength: 3,
- },
- }
-
- pendingTransactions.latestLocallyOriginatedGroupCounter = pooldata.InvalidSignedTxGroupCounter
-
- a.Equal(s.locallyGeneratedTransactions(pendingTransactions), []pooldata.SignedTxGroup{})
-
- pendingTransactions.latestLocallyOriginatedGroupCounter = 1
-
- expected := []pooldata.SignedTxGroup{
-
- pooldata.SignedTxGroup{
- LocallyOriginated: true,
- EncodedLength: 2,
- },
-
- pooldata.SignedTxGroup{
- LocallyOriginated: true,
- EncodedLength: 3,
- },
- }
-
- a.Equal(s.locallyGeneratedTransactions(pendingTransactions), expected)
-
-}
-
-type mockBacklogThreadPool struct {
- execpool.BacklogPool
- enqueueCalled *int
-}
-
-func (b *mockBacklogThreadPool) EnqueueBacklog(enqueueCtx context.Context, t execpool.ExecFunc, arg interface{}, out chan interface{}) error {
- if b.enqueueCalled != nil {
- *b.enqueueCalled++
- }
-
- return nil
-}
-
-// TestEnqueue directly tests that enqueue will call the Done() function for the messageSendWaitGroup
-func TestEnqueue(t *testing.T) {
-
- partitiontest.PartitionTest(t)
-
- s := syncState{clock: timers.MakeMonotonicClock(time.Now())}
- s.log = mockAsyncLogger{}
- s.node = &mockNodeConnector{}
- s.threadpool = execpool.MakeBacklog(execpool.MakePool(t), 5, execpool.LowPriority, t)
-
- prof := makeProfiler(2*time.Millisecond, s.clock, s.log, 3*time.Millisecond)
- s.profiler = prof
-
- asyncEncoder := messageAsyncEncoder{
- state: &s,
- messageData: sentMessageMetadata{
- message: &transactionBlockMessage{
- Version: txnBlockMessageVersion,
- Round: 1,
- },
- peer: &Peer{},
- },
- roundClock: timers.MakeMonotonicClock(time.Now()),
- }
-
- asyncEncoder.enqueue()
-
- // Wait for the enqueued function to return the messageSendWaitGroup
- s.messageSendWaitGroup.Wait()
-
- // Dummy require to make sure we pass this test...the real value of this test
- // is to make sure that the wait group is appropriately set
- require.True(t, true)
-
-}
-
-// TestSendMessageLoop tests the send message loop
-func TestSendMessageLoop(t *testing.T) {
-
- partitiontest.PartitionTest(t)
-
- enqueueCalled := 0
-
- s := syncState{
- clock: timers.MakeMonotonicClock(time.Now()),
- scheduler: makePeerScheduler(),
- }
- s.log = mockAsyncLogger{}
- // Get a large amount of signed txns with a low data exchange rate
- // to get partial messages to trigger peerOpsClearInterruptible
- s.node = &mockAsyncNodeConnector{largeTxnGroup: true}
- s.threadpool = &mockBacklogThreadPool{enqueueCalled: &enqueueCalled}
-
- prof := makeProfiler(2*time.Millisecond, s.clock, s.log, 3*time.Millisecond)
- s.profiler = prof
- s.interruptablePeersMap = make(map[*Peer]int)
-
- peers := []*Peer{
- // peerOpsReschedule
- &Peer{
- recentSentTransactions: makeTransactionCache(10, 20, 10),
- requestedTransactionsModulator: 2,
- // Reduced rate to trigger partial messages
- dataExchangeRate: 10,
- // greater than 0 for state machine logic
- nextStateTimestamp: 1 * time.Millisecond,
- },
- &Peer{
- recentSentTransactions: makeTransactionCache(10, 20, 10),
- requestedTransactionsModulator: 2,
- // Reduced rate to trigger partial messages
- dataExchangeRate: 10,
- // greater than 0 for state machine logic
- nextStateTimestamp: 1 * time.Millisecond,
- },
- }
-
- // Add the peers to test that peerOpsClearInterruptible removes them
-
- for _, p := range peers {
- s.interruptablePeers = append(s.interruptablePeers, p)
- s.interruptablePeersMap[p] = len(s.interruptablePeers) - 1
- }
-
- // The deadline is set to a ridiculously high number to make sure that we cycle through all our peers
- // and not break
- s.sendMessageLoop(s.clock.Since(), s.clock.DeadlineMonitorAt(s.clock.Since()+5*time.Minute), peers)
-
- require.Equal(t, 2, enqueueCalled)
- require.Equal(t, 0, len(s.interruptablePeersMap))
-
-}
-
-// TestEvaluateOutgoingMessage tests the evaluateOutgoingMessage function of syncState
-func TestEvaluateOutgoingMessage(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- s := syncState{clock: timers.MakeMonotonicClock(time.Now())}
- s.log = mockAsyncLogger{}
-
- peer := Peer{
- recentSentTransactions: makeTransactionCache(10, 20, 10),
- }
-
- sentMessage := sentMessageMetadata{
- sentTimestamp: time.Duration(time.Millisecond * 1234),
- message: &transactionBlockMessage{Round: 3},
- sequenceNumber: 42,
- projectedSequenceNumber: 44,
- encodedMessageSize: 23,
- peer: &peer,
- }
-
- s.evaluateOutgoingMessage(sentMessage)
- // This should be zero because sequenceNumber and projectedSequenceNumber are not equal
- a.Equal(peer.lastSentMessageTimestamp, 0*time.Millisecond)
-
- a.Equal(peer.lastSentMessageSequenceNumber, uint64(42))
- a.Equal(peer.lastSentMessageRound, basics.Round(3))
- a.Equal(peer.lastSentMessageSize, 23)
-
- sentMessage.sequenceNumber = sentMessage.projectedSequenceNumber
-
- s.evaluateOutgoingMessage(sentMessage)
- a.Equal(peer.lastSentMessageTimestamp, 1234*time.Millisecond)
-
- a.Equal(peer.lastSentMessageSequenceNumber, uint64(44))
- a.Equal(peer.lastSentMessageRound, basics.Round(3))
- a.Equal(peer.lastSentMessageSize, 23)
-}
diff --git a/txnsync/peer.go b/txnsync/peer.go
deleted file mode 100644
index b33ed0ed1..000000000
--- a/txnsync/peer.go
+++ /dev/null
@@ -1,852 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "math"
- "sort"
- "time"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
-)
-
-//msgp:ignore peerState
-type peerState int
-
-//msgp:ignore peersOps
-type peersOps int
-
-//msgp:ignore messageConstructionOps
-type messageConstructionOps int
-
-const maxIncomingBloomFilterHistory = 200
-
-// shortTermRecentTransactionsSentBufferLength is the size of the short term storage for the recently sent transaction ids.
-// it should be configured sufficiently high so that any number of transaction sent would not exceed that number before
-// the other peer has a chance of sending a feedback. ( when the feedback is received, we will store these IDs into the long-term cache )
-const shortTermRecentTransactionsSentBufferLength = 5000
-
-// pendingUnconfirmedRemoteMessages is the number of messages we would cache before receiving a feedback from the other
-// peer that these message have been accepted. The general guideline here is that if we have a message every 200ms on one side
-// and a message every 20ms on the other, then the ratio of 200/20 = 10, should be the number of required messages (min).
-const pendingUnconfirmedRemoteMessages = 20
-
-// longTermRecentTransactionsSentBufferLength is the size of the long term transaction id cache.
-const longTermRecentTransactionsSentBufferLength = 15000
-const minDataExchangeRateThreshold = 500 * 1024 // 500KB/s, which is ~3.9Mbps
-const maxDataExchangeRateThreshold = 100 * 1024 * 1024 / 8 // 100Mbps
-const defaultDataExchangeRate = minDataExchangeRateThreshold
-const defaultRelayToRelayDataExchangeRate = 10 * 1024 * 1024 / 8 // 10Mbps
-const bloomFilterRetryCount = 3 // number of bloom filters we would try against each transaction group before skipping it.
-const maxTransactionGroupTrackers = 15 // number of different bloom filter parameters we store before rolling over
-
-const (
- // peerStateStartup is before the timeout for the sending the first message to the peer has reached.
- // for an outgoing peer, it means that an incoming message arrived, and one or more messages need to be sent out.
- peerStateStartup peerState = iota
- // peerStateHoldsoff is set once a message was sent to a peer, and we're holding off before sending additional messages.
- peerStateHoldsoff
- // peerStateInterrupt is set once the holdoff period for the peer have expired.
- peerStateInterrupt
- // peerStateLateBloom is set for outgoing peers on relays, indicating that the next message should be a bloom filter only message.
- peerStateLateBloom
-
- peerOpsSendMessage peersOps = 1
- peerOpsSetInterruptible peersOps = 2
- peerOpsClearInterruptible peersOps = 4
- peerOpsReschedule peersOps = 8
-
- messageConstBloomFilter messageConstructionOps = 1
- messageConstTransactions messageConstructionOps = 2
- messageConstNextMinDelay messageConstructionOps = 4
- messageConstUpdateRequestParams messageConstructionOps = 8
-
- // defaultSignificantMessageThreshold is the minimal transmitted message size which would be used for recalculating the
- // data exchange rate.
- defaultSignificantMessageThreshold = 50000
-)
-
-// incomingBloomFilter stores an incoming bloom filter, along with the associated round number.
-// the round number allow us to prune filters from rounds n-2 and below.
-type incomingBloomFilter struct {
- filter *testableBloomFilter
- round basics.Round
-}
-
-// Peer contains peer-related data which extends the data "known" and managed by the network package.
-type Peer struct {
- // networkPeer is the network package exported peer. It's created on construction and never change afterward.
- networkPeer interface{}
- // isOutgoing defines whether the peer is an outgoing peer or not. For relays, this is meaningful as these have
- // slightly different message timing logic.
- isOutgoing bool
- // significantMessageThreshold is the minimal transmitted message size which would be used for recalculating the
- // data exchange rate. When significantMessageThreshold is equal to math.MaxUint64, no data exchange rate updates would be
- // performed.
- significantMessageThreshold uint64
- // state defines the peer state ( in terms of state machine state ). It's touched only by the sync main state machine
- state peerState
-
- log Logger
-
- // lastRound is the latest round reported by the peer.
- lastRound basics.Round
-
- // incomingMessages contains the incoming messages from this peer. This heap help us to reorder the incoming messages so that
- // we could process them in the tcp-transport order.
- incomingMessages messageOrderingHeap
-
- // nextReceivedMessageSeq is a counter containing the next message sequence number that we expect to see from this peer.
- nextReceivedMessageSeq uint64 // the next message seq that we expect to receive from that peer; implies that all previous messages have been accepted.
-
- // recentIncomingBloomFilters contains the recent list of bloom filters sent from the peer. When considering sending transactions, we check this
- // array to determine if the peer already has this message.
- recentIncomingBloomFilters []incomingBloomFilter
-
- // recentSentTransactions contains the recently sent transactions. It's needed since we don't want to rely on the other peer's bloom filter while
- // sending back-to-back messages.
- recentSentTransactions *transactionCache
- // recentSentTransactionsRound is the round associated with the cache of recently sent transactions. We keep this variable around so that we can
- // flush the cache on every round so that we can give pending transaction another chance of being transmitted.
- recentSentTransactionsRound basics.Round
-
- // these two fields describe "what does that peer asked us to send it"
- requestedTransactionsModulator byte
- requestedTransactionsOffset byte
-
- // lastSentMessageSequenceNumber is the last sequence number of the message that we sent.
- lastSentMessageSequenceNumber uint64
- // lastSentMessageRound is the round the last sent message was sent on. The timestamps are relative to the beginning of the round
- // and therefore need to be evaluated togather.
- lastSentMessageRound basics.Round
- // lastSentMessageTimestamp the timestamp at which the last message was sent.
- lastSentMessageTimestamp time.Duration
- // lastSentMessageSize is the encoded message size of the last sent message
- lastSentMessageSize int
- // lastSentBloomFilter is the last bloom filter that was sent to this peer.
- // This bloom filter could be stale if no bloom filter was included in the last message.
- lastSentBloomFilter bloomFilter
-
- // sentFilterParams records the Round and max txn group counter of the last filter sent to a peer (for each {Modulator,Offset}).
- // From this an efficient next filter can be calculated for just the new txns, or a full filter after a Round turnover.
- sentFilterParams sentFilters
-
- // lastConfirmedMessageSeqReceived is the last message sequence number that was confirmed by the peer to have been accepted.
- lastConfirmedMessageSeqReceived uint64
- lastReceivedMessageLocalRound basics.Round
- lastReceivedMessageTimestamp time.Duration
- lastReceivedMessageSize int
- lastReceivedMessageNextMsgMinDelay time.Duration
-
- // dataExchangeRate is the combined upload/download rate in bytes/second
- dataExchangeRate uint64
- // cachedLatency is the measured network latency of a peer, updated every round
- cachedLatency time.Duration
-
- // these two fields describe "what does the local peer want the remote peer to send back"
- localTransactionsModulator byte
- localTransactionsBaseOffset byte
-
- // lastTransactionSelectionTracker tracks the last transaction group counter that we've evaluated on the selectPendingTransactions method.
- // it used to ensure that on subsequent calls, we won't need to scan the entire pending transactions array from the beginning.
- // the implementation here is breaking it up per request params, so that we can apply the above logic per request params ( i.e. different
- // offset/modulator ), as well as add retry attempts for multiple bloom filters.
- lastTransactionSelectionTracker transactionGroupCounterTracker
-
- // nextStateTimestamp indicates the next timestamp where the peer state would need to be changed.
- // it used to allow sending partial message while retaining the "next-beta time", or, in the case of outgoing relays,
- // its being used to hold when we need to send the last (bloom) message.
- nextStateTimestamp time.Duration
- // messageSeriesPendingTransactions contain the transactions we are sending in the current "message-series". It allows us to pick a given
- // "snapshot" from the transaction pool, and send that "snapshot" to completion before attempting to re-iterate.
- messageSeriesPendingTransactions []pooldata.SignedTxGroup
-
- // transactionPoolAckCh is passed to the transaction handler when incoming transaction arrives. The channel is passed upstream, so that once
- // a transaction is added to the transaction pool, we can get some feedback for that.
- transactionPoolAckCh chan uint64
-
- // transactionPoolAckMessages maintain a list of the recent incoming messages sequence numbers whose transactions were added fully to the transaction
- // pool. This list is being flushed out every time we send a message to the peer.
- transactionPoolAckMessages []uint64
-
- // used by the selectPendingTransactions method, the lastSelectedTransactionsCount contains the number of entries selected on the previous iteration.
- // this value is used to optimize the memory preallocation for the selection IDs array.
- lastSelectedTransactionsCount int
-}
-
-// requestParamsGroupCounterState stores the latest group counters for a given set of request params.
-// we use this to ensure we can have multiple iteration of bloom filter scanning over each individual
-// transaction group. This method allow us to reduce the bloom filter errors while avoid scanning the
-// list of transactions redundently.
-//msgp:ignore transactionGroupCounterState
-type requestParamsGroupCounterState struct {
- offset byte
- modulator byte
- groupCounters [bloomFilterRetryCount]uint64
-}
-
-// transactionGroupCounterTracker manages the group counter state for each request param.
-//msgp:ignore transactionGroupCounterTracker
-type transactionGroupCounterTracker []requestParamsGroupCounterState
-
-// get returns the group counter for a given set of request param.
-func (t *transactionGroupCounterTracker) get(offset, modulator byte) uint64 {
- i := t.index(offset, modulator)
- if i >= 0 {
- return (*t)[i].groupCounters[0]
- }
- return 0
-}
-
-// set updates the group counter for a given set of request param. If no such request
-// param currently exists, it create it.
-func (t *transactionGroupCounterTracker) set(offset, modulator byte, counter uint64) {
- i := t.index(offset, modulator)
- if i >= 0 {
- (*t)[i].groupCounters[0] = counter
- return
- }
- // if it doesn't exists -
- state := requestParamsGroupCounterState{
- offset: offset,
- modulator: modulator,
- }
- state.groupCounters[0] = counter
-
- if len(*t) == maxTransactionGroupTrackers {
- // shift all entries by one.
- copy((*t)[0:], (*t)[1:])
- (*t)[maxTransactionGroupTrackers-1] = state
- } else {
- *t = append(*t, state)
- }
-}
-
-// roll the counters for a given requests params, so that we would go back and
-// rescan some of the previous transaction groups ( but not all !) when selectPendingTransactions is called.
-func (t *transactionGroupCounterTracker) roll(offset, modulator byte) {
- i := t.index(offset, modulator)
- if i < 0 {
- return
- }
-
- if (*t)[i].groupCounters[1] >= (*t)[i].groupCounters[0] {
- return
- }
- firstGroupCounter := (*t)[i].groupCounters[0]
- copy((*t)[i].groupCounters[0:], (*t)[i].groupCounters[1:])
- (*t)[i].groupCounters[bloomFilterRetryCount-1] = firstGroupCounter
-}
-
-// index is a helper method for the transactionGroupCounterTracker, helping to locate the index of
-// a requestParamsGroupCounterState in the array that matches the provided request params. The method
-// uses a linear search, which works best against small arrays.
-func (t *transactionGroupCounterTracker) index(offset, modulator byte) int {
- for i, counter := range *t {
- if counter.offset == offset && counter.modulator == modulator {
- return i
- }
- }
- return -1
-}
-
-func makePeer(networkPeer interface{}, isOutgoing bool, isLocalNodeRelay bool, cfg *config.Local, log Logger, latency time.Duration) *Peer {
- p := &Peer{
- networkPeer: networkPeer,
- isOutgoing: isOutgoing,
- recentSentTransactions: makeTransactionCache(shortTermRecentTransactionsSentBufferLength, longTermRecentTransactionsSentBufferLength, pendingUnconfirmedRemoteMessages),
- dataExchangeRate: defaultDataExchangeRate,
- cachedLatency: latency,
- transactionPoolAckCh: make(chan uint64, maxAcceptedMsgSeq),
- transactionPoolAckMessages: make([]uint64, 0, maxAcceptedMsgSeq),
- significantMessageThreshold: defaultSignificantMessageThreshold,
- log: log,
- }
- if isLocalNodeRelay {
- p.requestedTransactionsModulator = 1
- p.dataExchangeRate = defaultRelayToRelayDataExchangeRate
- }
- if cfg.TransactionSyncDataExchangeRate > 0 {
- p.dataExchangeRate = cfg.TransactionSyncDataExchangeRate
- p.significantMessageThreshold = math.MaxUint64
- }
- if cfg.TransactionSyncSignificantMessageThreshold > 0 && cfg.TransactionSyncDataExchangeRate == 0 {
- p.significantMessageThreshold = cfg.TransactionSyncSignificantMessageThreshold
- }
- // increase the number of total created peers.
- txsyncCreatedPeersTotal.Inc(nil)
- return p
-}
-
-// GetNetworkPeer returns the network peer associated with this particular peer.
-func (p *Peer) GetNetworkPeer() interface{} {
- return p.networkPeer
-}
-
-// GetTransactionPoolAckChannel returns the transaction pool ack channel
-func (p *Peer) GetTransactionPoolAckChannel() chan uint64 {
- return p.transactionPoolAckCh
-}
-
-// dequeuePendingTransactionPoolAckMessages removed the pending entries from transactionPoolAckCh and add them to transactionPoolAckMessages
-func (p *Peer) dequeuePendingTransactionPoolAckMessages() {
- for {
- select {
- case msgSeq := <-p.transactionPoolAckCh:
- if len(p.transactionPoolAckMessages) == maxAcceptedMsgSeq {
- p.transactionPoolAckMessages = append(p.transactionPoolAckMessages[1:], msgSeq)
- } else {
- p.transactionPoolAckMessages = append(p.transactionPoolAckMessages, msgSeq)
- }
- default:
- return
- }
- }
-}
-
-// outgoing related methods :
-
-// getAcceptedMessages returns the content of the transactionPoolAckMessages and clear the existing buffer.
-func (p *Peer) getAcceptedMessages() []uint64 {
- p.dequeuePendingTransactionPoolAckMessages()
- acceptedMessages := p.transactionPoolAckMessages
- p.transactionPoolAckMessages = make([]uint64, 0, maxAcceptedMsgSeq)
- return acceptedMessages
-}
-
-func (p *Peer) selectPendingTransactions(pendingTransactions []pooldata.SignedTxGroup, sendWindow time.Duration, round basics.Round, bloomFilterSize int) (selectedTxns []pooldata.SignedTxGroup, selectedTxnIDs []transactions.Txid, partialTransactionsSet bool) {
- // if peer is too far back, don't send it any transactions ( or if the peer is not interested in transactions )
- if p.lastRound < round.SubSaturate(1) || p.requestedTransactionsModulator == 0 {
- return nil, nil, false
- }
-
- if len(p.messageSeriesPendingTransactions) > 0 {
- pendingTransactions = p.messageSeriesPendingTransactions
- }
-
- if len(pendingTransactions) == 0 {
- return nil, nil, false
- }
-
- // flush the recent sent transaction cache on the beginning of a new round to give pending transactions another
- // chance of being transmitted.
- if p.recentSentTransactionsRound != round {
- p.recentSentTransactions.reset()
- p.recentSentTransactionsRound = round
- }
-
- windowLengthBytes := int(uint64(sendWindow) * p.dataExchangeRate / uint64(time.Second))
- windowLengthBytes -= bloomFilterSize
-
- accumulatedSize := 0
-
- lastTransactionSelectionGroupCounter := p.lastTransactionSelectionTracker.get(p.requestedTransactionsOffset, p.requestedTransactionsModulator)
-
- startIndex := sort.Search(len(pendingTransactions), func(i int) bool {
- return pendingTransactions[i].GroupCounter >= lastTransactionSelectionGroupCounter
- })
-
- selectedIDsSliceLength := len(pendingTransactions) - startIndex
- if selectedIDsSliceLength > p.lastSelectedTransactionsCount*2 {
- selectedIDsSliceLength = p.lastSelectedTransactionsCount * 2
- }
- selectedTxnIDs = make([]transactions.Txid, 0, selectedIDsSliceLength)
- selectedTxns = make([]pooldata.SignedTxGroup, 0, selectedIDsSliceLength)
-
- windowSizedReached := false
- hasMorePendingTransactions := false
-
- // create a list of all the bloom filters that might need to be tested. This list excludes bloom filters
- // which has the same modulator and a different offset.
- var effectiveBloomFilters []int
- effectiveBloomFilters = make([]int, 0, len(p.recentIncomingBloomFilters))
- for filterIdx := len(p.recentIncomingBloomFilters) - 1; filterIdx >= 0; filterIdx-- {
- if p.recentIncomingBloomFilters[filterIdx].filter == nil {
- continue
- }
- if p.recentIncomingBloomFilters[filterIdx].filter.encodingParams.Modulator != p.requestedTransactionsModulator || p.recentIncomingBloomFilters[filterIdx].filter.encodingParams.Offset != p.requestedTransactionsOffset {
- continue
- }
- effectiveBloomFilters = append(effectiveBloomFilters, filterIdx)
- }
-
- // removedTxn := 0
- grpIdx := startIndex
-scanLoop:
- for ; grpIdx < len(pendingTransactions); grpIdx++ {
- txID := pendingTransactions[grpIdx].GroupTransactionID
-
- // check if the peer would be interested in these messages -
- if p.requestedTransactionsModulator > 1 {
- if txidToUint64(txID)%uint64(p.requestedTransactionsModulator) != uint64(p.requestedTransactionsOffset) {
- continue
- }
- }
-
- // filter out transactions that we already previously sent.
- if p.recentSentTransactions.contained(txID) {
- // we already sent that transaction. no need to send again.
- continue
- }
-
- // check if the peer already received these messages from a different source other than us.
- for _, filterIdx := range effectiveBloomFilters {
- if p.recentIncomingBloomFilters[filterIdx].filter.test(txID) {
- // removedTxn++
- continue scanLoop
- }
- }
-
- if windowSizedReached {
- hasMorePendingTransactions = true
- break
- }
- selectedTxns = append(selectedTxns, pendingTransactions[grpIdx])
- selectedTxnIDs = append(selectedTxnIDs, txID)
-
- // add the size of the transaction group
- accumulatedSize += pendingTransactions[grpIdx].EncodedLength
-
- if accumulatedSize > windowLengthBytes {
- windowSizedReached = true
- }
- }
-
- p.lastSelectedTransactionsCount = len(selectedTxnIDs)
-
- // if we've over-allocated, resize the buffer; This becomes important on relays,
- // as storing these arrays can consume considerable amount of memory.
- if len(selectedTxnIDs)*2 < cap(selectedTxnIDs) {
- exactBuffer := make([]transactions.Txid, len(selectedTxnIDs))
- copy(exactBuffer, selectedTxnIDs)
- selectedTxnIDs = exactBuffer
- }
-
- // update the lastTransactionSelectionGroupCounter if needed -
- // if we selected any transaction to be sent, update the lastTransactionSelectionGroupCounter with the latest
- // group counter. If the startIndex was *after* the last pending transaction, it means that we don't
- // need to update the lastTransactionSelectionGroupCounter since it's already ahead of everything in the pending transactions.
- if grpIdx >= 0 && startIndex < len(pendingTransactions) {
- if grpIdx == len(pendingTransactions) {
- if grpIdx > 0 {
- p.lastTransactionSelectionTracker.set(p.requestedTransactionsOffset, p.requestedTransactionsModulator, pendingTransactions[grpIdx-1].GroupCounter+1)
- }
- } else {
- p.lastTransactionSelectionTracker.set(p.requestedTransactionsOffset, p.requestedTransactionsModulator, pendingTransactions[grpIdx].GroupCounter)
- }
- }
-
- if !hasMorePendingTransactions {
- // we're done with the current sequence.
- p.messageSeriesPendingTransactions = nil
- }
-
- // fmt.Printf("selectPendingTransactions : selected %d transactions, %d not needed and aborted after exceeding data length %d/%d more = %v\n", len(selectedTxnIDs), removedTxn, accumulatedSize, windowLengthBytes, hasMorePendingTransactions)
-
- return selectedTxns, selectedTxnIDs, hasMorePendingTransactions
-}
-
-// getLocalRequestParams returns the local requests params
-func (p *Peer) getLocalRequestParams() (offset, modulator byte) {
- return p.localTransactionsBaseOffset, p.localTransactionsModulator
-}
-
-// update the peer once the message was sent successfully.
-func (p *Peer) updateMessageSent(txMsg *transactionBlockMessage, selectedTxnIDs []transactions.Txid, timestamp time.Duration, sequenceNumber uint64, messageSize int) {
- p.recentSentTransactions.addSlice(selectedTxnIDs, sequenceNumber, timestamp)
- p.lastSentMessageSequenceNumber = sequenceNumber
- p.lastSentMessageRound = txMsg.Round
- p.lastSentMessageTimestamp = timestamp
- p.lastSentMessageSize = messageSize
-}
-
-// update the peer's lastSentBloomFilter.
-func (p *Peer) updateSentBoomFilter(filter bloomFilter, round basics.Round) {
- if filter.encodedLength > 0 {
- p.lastSentBloomFilter = filter
- p.sentFilterParams.setSentFilter(filter, round)
- }
-}
-
-// setLocalRequestParams stores the peer request params.
-func (p *Peer) setLocalRequestParams(offset, modulator uint64) {
- if modulator > 255 {
- modulator = 255
- }
- p.localTransactionsModulator = byte(modulator)
- if modulator != 0 {
- p.localTransactionsBaseOffset = byte(offset % modulator)
- }
-}
-
-// peers array functions
-
-// incomingPeersOnly scan the input peers array and return a subset of the peers that are incoming peers.
-func incomingPeersOnly(peers []*Peer) (incomingPeers []*Peer) {
- incomingPeers = make([]*Peer, 0, len(peers))
- for _, peer := range peers {
- if !peer.isOutgoing {
- incomingPeers = append(incomingPeers, peer)
- }
- }
- return
-}
-
-// incoming related functions
-
-// addIncomingBloomFilter keeps the most recent {maxIncomingBloomFilterHistory} filters
-func (p *Peer) addIncomingBloomFilter(round basics.Round, incomingFilter *testableBloomFilter, currentRound basics.Round) {
- minRound := currentRound.SubSaturate(2)
- if round < minRound {
- // ignore data from the past
- return
- }
- bf := incomingBloomFilter{
- round: round,
- filter: incomingFilter,
- }
- elemOk := func(i int) bool {
- ribf := p.recentIncomingBloomFilters[i]
- if ribf.filter == nil {
- return false
- }
- if ribf.round < minRound {
- return false
- }
- if incomingFilter.clearPrevious && ribf.filter.encodingParams.Offset == incomingFilter.encodingParams.Offset && ribf.filter.encodingParams.Modulator == incomingFilter.encodingParams.Modulator {
- return false
- }
- return true
- }
- // compact the prior list to the front of the array.
- // order doesn't matter.
- pos := 0
- last := len(p.recentIncomingBloomFilters) - 1
- oldestRound := currentRound + 1
- firstOfOldest := -1
- for pos <= last {
- if elemOk(pos) {
- if p.recentIncomingBloomFilters[pos].round < oldestRound {
- oldestRound = p.recentIncomingBloomFilters[pos].round
- firstOfOldest = pos
- }
- pos++
- continue
- }
- p.recentIncomingBloomFilters[pos] = p.recentIncomingBloomFilters[last]
- p.recentIncomingBloomFilters[last].filter = nil // GC
- last--
- }
- p.recentIncomingBloomFilters = p.recentIncomingBloomFilters[:last+1]
- // Simple case: append
- if last+1 < maxIncomingBloomFilterHistory {
- p.recentIncomingBloomFilters = append(p.recentIncomingBloomFilters, bf)
- return
- }
- // Too much traffic case: replace the first thing we find of the oldest round
- if firstOfOldest >= 0 {
- p.recentIncomingBloomFilters[firstOfOldest] = bf
- return
- }
- // This line should be unreachable, but putting in an error log to test that assumption.
- p.log.Error("addIncomingBloomFilter failed to trim p.recentIncomingBloomFilters (new filter lost)")
-}
-
-func (p *Peer) updateRequestParams(modulator, offset byte) {
- p.requestedTransactionsModulator = modulator
- p.requestedTransactionsOffset = offset
-}
-
-// update the recentSentTransactions with the incoming transaction groups. This would prevent us from sending the received transactions back to the
-// peer that sent it to us. This comes in addition to the bloom filter, if being sent by the other peer.
-func (p *Peer) updateIncomingTransactionGroups(txnGroups []pooldata.SignedTxGroup) {
- for _, txnGroup := range txnGroups {
- if len(txnGroup.Transactions) > 0 {
- // The GroupTransactionID field is not yet updated, so we'll be calculating it's value here and passing it.
- p.recentSentTransactions.add(txnGroup.Transactions.ID())
- }
- }
-}
-
-func (p *Peer) updateIncomingMessageTiming(timings timingParams, currentRound basics.Round, currentTime time.Duration, timeInQueue time.Duration, peerLatency time.Duration, incomingMessageSize int) {
- p.lastConfirmedMessageSeqReceived = timings.RefTxnBlockMsgSeq
- // if we received a message that references our previous message, see if they occurred on the same round
- if p.lastConfirmedMessageSeqReceived == p.lastSentMessageSequenceNumber && p.lastSentMessageRound == currentRound && p.lastSentMessageTimestamp > 0 {
- // if so, we might be able to calculate the bandwidth.
- timeSinceLastMessageWasSent := currentTime - timeInQueue - p.lastSentMessageTimestamp
- networkMessageSize := uint64(p.lastSentMessageSize + incomingMessageSize)
- if timings.ResponseElapsedTime != 0 && peerLatency > 0 && timeSinceLastMessageWasSent > time.Duration(timings.ResponseElapsedTime)+peerLatency && networkMessageSize >= p.significantMessageThreshold {
- networkTrasmitTime := timeSinceLastMessageWasSent - time.Duration(timings.ResponseElapsedTime) - peerLatency
- dataExchangeRate := uint64(time.Second) * networkMessageSize / uint64(networkTrasmitTime)
-
- // clamp data exchange rate to realistic metrics
- if dataExchangeRate < minDataExchangeRateThreshold {
- dataExchangeRate = minDataExchangeRateThreshold
- } else if dataExchangeRate > maxDataExchangeRateThreshold {
- dataExchangeRate = maxDataExchangeRateThreshold
- }
- // fmt.Printf("incoming message : updating data exchange to %d; network msg size = %d+%d, transmit time = %v\n", dataExchangeRate, p.lastSentMessageSize, incomingMessageSize, networkTrasmitTime)
- p.dataExchangeRate = dataExchangeRate
- }
-
- // given that we've (maybe) updated the data exchange rate, we need to clear out the lastSendMessage information
- // so we won't use that again on a subsequent incoming message.
- p.lastSentMessageSequenceNumber = 0
- p.lastSentMessageRound = 0
- p.lastSentMessageTimestamp = 0
- p.lastSentMessageSize = 0
- }
- p.lastReceivedMessageLocalRound = currentRound
- p.lastReceivedMessageTimestamp = currentTime - timeInQueue
- p.lastReceivedMessageSize = incomingMessageSize
- p.lastReceivedMessageNextMsgMinDelay = time.Duration(timings.NextMsgMinDelay) * time.Nanosecond
- p.recentSentTransactions.acknowledge(timings.AcceptedMsgSeq)
-}
-
-// advancePeerState is called when a peer schedule arrives, before we're doing any operation.
-// The method would determine whether a message need to be sent, and adjust the peer state
-// accordingly.
-func (p *Peer) advancePeerState(currenTime time.Duration, isRelay bool) (ops peersOps) {
- if isRelay {
- if p.isOutgoing {
- // outgoing peers are "special", as they respond to messages rather then generating their own.
- // we need to figure the special state needed for "late bloom filter message"
- switch p.state {
- case peerStateStartup:
- p.nextStateTimestamp = currenTime + p.lastReceivedMessageNextMsgMinDelay
- messagesCount := p.lastReceivedMessageNextMsgMinDelay / messageTimeWindow
- if messagesCount <= 2 {
- // we have time to send only a single message. This message need to include both transactions and bloom filter.
- p.state = peerStateLateBloom
- } else {
- // we have enough time to send multiple messages, make the first n-1 message have no bloom filter, and have the last one
- // include a bloom filter.
- p.state = peerStateHoldsoff
- }
-
- // send a message
- ops |= peerOpsSendMessage
- case peerStateHoldsoff:
- // calculate how more messages we can send ( if needed )
- messagesCount := (p.nextStateTimestamp - currenTime) / messageTimeWindow
- if messagesCount <= 2 {
- // we have time to send only a single message. This message need to include both transactions and bloom filter.
- p.state = peerStateLateBloom
- }
-
- // send a message
- ops |= peerOpsSendMessage
-
- // the rescehduling would be done in the sendMessageLoop, since we need to know if additional messages are needed.
- case peerStateLateBloom:
- // send a message
- ops |= peerOpsSendMessage
-
- default:
- // this isn't expected, so we can just ignore this.
- // todo : log
- }
- } else {
- // non-outgoing
- switch p.state {
- case peerStateStartup:
- p.state = peerStateHoldsoff
- fallthrough
- case peerStateHoldsoff:
- // prepare the send message array.
- ops |= peerOpsSendMessage
- default: // peerStateInterrupt & peerStateLateBloom
- // this isn't expected, so we can just ignore this.
- // todo : log
- }
- }
- } else {
- switch p.state {
- case peerStateStartup:
- p.state = peerStateHoldsoff
- ops |= peerOpsSendMessage
-
- case peerStateHoldsoff:
- if p.nextStateTimestamp == 0 {
- p.state = peerStateInterrupt
- ops |= peerOpsSetInterruptible | peerOpsReschedule
- } else {
- ops |= peerOpsSendMessage
- }
-
- case peerStateInterrupt:
- p.state = peerStateHoldsoff
- ops |= peerOpsSendMessage | peerOpsClearInterruptible
-
- default: // peerStateLateBloom
- // this isn't expected, so we can just ignore this.
- // todo : log
- }
- }
- return ops
-}
-
-// getMessageConstructionOps constructs the messageConstructionOps that would be needed when
-// sending a message back to the peer. The two arguments are:
-// - isRelay defines whether the local node is a relay.
-// - fetchTransactions defines whether the local node is interested in receiving transactions from
-// the peer ( this is essentially allow us to skip receiving transactions for non-relays that aren't going
-// to make any proposals )
-func (p *Peer) getMessageConstructionOps(isRelay bool, fetchTransactions bool) (ops messageConstructionOps) {
- // on outgoing peers of relays, we want have some custom logic.
- if isRelay {
- if p.isOutgoing {
- switch p.state {
- case peerStateLateBloom:
- if p.localTransactionsModulator != 0 {
- ops |= messageConstBloomFilter
- }
- case peerStateHoldsoff:
- ops |= messageConstTransactions
- }
- } else {
- if p.requestedTransactionsModulator != 0 {
- ops |= messageConstTransactions
- if p.nextStateTimestamp == 0 && p.localTransactionsModulator != 0 {
- ops |= messageConstBloomFilter
- }
- }
- if p.nextStateTimestamp == 0 {
- ops |= messageConstNextMinDelay
- }
- }
- ops |= messageConstUpdateRequestParams
- } else {
- ops |= messageConstTransactions // send transactions to the other peer
- if fetchTransactions {
- switch p.localTransactionsModulator {
- case 0:
- // don't send bloom filter.
- case 1:
- // special optimization if we have just one relay that we're connected to:
- // generate the bloom filter only once per 2*beta message.
- // this would reduce the number of unneeded bloom filters generation dramatically.
- // that single relay would know which messages it previously sent us, and would refrain from
- // sending these again.
- if p.nextStateTimestamp == 0 {
- ops |= messageConstBloomFilter
- }
- default:
- ops |= messageConstBloomFilter
- }
- ops |= messageConstUpdateRequestParams
- }
- }
- return ops
-}
-
-// getNextScheduleOffset is called after a message was sent to the peer, and we need to evaluate the next
-// scheduling time.
-func (p *Peer) getNextScheduleOffset(isRelay bool, beta time.Duration, partialMessage bool, currentTime time.Duration) (offset time.Duration, ops peersOps) {
- if partialMessage {
- if isRelay {
- if p.isOutgoing {
- if p.state == peerStateHoldsoff {
- // we have enough time to send another message.
- return messageTimeWindow, peerOpsReschedule
- }
- } else {
- // a partial message was sent to an incoming peer
- if p.nextStateTimestamp > time.Duration(0) {
- if currentTime+messageTimeWindow*2 < p.nextStateTimestamp {
- // we have enough time to send another message
- return messageTimeWindow, peerOpsReschedule
- }
- // we don't have enough time to send another message.
- next := p.nextStateTimestamp
- p.nextStateTimestamp = 0
- return next - currentTime, peerOpsReschedule
- }
- p.nextStateTimestamp = currentTime + 2*beta
- return messageTimeWindow, peerOpsReschedule
- }
- } else {
- if p.nextStateTimestamp > time.Duration(0) {
- if currentTime+messageTimeWindow*2 < p.nextStateTimestamp {
- // we have enough time to send another message
- return messageTimeWindow, peerOpsReschedule
- }
- // we don't have enough time, so don't get into "interrupt" state,
- // since we're already sending messages.
- next := p.nextStateTimestamp
- p.nextStateTimestamp = 0
- p.messageSeriesPendingTransactions = nil
- // move to the next state.
- p.state = peerStateHoldsoff
- return next - currentTime, peerOpsReschedule | peerOpsClearInterruptible
-
- }
- // this is the first message
- p.nextStateTimestamp = currentTime + 2*beta
-
- return messageTimeWindow, peerOpsReschedule
- }
- } else {
- if isRelay {
- if p.isOutgoing {
- if p.state == peerStateHoldsoff {
- // even that we're done now, we need to send another message that would contain the bloom filter
- p.state = peerStateLateBloom
-
- bloomMessageExtrapolatedSendingTime := messageTimeWindow
- // try to improve the sending time by using the last sent bloom filter as the expected message size.
- if p.lastSentBloomFilter.containedTxnsRange.transactionsCount > 0 {
- lastBloomFilterSize := uint64(p.lastSentBloomFilter.encodedLength)
- bloomMessageExtrapolatedSendingTime = time.Duration(lastBloomFilterSize * p.dataExchangeRate)
- }
-
- next := p.nextStateTimestamp - bloomMessageExtrapolatedSendingTime - currentTime
- p.nextStateTimestamp = 0
- return next, peerOpsReschedule
- }
- p.nextStateTimestamp = 0
- } else {
- // we sent a message to an incoming connection. No more data to send.
- if p.nextStateTimestamp > time.Duration(0) {
- next := p.nextStateTimestamp
- p.nextStateTimestamp = 0
- return next - currentTime, peerOpsReschedule
- }
- p.nextStateTimestamp = 0
- return beta * 2, peerOpsReschedule
- }
- } else {
- if p.nextStateTimestamp > time.Duration(0) {
- next := p.nextStateTimestamp
- p.nextStateTimestamp = 0
- return next - currentTime, peerOpsReschedule
- }
- return beta, peerOpsReschedule
- }
- }
- return time.Duration(0), 0
-}
-
-func (p *Peer) networkAddress() string {
- if peerAddress, supportInterface := p.networkPeer.(networkPeerAddress); supportInterface {
- return peerAddress.GetAddress()
- }
- return ""
-}
diff --git a/txnsync/peer_test.go b/txnsync/peer_test.go
deleted file mode 100644
index fb18e7d22..000000000
--- a/txnsync/peer_test.go
+++ /dev/null
@@ -1,1011 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-// TestGetSetTransactionGroupCounterTracker tests the get/set capabilities for the counter
-func TestGetSetTransactionGroupCounterTracker(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- grp := transactionGroupCounterTracker{}
-
- a.Equal(grp.get(0, 0), uint64(0))
-
- grp.set(0, 0, 2)
- a.Equal(grp.get(0, 0), uint64(2))
- grp.set(1, 0, 5)
- a.Equal(grp.get(1, 0), uint64(5))
-
- grp = transactionGroupCounterTracker{}
-
- for i := 0; i < maxTransactionGroupTrackers+1; i++ {
- grp.set(byte(i+1), 0, uint64(i+1))
- }
-
- a.True(reflect.DeepEqual(grp[0], requestParamsGroupCounterState{offset: 2, groupCounters: [bloomFilterRetryCount]uint64{2, 0, 0}}))
-
- for i := 1; i < maxTransactionGroupTrackers; i++ {
- if !reflect.DeepEqual(grp[i], requestParamsGroupCounterState{offset: byte(i + 2), groupCounters: [bloomFilterRetryCount]uint64{uint64(i + 2), 0, 0}}) {
- t.Errorf("For value %d got: %v", i, grp[i])
- }
- }
-
-}
-
-// TestIndexTransactionGroupCounterTracker tests the index function specifically
-func TestIndexTransactionGroupCounterTracker(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- grp := transactionGroupCounterTracker{
- {
- offset: 0,
- modulator: 0,
- groupCounters: [bloomFilterRetryCount]uint64{},
- },
- {
- offset: 1,
- modulator: 23,
- groupCounters: [bloomFilterRetryCount]uint64{},
- },
- }
-
- a := require.New(t)
- a.Equal(grp.index(2, 2), -1)
- a.Equal(grp.index(0, 0), 0)
- a.Equal(grp.index(1, 23), 1)
-}
-
-// TestRollTransactionGroupCounterTracker tests that rolling works and doesn't panic
-func TestRollTransactionGroupCounterTracker(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- defer func() {
- if r := recover(); r != nil {
- a.False(true, "Something panicked during TestRollTransactionGroupCounterTracker")
- }
- }()
-
- grp1 := transactionGroupCounterTracker{
- {
- offset: 0,
- modulator: 0,
- groupCounters: [bloomFilterRetryCount]uint64{},
- },
- }
-
- grp1.roll(0, 0)
- grp1.roll(0, 2)
-
- grp2 := transactionGroupCounterTracker{
- {
- offset: 0,
- modulator: 0,
- groupCounters: [bloomFilterRetryCount]uint64{0, 1},
- },
- }
-
- grp2.roll(0, 0)
- grp2.roll(0, 2)
- a.True(grp2[0].groupCounters[0] == 0)
- a.True(grp2[0].groupCounters[1] == 1)
-
- grp3 := transactionGroupCounterTracker{
- {
- offset: 0,
- modulator: 0,
- groupCounters: [bloomFilterRetryCount]uint64{2, 1, 0},
- },
- }
-
- grp3.roll(0, 0)
- a.Equal(grp3[0].groupCounters, [bloomFilterRetryCount]uint64{1, 0, 2})
- grp3.roll(0, 1)
-
-}
-
-// TestGetNextScheduleOffset tests the state machine of getNextScheduleOffset
-func TestGetNextScheduleOffset(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- type args struct {
- isRelay bool
- beta time.Duration
- partialMessage bool
- currentTime time.Duration
- }
-
- type results struct {
- offset time.Duration
- ops peersOps
- }
-
- tests := []struct {
- fxn func(p *Peer)
- arg args
- result results
- postFxn func(s peerState) bool
- }{
- {
- fxn: func(p *Peer) { p.nextStateTimestamp = 2 * time.Millisecond },
- arg: args{false, time.Millisecond, false, 1 * time.Millisecond},
- result: results{1 * time.Millisecond, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) { p.nextStateTimestamp = 0 * time.Millisecond },
- arg: args{false, 3 * time.Millisecond, false, 1 * time.Millisecond},
- result: results{3 * time.Millisecond, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- // --
-
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.nextStateTimestamp = 0 * time.Millisecond },
- arg: args{true, 3 * time.Millisecond, false, 1 * time.Millisecond},
- result: results{6 * time.Millisecond, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.nextStateTimestamp = 9 * time.Millisecond },
- arg: args{true, 3 * time.Millisecond, false, 1 * time.Millisecond},
- result: results{8 * time.Millisecond, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- // --
-
- {
- fxn: func(p *Peer) { p.isOutgoing = true; p.state = peerStateLateBloom },
- arg: args{true, 3 * time.Millisecond, false, 1 * time.Millisecond},
- result: results{0 * time.Millisecond, 0},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) {
- p.isOutgoing = true
- p.state = peerStateHoldsoff
- p.lastSentBloomFilter.containedTxnsRange.transactionsCount = 0
- p.nextStateTimestamp = 2 * messageTimeWindow
- },
- arg: args{true, 3 * time.Millisecond, false, 1 * time.Millisecond},
- result: results{messageTimeWindow - 1*time.Millisecond, peerOpsReschedule},
- postFxn: func(s peerState) bool { return s == peerStateLateBloom },
- },
-
- {
- fxn: func(p *Peer) { p.nextStateTimestamp = 0 },
- arg: args{false, 3 * time.Millisecond, true, 1 * time.Millisecond},
- result: results{messageTimeWindow, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) { p.nextStateTimestamp = messageTimeWindow * 3 },
- arg: args{false, 3 * time.Millisecond, true, 1 * time.Millisecond},
- result: results{messageTimeWindow, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) { p.nextStateTimestamp = messageTimeWindow * 2 },
- arg: args{false, 3 * time.Millisecond, true, 1 * time.Millisecond},
- result: results{2*messageTimeWindow - 1*time.Millisecond, peerOpsReschedule | peerOpsClearInterruptible},
- postFxn: func(s peerState) bool { return s == peerStateHoldsoff },
- },
-
- // --
-
- {
- fxn: func(p *Peer) { p.isOutgoing = true },
- arg: args{true, 3 * time.Millisecond, true, 1 * time.Millisecond},
- result: results{time.Duration(0), 0},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) { p.isOutgoing = true; p.state = peerStateHoldsoff },
- arg: args{true, 3 * time.Millisecond, true, 1 * time.Millisecond},
- result: results{messageTimeWindow, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.nextStateTimestamp = 0 },
- arg: args{true, 3 * time.Millisecond, true, 1 * time.Millisecond},
- result: results{messageTimeWindow, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
-
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.nextStateTimestamp = 9 * time.Millisecond },
- arg: args{true, 3 * time.Millisecond, true, 1 * time.Millisecond},
- result: results{8 * time.Millisecond, peerOpsReschedule},
- postFxn: func(s peerState) bool { return true },
- },
- }
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
-
- for i, test := range tests {
- t.Run(fmt.Sprint(i), func(t *testing.T) {
- p := makePeer(nil, true, true, &config, log, 0)
- if test.fxn != nil {
- test.fxn(p)
- }
-
- offset, ops := p.getNextScheduleOffset(test.arg.isRelay, test.arg.beta, test.arg.partialMessage, test.arg.currentTime)
-
- r := results{offset, ops}
-
- if !test.postFxn(p.state) {
- t.Errorf("getNextScheduleOffset() state = %v", p.state)
- }
-
- if !reflect.DeepEqual(r, test.result) {
- t.Errorf("getNextScheduleOffset() = %v, want %v", r, test.result)
- }
-
- })
- }
-
-}
-
-// TestGetMessageConstructionOps tests the state machine of getMessageConstructionOps
-func TestGetMessageConstructionOps(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- type args struct {
- isRelay bool
- fetchTransactions bool
- }
-
- peerStateLateBloomState := peerStateLateBloom
- peerStateHoldsoffState := peerStateHoldsoff
-
- tests := []struct {
- fxn func(p *Peer)
- arg args
- result messageConstructionOps
- state *peerState
- }{
- {
- fxn: func(p *Peer) {},
- arg: args{false, false},
- result: messageConstTransactions,
- state: nil,
- },
- {
- fxn: func(p *Peer) { p.localTransactionsModulator = 0 },
- arg: args{false, true},
- result: messageConstUpdateRequestParams | messageConstTransactions,
- state: nil,
- },
- {
- fxn: func(p *Peer) { p.localTransactionsModulator = 1; p.nextStateTimestamp = 1 },
- arg: args{false, true},
- result: messageConstUpdateRequestParams | messageConstTransactions,
- state: nil,
- },
- {
- fxn: func(p *Peer) { p.localTransactionsModulator = 1; p.nextStateTimestamp = 0 },
- arg: args{false, true},
- result: messageConstUpdateRequestParams | messageConstTransactions | messageConstBloomFilter,
- state: nil,
- },
- {
- fxn: func(p *Peer) { p.localTransactionsModulator = 1; p.nextStateTimestamp = 99 },
- arg: args{false, true},
- result: messageConstUpdateRequestParams | messageConstTransactions,
- state: nil,
- },
- // --
-
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.requestedTransactionsModulator = 0; p.nextStateTimestamp = 0 },
- arg: args{true, true},
- result: messageConstUpdateRequestParams | messageConstNextMinDelay,
- state: nil,
- },
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.requestedTransactionsModulator = 0; p.nextStateTimestamp = 1 },
- arg: args{true, true},
- result: messageConstUpdateRequestParams,
- state: nil,
- },
-
- {
- fxn: func(p *Peer) {
- p.isOutgoing = false
- p.localTransactionsModulator = 1
- p.requestedTransactionsModulator = 1
- p.nextStateTimestamp = 0
- },
- arg: args{true, true},
- result: messageConstUpdateRequestParams | messageConstNextMinDelay | messageConstTransactions | messageConstBloomFilter,
- state: nil,
- },
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.requestedTransactionsModulator = 1; p.nextStateTimestamp = 1 },
- arg: args{true, true},
- result: messageConstUpdateRequestParams | messageConstTransactions,
- state: nil,
- },
-
- // --
-
- {
- fxn: func(p *Peer) { p.isOutgoing = true; p.state = peerStateLateBloom; p.localTransactionsModulator = 0 },
- arg: args{true, true},
- result: messageConstUpdateRequestParams,
- state: &peerStateLateBloomState,
- },
-
- {
- fxn: func(p *Peer) { p.isOutgoing = true; p.state = peerStateLateBloom; p.localTransactionsModulator = 1 },
- arg: args{true, true},
- result: messageConstUpdateRequestParams | messageConstBloomFilter,
- state: &peerStateLateBloomState,
- },
-
- {
- fxn: func(p *Peer) { p.isOutgoing = true; p.state = peerStateHoldsoff; p.localTransactionsModulator = 1 },
- arg: args{true, true},
- result: messageConstUpdateRequestParams | messageConstTransactions,
- state: &peerStateHoldsoffState,
- },
- }
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- for i, test := range tests {
- t.Run(fmt.Sprint(i), func(t *testing.T) {
- p := makePeer(nil, true, true, &config, log, 0)
- if test.fxn != nil {
- test.fxn(p)
- }
-
- gotOps := p.getMessageConstructionOps(test.arg.isRelay, test.arg.fetchTransactions)
-
- if test.state != nil && p.state != *test.state {
- t.Errorf("getMessageConstructionOps() state = %v, want %v", p.state, test.state)
- }
-
- if gotOps != test.result {
- t.Errorf("getMessageConstructionOps() = %v, want %v", gotOps, test.result)
- }
-
- })
- }
-
-}
-
-// TestAdvancePeerState tests the state machine of advancePeerState
-func TestAdvancePeerState(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- type args struct {
- currentTime time.Duration
- isRelay bool
- }
-
- tests := []struct {
- fxn func(p *Peer)
- arg args
- result peersOps
- state peerState
- }{
- {
- fxn: func(p *Peer) { p.state = peerStateStartup },
- arg: args{time.Millisecond, false},
- result: peerOpsSendMessage,
- state: peerStateHoldsoff,
- },
- {
- fxn: func(p *Peer) { p.state = peerStateHoldsoff; p.nextStateTimestamp = 0 },
- arg: args{time.Millisecond, false},
- result: peerOpsSetInterruptible | peerOpsReschedule,
- state: peerStateInterrupt,
- },
- {
- fxn: func(p *Peer) { p.state = peerStateHoldsoff; p.nextStateTimestamp = 1 },
- arg: args{time.Millisecond, false},
- result: peerOpsSendMessage,
- state: peerStateHoldsoff,
- },
- {
- fxn: func(p *Peer) { p.state = peerStateInterrupt },
- arg: args{time.Millisecond, false},
- result: peerOpsSendMessage | peerOpsClearInterruptible,
- state: peerStateHoldsoff,
- },
- // --
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.state = peerStateStartup },
- arg: args{time.Millisecond, true},
- result: peerOpsSendMessage,
- state: peerStateHoldsoff,
- },
- {
- fxn: func(p *Peer) { p.isOutgoing = false; p.state = peerStateHoldsoff },
- arg: args{time.Millisecond, true},
- result: peerOpsSendMessage,
- state: peerStateHoldsoff,
- },
- // --
-
- {
- fxn: func(p *Peer) {
- p.isOutgoing = true
- p.state = peerStateStartup
- p.lastReceivedMessageNextMsgMinDelay = messageTimeWindow * 2
- },
- arg: args{time.Millisecond, true},
- result: peerOpsSendMessage,
- state: peerStateLateBloom,
- },
- {
- fxn: func(p *Peer) {
- p.isOutgoing = true
- p.state = peerStateStartup
- p.lastReceivedMessageNextMsgMinDelay = messageTimeWindow * 3
- },
- arg: args{time.Millisecond, true},
- result: peerOpsSendMessage,
- state: peerStateHoldsoff,
- },
-
- {
- fxn: func(p *Peer) {
- p.isOutgoing = true
- p.state = peerStateHoldsoff
- p.nextStateTimestamp = messageTimeWindow * 2
- },
- arg: args{0 * time.Millisecond, true},
- result: peerOpsSendMessage,
- state: peerStateLateBloom,
- },
-
- {
- fxn: func(p *Peer) {
- p.isOutgoing = true
- p.state = peerStateHoldsoff
- p.nextStateTimestamp = messageTimeWindow * 3
- },
- arg: args{0 * time.Millisecond, true},
- result: peerOpsSendMessage,
- state: peerStateHoldsoff,
- },
-
- {
- fxn: func(p *Peer) { p.isOutgoing = true; p.state = peerStateLateBloom },
- arg: args{time.Millisecond, true},
- result: peerOpsSendMessage,
- state: peerStateLateBloom,
- },
- }
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- for i, test := range tests {
- t.Run(string(rune(i)), func(t *testing.T) {
- p := makePeer(nil, true, true, &config, log, 0)
- if test.fxn != nil {
- test.fxn(p)
- }
-
- gotOps := p.advancePeerState(test.arg.currentTime, test.arg.isRelay)
-
- if p.state != test.state {
- t.Errorf("advancePeerState() state = %v, want %v", p.state, test.state)
- }
-
- if gotOps != test.result {
- t.Errorf("advancePeerState() = %v, want %v", gotOps, test.result)
- }
-
- })
- }
-}
-
-// TestUpdateIncomingMessageTiming tests updating the incoming message timing
-func TestUpdateIncomingMessageTiming(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
-
- currentRound := basics.Round(1)
- currentTime := time.Millisecond * 123
- currentMessageSize := int(p.significantMessageThreshold)
- timing := timingParams{NextMsgMinDelay: 42}
-
- // Test direct assignment
-
- p.lastConfirmedMessageSeqReceived = p.lastSentMessageSequenceNumber + 1
-
- p.updateIncomingMessageTiming(timing, currentRound, currentTime, 0, time.Millisecond, currentMessageSize)
-
- a.Equal(p.lastReceivedMessageLocalRound, currentRound)
- a.Equal(p.lastReceivedMessageTimestamp, currentTime)
- a.Equal(p.lastReceivedMessageSize, currentMessageSize)
- a.Equal(p.lastReceivedMessageNextMsgMinDelay, time.Duration(timing.NextMsgMinDelay)*time.Nanosecond)
-
- // Test entering if statement
-
- p.lastConfirmedMessageSeqReceived = p.lastSentMessageSequenceNumber
- p.lastSentMessageRound = currentRound
- timing.ResponseElapsedTime = 1
- p.lastSentMessageTimestamp = 1 * time.Millisecond
- currentMessageSize = maxDataExchangeRateThreshold + 1
- p.updateIncomingMessageTiming(timing, currentRound, currentTime, 0, time.Millisecond, currentMessageSize)
-
- a.Equal(uint64(maxDataExchangeRateThreshold), p.dataExchangeRate)
-
- p.lastConfirmedMessageSeqReceived = p.lastSentMessageSequenceNumber
- p.lastSentMessageRound = currentRound
- timing.ResponseElapsedTime = 1
- p.lastSentMessageTimestamp = 1 * time.Millisecond
- p.lastSentMessageSize = 0
- currentMessageSize = int(p.significantMessageThreshold)
- currentTime = time.Millisecond * 1000
- p.updateIncomingMessageTiming(timing, currentRound, currentTime, 0, time.Millisecond, currentMessageSize)
-
- a.Equal(uint64(minDataExchangeRateThreshold), p.dataExchangeRate)
-
- p.lastConfirmedMessageSeqReceived = p.lastSentMessageSequenceNumber
- p.lastSentMessageRound = currentRound
- timing.ResponseElapsedTime = uint64(time.Millisecond)
- p.lastSentMessageTimestamp = 1 * time.Millisecond
- p.lastSentMessageSize = 0
- currentMessageSize = 100000
- currentTime = time.Millisecond * 123
- p.updateIncomingMessageTiming(timing, currentRound, currentTime, time.Millisecond, time.Millisecond*100, currentMessageSize)
-
- a.Equal(uint64(5000000), p.dataExchangeRate)
-}
-
-// TestUpdateIncomingTransactionGroups tests updating the incoming transaction groups
-func TestUpdateIncomingTransactionGroups(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var txnGroups []pooldata.SignedTxGroup
-
- for i := 0; i < 10; i++ {
-
- tmp := pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{transactions.SignedTxn{
- Sig: crypto.Signature{},
- Msig: crypto.MultisigSig{},
- Lsig: transactions.LogicSig{},
- Txn: transactions.Transaction{},
- AuthAddr: basics.Address{},
- }},
- LocallyOriginated: false,
- GroupCounter: 0,
- GroupTransactionID: transactions.Txid{byte(i)},
- EncodedLength: 0,
- }
- txnGroups = append(txnGroups, tmp)
- }
-
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
-
- p.recentSentTransactions.reset()
-
- for i := 0; i < 10; i++ {
- txid := transactions.Txid{byte(i)}
- a.False(p.recentSentTransactions.contained(txid))
- }
-
-}
-
-// TestUpdateRequestParams tests updating the request parameters
-func TestUpdateRequestParams(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
- oldModulator := p.requestedTransactionsModulator
- oldOffset := p.requestedTransactionsOffset
-
- p.updateRequestParams(oldModulator, oldOffset)
- a.Equal(p.requestedTransactionsModulator, oldModulator)
- a.Equal(p.requestedTransactionsOffset, oldOffset)
-
- p.updateRequestParams(oldModulator+1, oldOffset+1)
- a.Equal(p.requestedTransactionsModulator, oldModulator+1)
- a.Equal(p.requestedTransactionsOffset, oldOffset+1)
-
-}
-
-// bloom.GenericFilter
-type nopFilter struct{}
-
-func (nf *nopFilter) Set(x []byte) {}
-func (nf *nopFilter) Test(x []byte) bool {
- return false
-}
-func (nf *nopFilter) MarshalBinary() ([]byte, error) {
- return nil, nil
-}
-func (nf *nopFilter) UnmarshalBinary(data []byte) error {
- return nil
-}
-
-// TestAddIncomingBloomFilter tests adding an incoming bloom filter
-func TestAddIncomingBloomFilter(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
-
- for i := 0; i < 2*maxIncomingBloomFilterHistory; i++ {
- bf := &testableBloomFilter{
- encodingParams: requestParams{
- _struct: struct{}{},
- Offset: byte(i),
- Modulator: 0,
- },
- filter: &nopFilter{},
- }
- p.addIncomingBloomFilter(basics.Round(i), bf, basics.Round(i))
- }
-
- // filters from current round, -1, and -2 are kept. => 3
- a.Equal(3, len(p.recentIncomingBloomFilters))
-
- for i := 0; i < 2*maxIncomingBloomFilterHistory; i++ {
- bf := &testableBloomFilter{
- encodingParams: requestParams{
- _struct: struct{}{},
- Offset: byte(i),
- Modulator: 0,
- },
- filter: &nopFilter{},
- }
- p.addIncomingBloomFilter(basics.Round(i), bf, 0)
- }
-
- a.Equal(maxIncomingBloomFilterHistory, len(p.recentIncomingBloomFilters))
-}
-
-// TestSelectPendingTransactions tests selectPendingTransactions
-func TestSelectPendingTransactions(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- type args struct {
- pendingTransactions []pooldata.SignedTxGroup
- sendWindow time.Duration
- round basics.Round
- bloomFilterSize int
- }
-
- type results struct {
- selectedTxns []pooldata.SignedTxGroup
- selectedTxnIDs []transactions.Txid
- partialTransactionsSet bool
- }
-
- tests := []struct {
- name string
- fxn func(p *Peer)
- arg args
- result results
- }{
- {"Case 1", func(p *Peer) { p.lastRound = 98 }, args{nil, time.Millisecond, 100, 0}, results{nil, nil, false}},
- {"Case 2", func(p *Peer) { p.lastRound = 101; p.requestedTransactionsModulator = 0 }, args{nil, time.Millisecond, 100, 0}, results{nil, nil, false}},
- {"Case 3", func(p *Peer) { p.lastRound = 200; p.messageSeriesPendingTransactions = nil }, args{[]pooldata.SignedTxGroup{}, time.Millisecond, 100, 0}, results{nil, nil, false}},
- }
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- p := makePeer(nil, true, true, &config, log, 0)
- if test.fxn != nil {
- test.fxn(p)
- }
- var r results
- r.selectedTxns, r.selectedTxnIDs, r.partialTransactionsSet = p.selectPendingTransactions(test.arg.pendingTransactions, test.arg.sendWindow, test.arg.round, test.arg.bloomFilterSize)
- if !reflect.DeepEqual(r, test.result) {
- t.Errorf("selectPendingTransactions() gotSelectedTxns = %v, want %v", r, test.result)
- }
- })
- }
-}
-
-// TestSelectedMessagesModulator tests the use of the modulator on the returned list
-func TestSelectedMessagesModulator(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- peer := Peer{}
-
- peer.lastRound = 10
- peer.requestedTransactionsModulator = 2
- peer.requestedTransactionsOffset = 1
- peer.lastSelectedTransactionsCount = 1
- peer.dataExchangeRate = 1000
- peer.recentSentTransactions = makeTransactionCache(10, 10, 0)
-
- dig1 := crypto.Digest{0x1, 0, 0, 0, 0, 0, 0, 0, 0}
- dig2 := crypto.Digest{0x2, 0, 0, 0, 0, 0, 0, 0, 0}
- dig3 := crypto.Digest{0x3, 0, 0, 0, 0, 0, 0, 0, 0}
- dig4 := crypto.Digest{0x4, 0, 0, 0, 0, 0, 0, 0, 0}
- dig5 := crypto.Digest{0x5, 0, 0, 0, 0, 0, 0, 0, 0}
- dig6 := crypto.Digest{0x6, 0, 0, 0, 0, 0, 0, 0, 0}
-
- a.Equal(txidToUint64(transactions.Txid(dig1)), uint64(1))
- a.Equal(txidToUint64(transactions.Txid(dig2)), uint64(2))
- a.Equal(txidToUint64(transactions.Txid(dig3)), uint64(3))
- a.Equal(txidToUint64(transactions.Txid(dig4)), uint64(4))
- a.Equal(txidToUint64(transactions.Txid(dig5)), uint64(5))
- a.Equal(txidToUint64(transactions.Txid(dig6)), uint64(6))
-
- pendingTransations := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{GroupCounter: 1, GroupTransactionID: transactions.Txid(dig1), EncodedLength: 1},
- pooldata.SignedTxGroup{GroupCounter: 2, GroupTransactionID: transactions.Txid(dig2), EncodedLength: 1},
- pooldata.SignedTxGroup{GroupCounter: 3, GroupTransactionID: transactions.Txid(dig3), EncodedLength: 1},
- pooldata.SignedTxGroup{GroupCounter: 4, GroupTransactionID: transactions.Txid(dig4), EncodedLength: 1},
- pooldata.SignedTxGroup{GroupCounter: 5, GroupTransactionID: transactions.Txid(dig5), EncodedLength: 1},
- pooldata.SignedTxGroup{GroupCounter: 6, GroupTransactionID: transactions.Txid(dig6), EncodedLength: 1},
- }
-
- selectedTxns, _, _ := peer.selectPendingTransactions(pendingTransations, time.Millisecond, 5, 0)
-
- a.Equal(len(selectedTxns), 2)
- a.Equal(selectedTxns[0].GroupCounter, uint64(1))
- a.Equal(selectedTxns[1].GroupCounter, uint64(3))
-
-}
-
-// TestGetAcceptedMessages tests get accepted messages
-func TestGetAcceptedMessages(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
-
- var testList []uint64
- chPtr := &p.transactionPoolAckCh
-
- for i := uint64(0); i < maxAcceptedMsgSeq; i++ {
- *chPtr <- i
- testList = append(testList, i)
- }
-
- a.Equal(len(*chPtr), 64)
- a.Equal(p.getAcceptedMessages(), testList)
- a.Equal(len(*chPtr), 0)
- a.Equal(len(p.transactionPoolAckMessages), 0)
-
-}
-
-// TestDequeuePendingTransactionPoolAckMessages tests dequeuePendingTransactionPoolAckMessages
-func TestDequeuePendingTransactionPoolAckMessages(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
-
- ch := p.transactionPoolAckCh
- var testList []uint64
-
- for i := uint64(0); i < maxAcceptedMsgSeq; i++ {
- ch <- i
- testList = append(testList, i)
- }
-
- p.dequeuePendingTransactionPoolAckMessages()
-
- a.Equal(p.transactionPoolAckMessages, testList)
-
- testList = testList[:0]
-
- ch = p.transactionPoolAckCh
-
- // Note the +1
- for i := uint64(0); i < (maxAcceptedMsgSeq + 1); i++ {
- if i >= maxAcceptedMsgSeq {
- // Channel is bounded at maxAcceptedMsgSeq so we need to flush it
- p.dequeuePendingTransactionPoolAckMessages()
- testList = append(testList[1:], i)
- } else {
- testList = append(testList, i)
- }
-
- ch <- i
- }
-
- p.dequeuePendingTransactionPoolAckMessages()
-
- a.Equal(p.transactionPoolAckMessages, testList)
-
-}
-
-// TestUpdateMessageSent Tests whether we can update the messages sent fields
-func TestUpdateMessageSent(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
-
- txMsg := &transactionBlockMessage{
- Version: txnBlockMessageVersion,
- Round: 42,
- }
-
- txnIds := []transactions.Txid{transactions.Txid(crypto.Hash([]byte{0x31, 0x32}))}
- timestamp := 10 * time.Second
- sequenceNumber := uint64(23)
- messageSize := 35
- bFilter := bloomFilter{}
-
- a.False(p.recentSentTransactions.contained(txnIds[0]))
-
- p.updateMessageSent(txMsg, txnIds, timestamp, sequenceNumber, messageSize)
-
- a.True(p.recentSentTransactions.contained(txnIds[0]))
- a.Equal(p.lastSentMessageSequenceNumber, sequenceNumber)
- a.Equal(p.lastSentMessageRound, txMsg.Round)
- a.Equal(p.lastSentMessageTimestamp, timestamp)
- a.Equal(p.lastSentMessageSize, messageSize)
-
- p.updateSentBoomFilter(bFilter, 0)
-
- a.Equal(p.lastSentBloomFilter, bFilter)
-
-}
-
-// TestIncomingPeersOnly Tests whether we can extract outgoing peers only
-func TestIncomingPeersOnly(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p1 := makePeer(nil, true, true, &config, log, 0)
- p2 := makePeer(nil, true, false, &config, log, 0)
- p3 := makePeer(nil, false, true, &config, log, 0)
- p4 := makePeer(nil, false, false, &config, log, 0)
-
- peers := []*Peer{p1, p2, p3, p4}
-
- incomingPeers := incomingPeersOnly(peers)
-
- a.Equal(len(incomingPeers), 2)
- a.Equal(incomingPeers[0], p3)
- a.Equal(incomingPeers[1], p4)
-}
-
-// TestLocalRequestParams Tests setting and getting local request params
-func TestLocalRequestParams(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(nil, true, true, &config, log, 0)
-
- p.setLocalRequestParams(256, 256)
- offset, modulator := p.getLocalRequestParams()
- a.Equal(offset, uint8(1))
- a.Equal(modulator, uint8(255))
-
- p.setLocalRequestParams(23, 256)
- offset, modulator = p.getLocalRequestParams()
- a.Equal(offset, uint8(23))
- a.Equal(modulator, uint8(255))
-
-}
-
-// TestSimpleGetters Tests the "simple" getters for the Peer Object
-func TestSimpleGetters(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- var sentinelInterface interface{}
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p := makePeer(sentinelInterface, true, true, &config, log, 0)
-
- a.Equal(p.GetNetworkPeer(), sentinelInterface)
- a.Equal(p.GetTransactionPoolAckChannel(), p.transactionPoolAckCh)
-}
-
-// TestMakePeer Tests the Peer factory function
-func TestMakePeer(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var sentinelInterface interface{}
- config := config.GetDefaultLocal()
- tlog := logging.TestingLog(t)
- log := wrapLogger(tlog, &config)
- p1 := makePeer(sentinelInterface, true, true, &config, log, 0)
-
- a.NotNil(p1)
- a.Equal(p1.networkPeer, sentinelInterface)
- a.Equal(p1.isOutgoing, true)
- a.Equal(p1.recentSentTransactions, makeTransactionCache(shortTermRecentTransactionsSentBufferLength, longTermRecentTransactionsSentBufferLength, pendingUnconfirmedRemoteMessages))
- a.Equal(p1.requestedTransactionsModulator, uint8(1))
- a.Equal(p1.dataExchangeRate, uint64(defaultRelayToRelayDataExchangeRate))
-
- // Check that we have different values if the local node relay is false
- p2 := makePeer(sentinelInterface, true, false, &config, log, 0)
-
- a.NotNil(p2)
- a.Equal(p1.networkPeer, sentinelInterface)
- a.Equal(p1.isOutgoing, true)
- a.Equal(p1.recentSentTransactions, makeTransactionCache(shortTermRecentTransactionsSentBufferLength, longTermRecentTransactionsSentBufferLength, pendingUnconfirmedRemoteMessages))
- a.Equal(p2.requestedTransactionsModulator, uint8(0))
- a.Equal(p2.dataExchangeRate, uint64(defaultDataExchangeRate))
-
-}
diff --git a/txnsync/peerscheduler.go b/txnsync/peerscheduler.go
deleted file mode 100644
index fc1481c85..000000000
--- a/txnsync/peerscheduler.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "container/heap"
- "sort"
- "time"
-)
-
-//msgp:ignore peerBuckets
-type peerBuckets []peerBucket
-
-type peerScheduler struct {
- peers peerBuckets
- nextPeers map[*Peer][]int // nextPeers holds an array of ordered indices where this Peer object is on the peers peerBuckets
- node NodeConnector
-}
-
-// makePeerScheduler initializes a peer scheduler object.
-func makePeerScheduler() peerScheduler {
- return peerScheduler{
- nextPeers: make(map[*Peer][]int),
- }
-}
-
-//msgp:ignore peerBucket
-type peerBucket struct {
- peer *Peer
- next time.Duration
-}
-
-// Push implements heap.Interface
-func (p *peerScheduler) Push(x interface{}) {
- entry := x.(peerBucket)
- p.peers = append(p.peers, entry)
- p.nextPeers[entry.peer] = append(p.nextPeers[entry.peer], len(p.peers)-1)
-
- if len(p.nextPeers[entry.peer]) > 1 {
- peerIndices := p.nextPeers[entry.peer]
- sort.Slice(peerIndices, func(i, j int) bool {
- return p.peers[peerIndices[i]].next < p.peers[peerIndices[j]].next
- })
- }
-
-}
-
-// Pop implements heap.Interface
-func (p *peerScheduler) Pop() interface{} {
- end := len(p.peers) - 1
- res := p.peers[end]
-
- // delete from the map only if it's the last entry
- peerIndices := p.nextPeers[res.peer]
-
- if peerIndices[0] != end {
- // this case is possible when the peer has two elements in p.peers.
- // and both have the same next value.
- for idx, x := range peerIndices {
- if x == end {
- peerIndices[0], peerIndices[idx] = peerIndices[idx], peerIndices[0]
- break
- }
- }
- }
- // the peer index must be the first entry.
- peerIndices = peerIndices[1:]
-
- // store if non-empty.
- if len(peerIndices) > 0 {
- p.nextPeers[res.peer] = peerIndices
- } else {
- delete(p.nextPeers, res.peer)
- }
-
- p.peers[end] = peerBucket{}
- p.peers = p.peers[0:end]
- return res
-}
-
-// Len implements heap.Interface
-func (p *peerScheduler) Len() int {
- return len(p.peers)
-}
-
-func (p *peerScheduler) replaceIndices(indices []int, i, j int) {
-
- for idx, x := range indices {
- if x == i {
- indices[idx] = j
- } else if x == j {
- indices[idx] = i
- }
- }
- sort.Slice(indices, func(i, j int) bool {
- return p.peers[indices[i]].next < p.peers[indices[j]].next
- })
-}
-
-// Swap implements heap.Interface
-func (p *peerScheduler) Swap(i, j int) {
- p.peers[i], p.peers[j] = p.peers[j], p.peers[i]
- if p.peers[i].peer == p.peers[j].peer {
- indices := p.nextPeers[p.peers[i].peer]
- sort.Slice(indices, func(x, y int) bool {
- return p.peers[indices[x]].next < p.peers[indices[y]].next
- })
- return
- }
- p.replaceIndices(p.nextPeers[p.peers[i].peer], i, j)
- p.replaceIndices(p.nextPeers[p.peers[j].peer], i, j)
-}
-
-// Less implements heap.Interface
-func (p *peerScheduler) Less(i, j int) bool {
- return p.peers[i].next < p.peers[j].next
-}
-
-// refresh the current schedule by creating new schedule for each of the peers.
-func (p *peerScheduler) scheduleNewRound(peers []*Peer) {
- // clear the existings peers list.
- p.peers = make(peerBuckets, 0, len(peers))
- p.nextPeers = make(map[*Peer][]int)
- for _, peer := range peers {
- peerEntry := peerBucket{peer: peer}
- peerEntry.next = kickoffTime + time.Duration(p.node.Random(uint64(randomRange)))
-
- p.peers = append(p.peers, peerEntry)
- p.nextPeers[peer] = []int{len(p.peers) - 1}
- }
- heap.Init(p)
-}
-
-func (p *peerScheduler) nextDuration() time.Duration {
- if len(p.peers) == 0 {
- return time.Duration(0)
- }
- return p.peers[0].next
-}
-
-func (p *peerScheduler) getNextPeers() (outPeers []*Peer) {
- next := p.nextDuration()
-
- // pull out of the heap all the entries that have next smaller or equal to the above next.
- for len(p.peers) > 0 && p.peers[0].next <= next {
- bucket := heap.Remove(p, 0).(peerBucket)
- outPeers = append(outPeers, bucket.peer)
- }
-
- // in many cases, we'll have only a single peer; however, in case we have multiple
- // ( which is more likely when we're "running late" ), we want to make sure to remove
- // duplicate ones.
- if len(outPeers) > 1 {
- // note that the algorithm here ensures that we retain the peer order from above
- // while dropping off recurring peers.
- peersMap := make(map[*Peer]bool, len(outPeers))
- offset := 0
- peersMap[outPeers[0]] = true
- for i := 1; i < len(outPeers); i++ {
- if peersMap[outPeers[i]] {
- // we already had this peer.
- offset++
- continue
- }
- // we haven't seen this peer.
- outPeers[i-offset] = outPeers[i]
- peersMap[outPeers[i]] = true
- }
- outPeers = outPeers[:len(outPeers)-offset]
- }
- return
-}
-
-func (p *peerScheduler) schedulePeer(peer *Peer, next time.Duration) {
- bucket := peerBucket{peer: peer, next: next}
- heap.Push(p, bucket)
-}
-
-func (p *peerScheduler) peerDuration(peer *Peer) time.Duration {
- peerIndices := p.nextPeers[peer]
- if len(peerIndices) == 0 {
- return time.Duration(0)
- }
- bucket := heap.Remove(p, peerIndices[0]).(peerBucket)
- return bucket.next
-}
diff --git a/txnsync/peerscheduler_test.go b/txnsync/peerscheduler_test.go
deleted file mode 100644
index 7da6a0dd0..000000000
--- a/txnsync/peerscheduler_test.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "math"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-// TestBasics tests that the push, pop, len, swap and less functions perform appropriately
-func TestBasics(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- ps := makePeerScheduler()
-
- require.Equal(t, 0, ps.Len())
-
- peers := []Peer{
- Peer{
- lastSentMessageSequenceNumber: 123,
- },
- Peer{
- lastSentMessageSequenceNumber: 456,
- },
- }
-
- ps.Push(peerBucket{&peers[0], 0 * time.Millisecond})
- ps.Push(peerBucket{&peers[1], 1 * time.Millisecond})
-
- require.Equal(t, 2, ps.Len())
-
- require.Equal(t, uint64(123), ps.peers[0].peer.lastSentMessageSequenceNumber)
- require.Equal(t, uint64(456), ps.peers[1].peer.lastSentMessageSequenceNumber)
- require.True(t, ps.Less(0, 1))
-
- ps.Swap(0, 1)
- require.Equal(t, uint64(123), ps.peers[1].peer.lastSentMessageSequenceNumber)
- require.Equal(t, uint64(456), ps.peers[0].peer.lastSentMessageSequenceNumber)
- require.True(t, ps.Less(1, 0))
-
- backPeer := ps.Pop().(peerBucket)
-
- require.Equal(t, uint64(123), backPeer.peer.lastSentMessageSequenceNumber)
-
- backPeer = ps.Pop().(peerBucket)
-
- require.Equal(t, uint64(456), backPeer.peer.lastSentMessageSequenceNumber)
-
- require.Equal(t, 0, ps.Len())
-}
-
-// TestSchedulerBasics tests the basic scheduler helper functions
-func TestSchedulerBasics(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- ps := makePeerScheduler()
-
- peers := []Peer{
- Peer{
- lastSentMessageSequenceNumber: 123,
- },
- Peer{
- lastSentMessageSequenceNumber: 456,
- },
- Peer{
- lastSentMessageSequenceNumber: 789,
- },
- }
-
- require.Equal(t, 0*time.Millisecond, ps.nextDuration())
- ps.schedulePeer(&peers[0], 2*time.Millisecond)
- ps.schedulePeer(&peers[1], 1*time.Millisecond)
- ps.schedulePeer(&peers[2], 3*time.Millisecond)
-
- require.Equal(t, 3, ps.Len())
-
- require.Equal(t, 1*time.Millisecond, ps.nextDuration())
-
- require.Equal(t, 3*time.Millisecond, ps.peerDuration(&peers[2]))
- require.Equal(t, 2, ps.Len())
-
- require.Equal(t, 1*time.Millisecond, ps.peerDuration(&peers[1]))
- require.Equal(t, 1, ps.Len())
-
- require.Equal(t, 2*time.Millisecond, ps.peerDuration(&peers[0]))
- require.Equal(t, 0, ps.Len())
-
- require.Equal(t, 0*time.Millisecond, ps.peerDuration(&peers[0]))
-}
-
-// TestScheduleNewRound tests the scheduleNewRound method
-func TestScheduleNewRound(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- ps := makePeerScheduler()
- ps.node = &mockNodeConnector{}
-
- peers := []Peer{
- Peer{
- lastSentMessageSequenceNumber: 123,
- },
- Peer{
- lastSentMessageSequenceNumber: 456,
- },
- Peer{
- lastSentMessageSequenceNumber: 789,
- },
- }
-
- peers2 := []Peer{
- Peer{
- lastSentMessageSequenceNumber: 321,
- },
- Peer{
- lastSentMessageSequenceNumber: 654,
- },
- Peer{
- lastSentMessageSequenceNumber: 987,
- },
- Peer{
- lastSentMessageSequenceNumber: 146,
- },
- }
-
- ps.schedulePeer(&peers[0], 2*time.Millisecond)
- ps.schedulePeer(&peers[1], 1*time.Millisecond)
- ps.schedulePeer(&peers[2], 3*time.Millisecond)
- require.Equal(t, 3, ps.Len())
-
- ps.scheduleNewRound([]*Peer{&peers2[0], &peers2[1], &peers2[2], &peers2[3]})
- require.Equal(t, 4, ps.Len())
-
-}
-
-// TestNextPeers tests that the nextPeers function
-func TestNextPeers(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- ps := makePeerScheduler()
- ps.node = &mockNodeConnector{}
-
- peers := []Peer{
- Peer{
- lastSentMessageSequenceNumber: 1,
- },
- Peer{
- lastSentMessageSequenceNumber: 2,
- },
- Peer{
- lastSentMessageSequenceNumber: 3,
- },
- }
-
- ps.schedulePeer(&peers[0], 1*time.Millisecond)
- ps.schedulePeer(&peers[1], 2*time.Millisecond)
- ps.schedulePeer(&peers[1], 2*time.Millisecond)
- ps.schedulePeer(&peers[2], 2*time.Millisecond)
-
- require.Equal(t, 4, ps.Len())
-
- outPeers := ps.getNextPeers()
-
- require.Equal(t, 3, ps.Len())
- require.Equal(t, 1, len(outPeers))
- require.Equal(t, uint64(1), outPeers[0].lastSentMessageSequenceNumber)
-
- outPeers = ps.getNextPeers()
-
- require.Equal(t, 0, ps.Len())
- require.Equal(t, 2, len(outPeers))
- require.Equal(t, uint64(3), outPeers[0].lastSentMessageSequenceNumber)
- require.Equal(t, uint64(2), outPeers[1].lastSentMessageSequenceNumber)
-
-}
-
-func TestNextPeersLargeSet(t *testing.T) {
-
- partitiontest.PartitionTest(t)
-
- ps := makePeerScheduler()
- ps.node = &mockNodeConnector{}
-
- numPeers := 100
- dupTimesPerPeer := 3
- numTimeSamples := 10
-
- peers := make([]*Peer, 0, numPeers)
- for x := 1; x <= numPeers; x++ {
- peer := Peer{lastSentMessageSequenceNumber: uint64(x)}
- peers = append(peers, &peer)
- }
- require.Equal(t, int64(0), int64(ps.nextDuration()))
-
- // Add peers with random next values
- ps.scheduleNewRound(peers)
- checkMonotonicNexts(&ps, t)
- checkIndexMatch(&ps, t)
-
- // Add nexts with defined values to guarantee duplicate values
- for dups := 0; dups < dupTimesPerPeer; dups++ {
- for x := 1; x <= numTimeSamples; x++ {
- for p := 0; p < len(peers); p++ {
- val := int64(math.Abs(math.Sin(float64(x))) * 100.00)
- ps.schedulePeer(peers[p], time.Millisecond*time.Duration(val))
- }
- }
- }
- outPeers := ps.getNextPeers()
- require.Equal(t, numPeers, len(outPeers))
- checkMonotonicNexts(&ps, t)
- checkIndexMatch(&ps, t)
-
- // Repeatedly schedule and remove peers, and varify the invariants are honored
- for dups := 0; dups < dupTimesPerPeer; dups++ {
- for x := 1; x <= numTimeSamples*numPeers; {
- for p := 0; p < len(peers); p++ {
- val := int64(math.Abs(math.Sin(float64(x))) * 10.00)
- ps.schedulePeer(peers[p], time.Millisecond*time.Duration(val))
- x++
- }
- checkMonotonicNexts(&ps, t)
- checkIndexMatch(&ps, t)
- outPeers := ps.getNextPeers()
- require.GreaterOrEqual(t, len(outPeers), 0)
- }
- }
-
- // Drain the peers and make sure goes down to 0 without errors
- for _, peerB := range ps.peers {
- prev := int64(0)
- for {
- dur := int64(ps.peerDuration(peerB.peer))
- if dur == 0 {
- break
- }
- require.GreaterOrEqual(t, dur, prev)
- prev = dur
- }
- checkMonotonicNexts(&ps, t)
- checkIndexMatch(&ps, t)
- }
-}
-
-func checkMonotonicNexts(ps *peerScheduler, t *testing.T) {
- for _, s := range ps.nextPeers {
- prevIdx := -1
- for _, idx := range s {
- if prevIdx == -1 {
- prevIdx = idx
- }
- require.LessOrEqual(t, int64(ps.peers[prevIdx].next), int64(ps.peers[idx].next))
- prevIdx = idx
- }
- }
-}
-
-func checkIndexMatch(ps *peerScheduler, t *testing.T) {
- for peer, s := range ps.nextPeers {
- for _, peerIdx := range s {
- require.Equal(t, peer, ps.peers[peerIdx].peer)
-
- }
- }
-}
diff --git a/txnsync/profiler.go b/txnsync/profiler.go
deleted file mode 100644
index 0332faa3a..000000000
--- a/txnsync/profiler.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "time"
-
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/logging/telemetryspec"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-//msgp:ignore profElements
-type profElements int
-
-const (
- profElementIdle = iota
- profElementTxChange
- profElementNewRound
- profElementPeerState
- profElementIncomingMsg
- profElementOutgoingMsg
- profElementNextOffset
-
- // detached elements
- profElementGetTxnsGroups
- profElementAssembleMessage
- profElementMakeBloomFilter
- profElementTxnsSelection
-
- profElementLast
- profFirstDetachedElement = profElementGetTxnsGroups
-)
-
-// The profiler struct provides profiling information regarding the main loop performance
-// characteristics. Using it provides statistics information about the recent duty cycle utilization,
-// that could be used when trying to throttle the accuracy and performance of the transaction sync.
-type profiler struct {
- // clock used as the source clock for measurements.
- clock timers.WallClock
- // elements contains the elements we want to measure. The user of this struct would not interact
- // with this variable directly. Instead, he/she would use getElement to get the element for a specific
- // profElements and use the start()/end() methods on that element.
- elements []*element
- // log is used to report the outcome of the measuring.
- log logging.Logger
-
- // profile contains all the elements indices, in order of arrival. It allows us to maintain a moving window.
- profile []int
- // profileSum is the total amount of time tracked by the profile array.
- profileSum time.Duration
- // profileSpan is the max span of the array ( or - the window ) that we would like to maintain.
- profileSpan time.Duration
- // lastProfileLog is the last time we've logged to the telemetry.
- lastProfileLog time.Duration
- // logInterval defines what is the frequency at which we send an event to the telemetry. Zero to disable.
- logInterval time.Duration
-}
-
-// element represent a single tracked element that would be profiled.
-type element struct {
- // id is the index of the element in the profiler's elements array.
- id int
- // lastStart is the timestamp of the last time we called "start"
- lastStart time.Duration
- // profiler points to the parent profiler.
- profiler *profiler
- // times contains the times we've monitored for this element.
- times []time.Duration
- // total is the total accumulated time for this element ( i.e. sum(times) )
- total time.Duration
- // detached indicate whether this is a detached elements or not. Detached elements don't add to the total amount of time
- // counted by the profiler, allowing them to overlap with other elements.
- detached bool
-}
-
-func makeProfiler(span time.Duration, clock timers.WallClock, log logging.Logger, logInterval time.Duration) *profiler {
- prof := &profiler{
- profileSpan: span,
- clock: clock,
- log: log,
- logInterval: logInterval,
- }
- prof.createElements()
- return prof
-}
-
-func (p *profiler) createElements() {
- for element := 0; element < profElementLast; element++ {
- p.createElement(element >= profFirstDetachedElement)
- }
-}
-
-func (p *profiler) createElement(detached bool) *element {
- i := len(p.elements)
- e := &element{
- id: i,
- profiler: p,
- detached: detached,
- }
- p.elements = append(p.elements, e)
- return e
-}
-
-func (p *profiler) getElement(el profElements) *element {
- return p.elements[el]
-}
-
-func (p *profiler) prune() {
- for p.profileSum > p.profileSpan {
- // remove the first elements from the profile.
- i := p.profile[0]
- e := p.elements[i]
- dt := e.times[0]
-
- e.total -= dt
- if !e.detached {
- p.profileSum -= dt
- }
-
- p.profile = p.profile[1:]
- e.times = e.times[1:]
- }
-}
-
-func (p *profiler) maybeLogProfile() {
- // do we have the log profile enabled ?
- if p.logInterval == 0 {
- return
- }
- // do we have enough samples ? ( i.e. at least 50% sample time )
- if p.profileSum < p.profileSpan/2 {
- return
- }
- // have we sent metrics recently ?
- curTime := p.clock.Since()
- if curTime-p.lastProfileLog <= p.logInterval {
- return
- }
- p.lastProfileLog = curTime
- p.logProfile()
-}
-
-func (p *profiler) logProfile() {
- metrics := telemetryspec.TransactionSyncProfilingMetrics{
- TotalOps: uint64(len(p.profile)),
- IdleOps: uint64(len(p.elements[profElementIdle].times)),
- TransactionPoolChangedOps: uint64(len(p.elements[profElementTxChange].times)),
- NewRoundOps: uint64(len(p.elements[profElementNewRound].times)),
- PeerStateOps: uint64(len(p.elements[profElementPeerState].times)),
- IncomingMsgOps: uint64(len(p.elements[profElementIncomingMsg].times)),
- OutgoingMsgOps: uint64(len(p.elements[profElementOutgoingMsg].times)),
- NextOffsetOps: uint64(len(p.elements[profElementNextOffset].times)),
- GetTxnGroupsOps: uint64(len(p.elements[profElementGetTxnsGroups].times)),
- AssembleMessageOps: uint64(len(p.elements[profElementAssembleMessage].times)),
- MakeBloomFilterOps: uint64(len(p.elements[profElementMakeBloomFilter].times)),
- SelectPendingTransactionsOps: uint64(len(p.elements[profElementTxnsSelection].times)),
-
- TotalDuration: p.profileSum,
- IdlePercent: float64(p.elements[profElementIdle].total) * 100.0 / float64(p.profileSum),
- TransactionPoolChangedPercent: float64(p.elements[profElementTxChange].total) * 100.0 / float64(p.profileSum),
- NewRoundPercent: float64(p.elements[profElementNewRound].total) * 100.0 / float64(p.profileSum),
- PeerStatePercent: float64(p.elements[profElementPeerState].total) * 100.0 / float64(p.profileSum),
- IncomingMsgPercent: float64(p.elements[profElementIncomingMsg].total) * 100.0 / float64(p.profileSum),
- OutgoingMsgPercent: float64(p.elements[profElementOutgoingMsg].total) * 100.0 / float64(p.profileSum),
- NextOffsetPercent: float64(p.elements[profElementNextOffset].total) * 100.0 / float64(p.profileSum),
- GetTxnGroupsPercent: float64(p.elements[profElementGetTxnsGroups].total) * 100.0 / float64(p.profileSum),
- AssembleMessagePercent: float64(p.elements[profElementAssembleMessage].total) * 100.0 / float64(p.profileSum),
- MakeBloomFilterPercent: float64(p.elements[profElementMakeBloomFilter].total) * 100.0 / float64(p.profileSum),
- SelectPendingTransactionsPercent: float64(p.elements[profElementTxnsSelection].total) * 100.0 / float64(p.profileSum),
- }
-
- p.log.Metrics(telemetryspec.Transaction, metrics, struct{}{})
-}
-
-func (e *element) start() {
- if e.profiler.logInterval > 0 {
- e.lastStart = e.profiler.clock.Since()
- }
-}
-
-func (e *element) end() {
- if e.profiler.logInterval == 0 {
- return
- }
- diff := e.profiler.clock.Since() - e.lastStart
- e.total += diff
- e.times = append(e.times, diff)
- e.profiler.profile = append(e.profiler.profile, e.id)
-
- if !e.detached {
- e.profiler.profileSum += diff
- e.profiler.prune()
- e.profiler.maybeLogProfile()
- }
-}
diff --git a/txnsync/profiler_test.go b/txnsync/profiler_test.go
deleted file mode 100644
index 6ac4e0f06..000000000
--- a/txnsync/profiler_test.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/logging/telemetryspec"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-// Create a logger that hooks the "Metrics" function to signal that we have
-// indeed sent some metrics
-type metricsLogger struct {
- Logger
- sentLogger *bool
-}
-
-func makeMetricsLogger(sentLogger *bool) metricsLogger {
- return metricsLogger{
- sentLogger: sentLogger,
- }
-}
-
-func (n metricsLogger) Metrics(category telemetryspec.Category, metrics telemetryspec.MetricDetails, details interface{}) {
- *n.sentLogger = true
-}
-
-// TestPrune Test the prune capabilities of the profiler. We want to simulate
-// the conditions to show that the profiler will "remove" elements when needed.
-func TestPrune(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- prof := makeProfiler(2*time.Millisecond, nil, nil, 3*time.Millisecond)
- a := require.New(t)
-
- a.NotNil(prof)
- a.NotNil(prof.elements)
-
- prof.profileSum = 2
- prof.profileSpan = 1
-
- prof.profile = append(prof.profile, 0)
-
- firstElement := &prof.elements[0]
-
- (*firstElement).detached = false
-
- (*firstElement).times = append((*firstElement).times, time.Duration(2), time.Duration(2))
- (*firstElement).total = time.Duration(4)
-
- a.Equal(len(prof.profile), 1)
- a.Equal(len((*firstElement).times), 2)
-
- prof.prune()
-
- a.Equal(len(prof.profile), 0)
- a.Equal(len((*firstElement).times), 1)
- a.Equal((*firstElement).total, time.Duration(2))
-
-}
-
-// TestProfilerStartEndZero Test functionality if the log interval is 0
-func TestProfilerStartEndZero(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- s.clock = timers.MakeMonotonicClock(time.Now())
- prof := makeProfiler(2*time.Millisecond, s.clock, nil, 0*time.Millisecond)
- a := require.New(t)
-
- a.NotNil(prof)
- a.NotNil(prof.elements)
-
- firstElement := &prof.elements[0]
-
- oldLastStart := (*firstElement).lastStart
- oldTotal := (*firstElement).total
-
- (*firstElement).start()
- time.Sleep(5 * time.Millisecond)
- (*firstElement).end()
-
- a.Equal(oldLastStart, (*firstElement).lastStart)
- a.Equal(oldTotal, (*firstElement).total)
-
-}
-
-// TestProfilerStartEndEnabled Test profiler functionality if log interval is non-zero.
-// This test will assume that a successful start()-end() call
-// will produce a non-zero profile sum.
-//
-// This test forces "detached element" logic to be run.
-func TestProfilerStartEndEnabled(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- s.clock = timers.MakeMonotonicClock(time.Now())
- tmp := false
- // Need to supply logger just in case log profile is called
- nl := makeMetricsLogger(&tmp)
- prof := makeProfiler(2*time.Millisecond, s.clock, nl, 3*time.Millisecond)
-
- a := require.New(t)
-
- a.NotNil(prof)
- a.NotNil(prof.elements)
-
- element := prof.getElement(0)
-
- // Ensure that we trip the if statement
- element.detached = false
-
- a.Equal(element.total, time.Duration(0))
- a.Equal(len(element.times), 0)
- a.Equal(len(element.profiler.profile), 0)
- a.Equal(element.profiler.profileSum, time.Duration(0))
-
- element.start()
- element.end()
- a.NotEqual(element.total, time.Duration(0))
- a.Equal(len(element.times), 1)
- a.Equal(len(element.profiler.profile), 1)
- a.NotEqual(element.profiler.profileSum, time.Duration(0))
-
-}
-
-// TestProfilerStartEndDisabled Test start-end functionality with detached elements.
-func TestProfilerStartEndDisabled(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- s.clock = timers.MakeMonotonicClock(time.Now())
- prof := makeProfiler(2*time.Millisecond, s.clock, nil, 3*time.Millisecond)
- a := require.New(t)
-
- a.NotNil(prof)
- a.NotNil(prof.elements)
-
- element := prof.getElement(0)
-
- // Set to true so we don't trip the if statement for now
- element.detached = true
-
- a.Equal(element.total, time.Duration(0))
- a.Equal(len(element.times), 0)
- a.Equal(len(element.profiler.profile), 0)
-
- element.start()
- element.end()
- a.NotEqual(element.total, time.Duration(0))
- a.Equal(len(element.times), 1)
- a.Equal(len(element.profiler.profile), 1)
-
-}
-
-// TestMaybeLogProfile Test that Metrics are only sent when all conditions are met and not
-// sent if they are not.
-func TestMaybeLogProfile(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- sentMetrics := false
-
- var s syncState
- nl := makeMetricsLogger(&sentMetrics)
- s.clock = timers.MakeMonotonicClock(time.Now())
- prof := makeProfiler(2*time.Millisecond, s.clock, nl, 3*time.Millisecond)
-
- a := require.New(t)
-
- a.NotNil(prof)
- a.NotNil(prof.elements)
-
- // --
- prof.logInterval = 0
- prof.maybeLogProfile()
- a.False(sentMetrics)
- prof.logInterval = 1
-
- // --
-
- prof.profileSum = 1
- prof.profileSpan = 4
- prof.maybeLogProfile()
- a.False(sentMetrics)
-
- prof.profileSum = 4
- prof.profileSpan = 4
-
- // --
- prof.logInterval = 2147483647 // Make this stupidly high to make sure we hit the if statement
- prof.lastProfileLog = prof.clock.Since()
- prof.maybeLogProfile()
- a.False(sentMetrics)
-
- // The last call to maybeLogProfile should set lastProfileLog to cur time
- prof.logInterval = 1 * time.Nanosecond
- // Sleep some time so we are above 1 ns of duration with a high degree of certainty
- time.Sleep(200 * time.Millisecond)
-
- prof.maybeLogProfile()
- a.True(sentMetrics)
-
-}
-
-// TestGetElement Tests that getting an element returns it properly
-func TestGetElement(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- prof := makeProfiler(2*time.Millisecond, s.clock, s.log, 3*time.Millisecond)
- a := require.New(t)
-
- a.NotNil(prof)
- a.NotNil(prof.elements)
-
- for i := 0; i < profElementLast; i++ {
- e := prof.getElement(profElements(i))
-
- a.Equal(e.id, i)
- }
-
-}
-
-// TestMakeProfiler Ensures that makeProfiler() returns a valid profiler.
-func TestMakeProfiler(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var s syncState
- prof := makeProfiler(2*time.Millisecond, s.clock, s.log, 3*time.Millisecond)
- a := require.New(t)
-
- a.NotNil(prof)
- a.NotNil(prof.elements)
-
- a.Equal(prof.profileSpan, 2*time.Millisecond)
- a.Equal(prof.logInterval, 3*time.Millisecond)
- a.Equal(len(prof.elements), profElementLast)
-
- for i, e := range prof.elements {
- a.Equal(e.id, i)
- a.Equal(e.profiler, prof)
-
- if i < profFirstDetachedElement {
- a.False(e.detached)
- } else {
- a.True(e.detached)
- }
- }
-
-}
diff --git a/txnsync/sent_filter.go b/txnsync/sent_filter.go
deleted file mode 100644
index 603a73a12..000000000
--- a/txnsync/sent_filter.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "github.com/algorand/go-algorand/data/basics"
-)
-
-//msgp:ignore sentFilterStat
-type sentFilterStat struct {
- // .Modulator .Offset
- EncodingParams requestParams
-
- // lastCounter is the group counter of the last txn group included in a sent filter
- lastCounter uint64
-
- round basics.Round
-}
-
-// sentFilters is the set of filter stats for one peer to another peer.
-// There should be at most one entry per (Modulator,Offset)
-//msgp:ignore sentFilters
-type sentFilters []sentFilterStat
-
-const maxSentFilterSet = 10
-
-func (sf *sentFilters) setSentFilter(filter bloomFilter, round basics.Round) {
- encodingParams := filter.encoded.EncodingParams
- for i, sfs := range *sf {
- if sfs.EncodingParams == encodingParams {
- (*sf)[i].lastCounter = filter.containedTxnsRange.lastCounter
- (*sf)[i].round = round
- return
- }
- }
- nsf := sentFilterStat{
- EncodingParams: encodingParams,
- lastCounter: filter.containedTxnsRange.lastCounter,
- round: round,
- }
- *sf = append(*sf, nsf)
- // trim oldest content if we're too long
- for len(*sf) > maxSentFilterSet {
- oldestRound := round
- popCandidate := -1
- for i, sfs := range *sf {
- if sfs.round < oldestRound {
- oldestRound = sfs.round
- popCandidate = i
- }
- }
- if popCandidate >= 0 {
- last := len(*sf) - 1
- (*sf)[popCandidate] = (*sf)[last]
- *sf = (*sf)[:last]
- break
- }
- }
-}
-
-func (sf *sentFilters) nextFilterGroup(encodingParams requestParams) (lastCounter uint64, round basics.Round) {
- for _, sfs := range *sf {
- if sfs.EncodingParams == encodingParams {
- return sfs.lastCounter + 1, sfs.round
- }
- }
- return 0, 0 // include everything since the start
-}
diff --git a/txnsync/sent_filter_test.go b/txnsync/sent_filter_test.go
deleted file mode 100644
index f441dc76d..000000000
--- a/txnsync/sent_filter_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "testing"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-func TestSentFilterSet(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var sf sentFilters
-
- ep := requestParams{Offset: 4, Modulator: 255}
- bf := bloomFilter{
- containedTxnsRange: transactionsRange{1, 42, 99},
- encoded: encodedBloomFilter{EncodingParams: ep},
- }
-
- // what goes in ..
- sf.setSentFilter(bf, basics.Round(13))
-
- // .. comes out
- lastCounter, lcRound := sf.nextFilterGroup(ep)
- a.Equal(uint64(42+1), lastCounter)
- a.Equal(basics.Round(13), lcRound)
-
- for i := 0; i < maxSentFilterSet; i++ {
- bf.encoded.EncodingParams.Offset++
- sf.setSentFilter(bf, basics.Round(14+i))
- }
-
- // first oldest entry will have been lost
- lastCounter, lcRound = sf.nextFilterGroup(ep)
- a.Equal(uint64(0), lastCounter)
- a.Equal(basics.Round(0), lcRound)
-}
diff --git a/txnsync/service.go b/txnsync/service.go
deleted file mode 100644
index d131b2805..000000000
--- a/txnsync/service.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "context"
- "sync"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/util/execpool"
-)
-
-// Service is the transaction sync main service object.
-type Service struct {
- ctx context.Context
- cancelCtx context.CancelFunc
- waitGroup sync.WaitGroup
-
- state syncState
-}
-
-// MakeTransactionSyncService creates a new Service object
-func MakeTransactionSyncService(log logging.Logger, conn NodeConnector, isRelay bool, genesisID string, genesisHash crypto.Digest, cfg config.Local, threadpool execpool.BacklogPool) *Service {
- s := &Service{
- state: syncState{
- node: conn,
- log: wrapLogger(log, &cfg),
- isRelay: isRelay,
- genesisID: genesisID,
- genesisHash: genesisHash,
- config: cfg,
- threadpool: threadpool,
- scheduler: makePeerScheduler(),
- },
- }
- s.state.service = s
- s.state.xorBuilder.MaxIterations = 10
- return s
-}
-
-// Start starts the transaction sync
-func (s *Service) Start() {
- s.ctx, s.cancelCtx = context.WithCancel(context.Background())
- s.waitGroup.Add(1)
-
- go s.state.mainloop(s.ctx, &s.waitGroup)
-}
-
-// Stop stops the transaction sync
-func (s *Service) Stop() {
- // cancel the context
- s.cancelCtx()
- // wait until the mainloop exists.
- s.waitGroup.Wait()
- // clear the context, as we won't be using it anymore.
- s.cancelCtx, s.ctx = nil, nil
-}
-
-// GetIncomingMessageHandler returns the message handler.
-func (s *Service) GetIncomingMessageHandler() IncomingMessageHandler {
- return s.state.asyncIncomingMessageHandler
-}
diff --git a/txnsync/service_test.go b/txnsync/service_test.go
deleted file mode 100644
index 2b262fc18..000000000
--- a/txnsync/service_test.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "encoding/binary"
- "math/rand"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/execpool"
- "github.com/algorand/go-algorand/util/timers"
-)
-
-type mockLogger struct {
- logging.Logger
-}
-
-type mockNodeConnector struct {
- NodeConnector
- calledEvents *bool
- peerInfo PeerInfo
- updatingPeers bool
- transactionPoolSize int
- peers []PeerInfo
-}
-
-func makeMockNodeConnector(calledEvents *bool) mockNodeConnector {
- return mockNodeConnector{calledEvents: calledEvents}
-}
-
-func (fn *mockNodeConnector) Events() <-chan Event {
- if fn.calledEvents != nil {
- *fn.calledEvents = true
- }
- return nil
-}
-
-func (fn *mockNodeConnector) GetCurrentRoundSettings() (out RoundSettings) { return }
-
-func (fn *mockNodeConnector) Clock() (out timers.WallClock) {
- return timers.MakeMonotonicClock(time.Now())
-}
-
-func (fn *mockNodeConnector) Random(rng uint64) uint64 {
- var xb [8]byte
- rand.Read(xb[:])
- rv := binary.LittleEndian.Uint64(xb[:])
- return rv % rng
-}
-
-func (fn *mockNodeConnector) GetPeers() []PeerInfo { return fn.peers }
-
-func (fn *mockNodeConnector) GetPeer(interface{}) (out PeerInfo) {
- return fn.peerInfo
-}
-
-func (fn *mockNodeConnector) UpdatePeers(txsyncPeers []*Peer, netPeers []interface{}, peersAverageDataExchangeRate uint64) {
- fn.updatingPeers = true
-}
-func (fn *mockNodeConnector) SendPeerMessage(netPeer interface{}, msg []byte, callback SendMessageCallback) {
-}
-
-func (fn *mockNodeConnector) GetPeerLatency(netPeer interface{}) time.Duration {
- return 0
-}
-
-func (fn *mockNodeConnector) GetPendingTransactionGroups() (txGroups []pooldata.SignedTxGroup, latestLocallyOriginatedGroupCounter uint64) {
- return
-}
-func (fn *mockNodeConnector) IncomingTransactionGroups(peer *Peer, messageSeq uint64, txGroups []pooldata.SignedTxGroup) (transactionPoolSize int) {
- return fn.transactionPoolSize
-}
-func (fn *mockNodeConnector) NotifyMonitor() chan struct{} { return nil }
-
-type mockThreadPool struct {
- execpool.BacklogPool
-}
-
-// TestStartStopTransactionSyncService test that we can start and stop the transaction sync service
-func TestStartStopTransactionSyncService(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- calledEventsInNodeConnector := false
-
- a := require.New(t)
-
- mLogger := mockLogger{}
- mNodeConnector := makeMockNodeConnector(&calledEventsInNodeConnector)
- cfg := config.GetDefaultLocal()
- mThreadPool := mockThreadPool{}
-
- hashDigest := crypto.Hash([]byte{0x41, 0x6b, 0x69, 0x6b, 0x69})
-
- service := MakeTransactionSyncService(mLogger, &mNodeConnector, true, "GENID", hashDigest, cfg, mThreadPool)
-
- a.NotNil(service)
-
- service.Start()
- service.Stop()
-
- a.True(calledEventsInNodeConnector)
-
- a.Nil(service.cancelCtx)
- a.Nil(service.ctx)
-
-}
-
-// TestMakeTransactionSyncService tests that an appropriate transaction sync service was made
-func TestMakeTransactionSyncService(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- mLogger := mockLogger{}
- mNodeConnector := &mockNodeConnector{}
- cfg := config.GetDefaultLocal()
- mThreadPool := mockThreadPool{}
-
- hashDigest := crypto.Hash([]byte{0x41, 0x6b, 0x69, 0x6b, 0x69})
-
- service1 := MakeTransactionSyncService(mLogger, mNodeConnector, true, "GENID", hashDigest, cfg, mThreadPool)
-
- a.NotNil(service1)
-
- a.Equal(service1.state.node, mNodeConnector)
- a.Equal(service1.state.log, wrapLogger(mLogger, &cfg))
- a.Equal(service1.state.isRelay, true)
- a.Equal(service1.state.genesisID, "GENID")
- a.Equal(service1.state.genesisHash, hashDigest)
- a.Equal(service1.state.config, cfg)
- a.Equal(service1.state.threadpool, mThreadPool)
- a.Equal(service1.state.service, service1)
- a.Equal(service1.state.xorBuilder.MaxIterations, 10)
-
- service2 := MakeTransactionSyncService(mLogger, mNodeConnector, false, "GENID2", hashDigest, cfg, mThreadPool)
-
- a.NotNil(service1)
-
- a.Equal(service2.state.node, mNodeConnector)
- a.Equal(service2.state.log, wrapLogger(mLogger, &cfg))
- a.Equal(service2.state.isRelay, false)
- a.Equal(service2.state.genesisID, "GENID2")
- a.Equal(service2.state.genesisHash, hashDigest)
- a.Equal(service2.state.config, cfg)
- a.Equal(service2.state.threadpool, mThreadPool)
- a.Equal(service2.state.service, service2)
- a.Equal(service2.state.xorBuilder.MaxIterations, 10)
-
-}
diff --git a/txnsync/transactionCache.go b/txnsync/transactionCache.go
deleted file mode 100644
index 69569547d..000000000
--- a/txnsync/transactionCache.go
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "sort"
- "time"
-
- "github.com/algorand/go-algorand/data/transactions"
-)
-
-// cachedEntriesPerMap is the number of entries the longTermTransactionCache will have in each of it's
-// buckets. When looking up an entry, we don't want to have too many entries, hence, the number of maps we
-// maintain shouldn't be too high. On the flip side, keeping small number of maps means that we drop out
-// large portion of our cache. The number 917 here was picked as a sufficiently large prime number, which
-// would mean that if longTermRecentTransactionsSentBufferLength=15K, then we would have about 16 maps.
-const cachedEntriesPerMap = 917
-
-// cacheHistoryDuration is the time we will keep a transaction in the cache, assuming that the cache
-// storage would not get recycled first. When applied to transactions maps in the longTermTransactionCache, this
-// applies to the timestamp of the most recent transaction in the map.
-const cacheHistoryDuration = 10 * time.Second
-
-// transactionCache is a cache of recently sent transactions ids, allowing to limit the size of the historical kept transactions.
-// transactionCache has FIFO replacement.
-// implementation is a simple cyclic-buffer with a map to accelerate lookups.
-// internally, it's being manages as two tier cache, where the long-term cache is bigger and requires acknowledgements.
-//msgp:ignore transactionCache
-type transactionCache struct {
- shortTermCache shortTermTransactionCache
- longTermCache longTermTransactionCache
- ackPendingTxids []ackPendingTxids
-}
-
-//msgp:ignore ackPendingTxids
-type ackPendingTxids struct {
- txids []transactions.Txid
- seq uint64
- timestamp time.Duration
-}
-
-// shortTermCacheEntry is used as the data container of a double linked list item
-// in the shortTermTransactionCache object.
-//msgp:ignore shortTermCacheEntry
-type shortTermCacheEntry struct {
- txid transactions.Txid // the transaction ID
- prev *shortTermCacheEntry // previous entry in the circular linked list
- next *shortTermCacheEntry // next entry in the circular linked list
-}
-
-//msgp:ignore shortTermTransactionCache
-type shortTermTransactionCache struct {
- size int // the maximum number of elements in the short term cache
- head *shortTermCacheEntry // pointer to first element in the linked list; the head is the "oldest" entry in the list, and would get pruned first.
- free *shortTermCacheEntry // pointer to a free element list
- transactionsMap map[transactions.Txid]*shortTermCacheEntry // map of the entries included
-}
-
-//msgp:ignore longTermTransactionCache
-type longTermTransactionCache struct {
- current int
- transactionsMap []map[transactions.Txid]bool
- timestamps []time.Duration
-}
-
-// detach remove the entry from the list it's currently part of.
-// the return value is false if the item is the only entry in the list
-// or true otherwise.
-func (ce *shortTermCacheEntry) detach() bool {
- if ce.next == ce.prev {
- return false
- }
- ce.prev.next = ce.next
- ce.next.prev = ce.prev
- return true
-}
-
-// addToList add the element to the tail of the list who's head is provided.
-func (ce *shortTermCacheEntry) addToList(head *shortTermCacheEntry) {
- tail := head.prev
- tail.next = ce
- head.prev = ce
- ce.prev = tail
- ce.next = head
-}
-
-// makeTransactionCache creates the transaction cache
-func makeTransactionCache(shortTermSize, longTermSize, pendingAckTxids int) *transactionCache {
- txnCache := &transactionCache{
- shortTermCache: shortTermTransactionCache{
- size: shortTermSize,
- transactionsMap: make(map[transactions.Txid]*shortTermCacheEntry, shortTermSize),
- },
- ackPendingTxids: make([]ackPendingTxids, 0, pendingAckTxids),
- longTermCache: longTermTransactionCache{
- transactionsMap: make([]map[transactions.Txid]bool, (longTermSize+cachedEntriesPerMap-1)/cachedEntriesPerMap),
- timestamps: make([]time.Duration, (longTermSize+cachedEntriesPerMap-1)/cachedEntriesPerMap),
- },
- }
- // initialize only the first entry; the rest would be created dynamically.
- txnCache.longTermCache.transactionsMap[0] = make(map[transactions.Txid]bool, cachedEntriesPerMap)
- return txnCache
-}
-
-// add adds a single trasaction ID to the short term cache.
-func (lru *transactionCache) add(txid transactions.Txid) {
- lru.shortTermCache.add(txid)
-}
-
-// addSlice adds a slice to both the short term cache as well as the pending ack transaction ids.
-func (lru *transactionCache) addSlice(txids []transactions.Txid, msgSeq uint64, timestamp time.Duration) {
- for _, txid := range txids {
- lru.shortTermCache.add(txid)
- }
- // verify that the new msgSeq is bigger than the previous we have.
- if len(lru.ackPendingTxids) > 0 {
- if lru.ackPendingTxids[len(lru.ackPendingTxids)-1].seq >= msgSeq {
- return
- }
- }
-
- if len(lru.ackPendingTxids) == cap(lru.ackPendingTxids) {
- // roll this array without reallocation.
- copy(lru.ackPendingTxids, lru.ackPendingTxids[1:])
- // update the last entry of the array.
- lru.ackPendingTxids[len(lru.ackPendingTxids)-1] = ackPendingTxids{txids: txids, seq: msgSeq, timestamp: timestamp}
- } else {
- lru.ackPendingTxids = append(lru.ackPendingTxids, ackPendingTxids{txids: txids, seq: msgSeq, timestamp: timestamp})
- }
-
- // clear the entries that are too old.
- lastValidEntry := -1
- for i, entry := range lru.ackPendingTxids {
- if entry.timestamp < timestamp-cacheHistoryDuration {
- lastValidEntry = i
- } else {
- break
- }
- }
- if lastValidEntry >= 0 {
- // copy the elements
- var i int
- for i = 0; i < len(lru.ackPendingTxids)-1-lastValidEntry; i++ {
- lru.ackPendingTxids[i] = lru.ackPendingTxids[i+lastValidEntry+1]
- }
- // clear the rest of the entries.
- for ; i < len(lru.ackPendingTxids); i++ {
- lru.ackPendingTxids[i] = ackPendingTxids{}
- }
- // reset the slice
- lru.ackPendingTxids = lru.ackPendingTxids[:len(lru.ackPendingTxids)-lastValidEntry-1]
- }
-}
-
-// contained checks if a given transaction ID is contained in either the short term or long term cache
-func (lru *transactionCache) contained(txid transactions.Txid) bool {
- return lru.shortTermCache.contained(txid) || lru.longTermCache.contained(txid)
-}
-
-// reset clears the short term cache
-func (lru *transactionCache) reset() {
- lru.shortTermCache.reset()
-}
-
-// acknowledge process a given slice of previously sent message sequence numbers. The transaction IDs that
-// were previously sent with these sequence numbers are being added to the long term cache.
-func (lru *transactionCache) acknowledge(seqs []uint64) {
- for _, seq := range seqs {
- i := sort.Search(len(lru.ackPendingTxids), func(i int) bool {
- return lru.ackPendingTxids[i].seq >= seq
- })
- // if not found, skip it.
- if i >= len(lru.ackPendingTxids) || seq != lru.ackPendingTxids[i].seq {
- continue
- }
- lru.longTermCache.add(lru.ackPendingTxids[i].txids, lru.ackPendingTxids[i].timestamp)
- lru.longTermCache.prune(lru.ackPendingTxids[i].timestamp - cacheHistoryDuration)
- // clear out the entry at lru.ackPendingTxids[i] so that the GC could reclaim it.
- lru.ackPendingTxids[i] = ackPendingTxids{}
- // and delete the entry from the array
- lru.ackPendingTxids = append(lru.ackPendingTxids[:i], lru.ackPendingTxids[i+1:]...)
- }
-}
-
-// add a given transaction ID to the short term cache.
-func (st *shortTermTransactionCache) add(txid transactions.Txid) {
- entry, exists := st.transactionsMap[txid]
- if exists {
- // promote
- if entry.next != entry.prev {
- // disconnect the current one; no need to test return code since we know
- // there will be more elements on the list.
- entry.detach()
-
- // there are other elements on the list.
- // if the given entry happen to be the first entry, then pick
- // the next entry.
- if entry == st.head {
- st.head = entry.next
- }
- // add to the tail of the list.
- entry.addToList(st.head)
- } else { //nolint:staticcheck
- // no other elements on the list -
- // nothing to do in this case.
- }
- return
- }
-
- mapLen := len(st.transactionsMap)
- if mapLen >= st.size {
- // we reached size, delete the oldest entry.
- t := st.head
-
- // disconnect the current one; no need to test return code since we know
- // there will be more elements on the list.
- t.detach()
-
- // replace the first entry with the next one.
- st.head = t.next
-
- // delete the current value from the map.
- delete(st.transactionsMap, t.txid)
-
- // copy the new transaction id into the existing object.
- copy(t.txid[:], txid[:])
-
- // place the new entry as the last entry on the list.
- t.addToList(st.head)
-
- // add the new entry to the map
- st.transactionsMap[txid] = t
- return
- }
-
- // grab an entry from the free list ( if any )
- entry = st.free
- if entry != nil {
- if entry.detach() {
- st.free = entry.next
- } else {
- st.free = nil
- }
- copy(entry.txid[:], txid[:])
- } else {
- // the free list doesn't have an entry - allocate a new one.
- entry = &shortTermCacheEntry{
- txid: txid,
- }
- }
- if st.head == nil {
- st.head = entry
- entry.next = entry
- entry.prev = entry
- } else {
- entry.addToList(st.head)
- }
- st.transactionsMap[txid] = entry
-}
-
-// contained checks if the given transaction id presents in the short term cache
-func (st *shortTermTransactionCache) contained(txid transactions.Txid) bool {
- return st.transactionsMap[txid] != nil
-}
-
-// reset clears the short term cache
-func (st *shortTermTransactionCache) reset() {
- if st.head == nil {
- return
- }
- st.transactionsMap = make(map[transactions.Txid]*shortTermCacheEntry, st.size)
- if st.free == nil {
- st.free = st.head
- st.head = nil
- return
- }
- used := st.head
- free := st.free
- free.prev.next = used
- used.prev.next = free
- lastFree := free.prev
- free.prev = used.prev
- used.prev = lastFree
- st.head = nil
-}
-
-// contained checks if the given transaction id presents in the log term cache
-func (lt *longTermTransactionCache) contained(txid transactions.Txid) bool {
- for i := lt.current; i >= 0; i-- {
- if lt.transactionsMap[i][txid] {
- return true
- }
- }
- for i := len(lt.transactionsMap) - 1; i > lt.current; i-- {
- if lt.transactionsMap[i][txid] {
- return true
- }
- }
- return false
-}
-
-// add a given slice of transaction IDs to the long term transaction cache, at a given timestamp.
-func (lt *longTermTransactionCache) add(slice []transactions.Txid, timestamp time.Duration) {
- for {
- lt.timestamps[lt.current] = timestamp
- availableEntries := cachedEntriesPerMap - len(lt.transactionsMap[lt.current])
- txMap := lt.transactionsMap[lt.current]
- if txMap == nil {
- txMap = make(map[transactions.Txid]bool, cachedEntriesPerMap)
- }
- if len(slice) <= availableEntries {
- // just add them all.
- for _, txid := range slice {
- txMap[txid] = true
- }
- lt.transactionsMap[lt.current] = txMap
- return
- }
-
- // otherwise, add as many as we can fit -
- for i := 0; i < availableEntries; i++ {
- txMap[slice[i]] = true
- }
- lt.transactionsMap[lt.current] = txMap
-
- // remove the ones we've already added from the slice.
- slice = slice[availableEntries:]
-
- // move to the next map.
- lt.current = (lt.current + 1) % len(lt.transactionsMap)
-
- // if full, reset bucket.
- if len(lt.transactionsMap[lt.current]) >= cachedEntriesPerMap || lt.transactionsMap[lt.current] == nil {
- // reset.
- lt.transactionsMap[lt.current] = make(map[transactions.Txid]bool, cachedEntriesPerMap)
- }
- }
-}
-
-// prune the long term cache by clearing out all the cached transaction IDs maps that are dated before the given
-// timestamp
-func (lt *longTermTransactionCache) prune(timestamp time.Duration) {
- // find the index of the first entry where the timestamp is still valid.
- latestValidIndex := sort.Search(len(lt.transactionsMap), func(i int) bool {
- arrayIndex := (i + lt.current + 1) % len(lt.transactionsMap)
- return lt.timestamps[arrayIndex] > timestamp
- })
-
- // find the first non-empty map index.
- firstValidIndex := sort.Search(len(lt.transactionsMap), func(i int) bool {
- arrayIndex := (i + lt.current + 1) % len(lt.transactionsMap)
- return lt.timestamps[arrayIndex] != time.Duration(0)
- })
-
- for i := firstValidIndex - 1; i < latestValidIndex; i++ {
- arrayIndex := (i + lt.current + 1) % len(lt.transactionsMap)
- lt.timestamps[arrayIndex] = time.Duration(0)
- lt.transactionsMap[lt.current] = nil
- }
-}
diff --git a/txnsync/transactionCache_test.go b/txnsync/transactionCache_test.go
deleted file mode 100644
index daf6cb8bc..000000000
--- a/txnsync/transactionCache_test.go
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "reflect"
- "strconv"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-// TestTransactionCache General smoke test for the transaction cache
-func TestTransactionCache(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var txid transactions.Txid
- a := makeTransactionCache(5, 10, 20)
- for repeat := 0; repeat < 2; repeat++ {
- // add 5
- for i := 0; i < 5; i++ {
- txid[0] = byte(i)
- a.add(txid)
- }
-
- // all 5 still there
- for i := 0; i < 5; i++ {
- txid[0] = byte(i)
- require.True(t, a.contained(txid), "txid: %v", txid[:])
- }
-
- // repeatedly adding existing data doesn't lose anything
- txid[0] = 1
- a.add(txid)
- a.add(txid)
- a.add(txid)
- for i := 0; i < 5; i++ {
- txid[0] = byte(i)
- require.True(t, a.contained(txid), "txid: %v", txid[:])
- }
-
- // adding a sixth forgets the first
- txid[0] = 5
- a.add(txid)
- for i := 1; i < 6; i++ {
- txid[0] = byte(i)
- require.True(t, a.contained(txid), "txid: %v", txid[:])
- }
- txid[0] = 0
- require.False(t, a.contained(txid))
-
- // adding a seventh forgets the third
- txid[0] = 6
- a.add(txid)
- for i := 3; i < 7; i++ {
- txid[0] = byte(i)
- require.True(t, a.contained(txid), "txid: %v", txid[:])
- }
- txid[0] = 1
- require.True(t, a.contained(txid), "txid: %v", txid[:])
- txid[0] = 2
- require.False(t, a.contained(txid))
- a.reset()
- }
-}
-
-// TestTransactionCacheResetting is a simple reset testing, ensuring we know how to recycle entries from the free list.
-func TestTransactionCacheResetting(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var txid transactions.Txid
- a := makeTransactionCache(5, 10, 20)
- // add 5
- for i := 0; i < 5; i++ {
- txid[0] = byte(i)
- a.add(txid)
- }
- // re-add the first one again, to promote it.
- txid[0] = 0
- a.add(txid)
- a.reset()
- // add 3
- for i := 0; i < 3; i++ {
- txid[0] = byte(i)
- a.add(txid)
- }
- a.reset()
- // add 2
- for i := 0; i < 2; i++ {
- txid[0] = byte(i)
- a.add(txid)
- }
- // verify the two are there.
- for i := 0; i < 2; i++ {
- txid[0] = byte(i)
- require.True(t, a.contained(txid), "txid: %v", txid[:])
- }
-}
-
-// TestTransactionCacheAddSlice tests addSlice functionality of the transaction cache
-func TestTransactionCacheAddSlice(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- tc := makeTransactionCache(5, 10, 20)
- curTimestamp := time.Duration(0)
- msgSeq := uint64(0)
- slice := make([]transactions.Txid, 10)
- for i := 0; i < 50; i++ {
- tc.addSlice(slice, msgSeq, curTimestamp)
- curTimestamp += cacheHistoryDuration / 10
- msgSeq++
- require.LessOrEqual(t, len(tc.ackPendingTxids), 11)
- }
- curTimestamp += cacheHistoryDuration
- tc.addSlice(slice, msgSeq, curTimestamp)
- require.LessOrEqual(t, len(tc.ackPendingTxids), 1)
-}
-
-// TestAddSliceSeqReturn Tests that if the ackPendingTxIds is bigger that the msgSeq then we return
-func TestAddSliceSeqReturn(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- tc := makeTransactionCache(5, 10, 20)
- curTimestamp := time.Duration(cacheHistoryDuration)
- msgSeq := uint64(1)
- slice := make([]transactions.Txid, 10)
- tc.addSlice(slice, msgSeq, curTimestamp)
-
- tcLen := len(tc.ackPendingTxids)
-
- tc.addSlice(slice, 0, curTimestamp)
- require.Equal(t, tcLen, len(tc.ackPendingTxids))
- msgSeq++
- tc.addSlice(slice, msgSeq, curTimestamp+(cacheHistoryDuration/10))
- require.Equal(t, tcLen+1, len(tc.ackPendingTxids))
-
-}
-
-// TestAddSliceCapacity tests that we correctly copy the ackPendingTxids when at capacity
-func TestAddSliceCapacity(t *testing.T) {
- partitiontest.PartitionTest(t)
- tc := makeTransactionCache(5, 10, 5)
-
- curTimestamp := time.Duration(0)
- msgSeq := uint64(0)
- slice := make([]transactions.Txid, 10)
- for i := 0; i < 50; i++ {
- tc.addSlice(slice, msgSeq, curTimestamp)
- curTimestamp += cacheHistoryDuration / 10
- msgSeq++
- require.LessOrEqual(t, len(tc.ackPendingTxids), 6)
- }
-
-}
-
-func (ce *shortTermCacheEntry) getLength() int {
- length := 0
- if ce == nil {
- return length
- }
- length++
- cur := ce
- for ; cur != nil && ce != cur.next; cur = cur.next {
- length++
- }
- return length
-}
-
-// TestShortTermCacheReset tests that the short term cache is reset
-func TestShortTermCacheReset(t *testing.T) {
- partitiontest.PartitionTest(t)
- tc := makeTransactionCache(5, 10, 5)
- require.Nil(t, tc.shortTermCache.head)
- require.Nil(t, tc.shortTermCache.free)
- require.Equal(t, 0, len(tc.shortTermCache.transactionsMap))
-
- var txid transactions.Txid
- for i := 0; i < 5; i++ {
- txid[0] = byte(i)
- tc.add(txid)
- }
- require.Equal(t, 5, tc.shortTermCache.head.getLength())
-
- require.Nil(t, tc.shortTermCache.free)
- require.NotNil(t, tc.shortTermCache.head)
- require.Equal(t, 5, len(tc.shortTermCache.transactionsMap))
-
- tc.reset()
- require.Equal(t, 0, tc.shortTermCache.head.getLength())
- require.Equal(t, 5, tc.shortTermCache.free.getLength())
-
- require.Nil(t, tc.shortTermCache.head)
- require.NotNil(t, tc.shortTermCache.free)
- require.Equal(t, 0, len(tc.shortTermCache.transactionsMap))
-
- for i := 0; i < 2; i++ {
- txid[0] = byte(i)
- tc.add(txid)
- }
-
- tc.reset()
- require.Equal(t, 5, tc.shortTermCache.free.getLength())
-}
-
-// TestCacheAcknowledge tests that the acknowledge function correctly adds entries
-func TestCacheAcknowledge(t *testing.T) {
- partitiontest.PartitionTest(t)
- tc := makeTransactionCache(5, 10, 5)
-
- curTimestamp := time.Duration(0)
- msgSeq := uint64(0)
- slice := make([]transactions.Txid, 10)
- for i := 0; i < 5; i++ {
- tc.addSlice(slice, msgSeq, curTimestamp)
- curTimestamp += cacheHistoryDuration / 20
- msgSeq++
- require.LessOrEqual(t, len(tc.ackPendingTxids), 5)
- }
-
- require.Equal(t, 1, len(tc.longTermCache.transactionsMap))
- require.Equal(t, 0, tc.longTermCache.current)
-
- // The 10 is purposely past the range for the checking
- seqs := []uint64{10, 1, 2, 3}
- tc.acknowledge(seqs)
- require.Equal(t, 2, len(tc.ackPendingTxids))
- require.Equal(t, uint64(0), tc.ackPendingTxids[0].seq)
- require.Equal(t, uint64(4), tc.ackPendingTxids[1].seq)
-
-}
-
-// TestCacheAddAndContains tests adding to the long term cache and if we can test if it contains it
-func TestCacheAddAndContains(t *testing.T) {
- partitiontest.PartitionTest(t)
- tc := makeTransactionCache(5, 2*cachedEntriesPerMap, 5)
-
- // We want two scenarios: Smaller than cachedEntriesPerMap and bigger
- smallSlice := make([]transactions.Txid, cachedEntriesPerMap/2)
-
- // Fill with random numbers
- for i := 0; i < cachedEntriesPerMap/2; i++ {
- tx := &smallSlice[i]
- tx[0] = byte((i + 37) % 255)
- tx[1] = byte((i + 2) % 255)
- tx[2] = byte((i + 42) % 255)
- tx[3] = byte((i + 23) % 255)
- }
-
- bigSlice := make([]transactions.Txid, 2*cachedEntriesPerMap)
-
- // Fill with sequential numbers
- for i := 0; i < 2*cachedEntriesPerMap; i++ {
- tx := &bigSlice[i]
- bs := []byte(strconv.Itoa(i))
- d := crypto.Hash(bs)
-
- *tx = transactions.Txid(d)
- }
-
- curTimestamp := time.Duration(0)
-
- ltc := &tc.longTermCache
- require.Equal(t, 2, len(ltc.transactionsMap))
-
- require.Equal(t, 0, ltc.current)
-
- ltc.add(smallSlice, curTimestamp)
-
- require.Equal(t, 0, ltc.current)
-
- sliceMap := make(map[transactions.Txid]bool)
- for _, txid := range smallSlice {
- sliceMap[txid] = true
- }
-
- require.True(t, reflect.DeepEqual(sliceMap, ltc.transactionsMap[0]))
-
- ltc.add(bigSlice, curTimestamp)
-
- // Given that we already added small slice, we should "overflow"
- // and expect that the transaction map contains a modified version of big slice
-
- slice := bigSlice
- for {
- availableEntries := cachedEntriesPerMap - len(sliceMap)
- if len(slice) <= availableEntries {
- for _, txid := range slice {
- sliceMap[txid] = true
- }
- break
- }
-
- for i := 0; i < availableEntries; i++ {
- sliceMap[slice[i]] = true
- }
-
- slice = slice[availableEntries:]
-
- if len(sliceMap) >= cachedEntriesPerMap {
- sliceMap = make(map[transactions.Txid]bool)
- }
-
- }
-
- require.Equal(t, 0, ltc.current)
- require.True(t, reflect.DeepEqual(sliceMap, ltc.transactionsMap[0]))
-
- bs := []byte(strconv.Itoa(cachedEntriesPerMap))
- d := crypto.Hash(bs)
- targetTxID := transactions.Txid(d)
-
- require.True(t, ltc.contained(targetTxID))
-
-}
diff --git a/txnsync/txngroups.go b/txnsync/txngroups.go
deleted file mode 100644
index fdec93785..000000000
--- a/txnsync/txngroups.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "errors"
- "fmt"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/util/compress"
-)
-
-// Deflate performance constants measured by BenchmarkTxnGroupCompression
-const estimatedDeflateCompressionSpeed = 121123260.0 // bytes per second of how fast Deflate compresses data
-const estimatedDeflateCompressionGains = 0.32 // fraction of data reduced by Deflate on txnsync msgs
-
-const minEncodedTransactionGroupsCompressionThreshold = 1000
-
-const maxCompressionRatio = 20 // don't allow more than 95% compression
-
-func (s *syncState) encodeTransactionGroups(inTxnGroups []pooldata.SignedTxGroup, dataExchangeRate uint64) (packedTransactionGroups, error) {
- txnCount := 0
- for _, txGroup := range inTxnGroups {
- txnCount += len(txGroup.Transactions)
- }
- stub := txGroupsEncodingStub{
- TotalTransactionsCount: uint64(txnCount),
- TransactionGroupCount: uint64(len(inTxnGroups)),
- TransactionGroupSizes: make([]byte, 0, len(inTxnGroups)),
- }
-
- bitmaskLen := bytesNeededBitmask(int(stub.TotalTransactionsCount))
- index := 0
- for _, txGroup := range inTxnGroups {
- if len(txGroup.Transactions) > 1 {
- for _, txn := range txGroup.Transactions {
- if err := stub.deconstructSignedTransaction(index, &txn); err != nil {
- return packedTransactionGroups{}, fmt.Errorf("failed to encodeTransactionGroups: %w", err)
- }
- index++
- }
- stub.TransactionGroupSizes = append(stub.TransactionGroupSizes, byte(len(txGroup.Transactions)-1))
- }
- }
- compactNibblesArray(&stub.TransactionGroupSizes)
- for _, txGroup := range inTxnGroups {
- if len(txGroup.Transactions) == 1 {
- for _, txn := range txGroup.Transactions {
- if !txn.Txn.Group.MsgIsZero() {
- if len(stub.BitmaskGroup) == 0 {
- stub.BitmaskGroup = make(bitmask, bitmaskLen)
- }
- stub.BitmaskGroup.setBit(index)
- }
- if err := stub.deconstructSignedTransaction(index, &txn); err != nil {
- return packedTransactionGroups{}, fmt.Errorf("failed to encodeTransactionGroups: %w", err)
- }
- index++
- }
- }
- }
- stub.finishDeconstructSignedTransactions()
-
- encoded := stub.MarshalMsg(getMessageBuffer())
-
- // check if time saved by compression: estimatedDeflateCompressionGains * len(msg) / dataExchangeRate
- // is greater than by time spent during compression: len(msg) / estimatedDeflateCompressionSpeed
- if len(encoded) > minEncodedTransactionGroupsCompressionThreshold && float32(dataExchangeRate) < (estimatedDeflateCompressionGains*estimatedDeflateCompressionSpeed) {
- compressedBytes, compressionFormat := s.compressTransactionGroupsBytes(encoded)
- if compressionFormat != compressionFormatNone {
- packedGroups := packedTransactionGroups{
- Bytes: compressedBytes,
- CompressionFormat: compressionFormat,
- LenDecompressedBytes: uint64(len(encoded)),
- }
- releaseMessageBuffer(encoded)
- return packedGroups, nil
- }
- }
-
- return packedTransactionGroups{
- Bytes: encoded,
- CompressionFormat: compressionFormatNone,
- }, nil
-}
-
-func (s *syncState) compressTransactionGroupsBytes(uncompressedData []byte) ([]byte, byte) {
- b := getMessageBuffer()
- if cap(b) < len(uncompressedData) {
- releaseMessageBuffer(b)
- b = make([]byte, 0, len(uncompressedData))
- }
- _, compressedData, err := compress.Compress(uncompressedData, b, 1)
- if err != nil {
- if errors.Is(err, compress.ErrShortBuffer) {
- s.log.Debugf("compression had negative effect, made message bigger: original msg length: %d", len(uncompressedData))
- } else {
- s.log.Warnf("failed to compress %d bytes txnsync msg: %v", len(uncompressedData), err)
- }
- releaseMessageBuffer(b)
- return uncompressedData, compressionFormatNone
- }
- if len(uncompressedData) > len(compressedData)*maxCompressionRatio {
- s.log.Infof("compression exceeded compression ratio: compressed data len: %d", len(compressedData))
- releaseMessageBuffer(b)
- return uncompressedData, compressionFormatNone
- } else if len(uncompressedData) <= len(compressedData) {
- // compression wasn't effective and ended up spending more data.
- releaseMessageBuffer(b)
- return uncompressedData, compressionFormatNone
- }
- return compressedData, compressionFormatDeflate
-}
-
-func decodeTransactionGroups(ptg packedTransactionGroups, genesisID string, genesisHash crypto.Digest) (txnGroups []pooldata.SignedTxGroup, err error) {
- data := ptg.Bytes
- if len(data) == 0 {
- return nil, nil
- }
-
- switch ptg.CompressionFormat {
- case compressionFormatNone:
- case compressionFormatDeflate:
- data, err = decompressTransactionGroupsBytes(data, ptg.LenDecompressedBytes)
- if err != nil {
- return
- }
- defer releaseMessageBuffer(data)
- default:
- return nil, fmt.Errorf("invalid compressionFormat, %d", ptg.CompressionFormat)
- }
- var stub txGroupsEncodingStub
- _, err = stub.UnmarshalMsg(data)
- if err != nil {
- return nil, err
- }
-
- if stub.TransactionGroupCount > maxEncodedTransactionGroups {
- return nil, errors.New("invalid TransactionGroupCount")
- }
-
- if stub.TotalTransactionsCount > uint64(maxEncodedTransactionGroups*config.MaxTxGroupSize) {
- return nil, errors.New("invalid TotalTransactionsCount")
- }
-
- stx := make([]transactions.SignedTxn, stub.TotalTransactionsCount)
-
- err = stub.reconstructSignedTransactions(stx, genesisID, genesisHash)
- if err != nil {
- return nil, err
- }
-
- txnGroups = make([]pooldata.SignedTxGroup, stub.TransactionGroupCount)
- for txnCounter, txnGroupIndex := 0, 0; txnCounter < int(stub.TotalTransactionsCount); txnGroupIndex++ {
- size := 1
- if txnGroupIndex < len(stub.TransactionGroupSizes)*2 {
- nibble, err := getNibble(stub.TransactionGroupSizes, txnGroupIndex)
- if err != nil {
- return nil, err
- }
- size = int(nibble) + 1
- }
- txnGroups[txnGroupIndex].Transactions = stx[txnCounter : txnCounter+size]
- txnCounter += size
- }
-
- err = addGroupHashes(txnGroups, int(stub.TotalTransactionsCount), stub.BitmaskGroup)
- if err != nil {
- return nil, err
- }
-
- return txnGroups, nil
-}
-
-func decompressTransactionGroupsBytes(data []byte, lenDecompressedBytes uint64) (decoded []byte, err error) {
- compressionRatio := lenDecompressedBytes / uint64(len(data)) // data should have been compressed between 0 and 95%
- if lenDecompressedBytes > maxEncodedTransactionGroupBytes || compressionRatio <= 0 || compressionRatio >= maxCompressionRatio {
- return nil, fmt.Errorf("invalid lenDecompressedBytes: %d, len(data): %d", lenDecompressedBytes, len(data))
- }
-
- out := getMessageBuffer()
- if uint64(cap(out)) < lenDecompressedBytes {
- releaseMessageBuffer(out)
- out = make([]byte, 0, lenDecompressedBytes)
- }
-
- decoded, err = compress.Decompress(data, out)
- if err != nil {
- releaseMessageBuffer(out)
- decoded = nil
- return
- }
- if uint64(len(decoded)) != lenDecompressedBytes {
- releaseMessageBuffer(out)
- decoded = nil
- return nil, fmt.Errorf("lenDecompressedBytes didn't match: expected %d, actual %d", lenDecompressedBytes, len(decoded))
- }
- return
-}
-
-func releaseEncodedTransactionGroups(buffer []byte) {
- if buffer == nil {
- return
- }
-
- releaseMessageBuffer(buffer)
-}
diff --git a/txnsync/txngroups_test.go b/txnsync/txngroups_test.go
deleted file mode 100644
index 6306494f3..000000000
--- a/txnsync/txngroups_test.go
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package txnsync
-
-import (
- "context"
- "database/sql"
- "flag"
- "io"
- "io/ioutil"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/pooldata"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/rpcs"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/db"
-)
-
-var blockDBFilename = flag.String("db", "", "Location of block db")
-var startRound = flag.Int("start", 0, "Starting round")
-var endRound = flag.Int("end", 10, "Ending round")
-
-func TestNibble(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var b []byte
- for i := 0; i < 10; i++ {
- b = append(b, byte(i))
- }
- compactNibblesArray(&b)
- for i := 0; i < 10; i++ {
- val, err := getNibble(b, i)
- require.NoError(t, err)
- require.Equal(t, byte(i), val)
- }
-}
-
-// old encoding method
-func encodeTransactionGroupsOld(inTxnGroups []pooldata.SignedTxGroup) []byte {
- stub := txGroupsEncodingStubOld{
- TxnGroups: make([]txnGroups, len(inTxnGroups)),
- }
- for i := range inTxnGroups {
- stub.TxnGroups[i] = txnGroups(inTxnGroups[i].Transactions)
- }
-
- return stub.MarshalMsg(protocol.GetEncodingBuf()[:0])
-}
-
-// old decoding method
-func decodeTransactionGroupsOld(bytes []byte) (txnGroups []pooldata.SignedTxGroup, err error) {
- if len(bytes) == 0 {
- return nil, nil
- }
- var stub txGroupsEncodingStubOld
- _, err = stub.UnmarshalMsg(bytes)
- if err != nil {
- return nil, err
- }
- txnGroups = make([]pooldata.SignedTxGroup, len(stub.TxnGroups))
- for i := range stub.TxnGroups {
- txnGroups[i].Transactions = pooldata.SignedTxnSlice(stub.TxnGroups[i])
- }
- return txnGroups, nil
-}
-
-func TestTxnGroupEncodingSmall(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genesisHash := crypto.Hash([]byte("gh"))
- genesisID := "gID"
-
- inTxnGroups := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{
- {
- Txn: transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("2"))),
- Fee: basics.MicroAlgos{Raw: 100},
- GenesisHash: genesisHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: basics.Address(crypto.Hash([]byte("4"))),
- Amount: basics.MicroAlgos{Raw: 1000},
- },
- },
- Sig: crypto.Signature{1},
- },
- },
- },
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{
- {
- Txn: transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- Fee: basics.MicroAlgos{Raw: 100},
- GenesisHash: genesisHash,
- GenesisID: genesisID,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: basics.Address(crypto.Hash([]byte("2"))),
- Amount: basics.MicroAlgos{Raw: 1000},
- },
- },
- Sig: crypto.Signature{2},
- },
- {
- Txn: transactions.Transaction{
- Type: protocol.KeyRegistrationTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- GenesisHash: genesisHash,
- GenesisID: genesisID,
- },
- },
- Sig: crypto.Signature{3},
- },
- },
- },
- pooldata.SignedTxGroup{
- Transactions: []transactions.SignedTxn{
- {
- Txn: transactions.Transaction{
- Type: protocol.AssetConfigTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- Fee: basics.MicroAlgos{Raw: 100},
- GenesisHash: genesisHash,
- },
- },
- Sig: crypto.Signature{4},
- },
- {
- Txn: transactions.Transaction{
- Type: protocol.AssetFreezeTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- GenesisHash: genesisHash,
- },
- },
- Sig: crypto.Signature{5},
- },
- {
- Txn: transactions.Transaction{
- Type: protocol.CompactCertTx,
- Header: transactions.Header{
- Sender: basics.Address(crypto.Hash([]byte("1"))),
- GenesisHash: genesisHash,
- },
- },
- Msig: crypto.MultisigSig{Version: 1},
- },
- },
- },
- }
- err := addGroupHashes(inTxnGroups, 6, []byte{1})
- require.NoError(t, err)
- var s syncState
- ptg, err := s.encodeTransactionGroups(inTxnGroups, 1000000000)
- require.NoError(t, err)
- require.Equal(t, ptg.CompressionFormat, compressionFormatNone)
- out, err := decodeTransactionGroups(ptg, genesisID, genesisHash)
- require.NoError(t, err)
- require.ElementsMatch(t, inTxnGroups, out)
-}
-
-// txnGroupsData fetches a sample dataset of txns, specify numBlocks up to 969
-func txnGroupsData(numBlocks int) (txnGroups []pooldata.SignedTxGroup, genesisID string, genesisHash crypto.Digest, err error) {
- dat, err := ioutil.ReadFile("../test/testdata/mainnetblocks")
- if err != nil {
- return
- }
- dec := protocol.NewDecoderBytes(dat)
- blocksData := make([]rpcs.EncodedBlockCert, numBlocks)
- for i := 0; i < len(blocksData); i++ {
- err = dec.Decode(&blocksData[i])
- if err == io.EOF {
- break
- }
- if err != nil {
- return
- }
- }
-
- for _, blockData := range blocksData {
- block := blockData.Block
- genesisID = block.GenesisID()
- genesisHash = block.GenesisHash()
- var payset [][]transactions.SignedTxnWithAD
- payset, err = block.DecodePaysetGroups()
- if err != nil {
- return
- }
- for _, txns := range payset {
- var txnGroup pooldata.SignedTxGroup
- for _, txn := range txns {
- txnGroup.Transactions = append(txnGroup.Transactions, txn.SignedTxn)
- }
- txnGroups = append(txnGroups, txnGroup)
- }
- }
- return
-}
-
-func TestTxnGroupEncodingLarge(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- txnGroups, genesisID, genesisHash, err := txnGroupsData(969)
- require.NoError(t, err)
-
- var s syncState
- ptg, err := s.encodeTransactionGroups(txnGroups, 0)
- require.NoError(t, err)
- require.Equal(t, ptg.CompressionFormat, compressionFormatDeflate)
- out, err := decodeTransactionGroups(ptg, genesisID, genesisHash)
- require.NoError(t, err)
- require.ElementsMatch(t, txnGroups, out)
-
- encodedGroupsBytes := encodeTransactionGroupsOld(txnGroups)
- out, err = decodeTransactionGroupsOld(encodedGroupsBytes)
- require.NoError(t, err)
- require.ElementsMatch(t, txnGroups, out)
-
- // check dataset
- count := make(map[protocol.TxType]int)
- sigs := 0
- msigs := 0
- lsigs := 0
- for _, txg := range txnGroups {
- for _, txn := range txg.Transactions {
- count[txn.Txn.Type]++
- if !txn.Sig.MsgIsZero() {
- sigs++
- }
- if !txn.Msig.MsgIsZero() {
- msigs++
- }
- if !txn.Lsig.MsgIsZero() {
- lsigs++
- }
- }
- }
- require.Equal(t, 2, len(count))
- require.Equal(t, 18351, count["axfer"])
- require.Equal(t, 1663, count["pay"])
- require.Equal(t, 20005, sigs)
- require.Equal(t, 9, msigs)
- require.Equal(t, 0, lsigs)
-}
-
-func BenchmarkTxnGroupEncoding(b *testing.B) {
- txnGroups, _, _, err := txnGroupsData(4)
- require.NoError(b, err)
- var size int
- var s syncState
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- ptg, err := s.encodeTransactionGroups(txnGroups, 1000000000)
- require.NoError(b, err)
- size = len(ptg.Bytes)
- releaseEncodedTransactionGroups(ptg.Bytes)
- }
-
- b.ReportMetric(float64(size), "encodedDataBytes")
-}
-
-func BenchmarkTxnGroupCompression(b *testing.B) {
- txnGroups, _, _, err := txnGroupsData(4)
- require.NoError(b, err)
- var size int
- var s syncState
- ptg, err := s.encodeTransactionGroups(txnGroups, 1000000000)
- require.NoError(b, err)
- require.Equal(b, ptg.CompressionFormat, compressionFormatNone)
-
- b.ReportAllocs()
- b.ResetTimer()
- loopStartTime := time.Now()
- for i := 0; i < b.N; i++ {
- compressedGroupBytes, compressionFormat := s.compressTransactionGroupsBytes(ptg.Bytes)
- require.Equal(b, compressionFormat, compressionFormatDeflate)
- size = len(compressedGroupBytes)
- }
- loopDuration := time.Since(loopStartTime)
- b.StopTimer()
- b.ReportMetric(float64(len(ptg.Bytes)*b.N)/loopDuration.Seconds(), "estimatedGzipCompressionSpeed")
- b.ReportMetric(float64(len(ptg.Bytes)-size)/float64(len(ptg.Bytes)), "estimatedGzipCompressionGains")
-}
-
-func BenchmarkTxnGroupDecoding(b *testing.B) {
- txnGroups, genesisID, genesisHash, err := txnGroupsData(4)
- require.NoError(b, err)
-
- var s syncState
- ptg, err := s.encodeTransactionGroups(txnGroups, 1000000000)
- require.NoError(b, err)
- require.Equal(b, ptg.CompressionFormat, compressionFormatNone)
- require.NoError(b, err)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err = decodeTransactionGroups(ptg, genesisID, genesisHash)
- require.NoError(b, err)
- }
-}
-
-func BenchmarkTxnGroupDecompression(b *testing.B) {
- txnGroups, _, _, err := txnGroupsData(4)
- require.NoError(b, err)
-
- var s syncState
- ptg, err := s.encodeTransactionGroups(txnGroups, 0)
- require.NoError(b, err)
- require.Equal(b, ptg.CompressionFormat, compressionFormatDeflate)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err = decompressTransactionGroupsBytes(ptg.Bytes, ptg.LenDecompressedBytes)
- require.NoError(b, err)
- }
-}
-
-func BenchmarkTxnGroupEncodingOld(b *testing.B) {
- txnGroups, _, _, err := txnGroupsData(4)
- require.NoError(b, err)
- var encodedGroupsBytes []byte
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- encodedGroupsBytes = encodeTransactionGroupsOld(txnGroups)
- releaseEncodedTransactionGroups(encodedGroupsBytes)
- }
-
- b.ReportMetric(float64(len(encodedGroupsBytes)), "encodedDataBytes")
-}
-
-func BenchmarkTxnGroupDecodingOld(b *testing.B) {
- txnGroups, _, _, err := txnGroupsData(4)
- require.NoError(b, err)
-
- encodedGroupsBytes := encodeTransactionGroupsOld(txnGroups)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err = decodeTransactionGroupsOld(encodedGroupsBytes)
- require.NoError(b, err)
- }
-}
-
-// TestTxnGroupEncodingReflection generates random
-// txns of each type using reflection
-func TestTxnGroupEncodingReflection(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- for i := 0; i < 10; i++ {
- v0, err := protocol.RandomizeObject(&transactions.SignedTxn{})
- require.NoError(t, err)
- stx, ok := v0.(*transactions.SignedTxn)
- require.True(t, ok)
-
- var txns []transactions.SignedTxn
- for _, txType := range protocol.TxnTypes {
- txn := *stx
- txn.Txn.PaymentTxnFields = transactions.PaymentTxnFields{}
- txn.Txn.KeyregTxnFields = transactions.KeyregTxnFields{}
- txn.Txn.AssetConfigTxnFields = transactions.AssetConfigTxnFields{}
- txn.Txn.AssetTransferTxnFields = transactions.AssetTransferTxnFields{}
- txn.Txn.AssetFreezeTxnFields = transactions.AssetFreezeTxnFields{}
- txn.Txn.ApplicationCallTxnFields = transactions.ApplicationCallTxnFields{}
- txn.Txn.CompactCertTxnFields = transactions.CompactCertTxnFields{}
- txn.Txn.Type = txType
- txn.Lsig.Logic = []byte("logic")
- switch i % 3 {
- case 0: // only have normal sig
- txn.Msig = crypto.MultisigSig{}
- txn.Lsig = transactions.LogicSig{}
- case 1: // only have multi sig
- txn.Sig = crypto.Signature{}
- txn.Lsig = transactions.LogicSig{}
- case 2: // only have logic sig
- txn.Msig = crypto.MultisigSig{}
- txn.Sig = crypto.Signature{}
- }
- switch txType {
- case protocol.UnknownTx:
- continue
- case protocol.PaymentTx:
- v0, err := protocol.RandomizeObject(&txn.Txn.PaymentTxnFields)
- require.NoError(t, err)
- PaymentTxnFields, ok := v0.(*transactions.PaymentTxnFields)
- require.True(t, ok)
- txn.Txn.PaymentTxnFields = *PaymentTxnFields
- case protocol.KeyRegistrationTx:
- v0, err := protocol.RandomizeObject(&txn.Txn.KeyregTxnFields)
- require.NoError(t, err)
- KeyregTxnFields, ok := v0.(*transactions.KeyregTxnFields)
- require.True(t, ok)
- txn.Txn.KeyregTxnFields = *KeyregTxnFields
- case protocol.AssetConfigTx:
- v0, err := protocol.RandomizeObject(&txn.Txn.AssetConfigTxnFields)
- require.NoError(t, err)
- AssetConfigTxnFields, ok := v0.(*transactions.AssetConfigTxnFields)
- require.True(t, ok)
- txn.Txn.AssetConfigTxnFields = *AssetConfigTxnFields
- case protocol.AssetTransferTx:
- v0, err := protocol.RandomizeObject(&txn.Txn.AssetTransferTxnFields)
- require.NoError(t, err)
- AssetTransferTxnFields, ok := v0.(*transactions.AssetTransferTxnFields)
- require.True(t, ok)
- txn.Txn.AssetTransferTxnFields = *AssetTransferTxnFields
- case protocol.AssetFreezeTx:
- v0, err := protocol.RandomizeObject(&txn.Txn.AssetFreezeTxnFields)
- require.NoError(t, err)
- AssetFreezeTxnFields, ok := v0.(*transactions.AssetFreezeTxnFields)
- require.True(t, ok)
- txn.Txn.AssetFreezeTxnFields = *AssetFreezeTxnFields
- case protocol.ApplicationCallTx:
- v0, err := protocol.RandomizeObject(&txn.Txn.ApplicationCallTxnFields)
- require.NoError(t, err)
- ApplicationCallTxnFields, ok := v0.(*transactions.ApplicationCallTxnFields)
- require.True(t, ok)
- txn.Txn.ApplicationCallTxnFields = *ApplicationCallTxnFields
- txn.Txn.ApplicationCallTxnFields.OnCompletion = 1
- case protocol.CompactCertTx:
- v0, err := protocol.RandomizeObject(&txn.Txn.CompactCertTxnFields)
- require.NoError(t, err)
- CompactCertTxnFields, ok := v0.(*transactions.CompactCertTxnFields)
- require.True(t, ok)
- txn.Txn.CompactCertTxnFields = *CompactCertTxnFields
- default:
- require.Fail(t, "unsupported txntype for txnsync msg encoding")
- }
- txn.Txn.Group = crypto.Digest{}
- txns = append(txns, txn)
- }
- txnGroups := []pooldata.SignedTxGroup{
- pooldata.SignedTxGroup{
- Transactions: txns,
- },
- }
- err = addGroupHashes(txnGroups, len(txns), []byte{1})
- require.NoError(t, err)
- var s syncState
- ptg, err := s.encodeTransactionGroups(txnGroups, 0)
- require.NoError(t, err)
- out, err := decodeTransactionGroups(ptg, stx.Txn.GenesisID, stx.Txn.GenesisHash)
- require.NoError(t, err)
- require.ElementsMatch(t, txnGroups, out)
- }
-}
-
-// pass in flag -db to specify db, start round, end round
-func TestTxnGroupEncodingArchival(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- if *blockDBFilename == "" {
- t.Skip("no archival node db was provided")
- }
- blockDBs, err := db.OpenPair(*blockDBFilename, false)
- require.NoError(t, err)
- for r := basics.Round(*startRound); r < basics.Round(*endRound); r++ {
- var block bookkeeping.Block
- err = blockDBs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- var buf []byte
- err = tx.QueryRow("SELECT blkdata FROM blocks WHERE rnd=?", r).Scan(&buf)
- if err != nil {
- if err == sql.ErrNoRows {
- err = ledgercore.ErrNoEntry{Round: r}
- }
- return err
- }
- return protocol.Decode(buf, &block)
- })
- require.NoError(t, err)
-
- var txnGroups []pooldata.SignedTxGroup
- genesisID := block.GenesisID()
- genesisHash := block.GenesisHash()
- var payset [][]transactions.SignedTxnWithAD
- payset, err := block.DecodePaysetGroups()
- require.NoError(t, err)
- for _, txns := range payset {
- var txnGroup pooldata.SignedTxGroup
- for _, txn := range txns {
- txnGroup.Transactions = append(txnGroup.Transactions, txn.SignedTxn)
- }
- txnGroups = append(txnGroups, txnGroup)
- }
-
- var s syncState
- ptg, err := s.encodeTransactionGroups(txnGroups, 0)
- require.NoError(t, err)
- out, err := decodeTransactionGroups(ptg, genesisID, genesisHash)
- require.NoError(t, err)
- require.ElementsMatch(t, txnGroups, out)
- }
-}
diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go
index cee316e32..eeffc88ef 100644
--- a/util/bloom/bloom.go
+++ b/util/bloom/bloom.go
@@ -118,31 +118,22 @@ func BinaryMarshalLength(numElements int, falsePositiveRate float64) int64 {
return filterBytes + 8 // adding 8 to match 4 prefix array, plus 4 bytes for the numHashes uint32
}
-// UnmarshalBinary implements encoding.BinaryUnmarshaller interface
-func (f *Filter) UnmarshalBinary(data []byte) error {
+// UnmarshalBinary restores the state of the filter from raw data
+func UnmarshalBinary(data []byte) (*Filter, error) {
+ f := &Filter{}
if len(data) <= 8 {
- return errors.New("short data")
+ return nil, errors.New("short data")
}
f.numHashes = binary.BigEndian.Uint32(data[0:4])
if f.numHashes > maxHashes {
- return errors.New("too many hashes")
+ return nil, errors.New("too many hashes")
}
copy(f.prefix[:], data[4:8])
f.data = data[8:]
f.preimageStagingBuffer = make([]byte, len(f.prefix), len(f.prefix)+32)
f.hashStagingBuffer = make([]uint32, f.numHashes+3)
copy(f.preimageStagingBuffer, f.prefix[:])
- return nil
-}
-
-// UnmarshalBinary restores the state of the filter from raw data
-func UnmarshalBinary(data []byte) (*Filter, error) {
- f := &Filter{}
- err := f.UnmarshalBinary(data)
- if err != nil {
- f = nil
- }
- return f, err
+ return f, nil
}
// MarshalJSON defines how this filter should be encoded to JSON
diff --git a/util/bloom/bloom_test.go b/util/bloom/bloom_test.go
index 12799e6a3..2380b8d87 100644
--- a/util/bloom/bloom_test.go
+++ b/util/bloom/bloom_test.go
@@ -56,7 +56,7 @@ func TestOptimal(t *testing.T) {
numFP := []int{100, 25, 5}
if testing.Short() {
- numElementsCases = []int{2000, 20000}
+ numElementsCases = []int{2000, 200000}
fpRateCases = []float64{0.001, 0.00001}
numFP = []int{100, 25}
}
@@ -99,7 +99,6 @@ func closeEnough(a, b, maxerr float64) (bool, float64) {
}
// based on "github.com/willf/bloom"
-// Changes here might need to be replicated to xor_test.go estimateFalsePositiveRateXor()
func (f *Filter) estimateFalsePositiveRate(numAdded uint32, numFP int) float64 {
x := make([]byte, 4)
for i := uint32(0); i < numAdded; i++ {
@@ -183,21 +182,18 @@ func TestMarshalJSON(t *testing.T) {
}
}
-const largeFilterElements = 150000
-
-// BenchmarkCreateLargeBloomFilter should have the same structure as xor_test.go BenchmarkCreateLargeXorFilter
-func BenchmarkCreateLargeBloomFilter(b *testing.B) {
+func BenchmarkCreateLargeFilter(b *testing.B) {
// dialing mu=25000; 3 servers; so each mailbox is 75000 real and 75000 noise
// for a total of 150000 elements in the dialing bloom filter
+ numElements := 150000
for i := 0; i < b.N; i++ {
- numBits, numHashes := Optimal(largeFilterElements, 1e-10)
+ numBits, numHashes := Optimal(numElements, 1e-10)
f := New(numBits, numHashes, 1234)
x := make([]byte, 4)
- for i := uint32(0); i < uint32(largeFilterElements); i++ {
+ for i := uint32(0); i < uint32(numElements); i++ {
binary.BigEndian.PutUint32(x, i)
f.Set(x)
}
- f.MarshalBinary()
}
}
@@ -355,26 +351,24 @@ func BenchmarkBloomFilterSet(b *testing.B) {
}
}
-const filterTestElements = 1000000
-
-// See also BenchmarkXorFilterTest
func BenchmarkBloomFilterTest(b *testing.B) {
- sizeBits, numHashes := Optimal(filterTestElements, 0.01)
+ bfElements := 1000000
+ sizeBits, numHashes := Optimal(bfElements, 0.01)
prefix := uint32(0)
bf := New(sizeBits, numHashes, prefix)
- dataset := make([][]byte, filterTestElements)
- for n := 0; n < filterTestElements; n++ {
+ dataset := make([][]byte, bfElements)
+ for n := 0; n < bfElements; n++ {
hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)})
dataset[n] = hash[:]
}
// set half of them.
- for n := 0; n < filterTestElements/2; n++ {
+ for n := 0; n < bfElements/2; n++ {
bf.Set(dataset[n])
}
b.ResetTimer()
for x := 0; x < b.N; x++ {
- bf.Test(dataset[x%filterTestElements])
+ bf.Test(dataset[x%bfElements])
}
}
diff --git a/util/bloom/generic.go b/util/bloom/generic.go
deleted file mode 100644
index 991edb5aa..000000000
--- a/util/bloom/generic.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package bloom
-
-// GenericFilter is the interface for either bloom.Filter or bloom.XorFilter
-type GenericFilter interface {
- // The input x is expected to be a slice with a length of 8 bytes or more.
- Set(x []byte)
- // The input x is expected to be a slice with a length of 8 bytes or more.
- Test(x []byte) bool
- MarshalBinary() ([]byte, error)
- UnmarshalBinary(data []byte) error
-}
diff --git a/util/bloom/xor.go b/util/bloom/xor.go
deleted file mode 100644
index b79dbfa5f..000000000
--- a/util/bloom/xor.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package bloom
-
-import (
- "encoding/binary"
- "encoding/json"
- "errors"
-
- "github.com/algorand/xorfilter"
-)
-
-// XorBuilder is a local alias for xorfilter.Builder
-type XorBuilder = xorfilter.Builder
-
-// XorFilter is a faster more efficient alternative to a Bloom filter
-// An XorFilter object can be used as is or with optional adittional setup.
-type XorFilter struct {
- xor *xorfilter.Xor32
- holding []uint64
-
- b *XorBuilder
-}
-
-// NewXor returns an XorFilter with an internal map created with a size hint and an optional *XorBuilder (may be nil)
-// The Builder is not thread safe and should only be used by one thread at a time.
-func NewXor(hint int, builder *XorBuilder) *XorFilter {
- return &XorFilter{
- b: builder,
- holding: make([]uint64, 0, hint),
- }
-}
-
-// Set adds the value to the filter.
-func (xf *XorFilter) Set(x []byte) {
- k := binary.BigEndian.Uint64(x)
- xf.holding = append(xf.holding, k)
-}
-
-// Test checks whether x is present in the filter.
-// May return (rare) erroneous true values, but false is precise.
-func (xf *XorFilter) Test(x []byte) bool {
- k := binary.BigEndian.Uint64(x)
- if xf.xor != nil {
- return xf.xor.Contains(k)
- }
- return false
-}
-
-const sizeofInt32 = 4
-
-// MarshalBinary implements encoding.BinaryMarshaller interface
-func (xf *XorFilter) MarshalBinary() ([]byte, error) {
- if len(xf.holding) != 0 {
- var err error
- if xf.b != nil {
- xf.xor, err = xf.b.Populate32(xf.holding)
- } else {
- xf.xor, err = xorfilter.Populate32(xf.holding)
- }
- if err != nil {
- return nil, err
- }
- }
- if xf.xor == nil || (len(xf.xor.Fingerprints) == 0) {
- // TODO: some other encoding for empty set?
- return nil, nil
- }
- out := make([]byte, binary.MaxVarintLen64+binary.MaxVarintLen32+binary.MaxVarintLen32+(len(xf.xor.Fingerprints)*sizeofInt32))
- pos := 0
- pos += binary.PutUvarint(out[pos:], xf.xor.Seed)
- pos += binary.PutUvarint(out[pos:], uint64(xf.xor.BlockLength))
- pos += binary.PutUvarint(out[pos:], uint64(len(xf.xor.Fingerprints)))
- for _, v := range xf.xor.Fingerprints {
- binary.LittleEndian.PutUint32(out[pos:], v)
- pos += sizeofInt32
- }
- out = out[:pos]
- return out, nil
-}
-
-// ErrBadBinary is returned when UnmarshalBinary fails
-var ErrBadBinary = errors.New("bad XorFilter binary")
-
-// TODO: make this an option to UnmarshalBinary, or a settable global, ...
-const maxFingerprints = 1000000
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaller interface
-func (xf *XorFilter) UnmarshalBinary(data []byte) error {
- pos := 0
- var dp int
- xor := new(xorfilter.Xor32)
- xor.Seed, dp = binary.Uvarint(data[pos:])
- if dp <= 0 {
- return ErrBadBinary
- }
- pos += dp
- blockLength, dp := binary.Uvarint(data[pos:])
- if dp <= 0 {
- return ErrBadBinary
- }
- xor.BlockLength = uint32(blockLength)
- pos += dp
- lenFingerprints, dp := binary.Uvarint(data[pos:])
- if dp <= 0 {
- return ErrBadBinary
- }
- pos += dp
- if lenFingerprints > 0 {
- if lenFingerprints > maxFingerprints {
- return ErrBadBinary
- }
- if (lenFingerprints * sizeofInt32) > uint64(len(data)-pos) {
- return ErrBadBinary
- }
- xor.Fingerprints = make([]uint32, lenFingerprints)
- for i := 0; i < int(lenFingerprints); i++ {
- xor.Fingerprints[i] = binary.LittleEndian.Uint32(data[pos:])
- pos += sizeofInt32
- }
- xf.xor = xor
- } else {
- xf.xor = nil
- }
- return nil
-}
-
-// MarshalJSON implements encoding/json.Marshaller interface
-func (xf *XorFilter) MarshalJSON() ([]byte, error) {
- data, err := xf.MarshalBinary()
- if err != nil {
- return nil, err
- }
- return json.Marshal(data)
-}
-
-// UnmarshalJSON implements encoding/json.Unmarshaler interface
-func (xf *XorFilter) UnmarshalJSON(data []byte) error {
- var blob []byte
- err := json.Unmarshal(data, &blob)
- if err != nil {
- return err
- }
- return xf.UnmarshalBinary(blob)
-}
-
-// XorFilter8 is a faster more efficient alternative to a Bloom filter
-// An XorFilter8 object can be used as is or with optional adittional setup.
-// XorFilter8 uses 1/4 the space of XorFilter (32 bit)
-type XorFilter8 struct {
- xor *xorfilter.Xor8
- holding []uint64
-
- b *XorBuilder
-}
-
-// NewXor8 returns an XorFilter8 with an internal map created with a size hint and an optional *XorBuilder (may be nil)
-// The Builder is not thread safe and should only be used by one thread at a time.
-func NewXor8(hint int, builder *XorBuilder) *XorFilter8 {
- return &XorFilter8{
- b: builder,
- }
-}
-
-// Set adds the value to the filter.
-func (xf *XorFilter8) Set(x []byte) {
- k := binary.BigEndian.Uint64(x)
- xf.holding = append(xf.holding, k)
-}
-
-// Test checks whether x is present in the filter.
-// May return (rare) erroneous true values, but false is precise.
-func (xf *XorFilter8) Test(x []byte) bool {
- k := binary.BigEndian.Uint64(x)
- if xf.xor != nil {
- return xf.xor.Contains(k)
- }
- return false
-}
-
-// MarshalBinary implements encoding.BinaryMarshaller interface
-func (xf *XorFilter8) MarshalBinary() ([]byte, error) {
- if len(xf.holding) != 0 {
- var err error
- if xf.b != nil {
- xf.xor, err = xf.b.Populate(xf.holding)
- } else {
- xf.xor, err = xorfilter.Populate(xf.holding)
- }
- if err != nil {
- return nil, err
- }
- }
- if xf.xor == nil || (len(xf.xor.Fingerprints) == 0) {
- // TODO: some other encoding for empty set?
- return nil, nil
- }
- out := make([]byte, binary.MaxVarintLen64+binary.MaxVarintLen32+binary.MaxVarintLen32+(len(xf.xor.Fingerprints)))
- pos := 0
- pos += binary.PutUvarint(out[pos:], xf.xor.Seed)
- pos += binary.PutUvarint(out[pos:], uint64(xf.xor.BlockLength))
- pos += binary.PutUvarint(out[pos:], uint64(len(xf.xor.Fingerprints)))
- copy(out[pos:], xf.xor.Fingerprints)
- pos += len(xf.xor.Fingerprints)
- out = out[:pos]
- return out, nil
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaller interface
-func (xf *XorFilter8) UnmarshalBinary(data []byte) error {
- pos := 0
- var dp int
- xor := new(xorfilter.Xor8)
- xor.Seed, dp = binary.Uvarint(data[pos:])
- if dp <= 0 {
- return ErrBadBinary
- }
- pos += dp
- blockLength, dp := binary.Uvarint(data[pos:])
- if dp <= 0 {
- return ErrBadBinary
- }
- xor.BlockLength = uint32(blockLength)
- pos += dp
- lenFingerprints, dp := binary.Uvarint(data[pos:])
- if dp <= 0 {
- return ErrBadBinary
- }
- pos += dp
- if lenFingerprints > 0 {
- if lenFingerprints > maxFingerprints {
- return ErrBadBinary
- }
- if lenFingerprints > uint64(len(data)-pos) {
- return ErrBadBinary
- }
- xor.Fingerprints = make([]byte, lenFingerprints)
- copy(xor.Fingerprints, data[pos:])
- xf.xor = xor
- } else {
- xf.xor = nil
- }
- return nil
-}
-
-// MarshalJSON implements encoding/json.Marshaller interface
-func (xf *XorFilter8) MarshalJSON() ([]byte, error) {
- data, err := xf.MarshalBinary()
- if err != nil {
- return nil, err
- }
- return json.Marshal(data)
-}
-
-// UnmarshalJSON implements encoding/json.Unmarshaler interface
-func (xf *XorFilter8) UnmarshalJSON(data []byte) error {
- var blob []byte
- err := json.Unmarshal(data, &blob)
- if err != nil {
- return err
- }
- return xf.UnmarshalBinary(blob)
-}
diff --git a/util/bloom/xor_test.go b/util/bloom/xor_test.go
deleted file mode 100644
index 0ad839bc7..000000000
--- a/util/bloom/xor_test.go
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package bloom
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "math/rand"
- "runtime"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/xorfilter"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestXorBloom(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- numElementsCases := []int{2000, 20000, 200000}
- fpRateCases := []float64{0.0042} //, 0.00001, 0.0000001}
- numFP := []int{100, 25, 5}
- if testing.Short() {
- numElementsCases = []int{2000, 20000}
- numFP = []int{100, 25}
- }
- for _, numElements := range numElementsCases {
- for i, fpRate := range fpRateCases {
- actualRate := estimateFalsePositiveRateXor(t, numElements, numFP[i])
- if actualRate < fpRate {
- t.Logf("\tOK: numElements=%v want %v, got %v", numElements, fpRate, actualRate)
- continue
- }
-
- t.Errorf("numElements=%v want %v, got %v", numElements, fpRate, actualRate)
- }
- }
-}
-
-// like bloom_test.go estimateFalsePositiveRate()
-// based on "github.com/willf/bloom"
-func estimateFalsePositiveRateXor(t *testing.T, numAdded int, numFP int) float64 {
- var xf XorFilter
- maxDuration := 5 * time.Second
- if testing.Short() {
- maxDuration = 100 * time.Millisecond
- }
- x := make([]byte, 8)
- for i := 0; i < numAdded; i++ {
- binary.BigEndian.PutUint32(x, uint32(i))
- xf.Set(x)
- }
-
- xord, err := xf.MarshalBinary()
- require.NoError(t, err)
- var nxf XorFilter8
- err = nxf.UnmarshalBinary(xord)
- require.NoError(t, err)
-
- start := time.Now()
- falsePositives := 0
- numRounds := 0
- for i := 0; falsePositives < numFP; i++ {
- binary.BigEndian.PutUint32(x, uint32(numAdded+i+1))
- if nxf.Test(x) {
- falsePositives++
- }
- numRounds++
- if numRounds%10000 == 0 {
- dt := time.Now().Sub(start)
- if dt > maxDuration {
- t.Logf("t %s > max duration %s without finding false positive rate", dt, maxDuration)
- break
- }
- }
- }
-
- return float64(falsePositives) / float64(numRounds)
-}
-
-func TestByte32FalsePositive(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- t.Parallel()
- var filterSizes = []int{1000, 5000, 10000, 50000, 100000}
- for _, filterSetSize := range filterSizes {
- //const filterSetSize = 100000
- txids := make([][]byte, filterSetSize)
- store := make([]byte, 32*filterSetSize)
- rand.Read(store)
- for i := 0; i < filterSetSize; i++ {
- txids[i] = store[i*32 : (i+1)*32]
- }
-
- notIn := func(t []byte) bool {
- for _, v := range txids {
- if bytes.Equal(t, v) {
- return false
- }
- }
- return true
- }
-
- var xf XorFilter
-
- fpRate := 0.01
- //fpRate := 0.004
- numBits, numHashes := Optimal(filterSetSize, fpRate)
- bf := New(numBits, numHashes, 0x12345678)
-
- for _, v := range txids {
- xf.Set(v)
- bf.Set(v)
- }
-
- xord, err := xf.MarshalBinary()
- require.NoError(t, err)
- var nxf XorFilter
- err = nxf.UnmarshalBinary(xord)
- require.NoError(t, err)
-
- bloomData, err := bf.MarshalBinary()
- require.NoError(t, err)
-
- t.Logf("filter for %d * [32]byte, bloom %d bytes, xor8 %d bytes",
- filterSetSize, len(bloomData), len(xord))
-
- xfalsePositives := 0
- bfalsePositives := 0
- const testN = 100000
- var tt [32]byte
- for i := 0; i < testN; i++ {
- rand.Read(tt[:])
- xhit := nxf.Test(tt[:])
- bhit := bf.Test(tt[:])
- if xhit || bhit {
- falsePositive := notIn(tt[:])
- if xhit && falsePositive {
- xfalsePositives++
- }
- if bhit && falsePositive {
- bfalsePositives++
- }
- }
- }
-
- t.Logf("false positives bloom %d/%d, xor %d/%d", bfalsePositives, testN, xfalsePositives, testN)
- bfp := float64(bfalsePositives) / float64(testN)
- xfp := float64(xfalsePositives) / float64(testN)
- if bfp > (fpRate * 1.2) {
- t.Errorf("bloom false positive too high: %f", bfp)
- }
- if xfp > (fpRate * 1.2) {
- t.Errorf("xor false positive too high: %f", xfp)
- }
- }
-}
-
-type GenericFilterFactory func() GenericFilter
-
-func memTestFilter(t *testing.T, filterFactory GenericFilterFactory, filterSetSize int) {
- // setup
- txids := make([][]byte, filterSetSize)
- store := make([]byte, 32*filterSetSize)
- rand.Read(store)
- for i := 0; i < filterSetSize; i++ {
- txids[i] = store[i*32 : (i+1)*32]
- }
- runtime.GC()
-
- var memAfterSetup runtime.MemStats
- runtime.ReadMemStats(&memAfterSetup)
-
- f := filterFactory()
- for _, v := range txids {
- f.Set(v)
- }
- data, err := f.MarshalBinary()
- require.NoError(t, err)
-
- var memAfterSerialize runtime.MemStats
- runtime.ReadMemStats(&memAfterSerialize)
-
- nf := filterFactory()
- err = nf.UnmarshalBinary(data)
- require.NoError(t, err)
-
- var memAfterDeserialize runtime.MemStats
- runtime.ReadMemStats(&memAfterDeserialize)
-
- t.Logf("build mem[%d]: %s", filterSetSize, memDelta(&memAfterSetup, &memAfterSerialize))
- t.Logf("load mem[%d]: %s", filterSetSize, memDelta(&memAfterSerialize, &memAfterDeserialize))
-}
-
-func memDelta(a, b *runtime.MemStats) string {
- dMallocs := b.Mallocs - a.Mallocs
- dFrees := b.Frees - a.Frees
- dAllocated := b.HeapAlloc - a.HeapAlloc
- return fmt.Sprintf("%d mallocs, %d frees, %d bytes allocated", dMallocs, dFrees, dAllocated)
-}
-
-func TestMemXor(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- t.Parallel()
- var xb xorfilter.Builder
- xff := func() GenericFilter {
- xf := NewXor(5000, &xb)
- return xf
- }
- memTestFilter(t, xff, 5000)
- memTestFilter(t, xff, 5000)
-}
-
-func TestMemBloom(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- t.Parallel()
- fpRate := 0.004
- filterSetSize := 5000
- numBits, numHashes := Optimal(filterSetSize, fpRate)
- bff := func() GenericFilter {
- return New(numBits, numHashes, 0x12345678)
- }
- memTestFilter(t, bff, filterSetSize)
-}
-
-// TestFilterSize tests different sizes of inputs against xor8 and xor32 and check
-// that the generated marshaled byte representation aligns with the expected size.
-func TestFilterSize(t *testing.T) {
- partitiontest.PartitionTest(t)
- var builder XorBuilder
- for size := 1000; size < 50000; size = ((size + size/2) / 100) * 100 {
- xor := NewXor(size, &builder)
- for i := 0; i < size; i++ {
- digest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16)})
- xor.Set(digest[:])
- }
- out, err := xor.MarshalBinary()
- require.NoError(t, err)
- bytesElement := float32(len(out)) / float32(size)
- fmt.Printf("Xor32 filter for %d elements takes %d bytes, %f bytes/element\n", size, len(out), bytesElement)
- require.GreaterOrEqual(t, bytesElement, float32(4.9))
- require.LessOrEqual(t, bytesElement, float32(5.1))
- }
- for size := 1000; size < 50000; size = ((size + size/2) / 100) * 100 {
- xor := NewXor8(size, &builder)
- for i := 0; i < size; i++ {
- digest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16)})
- xor.Set(digest[:])
- }
- out, err := xor.MarshalBinary()
- require.NoError(t, err)
- bytesElement := float32(len(out)) / float32(size)
- fmt.Printf("Xor8 filter for %d elements takes %d bytes, %f bytes/element\n", size, len(out), bytesElement)
- require.GreaterOrEqual(t, bytesElement, float32(1.23))
- require.LessOrEqual(t, bytesElement, float32(1.28))
- }
-}
-
-// BenchmarkCreateLargeXorFilter should have the same structure as bloom_test.go BenchmarkCreateLargeBloomFilter
-func BenchmarkCreateLargeXorFilter(b *testing.B) {
- // dialing mu=25000; 3 servers; so each mailbox is 75000 real and 75000 noise
- // for a total of 150000 elements in the dialing bloom filter
- var xb xorfilter.Builder
- for i := 0; i < b.N; i++ {
- xf := NewXor(largeFilterElements, &xb)
- x := make([]byte, 8)
- for i := uint32(0); i < uint32(largeFilterElements); i++ {
- binary.BigEndian.PutUint32(x, i)
- xf.Set(x)
- }
- xf.MarshalBinary()
- }
-}
-
-// See Also BenchmarkBloomFilterTest
-func BenchmarkXorFilterTest(b *testing.B) {
- // sizeBits, numHashes := Optimal(filterTestElements, 0.01)
- // prefix := uint32(0)
- // bf := New(sizeBits, numHashes, prefix)
- var xf XorFilter
- dataset := make([][]byte, filterTestElements)
- for n := 0; n < filterTestElements; n++ {
- hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)})
- dataset[n] = hash[:]
- }
- // set half of them.
- for n := 0; n < filterTestElements/2; n++ {
- xf.Set(dataset[n])
- }
-
- xord, err := xf.MarshalBinary()
- require.NoError(b, err)
- var nxf XorFilter
- err = nxf.UnmarshalBinary(xord)
- require.NoError(b, err)
-
- b.ResetTimer()
- for x := 0; x < b.N; x++ {
- nxf.Test(dataset[x%filterTestElements])
- }
-}
diff --git a/util/compress/deflate.go b/util/compress/deflate.go
deleted file mode 100644
index 188885b5a..000000000
--- a/util/compress/deflate.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compress
-
-// #cgo CFLAGS: -Wall -std=c99 -I${SRCDIR}/libdeflate
-// #cgo amd64 CFLAGS: -DX86 -D__x86_64__ -D__i386__
-// #cgo arm64 CFLAGS: -DARM
-// #cgo arm CFLAGS: -DARM
-// #cgo linux,amd64 CFLAGS: -march=sandybridge
-// #cgo darwin,amd64 CFLAGS: -march=tremont
-// #include <stdint.h>
-// int isNull(void * c) {
-// if(!c) {
-// return 1;
-// }
-// return 0;
-// };
-//
-// #ifdef X86
-// #include "lib/x86/cpu_features.c"
-// #endif
-// #ifdef ARM
-// #include "lib/arm/cpu_features.c"
-// #endif
-// #define dispatch crc32_dispatch
-// #include "lib/crc32.c"
-// #undef dispatch
-// #define dispatch compress_dispatch
-// #define bitbuf_t compress_bitbuf_t
-// #include "lib/deflate_compress.c"
-// #undef bitbuf_t
-// #undef dispatch
-// #undef BITBUF_NBITS
-// #include "lib/deflate_decompress.c"
-// #include "lib/gzip_compress.c"
-// #include "lib/gzip_decompress.c"
-// #include "lib/utils.c"
-import "C"
-
-import (
- "errors"
- "runtime"
- "unsafe"
-)
-
-var (
- // ErrOutOfMemory is returned when we fail to allocate the compressor/decompressor
- ErrOutOfMemory = errors.New("out of memory")
- // ErrShortBuffer is returned when the output buffer is too small to fit the compressed/decompressed data
- ErrShortBuffer = errors.New("short buffer")
- // ErrNoInput is returned when no input buffer data is provided
- ErrNoInput = errors.New("empty input")
- // ErrBadData is returned when the compressed data is corrupted
- ErrBadData = errors.New("data was corrupted, invalid or unsupported")
- // ErrInsufficientSpace is returned when the provided output buffer is found to be too small during the decompression
- ErrInsufficientSpace = errors.New("decompression failed: buffer too short. Retry with larger buffer")
- // ErrShortOutput should not be generated using the current libdeflate usage, but remain here for compatibility
- ErrShortOutput = errors.New("buffer too long: decompressed to fewer bytes than expected, indicating possible error in decompression. Make sure your out buffer has the exact length of the decompressed data or pass nil for out")
- // ErrPartiallyConsumedData is returned if only a subset of the input data was consumed during the decompression
- ErrPartiallyConsumedData = errors.New("partially consumed data")
-
- // ErrUnknown returned when the libdeflate returns unexpected enum error
- ErrUnknown = errors.New("unknown error code from decompressor library")
-)
-
-// Compress the input buffer into the output buffer.
-func Compress(in, out []byte, compressLevel int) (int, []byte, error) {
- if len(in) == 0 {
- return 0, out, ErrNoInput
- }
- if cap(out) == 0 {
- return 0, out, ErrShortBuffer
- }
-
- if compressLevel < 1 {
- compressLevel = 1
- } else if compressLevel > 12 {
- compressLevel = 12
- }
-
- c := C.libdeflate_alloc_compressor(C.int(compressLevel))
- if C.isNull(unsafe.Pointer(c)) == 1 {
- return 0, out, ErrOutOfMemory
- }
- defer func() {
- C.libdeflate_free_compressor(c)
- }()
- inAddr := startMemAddr(in)
- outAddr := startMemAddr(out)
-
- written := int(C.libdeflate_gzip_compress(c, unsafe.Pointer(inAddr), C.size_t(len(in)), unsafe.Pointer(outAddr), C.size_t(cap(out))))
-
- if written == 0 {
- return written, out, ErrShortBuffer
- }
- return written, out[:written], nil
-}
-
-// Decompress decompresses the input buffer data into the output buffer.
-func Decompress(in, out []byte) ([]byte, error) {
- if len(in) == 0 {
- return out, ErrNoInput
- }
- if cap(out) == 0 {
- return out, ErrShortBuffer
- }
- dc := C.libdeflate_alloc_decompressor()
- if C.isNull(unsafe.Pointer(dc)) == 1 {
- return out, ErrOutOfMemory
- }
- defer func() {
- C.libdeflate_free_decompressor(dc)
- }()
-
- inAddr := startMemAddr(in)
- outAddr := startMemAddr(out)
-
- var actualInBytes C.size_t
- var actualOutBytes C.size_t
- r := C.libdeflate_gzip_decompress_ex(dc, unsafe.Pointer(inAddr), C.size_t(len(in)), unsafe.Pointer(outAddr), C.size_t(cap(out)), &actualInBytes, &actualOutBytes)
-
- runtime.KeepAlive(&actualInBytes)
- runtime.KeepAlive(&actualOutBytes)
- switch r {
- case C.LIBDEFLATE_SUCCESS:
- if actualInBytes != C.size_t(len(in)) {
- // return an error if not all the data was consumed.
- return out, ErrPartiallyConsumedData
- }
- return out[:actualOutBytes], nil
- case C.LIBDEFLATE_BAD_DATA:
- return out, ErrBadData
- case C.LIBDEFLATE_SHORT_OUTPUT:
- return out, ErrShortOutput
- case C.LIBDEFLATE_INSUFFICIENT_SPACE:
- return out, ErrInsufficientSpace
- default:
- return out, ErrUnknown
- }
-}
-
-func startMemAddr(b []byte) *byte {
- if len(b) > 0 {
- return &b[0]
- }
-
- b = append(b, 0)
- ptr := &b[0]
-
- return ptr
-}
-
-func init() {
- // initialize dispatch tables. This is important since we want to avoid race conditions when running the dispatch over multiple cores.
- decompressedBuffer := []byte{1, 2, 3, 4}
- compressedBuffer := make([]byte, 128)
- _, compressedOutput, _ := Compress(decompressedBuffer, compressedBuffer, 9)
- decompressedBuffer = make([]byte, 128)
- Decompress(compressedOutput, decompressedBuffer) //nolint:errcheck // static call that won't fail
-}
diff --git a/util/compress/deflate_test.go b/util/compress/deflate_test.go
deleted file mode 100644
index cf16e72a1..000000000
--- a/util/compress/deflate_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package compress
-
-import (
- "bytes"
- "compress/gzip"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func TestTrivialCompression(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- bufLen := 10240
- buffer := make([]byte, bufLen)
- for i := range buffer {
- buffer[i] = byte(i % 256)
- }
-
- compressedBuffer := make([]byte, 0, bufLen)
- len, compressedOutput, err := Compress(buffer, compressedBuffer, 9)
- require.NoError(t, err)
- require.NotZero(t, len)
- require.Equal(t, compressedBuffer[:len], compressedOutput)
-
- decompressedBuffer := make([]byte, 0, bufLen)
- decompressedOutput, err := Decompress(compressedOutput, decompressedBuffer)
- require.NoError(t, err)
- require.Equal(t, decompressedOutput, buffer)
-}
-
-func BenchmarkCompression(b *testing.B) {
- bufLen := 1024000
- buffer := make([]byte, bufLen)
- outBuffer := make([]byte, 0, bufLen)
- for i := range buffer {
- buffer[i] = byte(i % 256)
- }
- var targetLength int
- b.Run("compress/gzip", func(b *testing.B) {
- for k := 0; k < b.N; k++ {
- outBuffer := bytes.NewBuffer(outBuffer)
- writer := gzip.NewWriter(outBuffer)
- writer.Write(buffer)
- writer.Close()
- targetLength = outBuffer.Len()
- }
- })
- // figure out desired compression level.
- compressionLevel := 1
- for {
- len, _, _ := Compress(buffer, outBuffer, compressionLevel)
- if len <= targetLength+128 || compressionLevel > 11 {
- break
- }
- compressionLevel++
- }
- b.Run("deflateCompression", func(b *testing.B) {
- for k := 0; k < b.N; k++ {
- Compress(buffer, outBuffer[:cap(outBuffer)], compressionLevel)
- }
- })
-}
-
-func BenchmarkDecompression(b *testing.B) {
- bufLen := 1024000
- decompressedBuffer := make([]byte, bufLen)
- for i := range decompressedBuffer {
- decompressedBuffer[i] = byte(i % 256)
- }
-
- // create the compress/gzip compressed buffer.
- gzipCompressedBuffer := bytes.NewBuffer([]byte{})
- writer := gzip.NewWriter(gzipCompressedBuffer)
- writer.Write(decompressedBuffer)
- writer.Close()
- gzipCompressedBytes := gzipCompressedBuffer.Bytes()
-
- // create the deflate compressed buffer.
- deflateCompressedBuffer := make([]byte, 0, bufLen)
- _, deflateCompressedBuffer, _ = Compress(decompressedBuffer, deflateCompressedBuffer, 1)
-
- b.Run("compress/gzip", func(b *testing.B) {
- for k := 0; k < b.N; k++ {
- stage := make([]byte, 1024)
- reader, err := gzip.NewReader(bytes.NewBuffer(gzipCompressedBytes))
- require.NoError(b, err)
- for {
- n, err := reader.Read(stage[:])
- if n == 0 || err != nil {
- break
- }
- }
- reader.Close()
- gzipCompressedBuffer.Reset()
- }
- })
-
- b.Run("deflateCompression", func(b *testing.B) {
- outBuffer := make([]byte, 0, bufLen)
- for k := 0; k < b.N; k++ {
- Decompress(deflateCompressedBuffer, outBuffer[:cap(outBuffer)])
- }
- })
-}
diff --git a/util/compress/libdeflate/.cirrus.yml b/util/compress/libdeflate/.cirrus.yml
deleted file mode 100644
index 602f0a68a..000000000
--- a/util/compress/libdeflate/.cirrus.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-task:
- freebsd_instance:
- matrix:
- - image_family: freebsd-11-3-snap
- - image_family: freebsd-12-1-snap
- install_script: pkg install -y gmake
- script:
- - gmake check
diff --git a/util/compress/libdeflate/.github/workflows/ci.yml b/util/compress/libdeflate/.github/workflows/ci.yml
deleted file mode 100644
index ff7cfdbb4..000000000
--- a/util/compress/libdeflate/.github/workflows/ci.yml
+++ /dev/null
@@ -1,123 +0,0 @@
-name: CI
-on: [pull_request]
-env:
- CFLAGS: -Werror
-
-jobs:
- x86_64-build-and-test:
- name: Build and test (x86_64, ${{ matrix.os }}, ${{ matrix.compiler }})
- strategy:
- matrix:
- os: [ubuntu-20.04, ubuntu-18.04, ubuntu-16.04]
- compiler: [gcc, clang]
- exclude:
- # clang 3.8.0-2ubuntu4 crashes with:
- # "fatal error: error in backend: Cannot select: 0x21025a0: v64i8 = X86ISD::VBROADCAST 0x2101fb0"
- - os: ubuntu-16.04
- compiler: clang
- runs-on: ${{ matrix.os }}
- env:
- CC: ${{ matrix.compiler }}
- steps:
- - uses: actions/checkout@v2
- - name: Install dependencies
- run: |
- sudo apt-get update
- sudo apt-get install -y clang llvm libz-dev valgrind
- - run: scripts/run_tests.sh
-
- other-arch-build-and-test:
- name: Build and test (${{ matrix.arch }}, Debian Buster, ${{ matrix.compiler }})
- strategy:
- matrix:
- arch: [armv6, armv7, aarch64, s390x, ppc64le]
- compiler: [gcc, clang]
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - uses: uraimo/run-on-arch-action@v2.0.5
- with:
- arch: ${{ matrix.arch }}
- distro: buster
- githubToken: ${{ github.token }}
- install: |
- apt-get update
- apt-get install -y build-essential clang llvm libz-dev valgrind
- run: |
- # Valgrind and ASAN crash on at least s390x, ppc64le, and aarch64
- # here. (It's probably something related to the QEMU user-mode
- # emulation that run-on-arch-action uses.)
- export SKIP_VALGRIND=1
- export SKIP_ASAN=1
-
- case ${{ matrix.arch }} in
- s390x)
- # On s390x, in freestanding builds the shared library links to an
- # external symbol __clzdi2, even when -static-libgcc is used.
- export SKIP_FREESTANDING=1
- ;;
- aarch64)
- # "ldd: exited with unknown exit code (139)"
- if [ ${{ matrix.compiler }} = clang ]; then
- export SKIP_SHARED_LIB=1
- fi
- ;;
- esac
-
- export CC=${{ matrix.compiler }}
- scripts/run_tests.sh
-
- macos-build-and-test:
- name: Build and test (macOS)
- runs-on: macos-latest
- steps:
- - uses: actions/checkout@v2
- - run: make all check
-
- windows-build-and-test:
- name: Build and test (Windows)
- runs-on: windows-latest
- steps:
- - uses: actions/checkout@v2
- - shell: bash
- run: |
- PATH="C:\\msys64\\mingw64\\bin:C:\\msys64\\usr\\bin:$PATH" \
- make CC=gcc all check
-
- run-clang-static-analyzer:
- name: Run clang static analyzer
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: Install dependencies
- run: |
- sudo apt-get update
- sudo apt-get install -y clang-tools
- - name: Run clang static analyzer
- run: make scan-build
-
- run-shellcheck:
- name: Run shellcheck
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: Install dependencies
- run: |
- sudo apt-get update
- sudo apt-get install -y shellcheck
- - name: Run shellcheck
- run: make shellcheck
-
- cross-compile-for-windows:
- name: Cross compile for Windows
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: Install dependencies
- run: |
- sudo apt-get update
- sudo apt-get install -y gcc-mingw-w64-i686 gcc-mingw-w64-x86-64 libz-mingw-w64-dev
- - name: 32-bit build
- run: make CC=i686-w64-mingw32-gcc all test_programs
- - name: 64-bit build
- run: make CC=x86_64-w64-mingw32-gcc all test_programs
diff --git a/util/compress/libdeflate/.gitignore b/util/compress/libdeflate/.gitignore
deleted file mode 100644
index f0b086118..000000000
--- a/util/compress/libdeflate/.gitignore
+++ /dev/null
@@ -1,21 +0,0 @@
-*.a
-*.def
-*.dll
-*.dllobj
-*.dylib
-*.exe
-*.exp
-*.lib
-*.o
-*.obj
-*.so
-*.so.*
-/.build-config
-/programs/config.h
-/benchmark
-/checksum
-/gzip
-/gunzip
-/test_*
-tags
-cscope*
diff --git a/util/compress/libdeflate/COPYING b/util/compress/libdeflate/COPYING
deleted file mode 100644
index 1f1b81cd5..000000000
--- a/util/compress/libdeflate/COPYING
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright 2016 Eric Biggers
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation files
-(the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of the Software,
-and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/util/compress/libdeflate/Makefile b/util/compress/libdeflate/Makefile
deleted file mode 100644
index 276d75d09..000000000
--- a/util/compress/libdeflate/Makefile
+++ /dev/null
@@ -1,372 +0,0 @@
-#
-# Use 'make help' to list available targets.
-#
-# Define V=1 to enable "verbose" mode, showing all executed commands.
-#
-# Define USE_SHARED_LIB to link the binaries to the shared library version of
-# libdeflate rather than to the static library version.
-#
-# Define DECOMPRESSION_ONLY to omit all compression code, building a
-# decompression-only library. If doing this, you must also build a specific
-# library target such as 'libdeflate.a', as the programs will no longer compile.
-#
-# Define DISABLE_GZIP to disable support for the gzip wrapper format.
-#
-# Define DISABLE_ZLIB to disable support for the zlib wrapper format.
-#
-# Define PREFIX to override the installation prefix, like './configure --prefix'
-# in autotools-based projects (default: /usr/local)
-#
-# Define BINDIR to override where to install binaries, like './configure
-# --bindir' in autotools-based projects (default: PREFIX/bin)
-#
-# Define INCDIR to override where to install headers, like './configure
-# --includedir' in autotools-based projects (default: PREFIX/include)
-#
-# Define LIBDIR to override where to install libraries, like './configure
-# --libdir' in autotools-based projects (default: PREFIX/lib)
-#
-# Define DESTDIR to override the installation destination directory
-# (default: empty string)
-#
-# Define FREESTANDING to build a freestanding library, i.e. a library that
-# doesn't link to any libc functions like malloc(), free(), and memcpy().
-# All users will need to call libdeflate_set_memory_allocator().
-#
-# You can also specify a custom CC, CFLAGS, CPPFLAGS, and/or LDFLAGS.
-#
-##############################################################################
-
-#### Common compiler flags. You can add additional flags by defining CFLAGS
-#### in the environment or on the 'make' command line.
-####
-#### The default optimization flags can be overridden, e.g. via CFLAGS="-O3" or
-#### CFLAGS="-O0 -fno-omit-frame-pointer". But this usually isn't recommended;
-#### you're unlikely to get significantly better performance even with -O3.
-
-cc-option = $(shell if $(CC) $(1) -c -x c /dev/null -o /dev/null \
- 1>&2 2>/dev/null; then echo $(1); fi)
-
-override CFLAGS := \
- -O2 -fomit-frame-pointer -std=c99 -I. -Wall -Wundef \
- $(call cc-option,-Wpedantic) \
- $(call cc-option,-Wdeclaration-after-statement) \
- $(call cc-option,-Wmissing-prototypes) \
- $(call cc-option,-Wstrict-prototypes) \
- $(call cc-option,-Wvla) \
- $(call cc-option,-Wimplicit-fallthrough) \
- $(CFLAGS)
-
-FREESTANDING :=
-ifdef FREESTANDING
-override CPPFLAGS += -DFREESTANDING
-LIB_CFLAGS += -ffreestanding -nostdlib
-endif
-
-# Don't use this option except for testing; it isn't a stable interface.
-TEST_SUPPORT__DO_NOT_USE :=
-ifdef TEST_SUPPORT__DO_NOT_USE
-override CPPFLAGS += -DTEST_SUPPORT__DO_NOT_USE
-endif
-
-##############################################################################
-
-PREFIX ?= /usr/local
-BINDIR ?= $(PREFIX)/bin
-INCDIR ?= $(PREFIX)/include
-LIBDIR ?= $(PREFIX)/lib
-
-SOVERSION := 0
-
-STATIC_LIB_SUFFIX := .a
-PROG_SUFFIX :=
-PROG_CFLAGS :=
-HARD_LINKS := 1
-
-# Compiling for Windows with MinGW?
-ifneq ($(findstring -mingw,$(shell $(CC) -dumpmachine 2>/dev/null)),)
- STATIC_LIB_SUFFIX := static.lib
- SHARED_LIB := libdeflate.dll
- SHARED_LIB_SYMLINK :=
- SHARED_LIB_CFLAGS :=
- SHARED_LIB_LDFLAGS := -Wl,--out-implib,libdeflate.lib \
- -Wl,--output-def,libdeflate.def \
- -Wl,--add-stdcall-alias
- PROG_SUFFIX := .exe
- PROG_CFLAGS := -static -municode
- HARD_LINKS :=
- override CFLAGS := $(CFLAGS) $(call cc-option,-Wno-pedantic-ms-format)
-
- # If AR was not already overridden, then derive it from $(CC).
- # Note that CC may take different forms, e.g. "cc", "gcc",
- # "x86_64-w64-mingw32-gcc", or "x86_64-w64-mingw32-gcc-6.3.1".
- # On Windows it may also have a .exe extension.
- ifeq ($(AR),ar)
- AR := $(shell echo $(CC) | \
- sed -E 's/g?cc(-?[0-9]+(\.[0-9]+)*)?(\.exe)?$$/ar\3/')
- endif
-
-# macOS?
-else ifeq ($(shell uname),Darwin)
- SHARED_LIB := libdeflate.$(SOVERSION).dylib
- SHARED_LIB_SYMLINK := libdeflate.dylib
- SHARED_LIB_CFLAGS := -fPIC
- SHARED_LIB_LDFLAGS := -install_name $(SHARED_LIB)
-
-# Linux, FreeBSD, etc.
-else
- SHARED_LIB := libdeflate.so.$(SOVERSION)
- SHARED_LIB_SYMLINK := libdeflate.so
- SHARED_LIB_CFLAGS := -fPIC
- SHARED_LIB_LDFLAGS := -Wl,-soname=$(SHARED_LIB)
-endif
-
-##############################################################################
-
-#### Quiet make is enabled by default. Define V=1 to disable.
-
-ifneq ($(findstring s,$(MAKEFLAGS)),s)
-ifneq ($(V),1)
- QUIET_CC = @echo ' CC ' $@;
- QUIET_CCLD = @echo ' CCLD ' $@;
- QUIET_AR = @echo ' AR ' $@;
- QUIET_LN = @echo ' LN ' $@;
- QUIET_CP = @echo ' CP ' $@;
- QUIET_GEN = @echo ' GEN ' $@;
-endif
-endif
-
-##############################################################################
-
-# Rebuild if a user-specified setting that affects the build changed.
-.build-config: FORCE
- @flags=$$( \
- echo 'USE_SHARED_LIB=$(USE_SHARED_LIB)'; \
- echo 'DECOMPRESSION_ONLY=$(DECOMPRESSION_ONLY)'; \
- echo 'DISABLE_GZIP=$(DISABLE_GZIP)'; \
- echo 'DISABLE_ZLIB=$(DISABLE_ZLIB)'; \
- echo 'FREESTANDING=$(FREESTANDING)'; \
- echo 'CC=$(CC)'; \
- echo 'CFLAGS=$(CFLAGS)'; \
- echo 'CPPFLAGS=$(CPPFLAGS)'; \
- echo 'LDFLAGS=$(LDFLAGS)'; \
- ); \
- if [ "$$flags" != "`cat $@ 2>/dev/null`" ]; then \
- [ -e $@ ] && echo "Rebuilding due to new settings"; \
- echo "$$flags" > $@; \
- fi
-
-##############################################################################
-
-COMMON_HEADERS := $(wildcard common/*.h) libdeflate.h
-DEFAULT_TARGETS :=
-
-#### Library
-
-STATIC_LIB := libdeflate$(STATIC_LIB_SUFFIX)
-
-LIB_CFLAGS += $(CFLAGS) -fvisibility=hidden -D_ANSI_SOURCE
-
-LIB_HEADERS := $(wildcard lib/*.h) $(wildcard lib/*/*.h)
-
-LIB_SRC := lib/deflate_decompress.c lib/utils.c \
- $(wildcard lib/*/cpu_features.c)
-
-DECOMPRESSION_ONLY :=
-ifndef DECOMPRESSION_ONLY
- LIB_SRC += lib/deflate_compress.c
-endif
-
-DISABLE_ZLIB :=
-ifndef DISABLE_ZLIB
- LIB_SRC += lib/adler32.c lib/zlib_decompress.c
- ifndef DECOMPRESSION_ONLY
- LIB_SRC += lib/zlib_compress.c
- endif
-endif
-
-DISABLE_GZIP :=
-ifndef DISABLE_GZIP
- LIB_SRC += lib/crc32.c lib/gzip_decompress.c
- ifndef DECOMPRESSION_ONLY
- LIB_SRC += lib/gzip_compress.c
- endif
-endif
-
-STATIC_LIB_OBJ := $(LIB_SRC:.c=.o)
-SHARED_LIB_OBJ := $(LIB_SRC:.c=.shlib.o)
-
-# Compile static library object files
-$(STATIC_LIB_OBJ): %.o: %.c $(LIB_HEADERS) $(COMMON_HEADERS) .build-config
- $(QUIET_CC) $(CC) -o $@ -c $(CPPFLAGS) $(LIB_CFLAGS) $<
-
-# Compile shared library object files
-$(SHARED_LIB_OBJ): %.shlib.o: %.c $(LIB_HEADERS) $(COMMON_HEADERS) .build-config
- $(QUIET_CC) $(CC) -o $@ -c $(CPPFLAGS) $(LIB_CFLAGS) \
- $(SHARED_LIB_CFLAGS) -DLIBDEFLATE_DLL $<
-
-# Create static library
-$(STATIC_LIB):$(STATIC_LIB_OBJ)
- $(QUIET_AR) $(AR) cr $@ $+
-
-DEFAULT_TARGETS += $(STATIC_LIB)
-
-# Create shared library
-$(SHARED_LIB):$(SHARED_LIB_OBJ)
- $(QUIET_CCLD) $(CC) -o $@ $(LDFLAGS) $(LIB_CFLAGS) \
- $(SHARED_LIB_LDFLAGS) -shared $+
-
-DEFAULT_TARGETS += $(SHARED_LIB)
-
-ifdef SHARED_LIB_SYMLINK
-# Create the symlink libdeflate.so => libdeflate.so.$SOVERSION
-$(SHARED_LIB_SYMLINK):$(SHARED_LIB)
- $(QUIET_LN) ln -sf $+ $@
-DEFAULT_TARGETS += $(SHARED_LIB_SYMLINK)
-endif
-
-##############################################################################
-
-#### Programs
-
-PROG_CFLAGS += $(CFLAGS) \
- -D_POSIX_C_SOURCE=200809L \
- -D_FILE_OFFSET_BITS=64 \
- -DHAVE_CONFIG_H
-
-ALL_PROG_COMMON_HEADERS := programs/config.h \
- programs/prog_util.h \
- programs/test_util.h
-PROG_COMMON_SRC := programs/prog_util.c \
- programs/tgetopt.c
-NONTEST_PROG_SRC := programs/gzip.c
-TEST_PROG_COMMON_SRC := programs/test_util.c
-TEST_PROG_SRC := programs/benchmark.c \
- programs/checksum.c \
- $(filter-out $(TEST_PROG_COMMON_SRC),$(wildcard programs/test_*.c))
-
-NONTEST_PROGRAMS := $(NONTEST_PROG_SRC:programs/%.c=%$(PROG_SUFFIX))
-DEFAULT_TARGETS += $(NONTEST_PROGRAMS)
-TEST_PROGRAMS := $(TEST_PROG_SRC:programs/%.c=%$(PROG_SUFFIX))
-
-PROG_COMMON_OBJ := $(PROG_COMMON_SRC:%.c=%.o)
-NONTEST_PROG_OBJ := $(NONTEST_PROG_SRC:%.c=%.o)
-TEST_PROG_COMMON_OBJ := $(TEST_PROG_COMMON_SRC:%.c=%.o)
-TEST_PROG_OBJ := $(TEST_PROG_SRC:%.c=%.o)
-
-ALL_PROG_OBJ := $(PROG_COMMON_OBJ) $(NONTEST_PROG_OBJ) \
- $(TEST_PROG_COMMON_OBJ) $(TEST_PROG_OBJ)
-
-# Generate autodetected configuration header
-programs/config.h:scripts/detect.sh .build-config
- $(QUIET_GEN) CC="$(CC)" CFLAGS="$(PROG_CFLAGS)" $< > $@
-
-# Compile program object files
-$(ALL_PROG_OBJ): %.o: %.c $(ALL_PROG_COMMON_HEADERS) $(COMMON_HEADERS) \
- .build-config
- $(QUIET_CC) $(CC) -o $@ -c $(CPPFLAGS) $(PROG_CFLAGS) $<
-
-# Link the programs.
-#
-# Note: the test programs are not compiled by default. One reason is that the
-# test programs must be linked with zlib for doing comparisons.
-
-ifdef USE_SHARED_LIB
-LIB := $(SHARED_LIB)
-else
-LIB := $(STATIC_LIB)
-endif
-
-$(NONTEST_PROGRAMS): %$(PROG_SUFFIX): programs/%.o $(PROG_COMMON_OBJ) $(LIB)
- $(QUIET_CCLD) $(CC) -o $@ $(LDFLAGS) $(PROG_CFLAGS) $+
-
-$(TEST_PROGRAMS): %$(PROG_SUFFIX): programs/%.o $(PROG_COMMON_OBJ) \
- $(TEST_PROG_COMMON_OBJ) $(LIB)
- $(QUIET_CCLD) $(CC) -o $@ $(LDFLAGS) $(PROG_CFLAGS) $+ -lz
-
-ifdef HARD_LINKS
-# Hard link gunzip to gzip
-gunzip$(PROG_SUFFIX):gzip$(PROG_SUFFIX)
- $(QUIET_LN) ln -f $< $@
-else
-# No hard links; copy gzip to gunzip
-gunzip$(PROG_SUFFIX):gzip$(PROG_SUFFIX)
- $(QUIET_CP) cp -f $< $@
-endif
-
-DEFAULT_TARGETS += gunzip$(PROG_SUFFIX)
-
-##############################################################################
-
-all:$(DEFAULT_TARGETS)
-
-# Install the files. Note: not all versions of the 'install' program have the
-# '-D' and '-t' options, so don't use them; use portable commands only.
-install:all
- install -d $(DESTDIR)$(LIBDIR) $(DESTDIR)$(INCDIR) $(DESTDIR)$(BINDIR)
- install -m644 $(STATIC_LIB) $(DESTDIR)$(LIBDIR)
- install -m755 $(SHARED_LIB) $(DESTDIR)$(LIBDIR)
- install -m644 libdeflate.h $(DESTDIR)$(INCDIR)
- install -m755 gzip$(PROG_SUFFIX) \
- $(DESTDIR)$(BINDIR)/libdeflate-gzip$(PROG_SUFFIX)
- ln -f $(DESTDIR)$(BINDIR)/libdeflate-gzip$(PROG_SUFFIX) \
- $(DESTDIR)$(BINDIR)/libdeflate-gunzip$(PROG_SUFFIX)
- if [ -n "$(SHARED_LIB_SYMLINK)" ]; then \
- ln -sf $(SHARED_LIB) \
- $(DESTDIR)$(LIBDIR)/$(SHARED_LIB_SYMLINK); \
- fi
-
-uninstall:
- rm -f $(DESTDIR)$(LIBDIR)/$(STATIC_LIB) \
- $(DESTDIR)$(LIBDIR)/$(SHARED_LIB) \
- $(DESTDIR)$(INCDIR)/libdeflate.h \
- $(DESTDIR)$(BINDIR)/libdeflate-gzip$(PROG_SUFFIX) \
- $(DESTDIR)$(BINDIR)/libdeflate-gunzip$(PROG_SUFFIX)
- if [ -n "$(SHARED_LIB_SYMLINK)" ]; then \
- rm -f $(DESTDIR)$(LIBDIR)/$(SHARED_LIB_SYMLINK); \
- fi
-
-test_programs:$(TEST_PROGRAMS)
-
-# A minimal 'make check' target. This only runs some quick tests;
-# use scripts/run_tests.sh if you want to run the full tests.
-check:test_programs
- LD_LIBRARY_PATH=. ./benchmark$(PROG_SUFFIX) < ./benchmark$(PROG_SUFFIX)
- for prog in test_*; do \
- LD_LIBRARY_PATH=. ./$$prog || exit 1; \
- done
-
-# Run the clang static analyzer.
-scan-build:
- scan-build --status-bugs make all test_programs
-
-# Run shellcheck on all shell scripts.
-shellcheck:
- shellcheck scripts/*.sh
-
-help:
- @echo "Available targets:"
- @echo "------------------"
- @for target in $(DEFAULT_TARGETS) $(TEST_PROGRAMS); do \
- echo -e "$$target"; \
- done
-
-clean:
- rm -f *.a *.dll *.exe *.exp *.dylib *.so \
- lib/*.o lib/*/*.o \
- lib/*.obj lib/*/*.obj \
- lib/*.dllobj lib/*/*.dllobj \
- programs/*.o programs/*.obj \
- $(DEFAULT_TARGETS) $(TEST_PROGRAMS) programs/config.h \
- libdeflate.lib libdeflate.def libdeflatestatic.lib \
- .build-config
-
-realclean: clean
- rm -f tags cscope*
-
-FORCE:
-
-.PHONY: all install uninstall test_programs check scan-build shellcheck help \
- clean realclean
-
-.DEFAULT_GOAL = all
diff --git a/util/compress/libdeflate/Makefile.msc b/util/compress/libdeflate/Makefile.msc
deleted file mode 100644
index 14496187f..000000000
--- a/util/compress/libdeflate/Makefile.msc
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-# Makefile for the Microsoft toolchain
-#
-# Usage:
-# nmake /f Makefile.msc
-#
-
-.SUFFIXES: .c .obj .dllobj
-
-CC = cl
-LD = link
-AR = lib
-CFLAGS = /MD /O2 -I.
-LDFLAGS =
-
-STATIC_LIB = libdeflatestatic.lib
-SHARED_LIB = libdeflate.dll
-IMPORT_LIB = libdeflate.lib
-
-STATIC_LIB_OBJ = \
- lib/adler32.obj \
- lib/crc32.obj \
- lib/deflate_compress.obj \
- lib/deflate_decompress.obj \
- lib/gzip_compress.obj \
- lib/gzip_decompress.obj \
- lib/utils.obj \
- lib/x86/cpu_features.obj \
- lib/zlib_compress.obj \
- lib/zlib_decompress.obj
-
-SHARED_LIB_OBJ = $(STATIC_LIB_OBJ:.obj=.dllobj)
-
-PROG_COMMON_OBJ = programs/prog_util.obj \
- programs/tgetopt.obj \
- $(STATIC_LIB)
-
-PROG_CFLAGS = $(CFLAGS) -Iprograms
-
-all: $(STATIC_LIB) $(SHARED_LIB) $(IMPORT_LIB) gzip.exe gunzip.exe
-
-.c.obj:
- $(CC) -c /Fo$@ $(CFLAGS) $**
-
-.c.dllobj:
- $(CC) -c /Fo$@ $(CFLAGS) /DLIBDEFLATE_DLL $**
-
-$(STATIC_LIB): $(STATIC_LIB_OBJ)
- $(AR) $(ARFLAGS) -out:$@ $(STATIC_LIB_OBJ)
-
-$(SHARED_LIB): $(SHARED_LIB_OBJ)
- $(LD) $(LDFLAGS) -out:$@ -dll -implib:$(IMPORT_LIB) $(SHARED_LIB_OBJ)
-
-$(IMPORT_LIB): $(SHARED_LIB)
-
-gzip.exe:programs/gzip.obj $(PROG_COMMON_OBJ)
- $(LD) $(LDFLAGS) -out:$@ $**
-
-gunzip.exe:gzip.exe
- copy $** $@
-
-clean:
- -del *.dll *.exe *.exp libdeflate.lib libdeflatestatic.lib gzip.lib \
- lib\*.obj lib\x86\*.obj lib\*.dllobj lib\x86\*.dllobj \
- programs\*.obj 2>nul
diff --git a/util/compress/libdeflate/NEWS b/util/compress/libdeflate/NEWS
deleted file mode 100644
index b1704dd18..000000000
--- a/util/compress/libdeflate/NEWS
+++ /dev/null
@@ -1,200 +0,0 @@
-Version 1.7:
- Added support for compression level 0, "no compression".
-
- Added an ARM CRC32 instruction accelerated implementation of CRC32.
-
- Added support for linking the programs to the shared library version of
- libdeflate rather than to the static library version.
-
- Made the compression level affect the minimum input size at which
- compression is attempted.
-
- Fixed undefined behavior in x86 Adler32 implementation.
- (No miscompilations were observed in practice.)
-
- Fixed undefined behavior in x86 CPU feature code.
- (No miscompilations were observed in practice.)
-
- Fixed installing shared lib symlink on macOS.
-
- Documented third-party bindings.
-
- Made a lot of improvements to the testing scripts and the CI
- configuration file.
-
- Lots of other small improvements and cleanups.
-
-Version 1.6:
- Prevented gcc 10 from miscompiling libdeflate (workaround for
- https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94994).
-
- Removed workaround for gcc 5 and earlier producing slow code on
- ARM32. If this affects you, please upgrade your compiler.
-
- New API function: libdeflate_zlib_decompress_ex(). It provides
- the actual size of the stream that was decompressed, like the
- gzip and DEFLATE equivalents.
-
- libdeflate_zlib_decompress() now accepts trailing bytes after
- the end of the stream, like the gzip and DEFLATE equivalents.
-
- Added support for custom memory allocators.
- (New API function: libdeflate_set_memory_allocator())
-
- Added support for building the library in freestanding mode.
-
- Building libdeflate no longer requires CPPFLAGS=-Icommon.
-
-Version 1.5:
- Fixed up stdcall support on 32-bit Windows: the functions are
- now exported using both suffixed and non-suffixed names, and
- fixed libdeflate.h to be MSVC-compatible again.
-
-Version 1.4:
- The 32-bit Windows build of libdeflate now uses the "stdcall"
- calling convention instead of "cdecl". If you're calling
- libdeflate.dll directly from C or C++, you'll need to recompile
- your code. If you're calling it from another language, or
- calling it indirectly using LoadLibrary(), you'll need to update
- your code to use the stdcall calling convention.
-
- The Makefile now supports building libdeflate as a shared
- library (.dylib) on macOS.
-
- Fixed a bug where support for certain optimizations and optional
- features (file access hints and more precise timestamps) was
- incorrectly omitted when libdeflate was compiled with -Werror.
-
- Added 'make check' target to the Makefile.
-
- Added CI configuration files.
-
-Version 1.3:
- `make install` now supports customizing the directories into
- which binaries, headers, and libraries are installed.
-
- `make install` now installs into /usr/local by default.
- To change it, use e.g. `make install PREFIX=/usr`.
-
- `make install` now works on more platforms.
-
- The Makefile now supports overriding the optimization flags.
-
- The compression functions now correctly handle an output data
- buffer >= 4 GiB in size, and `gzip` and `gunzip` now correctly
- handle multi-gigabyte files (if enough memory is available).
-
-Version 1.2:
- Slight improvements to decompression speed.
-
- Added an AVX-512BW implementation of Adler-32.
-
- The Makefile now supports a user-specified installation PREFIX.
-
- Fixed build error with some Visual Studio versions.
-
-Version 1.1:
- Fixed crash in CRC-32 code when the prebuilt libdeflate for
- 32-bit Windows was called by a program built with Visual Studio.
-
- Improved the worst-case decompression speed of malicious data.
-
- Fixed build error when compiling for an ARM processor without
- hardware floating point support.
-
- Improved performance on the PowerPC64 architecture.
-
- Added soname to libdeflate.so, to make packaging easier.
-
- Added 'make install' target to the Makefile.
-
- The Makefile now supports user-specified CPPFLAGS.
-
- The Windows binary releases now include the import library for
- libdeflate.dll. libdeflate.lib is now the import library, and
- libdeflatestatic.lib is the static library.
-
-Version 1.0:
- Added support for multi-member gzip files.
-
- Moved architecture-specific code into subdirectories. If you
- aren't using the provided Makefile to build libdeflate, you now
- need to compile lib/*.c and lib/*/*.c instead of just lib/*.c.
-
- Added an ARM PMULL implementation of CRC-32, which speeds up
- gzip compression and decompression on 32-bit and 64-bit ARM
- processors that have the Cryptography Extensions.
-
- Improved detection of CPU features, resulting in accelerated
- functions being used in more cases. This includes:
-
- - Detect CPU features on 32-bit x86, not just 64-bit as was
- done previously.
-
- - Detect CPU features on ARM, both 32 and 64-bit.
- (Limited to Linux only currently.)
-
-Version 0.8:
- Build fixes for certain platforms and compilers.
-
- libdeflate now produces the same output on all CPU architectures.
-
- Improved documentation for building libdeflate on Windows.
-
-Version 0.7:
- Fixed a very rare bug that caused data to be compressed incorrectly.
- The bug affected compression levels 7 and below since libdeflate v0.2.
- Although there have been no user reports of the bug, and I believe it
- would have been highly unlikely to encounter on realistic data, it could
- occur on data specially crafted to reproduce it.
-
- Fixed a compilation error when building with clang 3.7.
-
-Version 0.6:
- Various improvements to the gzip program's behavior.
-
- Faster CRC-32 on AVX-capable processors.
-
- Other minor changes.
-
-Version 0.5:
- The CRC-32 checksum algorithm has been optimized with carryless
- multiplication instructions for x86_64 (PCLMUL). This speeds up gzip
- compression and decompression.
-
- Build fixes for certain platforms and compilers.
-
- Added more test programs and scripts.
-
- libdeflate is now entirely MIT-licensed.
-
-Version 0.4:
- The Adler-32 checksum algorithm has been optimized with vector
- instructions for x86_64 (SSE2 and AVX2) and ARM (NEON). This speeds up
- zlib compression and decompression.
-
- To avoid naming collisions, functions and definitions in libdeflate's
- API have been renamed to be prefixed with "libdeflate_" or
- "LIBDEFLATE_". Programs using the old API will need to be updated.
-
- Various bug fixes and other improvements.
-
-Version 0.3:
- Some bug fixes and other minor changes.
-
-Version 0.2:
- Implemented a new block splitting algorithm which typically improves the
- compression ratio slightly at all compression levels.
-
- The compressor now outputs each block using the cheapest type (dynamic
- Huffman, static Huffman, or uncompressed).
-
- The gzip program has received an overhaul and now behaves more like the
- standard version.
-
- Build system updates, including: some build options were changed and
- some build options were removed, and the default 'make' target now
- includes the gzip program as well as the library.
-
-Version 0.1:
- Initial official release.
diff --git a/util/compress/libdeflate/README.md b/util/compress/libdeflate/README.md
deleted file mode 100644
index 9342e9e9b..000000000
--- a/util/compress/libdeflate/README.md
+++ /dev/null
@@ -1,283 +0,0 @@
-# Overview
-
-libdeflate is a library for fast, whole-buffer DEFLATE-based compression and
-decompression.
-
-The supported formats are:
-
-- DEFLATE (raw)
-- zlib (a.k.a. DEFLATE with a zlib wrapper)
-- gzip (a.k.a. DEFLATE with a gzip wrapper)
-
-libdeflate is heavily optimized. It is significantly faster than the zlib
-library, both for compression and decompression, and especially on x86
-processors. In addition, libdeflate provides optional high compression modes
-that provide a better compression ratio than the zlib's "level 9".
-
-libdeflate itself is a library, but the following command-line programs which
-use this library are also provided:
-
-* gzip (or gunzip), a program which mostly behaves like the standard equivalent,
- except that it does not yet have good streaming support and therefore does not
- yet support very large files
-* benchmark, a program for benchmarking in-memory compression and decompression
-
-## Table of Contents
-
-- [Building](#building)
- - [For UNIX](#for-unix)
- - [For macOS](#for-macos)
- - [For Windows](#for-windows)
- - [Using Cygwin](#using-cygwin)
- - [Using MSYS2](#using-msys2)
-- [API](#api)
-- [Bindings for other programming languages](#bindings-for-other-programming-languages)
-- [DEFLATE vs. zlib vs. gzip](#deflate-vs-zlib-vs-gzip)
-- [Compression levels](#compression-levels)
-- [Motivation](#motivation)
-- [License](#license)
-
-
-# Building
-
-## For UNIX
-
-Just run `make`, then (if desired) `make install`. You need GNU Make and either
-GCC or Clang. GCC is recommended because it builds slightly faster binaries.
-
-By default, the following targets are built: the static library `libdeflate.a`,
-the shared library `libdeflate.so`, the `gzip` program, and the `gunzip` program
-(which is actually just a hard link to `gzip`). Benchmarking and test programs
-such as `benchmark` are not built by default. You can run `make help` to
-display the available build targets.
-
-There are also many options which can be set on the `make` command line, e.g. to
-omit library features or to customize the directories into which `make install`
-installs files. See the Makefile for details.
-
-## For macOS
-
-Prebuilt macOS binaries can be installed with [Homebrew](https://brew.sh):
-
- brew install libdeflate
-
-But if you need to build the binaries yourself, see the section for UNIX above.
-
-## For Windows
-
-Prebuilt Windows binaries can be downloaded from
-https://github.com/ebiggers/libdeflate/releases. But if you need to build the
-binaries yourself, MinGW (gcc) is the recommended compiler to use. If you're
-performing the build *on* Windows (as opposed to cross-compiling for Windows on
-Linux, for example), you'll need to follow the directions in **one** of the two
-sections below to set up a minimal UNIX-compatible environment using either
-Cygwin or MSYS2, then do the build. (Other MinGW distributions may not work, as
-they often omit basic UNIX tools such as `sh`.)
-
-Alternatively, libdeflate may be built using the Visual Studio toolchain by
-running `nmake /f Makefile.msc`. However, while this is supported in the sense
-that it will produce working binaries, it is not recommended because the
-binaries built with MinGW will be significantly faster.
-
-Also note that 64-bit binaries are faster than 32-bit binaries and should be
-preferred whenever possible.
-
-### Using Cygwin
-
-Run the Cygwin installer, available from https://cygwin.com/setup-x86_64.exe.
-When you get to the package selection screen, choose the following additional
-packages from category "Devel":
-
-- git
-- make
-- mingw64-i686-binutils
-- mingw64-i686-gcc-g++
-- mingw64-x86_64-binutils
-- mingw64-x86_64-gcc-g++
-
-(You may skip the mingw64-i686 packages if you don't need to build 32-bit
-binaries.)
-
-After the installation finishes, open a Cygwin terminal. Then download
-libdeflate's source code (if you haven't already) and `cd` into its directory:
-
- git clone https://github.com/ebiggers/libdeflate
- cd libdeflate
-
-(Note that it's not required to use `git`; an alternative is to extract a .zip
-or .tar.gz archive of the source code downloaded from the releases page.
-Also, in case you need to find it in the file browser, note that your home
-directory in Cygwin is usually located at `C:\cygwin64\home\<your username>`.)
-
-Then, to build 64-bit binaries:
-
- make CC=x86_64-w64-mingw32-gcc
-
-or to build 32-bit binaries:
-
- make CC=i686-w64-mingw32-gcc
-
-### Using MSYS2
-
-Run the MSYS2 installer, available from http://www.msys2.org/. After
-installing, open an MSYS2 shell and run:
-
- pacman -Syu
-
-Say `y`, then when it's finished, close the shell window and open a new one.
-Then run the same command again:
-
- pacman -Syu
-
-Then, install the packages needed to build libdeflate:
-
- pacman -S git \
- make \
- mingw-w64-i686-binutils \
- mingw-w64-i686-gcc \
- mingw-w64-x86_64-binutils \
- mingw-w64-x86_64-gcc
-
-(You may skip the mingw-w64-i686 packages if you don't need to build 32-bit
-binaries.)
-
-Then download libdeflate's source code (if you haven't already):
-
- git clone https://github.com/ebiggers/libdeflate
-
-(Note that it's not required to use `git`; an alternative is to extract a .zip
-or .tar.gz archive of the source code downloaded from the releases page.
-Also, in case you need to find it in the file browser, note that your home
-directory in MSYS2 is usually located at `C:\msys64\home\<your username>`.)
-
-Then, to build 64-bit binaries, open "MSYS2 MinGW 64-bit" from the Start menu
-and run the following commands:
-
- cd libdeflate
- make clean
- make
-
-Or to build 32-bit binaries, do the same but use "MSYS2 MinGW 32-bit" instead.
-
-# API
-
-libdeflate has a simple API that is not zlib-compatible. You can create
-compressors and decompressors and use them to compress or decompress buffers.
-See libdeflate.h for details.
-
-There is currently no support for streaming. This has been considered, but it
-always significantly increases complexity and slows down fast paths.
-Unfortunately, at this point it remains a future TODO. So: if your application
-compresses data in "chunks", say, less than 1 MB in size, then libdeflate is a
-great choice for you; that's what it's designed to do. This is perfect for
-certain use cases such as transparent filesystem compression. But if your
-application compresses large files as a single compressed stream, similarly to
-the `gzip` program, then libdeflate isn't for you.
-
-Note that with chunk-based compression, you generally should have the
-uncompressed size of each chunk stored outside of the compressed data itself.
-This enables you to allocate an output buffer of the correct size without
-guessing. However, libdeflate's decompression routines do optionally provide
-the actual number of output bytes in case you need it.
-
-Windows developers: note that the calling convention of libdeflate.dll is
-"stdcall" -- the same as the Win32 API. If you call into libdeflate.dll using a
-non-C/C++ language, or dynamically using LoadLibrary(), make sure to use the
-stdcall convention. Using the wrong convention may crash your application.
-(Note: older versions of libdeflate used the "cdecl" convention instead.)
-
-# Bindings for other programming languages
-
-The libdeflate project itself only provides a C library. If you need to use
-libdeflate from a programming language other than C or C++, consider using the
-following bindings:
-
-* C#: [LibDeflate.NET](https://github.com/jzebedee/LibDeflate.NET)
-* Go: [go-libdeflate](https://github.com/4kills/go-libdeflate)
-* Java: [libdeflate-java](https://github.com/astei/libdeflate-java)
-* Julia: [LibDeflate.jl](https://github.com/jakobnissen/LibDeflate.jl)
-* Python: [deflate](https://github.com/dcwatson/deflate)
-* Ruby: [libdeflate-ruby](https://github.com/kaorimatz/libdeflate-ruby)
-* Rust: [libdeflater](https://github.com/adamkewley/libdeflater)
-
-Note: these are third-party projects which haven't necessarily been vetted by
-the authors of libdeflate. Please direct all questions, bugs, and improvements
-for these bindings to their authors.
-
-# DEFLATE vs. zlib vs. gzip
-
-The DEFLATE format ([rfc1951](https://www.ietf.org/rfc/rfc1951.txt)), the zlib
-format ([rfc1950](https://www.ietf.org/rfc/rfc1950.txt)), and the gzip format
-([rfc1952](https://www.ietf.org/rfc/rfc1952.txt)) are commonly confused with
-each other as well as with the [zlib software library](http://zlib.net), which
-actually supports all three formats. libdeflate (this library) also supports
-all three formats.
-
-Briefly, DEFLATE is a raw compressed stream, whereas zlib and gzip are different
-wrappers for this stream. Both zlib and gzip include checksums, but gzip can
-include extra information such as the original filename. Generally, you should
-choose a format as follows:
-
-- If you are compressing whole files with no subdivisions, similar to the `gzip`
- program, you probably should use the gzip format.
-- Otherwise, if you don't need the features of the gzip header and footer but do
- still want a checksum for corruption detection, you probably should use the
- zlib format.
-- Otherwise, you probably should use raw DEFLATE. This is ideal if you don't
- need checksums, e.g. because they're simply not needed for your use case or
- because you already compute your own checksums that are stored separately from
- the compressed stream.
-
-Note that gzip and zlib streams can be distinguished from each other based on
-their starting bytes, but this is not necessarily true of raw DEFLATE streams.
-
-# Compression levels
-
-An often-underappreciated fact of compression formats such as DEFLATE is that
-there are an enormous number of different ways that a given input could be
-compressed. Different algorithms and different amounts of computation time will
-result in different compression ratios, while remaining equally compatible with
-the decompressor.
-
-For this reason, the commonly used zlib library provides nine compression
-levels. Level 1 is the fastest but provides the worst compression; level 9
-provides the best compression but is the slowest. It defaults to level 6.
-libdeflate uses this same design but is designed to improve on both zlib's
-performance *and* compression ratio at every compression level. In addition,
-libdeflate's levels go [up to 12](https://xkcd.com/670/) to make room for a
-minimum-cost-path based algorithm (sometimes called "optimal parsing") that can
-significantly improve on zlib's compression ratio.
-
-If you are using DEFLATE (or zlib, or gzip) in your application, you should test
-different levels to see which works best for your application.
-
-# Motivation
-
-Despite DEFLATE's widespread use mainly through the zlib library, in the
-compression community this format from the early 1990s is often considered
-obsolete. And in a few significant ways, it is.
-
-So why implement DEFLATE at all, instead of focusing entirely on
-bzip2/LZMA/xz/LZ4/LZX/ZSTD/Brotli/LZHAM/LZFSE/[insert cool new format here]?
-
-To do something better, you need to understand what came before. And it turns
-out that most ideas from DEFLATE are still relevant. Many of the newer formats
-share a similar structure as DEFLATE, with different tweaks. The effects of
-trivial but very useful tweaks, such as increasing the sliding window size, are
-often confused with the effects of nontrivial but less useful tweaks. And
-actually, many of these formats are similar enough that common algorithms and
-optimizations (e.g. those dealing with LZ77 matchfinding) can be reused.
-
-In addition, comparing compressors fairly is difficult because the performance
-of a compressor depends heavily on optimizations which are not intrinsic to the
-compression format itself. In this respect, the zlib library sometimes compares
-poorly to certain newer code because zlib is not well optimized for modern
-processors. libdeflate addresses this by providing an optimized DEFLATE
-implementation which can be used for benchmarking purposes. And, of course,
-real applications can use it as well.
-
-# License
-
-libdeflate is [MIT-licensed](COPYING).
-
-I am not aware of any patents or patent applications relevant to libdeflate.
diff --git a/util/compress/libdeflate/common/common_defs.h b/util/compress/libdeflate/common/common_defs.h
deleted file mode 100644
index d56c5cf9b..000000000
--- a/util/compress/libdeflate/common/common_defs.h
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * common_defs.h
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef COMMON_COMMON_DEFS_H
-#define COMMON_COMMON_DEFS_H
-
-#ifdef __GNUC__
-# include "compiler_gcc.h"
-#elif defined(_MSC_VER)
-# include "compiler_msc.h"
-#else
-# pragma message("Unrecognized compiler. Please add a header file for your compiler. Compilation will proceed, but performance may suffer!")
-#endif
-
-/* ========================================================================== */
-/* Type definitions */
-/* ========================================================================== */
-
-#include <stddef.h> /* size_t */
-
-#ifndef __bool_true_false_are_defined
-# include <stdbool.h> /* bool */
-#endif
-
-/* Fixed-width integer types */
-#include <stdint.h>
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
-typedef int8_t s8;
-typedef int16_t s16;
-typedef int32_t s32;
-typedef int64_t s64;
-
-/*
- * Word type of the target architecture. Use 'size_t' instead of 'unsigned
- * long' to account for platforms such as Windows that use 32-bit 'unsigned
- * long' on 64-bit architectures.
- */
-typedef size_t machine_word_t;
-
-/* Number of bytes in a word */
-#define WORDBYTES ((int)sizeof(machine_word_t))
-
-/* Number of bits in a word */
-#define WORDBITS (8 * WORDBYTES)
-
-/* ========================================================================== */
-/* Optional compiler features */
-/* ========================================================================== */
-
-/* LIBEXPORT - export a function from a shared library */
-#ifndef LIBEXPORT
-# define LIBEXPORT
-#endif
-
-/* inline - suggest that a function be inlined */
-#ifndef inline
-# define inline
-#endif
-
-/* forceinline - force a function to be inlined, if possible */
-#ifndef forceinline
-# define forceinline inline
-#endif
-
-/* restrict - annotate a non-aliased pointer */
-#ifndef restrict
-# define restrict
-#endif
-
-/* likely(expr) - hint that an expression is usually true */
-#ifndef likely
-# define likely(expr) (expr)
-#endif
-
-/* unlikely(expr) - hint that an expression is usually false */
-#ifndef unlikely
-# define unlikely(expr) (expr)
-#endif
-
-/* prefetchr(addr) - prefetch into L1 cache for read */
-#ifndef prefetchr
-# define prefetchr(addr)
-#endif
-
-/* prefetchw(addr) - prefetch into L1 cache for write */
-#ifndef prefetchw
-# define prefetchw(addr)
-#endif
-
-/* Does the compiler support the 'target' function attribute? */
-#ifndef COMPILER_SUPPORTS_TARGET_FUNCTION_ATTRIBUTE
-# define COMPILER_SUPPORTS_TARGET_FUNCTION_ATTRIBUTE 0
-#endif
-
-/* Which targets are supported with the 'target' function attribute? */
-#ifndef COMPILER_SUPPORTS_BMI2_TARGET
-# define COMPILER_SUPPORTS_BMI2_TARGET 0
-#endif
-#ifndef COMPILER_SUPPORTS_AVX_TARGET
-# define COMPILER_SUPPORTS_AVX_TARGET 0
-#endif
-#ifndef COMPILER_SUPPORTS_AVX512BW_TARGET
-# define COMPILER_SUPPORTS_AVX512BW_TARGET 0
-#endif
-
-/*
- * Which targets are supported with the 'target' function attribute and have
- * intrinsics that work within 'target'-ed functions?
- */
-#ifndef COMPILER_SUPPORTS_SSE2_TARGET_INTRINSICS
-# define COMPILER_SUPPORTS_SSE2_TARGET_INTRINSICS 0
-#endif
-#ifndef COMPILER_SUPPORTS_PCLMUL_TARGET_INTRINSICS
-# define COMPILER_SUPPORTS_PCLMUL_TARGET_INTRINSICS 0
-#endif
-#ifndef COMPILER_SUPPORTS_AVX2_TARGET_INTRINSICS
-# define COMPILER_SUPPORTS_AVX2_TARGET_INTRINSICS 0
-#endif
-#ifndef COMPILER_SUPPORTS_AVX512BW_TARGET_INTRINSICS
-# define COMPILER_SUPPORTS_AVX512BW_TARGET_INTRINSICS 0
-#endif
-#ifndef COMPILER_SUPPORTS_NEON_TARGET_INTRINSICS
-# define COMPILER_SUPPORTS_NEON_TARGET_INTRINSICS 0
-#endif
-#ifndef COMPILER_SUPPORTS_PMULL_TARGET_INTRINSICS
-# define COMPILER_SUPPORTS_PMULL_TARGET_INTRINSICS 0
-#endif
-#ifndef COMPILER_SUPPORTS_CRC32_TARGET_INTRINSICS
-# define COMPILER_SUPPORTS_CRC32_TARGET_INTRINSICS 0
-#endif
-
-/* _aligned_attribute(n) - declare that the annotated variable, or variables of
- * the annotated type, are to be aligned on n-byte boundaries */
-#ifndef _aligned_attribute
-#endif
-
-/* ========================================================================== */
-/* Miscellaneous macros */
-/* ========================================================================== */
-
-#define ARRAY_LEN(A) (sizeof(A) / sizeof((A)[0]))
-#define MIN(a, b) ((a) <= (b) ? (a) : (b))
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define STATIC_ASSERT(expr) ((void)sizeof(char[1 - 2 * !(expr)]))
-#define ALIGN(n, a) (((n) + (a) - 1) & ~((a) - 1))
-
-/* ========================================================================== */
-/* Endianness handling */
-/* ========================================================================== */
-
-/*
- * CPU_IS_LITTLE_ENDIAN() - a macro which evaluates to 1 if the CPU is little
- * endian or 0 if it is big endian. The macro should be defined in a way such
- * that the compiler can evaluate it at compilation time. If not defined, a
- * fallback is used.
- */
-#ifndef CPU_IS_LITTLE_ENDIAN
-static forceinline int CPU_IS_LITTLE_ENDIAN(void)
-{
- union {
- unsigned int v;
- unsigned char b;
- } u;
- u.v = 1;
- return u.b;
-}
-#endif
-
-/* bswap16(n) - swap the bytes of a 16-bit integer */
-#ifndef bswap16
-static forceinline u16 bswap16(u16 n)
-{
- return (n << 8) | (n >> 8);
-}
-#endif
-
-/* bswap32(n) - swap the bytes of a 32-bit integer */
-#ifndef bswap32
-static forceinline u32 bswap32(u32 n)
-{
- return ((n & 0x000000FF) << 24) |
- ((n & 0x0000FF00) << 8) |
- ((n & 0x00FF0000) >> 8) |
- ((n & 0xFF000000) >> 24);
-}
-#endif
-
-/* bswap64(n) - swap the bytes of a 64-bit integer */
-#ifndef bswap64
-static forceinline u64 bswap64(u64 n)
-{
- return ((n & 0x00000000000000FF) << 56) |
- ((n & 0x000000000000FF00) << 40) |
- ((n & 0x0000000000FF0000) << 24) |
- ((n & 0x00000000FF000000) << 8) |
- ((n & 0x000000FF00000000) >> 8) |
- ((n & 0x0000FF0000000000) >> 24) |
- ((n & 0x00FF000000000000) >> 40) |
- ((n & 0xFF00000000000000) >> 56);
-}
-#endif
-
-#define le16_bswap(n) (CPU_IS_LITTLE_ENDIAN() ? (n) : bswap16(n))
-#define le32_bswap(n) (CPU_IS_LITTLE_ENDIAN() ? (n) : bswap32(n))
-#define le64_bswap(n) (CPU_IS_LITTLE_ENDIAN() ? (n) : bswap64(n))
-#define be16_bswap(n) (CPU_IS_LITTLE_ENDIAN() ? bswap16(n) : (n))
-#define be32_bswap(n) (CPU_IS_LITTLE_ENDIAN() ? bswap32(n) : (n))
-#define be64_bswap(n) (CPU_IS_LITTLE_ENDIAN() ? bswap64(n) : (n))
-
-/* ========================================================================== */
-/* Unaligned memory accesses */
-/* ========================================================================== */
-
-/*
- * UNALIGNED_ACCESS_IS_FAST should be defined to 1 if unaligned memory accesses
- * can be performed efficiently on the target platform.
- */
-#ifndef UNALIGNED_ACCESS_IS_FAST
-# define UNALIGNED_ACCESS_IS_FAST 0
-#endif
-
-/* ========================================================================== */
-/* Bit scan functions */
-/* ========================================================================== */
-
-/*
- * Bit Scan Reverse (BSR) - find the 0-based index (relative to the least
- * significant end) of the *most* significant 1 bit in the input value. The
- * input value must be nonzero!
- */
-
-#ifndef bsr32
-static forceinline unsigned
-bsr32(u32 n)
-{
- unsigned i = 0;
- while ((n >>= 1) != 0)
- i++;
- return i;
-}
-#endif
-
-#ifndef bsr64
-static forceinline unsigned
-bsr64(u64 n)
-{
- unsigned i = 0;
- while ((n >>= 1) != 0)
- i++;
- return i;
-}
-#endif
-
-static forceinline unsigned
-bsrw(machine_word_t n)
-{
- STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
- if (WORDBITS == 32)
- return bsr32(n);
- else
- return bsr64(n);
-}
-
-/*
- * Bit Scan Forward (BSF) - find the 0-based index (relative to the least
- * significant end) of the *least* significant 1 bit in the input value. The
- * input value must be nonzero!
- */
-
-#ifndef bsf32
-static forceinline unsigned
-bsf32(u32 n)
-{
- unsigned i = 0;
- while ((n & 1) == 0) {
- i++;
- n >>= 1;
- }
- return i;
-}
-#endif
-
-#ifndef bsf64
-static forceinline unsigned
-bsf64(u64 n)
-{
- unsigned i = 0;
- while ((n & 1) == 0) {
- i++;
- n >>= 1;
- }
- return i;
-}
-#endif
-
-static forceinline unsigned
-bsfw(machine_word_t n)
-{
- STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
- if (WORDBITS == 32)
- return bsf32(n);
- else
- return bsf64(n);
-}
-
-#endif /* COMMON_COMMON_DEFS_H */
diff --git a/util/compress/libdeflate/common/compiler_gcc.h b/util/compress/libdeflate/common/compiler_gcc.h
deleted file mode 100644
index 2a45b05f3..000000000
--- a/util/compress/libdeflate/common/compiler_gcc.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * compiler_gcc.h - definitions for the GNU C Compiler. This also handles clang
- * and the Intel C Compiler (icc).
- *
- * TODO: icc is not well tested, so some things are currently disabled even
- * though they maybe can be enabled on some icc versions.
- */
-
-#if !defined(__clang__) && !defined(__INTEL_COMPILER)
-# define GCC_PREREQ(major, minor) \
- (__GNUC__ > (major) || \
- (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
-#else
-# define GCC_PREREQ(major, minor) 0
-#endif
-
-/* Note: only check the clang version when absolutely necessary!
- * "Vendors" such as Apple can use different version numbers. */
-#ifdef __clang__
-# ifdef __apple_build_version__
-# define CLANG_PREREQ(major, minor, apple_version) \
- (__apple_build_version__ >= (apple_version))
-# else
-# define CLANG_PREREQ(major, minor, apple_version) \
- (__clang_major__ > (major) || \
- (__clang_major__ == (major) && __clang_minor__ >= (minor)))
-# endif
-#else
-# define CLANG_PREREQ(major, minor, apple_version) 0
-#endif
-
-#ifndef __has_attribute
-# define __has_attribute(attribute) 0
-#endif
-#ifndef __has_feature
-# define __has_feature(feature) 0
-#endif
-#ifndef __has_builtin
-# define __has_builtin(builtin) 0
-#endif
-
-#ifdef _WIN32
-# define LIBEXPORT __declspec(dllexport)
-#else
-# define LIBEXPORT __attribute__((visibility("default")))
-#endif
-
-#define inline inline
-#define forceinline inline __attribute__((always_inline))
-#define restrict __restrict__
-#define likely(expr) __builtin_expect(!!(expr), 1)
-#define unlikely(expr) __builtin_expect(!!(expr), 0)
-#define prefetchr(addr) __builtin_prefetch((addr), 0)
-#define prefetchw(addr) __builtin_prefetch((addr), 1)
-#define _aligned_attribute(n) __attribute__((aligned(n)))
-
-#define COMPILER_SUPPORTS_TARGET_FUNCTION_ATTRIBUTE \
- (GCC_PREREQ(4, 4) || __has_attribute(target))
-
-#if COMPILER_SUPPORTS_TARGET_FUNCTION_ATTRIBUTE
-
-# if defined(__i386__) || defined(__x86_64__)
-
-# define COMPILER_SUPPORTS_PCLMUL_TARGET \
- (GCC_PREREQ(4, 4) || __has_builtin(__builtin_ia32_pclmulqdq128))
-
-# define COMPILER_SUPPORTS_AVX_TARGET \
- (GCC_PREREQ(4, 6) || __has_builtin(__builtin_ia32_maxps256))
-
-# define COMPILER_SUPPORTS_BMI2_TARGET \
- (GCC_PREREQ(4, 7) || __has_builtin(__builtin_ia32_pdep_di))
-
-# define COMPILER_SUPPORTS_AVX2_TARGET \
- (GCC_PREREQ(4, 7) || __has_builtin(__builtin_ia32_psadbw256))
-
-# define COMPILER_SUPPORTS_AVX512BW_TARGET \
- (GCC_PREREQ(5, 1) || __has_builtin(__builtin_ia32_psadbw512))
-
- /*
- * Prior to gcc 4.9 (r200349) and clang 3.8 (r239883), x86 intrinsics
- * not available in the main target could not be used in 'target'
- * attribute functions. Unfortunately clang has no feature test macro
- * for this so we have to check its version.
- */
-# if GCC_PREREQ(4, 9) || CLANG_PREREQ(3, 8, 7030000)
-# define COMPILER_SUPPORTS_SSE2_TARGET_INTRINSICS 1
-# define COMPILER_SUPPORTS_PCLMUL_TARGET_INTRINSICS \
- COMPILER_SUPPORTS_PCLMUL_TARGET
-# define COMPILER_SUPPORTS_AVX2_TARGET_INTRINSICS \
- COMPILER_SUPPORTS_AVX2_TARGET
-# define COMPILER_SUPPORTS_AVX512BW_TARGET_INTRINSICS \
- COMPILER_SUPPORTS_AVX512BW_TARGET
-# endif
-
-# elif defined(__arm__) || defined(__aarch64__)
-
- /*
- * Determine whether NEON and crypto intrinsics are supported.
- *
- * With gcc prior to 6.1, (r230411 for arm32, r226563 for arm64), neither
- * was available unless enabled in the main target.
- *
- * But even after that, to include <arm_neon.h> (which contains both the
- * basic NEON intrinsics and the crypto intrinsics) the main target still
- * needs to have:
- * - gcc: hardware floating point support
- * - clang: NEON support (but not necessarily crypto support)
- */
-# if (GCC_PREREQ(6, 1) && defined(__ARM_FP)) || \
- (defined(__clang__) && defined(__ARM_NEON))
-# define COMPILER_SUPPORTS_NEON_TARGET_INTRINSICS 1
- /*
- * The crypto intrinsics are broken on arm32 with clang, even when using
- * -mfpu=crypto-neon-fp-armv8, because clang's <arm_neon.h> puts them
- * behind __aarch64__. Undefine __ARM_FEATURE_CRYPTO in that case...
- */
-# if defined(__clang__) && defined(__arm__)
-# undef __ARM_FEATURE_CRYPTO
-# elif __has_builtin(__builtin_neon_vmull_p64) || !defined(__clang__)
-# define COMPILER_SUPPORTS_PMULL_TARGET_INTRINSICS 1
-# endif
-# endif
-
- /*
- * Determine whether CRC32 intrinsics are supported.
- *
- * With gcc r274827 or later (gcc 10.1+, 9.3+, or 8.4+), or with clang,
- * they work as expected. (Well, not quite. There's still a bug, but we
- * have to work around it later when including arm_acle.h.)
- */
-# if GCC_PREREQ(10, 1) || \
- (GCC_PREREQ(9, 3) && !GCC_PREREQ(10, 0)) || \
- (GCC_PREREQ(8, 4) && !GCC_PREREQ(9, 0)) || \
- (defined(__clang__) && __has_builtin(__builtin_arm_crc32b))
-# define COMPILER_SUPPORTS_CRC32_TARGET_INTRINSICS 1
-# endif
-
-# endif /* __arm__ || __aarch64__ */
-
-#endif /* COMPILER_SUPPORTS_TARGET_FUNCTION_ATTRIBUTE */
-
-/*
- * Prior to gcc 5.1 and clang 3.9, emmintrin.h only defined vectors of signed
- * integers (e.g. __v4si), not vectors of unsigned integers (e.g. __v4su). But
- * we need the unsigned ones in order to avoid signed integer overflow, which is
- * undefined behavior. Add the missing definitions for the unsigned ones if
- * needed.
- */
-#if (GCC_PREREQ(4, 0) && !GCC_PREREQ(5, 1)) || \
- (defined(__clang__) && !CLANG_PREREQ(3, 9, 8020000)) || \
- defined(__INTEL_COMPILER)
-typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
-typedef unsigned int __v4su __attribute__((__vector_size__(16)));
-typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
-typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
-typedef unsigned long long __v4du __attribute__((__vector_size__(32)));
-typedef unsigned int __v8su __attribute__((__vector_size__(32)));
-typedef unsigned short __v16hu __attribute__((__vector_size__(32)));
-typedef unsigned char __v32qu __attribute__((__vector_size__(32)));
-#endif
-
-#ifdef __INTEL_COMPILER
-typedef int __v16si __attribute__((__vector_size__(64)));
-typedef short __v32hi __attribute__((__vector_size__(64)));
-typedef char __v64qi __attribute__((__vector_size__(64)));
-#endif
-
-/* Newer gcc supports __BYTE_ORDER__. Older gcc doesn't. */
-#ifdef __BYTE_ORDER__
-# define CPU_IS_LITTLE_ENDIAN() (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
-#endif
-
-#if GCC_PREREQ(4, 8) || __has_builtin(__builtin_bswap16)
-# define bswap16 __builtin_bswap16
-#endif
-
-#if GCC_PREREQ(4, 3) || __has_builtin(__builtin_bswap32)
-# define bswap32 __builtin_bswap32
-#endif
-
-#if GCC_PREREQ(4, 3) || __has_builtin(__builtin_bswap64)
-# define bswap64 __builtin_bswap64
-#endif
-
-#if defined(__x86_64__) || defined(__i386__) || \
- defined(__ARM_FEATURE_UNALIGNED) || defined(__powerpc64__) || \
- /*
- * For all compilation purposes, WebAssembly behaves like any other CPU
- * instruction set. Even though WebAssembly engine might be running on top
- * of different actual CPU architectures, the WebAssembly spec itself
- * permits unaligned access and it will be fast on most of those platforms,
- * and simulated at the engine level on others, so it's worth treating it
- * as a CPU architecture with fast unaligned access.
- */ defined(__wasm__)
-# define UNALIGNED_ACCESS_IS_FAST 1
-#endif
-
-#define bsr32(n) (31 - __builtin_clz(n))
-#define bsr64(n) (63 - __builtin_clzll(n))
-#define bsf32(n) __builtin_ctz(n)
-#define bsf64(n) __builtin_ctzll(n)
diff --git a/util/compress/libdeflate/common/compiler_msc.h b/util/compress/libdeflate/common/compiler_msc.h
deleted file mode 100644
index 18cfa128f..000000000
--- a/util/compress/libdeflate/common/compiler_msc.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * compiler_msc.h - definitions for the Microsoft C Compiler
- */
-
-#include <stdint.h>
-#include <stdlib.h> /* for _byteswap_*() */
-
-#define LIBEXPORT __declspec(dllexport)
-
-/*
- * Old versions (e.g. VS2010) of MSC don't have the C99 header stdbool.h.
- * Beware: the below replacement isn't fully standard, since normally any value
- * != 0 should be implicitly cast to a bool with value 1... but that doesn't
- * happen if bool is really just an 'int'.
- */
-typedef int bool;
-#define true 1
-#define false 0
-#define __bool_true_false_are_defined 1
-
-/* Define ssize_t */
-#ifdef _WIN64
-typedef long long ssize_t;
-#else
-typedef int ssize_t;
-#endif
-
-/* Assume a little endian architecture with fast unaligned access */
-#define CPU_IS_LITTLE_ENDIAN() 1
-#define UNALIGNED_ACCESS_IS_FAST 1
-
-/* __restrict has nonstandard behavior; don't use it */
-#define restrict
-
-/* ... but we can use __inline and __forceinline */
-#define inline __inline
-#define forceinline __forceinline
-
-/* Byte swap functions */
-#define bswap16 _byteswap_ushort
-#define bswap32 _byteswap_ulong
-#define bswap64 _byteswap_uint64
-
-/* Bit scan functions (32-bit) */
-
-static forceinline unsigned
-bsr32(uint32_t n)
-{
- _BitScanReverse(&n, n);
- return n;
-}
-#define bsr32 bsr32
-
-static forceinline unsigned
-bsf32(uint32_t n)
-{
- _BitScanForward(&n, n);
- return n;
-}
-#define bsf32 bsf32
-
-#ifdef _M_X64 /* Bit scan functions (64-bit) */
-
-static forceinline unsigned
-bsr64(uint64_t n)
-{
- _BitScanReverse64(&n, n);
- return n;
-}
-#define bsr64 bsr64
-
-static forceinline unsigned
-bsf64(uint64_t n)
-{
- _BitScanForward64(&n, n);
- return n;
-}
-#define bsf64 bsf64
-
-#endif /* _M_X64 */
diff --git a/util/compress/libdeflate/lib/adler32.c b/util/compress/libdeflate/lib/adler32.c
deleted file mode 100644
index 32ab0ceb5..000000000
--- a/util/compress/libdeflate/lib/adler32.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * adler32.c - Adler-32 checksum algorithm
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "lib_common.h"
-#include "libdeflate.h"
-
-/* The Adler-32 divisor, or "base", value. */
-#define DIVISOR 65521
-
-/*
- * MAX_CHUNK_SIZE is the most bytes that can be processed without the
- * possibility of s2 overflowing when it is represented as an unsigned 32-bit
- * integer. This value was computed using the following Python script:
- *
- * divisor = 65521
- * count = 0
- * s1 = divisor - 1
- * s2 = divisor - 1
- * while True:
- * s1 += 0xFF
- * s2 += s1
- * if s2 > 0xFFFFFFFF:
- * break
- * count += 1
- * print(count)
- *
- * Note that to get the correct worst-case value, we must assume that every byte
- * has value 0xFF and that s1 and s2 started with the highest possible values
- * modulo the divisor.
- */
-#define MAX_CHUNK_SIZE 5552
-
-typedef u32 (*adler32_func_t)(u32, const u8 *, size_t);
-
-/* Include architecture-specific implementations if available */
-#undef DEFAULT_IMPL
-#undef DISPATCH
-#if defined(__arm__) || defined(__aarch64__)
-# include "arm/adler32_impl.h"
-#elif defined(__i386__) || defined(__x86_64__)
-# include "x86/adler32_impl.h"
-#endif
-
-/* Define a generic implementation if needed */
-#ifndef DEFAULT_IMPL
-#define DEFAULT_IMPL adler32_generic
-static u32 adler32_generic(u32 adler, const u8 *p, size_t size)
-{
- u32 s1 = adler & 0xFFFF;
- u32 s2 = adler >> 16;
- const u8 * const end = p + size;
-
- while (p != end) {
- size_t chunk_size = MIN(end - p, MAX_CHUNK_SIZE);
- const u8 *chunk_end = p + chunk_size;
- size_t num_unrolled_iterations = chunk_size / 4;
-
- while (num_unrolled_iterations--) {
- s1 += *p++;
- s2 += s1;
- s1 += *p++;
- s2 += s1;
- s1 += *p++;
- s2 += s1;
- s1 += *p++;
- s2 += s1;
- }
- while (p != chunk_end) {
- s1 += *p++;
- s2 += s1;
- }
- s1 %= DIVISOR;
- s2 %= DIVISOR;
- }
-
- return (s2 << 16) | s1;
-}
-#endif /* !DEFAULT_IMPL */
-
-#ifdef DISPATCH
-static u32 dispatch(u32, const u8 *, size_t);
-
-static volatile adler32_func_t adler32_impl = dispatch;
-
-/* Choose the fastest implementation at runtime */
-static u32 dispatch(u32 adler, const u8 *buffer, size_t size)
-{
- adler32_func_t f = arch_select_adler32_func();
-
- if (f == NULL)
- f = DEFAULT_IMPL;
-
- adler32_impl = f;
- return adler32_impl(adler, buffer, size);
-}
-#else
-# define adler32_impl DEFAULT_IMPL /* only one implementation, use it */
-#endif
-
-LIBDEFLATEEXPORT u32 LIBDEFLATEAPI
-libdeflate_adler32(u32 adler, const void *buffer, size_t size)
-{
- if (buffer == NULL) /* return initial value */
- return 1;
- return adler32_impl(adler, buffer, size);
-}
diff --git a/util/compress/libdeflate/lib/adler32_vec_template.h b/util/compress/libdeflate/lib/adler32_vec_template.h
deleted file mode 100644
index 4eb8c2a82..000000000
--- a/util/compress/libdeflate/lib/adler32_vec_template.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * adler32_vec_template.h - template for vectorized Adler-32 implementations
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * This file contains a template for vectorized Adler-32 implementations.
- *
- * The inner loop between reductions modulo 65521 of an unvectorized Adler-32
- * implementation looks something like this:
- *
- * do {
- * s1 += *p;
- * s2 += s1;
- * } while (++p != chunk_end);
- *
- * For vectorized calculation of s1, we only need to sum the input bytes. They
- * can be accumulated into multiple counters which are eventually summed
- * together.
- *
- * For vectorized calculation of s2, the basic idea is that for each iteration
- * that processes N bytes, we can perform the following vectorizable
- * calculation:
- *
- * s2 += N*byte_1 + (N-1)*byte_2 + (N-2)*byte_3 + ... + 1*byte_N
- *
- * Or, equivalently, we can sum the byte_1...byte_N for each iteration into N
- * separate counters, then do the multiplications by N...1 just once at the end
- * rather than once per iteration.
- *
- * Also, we must account for how previous bytes will affect s2 by doing the
- * following at beginning of each iteration:
- *
- * s2 += s1 * N
- *
- * Furthermore, like s1, "s2" can actually be multiple counters which are
- * eventually summed together.
- */
-
-static u32 ATTRIBUTES
-FUNCNAME(u32 adler, const u8 *p, size_t size)
-{
- u32 s1 = adler & 0xFFFF;
- u32 s2 = adler >> 16;
- const u8 * const end = p + size;
- const u8 *vend;
- const size_t max_chunk_size =
- MIN(MAX_CHUNK_SIZE, IMPL_MAX_CHUNK_SIZE) -
- (MIN(MAX_CHUNK_SIZE, IMPL_MAX_CHUNK_SIZE) %
- IMPL_SEGMENT_SIZE);
-
- /* Process a byte at a time until the needed alignment is reached */
- if (p != end && (uintptr_t)p % IMPL_ALIGNMENT) {
- do {
- s1 += *p++;
- s2 += s1;
- } while (p != end && (uintptr_t)p % IMPL_ALIGNMENT);
- s1 %= DIVISOR;
- s2 %= DIVISOR;
- }
-
- /*
- * Process "chunks" of bytes using vector instructions. Chunk sizes are
- * limited to MAX_CHUNK_SIZE, which guarantees that s1 and s2 never
- * overflow before being reduced modulo DIVISOR. For vector processing,
- * chunk sizes are also made evenly divisible by IMPL_SEGMENT_SIZE and
- * may be further limited to IMPL_MAX_CHUNK_SIZE.
- */
- STATIC_ASSERT(IMPL_SEGMENT_SIZE % IMPL_ALIGNMENT == 0);
- vend = end - ((size_t)(end - p) % IMPL_SEGMENT_SIZE);
- while (p != vend) {
- size_t chunk_size = MIN((size_t)(vend - p), max_chunk_size);
-
- s2 += s1 * chunk_size;
-
- FUNCNAME_CHUNK((const void *)p, (const void *)(p + chunk_size),
- &s1, &s2);
-
- p += chunk_size;
- s1 %= DIVISOR;
- s2 %= DIVISOR;
- }
-
- /* Process any remaining bytes */
- if (p != end) {
- do {
- s1 += *p++;
- s2 += s1;
- } while (p != end);
- s1 %= DIVISOR;
- s2 %= DIVISOR;
- }
-
- return (s2 << 16) | s1;
-}
-
-#undef FUNCNAME
-#undef FUNCNAME_CHUNK
-#undef ATTRIBUTES
-#undef IMPL_ALIGNMENT
-#undef IMPL_SEGMENT_SIZE
-#undef IMPL_MAX_CHUNK_SIZE
diff --git a/util/compress/libdeflate/lib/arm/adler32_impl.h b/util/compress/libdeflate/lib/arm/adler32_impl.h
deleted file mode 100644
index 17e56c004..000000000
--- a/util/compress/libdeflate/lib/arm/adler32_impl.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * arm/adler32_impl.h - ARM implementations of Adler-32 checksum algorithm
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LIB_ARM_ADLER32_IMPL_H
-#define LIB_ARM_ADLER32_IMPL_H
-
-#include "cpu_features.h"
-
-/* NEON implementation */
-#undef DISPATCH_NEON
-#if !defined(DEFAULT_IMPL) && \
- (defined(__ARM_NEON) || (ARM_CPU_FEATURES_ENABLED && \
- COMPILER_SUPPORTS_NEON_TARGET_INTRINSICS))
-# define FUNCNAME adler32_neon
-# define FUNCNAME_CHUNK adler32_neon_chunk
-# define IMPL_ALIGNMENT 16
-# define IMPL_SEGMENT_SIZE 32
-/* Prevent unsigned overflow of the 16-bit precision byte counters */
-# define IMPL_MAX_CHUNK_SIZE (32 * (0xFFFF / 0xFF))
-# ifdef __ARM_NEON
-# define ATTRIBUTES
-# define DEFAULT_IMPL adler32_neon
-# else
-# ifdef __arm__
-# define ATTRIBUTES __attribute__((target("fpu=neon")))
-# else
-# define ATTRIBUTES __attribute__((target("+simd")))
-# endif
-# define DISPATCH 1
-# define DISPATCH_NEON 1
-# endif
-# include <arm_neon.h>
-static forceinline ATTRIBUTES void
-adler32_neon_chunk(const uint8x16_t *p, const uint8x16_t * const end,
- u32 *s1, u32 *s2)
-{
- uint32x4_t v_s1 = (uint32x4_t) { 0, 0, 0, 0 };
- uint32x4_t v_s2 = (uint32x4_t) { 0, 0, 0, 0 };
- uint16x8_t v_byte_sums_a = (uint16x8_t) { 0, 0, 0, 0, 0, 0, 0, 0 };
- uint16x8_t v_byte_sums_b = (uint16x8_t) { 0, 0, 0, 0, 0, 0, 0, 0 };
- uint16x8_t v_byte_sums_c = (uint16x8_t) { 0, 0, 0, 0, 0, 0, 0, 0 };
- uint16x8_t v_byte_sums_d = (uint16x8_t) { 0, 0, 0, 0, 0, 0, 0, 0 };
-
- do {
- const uint8x16_t bytes1 = *p++;
- const uint8x16_t bytes2 = *p++;
- uint16x8_t tmp;
-
- v_s2 += v_s1;
-
- /* Vector Pairwise Add Long (u8 => u16) */
- tmp = vpaddlq_u8(bytes1);
-
- /* Vector Pairwise Add and Accumulate Long (u8 => u16) */
- tmp = vpadalq_u8(tmp, bytes2);
-
- /* Vector Pairwise Add and Accumulate Long (u16 => u32) */
- v_s1 = vpadalq_u16(v_s1, tmp);
-
- /* Vector Add Wide (u8 => u16) */
- v_byte_sums_a = vaddw_u8(v_byte_sums_a, vget_low_u8(bytes1));
- v_byte_sums_b = vaddw_u8(v_byte_sums_b, vget_high_u8(bytes1));
- v_byte_sums_c = vaddw_u8(v_byte_sums_c, vget_low_u8(bytes2));
- v_byte_sums_d = vaddw_u8(v_byte_sums_d, vget_high_u8(bytes2));
-
- } while (p != end);
-
- /* Vector Shift Left (u32) */
- v_s2 = vqshlq_n_u32(v_s2, 5);
-
- /* Vector Multiply Accumulate Long (u16 => u32) */
- v_s2 = vmlal_u16(v_s2, vget_low_u16(v_byte_sums_a), (uint16x4_t) { 32, 31, 30, 29 });
- v_s2 = vmlal_u16(v_s2, vget_high_u16(v_byte_sums_a), (uint16x4_t) { 28, 27, 26, 25 });
- v_s2 = vmlal_u16(v_s2, vget_low_u16(v_byte_sums_b), (uint16x4_t) { 24, 23, 22, 21 });
- v_s2 = vmlal_u16(v_s2, vget_high_u16(v_byte_sums_b), (uint16x4_t) { 20, 19, 18, 17 });
- v_s2 = vmlal_u16(v_s2, vget_low_u16(v_byte_sums_c), (uint16x4_t) { 16, 15, 14, 13 });
- v_s2 = vmlal_u16(v_s2, vget_high_u16(v_byte_sums_c), (uint16x4_t) { 12, 11, 10, 9 });
- v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_byte_sums_d), (uint16x4_t) { 8, 7, 6, 5 });
- v_s2 = vmlal_u16(v_s2, vget_high_u16(v_byte_sums_d), (uint16x4_t) { 4, 3, 2, 1 });
-
- *s1 += v_s1[0] + v_s1[1] + v_s1[2] + v_s1[3];
- *s2 += v_s2[0] + v_s2[1] + v_s2[2] + v_s2[3];
-}
-# include "../adler32_vec_template.h"
-#endif /* NEON implementation */
-
-#ifdef DISPATCH
-static inline adler32_func_t
-arch_select_adler32_func(void)
-{
- u32 features = get_cpu_features();
-
-#ifdef DISPATCH_NEON
- if (features & ARM_CPU_FEATURE_NEON)
- return adler32_neon;
-#endif
- return NULL;
-}
-#endif /* DISPATCH */
-
-#endif /* LIB_ARM_ADLER32_IMPL_H */
diff --git a/util/compress/libdeflate/lib/arm/cpu_features.c b/util/compress/libdeflate/lib/arm/cpu_features.c
deleted file mode 100644
index 60b1be3ee..000000000
--- a/util/compress/libdeflate/lib/arm/cpu_features.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * arm/cpu_features.c - feature detection for ARM processors
- *
- * Copyright 2018 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * ARM processors don't have a standard way for unprivileged programs to detect
- * processor features. But, on Linux we can read the AT_HWCAP and AT_HWCAP2
- * values from /proc/self/auxv.
- *
- * Ideally we'd use the C library function getauxval(), but it's not guaranteed
- * to be available: it was only added to glibc in 2.16, and in Android it was
- * added to API level 18 for ARM and level 21 for AArch64.
- */
-
-#include "../cpu_features_common.h" /* must be included first */
-#include "cpu_features.h"
-
-#if ARM_CPU_FEATURES_ENABLED
-
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <unistd.h>
-
-#define AT_HWCAP 16
-#define AT_HWCAP2 26
-
-volatile u32 _cpu_features = 0;
-
-static void scan_auxv(unsigned long *hwcap, unsigned long *hwcap2)
-{
- int fd;
- unsigned long auxbuf[32];
- int filled = 0;
- int i;
-
- fd = open("/proc/self/auxv", O_RDONLY);
- if (fd < 0)
- return;
-
- for (;;) {
- do {
- int ret = read(fd, &((char *)auxbuf)[filled],
- sizeof(auxbuf) - filled);
- if (ret <= 0) {
- if (ret < 0 && errno == EINTR)
- continue;
- goto out;
- }
- filled += ret;
- } while (filled < 2 * sizeof(long));
-
- i = 0;
- do {
- unsigned long type = auxbuf[i];
- unsigned long value = auxbuf[i + 1];
-
- if (type == AT_HWCAP)
- *hwcap = value;
- else if (type == AT_HWCAP2)
- *hwcap2 = value;
- i += 2;
- filled -= 2 * sizeof(long);
- } while (filled >= 2 * sizeof(long));
-
- memmove(auxbuf, &auxbuf[i], filled);
- }
-out:
- close(fd);
-}
-
-static const struct cpu_feature arm_cpu_feature_table[] = {
- {ARM_CPU_FEATURE_NEON, "neon"},
- {ARM_CPU_FEATURE_PMULL, "pmull"},
- {ARM_CPU_FEATURE_CRC32, "crc32"},
-};
-
-void setup_cpu_features(void)
-{
- u32 features = 0;
- unsigned long hwcap = 0;
- unsigned long hwcap2 = 0;
-
- scan_auxv(&hwcap, &hwcap2);
-
-#ifdef __arm__
- STATIC_ASSERT(sizeof(long) == 4);
- if (hwcap & (1 << 12)) /* HWCAP_NEON */
- features |= ARM_CPU_FEATURE_NEON;
- if (hwcap2 & (1 << 1)) /* HWCAP2_PMULL */
- features |= ARM_CPU_FEATURE_PMULL;
- if (hwcap2 & (1 << 4)) /* HWCAP2_CRC32 */
- features |= ARM_CPU_FEATURE_CRC32;
-#else
- STATIC_ASSERT(sizeof(long) == 8);
- if (hwcap & (1 << 1)) /* HWCAP_ASIMD */
- features |= ARM_CPU_FEATURE_NEON;
- if (hwcap & (1 << 4)) /* HWCAP_PMULL */
- features |= ARM_CPU_FEATURE_PMULL;
- if (hwcap & (1 << 7)) /* HWCAP_CRC32 */
- features |= ARM_CPU_FEATURE_CRC32;
-#endif
-
- disable_cpu_features_for_testing(&features, arm_cpu_feature_table,
- ARRAY_LEN(arm_cpu_feature_table));
-
- _cpu_features = features | ARM_CPU_FEATURES_KNOWN;
-}
-
-#endif /* ARM_CPU_FEATURES_ENABLED */
diff --git a/util/compress/libdeflate/lib/arm/cpu_features.h b/util/compress/libdeflate/lib/arm/cpu_features.h
deleted file mode 100644
index 69d723598..000000000
--- a/util/compress/libdeflate/lib/arm/cpu_features.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * arm/cpu_features.h - feature detection for ARM processors
- */
-
-#ifndef LIB_ARM_CPU_FEATURES_H
-#define LIB_ARM_CPU_FEATURES_H
-
-#include "../lib_common.h"
-
-#if (defined(__arm__) || defined(__aarch64__)) && \
- defined(__linux__) && \
- COMPILER_SUPPORTS_TARGET_FUNCTION_ATTRIBUTE && \
- !defined(FREESTANDING)
-# define ARM_CPU_FEATURES_ENABLED 1
-#else
-# define ARM_CPU_FEATURES_ENABLED 0
-#endif
-
-#if ARM_CPU_FEATURES_ENABLED
-
-#define ARM_CPU_FEATURE_NEON 0x00000001
-#define ARM_CPU_FEATURE_PMULL 0x00000002
-#define ARM_CPU_FEATURE_CRC32 0x00000004
-
-#define ARM_CPU_FEATURES_KNOWN 0x80000000
-
-extern volatile u32 _cpu_features;
-
-void setup_cpu_features(void);
-
-static inline u32 get_cpu_features(void)
-{
- if (_cpu_features == 0)
- setup_cpu_features();
- return _cpu_features;
-}
-
-#endif /* ARM_CPU_FEATURES_ENABLED */
-
-#endif /* LIB_ARM_CPU_FEATURES_H */
diff --git a/util/compress/libdeflate/lib/arm/crc32_impl.h b/util/compress/libdeflate/lib/arm/crc32_impl.h
deleted file mode 100644
index 238a85a80..000000000
--- a/util/compress/libdeflate/lib/arm/crc32_impl.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * arm/crc32_impl.h
- *
- * Copyright 2017 Jun He <jun.he@linaro.org>
- * Copyright 2018 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LIB_ARM_CRC32_IMPL_H
-#define LIB_ARM_CRC32_IMPL_H
-
-#include "cpu_features.h"
-
-/* Implementation using ARM CRC32 instructions */
-#undef DISPATCH_ARM
-#if !defined(DEFAULT_IMPL) && \
- (defined(__ARM_FEATURE_CRC32) || \
- (ARM_CPU_FEATURES_ENABLED && COMPILER_SUPPORTS_CRC32_TARGET_INTRINSICS))
-# ifdef __ARM_FEATURE_CRC32
-# define ATTRIBUTES
-# define DEFAULT_IMPL crc32_arm
-# else
-# ifdef __arm__
-# ifdef __clang__
-# define ATTRIBUTES __attribute__((target("armv8-a,crc")))
-# else
-# define ATTRIBUTES __attribute__((target("arch=armv8-a+crc")))
-# endif
-# else
-# ifdef __clang__
-# define ATTRIBUTES __attribute__((target("crc")))
-# else
-# define ATTRIBUTES __attribute__((target("+crc")))
-# endif
-# endif
-# define DISPATCH 1
-# define DISPATCH_ARM 1
-# endif
-
-/*
- * gcc's (as of 10.1) version of arm_acle.h for arm32, and clang's (as of
- * 10.0.1) version of arm_acle.h for both arm32 and arm64, have a bug where they
- * only define the CRC32 functions like __crc32b() when __ARM_FEATURE_CRC32 is
- * defined. That prevents them from being used via __attribute__((target)) when
- * the main target doesn't have CRC32 support enabled. The actual built-ins
- * like __builtin_arm_crc32b() are available and work, however; it's just the
- * wrappers in arm_acle.h like __crc32b() that erroneously don't get defined.
- * Work around this by manually defining __ARM_FEATURE_CRC32.
- */
-#ifndef __ARM_FEATURE_CRC32
-# define __ARM_FEATURE_CRC32 1
-#endif
-#include <arm_acle.h>
-
-static u32 ATTRIBUTES
-crc32_arm(u32 remainder, const u8 *p, size_t size)
-{
- while (size != 0 && (uintptr_t)p & 7) {
- remainder = __crc32b(remainder, *p++);
- size--;
- }
-
- while (size >= 32) {
- remainder = __crc32d(remainder, le64_bswap(*((u64 *)p + 0)));
- remainder = __crc32d(remainder, le64_bswap(*((u64 *)p + 1)));
- remainder = __crc32d(remainder, le64_bswap(*((u64 *)p + 2)));
- remainder = __crc32d(remainder, le64_bswap(*((u64 *)p + 3)));
- p += 32;
- size -= 32;
- }
-
- while (size >= 8) {
- remainder = __crc32d(remainder, le64_bswap(*(u64 *)p));
- p += 8;
- size -= 8;
- }
-
- while (size != 0) {
- remainder = __crc32b(remainder, *p++);
- size--;
- }
-
- return remainder;
-}
-#undef ATTRIBUTES
-#endif /* Implementation using ARM CRC32 instructions */
-
-/*
- * CRC-32 folding with ARM Crypto extension-PMULL
- *
- * This works the same way as the x86 PCLMUL version.
- * See x86/crc32_pclmul_template.h for an explanation.
- */
-#undef DISPATCH_PMULL
-#if !defined(DEFAULT_IMPL) && \
- (defined(__ARM_FEATURE_CRYPTO) || \
- (ARM_CPU_FEATURES_ENABLED && \
- COMPILER_SUPPORTS_PMULL_TARGET_INTRINSICS)) && \
- /* not yet tested on big endian, probably needs changes to work there */ \
- (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
-# define FUNCNAME crc32_pmull
-# define FUNCNAME_ALIGNED crc32_pmull_aligned
-# ifdef __ARM_FEATURE_CRYPTO
-# define ATTRIBUTES
-# define DEFAULT_IMPL crc32_pmull
-# else
-# ifdef __arm__
-# define ATTRIBUTES __attribute__((target("fpu=crypto-neon-fp-armv8")))
-# else
-# ifdef __clang__
-# define ATTRIBUTES __attribute__((target("crypto")))
-# else
-# define ATTRIBUTES __attribute__((target("+crypto")))
-# endif
-# endif
-# define DISPATCH 1
-# define DISPATCH_PMULL 1
-# endif
-
-#include <arm_neon.h>
-
-static forceinline ATTRIBUTES uint8x16_t
-clmul_00(uint8x16_t a, uint8x16_t b)
-{
- return (uint8x16_t)vmull_p64((poly64_t)vget_low_u8(a),
- (poly64_t)vget_low_u8(b));
-}
-
-static forceinline ATTRIBUTES uint8x16_t
-clmul_10(uint8x16_t a, uint8x16_t b)
-{
- return (uint8x16_t)vmull_p64((poly64_t)vget_low_u8(a),
- (poly64_t)vget_high_u8(b));
-}
-
-static forceinline ATTRIBUTES uint8x16_t
-clmul_11(uint8x16_t a, uint8x16_t b)
-{
- return (uint8x16_t)vmull_high_p64((poly64x2_t)a, (poly64x2_t)b);
-}
-
-static forceinline ATTRIBUTES uint8x16_t
-fold_128b(uint8x16_t dst, uint8x16_t src, uint8x16_t multipliers)
-{
- return dst ^ clmul_00(src, multipliers) ^ clmul_11(src, multipliers);
-}
-
-static forceinline ATTRIBUTES u32
-crc32_pmull_aligned(u32 remainder, const uint8x16_t *p, size_t nr_segs)
-{
- /* Constants precomputed by gen_crc32_multipliers.c. Do not edit! */
- const uint8x16_t multipliers_4 =
- (uint8x16_t)(uint64x2_t){ 0x8F352D95, 0x1D9513D7 };
- const uint8x16_t multipliers_1 =
- (uint8x16_t)(uint64x2_t){ 0xAE689191, 0xCCAA009E };
- const uint8x16_t final_multiplier =
- (uint8x16_t)(uint64x2_t){ 0xB8BC6765 };
- const uint8x16_t mask32 = (uint8x16_t)(uint32x4_t){ 0xFFFFFFFF };
- const uint8x16_t barrett_reduction_constants =
- (uint8x16_t)(uint64x2_t){ 0x00000001F7011641,
- 0x00000001DB710641 };
- const uint8x16_t zeroes = (uint8x16_t){ 0 };
-
- const uint8x16_t * const end = p + nr_segs;
- const uint8x16_t * const end512 = p + (nr_segs & ~3);
- uint8x16_t x0, x1, x2, x3;
-
- x0 = *p++ ^ (uint8x16_t)(uint32x4_t){ remainder };
- if (nr_segs >= 4) {
- x1 = *p++;
- x2 = *p++;
- x3 = *p++;
-
- /* Fold 512 bits at a time */
- while (p != end512) {
- x0 = fold_128b(*p++, x0, multipliers_4);
- x1 = fold_128b(*p++, x1, multipliers_4);
- x2 = fold_128b(*p++, x2, multipliers_4);
- x3 = fold_128b(*p++, x3, multipliers_4);
- }
-
- /* Fold 512 bits => 128 bits */
- x1 = fold_128b(x1, x0, multipliers_1);
- x2 = fold_128b(x2, x1, multipliers_1);
- x0 = fold_128b(x3, x2, multipliers_1);
- }
-
- /* Fold 128 bits at a time */
- while (p != end)
- x0 = fold_128b(*p++, x0, multipliers_1);
-
- /* Fold 128 => 96 bits, implicitly appending 32 zeroes */
- x0 = vextq_u8(x0, zeroes, 8) ^ clmul_10(x0, multipliers_1);
-
- /* Fold 96 => 64 bits */
- x0 = vextq_u8(x0, zeroes, 4) ^ clmul_00(x0 & mask32, final_multiplier);
-
- /* Reduce 64 => 32 bits using Barrett reduction */
- x1 = x0;
- x0 = clmul_00(x0 & mask32, barrett_reduction_constants);
- x0 = clmul_10(x0 & mask32, barrett_reduction_constants);
- return vgetq_lane_u32((uint32x4_t)(x0 ^ x1), 1);
-}
-#define IMPL_ALIGNMENT 16
-#define IMPL_SEGMENT_SIZE 16
-#include "../crc32_vec_template.h"
-#endif /* PMULL implementation */
-
-#ifdef DISPATCH
-static inline crc32_func_t
-arch_select_crc32_func(void)
-{
- u32 features = get_cpu_features();
-
-#ifdef DISPATCH_ARM
- if (features & ARM_CPU_FEATURE_CRC32)
- return crc32_arm;
-#endif
-#ifdef DISPATCH_PMULL
- if (features & ARM_CPU_FEATURE_PMULL)
- return crc32_pmull;
-#endif
- return NULL;
-}
-#endif /* DISPATCH */
-
-#endif /* LIB_ARM_CRC32_IMPL_H */
diff --git a/util/compress/libdeflate/lib/arm/matchfinder_impl.h b/util/compress/libdeflate/lib/arm/matchfinder_impl.h
deleted file mode 100644
index da0d2fd79..000000000
--- a/util/compress/libdeflate/lib/arm/matchfinder_impl.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * arm/matchfinder_impl.h - ARM implementations of matchfinder functions
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LIB_ARM_MATCHFINDER_IMPL_H
-#define LIB_ARM_MATCHFINDER_IMPL_H
-
-#ifdef __ARM_NEON
-# include <arm_neon.h>
-static forceinline void
-matchfinder_init_neon(mf_pos_t *data, size_t size)
-{
- int16x8_t *p = (int16x8_t *)data;
- int16x8_t v = (int16x8_t) {
- MATCHFINDER_INITVAL, MATCHFINDER_INITVAL, MATCHFINDER_INITVAL,
- MATCHFINDER_INITVAL, MATCHFINDER_INITVAL, MATCHFINDER_INITVAL,
- MATCHFINDER_INITVAL, MATCHFINDER_INITVAL,
- };
-
- STATIC_ASSERT(MATCHFINDER_MEM_ALIGNMENT % sizeof(*p) == 0);
- STATIC_ASSERT(MATCHFINDER_SIZE_ALIGNMENT % (4 * sizeof(*p)) == 0);
- STATIC_ASSERT(sizeof(mf_pos_t) == 2);
-
- do {
- p[0] = v;
- p[1] = v;
- p[2] = v;
- p[3] = v;
- p += 4;
- size -= 4 * sizeof(*p);
- } while (size != 0);
-}
-#define matchfinder_init matchfinder_init_neon
-
-static forceinline void
-matchfinder_rebase_neon(mf_pos_t *data, size_t size)
-{
- int16x8_t *p = (int16x8_t *)data;
- int16x8_t v = (int16x8_t) {
- (u16)-MATCHFINDER_WINDOW_SIZE, (u16)-MATCHFINDER_WINDOW_SIZE,
- (u16)-MATCHFINDER_WINDOW_SIZE, (u16)-MATCHFINDER_WINDOW_SIZE,
- (u16)-MATCHFINDER_WINDOW_SIZE, (u16)-MATCHFINDER_WINDOW_SIZE,
- (u16)-MATCHFINDER_WINDOW_SIZE, (u16)-MATCHFINDER_WINDOW_SIZE,
- };
-
- STATIC_ASSERT(MATCHFINDER_MEM_ALIGNMENT % sizeof(*p) == 0);
- STATIC_ASSERT(MATCHFINDER_SIZE_ALIGNMENT % (4 * sizeof(*p)) == 0);
- STATIC_ASSERT(sizeof(mf_pos_t) == 2);
-
- do {
- p[0] = vqaddq_s16(p[0], v);
- p[1] = vqaddq_s16(p[1], v);
- p[2] = vqaddq_s16(p[2], v);
- p[3] = vqaddq_s16(p[3], v);
- p += 4;
- size -= 4 * sizeof(*p);
- } while (size != 0);
-}
-#define matchfinder_rebase matchfinder_rebase_neon
-
-#endif /* __ARM_NEON */
-
-#endif /* LIB_ARM_MATCHFINDER_IMPL_H */
diff --git a/util/compress/libdeflate/lib/bt_matchfinder.h b/util/compress/libdeflate/lib/bt_matchfinder.h
deleted file mode 100644
index a994a571c..000000000
--- a/util/compress/libdeflate/lib/bt_matchfinder.h
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * bt_matchfinder.h - Lempel-Ziv matchfinding with a hash table of binary trees
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * ----------------------------------------------------------------------------
- *
- * This is a Binary Trees (bt) based matchfinder.
- *
- * The main data structure is a hash table where each hash bucket contains a
- * binary tree of sequences whose first 4 bytes share the same hash code. Each
- * sequence is identified by its starting position in the input buffer. Each
- * binary tree is always sorted such that each left child represents a sequence
- * lexicographically lesser than its parent and each right child represents a
- * sequence lexicographically greater than its parent.
- *
- * The algorithm processes the input buffer sequentially. At each byte
- * position, the hash code of the first 4 bytes of the sequence beginning at
- * that position (the sequence being matched against) is computed. This
- * identifies the hash bucket to use for that position. Then, a new binary tree
- * node is created to represent the current sequence. Then, in a single tree
- * traversal, the hash bucket's binary tree is searched for matches and is
- * re-rooted at the new node.
- *
- * Compared to the simpler algorithm that uses linked lists instead of binary
- * trees (see hc_matchfinder.h), the binary tree version gains more information
- * at each node visitation. Ideally, the binary tree version will examine only
- * 'log(n)' nodes to find the same matches that the linked list version will
- * find by examining 'n' nodes. In addition, the binary tree version can
- * examine fewer bytes at each node by taking advantage of the common prefixes
- * that result from the sort order, whereas the linked list version may have to
- * examine up to the full length of the match at each node.
- *
- * However, it is not always best to use the binary tree version. It requires
- * nearly twice as much memory as the linked list version, and it takes time to
- * keep the binary trees sorted, even at positions where the compressor does not
- * need matches. Generally, when doing fast compression on small buffers,
- * binary trees are the wrong approach. They are best suited for thorough
- * compression and/or large buffers.
- *
- * ----------------------------------------------------------------------------
- */
-
-#ifndef LIB_BT_MATCHFINDER_H
-#define LIB_BT_MATCHFINDER_H
-
-#include "matchfinder_common.h"
-
-#define BT_MATCHFINDER_HASH3_ORDER 16
-#define BT_MATCHFINDER_HASH3_WAYS 2
-#define BT_MATCHFINDER_HASH4_ORDER 16
-
-#define BT_MATCHFINDER_TOTAL_HASH_SIZE \
- (((1UL << BT_MATCHFINDER_HASH3_ORDER) * BT_MATCHFINDER_HASH3_WAYS + \
- (1UL << BT_MATCHFINDER_HASH4_ORDER)) * sizeof(mf_pos_t))
-
-/* Representation of a match found by the bt_matchfinder */
-struct lz_match {
-
- /* The number of bytes matched. */
- u16 length;
-
- /* The offset back from the current position that was matched. */
- u16 offset;
-};
-
-struct bt_matchfinder {
-
- /* The hash table for finding length 3 matches */
- mf_pos_t hash3_tab[1UL << BT_MATCHFINDER_HASH3_ORDER][BT_MATCHFINDER_HASH3_WAYS];
-
- /* The hash table which contains the roots of the binary trees for
- * finding length 4+ matches */
- mf_pos_t hash4_tab[1UL << BT_MATCHFINDER_HASH4_ORDER];
-
- /* The child node references for the binary trees. The left and right
- * children of the node for the sequence with position 'pos' are
- * 'child_tab[pos * 2]' and 'child_tab[pos * 2 + 1]', respectively. */
- mf_pos_t child_tab[2UL * MATCHFINDER_WINDOW_SIZE];
-
-}
-#ifdef _aligned_attribute
-_aligned_attribute(MATCHFINDER_MEM_ALIGNMENT)
-#endif
-;
-
-/* Prepare the matchfinder for a new input buffer. */
-static forceinline void
-bt_matchfinder_init(struct bt_matchfinder *mf)
-{
- STATIC_ASSERT(BT_MATCHFINDER_TOTAL_HASH_SIZE %
- MATCHFINDER_SIZE_ALIGNMENT == 0);
-
- matchfinder_init((mf_pos_t *)mf, BT_MATCHFINDER_TOTAL_HASH_SIZE);
-}
-
-static forceinline void
-bt_matchfinder_slide_window(struct bt_matchfinder *mf)
-{
- STATIC_ASSERT(sizeof(*mf) % MATCHFINDER_SIZE_ALIGNMENT == 0);
-
- matchfinder_rebase((mf_pos_t *)mf, sizeof(*mf));
-}
-
-static forceinline mf_pos_t *
-bt_left_child(struct bt_matchfinder *mf, s32 node)
-{
- return &mf->child_tab[2 * (node & (MATCHFINDER_WINDOW_SIZE - 1)) + 0];
-}
-
-static forceinline mf_pos_t *
-bt_right_child(struct bt_matchfinder *mf, s32 node)
-{
- return &mf->child_tab[2 * (node & (MATCHFINDER_WINDOW_SIZE - 1)) + 1];
-}
-
-/* The minimum permissible value of 'max_len' for bt_matchfinder_get_matches()
- * and bt_matchfinder_skip_position(). There must be sufficiently many bytes
- * remaining to load a 32-bit integer from the *next* position. */
-#define BT_MATCHFINDER_REQUIRED_NBYTES 5
-
-/* Advance the binary tree matchfinder by one byte, optionally recording
- * matches. @record_matches should be a compile-time constant. */
-static forceinline struct lz_match *
-bt_matchfinder_advance_one_byte(struct bt_matchfinder * const restrict mf,
- const u8 * const restrict in_base,
- const ptrdiff_t cur_pos,
- const u32 max_len,
- const u32 nice_len,
- const u32 max_search_depth,
- u32 * const restrict next_hashes,
- u32 * const restrict best_len_ret,
- struct lz_match * restrict lz_matchptr,
- const bool record_matches)
-{
- const u8 *in_next = in_base + cur_pos;
- u32 depth_remaining = max_search_depth;
- const s32 cutoff = cur_pos - MATCHFINDER_WINDOW_SIZE;
- u32 next_hashseq;
- u32 hash3;
- u32 hash4;
- s32 cur_node;
-#if BT_MATCHFINDER_HASH3_WAYS >= 2
- s32 cur_node_2;
-#endif
- const u8 *matchptr;
- mf_pos_t *pending_lt_ptr, *pending_gt_ptr;
- u32 best_lt_len, best_gt_len;
- u32 len;
- u32 best_len = 3;
-
- STATIC_ASSERT(BT_MATCHFINDER_HASH3_WAYS >= 1 &&
- BT_MATCHFINDER_HASH3_WAYS <= 2);
-
- next_hashseq = get_unaligned_le32(in_next + 1);
-
- hash3 = next_hashes[0];
- hash4 = next_hashes[1];
-
- next_hashes[0] = lz_hash(next_hashseq & 0xFFFFFF, BT_MATCHFINDER_HASH3_ORDER);
- next_hashes[1] = lz_hash(next_hashseq, BT_MATCHFINDER_HASH4_ORDER);
- prefetchw(&mf->hash3_tab[next_hashes[0]]);
- prefetchw(&mf->hash4_tab[next_hashes[1]]);
-
- cur_node = mf->hash3_tab[hash3][0];
- mf->hash3_tab[hash3][0] = cur_pos;
-#if BT_MATCHFINDER_HASH3_WAYS >= 2
- cur_node_2 = mf->hash3_tab[hash3][1];
- mf->hash3_tab[hash3][1] = cur_node;
-#endif
- if (record_matches && cur_node > cutoff) {
- u32 seq3 = load_u24_unaligned(in_next);
- if (seq3 == load_u24_unaligned(&in_base[cur_node])) {
- lz_matchptr->length = 3;
- lz_matchptr->offset = in_next - &in_base[cur_node];
- lz_matchptr++;
- }
- #if BT_MATCHFINDER_HASH3_WAYS >= 2
- else if (cur_node_2 > cutoff &&
- seq3 == load_u24_unaligned(&in_base[cur_node_2]))
- {
- lz_matchptr->length = 3;
- lz_matchptr->offset = in_next - &in_base[cur_node_2];
- lz_matchptr++;
- }
- #endif
- }
-
- cur_node = mf->hash4_tab[hash4];
- mf->hash4_tab[hash4] = cur_pos;
-
- pending_lt_ptr = bt_left_child(mf, cur_pos);
- pending_gt_ptr = bt_right_child(mf, cur_pos);
-
- if (cur_node <= cutoff) {
- *pending_lt_ptr = MATCHFINDER_INITVAL;
- *pending_gt_ptr = MATCHFINDER_INITVAL;
- *best_len_ret = best_len;
- return lz_matchptr;
- }
-
- best_lt_len = 0;
- best_gt_len = 0;
- len = 0;
-
- for (;;) {
- matchptr = &in_base[cur_node];
-
- if (matchptr[len] == in_next[len]) {
- len = lz_extend(in_next, matchptr, len + 1, max_len);
- if (!record_matches || len > best_len) {
- if (record_matches) {
- best_len = len;
- lz_matchptr->length = len;
- lz_matchptr->offset = in_next - matchptr;
- lz_matchptr++;
- }
- if (len >= nice_len) {
- *pending_lt_ptr = *bt_left_child(mf, cur_node);
- *pending_gt_ptr = *bt_right_child(mf, cur_node);
- *best_len_ret = best_len;
- return lz_matchptr;
- }
- }
- }
-
- if (matchptr[len] < in_next[len]) {
- *pending_lt_ptr = cur_node;
- pending_lt_ptr = bt_right_child(mf, cur_node);
- cur_node = *pending_lt_ptr;
- best_lt_len = len;
- if (best_gt_len < len)
- len = best_gt_len;
- } else {
- *pending_gt_ptr = cur_node;
- pending_gt_ptr = bt_left_child(mf, cur_node);
- cur_node = *pending_gt_ptr;
- best_gt_len = len;
- if (best_lt_len < len)
- len = best_lt_len;
- }
-
- if (cur_node <= cutoff || !--depth_remaining) {
- *pending_lt_ptr = MATCHFINDER_INITVAL;
- *pending_gt_ptr = MATCHFINDER_INITVAL;
- *best_len_ret = best_len;
- return lz_matchptr;
- }
- }
-}
-
-/*
- * Retrieve a list of matches with the current position.
- *
- * @mf
- * The matchfinder structure.
- * @in_base
- * Pointer to the next byte in the input buffer to process _at the last
- * time bt_matchfinder_init() or bt_matchfinder_slide_window() was called_.
- * @cur_pos
- * The current position in the input buffer relative to @in_base (the
- * position of the sequence being matched against).
- * @max_len
- * The maximum permissible match length at this position. Must be >=
- * BT_MATCHFINDER_REQUIRED_NBYTES.
- * @nice_len
- * Stop searching if a match of at least this length is found.
- * Must be <= @max_len.
- * @max_search_depth
- * Limit on the number of potential matches to consider. Must be >= 1.
- * @next_hashes
- * The precomputed hash codes for the sequence beginning at @in_next.
- * These will be used and then updated with the precomputed hashcodes for
- * the sequence beginning at @in_next + 1.
- * @best_len_ret
- * If a match of length >= 4 was found, then the length of the longest such
- * match is written here; otherwise 3 is written here. (Note: this is
- * redundant with the 'struct lz_match' array, but this is easier for the
- * compiler to optimize when inlined and the caller immediately does a
- * check against 'best_len'.)
- * @lz_matchptr
- * An array in which this function will record the matches. The recorded
- * matches will be sorted by strictly increasing length and (non-strictly)
- * increasing offset. The maximum number of matches that may be found is
- * 'nice_len - 2'.
- *
- * The return value is a pointer to the next available slot in the @lz_matchptr
- * array. (If no matches were found, this will be the same as @lz_matchptr.)
- */
-static forceinline struct lz_match *
-bt_matchfinder_get_matches(struct bt_matchfinder *mf,
- const u8 *in_base,
- ptrdiff_t cur_pos,
- u32 max_len,
- u32 nice_len,
- u32 max_search_depth,
- u32 next_hashes[2],
- u32 *best_len_ret,
- struct lz_match *lz_matchptr)
-{
- return bt_matchfinder_advance_one_byte(mf,
- in_base,
- cur_pos,
- max_len,
- nice_len,
- max_search_depth,
- next_hashes,
- best_len_ret,
- lz_matchptr,
- true);
-}
-
-/*
- * Advance the matchfinder, but don't record any matches.
- *
- * This is very similar to bt_matchfinder_get_matches() because both functions
- * must do hashing and tree re-rooting.
- */
-static forceinline void
-bt_matchfinder_skip_position(struct bt_matchfinder *mf,
- const u8 *in_base,
- ptrdiff_t cur_pos,
- u32 nice_len,
- u32 max_search_depth,
- u32 next_hashes[2])
-{
- u32 best_len;
- bt_matchfinder_advance_one_byte(mf,
- in_base,
- cur_pos,
- nice_len,
- nice_len,
- max_search_depth,
- next_hashes,
- &best_len,
- NULL,
- false);
-}
-
-#endif /* LIB_BT_MATCHFINDER_H */
diff --git a/util/compress/libdeflate/lib/cpu_features_common.h b/util/compress/libdeflate/lib/cpu_features_common.h
deleted file mode 100644
index 570b62dbb..000000000
--- a/util/compress/libdeflate/lib/cpu_features_common.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * cpu_features_common.h - code shared by all lib/$arch/cpu_features.c
- *
- * Copyright 2020 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LIB_CPU_FEATURES_COMMON_H
-#define LIB_CPU_FEATURES_COMMON_H
-
-#if defined(TEST_SUPPORT__DO_NOT_USE) && !defined(FREESTANDING)
-# define _GNU_SOURCE 1 /* for strdup() and strtok_r() */
-# include <stdio.h>
-# include <stdlib.h>
-# include <string.h>
-#endif
-
-#include "lib_common.h"
-
-struct cpu_feature {
- u32 bit;
- const char *name;
-};
-
-#if defined(TEST_SUPPORT__DO_NOT_USE) && !defined(FREESTANDING)
-/* Disable any features that are listed in $LIBDEFLATE_DISABLE_CPU_FEATURES. */
-static inline void
-disable_cpu_features_for_testing(u32 *features,
- const struct cpu_feature *feature_table,
- size_t feature_table_length)
-{
- char *env_value, *strbuf, *p, *saveptr = NULL;
- size_t i;
-
- env_value = getenv("LIBDEFLATE_DISABLE_CPU_FEATURES");
- if (!env_value)
- return;
- strbuf = strdup(env_value);
- if (!strbuf)
- abort();
- p = strtok_r(strbuf, ",", &saveptr);
- while (p) {
- for (i = 0; i < feature_table_length; i++) {
- if (strcmp(p, feature_table[i].name) == 0) {
- *features &= ~feature_table[i].bit;
- break;
- }
- }
- if (i == feature_table_length) {
- fprintf(stderr,
- "unrecognized feature in LIBDEFLATE_DISABLE_CPU_FEATURES: \"%s\"\n",
- p);
- abort();
- }
- p = strtok_r(NULL, ",", &saveptr);
- }
- free(strbuf);
-}
-#else /* TEST_SUPPORT__DO_NOT_USE */
-static inline void
-disable_cpu_features_for_testing(u32 *features,
- const struct cpu_feature *feature_table,
- size_t feature_table_length)
-{
-}
-#endif /* !TEST_SUPPORT__DO_NOT_USE */
-
-#endif /* LIB_CPU_FEATURES_COMMON_H */
diff --git a/util/compress/libdeflate/lib/crc32.c b/util/compress/libdeflate/lib/crc32.c
deleted file mode 100644
index 6adacc5dc..000000000
--- a/util/compress/libdeflate/lib/crc32.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * crc32.c - CRC-32 checksum algorithm for the gzip format
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * High-level description of CRC
- * =============================
- *
- * Consider a bit sequence 'bits[1...len]'. Interpret 'bits' as the "message"
- * polynomial M(x) with coefficients in GF(2) (the field of integers modulo 2),
- * where the coefficient of 'x^i' is 'bits[len - i]'. Then, compute:
- *
- * R(x) = M(x)*x^n mod G(x)
- *
- * where G(x) is a selected "generator" polynomial of degree 'n'. The remainder
- * R(x) is a polynomial of max degree 'n - 1'. The CRC of 'bits' is R(x)
- * interpreted as a bitstring of length 'n'.
- *
- * CRC used in gzip
- * ================
- *
- * In the gzip format (RFC 1952):
- *
- * - The bitstring to checksum is formed from the bytes of the uncompressed
- * data by concatenating the bits from the bytes in order, proceeding
- * from the low-order bit to the high-order bit within each byte.
- *
- * - The generator polynomial G(x) is: x^32 + x^26 + x^23 + x^22 + x^16 +
- * x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1.
- * Consequently, the CRC length is 32 bits ("CRC-32").
- *
- * - The highest order 32 coefficients of M(x)*x^n are inverted.
- *
- * - All 32 coefficients of R(x) are inverted.
- *
- * The two inversions cause added leading and trailing zero bits to affect the
- * resulting CRC, whereas with a regular CRC such bits would have no effect on
- * the CRC.
- *
- * Computation and optimizations
- * =============================
- *
- * We can compute R(x) through "long division", maintaining only 32 bits of
- * state at any given time. Multiplication by 'x' can be implemented as
- * right-shifting by 1 (assuming the polynomial<=>bitstring mapping where the
- * highest order bit represents the coefficient of x^0), and both addition and
- * subtraction can be implemented as bitwise exclusive OR (since we are working
- * in GF(2)). Here is an unoptimized implementation:
- *
- * static u32 crc32_gzip(const u8 *buffer, size_t size)
- * {
- * u32 remainder = 0;
- * const u32 divisor = 0xEDB88320;
- *
- * for (size_t i = 0; i < size * 8 + 32; i++) {
- * int bit;
- * u32 multiple;
- *
- * if (i < size * 8)
- * bit = (buffer[i / 8] >> (i % 8)) & 1;
- * else
- * bit = 0; // one of the 32 appended 0 bits
- *
- * if (i < 32) // the first 32 bits are inverted
- * bit ^= 1;
- *
- * if (remainder & 1)
- * multiple = divisor;
- * else
- * multiple = 0;
- *
- * remainder >>= 1;
- * remainder |= (u32)bit << 31;
- * remainder ^= multiple;
- * }
- *
- * return ~remainder;
- * }
- *
- * In this implementation, the 32-bit integer 'remainder' maintains the
- * remainder of the currently processed portion of the message (with 32 zero
- * bits appended) when divided by the generator polynomial. 'remainder' is the
- * representation of R(x), and 'divisor' is the representation of G(x) excluding
- * the x^32 coefficient. For each bit to process, we multiply R(x) by 'x^1',
- * then add 'x^0' if the new bit is a 1. If this causes R(x) to gain a nonzero
- * x^32 term, then we subtract G(x) from R(x).
- *
- * We can speed this up by taking advantage of the fact that XOR is commutative
- * and associative, so the order in which we combine the inputs into 'remainder'
- * is unimportant. And since each message bit we add doesn't affect the choice
- * of 'multiple' until 32 bits later, we need not actually add each message bit
- * until that point:
- *
- * static u32 crc32_gzip(const u8 *buffer, size_t size)
- * {
- * u32 remainder = ~0;
- * const u32 divisor = 0xEDB88320;
- *
- * for (size_t i = 0; i < size * 8; i++) {
- * int bit;
- * u32 multiple;
- *
- * bit = (buffer[i / 8] >> (i % 8)) & 1;
- * remainder ^= bit;
- * if (remainder & 1)
- * multiple = divisor;
- * else
- * multiple = 0;
- * remainder >>= 1;
- * remainder ^= multiple;
- * }
- *
- * return ~remainder;
- * }
- *
- * With the above implementation we get the effect of 32 appended 0 bits for
- * free; they never affect the choice of a divisor, nor would they change the
- * value of 'remainder' if they were to be actually XOR'ed in. And by starting
- * with a remainder of all 1 bits, we get the effect of complementing the first
- * 32 message bits.
- *
- * The next optimization is to process the input in multi-bit units. Suppose
- * that we insert the next 'n' message bits into the remainder. Then we get an
- * intermediate remainder of length '32 + n' bits, and the CRC of the extra 'n'
- * bits is the amount by which the low 32 bits of the remainder will change as a
- * result of cancelling out those 'n' bits. Taking n=8 (one byte) and
- * precomputing a table containing the CRC of each possible byte, we get
- * crc32_slice1() defined below.
- *
- * As a further optimization, we could increase the multi-bit unit size to 16.
- * However, that is inefficient because the table size explodes from 256 entries
- * (1024 bytes) to 65536 entries (262144 bytes), which wastes memory and won't
- * fit in L1 cache on typical processors.
- *
- * However, we can actually process 4 bytes at a time using 4 different tables
- * with 256 entries each. Logically, we form a 64-bit intermediate remainder
- * and cancel out the high 32 bits in 8-bit chunks. Bits 32-39 are cancelled
- * out by the CRC of those bits, whereas bits 40-47 are be cancelled out by the
- * CRC of those bits with 8 zero bits appended, and so on. This method is
- * implemented in crc32_slice4(), defined below.
- *
- * In crc32_slice8(), this method is extended to 8 bytes at a time. The
- * intermediate remainder (which we never actually store explicitly) is 96 bits.
- *
- * On CPUs that support fast carryless multiplication, CRCs can be computed even
- * more quickly via "folding". See e.g. the x86 PCLMUL implementation.
- */
-
-#include "lib_common.h"
-#include "libdeflate.h"
-
-typedef u32 (*crc32_func_t)(u32, const u8 *, size_t);
-
-/* Include architecture-specific implementations if available */
-#undef CRC32_SLICE1
-#undef CRC32_SLICE4
-#undef CRC32_SLICE8
-#undef DEFAULT_IMPL
-#undef DISPATCH
-#if defined(__arm__) || defined(__aarch64__)
-# include "arm/crc32_impl.h"
-#elif defined(__i386__) || defined(__x86_64__)
-# include "x86/crc32_impl.h"
-#endif
-
-/*
- * Define a generic implementation (crc32_slice8()) if needed. crc32_slice1()
- * may also be needed as a fallback for architecture-specific implementations.
- */
-
-#ifndef DEFAULT_IMPL
-# define CRC32_SLICE8 1
-# define DEFAULT_IMPL crc32_slice8
-#endif
-
-#if defined(CRC32_SLICE1) || defined(CRC32_SLICE4) || defined(CRC32_SLICE8)
-#include "crc32_table.h"
-static forceinline u32
-crc32_update_byte(u32 remainder, u8 next_byte)
-{
- return (remainder >> 8) ^ crc32_table[(u8)remainder ^ next_byte];
-}
-#endif
-
-#ifdef CRC32_SLICE1
-static u32
-crc32_slice1(u32 remainder, const u8 *buffer, size_t size)
-{
- size_t i;
-
- STATIC_ASSERT(ARRAY_LEN(crc32_table) >= 0x100);
-
- for (i = 0; i < size; i++)
- remainder = crc32_update_byte(remainder, buffer[i]);
- return remainder;
-}
-#endif /* CRC32_SLICE1 */
-
-#ifdef CRC32_SLICE4
-static u32
-crc32_slice4(u32 remainder, const u8 *buffer, size_t size)
-{
- const u8 *p = buffer;
- const u8 *end = buffer + size;
- const u8 *end32;
-
- STATIC_ASSERT(ARRAY_LEN(crc32_table) >= 0x400);
-
- for (; ((uintptr_t)p & 3) && p != end; p++)
- remainder = crc32_update_byte(remainder, *p);
-
- end32 = p + ((end - p) & ~3);
- for (; p != end32; p += 4) {
- u32 v = le32_bswap(*(const u32 *)p);
- remainder =
- crc32_table[0x300 + (u8)((remainder ^ v) >> 0)] ^
- crc32_table[0x200 + (u8)((remainder ^ v) >> 8)] ^
- crc32_table[0x100 + (u8)((remainder ^ v) >> 16)] ^
- crc32_table[0x000 + (u8)((remainder ^ v) >> 24)];
- }
-
- for (; p != end; p++)
- remainder = crc32_update_byte(remainder, *p);
-
- return remainder;
-}
-#endif /* CRC32_SLICE4 */
-
-#ifdef CRC32_SLICE8
-static u32
-crc32_slice8(u32 remainder, const u8 *buffer, size_t size)
-{
- const u8 *p = buffer;
- const u8 *end = buffer + size;
- const u8 *end64;
-
- STATIC_ASSERT(ARRAY_LEN(crc32_table) >= 0x800);
-
- for (; ((uintptr_t)p & 7) && p != end; p++)
- remainder = crc32_update_byte(remainder, *p);
-
- end64 = p + ((end - p) & ~7);
- for (; p != end64; p += 8) {
- u32 v1 = le32_bswap(*(const u32 *)(p + 0));
- u32 v2 = le32_bswap(*(const u32 *)(p + 4));
- remainder =
- crc32_table[0x700 + (u8)((remainder ^ v1) >> 0)] ^
- crc32_table[0x600 + (u8)((remainder ^ v1) >> 8)] ^
- crc32_table[0x500 + (u8)((remainder ^ v1) >> 16)] ^
- crc32_table[0x400 + (u8)((remainder ^ v1) >> 24)] ^
- crc32_table[0x300 + (u8)(v2 >> 0)] ^
- crc32_table[0x200 + (u8)(v2 >> 8)] ^
- crc32_table[0x100 + (u8)(v2 >> 16)] ^
- crc32_table[0x000 + (u8)(v2 >> 24)];
- }
-
- for (; p != end; p++)
- remainder = crc32_update_byte(remainder, *p);
-
- return remainder;
-}
-#endif /* CRC32_SLICE8 */
-
-#ifdef DISPATCH
-static u32 dispatch(u32, const u8 *, size_t);
-
-static volatile crc32_func_t crc32_impl = dispatch;
-
-/* Choose the fastest implementation at runtime */
-static u32 dispatch(u32 remainder, const u8 *buffer, size_t size)
-{
- crc32_func_t f = arch_select_crc32_func();
-
- if (f == NULL)
- f = DEFAULT_IMPL;
-
- crc32_impl = f;
- return crc32_impl(remainder, buffer, size);
-}
-#else
-# define crc32_impl DEFAULT_IMPL /* only one implementation, use it */
-#endif
-
-LIBDEFLATEEXPORT u32 LIBDEFLATEAPI
-libdeflate_crc32(u32 remainder, const void *buffer, size_t size)
-{
- if (buffer == NULL) /* return initial value */
- return 0;
- return ~crc32_impl(~remainder, buffer, size);
-}
diff --git a/util/compress/libdeflate/lib/crc32_table.h b/util/compress/libdeflate/lib/crc32_table.h
deleted file mode 100644
index 05421b982..000000000
--- a/util/compress/libdeflate/lib/crc32_table.h
+++ /dev/null
@@ -1,526 +0,0 @@
-/*
- * crc32_table.h - data table to accelerate CRC-32 computation
- *
- * THIS FILE WAS AUTOMATICALLY GENERATED BY gen_crc32_table.c. DO NOT EDIT.
- */
-
-#include <stdint.h>
-
-static const uint32_t crc32_table[] = {
- 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
- 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
- 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
- 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
- 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
- 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
- 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
- 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
- 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
- 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
- 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
- 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
- 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
- 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
- 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
- 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
- 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
- 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
- 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
- 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
- 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
- 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
- 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
- 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
- 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
- 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
- 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
- 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
- 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
- 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
- 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
- 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
- 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
- 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
- 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
- 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
- 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
- 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
- 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
- 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
- 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
- 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
- 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
- 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
- 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
- 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
- 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
- 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
- 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
- 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
- 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
- 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
- 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
- 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
- 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
- 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
- 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
- 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
- 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
- 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
-#if defined(CRC32_SLICE4) || defined(CRC32_SLICE8)
- 0x00000000, 0x191b3141, 0x32366282, 0x2b2d53c3,
- 0x646cc504, 0x7d77f445, 0x565aa786, 0x4f4196c7,
- 0xc8d98a08, 0xd1c2bb49, 0xfaefe88a, 0xe3f4d9cb,
- 0xacb54f0c, 0xb5ae7e4d, 0x9e832d8e, 0x87981ccf,
- 0x4ac21251, 0x53d92310, 0x78f470d3, 0x61ef4192,
- 0x2eaed755, 0x37b5e614, 0x1c98b5d7, 0x05838496,
- 0x821b9859, 0x9b00a918, 0xb02dfadb, 0xa936cb9a,
- 0xe6775d5d, 0xff6c6c1c, 0xd4413fdf, 0xcd5a0e9e,
- 0x958424a2, 0x8c9f15e3, 0xa7b24620, 0xbea97761,
- 0xf1e8e1a6, 0xe8f3d0e7, 0xc3de8324, 0xdac5b265,
- 0x5d5daeaa, 0x44469feb, 0x6f6bcc28, 0x7670fd69,
- 0x39316bae, 0x202a5aef, 0x0b07092c, 0x121c386d,
- 0xdf4636f3, 0xc65d07b2, 0xed705471, 0xf46b6530,
- 0xbb2af3f7, 0xa231c2b6, 0x891c9175, 0x9007a034,
- 0x179fbcfb, 0x0e848dba, 0x25a9de79, 0x3cb2ef38,
- 0x73f379ff, 0x6ae848be, 0x41c51b7d, 0x58de2a3c,
- 0xf0794f05, 0xe9627e44, 0xc24f2d87, 0xdb541cc6,
- 0x94158a01, 0x8d0ebb40, 0xa623e883, 0xbf38d9c2,
- 0x38a0c50d, 0x21bbf44c, 0x0a96a78f, 0x138d96ce,
- 0x5ccc0009, 0x45d73148, 0x6efa628b, 0x77e153ca,
- 0xbabb5d54, 0xa3a06c15, 0x888d3fd6, 0x91960e97,
- 0xded79850, 0xc7cca911, 0xece1fad2, 0xf5facb93,
- 0x7262d75c, 0x6b79e61d, 0x4054b5de, 0x594f849f,
- 0x160e1258, 0x0f152319, 0x243870da, 0x3d23419b,
- 0x65fd6ba7, 0x7ce65ae6, 0x57cb0925, 0x4ed03864,
- 0x0191aea3, 0x188a9fe2, 0x33a7cc21, 0x2abcfd60,
- 0xad24e1af, 0xb43fd0ee, 0x9f12832d, 0x8609b26c,
- 0xc94824ab, 0xd05315ea, 0xfb7e4629, 0xe2657768,
- 0x2f3f79f6, 0x362448b7, 0x1d091b74, 0x04122a35,
- 0x4b53bcf2, 0x52488db3, 0x7965de70, 0x607eef31,
- 0xe7e6f3fe, 0xfefdc2bf, 0xd5d0917c, 0xcccba03d,
- 0x838a36fa, 0x9a9107bb, 0xb1bc5478, 0xa8a76539,
- 0x3b83984b, 0x2298a90a, 0x09b5fac9, 0x10aecb88,
- 0x5fef5d4f, 0x46f46c0e, 0x6dd93fcd, 0x74c20e8c,
- 0xf35a1243, 0xea412302, 0xc16c70c1, 0xd8774180,
- 0x9736d747, 0x8e2de606, 0xa500b5c5, 0xbc1b8484,
- 0x71418a1a, 0x685abb5b, 0x4377e898, 0x5a6cd9d9,
- 0x152d4f1e, 0x0c367e5f, 0x271b2d9c, 0x3e001cdd,
- 0xb9980012, 0xa0833153, 0x8bae6290, 0x92b553d1,
- 0xddf4c516, 0xc4eff457, 0xefc2a794, 0xf6d996d5,
- 0xae07bce9, 0xb71c8da8, 0x9c31de6b, 0x852aef2a,
- 0xca6b79ed, 0xd37048ac, 0xf85d1b6f, 0xe1462a2e,
- 0x66de36e1, 0x7fc507a0, 0x54e85463, 0x4df36522,
- 0x02b2f3e5, 0x1ba9c2a4, 0x30849167, 0x299fa026,
- 0xe4c5aeb8, 0xfdde9ff9, 0xd6f3cc3a, 0xcfe8fd7b,
- 0x80a96bbc, 0x99b25afd, 0xb29f093e, 0xab84387f,
- 0x2c1c24b0, 0x350715f1, 0x1e2a4632, 0x07317773,
- 0x4870e1b4, 0x516bd0f5, 0x7a468336, 0x635db277,
- 0xcbfad74e, 0xd2e1e60f, 0xf9ccb5cc, 0xe0d7848d,
- 0xaf96124a, 0xb68d230b, 0x9da070c8, 0x84bb4189,
- 0x03235d46, 0x1a386c07, 0x31153fc4, 0x280e0e85,
- 0x674f9842, 0x7e54a903, 0x5579fac0, 0x4c62cb81,
- 0x8138c51f, 0x9823f45e, 0xb30ea79d, 0xaa1596dc,
- 0xe554001b, 0xfc4f315a, 0xd7626299, 0xce7953d8,
- 0x49e14f17, 0x50fa7e56, 0x7bd72d95, 0x62cc1cd4,
- 0x2d8d8a13, 0x3496bb52, 0x1fbbe891, 0x06a0d9d0,
- 0x5e7ef3ec, 0x4765c2ad, 0x6c48916e, 0x7553a02f,
- 0x3a1236e8, 0x230907a9, 0x0824546a, 0x113f652b,
- 0x96a779e4, 0x8fbc48a5, 0xa4911b66, 0xbd8a2a27,
- 0xf2cbbce0, 0xebd08da1, 0xc0fdde62, 0xd9e6ef23,
- 0x14bce1bd, 0x0da7d0fc, 0x268a833f, 0x3f91b27e,
- 0x70d024b9, 0x69cb15f8, 0x42e6463b, 0x5bfd777a,
- 0xdc656bb5, 0xc57e5af4, 0xee530937, 0xf7483876,
- 0xb809aeb1, 0xa1129ff0, 0x8a3fcc33, 0x9324fd72,
- 0x00000000, 0x01c26a37, 0x0384d46e, 0x0246be59,
- 0x0709a8dc, 0x06cbc2eb, 0x048d7cb2, 0x054f1685,
- 0x0e1351b8, 0x0fd13b8f, 0x0d9785d6, 0x0c55efe1,
- 0x091af964, 0x08d89353, 0x0a9e2d0a, 0x0b5c473d,
- 0x1c26a370, 0x1de4c947, 0x1fa2771e, 0x1e601d29,
- 0x1b2f0bac, 0x1aed619b, 0x18abdfc2, 0x1969b5f5,
- 0x1235f2c8, 0x13f798ff, 0x11b126a6, 0x10734c91,
- 0x153c5a14, 0x14fe3023, 0x16b88e7a, 0x177ae44d,
- 0x384d46e0, 0x398f2cd7, 0x3bc9928e, 0x3a0bf8b9,
- 0x3f44ee3c, 0x3e86840b, 0x3cc03a52, 0x3d025065,
- 0x365e1758, 0x379c7d6f, 0x35dac336, 0x3418a901,
- 0x3157bf84, 0x3095d5b3, 0x32d36bea, 0x331101dd,
- 0x246be590, 0x25a98fa7, 0x27ef31fe, 0x262d5bc9,
- 0x23624d4c, 0x22a0277b, 0x20e69922, 0x2124f315,
- 0x2a78b428, 0x2bbade1f, 0x29fc6046, 0x283e0a71,
- 0x2d711cf4, 0x2cb376c3, 0x2ef5c89a, 0x2f37a2ad,
- 0x709a8dc0, 0x7158e7f7, 0x731e59ae, 0x72dc3399,
- 0x7793251c, 0x76514f2b, 0x7417f172, 0x75d59b45,
- 0x7e89dc78, 0x7f4bb64f, 0x7d0d0816, 0x7ccf6221,
- 0x798074a4, 0x78421e93, 0x7a04a0ca, 0x7bc6cafd,
- 0x6cbc2eb0, 0x6d7e4487, 0x6f38fade, 0x6efa90e9,
- 0x6bb5866c, 0x6a77ec5b, 0x68315202, 0x69f33835,
- 0x62af7f08, 0x636d153f, 0x612bab66, 0x60e9c151,
- 0x65a6d7d4, 0x6464bde3, 0x662203ba, 0x67e0698d,
- 0x48d7cb20, 0x4915a117, 0x4b531f4e, 0x4a917579,
- 0x4fde63fc, 0x4e1c09cb, 0x4c5ab792, 0x4d98dda5,
- 0x46c49a98, 0x4706f0af, 0x45404ef6, 0x448224c1,
- 0x41cd3244, 0x400f5873, 0x4249e62a, 0x438b8c1d,
- 0x54f16850, 0x55330267, 0x5775bc3e, 0x56b7d609,
- 0x53f8c08c, 0x523aaabb, 0x507c14e2, 0x51be7ed5,
- 0x5ae239e8, 0x5b2053df, 0x5966ed86, 0x58a487b1,
- 0x5deb9134, 0x5c29fb03, 0x5e6f455a, 0x5fad2f6d,
- 0xe1351b80, 0xe0f771b7, 0xe2b1cfee, 0xe373a5d9,
- 0xe63cb35c, 0xe7fed96b, 0xe5b86732, 0xe47a0d05,
- 0xef264a38, 0xeee4200f, 0xeca29e56, 0xed60f461,
- 0xe82fe2e4, 0xe9ed88d3, 0xebab368a, 0xea695cbd,
- 0xfd13b8f0, 0xfcd1d2c7, 0xfe976c9e, 0xff5506a9,
- 0xfa1a102c, 0xfbd87a1b, 0xf99ec442, 0xf85cae75,
- 0xf300e948, 0xf2c2837f, 0xf0843d26, 0xf1465711,
- 0xf4094194, 0xf5cb2ba3, 0xf78d95fa, 0xf64fffcd,
- 0xd9785d60, 0xd8ba3757, 0xdafc890e, 0xdb3ee339,
- 0xde71f5bc, 0xdfb39f8b, 0xddf521d2, 0xdc374be5,
- 0xd76b0cd8, 0xd6a966ef, 0xd4efd8b6, 0xd52db281,
- 0xd062a404, 0xd1a0ce33, 0xd3e6706a, 0xd2241a5d,
- 0xc55efe10, 0xc49c9427, 0xc6da2a7e, 0xc7184049,
- 0xc25756cc, 0xc3953cfb, 0xc1d382a2, 0xc011e895,
- 0xcb4dafa8, 0xca8fc59f, 0xc8c97bc6, 0xc90b11f1,
- 0xcc440774, 0xcd866d43, 0xcfc0d31a, 0xce02b92d,
- 0x91af9640, 0x906dfc77, 0x922b422e, 0x93e92819,
- 0x96a63e9c, 0x976454ab, 0x9522eaf2, 0x94e080c5,
- 0x9fbcc7f8, 0x9e7eadcf, 0x9c381396, 0x9dfa79a1,
- 0x98b56f24, 0x99770513, 0x9b31bb4a, 0x9af3d17d,
- 0x8d893530, 0x8c4b5f07, 0x8e0de15e, 0x8fcf8b69,
- 0x8a809dec, 0x8b42f7db, 0x89044982, 0x88c623b5,
- 0x839a6488, 0x82580ebf, 0x801eb0e6, 0x81dcdad1,
- 0x8493cc54, 0x8551a663, 0x8717183a, 0x86d5720d,
- 0xa9e2d0a0, 0xa820ba97, 0xaa6604ce, 0xaba46ef9,
- 0xaeeb787c, 0xaf29124b, 0xad6fac12, 0xacadc625,
- 0xa7f18118, 0xa633eb2f, 0xa4755576, 0xa5b73f41,
- 0xa0f829c4, 0xa13a43f3, 0xa37cfdaa, 0xa2be979d,
- 0xb5c473d0, 0xb40619e7, 0xb640a7be, 0xb782cd89,
- 0xb2cddb0c, 0xb30fb13b, 0xb1490f62, 0xb08b6555,
- 0xbbd72268, 0xba15485f, 0xb853f606, 0xb9919c31,
- 0xbcde8ab4, 0xbd1ce083, 0xbf5a5eda, 0xbe9834ed,
- 0x00000000, 0xb8bc6765, 0xaa09c88b, 0x12b5afee,
- 0x8f629757, 0x37def032, 0x256b5fdc, 0x9dd738b9,
- 0xc5b428ef, 0x7d084f8a, 0x6fbde064, 0xd7018701,
- 0x4ad6bfb8, 0xf26ad8dd, 0xe0df7733, 0x58631056,
- 0x5019579f, 0xe8a530fa, 0xfa109f14, 0x42acf871,
- 0xdf7bc0c8, 0x67c7a7ad, 0x75720843, 0xcdce6f26,
- 0x95ad7f70, 0x2d111815, 0x3fa4b7fb, 0x8718d09e,
- 0x1acfe827, 0xa2738f42, 0xb0c620ac, 0x087a47c9,
- 0xa032af3e, 0x188ec85b, 0x0a3b67b5, 0xb28700d0,
- 0x2f503869, 0x97ec5f0c, 0x8559f0e2, 0x3de59787,
- 0x658687d1, 0xdd3ae0b4, 0xcf8f4f5a, 0x7733283f,
- 0xeae41086, 0x525877e3, 0x40edd80d, 0xf851bf68,
- 0xf02bf8a1, 0x48979fc4, 0x5a22302a, 0xe29e574f,
- 0x7f496ff6, 0xc7f50893, 0xd540a77d, 0x6dfcc018,
- 0x359fd04e, 0x8d23b72b, 0x9f9618c5, 0x272a7fa0,
- 0xbafd4719, 0x0241207c, 0x10f48f92, 0xa848e8f7,
- 0x9b14583d, 0x23a83f58, 0x311d90b6, 0x89a1f7d3,
- 0x1476cf6a, 0xaccaa80f, 0xbe7f07e1, 0x06c36084,
- 0x5ea070d2, 0xe61c17b7, 0xf4a9b859, 0x4c15df3c,
- 0xd1c2e785, 0x697e80e0, 0x7bcb2f0e, 0xc377486b,
- 0xcb0d0fa2, 0x73b168c7, 0x6104c729, 0xd9b8a04c,
- 0x446f98f5, 0xfcd3ff90, 0xee66507e, 0x56da371b,
- 0x0eb9274d, 0xb6054028, 0xa4b0efc6, 0x1c0c88a3,
- 0x81dbb01a, 0x3967d77f, 0x2bd27891, 0x936e1ff4,
- 0x3b26f703, 0x839a9066, 0x912f3f88, 0x299358ed,
- 0xb4446054, 0x0cf80731, 0x1e4da8df, 0xa6f1cfba,
- 0xfe92dfec, 0x462eb889, 0x549b1767, 0xec277002,
- 0x71f048bb, 0xc94c2fde, 0xdbf98030, 0x6345e755,
- 0x6b3fa09c, 0xd383c7f9, 0xc1366817, 0x798a0f72,
- 0xe45d37cb, 0x5ce150ae, 0x4e54ff40, 0xf6e89825,
- 0xae8b8873, 0x1637ef16, 0x048240f8, 0xbc3e279d,
- 0x21e91f24, 0x99557841, 0x8be0d7af, 0x335cb0ca,
- 0xed59b63b, 0x55e5d15e, 0x47507eb0, 0xffec19d5,
- 0x623b216c, 0xda874609, 0xc832e9e7, 0x708e8e82,
- 0x28ed9ed4, 0x9051f9b1, 0x82e4565f, 0x3a58313a,
- 0xa78f0983, 0x1f336ee6, 0x0d86c108, 0xb53aa66d,
- 0xbd40e1a4, 0x05fc86c1, 0x1749292f, 0xaff54e4a,
- 0x322276f3, 0x8a9e1196, 0x982bbe78, 0x2097d91d,
- 0x78f4c94b, 0xc048ae2e, 0xd2fd01c0, 0x6a4166a5,
- 0xf7965e1c, 0x4f2a3979, 0x5d9f9697, 0xe523f1f2,
- 0x4d6b1905, 0xf5d77e60, 0xe762d18e, 0x5fdeb6eb,
- 0xc2098e52, 0x7ab5e937, 0x680046d9, 0xd0bc21bc,
- 0x88df31ea, 0x3063568f, 0x22d6f961, 0x9a6a9e04,
- 0x07bda6bd, 0xbf01c1d8, 0xadb46e36, 0x15080953,
- 0x1d724e9a, 0xa5ce29ff, 0xb77b8611, 0x0fc7e174,
- 0x9210d9cd, 0x2aacbea8, 0x38191146, 0x80a57623,
- 0xd8c66675, 0x607a0110, 0x72cfaefe, 0xca73c99b,
- 0x57a4f122, 0xef189647, 0xfdad39a9, 0x45115ecc,
- 0x764dee06, 0xcef18963, 0xdc44268d, 0x64f841e8,
- 0xf92f7951, 0x41931e34, 0x5326b1da, 0xeb9ad6bf,
- 0xb3f9c6e9, 0x0b45a18c, 0x19f00e62, 0xa14c6907,
- 0x3c9b51be, 0x842736db, 0x96929935, 0x2e2efe50,
- 0x2654b999, 0x9ee8defc, 0x8c5d7112, 0x34e11677,
- 0xa9362ece, 0x118a49ab, 0x033fe645, 0xbb838120,
- 0xe3e09176, 0x5b5cf613, 0x49e959fd, 0xf1553e98,
- 0x6c820621, 0xd43e6144, 0xc68bceaa, 0x7e37a9cf,
- 0xd67f4138, 0x6ec3265d, 0x7c7689b3, 0xc4caeed6,
- 0x591dd66f, 0xe1a1b10a, 0xf3141ee4, 0x4ba87981,
- 0x13cb69d7, 0xab770eb2, 0xb9c2a15c, 0x017ec639,
- 0x9ca9fe80, 0x241599e5, 0x36a0360b, 0x8e1c516e,
- 0x866616a7, 0x3eda71c2, 0x2c6fde2c, 0x94d3b949,
- 0x090481f0, 0xb1b8e695, 0xa30d497b, 0x1bb12e1e,
- 0x43d23e48, 0xfb6e592d, 0xe9dbf6c3, 0x516791a6,
- 0xccb0a91f, 0x740cce7a, 0x66b96194, 0xde0506f1,
-#endif /* CRC32_SLICE4 || CRC32_SLICE8 */
-#if defined(CRC32_SLICE8)
- 0x00000000, 0x3d6029b0, 0x7ac05360, 0x47a07ad0,
- 0xf580a6c0, 0xc8e08f70, 0x8f40f5a0, 0xb220dc10,
- 0x30704bc1, 0x0d106271, 0x4ab018a1, 0x77d03111,
- 0xc5f0ed01, 0xf890c4b1, 0xbf30be61, 0x825097d1,
- 0x60e09782, 0x5d80be32, 0x1a20c4e2, 0x2740ed52,
- 0x95603142, 0xa80018f2, 0xefa06222, 0xd2c04b92,
- 0x5090dc43, 0x6df0f5f3, 0x2a508f23, 0x1730a693,
- 0xa5107a83, 0x98705333, 0xdfd029e3, 0xe2b00053,
- 0xc1c12f04, 0xfca106b4, 0xbb017c64, 0x866155d4,
- 0x344189c4, 0x0921a074, 0x4e81daa4, 0x73e1f314,
- 0xf1b164c5, 0xccd14d75, 0x8b7137a5, 0xb6111e15,
- 0x0431c205, 0x3951ebb5, 0x7ef19165, 0x4391b8d5,
- 0xa121b886, 0x9c419136, 0xdbe1ebe6, 0xe681c256,
- 0x54a11e46, 0x69c137f6, 0x2e614d26, 0x13016496,
- 0x9151f347, 0xac31daf7, 0xeb91a027, 0xd6f18997,
- 0x64d15587, 0x59b17c37, 0x1e1106e7, 0x23712f57,
- 0x58f35849, 0x659371f9, 0x22330b29, 0x1f532299,
- 0xad73fe89, 0x9013d739, 0xd7b3ade9, 0xead38459,
- 0x68831388, 0x55e33a38, 0x124340e8, 0x2f236958,
- 0x9d03b548, 0xa0639cf8, 0xe7c3e628, 0xdaa3cf98,
- 0x3813cfcb, 0x0573e67b, 0x42d39cab, 0x7fb3b51b,
- 0xcd93690b, 0xf0f340bb, 0xb7533a6b, 0x8a3313db,
- 0x0863840a, 0x3503adba, 0x72a3d76a, 0x4fc3feda,
- 0xfde322ca, 0xc0830b7a, 0x872371aa, 0xba43581a,
- 0x9932774d, 0xa4525efd, 0xe3f2242d, 0xde920d9d,
- 0x6cb2d18d, 0x51d2f83d, 0x167282ed, 0x2b12ab5d,
- 0xa9423c8c, 0x9422153c, 0xd3826fec, 0xeee2465c,
- 0x5cc29a4c, 0x61a2b3fc, 0x2602c92c, 0x1b62e09c,
- 0xf9d2e0cf, 0xc4b2c97f, 0x8312b3af, 0xbe729a1f,
- 0x0c52460f, 0x31326fbf, 0x7692156f, 0x4bf23cdf,
- 0xc9a2ab0e, 0xf4c282be, 0xb362f86e, 0x8e02d1de,
- 0x3c220dce, 0x0142247e, 0x46e25eae, 0x7b82771e,
- 0xb1e6b092, 0x8c869922, 0xcb26e3f2, 0xf646ca42,
- 0x44661652, 0x79063fe2, 0x3ea64532, 0x03c66c82,
- 0x8196fb53, 0xbcf6d2e3, 0xfb56a833, 0xc6368183,
- 0x74165d93, 0x49767423, 0x0ed60ef3, 0x33b62743,
- 0xd1062710, 0xec660ea0, 0xabc67470, 0x96a65dc0,
- 0x248681d0, 0x19e6a860, 0x5e46d2b0, 0x6326fb00,
- 0xe1766cd1, 0xdc164561, 0x9bb63fb1, 0xa6d61601,
- 0x14f6ca11, 0x2996e3a1, 0x6e369971, 0x5356b0c1,
- 0x70279f96, 0x4d47b626, 0x0ae7ccf6, 0x3787e546,
- 0x85a73956, 0xb8c710e6, 0xff676a36, 0xc2074386,
- 0x4057d457, 0x7d37fde7, 0x3a978737, 0x07f7ae87,
- 0xb5d77297, 0x88b75b27, 0xcf1721f7, 0xf2770847,
- 0x10c70814, 0x2da721a4, 0x6a075b74, 0x576772c4,
- 0xe547aed4, 0xd8278764, 0x9f87fdb4, 0xa2e7d404,
- 0x20b743d5, 0x1dd76a65, 0x5a7710b5, 0x67173905,
- 0xd537e515, 0xe857cca5, 0xaff7b675, 0x92979fc5,
- 0xe915e8db, 0xd475c16b, 0x93d5bbbb, 0xaeb5920b,
- 0x1c954e1b, 0x21f567ab, 0x66551d7b, 0x5b3534cb,
- 0xd965a31a, 0xe4058aaa, 0xa3a5f07a, 0x9ec5d9ca,
- 0x2ce505da, 0x11852c6a, 0x562556ba, 0x6b457f0a,
- 0x89f57f59, 0xb49556e9, 0xf3352c39, 0xce550589,
- 0x7c75d999, 0x4115f029, 0x06b58af9, 0x3bd5a349,
- 0xb9853498, 0x84e51d28, 0xc34567f8, 0xfe254e48,
- 0x4c059258, 0x7165bbe8, 0x36c5c138, 0x0ba5e888,
- 0x28d4c7df, 0x15b4ee6f, 0x521494bf, 0x6f74bd0f,
- 0xdd54611f, 0xe03448af, 0xa794327f, 0x9af41bcf,
- 0x18a48c1e, 0x25c4a5ae, 0x6264df7e, 0x5f04f6ce,
- 0xed242ade, 0xd044036e, 0x97e479be, 0xaa84500e,
- 0x4834505d, 0x755479ed, 0x32f4033d, 0x0f942a8d,
- 0xbdb4f69d, 0x80d4df2d, 0xc774a5fd, 0xfa148c4d,
- 0x78441b9c, 0x4524322c, 0x028448fc, 0x3fe4614c,
- 0x8dc4bd5c, 0xb0a494ec, 0xf704ee3c, 0xca64c78c,
- 0x00000000, 0xcb5cd3a5, 0x4dc8a10b, 0x869472ae,
- 0x9b914216, 0x50cd91b3, 0xd659e31d, 0x1d0530b8,
- 0xec53826d, 0x270f51c8, 0xa19b2366, 0x6ac7f0c3,
- 0x77c2c07b, 0xbc9e13de, 0x3a0a6170, 0xf156b2d5,
- 0x03d6029b, 0xc88ad13e, 0x4e1ea390, 0x85427035,
- 0x9847408d, 0x531b9328, 0xd58fe186, 0x1ed33223,
- 0xef8580f6, 0x24d95353, 0xa24d21fd, 0x6911f258,
- 0x7414c2e0, 0xbf481145, 0x39dc63eb, 0xf280b04e,
- 0x07ac0536, 0xccf0d693, 0x4a64a43d, 0x81387798,
- 0x9c3d4720, 0x57619485, 0xd1f5e62b, 0x1aa9358e,
- 0xebff875b, 0x20a354fe, 0xa6372650, 0x6d6bf5f5,
- 0x706ec54d, 0xbb3216e8, 0x3da66446, 0xf6fab7e3,
- 0x047a07ad, 0xcf26d408, 0x49b2a6a6, 0x82ee7503,
- 0x9feb45bb, 0x54b7961e, 0xd223e4b0, 0x197f3715,
- 0xe82985c0, 0x23755665, 0xa5e124cb, 0x6ebdf76e,
- 0x73b8c7d6, 0xb8e41473, 0x3e7066dd, 0xf52cb578,
- 0x0f580a6c, 0xc404d9c9, 0x4290ab67, 0x89cc78c2,
- 0x94c9487a, 0x5f959bdf, 0xd901e971, 0x125d3ad4,
- 0xe30b8801, 0x28575ba4, 0xaec3290a, 0x659ffaaf,
- 0x789aca17, 0xb3c619b2, 0x35526b1c, 0xfe0eb8b9,
- 0x0c8e08f7, 0xc7d2db52, 0x4146a9fc, 0x8a1a7a59,
- 0x971f4ae1, 0x5c439944, 0xdad7ebea, 0x118b384f,
- 0xe0dd8a9a, 0x2b81593f, 0xad152b91, 0x6649f834,
- 0x7b4cc88c, 0xb0101b29, 0x36846987, 0xfdd8ba22,
- 0x08f40f5a, 0xc3a8dcff, 0x453cae51, 0x8e607df4,
- 0x93654d4c, 0x58399ee9, 0xdeadec47, 0x15f13fe2,
- 0xe4a78d37, 0x2ffb5e92, 0xa96f2c3c, 0x6233ff99,
- 0x7f36cf21, 0xb46a1c84, 0x32fe6e2a, 0xf9a2bd8f,
- 0x0b220dc1, 0xc07ede64, 0x46eaacca, 0x8db67f6f,
- 0x90b34fd7, 0x5bef9c72, 0xdd7beedc, 0x16273d79,
- 0xe7718fac, 0x2c2d5c09, 0xaab92ea7, 0x61e5fd02,
- 0x7ce0cdba, 0xb7bc1e1f, 0x31286cb1, 0xfa74bf14,
- 0x1eb014d8, 0xd5ecc77d, 0x5378b5d3, 0x98246676,
- 0x852156ce, 0x4e7d856b, 0xc8e9f7c5, 0x03b52460,
- 0xf2e396b5, 0x39bf4510, 0xbf2b37be, 0x7477e41b,
- 0x6972d4a3, 0xa22e0706, 0x24ba75a8, 0xefe6a60d,
- 0x1d661643, 0xd63ac5e6, 0x50aeb748, 0x9bf264ed,
- 0x86f75455, 0x4dab87f0, 0xcb3ff55e, 0x006326fb,
- 0xf135942e, 0x3a69478b, 0xbcfd3525, 0x77a1e680,
- 0x6aa4d638, 0xa1f8059d, 0x276c7733, 0xec30a496,
- 0x191c11ee, 0xd240c24b, 0x54d4b0e5, 0x9f886340,
- 0x828d53f8, 0x49d1805d, 0xcf45f2f3, 0x04192156,
- 0xf54f9383, 0x3e134026, 0xb8873288, 0x73dbe12d,
- 0x6eded195, 0xa5820230, 0x2316709e, 0xe84aa33b,
- 0x1aca1375, 0xd196c0d0, 0x5702b27e, 0x9c5e61db,
- 0x815b5163, 0x4a0782c6, 0xcc93f068, 0x07cf23cd,
- 0xf6999118, 0x3dc542bd, 0xbb513013, 0x700de3b6,
- 0x6d08d30e, 0xa65400ab, 0x20c07205, 0xeb9ca1a0,
- 0x11e81eb4, 0xdab4cd11, 0x5c20bfbf, 0x977c6c1a,
- 0x8a795ca2, 0x41258f07, 0xc7b1fda9, 0x0ced2e0c,
- 0xfdbb9cd9, 0x36e74f7c, 0xb0733dd2, 0x7b2fee77,
- 0x662adecf, 0xad760d6a, 0x2be27fc4, 0xe0beac61,
- 0x123e1c2f, 0xd962cf8a, 0x5ff6bd24, 0x94aa6e81,
- 0x89af5e39, 0x42f38d9c, 0xc467ff32, 0x0f3b2c97,
- 0xfe6d9e42, 0x35314de7, 0xb3a53f49, 0x78f9ecec,
- 0x65fcdc54, 0xaea00ff1, 0x28347d5f, 0xe368aefa,
- 0x16441b82, 0xdd18c827, 0x5b8cba89, 0x90d0692c,
- 0x8dd55994, 0x46898a31, 0xc01df89f, 0x0b412b3a,
- 0xfa1799ef, 0x314b4a4a, 0xb7df38e4, 0x7c83eb41,
- 0x6186dbf9, 0xaada085c, 0x2c4e7af2, 0xe712a957,
- 0x15921919, 0xdececabc, 0x585ab812, 0x93066bb7,
- 0x8e035b0f, 0x455f88aa, 0xc3cbfa04, 0x089729a1,
- 0xf9c19b74, 0x329d48d1, 0xb4093a7f, 0x7f55e9da,
- 0x6250d962, 0xa90c0ac7, 0x2f987869, 0xe4c4abcc,
- 0x00000000, 0xa6770bb4, 0x979f1129, 0x31e81a9d,
- 0xf44f2413, 0x52382fa7, 0x63d0353a, 0xc5a73e8e,
- 0x33ef4e67, 0x959845d3, 0xa4705f4e, 0x020754fa,
- 0xc7a06a74, 0x61d761c0, 0x503f7b5d, 0xf64870e9,
- 0x67de9cce, 0xc1a9977a, 0xf0418de7, 0x56368653,
- 0x9391b8dd, 0x35e6b369, 0x040ea9f4, 0xa279a240,
- 0x5431d2a9, 0xf246d91d, 0xc3aec380, 0x65d9c834,
- 0xa07ef6ba, 0x0609fd0e, 0x37e1e793, 0x9196ec27,
- 0xcfbd399c, 0x69ca3228, 0x582228b5, 0xfe552301,
- 0x3bf21d8f, 0x9d85163b, 0xac6d0ca6, 0x0a1a0712,
- 0xfc5277fb, 0x5a257c4f, 0x6bcd66d2, 0xcdba6d66,
- 0x081d53e8, 0xae6a585c, 0x9f8242c1, 0x39f54975,
- 0xa863a552, 0x0e14aee6, 0x3ffcb47b, 0x998bbfcf,
- 0x5c2c8141, 0xfa5b8af5, 0xcbb39068, 0x6dc49bdc,
- 0x9b8ceb35, 0x3dfbe081, 0x0c13fa1c, 0xaa64f1a8,
- 0x6fc3cf26, 0xc9b4c492, 0xf85cde0f, 0x5e2bd5bb,
- 0x440b7579, 0xe27c7ecd, 0xd3946450, 0x75e36fe4,
- 0xb044516a, 0x16335ade, 0x27db4043, 0x81ac4bf7,
- 0x77e43b1e, 0xd19330aa, 0xe07b2a37, 0x460c2183,
- 0x83ab1f0d, 0x25dc14b9, 0x14340e24, 0xb2430590,
- 0x23d5e9b7, 0x85a2e203, 0xb44af89e, 0x123df32a,
- 0xd79acda4, 0x71edc610, 0x4005dc8d, 0xe672d739,
- 0x103aa7d0, 0xb64dac64, 0x87a5b6f9, 0x21d2bd4d,
- 0xe47583c3, 0x42028877, 0x73ea92ea, 0xd59d995e,
- 0x8bb64ce5, 0x2dc14751, 0x1c295dcc, 0xba5e5678,
- 0x7ff968f6, 0xd98e6342, 0xe86679df, 0x4e11726b,
- 0xb8590282, 0x1e2e0936, 0x2fc613ab, 0x89b1181f,
- 0x4c162691, 0xea612d25, 0xdb8937b8, 0x7dfe3c0c,
- 0xec68d02b, 0x4a1fdb9f, 0x7bf7c102, 0xdd80cab6,
- 0x1827f438, 0xbe50ff8c, 0x8fb8e511, 0x29cfeea5,
- 0xdf879e4c, 0x79f095f8, 0x48188f65, 0xee6f84d1,
- 0x2bc8ba5f, 0x8dbfb1eb, 0xbc57ab76, 0x1a20a0c2,
- 0x8816eaf2, 0x2e61e146, 0x1f89fbdb, 0xb9fef06f,
- 0x7c59cee1, 0xda2ec555, 0xebc6dfc8, 0x4db1d47c,
- 0xbbf9a495, 0x1d8eaf21, 0x2c66b5bc, 0x8a11be08,
- 0x4fb68086, 0xe9c18b32, 0xd82991af, 0x7e5e9a1b,
- 0xefc8763c, 0x49bf7d88, 0x78576715, 0xde206ca1,
- 0x1b87522f, 0xbdf0599b, 0x8c184306, 0x2a6f48b2,
- 0xdc27385b, 0x7a5033ef, 0x4bb82972, 0xedcf22c6,
- 0x28681c48, 0x8e1f17fc, 0xbff70d61, 0x198006d5,
- 0x47abd36e, 0xe1dcd8da, 0xd034c247, 0x7643c9f3,
- 0xb3e4f77d, 0x1593fcc9, 0x247be654, 0x820cede0,
- 0x74449d09, 0xd23396bd, 0xe3db8c20, 0x45ac8794,
- 0x800bb91a, 0x267cb2ae, 0x1794a833, 0xb1e3a387,
- 0x20754fa0, 0x86024414, 0xb7ea5e89, 0x119d553d,
- 0xd43a6bb3, 0x724d6007, 0x43a57a9a, 0xe5d2712e,
- 0x139a01c7, 0xb5ed0a73, 0x840510ee, 0x22721b5a,
- 0xe7d525d4, 0x41a22e60, 0x704a34fd, 0xd63d3f49,
- 0xcc1d9f8b, 0x6a6a943f, 0x5b828ea2, 0xfdf58516,
- 0x3852bb98, 0x9e25b02c, 0xafcdaab1, 0x09baa105,
- 0xfff2d1ec, 0x5985da58, 0x686dc0c5, 0xce1acb71,
- 0x0bbdf5ff, 0xadcafe4b, 0x9c22e4d6, 0x3a55ef62,
- 0xabc30345, 0x0db408f1, 0x3c5c126c, 0x9a2b19d8,
- 0x5f8c2756, 0xf9fb2ce2, 0xc813367f, 0x6e643dcb,
- 0x982c4d22, 0x3e5b4696, 0x0fb35c0b, 0xa9c457bf,
- 0x6c636931, 0xca146285, 0xfbfc7818, 0x5d8b73ac,
- 0x03a0a617, 0xa5d7ada3, 0x943fb73e, 0x3248bc8a,
- 0xf7ef8204, 0x519889b0, 0x6070932d, 0xc6079899,
- 0x304fe870, 0x9638e3c4, 0xa7d0f959, 0x01a7f2ed,
- 0xc400cc63, 0x6277c7d7, 0x539fdd4a, 0xf5e8d6fe,
- 0x647e3ad9, 0xc209316d, 0xf3e12bf0, 0x55962044,
- 0x90311eca, 0x3646157e, 0x07ae0fe3, 0xa1d90457,
- 0x579174be, 0xf1e67f0a, 0xc00e6597, 0x66796e23,
- 0xa3de50ad, 0x05a95b19, 0x34414184, 0x92364a30,
- 0x00000000, 0xccaa009e, 0x4225077d, 0x8e8f07e3,
- 0x844a0efa, 0x48e00e64, 0xc66f0987, 0x0ac50919,
- 0xd3e51bb5, 0x1f4f1b2b, 0x91c01cc8, 0x5d6a1c56,
- 0x57af154f, 0x9b0515d1, 0x158a1232, 0xd92012ac,
- 0x7cbb312b, 0xb01131b5, 0x3e9e3656, 0xf23436c8,
- 0xf8f13fd1, 0x345b3f4f, 0xbad438ac, 0x767e3832,
- 0xaf5e2a9e, 0x63f42a00, 0xed7b2de3, 0x21d12d7d,
- 0x2b142464, 0xe7be24fa, 0x69312319, 0xa59b2387,
- 0xf9766256, 0x35dc62c8, 0xbb53652b, 0x77f965b5,
- 0x7d3c6cac, 0xb1966c32, 0x3f196bd1, 0xf3b36b4f,
- 0x2a9379e3, 0xe639797d, 0x68b67e9e, 0xa41c7e00,
- 0xaed97719, 0x62737787, 0xecfc7064, 0x205670fa,
- 0x85cd537d, 0x496753e3, 0xc7e85400, 0x0b42549e,
- 0x01875d87, 0xcd2d5d19, 0x43a25afa, 0x8f085a64,
- 0x562848c8, 0x9a824856, 0x140d4fb5, 0xd8a74f2b,
- 0xd2624632, 0x1ec846ac, 0x9047414f, 0x5ced41d1,
- 0x299dc2ed, 0xe537c273, 0x6bb8c590, 0xa712c50e,
- 0xadd7cc17, 0x617dcc89, 0xeff2cb6a, 0x2358cbf4,
- 0xfa78d958, 0x36d2d9c6, 0xb85dde25, 0x74f7debb,
- 0x7e32d7a2, 0xb298d73c, 0x3c17d0df, 0xf0bdd041,
- 0x5526f3c6, 0x998cf358, 0x1703f4bb, 0xdba9f425,
- 0xd16cfd3c, 0x1dc6fda2, 0x9349fa41, 0x5fe3fadf,
- 0x86c3e873, 0x4a69e8ed, 0xc4e6ef0e, 0x084cef90,
- 0x0289e689, 0xce23e617, 0x40ace1f4, 0x8c06e16a,
- 0xd0eba0bb, 0x1c41a025, 0x92cea7c6, 0x5e64a758,
- 0x54a1ae41, 0x980baedf, 0x1684a93c, 0xda2ea9a2,
- 0x030ebb0e, 0xcfa4bb90, 0x412bbc73, 0x8d81bced,
- 0x8744b5f4, 0x4beeb56a, 0xc561b289, 0x09cbb217,
- 0xac509190, 0x60fa910e, 0xee7596ed, 0x22df9673,
- 0x281a9f6a, 0xe4b09ff4, 0x6a3f9817, 0xa6959889,
- 0x7fb58a25, 0xb31f8abb, 0x3d908d58, 0xf13a8dc6,
- 0xfbff84df, 0x37558441, 0xb9da83a2, 0x7570833c,
- 0x533b85da, 0x9f918544, 0x111e82a7, 0xddb48239,
- 0xd7718b20, 0x1bdb8bbe, 0x95548c5d, 0x59fe8cc3,
- 0x80de9e6f, 0x4c749ef1, 0xc2fb9912, 0x0e51998c,
- 0x04949095, 0xc83e900b, 0x46b197e8, 0x8a1b9776,
- 0x2f80b4f1, 0xe32ab46f, 0x6da5b38c, 0xa10fb312,
- 0xabcaba0b, 0x6760ba95, 0xe9efbd76, 0x2545bde8,
- 0xfc65af44, 0x30cfafda, 0xbe40a839, 0x72eaa8a7,
- 0x782fa1be, 0xb485a120, 0x3a0aa6c3, 0xf6a0a65d,
- 0xaa4de78c, 0x66e7e712, 0xe868e0f1, 0x24c2e06f,
- 0x2e07e976, 0xe2ade9e8, 0x6c22ee0b, 0xa088ee95,
- 0x79a8fc39, 0xb502fca7, 0x3b8dfb44, 0xf727fbda,
- 0xfde2f2c3, 0x3148f25d, 0xbfc7f5be, 0x736df520,
- 0xd6f6d6a7, 0x1a5cd639, 0x94d3d1da, 0x5879d144,
- 0x52bcd85d, 0x9e16d8c3, 0x1099df20, 0xdc33dfbe,
- 0x0513cd12, 0xc9b9cd8c, 0x4736ca6f, 0x8b9ccaf1,
- 0x8159c3e8, 0x4df3c376, 0xc37cc495, 0x0fd6c40b,
- 0x7aa64737, 0xb60c47a9, 0x3883404a, 0xf42940d4,
- 0xfeec49cd, 0x32464953, 0xbcc94eb0, 0x70634e2e,
- 0xa9435c82, 0x65e95c1c, 0xeb665bff, 0x27cc5b61,
- 0x2d095278, 0xe1a352e6, 0x6f2c5505, 0xa386559b,
- 0x061d761c, 0xcab77682, 0x44387161, 0x889271ff,
- 0x825778e6, 0x4efd7878, 0xc0727f9b, 0x0cd87f05,
- 0xd5f86da9, 0x19526d37, 0x97dd6ad4, 0x5b776a4a,
- 0x51b26353, 0x9d1863cd, 0x1397642e, 0xdf3d64b0,
- 0x83d02561, 0x4f7a25ff, 0xc1f5221c, 0x0d5f2282,
- 0x079a2b9b, 0xcb302b05, 0x45bf2ce6, 0x89152c78,
- 0x50353ed4, 0x9c9f3e4a, 0x121039a9, 0xdeba3937,
- 0xd47f302e, 0x18d530b0, 0x965a3753, 0x5af037cd,
- 0xff6b144a, 0x33c114d4, 0xbd4e1337, 0x71e413a9,
- 0x7b211ab0, 0xb78b1a2e, 0x39041dcd, 0xf5ae1d53,
- 0x2c8e0fff, 0xe0240f61, 0x6eab0882, 0xa201081c,
- 0xa8c40105, 0x646e019b, 0xeae10678, 0x264b06e6,
-#endif /* CRC32_SLICE8 */
-};
diff --git a/util/compress/libdeflate/lib/crc32_vec_template.h b/util/compress/libdeflate/lib/crc32_vec_template.h
deleted file mode 100644
index 9a2ad5bde..000000000
--- a/util/compress/libdeflate/lib/crc32_vec_template.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * crc32_vec_template.h - template for vectorized CRC-32 implementations
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#define CRC32_SLICE1 1
-static u32 crc32_slice1(u32, const u8 *, size_t);
-
-/*
- * Template for vectorized CRC-32 implementations.
- *
- * Note: on unaligned ends of the buffer, we fall back to crc32_slice1() instead
- * of crc32_slice8() because only a few bytes need to be processed, so a smaller
- * table is preferable.
- */
-static u32 ATTRIBUTES
-FUNCNAME(u32 remainder, const u8 *p, size_t size)
-{
- if ((uintptr_t)p % IMPL_ALIGNMENT) {
- size_t n = MIN(size, -(uintptr_t)p % IMPL_ALIGNMENT);
-
- remainder = crc32_slice1(remainder, p, n);
- p += n;
- size -= n;
- }
- if (size >= IMPL_SEGMENT_SIZE) {
- remainder = FUNCNAME_ALIGNED(remainder, (const void *)p,
- size / IMPL_SEGMENT_SIZE);
- p += size - (size % IMPL_SEGMENT_SIZE);
- size %= IMPL_SEGMENT_SIZE;
- }
- return crc32_slice1(remainder, p, size);
-}
-
-#undef FUNCNAME
-#undef FUNCNAME_ALIGNED
-#undef ATTRIBUTES
-#undef IMPL_ALIGNMENT
-#undef IMPL_SEGMENT_SIZE
diff --git a/util/compress/libdeflate/lib/decompress_template.h b/util/compress/libdeflate/lib/decompress_template.h
deleted file mode 100644
index c6bcf9f52..000000000
--- a/util/compress/libdeflate/lib/decompress_template.h
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * decompress_template.h
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * This is the actual DEFLATE decompression routine, lifted out of
- * deflate_decompress.c so that it can be compiled multiple times with different
- * target instruction sets.
- */
-
-static enum libdeflate_result ATTRIBUTES
-FUNCNAME(struct libdeflate_decompressor * restrict d,
- const void * restrict in, size_t in_nbytes,
- void * restrict out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret, size_t *actual_out_nbytes_ret)
-{
- u8 *out_next = out;
- u8 * const out_end = out_next + out_nbytes_avail;
- const u8 *in_next = in;
- const u8 * const in_end = in_next + in_nbytes;
- bitbuf_t bitbuf = 0;
- unsigned bitsleft = 0;
- size_t overrun_count = 0;
- unsigned i;
- unsigned is_final_block;
- unsigned block_type;
- u16 len;
- u16 nlen;
- unsigned num_litlen_syms;
- unsigned num_offset_syms;
- u16 tmp16;
- u32 tmp32;
-
-next_block:
- /* Starting to read the next block. */
- ;
-
- STATIC_ASSERT(CAN_ENSURE(1 + 2 + 5 + 5 + 4));
- ENSURE_BITS(1 + 2 + 5 + 5 + 4);
-
- /* BFINAL: 1 bit */
- is_final_block = POP_BITS(1);
-
- /* BTYPE: 2 bits */
- block_type = POP_BITS(2);
-
- if (block_type == DEFLATE_BLOCKTYPE_DYNAMIC_HUFFMAN) {
-
- /* Dynamic Huffman block. */
-
- /* The order in which precode lengths are stored. */
- static const u8 deflate_precode_lens_permutation[DEFLATE_NUM_PRECODE_SYMS] = {
- 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
- };
-
- unsigned num_explicit_precode_lens;
-
- /* Read the codeword length counts. */
-
- STATIC_ASSERT(DEFLATE_NUM_LITLEN_SYMS == ((1 << 5) - 1) + 257);
- num_litlen_syms = POP_BITS(5) + 257;
-
- STATIC_ASSERT(DEFLATE_NUM_OFFSET_SYMS == ((1 << 5) - 1) + 1);
- num_offset_syms = POP_BITS(5) + 1;
-
- STATIC_ASSERT(DEFLATE_NUM_PRECODE_SYMS == ((1 << 4) - 1) + 4);
- num_explicit_precode_lens = POP_BITS(4) + 4;
-
- d->static_codes_loaded = false;
-
- /* Read the precode codeword lengths. */
- STATIC_ASSERT(DEFLATE_MAX_PRE_CODEWORD_LEN == (1 << 3) - 1);
- for (i = 0; i < num_explicit_precode_lens; i++) {
- ENSURE_BITS(3);
- d->u.precode_lens[deflate_precode_lens_permutation[i]] = POP_BITS(3);
- }
-
- for (; i < DEFLATE_NUM_PRECODE_SYMS; i++)
- d->u.precode_lens[deflate_precode_lens_permutation[i]] = 0;
-
- /* Build the decode table for the precode. */
- SAFETY_CHECK(build_precode_decode_table(d));
-
- /* Expand the literal/length and offset codeword lengths. */
- for (i = 0; i < num_litlen_syms + num_offset_syms; ) {
- u32 entry;
- unsigned presym;
- u8 rep_val;
- unsigned rep_count;
-
- ENSURE_BITS(DEFLATE_MAX_PRE_CODEWORD_LEN + 7);
-
- /* (The code below assumes that the precode decode table
- * does not have any subtables.) */
- STATIC_ASSERT(PRECODE_TABLEBITS == DEFLATE_MAX_PRE_CODEWORD_LEN);
-
- /* Read the next precode symbol. */
- entry = d->u.l.precode_decode_table[BITS(DEFLATE_MAX_PRE_CODEWORD_LEN)];
- REMOVE_BITS(entry & HUFFDEC_LENGTH_MASK);
- presym = entry >> HUFFDEC_RESULT_SHIFT;
-
- if (presym < 16) {
- /* Explicit codeword length */
- d->u.l.lens[i++] = presym;
- continue;
- }
-
- /* Run-length encoded codeword lengths */
-
- /* Note: we don't need verify that the repeat count
- * doesn't overflow the number of elements, since we
- * have enough extra spaces to allow for the worst-case
- * overflow (138 zeroes when only 1 length was
- * remaining).
- *
- * In the case of the small repeat counts (presyms 16
- * and 17), it is fastest to always write the maximum
- * number of entries. That gets rid of branches that
- * would otherwise be required.
- *
- * It is not just because of the numerical order that
- * our checks go in the order 'presym < 16', 'presym ==
- * 16', and 'presym == 17'. For typical data this is
- * ordered from most frequent to least frequent case.
- */
- STATIC_ASSERT(DEFLATE_MAX_LENS_OVERRUN == 138 - 1);
-
- if (presym == 16) {
- /* Repeat the previous length 3 - 6 times */
- SAFETY_CHECK(i != 0);
- rep_val = d->u.l.lens[i - 1];
- STATIC_ASSERT(3 + ((1 << 2) - 1) == 6);
- rep_count = 3 + POP_BITS(2);
- d->u.l.lens[i + 0] = rep_val;
- d->u.l.lens[i + 1] = rep_val;
- d->u.l.lens[i + 2] = rep_val;
- d->u.l.lens[i + 3] = rep_val;
- d->u.l.lens[i + 4] = rep_val;
- d->u.l.lens[i + 5] = rep_val;
- i += rep_count;
- } else if (presym == 17) {
- /* Repeat zero 3 - 10 times */
- STATIC_ASSERT(3 + ((1 << 3) - 1) == 10);
- rep_count = 3 + POP_BITS(3);
- d->u.l.lens[i + 0] = 0;
- d->u.l.lens[i + 1] = 0;
- d->u.l.lens[i + 2] = 0;
- d->u.l.lens[i + 3] = 0;
- d->u.l.lens[i + 4] = 0;
- d->u.l.lens[i + 5] = 0;
- d->u.l.lens[i + 6] = 0;
- d->u.l.lens[i + 7] = 0;
- d->u.l.lens[i + 8] = 0;
- d->u.l.lens[i + 9] = 0;
- i += rep_count;
- } else {
- /* Repeat zero 11 - 138 times */
- STATIC_ASSERT(11 + ((1 << 7) - 1) == 138);
- rep_count = 11 + POP_BITS(7);
- memset(&d->u.l.lens[i], 0,
- rep_count * sizeof(d->u.l.lens[i]));
- i += rep_count;
- }
- }
- } else if (block_type == DEFLATE_BLOCKTYPE_UNCOMPRESSED) {
-
- /* Uncompressed block: copy 'len' bytes literally from the input
- * buffer to the output buffer. */
-
- ALIGN_INPUT();
-
- SAFETY_CHECK(in_end - in_next >= 4);
-
- len = READ_U16();
- nlen = READ_U16();
-
- SAFETY_CHECK(len == (u16)~nlen);
- if (unlikely(len > out_end - out_next))
- return LIBDEFLATE_INSUFFICIENT_SPACE;
- SAFETY_CHECK(len <= in_end - in_next);
-
- memcpy(out_next, in_next, len);
- in_next += len;
- out_next += len;
-
- goto block_done;
-
- } else {
- SAFETY_CHECK(block_type == DEFLATE_BLOCKTYPE_STATIC_HUFFMAN);
-
- /*
- * Static Huffman block: build the decode tables for the static
- * codes. Skip doing so if the tables are already set up from
- * an earlier static block; this speeds up decompression of
- * degenerate input of many empty or very short static blocks.
- *
- * Afterwards, the remainder is the same as decompressing a
- * dynamic Huffman block.
- */
-
- if (d->static_codes_loaded)
- goto have_decode_tables;
-
- d->static_codes_loaded = true;
-
- STATIC_ASSERT(DEFLATE_NUM_LITLEN_SYMS == 288);
- STATIC_ASSERT(DEFLATE_NUM_OFFSET_SYMS == 32);
-
- for (i = 0; i < 144; i++)
- d->u.l.lens[i] = 8;
- for (; i < 256; i++)
- d->u.l.lens[i] = 9;
- for (; i < 280; i++)
- d->u.l.lens[i] = 7;
- for (; i < 288; i++)
- d->u.l.lens[i] = 8;
-
- for (; i < 288 + 32; i++)
- d->u.l.lens[i] = 5;
-
- num_litlen_syms = 288;
- num_offset_syms = 32;
- }
-
- /* Decompressing a Huffman block (either dynamic or static) */
-
- SAFETY_CHECK(build_offset_decode_table(d, num_litlen_syms, num_offset_syms));
- SAFETY_CHECK(build_litlen_decode_table(d, num_litlen_syms, num_offset_syms));
-have_decode_tables:
-
- /* The main DEFLATE decode loop */
- for (;;) {
- u32 entry;
- u32 length;
- u32 offset;
- const u8 *src;
- u8 *dst;
-
- /* Decode a litlen symbol. */
- ENSURE_BITS(DEFLATE_MAX_LITLEN_CODEWORD_LEN);
- entry = d->u.litlen_decode_table[BITS(LITLEN_TABLEBITS)];
- if (entry & HUFFDEC_SUBTABLE_POINTER) {
- /* Litlen subtable required (uncommon case) */
- REMOVE_BITS(LITLEN_TABLEBITS);
- entry = d->u.litlen_decode_table[
- ((entry >> HUFFDEC_RESULT_SHIFT) & 0xFFFF) +
- BITS(entry & HUFFDEC_LENGTH_MASK)];
- }
- REMOVE_BITS(entry & HUFFDEC_LENGTH_MASK);
- if (entry & HUFFDEC_LITERAL) {
- /* Literal */
- if (unlikely(out_next == out_end))
- return LIBDEFLATE_INSUFFICIENT_SPACE;
- *out_next++ = (u8)(entry >> HUFFDEC_RESULT_SHIFT);
- continue;
- }
-
- /* Match or end-of-block */
-
- entry >>= HUFFDEC_RESULT_SHIFT;
- ENSURE_BITS(MAX_ENSURE);
-
- /* Pop the extra length bits and add them to the length base to
- * produce the full length. */
- length = (entry >> HUFFDEC_LENGTH_BASE_SHIFT) +
- POP_BITS(entry & HUFFDEC_EXTRA_LENGTH_BITS_MASK);
-
- /* The match destination must not end after the end of the
- * output buffer. For efficiency, combine this check with the
- * end-of-block check. We're using 0 for the special
- * end-of-block length, so subtract 1 and it turn it into
- * SIZE_MAX. */
- STATIC_ASSERT(HUFFDEC_END_OF_BLOCK_LENGTH == 0);
- if (unlikely((size_t)length - 1 >= out_end - out_next)) {
- if (unlikely(length != HUFFDEC_END_OF_BLOCK_LENGTH))
- return LIBDEFLATE_INSUFFICIENT_SPACE;
- goto block_done;
- }
-
- /* Decode the match offset. */
-
- entry = d->offset_decode_table[BITS(OFFSET_TABLEBITS)];
- if (entry & HUFFDEC_SUBTABLE_POINTER) {
- /* Offset subtable required (uncommon case) */
- REMOVE_BITS(OFFSET_TABLEBITS);
- entry = d->offset_decode_table[
- ((entry >> HUFFDEC_RESULT_SHIFT) & 0xFFFF) +
- BITS(entry & HUFFDEC_LENGTH_MASK)];
- }
- REMOVE_BITS(entry & HUFFDEC_LENGTH_MASK);
- entry >>= HUFFDEC_RESULT_SHIFT;
-
- STATIC_ASSERT(CAN_ENSURE(DEFLATE_MAX_EXTRA_LENGTH_BITS +
- DEFLATE_MAX_OFFSET_CODEWORD_LEN) &&
- CAN_ENSURE(DEFLATE_MAX_EXTRA_OFFSET_BITS));
- if (!CAN_ENSURE(DEFLATE_MAX_EXTRA_LENGTH_BITS +
- DEFLATE_MAX_OFFSET_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_OFFSET_BITS))
- ENSURE_BITS(DEFLATE_MAX_EXTRA_OFFSET_BITS);
-
- /* Pop the extra offset bits and add them to the offset base to
- * produce the full offset. */
- offset = (entry & HUFFDEC_OFFSET_BASE_MASK) +
- POP_BITS(entry >> HUFFDEC_EXTRA_OFFSET_BITS_SHIFT);
-
- /* The match source must not begin before the beginning of the
- * output buffer. */
- SAFETY_CHECK(offset <= out_next - (const u8 *)out);
-
- /*
- * Copy the match: 'length' bytes at 'out_next - offset' to
- * 'out_next', possibly overlapping. If the match doesn't end
- * too close to the end of the buffer and offset >= WORDBYTES ||
- * offset == 1, take a fast path which copies a word at a time
- * -- potentially more than the length of the match, but that's
- * fine as long as we check for enough extra space.
- *
- * The remaining cases are not performance-critical so are
- * handled by a simple byte-by-byte copy.
- */
-
- src = out_next - offset;
- dst = out_next;
- out_next += length;
-
- if (UNALIGNED_ACCESS_IS_FAST &&
- /* max overrun is writing 3 words for a min length match */
- likely(out_end - out_next >=
- 3 * WORDBYTES - DEFLATE_MIN_MATCH_LEN)) {
- if (offset >= WORDBYTES) { /* words don't overlap? */
- copy_word_unaligned(src, dst);
- src += WORDBYTES;
- dst += WORDBYTES;
- copy_word_unaligned(src, dst);
- src += WORDBYTES;
- dst += WORDBYTES;
- do {
- copy_word_unaligned(src, dst);
- src += WORDBYTES;
- dst += WORDBYTES;
- } while (dst < out_next);
- } else if (offset == 1) {
- /* RLE encoding of previous byte, common if the
- * data contains many repeated bytes */
- machine_word_t v = repeat_byte(*src);
-
- store_word_unaligned(v, dst);
- dst += WORDBYTES;
- store_word_unaligned(v, dst);
- dst += WORDBYTES;
- do {
- store_word_unaligned(v, dst);
- dst += WORDBYTES;
- } while (dst < out_next);
- } else {
- *dst++ = *src++;
- *dst++ = *src++;
- do {
- *dst++ = *src++;
- } while (dst < out_next);
- }
- } else {
- STATIC_ASSERT(DEFLATE_MIN_MATCH_LEN == 3);
- *dst++ = *src++;
- *dst++ = *src++;
- do {
- *dst++ = *src++;
- } while (dst < out_next);
- }
- }
-
-block_done:
- /* Finished decoding a block. */
-
- if (!is_final_block)
- goto next_block;
-
- /* That was the last block. */
-
- /* Discard any readahead bits and check for excessive overread */
- ALIGN_INPUT();
-
- /* Optionally return the actual number of bytes read */
- if (actual_in_nbytes_ret)
- *actual_in_nbytes_ret = in_next - (u8 *)in;
-
- /* Optionally return the actual number of bytes written */
- if (actual_out_nbytes_ret) {
- *actual_out_nbytes_ret = out_next - (u8 *)out;
- } else {
- if (out_next != out_end)
- return LIBDEFLATE_SHORT_OUTPUT;
- }
- return LIBDEFLATE_SUCCESS;
-}
-
-#undef FUNCNAME
-#undef ATTRIBUTES
diff --git a/util/compress/libdeflate/lib/deflate_compress.c b/util/compress/libdeflate/lib/deflate_compress.c
deleted file mode 100644
index cf4379824..000000000
--- a/util/compress/libdeflate/lib/deflate_compress.c
+++ /dev/null
@@ -1,2854 +0,0 @@
-/*
- * deflate_compress.c - a compressor for DEFLATE
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "deflate_compress.h"
-#include "deflate_constants.h"
-#include "unaligned.h"
-
-#include "libdeflate.h"
-
-/*
- * By default, the near-optimal parsing algorithm is enabled at compression
- * level 8 and above. The near-optimal parsing algorithm produces a compression
- * ratio significantly better than the greedy and lazy algorithms implemented
- * here, and also the algorithm used by zlib at level 9. However, it is slow.
- */
-#define SUPPORT_NEAR_OPTIMAL_PARSING 1
-
-/*
- * Define to 1 to maintain the full map from match offsets to offset slots.
- * This slightly speeds up translations of match offsets to offset slots, but it
- * uses 32769 bytes of memory rather than the 512 bytes used by the condensed
- * map. The speedup provided by the larger map is most helpful when the
- * near-optimal parsing algorithm is being used.
- */
-#define USE_FULL_OFFSET_SLOT_FAST SUPPORT_NEAR_OPTIMAL_PARSING
-
-/*
- * DEFLATE uses a 32768 byte sliding window; set the matchfinder parameters
- * appropriately.
- */
-#define MATCHFINDER_WINDOW_ORDER 15
-
-#include "hc_matchfinder.h"
-#if SUPPORT_NEAR_OPTIMAL_PARSING
-# include "bt_matchfinder.h"
-#endif
-
-/*
- * The compressor always chooses a block of at least MIN_BLOCK_LENGTH bytes,
- * except if the last block has to be shorter.
- */
-#define MIN_BLOCK_LENGTH 10000
-
-/*
- * The compressor attempts to end blocks after SOFT_MAX_BLOCK_LENGTH bytes, but
- * the final length might be slightly longer due to matches extending beyond
- * this limit.
- */
-#define SOFT_MAX_BLOCK_LENGTH 300000
-
-/*
- * The number of observed matches or literals that represents sufficient data to
- * decide whether the current block should be terminated or not.
- */
-#define NUM_OBSERVATIONS_PER_BLOCK_CHECK 512
-
-
-#if SUPPORT_NEAR_OPTIMAL_PARSING
-/* Constants specific to the near-optimal parsing algorithm */
-
-/*
- * The maximum number of matches the matchfinder can find at a single position.
- * Since the matchfinder never finds more than one match for the same length,
- * presuming one of each possible length is sufficient for an upper bound.
- * (This says nothing about whether it is worthwhile to consider so many
- * matches; this is just defining the worst case.)
- */
-# define MAX_MATCHES_PER_POS (DEFLATE_MAX_MATCH_LEN - DEFLATE_MIN_MATCH_LEN + 1)
-
-/*
- * The number of lz_match structures in the match cache, excluding the extra
- * "overflow" entries. This value should be high enough so that nearly the
- * time, all matches found in a given block can fit in the match cache.
- * However, fallback behavior (immediately terminating the block) on cache
- * overflow is still required.
- */
-# define CACHE_LENGTH (SOFT_MAX_BLOCK_LENGTH * 5)
-
-#endif /* SUPPORT_NEAR_OPTIMAL_PARSING */
-
-/*
- * These are the compressor-side limits on the codeword lengths for each Huffman
- * code. To make outputting bits slightly faster, some of these limits are
- * lower than the limits defined by the DEFLATE format. This does not
- * significantly affect the compression ratio, at least for the block lengths we
- * use.
- */
-#define MAX_LITLEN_CODEWORD_LEN 14
-#define MAX_OFFSET_CODEWORD_LEN DEFLATE_MAX_OFFSET_CODEWORD_LEN
-#define MAX_PRE_CODEWORD_LEN DEFLATE_MAX_PRE_CODEWORD_LEN
-
-/* Table: length slot => length slot base value */
-static const unsigned deflate_length_slot_base[] = {
- 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ,
- 11 , 13 , 15 , 17 , 19 , 23 , 27 , 31 ,
- 35 , 43 , 51 , 59 , 67 , 83 , 99 , 115 ,
- 131 , 163 , 195 , 227 , 258 ,
-};
-
-/* Table: length slot => number of extra length bits */
-static const u8 deflate_extra_length_bits[] = {
- 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,
- 1 , 1 , 1 , 1 , 2 , 2 , 2 , 2 ,
- 3 , 3 , 3 , 3 , 4 , 4 , 4 , 4 ,
- 5 , 5 , 5 , 5 , 0 ,
-};
-
-/* Table: offset slot => offset slot base value */
-static const unsigned deflate_offset_slot_base[] = {
- 1 , 2 , 3 , 4 , 5 , 7 , 9 , 13 ,
- 17 , 25 , 33 , 49 , 65 , 97 , 129 , 193 ,
- 257 , 385 , 513 , 769 , 1025 , 1537 , 2049 , 3073 ,
- 4097 , 6145 , 8193 , 12289 , 16385 , 24577 ,
-};
-
-/* Table: offset slot => number of extra offset bits */
-static const u8 deflate_extra_offset_bits[] = {
- 0 , 0 , 0 , 0 , 1 , 1 , 2 , 2 ,
- 3 , 3 , 4 , 4 , 5 , 5 , 6 , 6 ,
- 7 , 7 , 8 , 8 , 9 , 9 , 10 , 10 ,
- 11 , 11 , 12 , 12 , 13 , 13 ,
-};
-
-/* Table: length => length slot */
-static const u8 deflate_length_slot[DEFLATE_MAX_MATCH_LEN + 1] = {
- 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12,
- 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16,
- 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18,
- 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20,
- 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
- 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
- 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
- 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
- 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25,
- 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
- 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26,
- 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
- 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 27, 27, 28,
-};
-
-/* The order in which precode codeword lengths are stored */
-static const u8 deflate_precode_lens_permutation[DEFLATE_NUM_PRECODE_SYMS] = {
- 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
-};
-
-/* Codewords for the DEFLATE Huffman codes. */
-struct deflate_codewords {
- u32 litlen[DEFLATE_NUM_LITLEN_SYMS];
- u32 offset[DEFLATE_NUM_OFFSET_SYMS];
-};
-
-/* Codeword lengths (in bits) for the DEFLATE Huffman codes.
- * A zero length means the corresponding symbol had zero frequency. */
-struct deflate_lens {
- u8 litlen[DEFLATE_NUM_LITLEN_SYMS];
- u8 offset[DEFLATE_NUM_OFFSET_SYMS];
-};
-
-/* Codewords and lengths for the DEFLATE Huffman codes. */
-struct deflate_codes {
- struct deflate_codewords codewords;
- struct deflate_lens lens;
-};
-
-/* Symbol frequency counters for the DEFLATE Huffman codes. */
-struct deflate_freqs {
- u32 litlen[DEFLATE_NUM_LITLEN_SYMS];
- u32 offset[DEFLATE_NUM_OFFSET_SYMS];
-};
-
-#if SUPPORT_NEAR_OPTIMAL_PARSING
-
-/* Costs for the near-optimal parsing algorithm. */
-struct deflate_costs {
-
- /* The cost to output each possible literal. */
- u32 literal[DEFLATE_NUM_LITERALS];
-
- /* The cost to output each possible match length. */
- u32 length[DEFLATE_MAX_MATCH_LEN + 1];
-
- /* The cost to output a match offset of each possible offset slot. */
- u32 offset_slot[DEFLATE_NUM_OFFSET_SYMS];
-};
-
-/*
- * COST_SHIFT is a scaling factor that makes it possible to consider fractional
- * bit costs. A token requiring 'n' bits to represent has cost n << COST_SHIFT.
- *
- * Note: this is only useful as a statistical trick for when the true costs are
- * unknown. In reality, each token in DEFLATE requires a whole number of bits
- * to output.
- */
-#define COST_SHIFT 3
-
-/*
- * The NOSTAT_BITS value for a given alphabet is the number of bits assumed to
- * be needed to output a symbol that was unused in the previous optimization
- * pass. Assigning a default cost allows the symbol to be used in the next
- * optimization pass. However, the cost should be relatively high because the
- * symbol probably won't be used very many times (if at all).
- */
-#define LITERAL_NOSTAT_BITS 13
-#define LENGTH_NOSTAT_BITS 13
-#define OFFSET_NOSTAT_BITS 10
-
-#endif /* SUPPORT_NEAR_OPTIMAL_PARSING */
-
-/*
- * Represents a run of literals followed by a match or end-of-block. This
- * struct is needed to temporarily store items chosen by the parser, since items
- * cannot be written until all items for the block have been chosen and the
- * block's Huffman codes have been computed.
- */
-struct deflate_sequence {
-
- /* Bits 0..22: the number of literals in this run. This may be 0 and
- * can be at most about SOFT_MAX_BLOCK_LENGTH. The literals are not
- * stored explicitly in this structure; instead, they are read directly
- * from the uncompressed data.
- *
- * Bits 23..31: the length of the match which follows the literals, or 0
- * if this literal run was the last in the block, so there is no match
- * which follows it. */
- u32 litrunlen_and_length;
-
- /* If 'length' doesn't indicate end-of-block, then this is the offset of
- * the match which follows the literals. */
- u16 offset;
-
- /* If 'length' doesn't indicate end-of-block, then this is the offset
- * symbol of the match which follows the literals. */
- u8 offset_symbol;
-
- /* If 'length' doesn't indicate end-of-block, then this is the length
- * slot of the match which follows the literals. */
- u8 length_slot;
-};
-
-#if SUPPORT_NEAR_OPTIMAL_PARSING
-
-/*
- * This structure represents a byte position in the input data and a node in the
- * graph of possible match/literal choices for the current block.
- *
- * Logically, each incoming edge to this node is labeled with a literal or a
- * match that can be taken to reach this position from an earlier position; and
- * each outgoing edge from this node is labeled with a literal or a match that
- * can be taken to advance from this position to a later position.
- *
- * But these "edges" are actually stored elsewhere (in 'match_cache'). Here we
- * associate with each node just two pieces of information:
- *
- * 'cost_to_end' is the minimum cost to reach the end of the block from
- * this position.
- *
- * 'item' represents the literal or match that must be chosen from here to
- * reach the end of the block with the minimum cost. Equivalently, this
- * can be interpreted as the label of the outgoing edge on the minimum-cost
- * path to the "end of block" node from this node.
- */
-struct deflate_optimum_node {
-
- u32 cost_to_end;
-
- /*
- * Notes on the match/literal representation used here:
- *
- * The low bits of 'item' are the length: 1 if this is a literal,
- * or the match length if this is a match.
- *
- * The high bits of 'item' are the actual literal byte if this is a
- * literal, or the match offset if this is a match.
- */
-#define OPTIMUM_OFFSET_SHIFT 9
-#define OPTIMUM_LEN_MASK (((u32)1 << OPTIMUM_OFFSET_SHIFT) - 1)
- u32 item;
-
-};
-
-#endif /* SUPPORT_NEAR_OPTIMAL_PARSING */
-
-/* Block split statistics. See "Block splitting algorithm" below. */
-#define NUM_LITERAL_OBSERVATION_TYPES 8
-#define NUM_MATCH_OBSERVATION_TYPES 2
-#define NUM_OBSERVATION_TYPES (NUM_LITERAL_OBSERVATION_TYPES + NUM_MATCH_OBSERVATION_TYPES)
-struct block_split_stats {
- u32 new_observations[NUM_OBSERVATION_TYPES];
- u32 observations[NUM_OBSERVATION_TYPES];
- u32 num_new_observations;
- u32 num_observations;
-};
-
-/* The main DEFLATE compressor structure */
-struct libdeflate_compressor {
-
- /* Pointer to the compress() implementation chosen at allocation time */
- size_t (*impl)(struct libdeflate_compressor *,
- const u8 *, size_t, u8 *, size_t);
-
- /* Frequency counters for the current block */
- struct deflate_freqs freqs;
-
- /* Dynamic Huffman codes for the current block */
- struct deflate_codes codes;
-
- /* Static Huffman codes */
- struct deflate_codes static_codes;
-
- /* Block split statistics for the currently pending block */
- struct block_split_stats split_stats;
-
- /* A table for fast lookups of offset slot by match offset.
- *
- * If the full table is being used, it is a direct mapping from offset
- * to offset slot.
- *
- * If the condensed table is being used, the first 256 entries map
- * directly to the offset slots of offsets 1 through 256. The next 256
- * entries map to the offset slots for the remaining offsets, stepping
- * through the offsets with a stride of 128. This relies on the fact
- * that each of the remaining offset slots contains at least 128 offsets
- * and has an offset base that is a multiple of 128. */
-#if USE_FULL_OFFSET_SLOT_FAST
- u8 offset_slot_fast[DEFLATE_MAX_MATCH_OFFSET + 1];
-#else
- u8 offset_slot_fast[512];
-#endif
-
- /* The "nice" match length: if a match of this length is found, choose
- * it immediately without further consideration. */
- unsigned nice_match_length;
-
- /* The maximum search depth: consider at most this many potential
- * matches at each position. */
- unsigned max_search_depth;
-
- /* The compression level with which this compressor was created. */
- unsigned compression_level;
-
- /* Anything smaller than this we won't bother trying to compress. */
- unsigned min_size_to_compress;
-
- /* Temporary space for Huffman code output */
- u32 precode_freqs[DEFLATE_NUM_PRECODE_SYMS];
- u8 precode_lens[DEFLATE_NUM_PRECODE_SYMS];
- u32 precode_codewords[DEFLATE_NUM_PRECODE_SYMS];
- unsigned precode_items[DEFLATE_NUM_LITLEN_SYMS + DEFLATE_NUM_OFFSET_SYMS];
- unsigned num_litlen_syms;
- unsigned num_offset_syms;
- unsigned num_explicit_lens;
- unsigned num_precode_items;
-
- union {
- /* Data for greedy or lazy parsing */
- struct {
- /* Hash chain matchfinder */
- struct hc_matchfinder hc_mf;
-
- /* The matches and literals that the parser has chosen
- * for the current block. The required length of this
- * array is limited by the maximum number of matches
- * that can ever be chosen for a single block, plus one
- * for the special entry at the end. */
- struct deflate_sequence sequences[
- DIV_ROUND_UP(SOFT_MAX_BLOCK_LENGTH,
- DEFLATE_MIN_MATCH_LEN) + 1];
- } g; /* (g)reedy */
-
- #if SUPPORT_NEAR_OPTIMAL_PARSING
- /* Data for near-optimal parsing */
- struct {
-
- /* Binary tree matchfinder */
- struct bt_matchfinder bt_mf;
-
- /*
- * Cached matches for the current block. This array
- * contains the matches that were found at each position
- * in the block. Specifically, for each position, there
- * is a list of matches found at that position, if any,
- * sorted by strictly increasing length. In addition,
- * following the matches for each position, there is a
- * special 'struct lz_match' whose 'length' member
- * contains the number of matches found at that
- * position, and whose 'offset' member contains the
- * literal at that position.
- *
- * Note: in rare cases, there will be a very high number
- * of matches in the block and this array will overflow.
- * If this happens, we force the end of the current
- * block. CACHE_LENGTH is the length at which we
- * actually check for overflow. The extra slots beyond
- * this are enough to absorb the worst case overflow,
- * which occurs if starting at &match_cache[CACHE_LENGTH
- * - 1], we write MAX_MATCHES_PER_POS matches and a
- * match count header, then skip searching for matches
- * at 'DEFLATE_MAX_MATCH_LEN - 1' positions and write
- * the match count header for each.
- */
- struct lz_match match_cache[CACHE_LENGTH +
- MAX_MATCHES_PER_POS +
- DEFLATE_MAX_MATCH_LEN - 1];
-
- /*
- * Array of nodes, one per position, for running the
- * minimum-cost path algorithm.
- *
- * This array must be large enough to accommodate the
- * worst-case number of nodes, which occurs if we find a
- * match of length DEFLATE_MAX_MATCH_LEN at position
- * SOFT_MAX_BLOCK_LENGTH - 1, producing a block of
- * length SOFT_MAX_BLOCK_LENGTH - 1 +
- * DEFLATE_MAX_MATCH_LEN. Add one for the end-of-block
- * node.
- */
- struct deflate_optimum_node optimum_nodes[SOFT_MAX_BLOCK_LENGTH - 1 +
- DEFLATE_MAX_MATCH_LEN + 1];
-
- /* The current cost model being used. */
- struct deflate_costs costs;
-
- unsigned num_optim_passes;
- } n; /* (n)ear-optimal */
- #endif /* SUPPORT_NEAR_OPTIMAL_PARSING */
-
- } p; /* (p)arser */
-};
-
-/*
- * The type for the bitbuffer variable, which temporarily holds bits that are
- * being packed into bytes and written to the output buffer. For best
- * performance, this should have size equal to a machine word.
- */
-typedef machine_word_t bitbuf_t;
-#define BITBUF_NBITS (8 * sizeof(bitbuf_t))
-
-/* Can the specified number of bits always be added to 'bitbuf' after any
- * pending bytes have been flushed? */
-#define CAN_BUFFER(n) ((n) <= BITBUF_NBITS - 7)
-
-/*
- * Structure to keep track of the current state of sending bits to the
- * compressed output buffer.
- */
-struct deflate_output_bitstream {
-
- /* Bits that haven't yet been written to the output buffer. */
- bitbuf_t bitbuf;
-
- /* Number of bits currently held in @bitbuf. */
- unsigned bitcount;
-
- /* Pointer to the beginning of the output buffer. */
- u8 *begin;
-
- /* Pointer to the position in the output buffer at which the next byte
- * should be written. */
- u8 *next;
-
- /* Pointer just past the end of the output buffer. */
- u8 *end;
-};
-
-/*
- * OUTPUT_END_PADDING is the size, in bytes, of the extra space that must be
- * present following os->end, in order to not overrun the buffer when generating
- * output. When UNALIGNED_ACCESS_IS_FAST, we need at least sizeof(bitbuf_t)
- * bytes for put_unaligned_leword(). Otherwise we need only 1 byte. However,
- * to make the compression algorithm produce the same result on all CPU
- * architectures (which is sometimes desirable), we have to unconditionally use
- * the maximum for any CPU, which is sizeof(bitbuf_t) == 8.
- */
-#define OUTPUT_END_PADDING 8
-
-/* Initialize the output bitstream. 'size' is assumed to be at least
- * OUTPUT_END_PADDING. */
-static void
-deflate_init_output(struct deflate_output_bitstream *os,
- void *buffer, size_t size)
-{
- os->bitbuf = 0;
- os->bitcount = 0;
- os->begin = buffer;
- os->next = os->begin;
- os->end = os->begin + size - OUTPUT_END_PADDING;
-}
-
-/* Add some bits to the bitbuffer variable of the output bitstream. The caller
- * must make sure there is enough room. */
-static forceinline void
-deflate_add_bits(struct deflate_output_bitstream *os,
- const bitbuf_t bits, const unsigned num_bits)
-{
- os->bitbuf |= bits << os->bitcount;
- os->bitcount += num_bits;
-}
-
-/* Flush bits from the bitbuffer variable to the output buffer. */
-static forceinline void
-deflate_flush_bits(struct deflate_output_bitstream *os)
-{
- if (UNALIGNED_ACCESS_IS_FAST) {
- /* Flush a whole word (branchlessly). */
- put_unaligned_leword(os->bitbuf, os->next);
- os->bitbuf >>= os->bitcount & ~7;
- os->next += MIN(os->end - os->next, os->bitcount >> 3);
- os->bitcount &= 7;
- } else {
- /* Flush a byte at a time. */
- while (os->bitcount >= 8) {
- *os->next = os->bitbuf;
- if (os->next != os->end)
- os->next++;
- os->bitcount -= 8;
- os->bitbuf >>= 8;
- }
- }
-}
-
-/* Align the bitstream on a byte boundary. */
-static forceinline void
-deflate_align_bitstream(struct deflate_output_bitstream *os)
-{
- os->bitcount += -os->bitcount & 7;
- deflate_flush_bits(os);
-}
-
-/*
- * Flush any remaining bits to the output buffer if needed. Return the total
- * number of bytes written to the output buffer, or 0 if an overflow occurred.
- */
-static size_t
-deflate_flush_output(struct deflate_output_bitstream *os)
-{
- if (os->next == os->end) /* overflow? */
- return 0;
-
- while ((int)os->bitcount > 0) {
- *os->next++ = os->bitbuf;
- os->bitcount -= 8;
- os->bitbuf >>= 8;
- }
-
- return os->next - os->begin;
-}
-
-/* Given the binary tree node A[subtree_idx] whose children already
- * satisfy the maxheap property, swap the node with its greater child
- * until it is greater than both its children, so that the maxheap
- * property is satisfied in the subtree rooted at A[subtree_idx]. */
-static void
-heapify_subtree(u32 A[], unsigned length, unsigned subtree_idx)
-{
- unsigned parent_idx;
- unsigned child_idx;
- u32 v;
-
- v = A[subtree_idx];
- parent_idx = subtree_idx;
- while ((child_idx = parent_idx * 2) <= length) {
- if (child_idx < length && A[child_idx + 1] > A[child_idx])
- child_idx++;
- if (v >= A[child_idx])
- break;
- A[parent_idx] = A[child_idx];
- parent_idx = child_idx;
- }
- A[parent_idx] = v;
-}
-
-/* Rearrange the array 'A' so that it satisfies the maxheap property.
- * 'A' uses 1-based indices, so the children of A[i] are A[i*2] and A[i*2 + 1].
- */
-static void
-heapify_array(u32 A[], unsigned length)
-{
- unsigned subtree_idx;
-
- for (subtree_idx = length / 2; subtree_idx >= 1; subtree_idx--)
- heapify_subtree(A, length, subtree_idx);
-}
-
-/*
- * Sort the array 'A', which contains 'length' unsigned 32-bit integers.
- *
- * Note: name this function heap_sort() instead of heapsort() to avoid colliding
- * with heapsort() from stdlib.h on BSD-derived systems --- though this isn't
- * necessary when compiling with -D_ANSI_SOURCE, which is the better solution.
- */
-static void
-heap_sort(u32 A[], unsigned length)
-{
- A--; /* Use 1-based indices */
-
- heapify_array(A, length);
-
- while (length >= 2) {
- u32 tmp = A[length];
- A[length] = A[1];
- A[1] = tmp;
- length--;
- heapify_subtree(A, length, 1);
- }
-}
-
-#define NUM_SYMBOL_BITS 10
-#define SYMBOL_MASK ((1 << NUM_SYMBOL_BITS) - 1)
-
-#define GET_NUM_COUNTERS(num_syms) ((((num_syms) + 3 / 4) + 3) & ~3)
-/*
- * Sort the symbols primarily by frequency and secondarily by symbol
- * value. Discard symbols with zero frequency and fill in an array with
- * the remaining symbols, along with their frequencies. The low
- * NUM_SYMBOL_BITS bits of each array entry will contain the symbol
- * value, and the remaining bits will contain the frequency.
- *
- * @num_syms
- * Number of symbols in the alphabet.
- * Can't be greater than (1 << NUM_SYMBOL_BITS).
- *
- * @freqs[num_syms]
- * The frequency of each symbol.
- *
- * @lens[num_syms]
- * An array that eventually will hold the length of each codeword.
- * This function only fills in the codeword lengths for symbols that
- * have zero frequency, which are not well defined per se but will
- * be set to 0.
- *
- * @symout[num_syms]
- * The output array, described above.
- *
- * Returns the number of entries in 'symout' that were filled. This is
- * the number of symbols that have nonzero frequency.
- */
-static unsigned
-sort_symbols(unsigned num_syms, const u32 freqs[restrict],
- u8 lens[restrict], u32 symout[restrict])
-{
- unsigned sym;
- unsigned i;
- unsigned num_used_syms;
- unsigned num_counters;
- unsigned counters[GET_NUM_COUNTERS(DEFLATE_MAX_NUM_SYMS)];
-
- /* We rely on heapsort, but with an added optimization. Since
- * it's common for most symbol frequencies to be low, we first do
- * a count sort using a limited number of counters. High
- * frequencies will be counted in the last counter, and only they
- * will be sorted with heapsort.
- *
- * Note: with more symbols, it is generally beneficial to have more
- * counters. About 1 counter per 4 symbols seems fast.
- *
- * Note: I also tested radix sort, but even for large symbol
- * counts (> 255) and frequencies bounded at 16 bits (enabling
- * radix sort by just two base-256 digits), it didn't seem any
- * faster than the method implemented here.
- *
- * Note: I tested the optimized quicksort implementation from
- * glibc (with indirection overhead removed), but it was only
- * marginally faster than the simple heapsort implemented here.
- *
- * Tests were done with building the codes for LZX. Results may
- * vary for different compression algorithms...! */
-
- num_counters = GET_NUM_COUNTERS(num_syms);
-
- memset(counters, 0, num_counters * sizeof(counters[0]));
-
- /* Count the frequencies. */
- for (sym = 0; sym < num_syms; sym++)
- counters[MIN(freqs[sym], num_counters - 1)]++;
-
- /* Make the counters cumulative, ignoring the zero-th, which
- * counted symbols with zero frequency. As a side effect, this
- * calculates the number of symbols with nonzero frequency. */
- num_used_syms = 0;
- for (i = 1; i < num_counters; i++) {
- unsigned count = counters[i];
- counters[i] = num_used_syms;
- num_used_syms += count;
- }
-
- /* Sort nonzero-frequency symbols using the counters. At the
- * same time, set the codeword lengths of zero-frequency symbols
- * to 0. */
- for (sym = 0; sym < num_syms; sym++) {
- u32 freq = freqs[sym];
- if (freq != 0) {
- symout[counters[MIN(freq, num_counters - 1)]++] =
- sym | (freq << NUM_SYMBOL_BITS);
- } else {
- lens[sym] = 0;
- }
- }
-
- /* Sort the symbols counted in the last counter. */
- heap_sort(symout + counters[num_counters - 2],
- counters[num_counters - 1] - counters[num_counters - 2]);
-
- return num_used_syms;
-}
-
-/*
- * Build the Huffman tree.
- *
- * This is an optimized implementation that
- * (a) takes advantage of the frequencies being already sorted;
- * (b) only generates non-leaf nodes, since the non-leaf nodes of a
- * Huffman tree are sufficient to generate a canonical code;
- * (c) Only stores parent pointers, not child pointers;
- * (d) Produces the nodes in the same memory used for input
- * frequency information.
- *
- * Array 'A', which contains 'sym_count' entries, is used for both input
- * and output. For this function, 'sym_count' must be at least 2.
- *
- * For input, the array must contain the frequencies of the symbols,
- * sorted in increasing order. Specifically, each entry must contain a
- * frequency left shifted by NUM_SYMBOL_BITS bits. Any data in the low
- * NUM_SYMBOL_BITS bits of the entries will be ignored by this function.
- * Although these bits will, in fact, contain the symbols that correspond
- * to the frequencies, this function is concerned with frequencies only
- * and keeps the symbols as-is.
- *
- * For output, this function will produce the non-leaf nodes of the
- * Huffman tree. These nodes will be stored in the first (sym_count - 1)
- * entries of the array. Entry A[sym_count - 2] will represent the root
- * node. Each other node will contain the zero-based index of its parent
- * node in 'A', left shifted by NUM_SYMBOL_BITS bits. The low
- * NUM_SYMBOL_BITS bits of each entry in A will be kept as-is. Again,
- * note that although these low bits will, in fact, contain a symbol
- * value, this symbol will have *no relationship* with the Huffman tree
- * node that happens to occupy the same slot. This is because this
- * implementation only generates the non-leaf nodes of the tree.
- */
-static void
-build_tree(u32 A[], unsigned sym_count)
-{
- /* Index, in 'A', of next lowest frequency symbol that has not
- * yet been processed. */
- unsigned i = 0;
-
- /* Index, in 'A', of next lowest frequency parentless non-leaf
- * node; or, if equal to 'e', then no such node exists yet. */
- unsigned b = 0;
-
- /* Index, in 'A', of next node to allocate as a non-leaf. */
- unsigned e = 0;
-
- do {
- unsigned m, n;
- u32 freq_shifted;
-
- /* Choose the two next lowest frequency entries. */
-
- if (i != sym_count &&
- (b == e || (A[i] >> NUM_SYMBOL_BITS) <= (A[b] >> NUM_SYMBOL_BITS)))
- m = i++;
- else
- m = b++;
-
- if (i != sym_count &&
- (b == e || (A[i] >> NUM_SYMBOL_BITS) <= (A[b] >> NUM_SYMBOL_BITS)))
- n = i++;
- else
- n = b++;
-
- /* Allocate a non-leaf node and link the entries to it.
- *
- * If we link an entry that we're visiting for the first
- * time (via index 'i'), then we're actually linking a
- * leaf node and it will have no effect, since the leaf
- * will be overwritten with a non-leaf when index 'e'
- * catches up to it. But it's not any slower to
- * unconditionally set the parent index.
- *
- * We also compute the frequency of the non-leaf node as
- * the sum of its two children's frequencies. */
-
- freq_shifted = (A[m] & ~SYMBOL_MASK) + (A[n] & ~SYMBOL_MASK);
-
- A[m] = (A[m] & SYMBOL_MASK) | (e << NUM_SYMBOL_BITS);
- A[n] = (A[n] & SYMBOL_MASK) | (e << NUM_SYMBOL_BITS);
- A[e] = (A[e] & SYMBOL_MASK) | freq_shifted;
- e++;
- } while (sym_count - e > 1);
- /* When just one entry remains, it is a "leaf" that was
- * linked to some other node. We ignore it, since the
- * rest of the array contains the non-leaves which we
- * need. (Note that we're assuming the cases with 0 or 1
- * symbols were handled separately.) */
-}
-
-/*
- * Given the stripped-down Huffman tree constructed by build_tree(),
- * determine the number of codewords that should be assigned each
- * possible length, taking into account the length-limited constraint.
- *
- * @A
- * The array produced by build_tree(), containing parent index
- * information for the non-leaf nodes of the Huffman tree. Each
- * entry in this array is a node; a node's parent always has a
- * greater index than that node itself. This function will
- * overwrite the parent index information in this array, so
- * essentially it will destroy the tree. However, the data in the
- * low NUM_SYMBOL_BITS of each entry will be preserved.
- *
- * @root_idx
- * The 0-based index of the root node in 'A', and consequently one
- * less than the number of tree node entries in 'A'. (Or, really 2
- * less than the actual length of 'A'.)
- *
- * @len_counts
- * An array of length ('max_codeword_len' + 1) in which the number of
- * codewords having each length <= max_codeword_len will be
- * returned.
- *
- * @max_codeword_len
- * The maximum permissible codeword length.
- */
-static void
-compute_length_counts(u32 A[restrict], unsigned root_idx,
- unsigned len_counts[restrict], unsigned max_codeword_len)
-{
- unsigned len;
- int node;
-
- /* The key observations are:
- *
- * (1) We can traverse the non-leaf nodes of the tree, always
- * visiting a parent before its children, by simply iterating
- * through the array in reverse order. Consequently, we can
- * compute the depth of each node in one pass, overwriting the
- * parent indices with depths.
- *
- * (2) We can initially assume that in the real Huffman tree,
- * both children of the root are leaves. This corresponds to two
- * codewords of length 1. Then, whenever we visit a (non-leaf)
- * node during the traversal, we modify this assumption to
- * account for the current node *not* being a leaf, but rather
- * its two children being leaves. This causes the loss of one
- * codeword for the current depth and the addition of two
- * codewords for the current depth plus one.
- *
- * (3) We can handle the length-limited constraint fairly easily
- * by simply using the largest length available when a depth
- * exceeds max_codeword_len.
- */
-
- for (len = 0; len <= max_codeword_len; len++)
- len_counts[len] = 0;
- len_counts[1] = 2;
-
- /* Set the root node's depth to 0. */
- A[root_idx] &= SYMBOL_MASK;
-
- for (node = root_idx - 1; node >= 0; node--) {
-
- /* Calculate the depth of this node. */
-
- unsigned parent = A[node] >> NUM_SYMBOL_BITS;
- unsigned parent_depth = A[parent] >> NUM_SYMBOL_BITS;
- unsigned depth = parent_depth + 1;
- unsigned len = depth;
-
- /* Set the depth of this node so that it is available
- * when its children (if any) are processed. */
-
- A[node] = (A[node] & SYMBOL_MASK) | (depth << NUM_SYMBOL_BITS);
-
- /* If needed, decrease the length to meet the
- * length-limited constraint. This is not the optimal
- * method for generating length-limited Huffman codes!
- * But it should be good enough. */
- if (len >= max_codeword_len) {
- len = max_codeword_len;
- do {
- len--;
- } while (len_counts[len] == 0);
- }
-
- /* Account for the fact that we have a non-leaf node at
- * the current depth. */
- len_counts[len]--;
- len_counts[len + 1] += 2;
- }
-}
-
-/*
- * Generate the codewords for a canonical Huffman code.
- *
- * @A
- * The output array for codewords. In addition, initially this
- * array must contain the symbols, sorted primarily by frequency and
- * secondarily by symbol value, in the low NUM_SYMBOL_BITS bits of
- * each entry.
- *
- * @len
- * Output array for codeword lengths.
- *
- * @len_counts
- * An array that provides the number of codewords that will have
- * each possible length <= max_codeword_len.
- *
- * @max_codeword_len
- * Maximum length, in bits, of each codeword.
- *
- * @num_syms
- * Number of symbols in the alphabet, including symbols with zero
- * frequency. This is the length of the 'A' and 'len' arrays.
- */
-static void
-gen_codewords(u32 A[restrict], u8 lens[restrict],
- const unsigned len_counts[restrict],
- unsigned max_codeword_len, unsigned num_syms)
-{
- u32 next_codewords[DEFLATE_MAX_CODEWORD_LEN + 1];
- unsigned i;
- unsigned len;
- unsigned sym;
-
- /* Given the number of codewords that will have each length,
- * assign codeword lengths to symbols. We do this by assigning
- * the lengths in decreasing order to the symbols sorted
- * primarily by increasing frequency and secondarily by
- * increasing symbol value. */
- for (i = 0, len = max_codeword_len; len >= 1; len--) {
- unsigned count = len_counts[len];
- while (count--)
- lens[A[i++] & SYMBOL_MASK] = len;
- }
-
- /* Generate the codewords themselves. We initialize the
- * 'next_codewords' array to provide the lexicographically first
- * codeword of each length, then assign codewords in symbol
- * order. This produces a canonical code. */
- next_codewords[0] = 0;
- next_codewords[1] = 0;
- for (len = 2; len <= max_codeword_len; len++)
- next_codewords[len] =
- (next_codewords[len - 1] + len_counts[len - 1]) << 1;
-
- for (sym = 0; sym < num_syms; sym++)
- A[sym] = next_codewords[lens[sym]]++;
-}
-
-/*
- * ---------------------------------------------------------------------
- * make_canonical_huffman_code()
- * ---------------------------------------------------------------------
- *
- * Given an alphabet and the frequency of each symbol in it, construct a
- * length-limited canonical Huffman code.
- *
- * @num_syms
- * The number of symbols in the alphabet. The symbols are the
- * integers in the range [0, num_syms - 1]. This parameter must be
- * at least 2 and can't be greater than (1 << NUM_SYMBOL_BITS).
- *
- * @max_codeword_len
- * The maximum permissible codeword length.
- *
- * @freqs
- * An array of @num_syms entries, each of which specifies the
- * frequency of the corresponding symbol. It is valid for some,
- * none, or all of the frequencies to be 0.
- *
- * @lens
- * An array of @num_syms entries in which this function will return
- * the length, in bits, of the codeword assigned to each symbol.
- * Symbols with 0 frequency will not have codewords per se, but
- * their entries in this array will be set to 0. No lengths greater
- * than @max_codeword_len will be assigned.
- *
- * @codewords
- * An array of @num_syms entries in which this function will return
- * the codeword for each symbol, right-justified and padded on the
- * left with zeroes. Codewords for symbols with 0 frequency will be
- * undefined.
- *
- * ---------------------------------------------------------------------
- *
- * This function builds a length-limited canonical Huffman code.
- *
- * A length-limited Huffman code contains no codewords longer than some
- * specified length, and has exactly (with some algorithms) or
- * approximately (with the algorithm used here) the minimum weighted path
- * length from the root, given this constraint.
- *
- * A canonical Huffman code satisfies the properties that a longer
- * codeword never lexicographically precedes a shorter codeword, and the
- * lexicographic ordering of codewords of the same length is the same as
- * the lexicographic ordering of the corresponding symbols. A canonical
- * Huffman code, or more generally a canonical prefix code, can be
- * reconstructed from only a list containing the codeword length of each
- * symbol.
- *
- * The classic algorithm to generate a Huffman code creates a node for
- * each symbol, then inserts these nodes into a min-heap keyed by symbol
- * frequency. Then, repeatedly, the two lowest-frequency nodes are
- * removed from the min-heap and added as the children of a new node
- * having frequency equal to the sum of its two children, which is then
- * inserted into the min-heap. When only a single node remains in the
- * min-heap, it is the root of the Huffman tree. The codeword for each
- * symbol is determined by the path needed to reach the corresponding
- * node from the root. Descending to the left child appends a 0 bit,
- * whereas descending to the right child appends a 1 bit.
- *
- * The classic algorithm is relatively easy to understand, but it is
- * subject to a number of inefficiencies. In practice, it is fastest to
- * first sort the symbols by frequency. (This itself can be subject to
- * an optimization based on the fact that most frequencies tend to be
- * low.) At the same time, we sort secondarily by symbol value, which
- * aids the process of generating a canonical code. Then, during tree
- * construction, no heap is necessary because both the leaf nodes and the
- * unparented non-leaf nodes can be easily maintained in sorted order.
- * Consequently, there can never be more than two possibilities for the
- * next-lowest-frequency node.
- *
- * In addition, because we're generating a canonical code, we actually
- * don't need the leaf nodes of the tree at all, only the non-leaf nodes.
- * This is because for canonical code generation we don't need to know
- * where the symbols are in the tree. Rather, we only need to know how
- * many leaf nodes have each depth (codeword length). And this
- * information can, in fact, be quickly generated from the tree of
- * non-leaves only.
- *
- * Furthermore, we can build this stripped-down Huffman tree directly in
- * the array in which the codewords are to be generated, provided that
- * these array slots are large enough to hold a symbol and frequency
- * value.
- *
- * Still furthermore, we don't even need to maintain explicit child
- * pointers. We only need the parent pointers, and even those can be
- * overwritten in-place with depth information as part of the process of
- * extracting codeword lengths from the tree. So in summary, we do NOT
- * need a big structure like:
- *
- * struct huffman_tree_node {
- * unsigned int symbol;
- * unsigned int frequency;
- * unsigned int depth;
- * struct huffman_tree_node *left_child;
- * struct huffman_tree_node *right_child;
- * };
- *
- *
- * ... which often gets used in "naive" implementations of Huffman code
- * generation.
- *
- * Many of these optimizations are based on the implementation in 7-Zip
- * (source file: C/HuffEnc.c), which has been placed in the public domain
- * by Igor Pavlov.
- */
-static void
-make_canonical_huffman_code(unsigned num_syms, unsigned max_codeword_len,
- const u32 freqs[restrict],
- u8 lens[restrict], u32 codewords[restrict])
-{
- u32 *A = codewords;
- unsigned num_used_syms;
-
- STATIC_ASSERT(DEFLATE_MAX_NUM_SYMS <= 1 << NUM_SYMBOL_BITS);
-
- /* We begin by sorting the symbols primarily by frequency and
- * secondarily by symbol value. As an optimization, the array
- * used for this purpose ('A') shares storage with the space in
- * which we will eventually return the codewords. */
-
- num_used_syms = sort_symbols(num_syms, freqs, lens, A);
-
- /* 'num_used_syms' is the number of symbols with nonzero
- * frequency. This may be less than @num_syms. 'num_used_syms'
- * is also the number of entries in 'A' that are valid. Each
- * entry consists of a distinct symbol and a nonzero frequency
- * packed into a 32-bit integer. */
-
- /* Handle special cases where only 0 or 1 symbols were used (had
- * nonzero frequency). */
-
- if (unlikely(num_used_syms == 0)) {
- /* Code is empty. sort_symbols() already set all lengths
- * to 0, so there is nothing more to do. */
- return;
- }
-
- if (unlikely(num_used_syms == 1)) {
- /* Only one symbol was used, so we only need one
- * codeword. But two codewords are needed to form the
- * smallest complete Huffman code, which uses codewords 0
- * and 1. Therefore, we choose another symbol to which
- * to assign a codeword. We use 0 (if the used symbol is
- * not 0) or 1 (if the used symbol is 0). In either
- * case, the lesser-valued symbol must be assigned
- * codeword 0 so that the resulting code is canonical. */
-
- unsigned sym = A[0] & SYMBOL_MASK;
- unsigned nonzero_idx = sym ? sym : 1;
-
- codewords[0] = 0;
- lens[0] = 1;
- codewords[nonzero_idx] = 1;
- lens[nonzero_idx] = 1;
- return;
- }
-
- /* Build a stripped-down version of the Huffman tree, sharing the
- * array 'A' with the symbol values. Then extract length counts
- * from the tree and use them to generate the final codewords. */
-
- build_tree(A, num_used_syms);
-
- {
- unsigned len_counts[DEFLATE_MAX_CODEWORD_LEN + 1];
-
- compute_length_counts(A, num_used_syms - 2,
- len_counts, max_codeword_len);
-
- gen_codewords(A, lens, len_counts, max_codeword_len, num_syms);
- }
-}
-
-/*
- * Clear the Huffman symbol frequency counters.
- * This must be called when starting a new DEFLATE block.
- */
-static void
-deflate_reset_symbol_frequencies(struct libdeflate_compressor *c)
-{
- memset(&c->freqs, 0, sizeof(c->freqs));
-}
-
-/* Reverse the Huffman codeword 'codeword', which is 'len' bits in length. */
-static u32
-deflate_reverse_codeword(u32 codeword, u8 len)
-{
- /* The following branchless algorithm is faster than going bit by bit.
- * Note: since no codewords are longer than 16 bits, we only need to
- * reverse the low 16 bits of the 'u32'. */
- STATIC_ASSERT(DEFLATE_MAX_CODEWORD_LEN <= 16);
-
- /* Flip adjacent 1-bit fields */
- codeword = ((codeword & 0x5555) << 1) | ((codeword & 0xAAAA) >> 1);
-
- /* Flip adjacent 2-bit fields */
- codeword = ((codeword & 0x3333) << 2) | ((codeword & 0xCCCC) >> 2);
-
- /* Flip adjacent 4-bit fields */
- codeword = ((codeword & 0x0F0F) << 4) | ((codeword & 0xF0F0) >> 4);
-
- /* Flip adjacent 8-bit fields */
- codeword = ((codeword & 0x00FF) << 8) | ((codeword & 0xFF00) >> 8);
-
- /* Return the high 'len' bits of the bit-reversed 16 bit value. */
- return codeword >> (16 - len);
-}
-
-/* Make a canonical Huffman code with bit-reversed codewords. */
-static void
-deflate_make_huffman_code(unsigned num_syms, unsigned max_codeword_len,
- const u32 freqs[], u8 lens[], u32 codewords[])
-{
- unsigned sym;
-
- make_canonical_huffman_code(num_syms, max_codeword_len,
- freqs, lens, codewords);
-
- for (sym = 0; sym < num_syms; sym++)
- codewords[sym] = deflate_reverse_codeword(codewords[sym], lens[sym]);
-}
-
-/*
- * Build the literal/length and offset Huffman codes for a DEFLATE block.
- *
- * This takes as input the frequency tables for each code and produces as output
- * a set of tables that map symbols to codewords and codeword lengths.
- */
-static void
-deflate_make_huffman_codes(const struct deflate_freqs *freqs,
- struct deflate_codes *codes)
-{
- STATIC_ASSERT(MAX_LITLEN_CODEWORD_LEN <= DEFLATE_MAX_LITLEN_CODEWORD_LEN);
- STATIC_ASSERT(MAX_OFFSET_CODEWORD_LEN <= DEFLATE_MAX_OFFSET_CODEWORD_LEN);
-
- deflate_make_huffman_code(DEFLATE_NUM_LITLEN_SYMS,
- MAX_LITLEN_CODEWORD_LEN,
- freqs->litlen,
- codes->lens.litlen,
- codes->codewords.litlen);
-
- deflate_make_huffman_code(DEFLATE_NUM_OFFSET_SYMS,
- MAX_OFFSET_CODEWORD_LEN,
- freqs->offset,
- codes->lens.offset,
- codes->codewords.offset);
-}
-
-/* Initialize c->static_codes. */
-static void
-deflate_init_static_codes(struct libdeflate_compressor *c)
-{
- unsigned i;
-
- for (i = 0; i < 144; i++)
- c->freqs.litlen[i] = 1 << (9 - 8);
- for (; i < 256; i++)
- c->freqs.litlen[i] = 1 << (9 - 9);
- for (; i < 280; i++)
- c->freqs.litlen[i] = 1 << (9 - 7);
- for (; i < 288; i++)
- c->freqs.litlen[i] = 1 << (9 - 8);
-
- for (i = 0; i < 32; i++)
- c->freqs.offset[i] = 1 << (5 - 5);
-
- deflate_make_huffman_codes(&c->freqs, &c->static_codes);
-}
-
-/* Return the offset slot for the specified match offset. */
-static forceinline unsigned
-deflate_get_offset_slot(struct libdeflate_compressor *c, unsigned offset)
-{
-#if USE_FULL_OFFSET_SLOT_FAST
- return c->offset_slot_fast[offset];
-#else
- if (offset <= 256)
- return c->offset_slot_fast[offset - 1];
- else
- return c->offset_slot_fast[256 + ((offset - 1) >> 7)];
-#endif
-}
-
-/* Write the header fields common to all DEFLATE block types. */
-static void
-deflate_write_block_header(struct deflate_output_bitstream *os,
- bool is_final_block, unsigned block_type)
-{
- deflate_add_bits(os, is_final_block, 1);
- deflate_add_bits(os, block_type, 2);
- deflate_flush_bits(os);
-}
-
-static unsigned
-deflate_compute_precode_items(const u8 lens[restrict],
- const unsigned num_lens,
- u32 precode_freqs[restrict],
- unsigned precode_items[restrict])
-{
- unsigned *itemptr;
- unsigned run_start;
- unsigned run_end;
- unsigned extra_bits;
- u8 len;
-
- memset(precode_freqs, 0,
- DEFLATE_NUM_PRECODE_SYMS * sizeof(precode_freqs[0]));
-
- itemptr = precode_items;
- run_start = 0;
- do {
- /* Find the next run of codeword lengths. */
-
- /* len = the length being repeated */
- len = lens[run_start];
-
- /* Extend the run. */
- run_end = run_start;
- do {
- run_end++;
- } while (run_end != num_lens && len == lens[run_end]);
-
- if (len == 0) {
- /* Run of zeroes. */
-
- /* Symbol 18: RLE 11 to 138 zeroes at a time. */
- while ((run_end - run_start) >= 11) {
- extra_bits = MIN((run_end - run_start) - 11, 0x7F);
- precode_freqs[18]++;
- *itemptr++ = 18 | (extra_bits << 5);
- run_start += 11 + extra_bits;
- }
-
- /* Symbol 17: RLE 3 to 10 zeroes at a time. */
- if ((run_end - run_start) >= 3) {
- extra_bits = MIN((run_end - run_start) - 3, 0x7);
- precode_freqs[17]++;
- *itemptr++ = 17 | (extra_bits << 5);
- run_start += 3 + extra_bits;
- }
- } else {
-
- /* A run of nonzero lengths. */
-
- /* Symbol 16: RLE 3 to 6 of the previous length. */
- if ((run_end - run_start) >= 4) {
- precode_freqs[len]++;
- *itemptr++ = len;
- run_start++;
- do {
- extra_bits = MIN((run_end - run_start) - 3, 0x3);
- precode_freqs[16]++;
- *itemptr++ = 16 | (extra_bits << 5);
- run_start += 3 + extra_bits;
- } while ((run_end - run_start) >= 3);
- }
- }
-
- /* Output any remaining lengths without RLE. */
- while (run_start != run_end) {
- precode_freqs[len]++;
- *itemptr++ = len;
- run_start++;
- }
- } while (run_start != num_lens);
-
- return itemptr - precode_items;
-}
-
-/*
- * Huffman codeword lengths for dynamic Huffman blocks are compressed using a
- * separate Huffman code, the "precode", which contains a symbol for each
- * possible codeword length in the larger code as well as several special
- * symbols to represent repeated codeword lengths (a form of run-length
- * encoding). The precode is itself constructed in canonical form, and its
- * codeword lengths are represented literally in 19 3-bit fields that
- * immediately precede the compressed codeword lengths of the larger code.
- */
-
-/* Precompute the information needed to output Huffman codes. */
-static void
-deflate_precompute_huffman_header(struct libdeflate_compressor *c)
-{
- /* Compute how many litlen and offset symbols are needed. */
-
- for (c->num_litlen_syms = DEFLATE_NUM_LITLEN_SYMS;
- c->num_litlen_syms > 257;
- c->num_litlen_syms--)
- if (c->codes.lens.litlen[c->num_litlen_syms - 1] != 0)
- break;
-
- for (c->num_offset_syms = DEFLATE_NUM_OFFSET_SYMS;
- c->num_offset_syms > 1;
- c->num_offset_syms--)
- if (c->codes.lens.offset[c->num_offset_syms - 1] != 0)
- break;
-
- /* If we're not using the full set of literal/length codeword lengths,
- * then temporarily move the offset codeword lengths over so that the
- * literal/length and offset codeword lengths are contiguous. */
-
- STATIC_ASSERT(offsetof(struct deflate_lens, offset) ==
- DEFLATE_NUM_LITLEN_SYMS);
-
- if (c->num_litlen_syms != DEFLATE_NUM_LITLEN_SYMS) {
- memmove((u8 *)&c->codes.lens + c->num_litlen_syms,
- (u8 *)&c->codes.lens + DEFLATE_NUM_LITLEN_SYMS,
- c->num_offset_syms);
- }
-
- /* Compute the "items" (RLE / literal tokens and extra bits) with which
- * the codeword lengths in the larger code will be output. */
- c->num_precode_items =
- deflate_compute_precode_items((u8 *)&c->codes.lens,
- c->num_litlen_syms +
- c->num_offset_syms,
- c->precode_freqs,
- c->precode_items);
-
- /* Build the precode. */
- STATIC_ASSERT(MAX_PRE_CODEWORD_LEN <= DEFLATE_MAX_PRE_CODEWORD_LEN);
- deflate_make_huffman_code(DEFLATE_NUM_PRECODE_SYMS,
- MAX_PRE_CODEWORD_LEN,
- c->precode_freqs, c->precode_lens,
- c->precode_codewords);
-
- /* Count how many precode lengths we actually need to output. */
- for (c->num_explicit_lens = DEFLATE_NUM_PRECODE_SYMS;
- c->num_explicit_lens > 4;
- c->num_explicit_lens--)
- if (c->precode_lens[deflate_precode_lens_permutation[
- c->num_explicit_lens - 1]] != 0)
- break;
-
- /* Restore the offset codeword lengths if needed. */
- if (c->num_litlen_syms != DEFLATE_NUM_LITLEN_SYMS) {
- memmove((u8 *)&c->codes.lens + DEFLATE_NUM_LITLEN_SYMS,
- (u8 *)&c->codes.lens + c->num_litlen_syms,
- c->num_offset_syms);
- }
-}
-
-/* Output the Huffman codes. */
-static void
-deflate_write_huffman_header(struct libdeflate_compressor *c,
- struct deflate_output_bitstream *os)
-{
- unsigned i;
-
- deflate_add_bits(os, c->num_litlen_syms - 257, 5);
- deflate_add_bits(os, c->num_offset_syms - 1, 5);
- deflate_add_bits(os, c->num_explicit_lens - 4, 4);
- deflate_flush_bits(os);
-
- /* Output the lengths of the codewords in the precode. */
- for (i = 0; i < c->num_explicit_lens; i++) {
- deflate_add_bits(os, c->precode_lens[
- deflate_precode_lens_permutation[i]], 3);
- deflate_flush_bits(os);
- }
-
- /* Output the encoded lengths of the codewords in the larger code. */
- for (i = 0; i < c->num_precode_items; i++) {
- unsigned precode_item = c->precode_items[i];
- unsigned precode_sym = precode_item & 0x1F;
- deflate_add_bits(os, c->precode_codewords[precode_sym],
- c->precode_lens[precode_sym]);
- if (precode_sym >= 16) {
- if (precode_sym == 16)
- deflate_add_bits(os, precode_item >> 5, 2);
- else if (precode_sym == 17)
- deflate_add_bits(os, precode_item >> 5, 3);
- else
- deflate_add_bits(os, precode_item >> 5, 7);
- }
- STATIC_ASSERT(CAN_BUFFER(DEFLATE_MAX_PRE_CODEWORD_LEN + 7));
- deflate_flush_bits(os);
- }
-}
-
-static void
-deflate_write_sequences(struct deflate_output_bitstream * restrict os,
- const struct deflate_codes * restrict codes,
- const struct deflate_sequence sequences[restrict],
- const u8 * restrict in_next)
-{
- const struct deflate_sequence *seq = sequences;
-
- for (;;) {
- u32 litrunlen = seq->litrunlen_and_length & 0x7FFFFF;
- unsigned length = seq->litrunlen_and_length >> 23;
- unsigned length_slot;
- unsigned litlen_symbol;
- unsigned offset_symbol;
-
- if (litrunlen) {
- #if 1
- while (litrunlen >= 4) {
- unsigned lit0 = in_next[0];
- unsigned lit1 = in_next[1];
- unsigned lit2 = in_next[2];
- unsigned lit3 = in_next[3];
-
- deflate_add_bits(os, codes->codewords.litlen[lit0],
- codes->lens.litlen[lit0]);
- if (!CAN_BUFFER(2 * MAX_LITLEN_CODEWORD_LEN))
- deflate_flush_bits(os);
-
- deflate_add_bits(os, codes->codewords.litlen[lit1],
- codes->lens.litlen[lit1]);
- if (!CAN_BUFFER(4 * MAX_LITLEN_CODEWORD_LEN))
- deflate_flush_bits(os);
-
- deflate_add_bits(os, codes->codewords.litlen[lit2],
- codes->lens.litlen[lit2]);
- if (!CAN_BUFFER(2 * MAX_LITLEN_CODEWORD_LEN))
- deflate_flush_bits(os);
-
- deflate_add_bits(os, codes->codewords.litlen[lit3],
- codes->lens.litlen[lit3]);
- deflate_flush_bits(os);
- in_next += 4;
- litrunlen -= 4;
- }
- if (litrunlen-- != 0) {
- deflate_add_bits(os, codes->codewords.litlen[*in_next],
- codes->lens.litlen[*in_next]);
- if (!CAN_BUFFER(3 * MAX_LITLEN_CODEWORD_LEN))
- deflate_flush_bits(os);
- in_next++;
- if (litrunlen-- != 0) {
- deflate_add_bits(os, codes->codewords.litlen[*in_next],
- codes->lens.litlen[*in_next]);
- if (!CAN_BUFFER(3 * MAX_LITLEN_CODEWORD_LEN))
- deflate_flush_bits(os);
- in_next++;
- if (litrunlen-- != 0) {
- deflate_add_bits(os, codes->codewords.litlen[*in_next],
- codes->lens.litlen[*in_next]);
- if (!CAN_BUFFER(3 * MAX_LITLEN_CODEWORD_LEN))
- deflate_flush_bits(os);
- in_next++;
- }
- }
- if (CAN_BUFFER(3 * MAX_LITLEN_CODEWORD_LEN))
- deflate_flush_bits(os);
- }
- #else
- do {
- unsigned lit = *in_next++;
- deflate_add_bits(os, codes->codewords.litlen[lit],
- codes->lens.litlen[lit]);
- deflate_flush_bits(os);
- } while (--litrunlen);
- #endif
- }
-
- if (length == 0)
- return;
-
- in_next += length;
-
- length_slot = seq->length_slot;
- litlen_symbol = 257 + length_slot;
-
- /* Litlen symbol */
- deflate_add_bits(os, codes->codewords.litlen[litlen_symbol],
- codes->lens.litlen[litlen_symbol]);
-
- /* Extra length bits */
- STATIC_ASSERT(CAN_BUFFER(MAX_LITLEN_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_LENGTH_BITS));
- deflate_add_bits(os, length - deflate_length_slot_base[length_slot],
- deflate_extra_length_bits[length_slot]);
-
- if (!CAN_BUFFER(MAX_LITLEN_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_LENGTH_BITS +
- MAX_OFFSET_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_OFFSET_BITS))
- deflate_flush_bits(os);
-
- /* Offset symbol */
- offset_symbol = seq->offset_symbol;
- deflate_add_bits(os, codes->codewords.offset[offset_symbol],
- codes->lens.offset[offset_symbol]);
-
- if (!CAN_BUFFER(MAX_OFFSET_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_OFFSET_BITS))
- deflate_flush_bits(os);
-
- /* Extra offset bits */
- deflate_add_bits(os, seq->offset - deflate_offset_slot_base[offset_symbol],
- deflate_extra_offset_bits[offset_symbol]);
-
- deflate_flush_bits(os);
-
- seq++;
- }
-}
-
-#if SUPPORT_NEAR_OPTIMAL_PARSING
-/*
- * Follow the minimum-cost path in the graph of possible match/literal choices
- * for the current block and write out the matches/literals using the specified
- * Huffman codes.
- *
- * Note: this is slightly duplicated with deflate_write_sequences(), the reason
- * being that we don't want to waste time translating between intermediate
- * match/literal representations.
- */
-static void
-deflate_write_item_list(struct deflate_output_bitstream *os,
- const struct deflate_codes *codes,
- struct libdeflate_compressor *c,
- u32 block_length)
-{
- struct deflate_optimum_node *cur_node = &c->p.n.optimum_nodes[0];
- struct deflate_optimum_node * const end_node = &c->p.n.optimum_nodes[block_length];
- do {
- unsigned length = cur_node->item & OPTIMUM_LEN_MASK;
- unsigned offset = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
- unsigned litlen_symbol;
- unsigned length_slot;
- unsigned offset_slot;
-
- if (length == 1) {
- /* Literal */
- litlen_symbol = offset;
- deflate_add_bits(os, codes->codewords.litlen[litlen_symbol],
- codes->lens.litlen[litlen_symbol]);
- deflate_flush_bits(os);
- } else {
- /* Match length */
- length_slot = deflate_length_slot[length];
- litlen_symbol = 257 + length_slot;
- deflate_add_bits(os, codes->codewords.litlen[litlen_symbol],
- codes->lens.litlen[litlen_symbol]);
-
- deflate_add_bits(os, length - deflate_length_slot_base[length_slot],
- deflate_extra_length_bits[length_slot]);
-
- if (!CAN_BUFFER(MAX_LITLEN_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_LENGTH_BITS +
- MAX_OFFSET_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_OFFSET_BITS))
- deflate_flush_bits(os);
-
-
- /* Match offset */
- offset_slot = deflate_get_offset_slot(c, offset);
- deflate_add_bits(os, codes->codewords.offset[offset_slot],
- codes->lens.offset[offset_slot]);
-
- if (!CAN_BUFFER(MAX_OFFSET_CODEWORD_LEN +
- DEFLATE_MAX_EXTRA_OFFSET_BITS))
- deflate_flush_bits(os);
-
- deflate_add_bits(os, offset - deflate_offset_slot_base[offset_slot],
- deflate_extra_offset_bits[offset_slot]);
-
- deflate_flush_bits(os);
- }
- cur_node += length;
- } while (cur_node != end_node);
-}
-#endif /* SUPPORT_NEAR_OPTIMAL_PARSING */
-
-/* Output the end-of-block symbol. */
-static void
-deflate_write_end_of_block(struct deflate_output_bitstream *os,
- const struct deflate_codes *codes)
-{
- deflate_add_bits(os, codes->codewords.litlen[DEFLATE_END_OF_BLOCK],
- codes->lens.litlen[DEFLATE_END_OF_BLOCK]);
- deflate_flush_bits(os);
-}
-
-static void
-deflate_write_uncompressed_block(struct deflate_output_bitstream *os,
- const u8 *data, u16 len,
- bool is_final_block)
-{
- deflate_write_block_header(os, is_final_block,
- DEFLATE_BLOCKTYPE_UNCOMPRESSED);
- deflate_align_bitstream(os);
-
- if (4 + (u32)len >= os->end - os->next) {
- os->next = os->end;
- return;
- }
-
- put_unaligned_le16(len, os->next);
- os->next += 2;
- put_unaligned_le16(~len, os->next);
- os->next += 2;
- memcpy(os->next, data, len);
- os->next += len;
-}
-
-static void
-deflate_write_uncompressed_blocks(struct deflate_output_bitstream *os,
- const u8 *data, size_t data_length,
- bool is_final_block)
-{
- do {
- u16 len = MIN(data_length, UINT16_MAX);
-
- deflate_write_uncompressed_block(os, data, len,
- is_final_block && len == data_length);
- data += len;
- data_length -= len;
- } while (data_length != 0);
-}
-
-/*
- * Choose the best type of block to use (dynamic Huffman, static Huffman, or
- * uncompressed), then output it.
- */
-static void
-deflate_flush_block(struct libdeflate_compressor * restrict c,
- struct deflate_output_bitstream * restrict os,
- const u8 * restrict block_begin, u32 block_length,
- bool is_final_block, bool use_item_list)
-{
- static const u8 deflate_extra_precode_bits[DEFLATE_NUM_PRECODE_SYMS] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7,
- };
-
- /* Costs are measured in bits */
- u32 dynamic_cost = 0;
- u32 static_cost = 0;
- u32 uncompressed_cost = 0;
- struct deflate_codes *codes;
- int block_type;
- unsigned sym;
-
- /* Tally the end-of-block symbol. */
- c->freqs.litlen[DEFLATE_END_OF_BLOCK]++;
-
- /* Build dynamic Huffman codes. */
- deflate_make_huffman_codes(&c->freqs, &c->codes);
-
- /* Account for the cost of sending dynamic Huffman codes. */
- deflate_precompute_huffman_header(c);
- dynamic_cost += 5 + 5 + 4 + (3 * c->num_explicit_lens);
- for (sym = 0; sym < DEFLATE_NUM_PRECODE_SYMS; sym++) {
- u32 extra = deflate_extra_precode_bits[sym];
- dynamic_cost += c->precode_freqs[sym] *
- (extra + c->precode_lens[sym]);
- }
-
- /* Account for the cost of encoding literals. */
- for (sym = 0; sym < 256; sym++) {
- dynamic_cost += c->freqs.litlen[sym] *
- c->codes.lens.litlen[sym];
- }
- for (sym = 0; sym < 144; sym++)
- static_cost += c->freqs.litlen[sym] * 8;
- for (; sym < 256; sym++)
- static_cost += c->freqs.litlen[sym] * 9;
-
- /* Account for the cost of encoding the end-of-block symbol. */
- dynamic_cost += c->codes.lens.litlen[256];
- static_cost += 7;
-
- /* Account for the cost of encoding lengths. */
- for (sym = 257; sym < 257 + ARRAY_LEN(deflate_extra_length_bits); sym++) {
- u32 extra = deflate_extra_length_bits[sym - 257];
- dynamic_cost += c->freqs.litlen[sym] *
- (extra + c->codes.lens.litlen[sym]);
- static_cost += c->freqs.litlen[sym] *
- (extra + c->static_codes.lens.litlen[sym]);
- }
-
- /* Account for the cost of encoding offsets. */
- for (sym = 0; sym < ARRAY_LEN(deflate_extra_offset_bits); sym++) {
- u32 extra = deflate_extra_offset_bits[sym];
- dynamic_cost += c->freqs.offset[sym] *
- (extra + c->codes.lens.offset[sym]);
- static_cost += c->freqs.offset[sym] * (extra + 5);
- }
-
- /* Compute the cost of using uncompressed blocks. */
- uncompressed_cost += (-(os->bitcount + 3) & 7) + 32 +
- (40 * (DIV_ROUND_UP(block_length,
- UINT16_MAX) - 1)) +
- (8 * block_length);
-
- /* Choose the cheapest block type. */
- if (dynamic_cost < MIN(static_cost, uncompressed_cost)) {
- block_type = DEFLATE_BLOCKTYPE_DYNAMIC_HUFFMAN;
- codes = &c->codes;
- } else if (static_cost < uncompressed_cost) {
- block_type = DEFLATE_BLOCKTYPE_STATIC_HUFFMAN;
- codes = &c->static_codes;
- } else {
- block_type = DEFLATE_BLOCKTYPE_UNCOMPRESSED;
- }
-
- /* Now actually output the block. */
-
- if (block_type == DEFLATE_BLOCKTYPE_UNCOMPRESSED) {
- /* Note: the length being flushed may exceed the maximum length
- * of an uncompressed block (65535 bytes). Therefore, more than
- * one uncompressed block might be needed. */
- deflate_write_uncompressed_blocks(os, block_begin, block_length,
- is_final_block);
- } else {
- /* Output the block header. */
- deflate_write_block_header(os, is_final_block, block_type);
-
- /* Output the Huffman codes (dynamic Huffman blocks only). */
- if (block_type == DEFLATE_BLOCKTYPE_DYNAMIC_HUFFMAN)
- deflate_write_huffman_header(c, os);
-
- /* Output the literals, matches, and end-of-block symbol. */
- #if SUPPORT_NEAR_OPTIMAL_PARSING
- if (use_item_list)
- deflate_write_item_list(os, codes, c, block_length);
- else
- #endif
- deflate_write_sequences(os, codes, c->p.g.sequences,
- block_begin);
- deflate_write_end_of_block(os, codes);
- }
-}
-
-static forceinline void
-deflate_choose_literal(struct libdeflate_compressor *c, unsigned literal,
- u32 *litrunlen_p)
-{
- c->freqs.litlen[literal]++;
- ++*litrunlen_p;
-}
-
-static forceinline void
-deflate_choose_match(struct libdeflate_compressor *c,
- unsigned length, unsigned offset,
- u32 *litrunlen_p, struct deflate_sequence **next_seq_p)
-{
- struct deflate_sequence *seq = *next_seq_p;
- unsigned length_slot = deflate_length_slot[length];
- unsigned offset_slot = deflate_get_offset_slot(c, offset);
-
- c->freqs.litlen[257 + length_slot]++;
- c->freqs.offset[offset_slot]++;
-
- seq->litrunlen_and_length = ((u32)length << 23) | *litrunlen_p;
- seq->offset = offset;
- seq->length_slot = length_slot;
- seq->offset_symbol = offset_slot;
-
- *litrunlen_p = 0;
- *next_seq_p = seq + 1;
-}
-
-static forceinline void
-deflate_finish_sequence(struct deflate_sequence *seq, u32 litrunlen)
-{
- seq->litrunlen_and_length = litrunlen; /* length = 0 */
-}
-
-/******************************************************************************/
-
-/*
- * Block splitting algorithm. The problem is to decide when it is worthwhile to
- * start a new block with new Huffman codes. There is a theoretically optimal
- * solution: recursively consider every possible block split, considering the
- * exact cost of each block, and choose the minimum cost approach. But this is
- * far too slow. Instead, as an approximation, we can count symbols and after
- * every N symbols, compare the expected distribution of symbols based on the
- * previous data with the actual distribution. If they differ "by enough", then
- * start a new block.
- *
- * As an optimization and heuristic, we don't distinguish between every symbol
- * but rather we combine many symbols into a single "observation type". For
- * literals we only look at the high bits and low bits, and for matches we only
- * look at whether the match is long or not. The assumption is that for typical
- * "real" data, places that are good block boundaries will tend to be noticeable
- * based only on changes in these aggregate frequencies, without looking for
- * subtle differences in individual symbols. For example, a change from ASCII
- * bytes to non-ASCII bytes, or from few matches (generally less compressible)
- * to many matches (generally more compressible), would be easily noticed based
- * on the aggregates.
- *
- * For determining whether the frequency distributions are "different enough" to
- * start a new block, the simply heuristic of splitting when the sum of absolute
- * differences exceeds a constant seems to be good enough. We also add a number
- * proportional to the block length so that the algorithm is more likely to end
- * long blocks than short blocks. This reflects the general expectation that it
- * will become increasingly beneficial to start a new block as the current
- * block grows longer.
- *
- * Finally, for an approximation, it is not strictly necessary that the exact
- * symbols being used are considered. With "near-optimal parsing", for example,
- * the actual symbols that will be used are unknown until after the block
- * boundary is chosen and the block has been optimized. Since the final choices
- * cannot be used, we can use preliminary "greedy" choices instead.
- */
-
-/* Initialize the block split statistics when starting a new block. */
-static void
-init_block_split_stats(struct block_split_stats *stats)
-{
- int i;
-
- for (i = 0; i < NUM_OBSERVATION_TYPES; i++) {
- stats->new_observations[i] = 0;
- stats->observations[i] = 0;
- }
- stats->num_new_observations = 0;
- stats->num_observations = 0;
-}
-
-/* Literal observation. Heuristic: use the top 2 bits and low 1 bits of the
- * literal, for 8 possible literal observation types. */
-static forceinline void
-observe_literal(struct block_split_stats *stats, u8 lit)
-{
- stats->new_observations[((lit >> 5) & 0x6) | (lit & 1)]++;
- stats->num_new_observations++;
-}
-
-/* Match observation. Heuristic: use one observation type for "short match" and
- * one observation type for "long match". */
-static forceinline void
-observe_match(struct block_split_stats *stats, unsigned length)
-{
- stats->new_observations[NUM_LITERAL_OBSERVATION_TYPES + (length >= 9)]++;
- stats->num_new_observations++;
-}
-
-static bool
-do_end_block_check(struct block_split_stats *stats, u32 block_length)
-{
- int i;
-
- if (stats->num_observations > 0) {
-
- /* Note: to avoid slow divisions, we do not divide by
- * 'num_observations', but rather do all math with the numbers
- * multiplied by 'num_observations'. */
- u32 total_delta = 0;
- for (i = 0; i < NUM_OBSERVATION_TYPES; i++) {
- u32 expected = stats->observations[i] * stats->num_new_observations;
- u32 actual = stats->new_observations[i] * stats->num_observations;
- u32 delta = (actual > expected) ? actual - expected :
- expected - actual;
- total_delta += delta;
- }
-
- /* Ready to end the block? */
- if (total_delta + (block_length / 4096) * stats->num_observations >=
- NUM_OBSERVATIONS_PER_BLOCK_CHECK * 200 / 512 * stats->num_observations)
- return true;
- }
-
- for (i = 0; i < NUM_OBSERVATION_TYPES; i++) {
- stats->num_observations += stats->new_observations[i];
- stats->observations[i] += stats->new_observations[i];
- stats->new_observations[i] = 0;
- }
- stats->num_new_observations = 0;
- return false;
-}
-
-static forceinline bool
-should_end_block(struct block_split_stats *stats,
- const u8 *in_block_begin, const u8 *in_next, const u8 *in_end)
-{
- /* Ready to check block split statistics? */
- if (stats->num_new_observations < NUM_OBSERVATIONS_PER_BLOCK_CHECK ||
- in_next - in_block_begin < MIN_BLOCK_LENGTH ||
- in_end - in_next < MIN_BLOCK_LENGTH)
- return false;
-
- return do_end_block_check(stats, in_next - in_block_begin);
-}
-
-/******************************************************************************/
-
-/*
- * This is the level 0 "compressor". It always outputs uncompressed blocks.
- */
-static size_t
-deflate_compress_none(struct libdeflate_compressor * restrict c,
- const u8 * restrict in, size_t in_nbytes,
- u8 * restrict out, size_t out_nbytes_avail)
-{
- struct deflate_output_bitstream os;
-
- deflate_init_output(&os, out, out_nbytes_avail);
-
- deflate_write_uncompressed_blocks(&os, in, in_nbytes, true);
-
- return deflate_flush_output(&os);
-}
-
-/*
- * This is the "greedy" DEFLATE compressor. It always chooses the longest match.
- */
-static size_t
-deflate_compress_greedy(struct libdeflate_compressor * restrict c,
- const u8 * restrict in, size_t in_nbytes,
- u8 * restrict out, size_t out_nbytes_avail)
-{
- const u8 *in_next = in;
- const u8 *in_end = in_next + in_nbytes;
- struct deflate_output_bitstream os;
- const u8 *in_cur_base = in_next;
- unsigned max_len = DEFLATE_MAX_MATCH_LEN;
- unsigned nice_len = MIN(c->nice_match_length, max_len);
- u32 next_hashes[2] = {0, 0};
-
- deflate_init_output(&os, out, out_nbytes_avail);
- hc_matchfinder_init(&c->p.g.hc_mf);
-
- do {
- /* Starting a new DEFLATE block. */
-
- const u8 * const in_block_begin = in_next;
- const u8 * const in_max_block_end =
- in_next + MIN(in_end - in_next, SOFT_MAX_BLOCK_LENGTH);
- u32 litrunlen = 0;
- struct deflate_sequence *next_seq = c->p.g.sequences;
-
- init_block_split_stats(&c->split_stats);
- deflate_reset_symbol_frequencies(c);
-
- do {
- u32 length;
- u32 offset;
-
- /* Decrease the maximum and nice match lengths if we're
- * approaching the end of the input buffer. */
- if (unlikely(max_len > in_end - in_next)) {
- max_len = in_end - in_next;
- nice_len = MIN(nice_len, max_len);
- }
-
- length = hc_matchfinder_longest_match(&c->p.g.hc_mf,
- &in_cur_base,
- in_next,
- DEFLATE_MIN_MATCH_LEN - 1,
- max_len,
- nice_len,
- c->max_search_depth,
- next_hashes,
- &offset);
-
- if (length >= DEFLATE_MIN_MATCH_LEN) {
- /* Match found. */
- deflate_choose_match(c, length, offset,
- &litrunlen, &next_seq);
- observe_match(&c->split_stats, length);
- in_next = hc_matchfinder_skip_positions(&c->p.g.hc_mf,
- &in_cur_base,
- in_next + 1,
- in_end,
- length - 1,
- next_hashes);
- } else {
- /* No match found. */
- deflate_choose_literal(c, *in_next, &litrunlen);
- observe_literal(&c->split_stats, *in_next);
- in_next++;
- }
-
- /* Check if it's time to output another block. */
- } while (in_next < in_max_block_end &&
- !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
-
- deflate_finish_sequence(next_seq, litrunlen);
- deflate_flush_block(c, &os, in_block_begin,
- in_next - in_block_begin,
- in_next == in_end, false);
- } while (in_next != in_end);
-
- return deflate_flush_output(&os);
-}
-
-/*
- * This is the "lazy" DEFLATE compressor. Before choosing a match, it checks to
- * see if there's a longer match at the next position. If yes, it outputs a
- * literal and continues to the next position. If no, it outputs the match.
- */
-static size_t
-deflate_compress_lazy(struct libdeflate_compressor * restrict c,
- const u8 * restrict in, size_t in_nbytes,
- u8 * restrict out, size_t out_nbytes_avail)
-{
- const u8 *in_next = in;
- const u8 *in_end = in_next + in_nbytes;
- struct deflate_output_bitstream os;
- const u8 *in_cur_base = in_next;
- unsigned max_len = DEFLATE_MAX_MATCH_LEN;
- unsigned nice_len = MIN(c->nice_match_length, max_len);
- u32 next_hashes[2] = {0, 0};
-
- deflate_init_output(&os, out, out_nbytes_avail);
- hc_matchfinder_init(&c->p.g.hc_mf);
-
- do {
- /* Starting a new DEFLATE block. */
-
- const u8 * const in_block_begin = in_next;
- const u8 * const in_max_block_end =
- in_next + MIN(in_end - in_next, SOFT_MAX_BLOCK_LENGTH);
- u32 litrunlen = 0;
- struct deflate_sequence *next_seq = c->p.g.sequences;
-
- init_block_split_stats(&c->split_stats);
- deflate_reset_symbol_frequencies(c);
-
- do {
- unsigned cur_len;
- unsigned cur_offset;
- unsigned next_len;
- unsigned next_offset;
-
- if (unlikely(in_end - in_next < DEFLATE_MAX_MATCH_LEN)) {
- max_len = in_end - in_next;
- nice_len = MIN(nice_len, max_len);
- }
-
- /* Find the longest match at the current position. */
- cur_len = hc_matchfinder_longest_match(&c->p.g.hc_mf,
- &in_cur_base,
- in_next,
- DEFLATE_MIN_MATCH_LEN - 1,
- max_len,
- nice_len,
- c->max_search_depth,
- next_hashes,
- &cur_offset);
- in_next += 1;
-
- if (cur_len < DEFLATE_MIN_MATCH_LEN) {
- /* No match found. Choose a literal. */
- deflate_choose_literal(c, *(in_next - 1), &litrunlen);
- observe_literal(&c->split_stats, *(in_next - 1));
- continue;
- }
-
- have_cur_match:
- observe_match(&c->split_stats, cur_len);
-
- /* We have a match at the current position. */
-
- /* If the current match is very long, choose it
- * immediately. */
- if (cur_len >= nice_len) {
- deflate_choose_match(c, cur_len, cur_offset,
- &litrunlen, &next_seq);
- in_next = hc_matchfinder_skip_positions(&c->p.g.hc_mf,
- &in_cur_base,
- in_next,
- in_end,
- cur_len - 1,
- next_hashes);
- continue;
- }
-
- /*
- * Try to find a match at the next position.
- *
- * Note: since we already have a match at the *current*
- * position, we use only half the 'max_search_depth'
- * when checking the *next* position. This is a useful
- * trade-off because it's more worthwhile to use a
- * greater search depth on the initial match.
- *
- * Note: it's possible to structure the code such that
- * there's only one call to longest_match(), which
- * handles both the "find the initial match" and "try to
- * find a longer match" cases. However, it is faster to
- * have two call sites, with longest_match() inlined at
- * each.
- */
- if (unlikely(in_end - in_next < DEFLATE_MAX_MATCH_LEN)) {
- max_len = in_end - in_next;
- nice_len = MIN(nice_len, max_len);
- }
- next_len = hc_matchfinder_longest_match(&c->p.g.hc_mf,
- &in_cur_base,
- in_next,
- cur_len,
- max_len,
- nice_len,
- c->max_search_depth / 2,
- next_hashes,
- &next_offset);
- in_next += 1;
-
- if (next_len > cur_len) {
- /* Found a longer match at the next position.
- * Output a literal. Then the next match
- * becomes the current match. */
- deflate_choose_literal(c, *(in_next - 2), &litrunlen);
- cur_len = next_len;
- cur_offset = next_offset;
- goto have_cur_match;
- }
-
- /* No longer match at the next position.
- * Output the current match. */
- deflate_choose_match(c, cur_len, cur_offset,
- &litrunlen, &next_seq);
- in_next = hc_matchfinder_skip_positions(&c->p.g.hc_mf,
- &in_cur_base,
- in_next,
- in_end,
- cur_len - 2,
- next_hashes);
-
- /* Check if it's time to output another block. */
- } while (in_next < in_max_block_end &&
- !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
-
- deflate_finish_sequence(next_seq, litrunlen);
- deflate_flush_block(c, &os, in_block_begin,
- in_next - in_block_begin,
- in_next == in_end, false);
- } while (in_next != in_end);
-
- return deflate_flush_output(&os);
-}
-
-#if SUPPORT_NEAR_OPTIMAL_PARSING
-
-/*
- * Follow the minimum-cost path in the graph of possible match/literal choices
- * for the current block and compute the frequencies of the Huffman symbols that
- * would be needed to output those matches and literals.
- */
-static void
-deflate_tally_item_list(struct libdeflate_compressor *c, u32 block_length)
-{
- struct deflate_optimum_node *cur_node = &c->p.n.optimum_nodes[0];
- struct deflate_optimum_node *end_node = &c->p.n.optimum_nodes[block_length];
- do {
- unsigned length = cur_node->item & OPTIMUM_LEN_MASK;
- unsigned offset = cur_node->item >> OPTIMUM_OFFSET_SHIFT;
-
- if (length == 1) {
- /* Literal */
- c->freqs.litlen[offset]++;
- } else {
- /* Match */
- c->freqs.litlen[257 + deflate_length_slot[length]]++;
- c->freqs.offset[deflate_get_offset_slot(c, offset)]++;
- }
- cur_node += length;
- } while (cur_node != end_node);
-}
-
-/* Set the current cost model from the codeword lengths specified in @lens. */
-static void
-deflate_set_costs_from_codes(struct libdeflate_compressor *c,
- const struct deflate_lens *lens)
-{
- unsigned i;
-
- /* Literals */
- for (i = 0; i < DEFLATE_NUM_LITERALS; i++) {
- u32 bits = (lens->litlen[i] ? lens->litlen[i] : LITERAL_NOSTAT_BITS);
- c->p.n.costs.literal[i] = bits << COST_SHIFT;
- }
-
- /* Lengths */
- for (i = DEFLATE_MIN_MATCH_LEN; i <= DEFLATE_MAX_MATCH_LEN; i++) {
- unsigned length_slot = deflate_length_slot[i];
- unsigned litlen_sym = 257 + length_slot;
- u32 bits = (lens->litlen[litlen_sym] ? lens->litlen[litlen_sym] : LENGTH_NOSTAT_BITS);
- bits += deflate_extra_length_bits[length_slot];
- c->p.n.costs.length[i] = bits << COST_SHIFT;
- }
-
- /* Offset slots */
- for (i = 0; i < ARRAY_LEN(deflate_offset_slot_base); i++) {
- u32 bits = (lens->offset[i] ? lens->offset[i] : OFFSET_NOSTAT_BITS);
- bits += deflate_extra_offset_bits[i];
- c->p.n.costs.offset_slot[i] = bits << COST_SHIFT;
- }
-}
-
-static forceinline u32
-deflate_default_literal_cost(unsigned literal)
-{
- STATIC_ASSERT(COST_SHIFT == 3);
- /* 66 is 8.25 bits/symbol */
- return 66;
-}
-
-static forceinline u32
-deflate_default_length_slot_cost(unsigned length_slot)
-{
- STATIC_ASSERT(COST_SHIFT == 3);
- /* 60 is 7.5 bits/symbol */
- return 60 + ((u32)deflate_extra_length_bits[length_slot] << COST_SHIFT);
-}
-
-static forceinline u32
-deflate_default_offset_slot_cost(unsigned offset_slot)
-{
- STATIC_ASSERT(COST_SHIFT == 3);
- /* 39 is 4.875 bits/symbol */
- return 39 + ((u32)deflate_extra_offset_bits[offset_slot] << COST_SHIFT);
-}
-
-/*
- * Set default symbol costs for the first block's first optimization pass.
- *
- * It works well to assume that each symbol is equally probable. This results
- * in each symbol being assigned a cost of (-log2(1.0/num_syms) * (1 <<
- * COST_SHIFT)) where 'num_syms' is the number of symbols in the corresponding
- * alphabet. However, we intentionally bias the parse towards matches rather
- * than literals by using a slightly lower default cost for length symbols than
- * for literals. This often improves the compression ratio slightly.
- */
-static void
-deflate_set_default_costs(struct libdeflate_compressor *c)
-{
- unsigned i;
-
- /* Literals */
- for (i = 0; i < DEFLATE_NUM_LITERALS; i++)
- c->p.n.costs.literal[i] = deflate_default_literal_cost(i);
-
- /* Lengths */
- for (i = DEFLATE_MIN_MATCH_LEN; i <= DEFLATE_MAX_MATCH_LEN; i++)
- c->p.n.costs.length[i] = deflate_default_length_slot_cost(
- deflate_length_slot[i]);
-
- /* Offset slots */
- for (i = 0; i < ARRAY_LEN(deflate_offset_slot_base); i++)
- c->p.n.costs.offset_slot[i] = deflate_default_offset_slot_cost(i);
-}
-
-static forceinline void
-deflate_adjust_cost(u32 *cost_p, u32 default_cost)
-{
- *cost_p += ((s32)default_cost - (s32)*cost_p) >> 1;
-}
-
-/*
- * Adjust the costs when beginning a new block.
- *
- * Since the current costs have been optimized for the data, it's undesirable to
- * throw them away and start over with the default costs. At the same time, we
- * don't want to bias the parse by assuming that the next block will be similar
- * to the current block. As a compromise, make the costs closer to the
- * defaults, but don't simply set them to the defaults.
- */
-static void
-deflate_adjust_costs(struct libdeflate_compressor *c)
-{
- unsigned i;
-
- /* Literals */
- for (i = 0; i < DEFLATE_NUM_LITERALS; i++)
- deflate_adjust_cost(&c->p.n.costs.literal[i],
- deflate_default_literal_cost(i));
-
- /* Lengths */
- for (i = DEFLATE_MIN_MATCH_LEN; i <= DEFLATE_MAX_MATCH_LEN; i++)
- deflate_adjust_cost(&c->p.n.costs.length[i],
- deflate_default_length_slot_cost(
- deflate_length_slot[i]));
-
- /* Offset slots */
- for (i = 0; i < ARRAY_LEN(deflate_offset_slot_base); i++)
- deflate_adjust_cost(&c->p.n.costs.offset_slot[i],
- deflate_default_offset_slot_cost(i));
-}
-
-/*
- * Find the minimum-cost path through the graph of possible match/literal
- * choices for this block.
- *
- * We find the minimum cost path from 'c->p.n.optimum_nodes[0]', which
- * represents the node at the beginning of the block, to
- * 'c->p.n.optimum_nodes[block_length]', which represents the node at the end of
- * the block. Edge costs are evaluated using the cost model 'c->p.n.costs'.
- *
- * The algorithm works backwards, starting at the end node and proceeding
- * backwards one node at a time. At each node, the minimum cost to reach the
- * end node is computed and the match/literal choice that begins that path is
- * saved.
- */
-static void
-deflate_find_min_cost_path(struct libdeflate_compressor *c,
- const u32 block_length,
- const struct lz_match *cache_ptr)
-{
- struct deflate_optimum_node *end_node = &c->p.n.optimum_nodes[block_length];
- struct deflate_optimum_node *cur_node = end_node;
-
- cur_node->cost_to_end = 0;
- do {
- unsigned num_matches;
- unsigned literal;
- u32 best_cost_to_end;
-
- cur_node--;
- cache_ptr--;
-
- num_matches = cache_ptr->length;
- literal = cache_ptr->offset;
-
- /* It's always possible to choose a literal. */
- best_cost_to_end = c->p.n.costs.literal[literal] +
- (cur_node + 1)->cost_to_end;
- cur_node->item = ((u32)literal << OPTIMUM_OFFSET_SHIFT) | 1;
-
- /* Also consider matches if there are any. */
- if (num_matches) {
- const struct lz_match *match;
- unsigned len;
- unsigned offset;
- unsigned offset_slot;
- u32 offset_cost;
- u32 cost_to_end;
-
- /*
- * Consider each length from the minimum
- * (DEFLATE_MIN_MATCH_LEN) to the length of the longest
- * match found at this position. For each length, we
- * consider only the smallest offset for which that
- * length is available. Although this is not guaranteed
- * to be optimal due to the possibility of a larger
- * offset costing less than a smaller offset to code,
- * this is a very useful heuristic.
- */
- match = cache_ptr - num_matches;
- len = DEFLATE_MIN_MATCH_LEN;
- do {
- offset = match->offset;
- offset_slot = deflate_get_offset_slot(c, offset);
- offset_cost = c->p.n.costs.offset_slot[offset_slot];
- do {
- cost_to_end = offset_cost +
- c->p.n.costs.length[len] +
- (cur_node + len)->cost_to_end;
- if (cost_to_end < best_cost_to_end) {
- best_cost_to_end = cost_to_end;
- cur_node->item = ((u32)offset << OPTIMUM_OFFSET_SHIFT) | len;
- }
- } while (++len <= match->length);
- } while (++match != cache_ptr);
- cache_ptr -= num_matches;
- }
- cur_node->cost_to_end = best_cost_to_end;
- } while (cur_node != &c->p.n.optimum_nodes[0]);
-}
-
-/*
- * Choose the literal/match sequence to use for the current block. The basic
- * algorithm finds a minimum-cost path through the block's graph of
- * literal/match choices, given a cost model. However, the cost of each symbol
- * is unknown until the Huffman codes have been built, but at the same time the
- * Huffman codes depend on the frequencies of chosen symbols. Consequently,
- * multiple passes must be used to try to approximate an optimal solution. The
- * first pass uses default costs, mixed with the costs from the previous block
- * if any. Later passes use the Huffman codeword lengths from the previous pass
- * as the costs.
- */
-static void
-deflate_optimize_block(struct libdeflate_compressor *c, u32 block_length,
- const struct lz_match *cache_ptr, bool is_first_block)
-{
- unsigned num_passes_remaining = c->p.n.num_optim_passes;
- u32 i;
-
- /* Force the block to really end at the desired length, even if some
- * matches extend beyond it. */
- for (i = block_length; i <= MIN(block_length - 1 + DEFLATE_MAX_MATCH_LEN,
- ARRAY_LEN(c->p.n.optimum_nodes) - 1); i++)
- c->p.n.optimum_nodes[i].cost_to_end = 0x80000000;
-
- /* Set the initial costs. */
- if (is_first_block)
- deflate_set_default_costs(c);
- else
- deflate_adjust_costs(c);
-
- for (;;) {
- /* Find the minimum cost path for this pass. */
- deflate_find_min_cost_path(c, block_length, cache_ptr);
-
- /* Compute frequencies of the chosen symbols. */
- deflate_reset_symbol_frequencies(c);
- deflate_tally_item_list(c, block_length);
-
- if (--num_passes_remaining == 0)
- break;
-
- /* At least one optimization pass remains; update the costs. */
- deflate_make_huffman_codes(&c->freqs, &c->codes);
- deflate_set_costs_from_codes(c, &c->codes.lens);
- }
-}
-
-/*
- * This is the "near-optimal" DEFLATE compressor. It computes the optimal
- * representation of each DEFLATE block using a minimum-cost path search over
- * the graph of possible match/literal choices for that block, assuming a
- * certain cost for each Huffman symbol.
- *
- * For several reasons, the end result is not guaranteed to be optimal:
- *
- * - Nonoptimal choice of blocks
- * - Heuristic limitations on which matches are actually considered
- * - Symbol costs are unknown until the symbols have already been chosen
- * (so iterative optimization must be used)
- */
-static size_t
-deflate_compress_near_optimal(struct libdeflate_compressor * restrict c,
- const u8 * restrict in, size_t in_nbytes,
- u8 * restrict out, size_t out_nbytes_avail)
-{
- const u8 *in_next = in;
- const u8 *in_end = in_next + in_nbytes;
- struct deflate_output_bitstream os;
- const u8 *in_cur_base = in_next;
- const u8 *in_next_slide = in_next + MIN(in_end - in_next, MATCHFINDER_WINDOW_SIZE);
- unsigned max_len = DEFLATE_MAX_MATCH_LEN;
- unsigned nice_len = MIN(c->nice_match_length, max_len);
- u32 next_hashes[2] = {0, 0};
-
- deflate_init_output(&os, out, out_nbytes_avail);
- bt_matchfinder_init(&c->p.n.bt_mf);
-
- do {
- /* Starting a new DEFLATE block. */
-
- struct lz_match *cache_ptr = c->p.n.match_cache;
- const u8 * const in_block_begin = in_next;
- const u8 * const in_max_block_end =
- in_next + MIN(in_end - in_next, SOFT_MAX_BLOCK_LENGTH);
- const u8 *next_observation = in_next;
-
- init_block_split_stats(&c->split_stats);
-
- /*
- * Find matches until we decide to end the block. We end the
- * block if any of the following is true:
- *
- * (1) Maximum block length has been reached
- * (2) Match catch may overflow.
- * (3) Block split heuristic says to split now.
- */
- do {
- struct lz_match *matches;
- unsigned best_len;
-
- /* Slide the window forward if needed. */
- if (in_next == in_next_slide) {
- bt_matchfinder_slide_window(&c->p.n.bt_mf);
- in_cur_base = in_next;
- in_next_slide = in_next + MIN(in_end - in_next,
- MATCHFINDER_WINDOW_SIZE);
- }
-
- /* Decrease the maximum and nice match lengths if we're
- * approaching the end of the input buffer. */
- if (unlikely(max_len > in_end - in_next)) {
- max_len = in_end - in_next;
- nice_len = MIN(nice_len, max_len);
- }
-
- /*
- * Find matches with the current position using the
- * binary tree matchfinder and save them in
- * 'match_cache'.
- *
- * Note: the binary tree matchfinder is more suited for
- * optimal parsing than the hash chain matchfinder. The
- * reasons for this include:
- *
- * - The binary tree matchfinder can find more matches
- * in the same number of steps.
- * - One of the major advantages of hash chains is that
- * skipping positions (not searching for matches at
- * them) is faster; however, with optimal parsing we
- * search for matches at almost all positions, so this
- * advantage of hash chains is negated.
- */
- matches = cache_ptr;
- best_len = 0;
- if (likely(max_len >= BT_MATCHFINDER_REQUIRED_NBYTES)) {
- cache_ptr = bt_matchfinder_get_matches(&c->p.n.bt_mf,
- in_cur_base,
- in_next - in_cur_base,
- max_len,
- nice_len,
- c->max_search_depth,
- next_hashes,
- &best_len,
- matches);
- }
-
- if (in_next >= next_observation) {
- if (best_len >= 4) {
- observe_match(&c->split_stats, best_len);
- next_observation = in_next + best_len;
- } else {
- observe_literal(&c->split_stats, *in_next);
- next_observation = in_next + 1;
- }
- }
-
- cache_ptr->length = cache_ptr - matches;
- cache_ptr->offset = *in_next;
- in_next++;
- cache_ptr++;
-
- /*
- * If there was a very long match found, don't cache any
- * matches for the bytes covered by that match. This
- * avoids degenerate behavior when compressing highly
- * redundant data, where the number of matches can be
- * very large.
- *
- * This heuristic doesn't actually hurt the compression
- * ratio very much. If there's a long match, then the
- * data must be highly compressible, so it doesn't
- * matter much what we do.
- */
- if (best_len >= DEFLATE_MIN_MATCH_LEN && best_len >= nice_len) {
- --best_len;
- do {
- if (in_next == in_next_slide) {
- bt_matchfinder_slide_window(&c->p.n.bt_mf);
- in_cur_base = in_next;
- in_next_slide = in_next + MIN(in_end - in_next,
- MATCHFINDER_WINDOW_SIZE);
- }
- if (unlikely(max_len > in_end - in_next)) {
- max_len = in_end - in_next;
- nice_len = MIN(nice_len, max_len);
- }
- if (max_len >= BT_MATCHFINDER_REQUIRED_NBYTES) {
- bt_matchfinder_skip_position(&c->p.n.bt_mf,
- in_cur_base,
- in_next - in_cur_base,
- nice_len,
- c->max_search_depth,
- next_hashes);
- }
- cache_ptr->length = 0;
- cache_ptr->offset = *in_next;
- in_next++;
- cache_ptr++;
- } while (--best_len);
- }
- } while (in_next < in_max_block_end &&
- cache_ptr < &c->p.n.match_cache[CACHE_LENGTH] &&
- !should_end_block(&c->split_stats, in_block_begin, in_next, in_end));
-
- /* All the matches for this block have been cached. Now choose
- * the sequence of items to output and flush the block. */
- deflate_optimize_block(c, in_next - in_block_begin, cache_ptr,
- in_block_begin == in);
- deflate_flush_block(c, &os, in_block_begin, in_next - in_block_begin,
- in_next == in_end, true);
- } while (in_next != in_end);
-
- return deflate_flush_output(&os);
-}
-
-#endif /* SUPPORT_NEAR_OPTIMAL_PARSING */
-
-/* Initialize c->offset_slot_fast. */
-static void
-deflate_init_offset_slot_fast(struct libdeflate_compressor *c)
-{
- unsigned offset_slot;
- unsigned offset;
- unsigned offset_end;
-
- for (offset_slot = 0;
- offset_slot < ARRAY_LEN(deflate_offset_slot_base);
- offset_slot++)
- {
- offset = deflate_offset_slot_base[offset_slot];
- #if USE_FULL_OFFSET_SLOT_FAST
- offset_end = offset + (1 << deflate_extra_offset_bits[offset_slot]);
- do {
- c->offset_slot_fast[offset] = offset_slot;
- } while (++offset != offset_end);
- #else
- if (offset <= 256) {
- offset_end = offset + (1 << deflate_extra_offset_bits[offset_slot]);
- do {
- c->offset_slot_fast[offset - 1] = offset_slot;
- } while (++offset != offset_end);
- } else {
- offset_end = offset + (1 << deflate_extra_offset_bits[offset_slot]);
- do {
- c->offset_slot_fast[256 + ((offset - 1) >> 7)] = offset_slot;
- } while ((offset += (1 << 7)) != offset_end);
- }
- #endif
- }
-}
-
-LIBDEFLATEEXPORT struct libdeflate_compressor * LIBDEFLATEAPI
-libdeflate_alloc_compressor(int compression_level)
-{
- struct libdeflate_compressor *c;
- size_t size = offsetof(struct libdeflate_compressor, p);
-
- if (compression_level < 0 || compression_level > 12)
- return NULL;
-
-#if SUPPORT_NEAR_OPTIMAL_PARSING
- if (compression_level >= 8)
- size += sizeof(c->p.n);
- else if (compression_level >= 1)
- size += sizeof(c->p.g);
-#else
- if (compression_level >= 1)
- size += sizeof(c->p.g);
-#endif
-
- c = libdeflate_aligned_malloc(MATCHFINDER_MEM_ALIGNMENT, size);
- if (!c)
- return NULL;
-
- c->compression_level = compression_level;
-
- /*
- * The higher the compression level, the more we should bother trying to
- * compress very small inputs.
- */
- c->min_size_to_compress = 56 - (compression_level * 4);
-
- switch (compression_level) {
- case 0:
- c->impl = deflate_compress_none;
- break;
- case 1:
- c->impl = deflate_compress_greedy;
- c->max_search_depth = 2;
- c->nice_match_length = 8;
- break;
- case 2:
- c->impl = deflate_compress_greedy;
- c->max_search_depth = 6;
- c->nice_match_length = 10;
- break;
- case 3:
- c->impl = deflate_compress_greedy;
- c->max_search_depth = 12;
- c->nice_match_length = 14;
- break;
- case 4:
- c->impl = deflate_compress_greedy;
- c->max_search_depth = 24;
- c->nice_match_length = 24;
- break;
- case 5:
- c->impl = deflate_compress_lazy;
- c->max_search_depth = 20;
- c->nice_match_length = 30;
- break;
- case 6:
- c->impl = deflate_compress_lazy;
- c->max_search_depth = 40;
- c->nice_match_length = 65;
- break;
- case 7:
- c->impl = deflate_compress_lazy;
- c->max_search_depth = 100;
- c->nice_match_length = 130;
- break;
-#if SUPPORT_NEAR_OPTIMAL_PARSING
- case 8:
- c->impl = deflate_compress_near_optimal;
- c->max_search_depth = 12;
- c->nice_match_length = 20;
- c->p.n.num_optim_passes = 1;
- break;
- case 9:
- c->impl = deflate_compress_near_optimal;
- c->max_search_depth = 16;
- c->nice_match_length = 26;
- c->p.n.num_optim_passes = 2;
- break;
- case 10:
- c->impl = deflate_compress_near_optimal;
- c->max_search_depth = 30;
- c->nice_match_length = 50;
- c->p.n.num_optim_passes = 2;
- break;
- case 11:
- c->impl = deflate_compress_near_optimal;
- c->max_search_depth = 60;
- c->nice_match_length = 80;
- c->p.n.num_optim_passes = 3;
- break;
- default:
- c->impl = deflate_compress_near_optimal;
- c->max_search_depth = 100;
- c->nice_match_length = 133;
- c->p.n.num_optim_passes = 4;
- break;
-#else
- case 8:
- c->impl = deflate_compress_lazy;
- c->max_search_depth = 150;
- c->nice_match_length = 200;
- break;
- default:
- c->impl = deflate_compress_lazy;
- c->max_search_depth = 200;
- c->nice_match_length = DEFLATE_MAX_MATCH_LEN;
- break;
-#endif
- }
-
- deflate_init_offset_slot_fast(c);
- deflate_init_static_codes(c);
-
- return c;
-}
-
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_deflate_compress(struct libdeflate_compressor *c,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail)
-{
- if (unlikely(out_nbytes_avail < OUTPUT_END_PADDING))
- return 0;
-
- /* For extremely small inputs just use a single uncompressed block. */
- if (unlikely(in_nbytes < c->min_size_to_compress)) {
- struct deflate_output_bitstream os;
- deflate_init_output(&os, out, out_nbytes_avail);
- if (in_nbytes == 0)
- in = &os; /* Avoid passing NULL to memcpy() */
- deflate_write_uncompressed_block(&os, in, in_nbytes, true);
- return deflate_flush_output(&os);
- }
-
- return (*c->impl)(c, in, in_nbytes, out, out_nbytes_avail);
-}
-
-LIBDEFLATEEXPORT void LIBDEFLATEAPI
-libdeflate_free_compressor(struct libdeflate_compressor *c)
-{
- libdeflate_aligned_free(c);
-}
-
-unsigned int
-deflate_get_compression_level(struct libdeflate_compressor *c)
-{
- return c->compression_level;
-}
-
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_deflate_compress_bound(struct libdeflate_compressor *c,
- size_t in_nbytes)
-{
- /*
- * The worst case is all uncompressed blocks where one block has length
- * <= MIN_BLOCK_LENGTH and the others have length MIN_BLOCK_LENGTH.
- * Each uncompressed block has 5 bytes of overhead: 1 for BFINAL, BTYPE,
- * and alignment to a byte boundary; 2 for LEN; and 2 for NLEN.
- */
- size_t max_num_blocks = MAX(DIV_ROUND_UP(in_nbytes, MIN_BLOCK_LENGTH), 1);
- return (5 * max_num_blocks) + in_nbytes + 1 + OUTPUT_END_PADDING;
-}
diff --git a/util/compress/libdeflate/lib/deflate_compress.h b/util/compress/libdeflate/lib/deflate_compress.h
deleted file mode 100644
index d97d019ae..000000000
--- a/util/compress/libdeflate/lib/deflate_compress.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef LIB_DEFLATE_COMPRESS_H
-#define LIB_DEFLATE_COMPRESS_H
-
-#include "lib_common.h"
-
-/* DEFLATE compression is private to deflate_compress.c, but we do need to be
- * able to query the compression level for zlib and gzip header generation. */
-
-struct libdeflate_compressor;
-
-unsigned int deflate_get_compression_level(struct libdeflate_compressor *c);
-
-#endif /* LIB_DEFLATE_COMPRESS_H */
diff --git a/util/compress/libdeflate/lib/deflate_constants.h b/util/compress/libdeflate/lib/deflate_constants.h
deleted file mode 100644
index a10b57dec..000000000
--- a/util/compress/libdeflate/lib/deflate_constants.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * deflate_constants.h - constants for the DEFLATE compression format
- */
-
-#ifndef LIB_DEFLATE_CONSTANTS_H
-#define LIB_DEFLATE_CONSTANTS_H
-
-/* Valid block types */
-#define DEFLATE_BLOCKTYPE_UNCOMPRESSED 0
-#define DEFLATE_BLOCKTYPE_STATIC_HUFFMAN 1
-#define DEFLATE_BLOCKTYPE_DYNAMIC_HUFFMAN 2
-
-/* Minimum and maximum supported match lengths (in bytes) */
-#define DEFLATE_MIN_MATCH_LEN 3
-#define DEFLATE_MAX_MATCH_LEN 258
-
-/* Minimum and maximum supported match offsets (in bytes) */
-#define DEFLATE_MIN_MATCH_OFFSET 1
-#define DEFLATE_MAX_MATCH_OFFSET 32768
-
-#define DEFLATE_MAX_WINDOW_SIZE 32768
-
-/* Number of symbols in each Huffman code. Note: for the literal/length
- * and offset codes, these are actually the maximum values; a given block
- * might use fewer symbols. */
-#define DEFLATE_NUM_PRECODE_SYMS 19
-#define DEFLATE_NUM_LITLEN_SYMS 288
-#define DEFLATE_NUM_OFFSET_SYMS 32
-
-/* The maximum number of symbols across all codes */
-#define DEFLATE_MAX_NUM_SYMS 288
-
-/* Division of symbols in the literal/length code */
-#define DEFLATE_NUM_LITERALS 256
-#define DEFLATE_END_OF_BLOCK 256
-#define DEFLATE_NUM_LEN_SYMS 31
-
-/* Maximum codeword length, in bits, within each Huffman code */
-#define DEFLATE_MAX_PRE_CODEWORD_LEN 7
-#define DEFLATE_MAX_LITLEN_CODEWORD_LEN 15
-#define DEFLATE_MAX_OFFSET_CODEWORD_LEN 15
-
-/* The maximum codeword length across all codes */
-#define DEFLATE_MAX_CODEWORD_LEN 15
-
-/* Maximum possible overrun when decoding codeword lengths */
-#define DEFLATE_MAX_LENS_OVERRUN 137
-
-/*
- * Maximum number of extra bits that may be required to represent a match
- * length or offset.
- *
- * TODO: are we going to have full DEFLATE64 support? If so, up to 16
- * length bits must be supported.
- */
-#define DEFLATE_MAX_EXTRA_LENGTH_BITS 5
-#define DEFLATE_MAX_EXTRA_OFFSET_BITS 14
-
-/* The maximum number of bits in which a match can be represented. This
- * is the absolute worst case, which assumes the longest possible Huffman
- * codewords and the maximum numbers of extra bits. */
-#define DEFLATE_MAX_MATCH_BITS \
- (DEFLATE_MAX_LITLEN_CODEWORD_LEN + DEFLATE_MAX_EXTRA_LENGTH_BITS + \
- DEFLATE_MAX_OFFSET_CODEWORD_LEN + DEFLATE_MAX_EXTRA_OFFSET_BITS)
-
-#endif /* LIB_DEFLATE_CONSTANTS_H */
diff --git a/util/compress/libdeflate/lib/deflate_decompress.c b/util/compress/libdeflate/lib/deflate_decompress.c
deleted file mode 100644
index 1990e74d6..000000000
--- a/util/compress/libdeflate/lib/deflate_decompress.c
+++ /dev/null
@@ -1,1000 +0,0 @@
-/*
- * deflate_decompress.c - a decompressor for DEFLATE
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * ---------------------------------------------------------------------------
- *
- * This is a highly optimized DEFLATE decompressor. When compiled with gcc on
- * x86_64, it decompresses data in about 52% of the time of zlib (48% if BMI2
- * instructions are available). On other architectures it should still be
- * significantly faster than zlib, but the difference may be smaller.
- *
- * Why this is faster than zlib's implementation:
- *
- * - Word accesses rather than byte accesses when reading input
- * - Word accesses rather than byte accesses when copying matches
- * - Faster Huffman decoding combined with various DEFLATE-specific tricks
- * - Larger bitbuffer variable that doesn't need to be filled as often
- * - Other optimizations to remove unnecessary branches
- * - Only full-buffer decompression is supported, so the code doesn't need to
- * support stopping and resuming decompression.
- * - On x86_64, compile a version of the decompression routine using BMI2
- * instructions and use it automatically at runtime when supported.
- */
-
-#include <limits.h>
-
-#include "deflate_constants.h"
-#include "unaligned.h"
-
-#include "libdeflate.h"
-
-/*
- * If the expression passed to SAFETY_CHECK() evaluates to false, then the
- * decompression routine immediately returns LIBDEFLATE_BAD_DATA, indicating the
- * compressed data is invalid.
- *
- * Theoretically, these checks could be disabled for specialized applications
- * where all input to the decompressor will be trusted.
- */
-#if 0
-# pragma message("UNSAFE DECOMPRESSION IS ENABLED. THIS MUST ONLY BE USED IF THE DECOMPRESSOR INPUT WILL ALWAYS BE TRUSTED!")
-# define SAFETY_CHECK(expr) (void)(expr)
-#else
-# define SAFETY_CHECK(expr) if (unlikely(!(expr))) return LIBDEFLATE_BAD_DATA
-#endif
-
-/*
- * Each TABLEBITS number is the base-2 logarithm of the number of entries in the
- * main portion of the corresponding decode table. Each number should be large
- * enough to ensure that for typical data, the vast majority of symbols can be
- * decoded by a direct lookup of the next TABLEBITS bits of compressed data.
- * However, this must be balanced against the fact that a larger table requires
- * more memory and requires more time to fill.
- *
- * Note: you cannot change a TABLEBITS number without also changing the
- * corresponding ENOUGH number!
- */
-#define PRECODE_TABLEBITS 7
-#define LITLEN_TABLEBITS 10
-#define OFFSET_TABLEBITS 8
-
-/*
- * Each ENOUGH number is the maximum number of decode table entries that may be
- * required for the corresponding Huffman code, including the main table and all
- * subtables. Each number depends on three parameters:
- *
- * (1) the maximum number of symbols in the code (DEFLATE_NUM_*_SYMS)
- * (2) the number of main table bits (the TABLEBITS numbers defined above)
- * (3) the maximum allowed codeword length (DEFLATE_MAX_*_CODEWORD_LEN)
- *
- * The ENOUGH numbers were computed using the utility program 'enough' from
- * zlib. This program enumerates all possible relevant Huffman codes to find
- * the worst-case usage of decode table entries.
- */
-#define PRECODE_ENOUGH 128 /* enough 19 7 7 */
-#define LITLEN_ENOUGH 1334 /* enough 288 10 15 */
-#define OFFSET_ENOUGH 402 /* enough 32 8 15 */
-
-/*
- * Type for codeword lengths.
- */
-typedef u8 len_t;
-
-/*
- * The main DEFLATE decompressor structure. Since this implementation only
- * supports full buffer decompression, this structure does not store the entire
- * decompression state, but rather only some arrays that are too large to
- * comfortably allocate on the stack.
- */
-struct libdeflate_decompressor {
-
- /*
- * The arrays aren't all needed at the same time. 'precode_lens' and
- * 'precode_decode_table' are unneeded after 'lens' has been filled.
- * Furthermore, 'lens' need not be retained after building the litlen
- * and offset decode tables. In fact, 'lens' can be in union with
- * 'litlen_decode_table' provided that 'offset_decode_table' is separate
- * and is built first.
- */
-
- union {
- len_t precode_lens[DEFLATE_NUM_PRECODE_SYMS];
-
- struct {
- len_t lens[DEFLATE_NUM_LITLEN_SYMS +
- DEFLATE_NUM_OFFSET_SYMS +
- DEFLATE_MAX_LENS_OVERRUN];
-
- u32 precode_decode_table[PRECODE_ENOUGH];
- } l;
-
- u32 litlen_decode_table[LITLEN_ENOUGH];
- } u;
-
- u32 offset_decode_table[OFFSET_ENOUGH];
-
- /* used only during build_decode_table() */
- u16 sorted_syms[DEFLATE_MAX_NUM_SYMS];
-
- bool static_codes_loaded;
-};
-
-/*****************************************************************************
- * Input bitstream *
- *****************************************************************************/
-
-/*
- * The state of the "input bitstream" consists of the following variables:
- *
- * - in_next: pointer to the next unread byte in the input buffer
- *
- * - in_end: pointer just past the end of the input buffer
- *
- * - bitbuf: a word-sized variable containing bits that have been read from
- * the input buffer. The buffered bits are right-aligned
- * (they're the low-order bits).
- *
- * - bitsleft: number of bits in 'bitbuf' that are valid.
- *
- * To make it easier for the compiler to optimize the code by keeping variables
- * in registers, these are declared as normal variables and manipulated using
- * macros.
- */
-
-/*
- * The type for the bitbuffer variable ('bitbuf' described above). For best
- * performance, this should have size equal to a machine word.
- *
- * 64-bit platforms have a significant advantage: they get a bigger bitbuffer
- * which they have to fill less often.
- */
-typedef machine_word_t bitbuf_t;
-
-/*
- * Number of bits the bitbuffer variable can hold.
- *
- * This is one less than the obvious value because of the optimized arithmetic
- * in FILL_BITS_WORDWISE() that leaves 'bitsleft' in the range
- * [WORDBITS - 8, WORDBITS - 1] rather than [WORDBITS - 7, WORDBITS].
- */
-#define BITBUF_NBITS (8 * sizeof(bitbuf_t) - 1)
-
-/*
- * The maximum number of bits that can be ensured in the bitbuffer variable,
- * i.e. the maximum value of 'n' that can be passed ENSURE_BITS(n). The decoder
- * only reads whole bytes from memory, so this is the lowest value of 'bitsleft'
- * at which another byte cannot be read without first consuming some bits.
- */
-#define MAX_ENSURE (BITBUF_NBITS - 7)
-
-/*
- * Evaluates to true if 'n' is a valid argument to ENSURE_BITS(n), or false if
- * 'n' is too large to be passed to ENSURE_BITS(n). Note: if 'n' is a compile
- * time constant, then this expression will be a compile-type constant.
- * Therefore, CAN_ENSURE() can be used choose between alternative
- * implementations at compile time.
- */
-#define CAN_ENSURE(n) ((n) <= MAX_ENSURE)
-
-/*
- * Fill the bitbuffer variable, reading one byte at a time.
- *
- * If we would overread the input buffer, we just don't read anything, leaving
- * the bits zeroed but marking them filled. This simplifies the decompressor
- * because it removes the need to distinguish between real overreads and
- * overreads that occur only because of the decompressor's own lookahead.
- *
- * The disadvantage is that real overreads are not detected immediately.
- * However, this is safe because the decompressor is still guaranteed to make
- * forward progress when presented never-ending 0 bits. In an existing block
- * output will be getting generated, whereas new blocks can only be uncompressed
- * (since the type code for uncompressed blocks is 0), for which we check for
- * previous overread. But even if we didn't check, uncompressed blocks would
- * fail to validate because LEN would not equal ~NLEN. So the decompressor will
- * eventually either detect that the output buffer is full, or detect invalid
- * input, or finish the final block.
- */
-#define FILL_BITS_BYTEWISE() \
-do { \
- if (likely(in_next != in_end)) \
- bitbuf |= (bitbuf_t)*in_next++ << bitsleft; \
- else \
- overrun_count++; \
- bitsleft += 8; \
-} while (bitsleft <= BITBUF_NBITS - 8)
-
-/*
- * Fill the bitbuffer variable by reading the next word from the input buffer
- * and branchlessly updating 'in_next' and 'bitsleft' based on how many bits
- * were filled. This can be significantly faster than FILL_BITS_BYTEWISE().
- * However, for this to work correctly, the word must be interpreted in
- * little-endian format. In addition, the memory access may be unaligned.
- * Therefore, this method is most efficient on little-endian architectures that
- * support fast unaligned access, such as x86 and x86_64.
- *
- * For faster updating of 'bitsleft', we consider the bitbuffer size in bits to
- * be 1 less than the word size and therefore be all 1 bits. Then the number of
- * bits filled is the value of the 0 bits in position >= 3 when changed to 1.
- * E.g. if words are 64 bits and bitsleft = 16 = b010000 then we refill b101000
- * = 40 bits = 5 bytes. This uses only 4 operations to update 'in_next' and
- * 'bitsleft': one each of +, ^, >>, and |. (Not counting operations the
- * compiler optimizes out.) In contrast, the alternative of:
- *
- * in_next += (BITBUF_NBITS - bitsleft) >> 3;
- * bitsleft += (BITBUF_NBITS - bitsleft) & ~7;
- *
- * (where BITBUF_NBITS would be WORDBITS rather than WORDBITS - 1) would on
- * average refill an extra bit, but uses 5 operations: two +, and one each of
- * -, >>, and &. Also the - and & must be completed before 'bitsleft' can be
- * updated, while the current solution updates 'bitsleft' with no dependencies.
- */
-#define FILL_BITS_WORDWISE() \
-do { \
- /* BITBUF_NBITS must be all 1's in binary, see above */ \
- STATIC_ASSERT((BITBUF_NBITS & (BITBUF_NBITS + 1)) == 0);\
- \
- bitbuf |= get_unaligned_leword(in_next) << bitsleft; \
- in_next += (bitsleft ^ BITBUF_NBITS) >> 3; \
- bitsleft |= BITBUF_NBITS & ~7; \
-} while (0)
-
-/*
- * Does the bitbuffer variable currently contain at least 'n' bits?
- */
-#define HAVE_BITS(n) (bitsleft >= (n))
-
-/*
- * Load more bits from the input buffer until the specified number of bits is
- * present in the bitbuffer variable. 'n' cannot be too large; see MAX_ENSURE
- * and CAN_ENSURE().
- */
-#define ENSURE_BITS(n) \
-if (!HAVE_BITS(n)) { \
- if (CPU_IS_LITTLE_ENDIAN() && \
- UNALIGNED_ACCESS_IS_FAST && \
- likely(in_end - in_next >= sizeof(bitbuf_t))) \
- FILL_BITS_WORDWISE(); \
- else \
- FILL_BITS_BYTEWISE(); \
-}
-
-/*
- * Return the next 'n' bits from the bitbuffer variable without removing them.
- */
-#define BITS(n) ((u32)bitbuf & (((u32)1 << (n)) - 1))
-
-/*
- * Remove the next 'n' bits from the bitbuffer variable.
- */
-#define REMOVE_BITS(n) (bitbuf >>= (n), bitsleft -= (n))
-
-/*
- * Remove and return the next 'n' bits from the bitbuffer variable.
- */
-#define POP_BITS(n) (tmp32 = BITS(n), REMOVE_BITS(n), tmp32)
-
-/*
- * Verify that the input buffer hasn't been overread, then align the input to
- * the next byte boundary, discarding any remaining bits in the current byte.
- *
- * Note that if the bitbuffer variable currently contains more than 7 bits, then
- * we must rewind 'in_next', effectively putting those bits back. Only the bits
- * in what would be the "current" byte if we were reading one byte at a time can
- * be actually discarded.
- */
-#define ALIGN_INPUT() \
-do { \
- SAFETY_CHECK(overrun_count <= (bitsleft >> 3)); \
- in_next -= (bitsleft >> 3) - overrun_count; \
- overrun_count = 0; \
- bitbuf = 0; \
- bitsleft = 0; \
-} while(0)
-
-/*
- * Read a 16-bit value from the input. This must have been preceded by a call
- * to ALIGN_INPUT(), and the caller must have already checked for overrun.
- */
-#define READ_U16() (tmp16 = get_unaligned_le16(in_next), in_next += 2, tmp16)
-
-/*****************************************************************************
- * Huffman decoding *
- *****************************************************************************/
-
-/*
- * A decode table for order TABLEBITS consists of a main table of (1 <<
- * TABLEBITS) entries followed by a variable number of subtables.
- *
- * The decoding algorithm takes the next TABLEBITS bits of compressed data and
- * uses them as an index into the decode table. The resulting entry is either a
- * "direct entry", meaning that it contains the value desired, or a "subtable
- * pointer", meaning that the entry references a subtable that must be indexed
- * using more bits of the compressed data to decode the symbol.
- *
- * Each decode table (a main table along with its subtables, if any) is
- * associated with a Huffman code. Logically, the result of a decode table
- * lookup is a symbol from the alphabet from which the corresponding Huffman
- * code was constructed. A symbol with codeword length n <= TABLEBITS is
- * associated with 2**(TABLEBITS - n) direct entries in the table, whereas a
- * symbol with codeword length n > TABLEBITS is associated with one or more
- * subtable entries.
- *
- * On top of this basic design, we implement several optimizations:
- *
- * - We store the length of each codeword directly in each of its decode table
- * entries. This allows the codeword length to be produced without indexing
- * an additional table.
- *
- * - When beneficial, we don't store the Huffman symbol itself, but instead data
- * generated from it. For example, when decoding an offset symbol in DEFLATE,
- * it's more efficient if we can decode the offset base and number of extra
- * offset bits directly rather than decoding the offset symbol and then
- * looking up both of those values in an additional table or tables.
- *
- * The size of each decode table entry is 32 bits, which provides slightly
- * better performance than 16-bit entries on 32 and 64 bit processers, provided
- * that the table doesn't get so large that it takes up too much memory and
- * starts generating cache misses. The bits of each decode table entry are
- * defined as follows:
- *
- * - Bits 30 -- 31: flags (see below)
- * - Bits 8 -- 29: decode result: a Huffman symbol or related data
- * - Bits 0 -- 7: codeword length
- */
-
-/*
- * This flag is set in all main decode table entries that represent subtable
- * pointers.
- */
-#define HUFFDEC_SUBTABLE_POINTER 0x80000000
-
-/*
- * This flag is set in all entries in the litlen decode table that represent
- * literals.
- */
-#define HUFFDEC_LITERAL 0x40000000
-
-/* Mask for extracting the codeword length from a decode table entry. */
-#define HUFFDEC_LENGTH_MASK 0xFF
-
-/* Shift to extract the decode result from a decode table entry. */
-#define HUFFDEC_RESULT_SHIFT 8
-
-/* Shift a decode result into its position in the decode table entry. */
-#define HUFFDEC_RESULT_ENTRY(result) ((u32)(result) << HUFFDEC_RESULT_SHIFT)
-
-/* The decode result for each precode symbol. There is no special optimization
- * for the precode; the decode result is simply the symbol value. */
-static const u32 precode_decode_results[DEFLATE_NUM_PRECODE_SYMS] = {
-#define ENTRY(presym) HUFFDEC_RESULT_ENTRY(presym)
- ENTRY(0) , ENTRY(1) , ENTRY(2) , ENTRY(3) ,
- ENTRY(4) , ENTRY(5) , ENTRY(6) , ENTRY(7) ,
- ENTRY(8) , ENTRY(9) , ENTRY(10) , ENTRY(11) ,
- ENTRY(12) , ENTRY(13) , ENTRY(14) , ENTRY(15) ,
- ENTRY(16) , ENTRY(17) , ENTRY(18) ,
-#undef ENTRY
-};
-
-/* The decode result for each litlen symbol. For literals, this is the literal
- * value itself and the HUFFDEC_LITERAL flag. For lengths, this is the length
- * base and the number of extra length bits. */
-static const u32 litlen_decode_results[DEFLATE_NUM_LITLEN_SYMS] = {
-
- /* Literals */
-#define ENTRY(literal) (HUFFDEC_LITERAL | HUFFDEC_RESULT_ENTRY(literal))
- ENTRY(0) , ENTRY(1) , ENTRY(2) , ENTRY(3) ,
- ENTRY(4) , ENTRY(5) , ENTRY(6) , ENTRY(7) ,
- ENTRY(8) , ENTRY(9) , ENTRY(10) , ENTRY(11) ,
- ENTRY(12) , ENTRY(13) , ENTRY(14) , ENTRY(15) ,
- ENTRY(16) , ENTRY(17) , ENTRY(18) , ENTRY(19) ,
- ENTRY(20) , ENTRY(21) , ENTRY(22) , ENTRY(23) ,
- ENTRY(24) , ENTRY(25) , ENTRY(26) , ENTRY(27) ,
- ENTRY(28) , ENTRY(29) , ENTRY(30) , ENTRY(31) ,
- ENTRY(32) , ENTRY(33) , ENTRY(34) , ENTRY(35) ,
- ENTRY(36) , ENTRY(37) , ENTRY(38) , ENTRY(39) ,
- ENTRY(40) , ENTRY(41) , ENTRY(42) , ENTRY(43) ,
- ENTRY(44) , ENTRY(45) , ENTRY(46) , ENTRY(47) ,
- ENTRY(48) , ENTRY(49) , ENTRY(50) , ENTRY(51) ,
- ENTRY(52) , ENTRY(53) , ENTRY(54) , ENTRY(55) ,
- ENTRY(56) , ENTRY(57) , ENTRY(58) , ENTRY(59) ,
- ENTRY(60) , ENTRY(61) , ENTRY(62) , ENTRY(63) ,
- ENTRY(64) , ENTRY(65) , ENTRY(66) , ENTRY(67) ,
- ENTRY(68) , ENTRY(69) , ENTRY(70) , ENTRY(71) ,
- ENTRY(72) , ENTRY(73) , ENTRY(74) , ENTRY(75) ,
- ENTRY(76) , ENTRY(77) , ENTRY(78) , ENTRY(79) ,
- ENTRY(80) , ENTRY(81) , ENTRY(82) , ENTRY(83) ,
- ENTRY(84) , ENTRY(85) , ENTRY(86) , ENTRY(87) ,
- ENTRY(88) , ENTRY(89) , ENTRY(90) , ENTRY(91) ,
- ENTRY(92) , ENTRY(93) , ENTRY(94) , ENTRY(95) ,
- ENTRY(96) , ENTRY(97) , ENTRY(98) , ENTRY(99) ,
- ENTRY(100) , ENTRY(101) , ENTRY(102) , ENTRY(103) ,
- ENTRY(104) , ENTRY(105) , ENTRY(106) , ENTRY(107) ,
- ENTRY(108) , ENTRY(109) , ENTRY(110) , ENTRY(111) ,
- ENTRY(112) , ENTRY(113) , ENTRY(114) , ENTRY(115) ,
- ENTRY(116) , ENTRY(117) , ENTRY(118) , ENTRY(119) ,
- ENTRY(120) , ENTRY(121) , ENTRY(122) , ENTRY(123) ,
- ENTRY(124) , ENTRY(125) , ENTRY(126) , ENTRY(127) ,
- ENTRY(128) , ENTRY(129) , ENTRY(130) , ENTRY(131) ,
- ENTRY(132) , ENTRY(133) , ENTRY(134) , ENTRY(135) ,
- ENTRY(136) , ENTRY(137) , ENTRY(138) , ENTRY(139) ,
- ENTRY(140) , ENTRY(141) , ENTRY(142) , ENTRY(143) ,
- ENTRY(144) , ENTRY(145) , ENTRY(146) , ENTRY(147) ,
- ENTRY(148) , ENTRY(149) , ENTRY(150) , ENTRY(151) ,
- ENTRY(152) , ENTRY(153) , ENTRY(154) , ENTRY(155) ,
- ENTRY(156) , ENTRY(157) , ENTRY(158) , ENTRY(159) ,
- ENTRY(160) , ENTRY(161) , ENTRY(162) , ENTRY(163) ,
- ENTRY(164) , ENTRY(165) , ENTRY(166) , ENTRY(167) ,
- ENTRY(168) , ENTRY(169) , ENTRY(170) , ENTRY(171) ,
- ENTRY(172) , ENTRY(173) , ENTRY(174) , ENTRY(175) ,
- ENTRY(176) , ENTRY(177) , ENTRY(178) , ENTRY(179) ,
- ENTRY(180) , ENTRY(181) , ENTRY(182) , ENTRY(183) ,
- ENTRY(184) , ENTRY(185) , ENTRY(186) , ENTRY(187) ,
- ENTRY(188) , ENTRY(189) , ENTRY(190) , ENTRY(191) ,
- ENTRY(192) , ENTRY(193) , ENTRY(194) , ENTRY(195) ,
- ENTRY(196) , ENTRY(197) , ENTRY(198) , ENTRY(199) ,
- ENTRY(200) , ENTRY(201) , ENTRY(202) , ENTRY(203) ,
- ENTRY(204) , ENTRY(205) , ENTRY(206) , ENTRY(207) ,
- ENTRY(208) , ENTRY(209) , ENTRY(210) , ENTRY(211) ,
- ENTRY(212) , ENTRY(213) , ENTRY(214) , ENTRY(215) ,
- ENTRY(216) , ENTRY(217) , ENTRY(218) , ENTRY(219) ,
- ENTRY(220) , ENTRY(221) , ENTRY(222) , ENTRY(223) ,
- ENTRY(224) , ENTRY(225) , ENTRY(226) , ENTRY(227) ,
- ENTRY(228) , ENTRY(229) , ENTRY(230) , ENTRY(231) ,
- ENTRY(232) , ENTRY(233) , ENTRY(234) , ENTRY(235) ,
- ENTRY(236) , ENTRY(237) , ENTRY(238) , ENTRY(239) ,
- ENTRY(240) , ENTRY(241) , ENTRY(242) , ENTRY(243) ,
- ENTRY(244) , ENTRY(245) , ENTRY(246) , ENTRY(247) ,
- ENTRY(248) , ENTRY(249) , ENTRY(250) , ENTRY(251) ,
- ENTRY(252) , ENTRY(253) , ENTRY(254) , ENTRY(255) ,
-#undef ENTRY
-
-#define HUFFDEC_EXTRA_LENGTH_BITS_MASK 0xFF
-#define HUFFDEC_LENGTH_BASE_SHIFT 8
-#define HUFFDEC_END_OF_BLOCK_LENGTH 0
-
-#define ENTRY(length_base, num_extra_bits) HUFFDEC_RESULT_ENTRY( \
- ((u32)(length_base) << HUFFDEC_LENGTH_BASE_SHIFT) | (num_extra_bits))
-
- /* End of block */
- ENTRY(HUFFDEC_END_OF_BLOCK_LENGTH, 0),
-
- /* Lengths */
- ENTRY(3 , 0) , ENTRY(4 , 0) , ENTRY(5 , 0) , ENTRY(6 , 0),
- ENTRY(7 , 0) , ENTRY(8 , 0) , ENTRY(9 , 0) , ENTRY(10 , 0),
- ENTRY(11 , 1) , ENTRY(13 , 1) , ENTRY(15 , 1) , ENTRY(17 , 1),
- ENTRY(19 , 2) , ENTRY(23 , 2) , ENTRY(27 , 2) , ENTRY(31 , 2),
- ENTRY(35 , 3) , ENTRY(43 , 3) , ENTRY(51 , 3) , ENTRY(59 , 3),
- ENTRY(67 , 4) , ENTRY(83 , 4) , ENTRY(99 , 4) , ENTRY(115, 4),
- ENTRY(131, 5) , ENTRY(163, 5) , ENTRY(195, 5) , ENTRY(227, 5),
- ENTRY(258, 0) , ENTRY(258, 0) , ENTRY(258, 0) ,
-#undef ENTRY
-};
-
-/* The decode result for each offset symbol. This is the offset base and the
- * number of extra offset bits. */
-static const u32 offset_decode_results[DEFLATE_NUM_OFFSET_SYMS] = {
-
-#define HUFFDEC_EXTRA_OFFSET_BITS_SHIFT 16
-#define HUFFDEC_OFFSET_BASE_MASK (((u32)1 << HUFFDEC_EXTRA_OFFSET_BITS_SHIFT) - 1)
-
-#define ENTRY(offset_base, num_extra_bits) HUFFDEC_RESULT_ENTRY( \
- ((u32)(num_extra_bits) << HUFFDEC_EXTRA_OFFSET_BITS_SHIFT) | \
- (offset_base))
- ENTRY(1 , 0) , ENTRY(2 , 0) , ENTRY(3 , 0) , ENTRY(4 , 0) ,
- ENTRY(5 , 1) , ENTRY(7 , 1) , ENTRY(9 , 2) , ENTRY(13 , 2) ,
- ENTRY(17 , 3) , ENTRY(25 , 3) , ENTRY(33 , 4) , ENTRY(49 , 4) ,
- ENTRY(65 , 5) , ENTRY(97 , 5) , ENTRY(129 , 6) , ENTRY(193 , 6) ,
- ENTRY(257 , 7) , ENTRY(385 , 7) , ENTRY(513 , 8) , ENTRY(769 , 8) ,
- ENTRY(1025 , 9) , ENTRY(1537 , 9) , ENTRY(2049 , 10) , ENTRY(3073 , 10) ,
- ENTRY(4097 , 11) , ENTRY(6145 , 11) , ENTRY(8193 , 12) , ENTRY(12289 , 12) ,
- ENTRY(16385 , 13) , ENTRY(24577 , 13) , ENTRY(32769 , 14) , ENTRY(49153 , 14) ,
-#undef ENTRY
-};
-
-/*
- * Build a table for fast decoding of symbols from a Huffman code. As input,
- * this function takes the codeword length of each symbol which may be used in
- * the code. As output, it produces a decode table for the canonical Huffman
- * code described by the codeword lengths. The decode table is built with the
- * assumption that it will be indexed with "bit-reversed" codewords, where the
- * low-order bit is the first bit of the codeword. This format is used for all
- * Huffman codes in DEFLATE.
- *
- * @decode_table
- * The array in which the decode table will be generated. This array must
- * have sufficient length; see the definition of the ENOUGH numbers.
- * @lens
- * An array which provides, for each symbol, the length of the
- * corresponding codeword in bits, or 0 if the symbol is unused. This may
- * alias @decode_table, since nothing is written to @decode_table until all
- * @lens have been consumed. All codeword lengths are assumed to be <=
- * @max_codeword_len but are otherwise considered untrusted. If they do
- * not form a valid Huffman code, then the decode table is not built and
- * %false is returned.
- * @num_syms
- * The number of symbols in the code, including all unused symbols.
- * @decode_results
- * An array which provides, for each symbol, the actual value to store into
- * the decode table. This value will be directly produced as the result of
- * decoding that symbol, thereby moving the indirection out of the decode
- * loop and into the table initialization.
- * @table_bits
- * The log base-2 of the number of main table entries to use.
- * @max_codeword_len
- * The maximum allowed codeword length for this Huffman code.
- * Must be <= DEFLATE_MAX_CODEWORD_LEN.
- * @sorted_syms
- * A temporary array of length @num_syms.
- *
- * Returns %true if successful; %false if the codeword lengths do not form a
- * valid Huffman code.
- */
-static bool
-build_decode_table(u32 decode_table[],
- const len_t lens[],
- const unsigned num_syms,
- const u32 decode_results[],
- const unsigned table_bits,
- const unsigned max_codeword_len,
- u16 *sorted_syms)
-{
- unsigned len_counts[DEFLATE_MAX_CODEWORD_LEN + 1];
- unsigned offsets[DEFLATE_MAX_CODEWORD_LEN + 1];
- unsigned sym; /* current symbol */
- unsigned codeword; /* current codeword, bit-reversed */
- unsigned len; /* current codeword length in bits */
- unsigned count; /* num codewords remaining with this length */
- u32 codespace_used; /* codespace used out of '2^max_codeword_len' */
- unsigned cur_table_end; /* end index of current table */
- unsigned subtable_prefix; /* codeword prefix of current subtable */
- unsigned subtable_start; /* start index of current subtable */
- unsigned subtable_bits; /* log2 of current subtable length */
-
- /* Count how many codewords have each length, including 0. */
- for (len = 0; len <= max_codeword_len; len++)
- len_counts[len] = 0;
- for (sym = 0; sym < num_syms; sym++)
- len_counts[lens[sym]]++;
-
- /*
- * Sort the symbols primarily by increasing codeword length and
- * secondarily by increasing symbol value; or equivalently by their
- * codewords in lexicographic order, since a canonical code is assumed.
- *
- * For efficiency, also compute 'codespace_used' in the same pass over
- * 'len_counts[]' used to build 'offsets[]' for sorting.
- */
-
- /* Ensure that 'codespace_used' cannot overflow. */
- STATIC_ASSERT(sizeof(codespace_used) == 4);
- STATIC_ASSERT(UINT32_MAX / (1U << (DEFLATE_MAX_CODEWORD_LEN - 1)) >=
- DEFLATE_MAX_NUM_SYMS);
-
- offsets[0] = 0;
- offsets[1] = len_counts[0];
- codespace_used = 0;
- for (len = 1; len < max_codeword_len; len++) {
- offsets[len + 1] = offsets[len] + len_counts[len];
- codespace_used = (codespace_used << 1) + len_counts[len];
- }
- codespace_used = (codespace_used << 1) + len_counts[len];
-
- for (sym = 0; sym < num_syms; sym++)
- sorted_syms[offsets[lens[sym]]++] = sym;
-
- sorted_syms += offsets[0]; /* Skip unused symbols */
-
- /* lens[] is done being used, so we can write to decode_table[] now. */
-
- /*
- * Check whether the lengths form a complete code (exactly fills the
- * codespace), an incomplete code (doesn't fill the codespace), or an
- * overfull code (overflows the codespace). A codeword of length 'n'
- * uses proportion '1/(2^n)' of the codespace. An overfull code is
- * nonsensical, so is considered invalid. An incomplete code is
- * considered valid only in two specific cases; see below.
- */
-
- /* overfull code? */
- if (unlikely(codespace_used > (1U << max_codeword_len)))
- return false;
-
- /* incomplete code? */
- if (unlikely(codespace_used < (1U << max_codeword_len))) {
- u32 entry;
- unsigned i;
-
- if (codespace_used == 0) {
- /*
- * An empty code is allowed. This can happen for the
- * offset code in DEFLATE, since a dynamic Huffman block
- * need not contain any matches.
- */
-
- /* sym=0, len=1 (arbitrary) */
- entry = decode_results[0] | 1;
- } else {
- /*
- * Allow codes with a single used symbol, with codeword
- * length 1. The DEFLATE RFC is unclear regarding this
- * case. What zlib's decompressor does is permit this
- * for the litlen and offset codes and assume the
- * codeword is '0' rather than '1'. We do the same
- * except we allow this for precodes too, since there's
- * no convincing reason to treat the codes differently.
- * We also assign both codewords '0' and '1' to the
- * symbol to avoid having to handle '1' specially.
- */
- if (codespace_used != (1U << (max_codeword_len - 1)) ||
- len_counts[1] != 1)
- return false;
- entry = decode_results[*sorted_syms] | 1;
- }
- /*
- * Note: the decode table still must be fully initialized, in
- * case the stream is malformed and contains bits from the part
- * of the codespace the incomplete code doesn't use.
- */
- for (i = 0; i < (1U << table_bits); i++)
- decode_table[i] = entry;
- return true;
- }
-
- /*
- * The lengths form a complete code. Now, enumerate the codewords in
- * lexicographic order and fill the decode table entries for each one.
- *
- * First, process all codewords with len <= table_bits. Each one gets
- * '2^(table_bits-len)' direct entries in the table.
- *
- * Since DEFLATE uses bit-reversed codewords, these entries aren't
- * consecutive but rather are spaced '2^len' entries apart. This makes
- * filling them naively somewhat awkward and inefficient, since strided
- * stores are less cache-friendly and preclude the use of word or
- * vector-at-a-time stores to fill multiple entries per instruction.
- *
- * To optimize this, we incrementally double the table size. When
- * processing codewords with length 'len', the table is treated as
- * having only '2^len' entries, so each codeword uses just one entry.
- * Then, each time 'len' is incremented, the table size is doubled and
- * the first half is copied to the second half. This significantly
- * improves performance over naively doing strided stores.
- *
- * Note that some entries copied for each table doubling may not have
- * been initialized yet, but it doesn't matter since they're guaranteed
- * to be initialized later (because the Huffman code is complete).
- */
- codeword = 0;
- len = 1;
- while ((count = len_counts[len]) == 0)
- len++;
- cur_table_end = 1U << len;
- while (len <= table_bits) {
- /* Process all 'count' codewords with length 'len' bits. */
- do {
- unsigned bit;
-
- /* Fill the first entry for the current codeword. */
- decode_table[codeword] =
- decode_results[*sorted_syms++] | len;
-
- if (codeword == cur_table_end - 1) {
- /* Last codeword (all 1's) */
- for (; len < table_bits; len++) {
- memcpy(&decode_table[cur_table_end],
- decode_table,
- cur_table_end *
- sizeof(decode_table[0]));
- cur_table_end <<= 1;
- }
- return true;
- }
- /*
- * To advance to the lexicographically next codeword in
- * the canonical code, the codeword must be incremented,
- * then 0's must be appended to the codeword as needed
- * to match the next codeword's length.
- *
- * Since the codeword is bit-reversed, appending 0's is
- * a no-op. However, incrementing it is nontrivial. To
- * do so efficiently, use the 'bsr' instruction to find
- * the last (highest order) 0 bit in the codeword, set
- * it, and clear any later (higher order) 1 bits. But
- * 'bsr' actually finds the highest order 1 bit, so to
- * use it first flip all bits in the codeword by XOR'ing
- * it with (1U << len) - 1 == cur_table_end - 1.
- */
- bit = 1U << bsr32(codeword ^ (cur_table_end - 1));
- codeword &= bit - 1;
- codeword |= bit;
- } while (--count);
-
- /* Advance to the next codeword length. */
- do {
- if (++len <= table_bits) {
- memcpy(&decode_table[cur_table_end],
- decode_table,
- cur_table_end * sizeof(decode_table[0]));
- cur_table_end <<= 1;
- }
- } while ((count = len_counts[len]) == 0);
- }
-
- /* Process codewords with len > table_bits. These require subtables. */
- cur_table_end = 1U << table_bits;
- subtable_prefix = -1;
- subtable_start = 0;
- for (;;) {
- u32 entry;
- unsigned i;
- unsigned stride;
- unsigned bit;
-
- /*
- * Start a new subtable if the first 'table_bits' bits of the
- * codeword don't match the prefix of the current subtable.
- */
- if ((codeword & ((1U << table_bits) - 1)) != subtable_prefix) {
- subtable_prefix = (codeword & ((1U << table_bits) - 1));
- subtable_start = cur_table_end;
- /*
- * Calculate the subtable length. If the codeword has
- * length 'table_bits + n', then the subtable needs
- * '2^n' entries. But it may need more; if fewer than
- * '2^n' codewords of length 'table_bits + n' remain,
- * then the length will need to be incremented to bring
- * in longer codewords until the subtable can be
- * completely filled. Note that because the Huffman
- * code is complete, it will always be possible to fill
- * the subtable eventually.
- */
- subtable_bits = len - table_bits;
- codespace_used = count;
- while (codespace_used < (1U << subtable_bits)) {
- subtable_bits++;
- codespace_used = (codespace_used << 1) +
- len_counts[table_bits + subtable_bits];
- }
- cur_table_end = subtable_start + (1U << subtable_bits);
-
- /*
- * Create the entry that points from the main table to
- * the subtable. This entry contains the index of the
- * start of the subtable and the number of bits with
- * which the subtable is indexed (the log base 2 of the
- * number of entries it contains).
- */
- decode_table[subtable_prefix] =
- HUFFDEC_SUBTABLE_POINTER |
- HUFFDEC_RESULT_ENTRY(subtable_start) |
- subtable_bits;
- }
-
- /* Fill the subtable entries for the current codeword. */
- entry = decode_results[*sorted_syms++] | (len - table_bits);
- i = subtable_start + (codeword >> table_bits);
- stride = 1U << (len - table_bits);
- do {
- decode_table[i] = entry;
- i += stride;
- } while (i < cur_table_end);
-
- /* Advance to the next codeword. */
- if (codeword == (1U << len) - 1) /* last codeword (all 1's)? */
- return true;
- bit = 1U << bsr32(codeword ^ ((1U << len) - 1));
- codeword &= bit - 1;
- codeword |= bit;
- count--;
- while (count == 0)
- count = len_counts[++len];
- }
-}
-
-/* Build the decode table for the precode. */
-static bool
-build_precode_decode_table(struct libdeflate_decompressor *d)
-{
- /* When you change TABLEBITS, you must change ENOUGH, and vice versa! */
- STATIC_ASSERT(PRECODE_TABLEBITS == 7 && PRECODE_ENOUGH == 128);
-
- return build_decode_table(d->u.l.precode_decode_table,
- d->u.precode_lens,
- DEFLATE_NUM_PRECODE_SYMS,
- precode_decode_results,
- PRECODE_TABLEBITS,
- DEFLATE_MAX_PRE_CODEWORD_LEN,
- d->sorted_syms);
-}
-
-/* Build the decode table for the literal/length code. */
-static bool
-build_litlen_decode_table(struct libdeflate_decompressor *d,
- unsigned num_litlen_syms, unsigned num_offset_syms)
-{
- /* When you change TABLEBITS, you must change ENOUGH, and vice versa! */
- STATIC_ASSERT(LITLEN_TABLEBITS == 10 && LITLEN_ENOUGH == 1334);
-
- return build_decode_table(d->u.litlen_decode_table,
- d->u.l.lens,
- num_litlen_syms,
- litlen_decode_results,
- LITLEN_TABLEBITS,
- DEFLATE_MAX_LITLEN_CODEWORD_LEN,
- d->sorted_syms);
-}
-
-/* Build the decode table for the offset code. */
-static bool
-build_offset_decode_table(struct libdeflate_decompressor *d,
- unsigned num_litlen_syms, unsigned num_offset_syms)
-{
- /* When you change TABLEBITS, you must change ENOUGH, and vice versa! */
- STATIC_ASSERT(OFFSET_TABLEBITS == 8 && OFFSET_ENOUGH == 402);
-
- return build_decode_table(d->offset_decode_table,
- d->u.l.lens + num_litlen_syms,
- num_offset_syms,
- offset_decode_results,
- OFFSET_TABLEBITS,
- DEFLATE_MAX_OFFSET_CODEWORD_LEN,
- d->sorted_syms);
-}
-
-static forceinline machine_word_t
-repeat_byte(u8 b)
-{
- machine_word_t v;
-
- STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
-
- v = b;
- v |= v << 8;
- v |= v << 16;
- v |= v << ((WORDBITS == 64) ? 32 : 0);
- return v;
-}
-
-static forceinline void
-copy_word_unaligned(const void *src, void *dst)
-{
- store_word_unaligned(load_word_unaligned(src), dst);
-}
-
-/*****************************************************************************
- * Main decompression routine
- *****************************************************************************/
-
-typedef enum libdeflate_result (*decompress_func_t)
- (struct libdeflate_decompressor * restrict d,
- const void * restrict in, size_t in_nbytes,
- void * restrict out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret, size_t *actual_out_nbytes_ret);
-
-#undef DEFAULT_IMPL
-#undef DISPATCH
-#if defined(__i386__) || defined(__x86_64__)
-# include "x86/decompress_impl.h"
-#endif
-
-#ifndef DEFAULT_IMPL
-# define FUNCNAME deflate_decompress_default
-# define ATTRIBUTES
-# include "decompress_template.h"
-# define DEFAULT_IMPL deflate_decompress_default
-#endif
-
-#ifdef DISPATCH
-static enum libdeflate_result
-dispatch(struct libdeflate_decompressor * restrict d,
- const void * restrict in, size_t in_nbytes,
- void * restrict out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret, size_t *actual_out_nbytes_ret);
-
-static volatile decompress_func_t decompress_impl = dispatch;
-
-/* Choose the fastest implementation at runtime */
-static enum libdeflate_result
-dispatch(struct libdeflate_decompressor * restrict d,
- const void * restrict in, size_t in_nbytes,
- void * restrict out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret, size_t *actual_out_nbytes_ret)
-{
- decompress_func_t f = arch_select_decompress_func();
-
- if (f == NULL)
- f = DEFAULT_IMPL;
-
- decompress_impl = f;
- return (*f)(d, in, in_nbytes, out, out_nbytes_avail,
- actual_in_nbytes_ret, actual_out_nbytes_ret);
-}
-#else
-# define decompress_impl DEFAULT_IMPL /* only one implementation, use it */
-#endif
-
-
-/*
- * This is the main DEFLATE decompression routine. See libdeflate.h for the
- * documentation.
- *
- * Note that the real code is in decompress_template.h. The part here just
- * handles calling the appropriate implementation depending on the CPU features
- * at runtime.
- */
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_deflate_decompress_ex(struct libdeflate_decompressor * restrict d,
- const void * restrict in, size_t in_nbytes,
- void * restrict out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret,
- size_t *actual_out_nbytes_ret)
-{
- return decompress_impl(d, in, in_nbytes, out, out_nbytes_avail,
- actual_in_nbytes_ret, actual_out_nbytes_ret);
-}
-
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_deflate_decompress(struct libdeflate_decompressor * restrict d,
- const void * restrict in, size_t in_nbytes,
- void * restrict out, size_t out_nbytes_avail,
- size_t *actual_out_nbytes_ret)
-{
- return libdeflate_deflate_decompress_ex(d, in, in_nbytes,
- out, out_nbytes_avail,
- NULL, actual_out_nbytes_ret);
-}
-
-LIBDEFLATEEXPORT struct libdeflate_decompressor * LIBDEFLATEAPI
-libdeflate_alloc_decompressor(void)
-{
- /*
- * Note that only certain parts of the decompressor actually must be
- * initialized here:
- *
- * - 'static_codes_loaded' must be initialized to false.
- *
- * - The first half of the main portion of each decode table must be
- * initialized to any value, to avoid reading from uninitialized
- * memory during table expansion in build_decode_table(). (Although,
- * this is really just to avoid warnings with dynamic tools like
- * valgrind, since build_decode_table() is guaranteed to initialize
- * all entries eventually anyway.)
- *
- * But for simplicity, we currently just zero the whole decompressor.
- */
- struct libdeflate_decompressor *d = libdeflate_malloc(sizeof(*d));
-
- if (d == NULL)
- return NULL;
- memset(d, 0, sizeof(*d));
- return d;
-}
-
-LIBDEFLATEEXPORT void LIBDEFLATEAPI
-libdeflate_free_decompressor(struct libdeflate_decompressor *d)
-{
- libdeflate_free(d);
-}
diff --git a/util/compress/libdeflate/lib/gzip_compress.c b/util/compress/libdeflate/lib/gzip_compress.c
deleted file mode 100644
index 3cb8803cf..000000000
--- a/util/compress/libdeflate/lib/gzip_compress.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * gzip_compress.c - compress with a gzip wrapper
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "deflate_compress.h"
-#include "gzip_constants.h"
-#include "unaligned.h"
-
-#include "libdeflate.h"
-
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_gzip_compress(struct libdeflate_compressor *c,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail)
-{
- u8 *out_next = out;
- unsigned compression_level;
- u8 xfl;
- size_t deflate_size;
-
- if (out_nbytes_avail <= GZIP_MIN_OVERHEAD)
- return 0;
-
- /* ID1 */
- *out_next++ = GZIP_ID1;
- /* ID2 */
- *out_next++ = GZIP_ID2;
- /* CM */
- *out_next++ = GZIP_CM_DEFLATE;
- /* FLG */
- *out_next++ = 0;
- /* MTIME */
- put_unaligned_le32(GZIP_MTIME_UNAVAILABLE, out_next);
- out_next += 4;
- /* XFL */
- xfl = 0;
- compression_level = deflate_get_compression_level(c);
- if (compression_level < 2)
- xfl |= GZIP_XFL_FASTEST_COMPRESSION;
- else if (compression_level >= 8)
- xfl |= GZIP_XFL_SLOWEST_COMPRESSION;
- *out_next++ = xfl;
- /* OS */
- *out_next++ = GZIP_OS_UNKNOWN; /* OS */
-
- /* Compressed data */
- deflate_size = libdeflate_deflate_compress(c, in, in_nbytes, out_next,
- out_nbytes_avail - GZIP_MIN_OVERHEAD);
- if (deflate_size == 0)
- return 0;
- out_next += deflate_size;
-
- /* CRC32 */
- put_unaligned_le32(libdeflate_crc32(0, in, in_nbytes), out_next);
- out_next += 4;
-
- /* ISIZE */
- put_unaligned_le32((u32)in_nbytes, out_next);
- out_next += 4;
-
- return out_next - (u8 *)out;
-}
-
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_gzip_compress_bound(struct libdeflate_compressor *c,
- size_t in_nbytes)
-{
- return GZIP_MIN_OVERHEAD +
- libdeflate_deflate_compress_bound(c, in_nbytes);
-}
diff --git a/util/compress/libdeflate/lib/gzip_constants.h b/util/compress/libdeflate/lib/gzip_constants.h
deleted file mode 100644
index 35e4728d8..000000000
--- a/util/compress/libdeflate/lib/gzip_constants.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * gzip_constants.h - constants for the gzip wrapper format
- */
-
-#ifndef LIB_GZIP_CONSTANTS_H
-#define LIB_GZIP_CONSTANTS_H
-
-#define GZIP_MIN_HEADER_SIZE 10
-#define GZIP_FOOTER_SIZE 8
-#define GZIP_MIN_OVERHEAD (GZIP_MIN_HEADER_SIZE + GZIP_FOOTER_SIZE)
-
-#define GZIP_ID1 0x1F
-#define GZIP_ID2 0x8B
-
-#define GZIP_CM_DEFLATE 8
-
-#define GZIP_FTEXT 0x01
-#define GZIP_FHCRC 0x02
-#define GZIP_FEXTRA 0x04
-#define GZIP_FNAME 0x08
-#define GZIP_FCOMMENT 0x10
-#define GZIP_FRESERVED 0xE0
-
-#define GZIP_MTIME_UNAVAILABLE 0
-
-#define GZIP_XFL_SLOWEST_COMPRESSION 0x02
-#define GZIP_XFL_FASTEST_COMPRESSION 0x04
-
-#define GZIP_OS_FAT 0
-#define GZIP_OS_AMIGA 1
-#define GZIP_OS_VMS 2
-#define GZIP_OS_UNIX 3
-#define GZIP_OS_VM_CMS 4
-#define GZIP_OS_ATARI_TOS 5
-#define GZIP_OS_HPFS 6
-#define GZIP_OS_MACINTOSH 7
-#define GZIP_OS_Z_SYSTEM 8
-#define GZIP_OS_CP_M 9
-#define GZIP_OS_TOPS_20 10
-#define GZIP_OS_NTFS 11
-#define GZIP_OS_QDOS 12
-#define GZIP_OS_RISCOS 13
-#define GZIP_OS_UNKNOWN 255
-
-#endif /* LIB_GZIP_CONSTANTS_H */
diff --git a/util/compress/libdeflate/lib/gzip_decompress.c b/util/compress/libdeflate/lib/gzip_decompress.c
deleted file mode 100644
index 1b31d8a8e..000000000
--- a/util/compress/libdeflate/lib/gzip_decompress.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * gzip_decompress.c - decompress with a gzip wrapper
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "gzip_constants.h"
-#include "unaligned.h"
-
-#include "libdeflate.h"
-
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_gzip_decompress_ex(struct libdeflate_decompressor *d,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret,
- size_t *actual_out_nbytes_ret)
-{
- const u8 *in_next = in;
- const u8 * const in_end = in_next + in_nbytes;
- u8 flg;
- size_t actual_in_nbytes;
- size_t actual_out_nbytes;
- enum libdeflate_result result;
-
- if (in_nbytes < GZIP_MIN_OVERHEAD)
- return LIBDEFLATE_BAD_DATA;
-
- /* ID1 */
- if (*in_next++ != GZIP_ID1)
- return LIBDEFLATE_BAD_DATA;
- /* ID2 */
- if (*in_next++ != GZIP_ID2)
- return LIBDEFLATE_BAD_DATA;
- /* CM */
- if (*in_next++ != GZIP_CM_DEFLATE)
- return LIBDEFLATE_BAD_DATA;
- flg = *in_next++;
- /* MTIME */
- in_next += 4;
- /* XFL */
- in_next += 1;
- /* OS */
- in_next += 1;
-
- if (flg & GZIP_FRESERVED)
- return LIBDEFLATE_BAD_DATA;
-
- /* Extra field */
- if (flg & GZIP_FEXTRA) {
- u16 xlen = get_unaligned_le16(in_next);
- in_next += 2;
-
- if (in_end - in_next < (u32)xlen + GZIP_FOOTER_SIZE)
- return LIBDEFLATE_BAD_DATA;
-
- in_next += xlen;
- }
-
- /* Original file name (zero terminated) */
- if (flg & GZIP_FNAME) {
- while (*in_next++ != 0 && in_next != in_end)
- ;
- if (in_end - in_next < GZIP_FOOTER_SIZE)
- return LIBDEFLATE_BAD_DATA;
- }
-
- /* File comment (zero terminated) */
- if (flg & GZIP_FCOMMENT) {
- while (*in_next++ != 0 && in_next != in_end)
- ;
- if (in_end - in_next < GZIP_FOOTER_SIZE)
- return LIBDEFLATE_BAD_DATA;
- }
-
- /* CRC16 for gzip header */
- if (flg & GZIP_FHCRC) {
- in_next += 2;
- if (in_end - in_next < GZIP_FOOTER_SIZE)
- return LIBDEFLATE_BAD_DATA;
- }
-
- /* Compressed data */
- result = libdeflate_deflate_decompress_ex(d, in_next,
- in_end - GZIP_FOOTER_SIZE - in_next,
- out, out_nbytes_avail,
- &actual_in_nbytes,
- actual_out_nbytes_ret);
- if (result != LIBDEFLATE_SUCCESS)
- return result;
-
- if (actual_out_nbytes_ret)
- actual_out_nbytes = *actual_out_nbytes_ret;
- else
- actual_out_nbytes = out_nbytes_avail;
-
- in_next += actual_in_nbytes;
-
- /* CRC32 */
- if (libdeflate_crc32(0, out, actual_out_nbytes) !=
- get_unaligned_le32(in_next))
- return LIBDEFLATE_BAD_DATA;
- in_next += 4;
-
- /* ISIZE */
- if ((u32)actual_out_nbytes != get_unaligned_le32(in_next))
- return LIBDEFLATE_BAD_DATA;
- in_next += 4;
-
- if (actual_in_nbytes_ret)
- *actual_in_nbytes_ret = in_next - (u8 *)in;
-
- return LIBDEFLATE_SUCCESS;
-}
-
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_gzip_decompress(struct libdeflate_decompressor *d,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_out_nbytes_ret)
-{
- return libdeflate_gzip_decompress_ex(d, in, in_nbytes,
- out, out_nbytes_avail,
- NULL, actual_out_nbytes_ret);
-}
diff --git a/util/compress/libdeflate/lib/hc_matchfinder.h b/util/compress/libdeflate/lib/hc_matchfinder.h
deleted file mode 100644
index b81d32c6c..000000000
--- a/util/compress/libdeflate/lib/hc_matchfinder.h
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * hc_matchfinder.h - Lempel-Ziv matchfinding with a hash table of linked lists
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * ---------------------------------------------------------------------------
- *
- * Algorithm
- *
- * This is a Hash Chains (hc) based matchfinder.
- *
- * The main data structure is a hash table where each hash bucket contains a
- * linked list (or "chain") of sequences whose first 4 bytes share the same hash
- * code. Each sequence is identified by its starting position in the input
- * buffer.
- *
- * The algorithm processes the input buffer sequentially. At each byte
- * position, the hash code of the first 4 bytes of the sequence beginning at
- * that position (the sequence being matched against) is computed. This
- * identifies the hash bucket to use for that position. Then, this hash
- * bucket's linked list is searched for matches. Then, a new linked list node
- * is created to represent the current sequence and is prepended to the list.
- *
- * This algorithm has several useful properties:
- *
- * - It only finds true Lempel-Ziv matches; i.e., those where the matching
- * sequence occurs prior to the sequence being matched against.
- *
- * - The sequences in each linked list are always sorted by decreasing starting
- * position. Therefore, the closest (smallest offset) matches are found
- * first, which in many compression formats tend to be the cheapest to encode.
- *
- * - Although fast running time is not guaranteed due to the possibility of the
- * lists getting very long, the worst degenerate behavior can be easily
- * prevented by capping the number of nodes searched at each position.
- *
- * - If the compressor decides not to search for matches at a certain position,
- * then that position can be quickly inserted without searching the list.
- *
- * - The algorithm is adaptable to sliding windows: just store the positions
- * relative to a "base" value that is updated from time to time, and stop
- * searching each list when the sequences get too far away.
- *
- * ----------------------------------------------------------------------------
- *
- * Optimizations
- *
- * The main hash table and chains handle length 4+ matches. Length 3 matches
- * are handled by a separate hash table with no chains. This works well for
- * typical "greedy" or "lazy"-style compressors, where length 3 matches are
- * often only helpful if they have small offsets. Instead of searching a full
- * chain for length 3+ matches, the algorithm just checks for one close length 3
- * match, then focuses on finding length 4+ matches.
- *
- * The longest_match() and skip_positions() functions are inlined into the
- * compressors that use them. This isn't just about saving the overhead of a
- * function call. These functions are intended to be called from the inner
- * loops of compressors, where giving the compiler more control over register
- * allocation is very helpful. There is also significant benefit to be gained
- * from allowing the CPU to predict branches independently at each call site.
- * For example, "lazy"-style compressors can be written with two calls to
- * longest_match(), each of which starts with a different 'best_len' and
- * therefore has significantly different performance characteristics.
- *
- * Although any hash function can be used, a multiplicative hash is fast and
- * works well.
- *
- * On some processors, it is significantly faster to extend matches by whole
- * words (32 or 64 bits) instead of by individual bytes. For this to be the
- * case, the processor must implement unaligned memory accesses efficiently and
- * must have either a fast "find first set bit" instruction or a fast "find last
- * set bit" instruction, depending on the processor's endianness.
- *
- * The code uses one loop for finding the first match and one loop for finding a
- * longer match. Each of these loops is tuned for its respective task and in
- * combination are faster than a single generalized loop that handles both
- * tasks.
- *
- * The code also uses a tight inner loop that only compares the last and first
- * bytes of a potential match. It is only when these bytes match that a full
- * match extension is attempted.
- *
- * ----------------------------------------------------------------------------
- */
-
-#ifndef LIB_HC_MATCHFINDER_H
-#define LIB_HC_MATCHFINDER_H
-
-#include "matchfinder_common.h"
-
-#define HC_MATCHFINDER_HASH3_ORDER 15
-#define HC_MATCHFINDER_HASH4_ORDER 16
-
-#define HC_MATCHFINDER_TOTAL_HASH_SIZE \
- (((1UL << HC_MATCHFINDER_HASH3_ORDER) + \
- (1UL << HC_MATCHFINDER_HASH4_ORDER)) * sizeof(mf_pos_t))
-
-struct hc_matchfinder {
-
- /* The hash table for finding length 3 matches */
- mf_pos_t hash3_tab[1UL << HC_MATCHFINDER_HASH3_ORDER];
-
- /* The hash table which contains the first nodes of the linked lists for
- * finding length 4+ matches */
- mf_pos_t hash4_tab[1UL << HC_MATCHFINDER_HASH4_ORDER];
-
- /* The "next node" references for the linked lists. The "next node" of
- * the node for the sequence with position 'pos' is 'next_tab[pos]'. */
- mf_pos_t next_tab[MATCHFINDER_WINDOW_SIZE];
-
-}
-#ifdef _aligned_attribute
- _aligned_attribute(MATCHFINDER_MEM_ALIGNMENT)
-#endif
-;
-
-/* Prepare the matchfinder for a new input buffer. */
-static forceinline void
-hc_matchfinder_init(struct hc_matchfinder *mf)
-{
- STATIC_ASSERT(HC_MATCHFINDER_TOTAL_HASH_SIZE %
- MATCHFINDER_SIZE_ALIGNMENT == 0);
-
- matchfinder_init((mf_pos_t *)mf, HC_MATCHFINDER_TOTAL_HASH_SIZE);
-}
-
-static forceinline void
-hc_matchfinder_slide_window(struct hc_matchfinder *mf)
-{
- STATIC_ASSERT(sizeof(*mf) % MATCHFINDER_SIZE_ALIGNMENT == 0);
-
- matchfinder_rebase((mf_pos_t *)mf, sizeof(*mf));
-}
-
-/*
- * Find the longest match longer than 'best_len' bytes.
- *
- * @mf
- * The matchfinder structure.
- * @in_base_p
- * Location of a pointer which points to the place in the input data the
- * matchfinder currently stores positions relative to. This may be updated
- * by this function.
- * @cur_pos
- * The current position in the input buffer relative to @in_base (the
- * position of the sequence being matched against).
- * @best_len
- * Require a match longer than this length.
- * @max_len
- * The maximum permissible match length at this position.
- * @nice_len
- * Stop searching if a match of at least this length is found.
- * Must be <= @max_len.
- * @max_search_depth
- * Limit on the number of potential matches to consider. Must be >= 1.
- * @next_hashes
- * The precomputed hash codes for the sequence beginning at @in_next.
- * These will be used and then updated with the precomputed hashcodes for
- * the sequence beginning at @in_next + 1.
- * @offset_ret
- * If a match is found, its offset is returned in this location.
- *
- * Return the length of the match found, or 'best_len' if no match longer than
- * 'best_len' was found.
- */
-static forceinline u32
-hc_matchfinder_longest_match(struct hc_matchfinder * const restrict mf,
- const u8 ** const restrict in_base_p,
- const u8 * const restrict in_next,
- u32 best_len,
- const u32 max_len,
- const u32 nice_len,
- const u32 max_search_depth,
- u32 * const restrict next_hashes,
- u32 * const restrict offset_ret)
-{
- u32 depth_remaining = max_search_depth;
- const u8 *best_matchptr = in_next;
- mf_pos_t cur_node3, cur_node4;
- u32 hash3, hash4;
- u32 next_hashseq;
- u32 seq4;
- const u8 *matchptr;
- u32 len;
- u32 cur_pos = in_next - *in_base_p;
- const u8 *in_base;
- mf_pos_t cutoff;
-
- if (cur_pos == MATCHFINDER_WINDOW_SIZE) {
- hc_matchfinder_slide_window(mf);
- *in_base_p += MATCHFINDER_WINDOW_SIZE;
- cur_pos = 0;
- }
-
- in_base = *in_base_p;
- cutoff = cur_pos - MATCHFINDER_WINDOW_SIZE;
-
- if (unlikely(max_len < 5)) /* can we read 4 bytes from 'in_next + 1'? */
- goto out;
-
- /* Get the precomputed hash codes. */
- hash3 = next_hashes[0];
- hash4 = next_hashes[1];
-
- /* From the hash buckets, get the first node of each linked list. */
- cur_node3 = mf->hash3_tab[hash3];
- cur_node4 = mf->hash4_tab[hash4];
-
- /* Update for length 3 matches. This replaces the singleton node in the
- * 'hash3' bucket with the node for the current sequence. */
- mf->hash3_tab[hash3] = cur_pos;
-
- /* Update for length 4 matches. This prepends the node for the current
- * sequence to the linked list in the 'hash4' bucket. */
- mf->hash4_tab[hash4] = cur_pos;
- mf->next_tab[cur_pos] = cur_node4;
-
- /* Compute the next hash codes. */
- next_hashseq = get_unaligned_le32(in_next + 1);
- next_hashes[0] = lz_hash(next_hashseq & 0xFFFFFF, HC_MATCHFINDER_HASH3_ORDER);
- next_hashes[1] = lz_hash(next_hashseq, HC_MATCHFINDER_HASH4_ORDER);
- prefetchw(&mf->hash3_tab[next_hashes[0]]);
- prefetchw(&mf->hash4_tab[next_hashes[1]]);
-
- if (best_len < 4) { /* No match of length >= 4 found yet? */
-
- /* Check for a length 3 match if needed. */
-
- if (cur_node3 <= cutoff)
- goto out;
-
- seq4 = load_u32_unaligned(in_next);
-
- if (best_len < 3) {
- matchptr = &in_base[cur_node3];
- if (load_u24_unaligned(matchptr) == loaded_u32_to_u24(seq4)) {
- best_len = 3;
- best_matchptr = matchptr;
- }
- }
-
- /* Check for a length 4 match. */
-
- if (cur_node4 <= cutoff)
- goto out;
-
- for (;;) {
- /* No length 4 match found yet. Check the first 4 bytes. */
- matchptr = &in_base[cur_node4];
-
- if (load_u32_unaligned(matchptr) == seq4)
- break;
-
- /* The first 4 bytes did not match. Keep trying. */
- cur_node4 = mf->next_tab[cur_node4 & (MATCHFINDER_WINDOW_SIZE - 1)];
- if (cur_node4 <= cutoff || !--depth_remaining)
- goto out;
- }
-
- /* Found a match of length >= 4. Extend it to its full length. */
- best_matchptr = matchptr;
- best_len = lz_extend(in_next, best_matchptr, 4, max_len);
- if (best_len >= nice_len)
- goto out;
- cur_node4 = mf->next_tab[cur_node4 & (MATCHFINDER_WINDOW_SIZE - 1)];
- if (cur_node4 <= cutoff || !--depth_remaining)
- goto out;
- } else {
- if (cur_node4 <= cutoff || best_len >= nice_len)
- goto out;
- }
-
- /* Check for matches of length >= 5. */
-
- for (;;) {
- for (;;) {
- matchptr = &in_base[cur_node4];
-
- /* Already found a length 4 match. Try for a longer
- * match; start by checking either the last 4 bytes and
- * the first 4 bytes, or the last byte. (The last byte,
- * the one which would extend the match length by 1, is
- * the most important.) */
- #if UNALIGNED_ACCESS_IS_FAST
- if ((load_u32_unaligned(matchptr + best_len - 3) ==
- load_u32_unaligned(in_next + best_len - 3)) &&
- (load_u32_unaligned(matchptr) ==
- load_u32_unaligned(in_next)))
- #else
- if (matchptr[best_len] == in_next[best_len])
- #endif
- break;
-
- /* Continue to the next node in the list. */
- cur_node4 = mf->next_tab[cur_node4 & (MATCHFINDER_WINDOW_SIZE - 1)];
- if (cur_node4 <= cutoff || !--depth_remaining)
- goto out;
- }
-
- #if UNALIGNED_ACCESS_IS_FAST
- len = 4;
- #else
- len = 0;
- #endif
- len = lz_extend(in_next, matchptr, len, max_len);
- if (len > best_len) {
- /* This is the new longest match. */
- best_len = len;
- best_matchptr = matchptr;
- if (best_len >= nice_len)
- goto out;
- }
-
- /* Continue to the next node in the list. */
- cur_node4 = mf->next_tab[cur_node4 & (MATCHFINDER_WINDOW_SIZE - 1)];
- if (cur_node4 <= cutoff || !--depth_remaining)
- goto out;
- }
-out:
- *offset_ret = in_next - best_matchptr;
- return best_len;
-}
-
-/*
- * Advance the matchfinder, but don't search for matches.
- *
- * @mf
- * The matchfinder structure.
- * @in_base_p
- * Location of a pointer which points to the place in the input data the
- * matchfinder currently stores positions relative to. This may be updated
- * by this function.
- * @cur_pos
- * The current position in the input buffer relative to @in_base.
- * @end_pos
- * The end position of the input buffer, relative to @in_base.
- * @next_hashes
- * The precomputed hash codes for the sequence beginning at @in_next.
- * These will be used and then updated with the precomputed hashcodes for
- * the sequence beginning at @in_next + @count.
- * @count
- * The number of bytes to advance. Must be > 0.
- *
- * Returns @in_next + @count.
- */
-static forceinline const u8 *
-hc_matchfinder_skip_positions(struct hc_matchfinder * const restrict mf,
- const u8 ** const restrict in_base_p,
- const u8 *in_next,
- const u8 * const in_end,
- const u32 count,
- u32 * const restrict next_hashes)
-{
- u32 cur_pos;
- u32 hash3, hash4;
- u32 next_hashseq;
- u32 remaining = count;
-
- if (unlikely(count + 5 > in_end - in_next))
- return &in_next[count];
-
- cur_pos = in_next - *in_base_p;
- hash3 = next_hashes[0];
- hash4 = next_hashes[1];
- do {
- if (cur_pos == MATCHFINDER_WINDOW_SIZE) {
- hc_matchfinder_slide_window(mf);
- *in_base_p += MATCHFINDER_WINDOW_SIZE;
- cur_pos = 0;
- }
- mf->hash3_tab[hash3] = cur_pos;
- mf->next_tab[cur_pos] = mf->hash4_tab[hash4];
- mf->hash4_tab[hash4] = cur_pos;
-
- next_hashseq = get_unaligned_le32(++in_next);
- hash3 = lz_hash(next_hashseq & 0xFFFFFF, HC_MATCHFINDER_HASH3_ORDER);
- hash4 = lz_hash(next_hashseq, HC_MATCHFINDER_HASH4_ORDER);
- cur_pos++;
- } while (--remaining);
-
- prefetchw(&mf->hash3_tab[hash3]);
- prefetchw(&mf->hash4_tab[hash4]);
- next_hashes[0] = hash3;
- next_hashes[1] = hash4;
-
- return in_next;
-}
-
-#endif /* LIB_HC_MATCHFINDER_H */
diff --git a/util/compress/libdeflate/lib/lib_common.h b/util/compress/libdeflate/lib/lib_common.h
deleted file mode 100644
index 2eea56c72..000000000
--- a/util/compress/libdeflate/lib/lib_common.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * lib_common.h - internal header included by all library code
- */
-
-#ifndef LIB_LIB_COMMON_H
-#define LIB_LIB_COMMON_H
-
-#ifdef LIBDEFLATE_H
-# error "lib_common.h must always be included before libdeflate.h"
- /* because BUILDING_LIBDEFLATE must be set first */
-#endif
-
-#define BUILDING_LIBDEFLATE
-
-#include "../common/common_defs.h"
-
-/*
- * Prefix with "_libdeflate_" all global symbols which are not part of the API
- * and don't already have a "libdeflate" prefix. This avoids exposing overly
- * generic names when libdeflate is built as a static library.
- *
- * Note that the chosen prefix is not really important and can be changed
- * without breaking library users. It was just chosen so that the resulting
- * symbol names are unlikely to conflict with those from any other software.
- * Also note that this fixup has no useful effect when libdeflate is built as a
- * shared library, since these symbols are not exported.
- */
-#define SYM_FIXUP(sym) _libdeflate_##sym
-#define deflate_get_compression_level SYM_FIXUP(deflate_get_compression_level)
-#define _cpu_features SYM_FIXUP(_cpu_features)
-#define setup_cpu_features SYM_FIXUP(setup_cpu_features)
-
-void *libdeflate_malloc(size_t size);
-void libdeflate_free(void *ptr);
-
-void *libdeflate_aligned_malloc(size_t alignment, size_t size);
-void libdeflate_aligned_free(void *ptr);
-
-#ifdef FREESTANDING
-/*
- * With -ffreestanding, <string.h> may be missing, and we must provide
- * implementations of memset(), memcpy(), memmove(), and memcmp().
- * See https://gcc.gnu.org/onlinedocs/gcc/Standards.html
- *
- * Also, -ffreestanding disables interpreting calls to these functions as
- * built-ins. E.g., calling memcpy(&v, p, WORDBYTES) will make a function call,
- * not be optimized to a single load instruction. For performance reasons we
- * don't want that. So, declare these functions as macros that expand to the
- * corresponding built-ins. This approach is recommended in the gcc man page.
- * We still need the actual function definitions in case gcc calls them.
- */
-void *memset(void *s, int c, size_t n);
-#define memset(s, c, n) __builtin_memset((s), (c), (n))
-
-void *memcpy(void *dest, const void *src, size_t n);
-#define memcpy(dest, src, n) __builtin_memcpy((dest), (src), (n))
-
-void *memmove(void *dest, const void *src, size_t n);
-#define memmove(dest, src, n) __builtin_memmove((dest), (src), (n))
-
-int memcmp(const void *s1, const void *s2, size_t n);
-#define memcmp(s1, s2, n) __builtin_memcmp((s1), (s2), (n))
-#else
-#include <string.h>
-#endif
-
-#endif /* LIB_LIB_COMMON_H */
diff --git a/util/compress/libdeflate/lib/matchfinder_common.h b/util/compress/libdeflate/lib/matchfinder_common.h
deleted file mode 100644
index 49ff3343e..000000000
--- a/util/compress/libdeflate/lib/matchfinder_common.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * matchfinder_common.h - common code for Lempel-Ziv matchfinding
- */
-
-#ifndef LIB_MATCHFINDER_COMMON_H
-#define LIB_MATCHFINDER_COMMON_H
-
-#include "lib_common.h"
-#include "unaligned.h"
-
-#ifndef MATCHFINDER_WINDOW_ORDER
-# error "MATCHFINDER_WINDOW_ORDER must be defined!"
-#endif
-
-#define MATCHFINDER_WINDOW_SIZE (1UL << MATCHFINDER_WINDOW_ORDER)
-
-typedef s16 mf_pos_t;
-
-#define MATCHFINDER_INITVAL ((mf_pos_t)-MATCHFINDER_WINDOW_SIZE)
-
-/*
- * Required alignment of the matchfinder buffer pointer and size. The values
- * here come from the AVX-2 implementation, which is the worst case.
- */
-#define MATCHFINDER_MEM_ALIGNMENT 32
-#define MATCHFINDER_SIZE_ALIGNMENT 128
-
-#undef matchfinder_init
-#undef matchfinder_rebase
-#ifdef _aligned_attribute
-# if defined(__arm__) || defined(__aarch64__)
-# include "arm/matchfinder_impl.h"
-# elif defined(__i386__) || defined(__x86_64__)
-# include "x86/matchfinder_impl.h"
-# endif
-#endif
-
-/*
- * Initialize the hash table portion of the matchfinder.
- *
- * Essentially, this is an optimized memset().
- *
- * 'data' must be aligned to a MATCHFINDER_MEM_ALIGNMENT boundary, and
- * 'size' must be a multiple of MATCHFINDER_SIZE_ALIGNMENT.
- */
-#ifndef matchfinder_init
-static forceinline void
-matchfinder_init(mf_pos_t *data, size_t size)
-{
- size_t num_entries = size / sizeof(*data);
- size_t i;
-
- for (i = 0; i < num_entries; i++)
- data[i] = MATCHFINDER_INITVAL;
-}
-#endif
-
-/*
- * Slide the matchfinder by WINDOW_SIZE bytes.
- *
- * This must be called just after each WINDOW_SIZE bytes have been run through
- * the matchfinder.
- *
- * This will subtract WINDOW_SIZE bytes from each entry in the array specified.
- * The effect is that all entries are updated to be relative to the current
- * position, rather than the position WINDOW_SIZE bytes prior.
- *
- * Underflow is detected and replaced with signed saturation. This ensures that
- * once the sliding window has passed over a position, that position forever
- * remains out of bounds.
- *
- * The array passed in must contain all matchfinder data that is
- * position-relative. Concretely, this will include the hash table as well as
- * the table of positions that is used to link together the sequences in each
- * hash bucket. Note that in the latter table, the links are 1-ary in the case
- * of "hash chains", and 2-ary in the case of "binary trees". In either case,
- * the links need to be rebased in the same way.
- *
- * 'data' must be aligned to a MATCHFINDER_MEM_ALIGNMENT boundary, and
- * 'size' must be a multiple of MATCHFINDER_SIZE_ALIGNMENT.
- */
-#ifndef matchfinder_rebase
-static forceinline void
-matchfinder_rebase(mf_pos_t *data, size_t size)
-{
- size_t num_entries = size / sizeof(*data);
- size_t i;
-
- if (MATCHFINDER_WINDOW_SIZE == 32768) {
- /* Branchless version for 32768 byte windows. If the value was
- * already negative, clear all bits except the sign bit; this
- * changes the value to -32768. Otherwise, set the sign bit;
- * this is equivalent to subtracting 32768. */
- for (i = 0; i < num_entries; i++) {
- u16 v = data[i];
- u16 sign_bit = v & 0x8000;
- v &= sign_bit - ((sign_bit >> 15) ^ 1);
- v |= 0x8000;
- data[i] = v;
- }
- return;
- }
-
- for (i = 0; i < num_entries; i++) {
- if (data[i] >= 0)
- data[i] -= (mf_pos_t)-MATCHFINDER_WINDOW_SIZE;
- else
- data[i] = (mf_pos_t)-MATCHFINDER_WINDOW_SIZE;
- }
-}
-#endif
-
-/*
- * The hash function: given a sequence prefix held in the low-order bits of a
- * 32-bit value, multiply by a carefully-chosen large constant. Discard any
- * bits of the product that don't fit in a 32-bit value, but take the
- * next-highest @num_bits bits of the product as the hash value, as those have
- * the most randomness.
- */
-static forceinline u32
-lz_hash(u32 seq, unsigned num_bits)
-{
- return (u32)(seq * 0x1E35A7BD) >> (32 - num_bits);
-}
-
-/*
- * Return the number of bytes at @matchptr that match the bytes at @strptr, up
- * to a maximum of @max_len. Initially, @start_len bytes are matched.
- */
-static forceinline unsigned
-lz_extend(const u8 * const strptr, const u8 * const matchptr,
- const unsigned start_len, const unsigned max_len)
-{
- unsigned len = start_len;
- machine_word_t v_word;
-
- if (UNALIGNED_ACCESS_IS_FAST) {
-
- if (likely(max_len - len >= 4 * WORDBYTES)) {
-
- #define COMPARE_WORD_STEP \
- v_word = load_word_unaligned(&matchptr[len]) ^ \
- load_word_unaligned(&strptr[len]); \
- if (v_word != 0) \
- goto word_differs; \
- len += WORDBYTES; \
-
- COMPARE_WORD_STEP
- COMPARE_WORD_STEP
- COMPARE_WORD_STEP
- COMPARE_WORD_STEP
- #undef COMPARE_WORD_STEP
- }
-
- while (len + WORDBYTES <= max_len) {
- v_word = load_word_unaligned(&matchptr[len]) ^
- load_word_unaligned(&strptr[len]);
- if (v_word != 0)
- goto word_differs;
- len += WORDBYTES;
- }
- }
-
- while (len < max_len && matchptr[len] == strptr[len])
- len++;
- return len;
-
-word_differs:
- if (CPU_IS_LITTLE_ENDIAN())
- len += (bsfw(v_word) >> 3);
- else
- len += (WORDBITS - 1 - bsrw(v_word)) >> 3;
- return len;
-}
-
-#endif /* LIB_MATCHFINDER_COMMON_H */
diff --git a/util/compress/libdeflate/lib/unaligned.h b/util/compress/libdeflate/lib/unaligned.h
deleted file mode 100644
index bb48bf828..000000000
--- a/util/compress/libdeflate/lib/unaligned.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * unaligned.h - inline functions for unaligned memory accesses
- */
-
-#ifndef LIB_UNALIGNED_H
-#define LIB_UNALIGNED_H
-
-#include "lib_common.h"
-
-/***** Unaligned loads and stores without endianness conversion *****/
-
-/*
- * memcpy() is portable, and it usually gets optimized appropriately by modern
- * compilers. I.e., each memcpy() of 1, 2, 4, or WORDBYTES bytes gets compiled
- * to a load or store instruction, not to an actual function call.
- *
- * We no longer use the "packed struct" approach, as that is nonstandard, has
- * unclear semantics, and doesn't receive enough testing
- * (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94994).
- *
- * arm32 with __ARM_FEATURE_UNALIGNED in gcc 5 and earlier is a known exception
- * where memcpy() generates inefficient code
- * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67366). However, we no longer
- * consider that one case important enough to maintain different code for.
- * If you run into it, please just use a newer version of gcc (or use clang).
- */
-
-#define DEFINE_UNALIGNED_TYPE(type) \
-static forceinline type \
-load_##type##_unaligned(const void *p) \
-{ \
- type v; \
- memcpy(&v, p, sizeof(v)); \
- return v; \
-} \
- \
-static forceinline void \
-store_##type##_unaligned(type v, void *p) \
-{ \
- memcpy(p, &v, sizeof(v)); \
-}
-
-DEFINE_UNALIGNED_TYPE(u16)
-DEFINE_UNALIGNED_TYPE(u32)
-DEFINE_UNALIGNED_TYPE(u64)
-DEFINE_UNALIGNED_TYPE(machine_word_t)
-
-#define load_word_unaligned load_machine_word_t_unaligned
-#define store_word_unaligned store_machine_word_t_unaligned
-
-/***** Unaligned loads with endianness conversion *****/
-
-static forceinline u16
-get_unaligned_le16(const u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST)
- return le16_bswap(load_u16_unaligned(p));
- else
- return ((u16)p[1] << 8) | p[0];
-}
-
-static forceinline u16
-get_unaligned_be16(const u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST)
- return be16_bswap(load_u16_unaligned(p));
- else
- return ((u16)p[0] << 8) | p[1];
-}
-
-static forceinline u32
-get_unaligned_le32(const u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST)
- return le32_bswap(load_u32_unaligned(p));
- else
- return ((u32)p[3] << 24) | ((u32)p[2] << 16) |
- ((u32)p[1] << 8) | p[0];
-}
-
-static forceinline u32
-get_unaligned_be32(const u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST)
- return be32_bswap(load_u32_unaligned(p));
- else
- return ((u32)p[0] << 24) | ((u32)p[1] << 16) |
- ((u32)p[2] << 8) | p[3];
-}
-
-static forceinline u64
-get_unaligned_le64(const u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST)
- return le64_bswap(load_u64_unaligned(p));
- else
- return ((u64)p[7] << 56) | ((u64)p[6] << 48) |
- ((u64)p[5] << 40) | ((u64)p[4] << 32) |
- ((u64)p[3] << 24) | ((u64)p[2] << 16) |
- ((u64)p[1] << 8) | p[0];
-}
-
-static forceinline machine_word_t
-get_unaligned_leword(const u8 *p)
-{
- STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
- if (WORDBITS == 32)
- return get_unaligned_le32(p);
- else
- return get_unaligned_le64(p);
-}
-
-/***** Unaligned stores with endianness conversion *****/
-
-static forceinline void
-put_unaligned_le16(u16 v, u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST) {
- store_u16_unaligned(le16_bswap(v), p);
- } else {
- p[0] = (u8)(v >> 0);
- p[1] = (u8)(v >> 8);
- }
-}
-
-static forceinline void
-put_unaligned_be16(u16 v, u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST) {
- store_u16_unaligned(be16_bswap(v), p);
- } else {
- p[0] = (u8)(v >> 8);
- p[1] = (u8)(v >> 0);
- }
-}
-
-static forceinline void
-put_unaligned_le32(u32 v, u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST) {
- store_u32_unaligned(le32_bswap(v), p);
- } else {
- p[0] = (u8)(v >> 0);
- p[1] = (u8)(v >> 8);
- p[2] = (u8)(v >> 16);
- p[3] = (u8)(v >> 24);
- }
-}
-
-static forceinline void
-put_unaligned_be32(u32 v, u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST) {
- store_u32_unaligned(be32_bswap(v), p);
- } else {
- p[0] = (u8)(v >> 24);
- p[1] = (u8)(v >> 16);
- p[2] = (u8)(v >> 8);
- p[3] = (u8)(v >> 0);
- }
-}
-
-static forceinline void
-put_unaligned_le64(u64 v, u8 *p)
-{
- if (UNALIGNED_ACCESS_IS_FAST) {
- store_u64_unaligned(le64_bswap(v), p);
- } else {
- p[0] = (u8)(v >> 0);
- p[1] = (u8)(v >> 8);
- p[2] = (u8)(v >> 16);
- p[3] = (u8)(v >> 24);
- p[4] = (u8)(v >> 32);
- p[5] = (u8)(v >> 40);
- p[6] = (u8)(v >> 48);
- p[7] = (u8)(v >> 56);
- }
-}
-
-static forceinline void
-put_unaligned_leword(machine_word_t v, u8 *p)
-{
- STATIC_ASSERT(WORDBITS == 32 || WORDBITS == 64);
- if (WORDBITS == 32)
- put_unaligned_le32(v, p);
- else
- put_unaligned_le64(v, p);
-}
-
-/***** 24-bit loads *****/
-
-/*
- * Given a 32-bit value that was loaded with the platform's native endianness,
- * return a 32-bit value whose high-order 8 bits are 0 and whose low-order 24
- * bits contain the first 3 bytes, arranged in octets in a platform-dependent
- * order, at the memory location from which the input 32-bit value was loaded.
- */
-static forceinline u32
-loaded_u32_to_u24(u32 v)
-{
- if (CPU_IS_LITTLE_ENDIAN())
- return v & 0xFFFFFF;
- else
- return v >> 8;
-}
-
-/*
- * Load the next 3 bytes from the memory location @p into the 24 low-order bits
- * of a 32-bit value. The order in which the 3 bytes will be arranged as octets
- * in the 24 bits is platform-dependent. At least LOAD_U24_REQUIRED_NBYTES
- * bytes must be available at @p; note that this may be more than 3.
- */
-static forceinline u32
-load_u24_unaligned(const u8 *p)
-{
-#if UNALIGNED_ACCESS_IS_FAST
-# define LOAD_U24_REQUIRED_NBYTES 4
- return loaded_u32_to_u24(load_u32_unaligned(p));
-#else
-# define LOAD_U24_REQUIRED_NBYTES 3
- if (CPU_IS_LITTLE_ENDIAN())
- return ((u32)p[0] << 0) | ((u32)p[1] << 8) | ((u32)p[2] << 16);
- else
- return ((u32)p[2] << 0) | ((u32)p[1] << 8) | ((u32)p[0] << 16);
-#endif
-}
-
-#endif /* LIB_UNALIGNED_H */
diff --git a/util/compress/libdeflate/lib/utils.c b/util/compress/libdeflate/lib/utils.c
deleted file mode 100644
index c626af1f2..000000000
--- a/util/compress/libdeflate/lib/utils.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * utils.c - utility functions for libdeflate
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "lib_common.h"
-
-#include "libdeflate.h"
-
-#ifdef FREESTANDING
-# define malloc NULL
-# define free NULL
-#else
-# include <stdlib.h>
-#endif
-
-static void *(*libdeflate_malloc_func)(size_t) = malloc;
-static void (*libdeflate_free_func)(void *) = free;
-
-void *
-libdeflate_malloc(size_t size)
-{
- return (*libdeflate_malloc_func)(size);
-}
-
-void
-libdeflate_free(void *ptr)
-{
- (*libdeflate_free_func)(ptr);
-}
-
-void *
-libdeflate_aligned_malloc(size_t alignment, size_t size)
-{
- void *ptr = libdeflate_malloc(sizeof(void *) + alignment - 1 + size);
- if (ptr) {
- void *orig_ptr = ptr;
- ptr = (void *)ALIGN((uintptr_t)ptr + sizeof(void *), alignment);
- ((void **)ptr)[-1] = orig_ptr;
- }
- return ptr;
-}
-
-void
-libdeflate_aligned_free(void *ptr)
-{
- if (ptr)
- libdeflate_free(((void **)ptr)[-1]);
-}
-
-LIBDEFLATEEXPORT void LIBDEFLATEAPI
-libdeflate_set_memory_allocator(void *(*malloc_func)(size_t),
- void (*free_func)(void *))
-{
- libdeflate_malloc_func = malloc_func;
- libdeflate_free_func = free_func;
-}
-
-/*
- * Implementations of libc functions for freestanding library builds.
- * Normal library builds don't use these. Not optimized yet; usually the
- * compiler expands these functions and doesn't actually call them anyway.
- */
-#ifdef FREESTANDING
-#undef memset
-void * __attribute__((weak))
-memset(void *s, int c, size_t n)
-{
- u8 *p = s;
- size_t i;
-
- for (i = 0; i < n; i++)
- p[i] = c;
- return s;
-}
-
-#undef memcpy
-void * __attribute__((weak))
-memcpy(void *dest, const void *src, size_t n)
-{
- u8 *d = dest;
- const u8 *s = src;
- size_t i;
-
- for (i = 0; i < n; i++)
- d[i] = s[i];
- return dest;
-}
-
-#undef memmove
-void * __attribute__((weak))
-memmove(void *dest, const void *src, size_t n)
-{
- u8 *d = dest;
- const u8 *s = src;
- size_t i;
-
- if (d <= s)
- return memcpy(d, s, n);
-
- for (i = n; i > 0; i--)
- d[i - 1] = s[i - 1];
- return dest;
-}
-
-#undef memcmp
-int __attribute__((weak))
-memcmp(const void *s1, const void *s2, size_t n)
-{
- const u8 *p1 = s1;
- const u8 *p2 = s2;
- size_t i;
-
- for (i = 0; i < n; i++) {
- if (p1[i] != p2[i])
- return (int)p1[i] - (int)p2[i];
- }
- return 0;
-}
-#endif /* FREESTANDING */
diff --git a/util/compress/libdeflate/lib/x86/adler32_impl.h b/util/compress/libdeflate/lib/x86/adler32_impl.h
deleted file mode 100644
index f89bde585..000000000
--- a/util/compress/libdeflate/lib/x86/adler32_impl.h
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * x86/adler32_impl.h - x86 implementations of Adler-32 checksum algorithm
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LIB_X86_ADLER32_IMPL_H
-#define LIB_X86_ADLER32_IMPL_H
-
-#include "cpu_features.h"
-
-/*
- * The following macros horizontally sum the s1 counters and add them to the
- * real s1, and likewise for s2. They do this via a series of reductions, each
- * of which halves the vector length, until just one counter remains.
- *
- * The s1 reductions don't depend on the s2 reductions and vice versa, so for
- * efficiency they are interleaved. Also, every other s1 counter is 0 due to
- * the 'psadbw' instruction (_mm_sad_epu8) summing groups of 8 bytes rather than
- * 4; hence, one of the s1 reductions is skipped when going from 128 => 32 bits.
- */
-
-#define ADLER32_FINISH_VEC_CHUNK_128(s1, s2, v_s1, v_s2) \
-{ \
- __v4su s1_last = (v_s1), s2_last = (v_s2); \
- \
- /* 128 => 32 bits */ \
- s2_last += (__v4su)_mm_shuffle_epi32((__m128i)s2_last, 0x31); \
- s1_last += (__v4su)_mm_shuffle_epi32((__m128i)s1_last, 0x02); \
- s2_last += (__v4su)_mm_shuffle_epi32((__m128i)s2_last, 0x02); \
- \
- *(s1) += (u32)_mm_cvtsi128_si32((__m128i)s1_last); \
- *(s2) += (u32)_mm_cvtsi128_si32((__m128i)s2_last); \
-}
-
-#define ADLER32_FINISH_VEC_CHUNK_256(s1, s2, v_s1, v_s2) \
-{ \
- __v4su s1_128bit, s2_128bit; \
- \
- /* 256 => 128 bits */ \
- s1_128bit = (__v4su)_mm256_extracti128_si256((__m256i)(v_s1), 0) + \
- (__v4su)_mm256_extracti128_si256((__m256i)(v_s1), 1); \
- s2_128bit = (__v4su)_mm256_extracti128_si256((__m256i)(v_s2), 0) + \
- (__v4su)_mm256_extracti128_si256((__m256i)(v_s2), 1); \
- \
- ADLER32_FINISH_VEC_CHUNK_128((s1), (s2), s1_128bit, s2_128bit); \
-}
-
-#define ADLER32_FINISH_VEC_CHUNK_512(s1, s2, v_s1, v_s2) \
-{ \
- __v8su s1_256bit, s2_256bit; \
- \
- /* 512 => 256 bits */ \
- s1_256bit = (__v8su)_mm512_extracti64x4_epi64((__m512i)(v_s1), 0) + \
- (__v8su)_mm512_extracti64x4_epi64((__m512i)(v_s1), 1); \
- s2_256bit = (__v8su)_mm512_extracti64x4_epi64((__m512i)(v_s2), 0) + \
- (__v8su)_mm512_extracti64x4_epi64((__m512i)(v_s2), 1); \
- \
- ADLER32_FINISH_VEC_CHUNK_256((s1), (s2), s1_256bit, s2_256bit); \
-}
-
-/* AVX-512BW implementation: like the AVX2 one, but does 64 bytes at a time */
-#undef DISPATCH_AVX512BW
-#if !defined(DEFAULT_IMPL) && \
- /*
- * clang before v3.9 is missing some AVX-512BW intrinsics including
- * _mm512_sad_epu8(), a.k.a. __builtin_ia32_psadbw512. So just make using
- * AVX-512BW, even when __AVX512BW__ is defined, conditional on
- * COMPILER_SUPPORTS_AVX512BW_TARGET where we check for that builtin.
- */ \
- COMPILER_SUPPORTS_AVX512BW_TARGET && \
- (defined(__AVX512BW__) || (X86_CPU_FEATURES_ENABLED && \
- COMPILER_SUPPORTS_AVX512BW_TARGET_INTRINSICS))
-# define FUNCNAME adler32_avx512bw
-# define FUNCNAME_CHUNK adler32_avx512bw_chunk
-# define IMPL_ALIGNMENT 64
-# define IMPL_SEGMENT_SIZE 64
-# define IMPL_MAX_CHUNK_SIZE MAX_CHUNK_SIZE
-# ifdef __AVX512BW__
-# define ATTRIBUTES
-# define DEFAULT_IMPL adler32_avx512bw
-# else
-# define ATTRIBUTES __attribute__((target("avx512bw")))
-# define DISPATCH 1
-# define DISPATCH_AVX512BW 1
-# endif
-# include <immintrin.h>
-static forceinline ATTRIBUTES void
-adler32_avx512bw_chunk(const __m512i *p, const __m512i *const end,
- u32 *s1, u32 *s2)
-{
- const __m512i zeroes = _mm512_setzero_si512();
- const __v64qi multipliers = (__v64qi){
- 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49,
- 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33,
- 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17,
- 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
- };
- const __v32hi ones = (__v32hi)_mm512_set1_epi16(1);
- __v16si v_s1 = (__v16si)zeroes;
- __v16si v_s1_sums = (__v16si)zeroes;
- __v16si v_s2 = (__v16si)zeroes;
-
- do {
- /* Load the next 64-byte segment */
- __m512i bytes = *p++;
-
- /* Multiply the bytes by 64...1 (the number of times they need
- * to be added to s2) and add adjacent products */
- __v32hi sums = (__v32hi)_mm512_maddubs_epi16(
- bytes, (__m512i)multipliers);
-
- /* Keep sum of all previous s1 counters, for adding to s2 later.
- * This allows delaying the multiplication by 64 to the end. */
- v_s1_sums += v_s1;
-
- /* Add the sum of each group of 8 bytes to the corresponding s1
- * counter */
- v_s1 += (__v16si)_mm512_sad_epu8(bytes, zeroes);
-
- /* Add the sum of each group of 4 products of the bytes by
- * 64...1 to the corresponding s2 counter */
- v_s2 += (__v16si)_mm512_madd_epi16((__m512i)sums,
- (__m512i)ones);
- } while (p != end);
-
- /* Finish the s2 counters by adding the sum of the s1 values at the
- * beginning of each segment, multiplied by the segment size (64) */
- v_s2 += (__v16si)_mm512_slli_epi32((__m512i)v_s1_sums, 6);
-
- /* Add the counters to the real s1 and s2 */
- ADLER32_FINISH_VEC_CHUNK_512(s1, s2, v_s1, v_s2);
-}
-# include "../adler32_vec_template.h"
-#endif /* AVX-512BW implementation */
-
-/* AVX2 implementation: like the AVX-512BW one, but does 32 bytes at a time */
-#undef DISPATCH_AVX2
-#if !defined(DEFAULT_IMPL) && \
- (defined(__AVX2__) || (X86_CPU_FEATURES_ENABLED && \
- COMPILER_SUPPORTS_AVX2_TARGET_INTRINSICS))
-# define FUNCNAME adler32_avx2
-# define FUNCNAME_CHUNK adler32_avx2_chunk
-# define IMPL_ALIGNMENT 32
-# define IMPL_SEGMENT_SIZE 32
-# define IMPL_MAX_CHUNK_SIZE MAX_CHUNK_SIZE
-# ifdef __AVX2__
-# define ATTRIBUTES
-# define DEFAULT_IMPL adler32_avx2
-# else
-# define ATTRIBUTES __attribute__((target("avx2")))
-# define DISPATCH 1
-# define DISPATCH_AVX2 1
-# endif
-# include <immintrin.h>
-static forceinline ATTRIBUTES void
-adler32_avx2_chunk(const __m256i *p, const __m256i *const end, u32 *s1, u32 *s2)
-{
- const __m256i zeroes = _mm256_setzero_si256();
- const __v32qu multipliers = (__v32qu){
- 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17,
- 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
- };
- const __v16hu ones = (__v16hu)_mm256_set1_epi16(1);
- __v8su v_s1 = (__v8su)zeroes;
- __v8su v_s1_sums = (__v8su)zeroes;
- __v8su v_s2 = (__v8su)zeroes;
-
- do {
- /* Load the next 32-byte segment */
- __m256i bytes = *p++;
-
- /* Multiply the bytes by 32...1 (the number of times they need
- * to be added to s2) and add adjacent products */
- __v16hu sums = (__v16hu)_mm256_maddubs_epi16(
- bytes, (__m256i)multipliers);
-
- /* Keep sum of all previous s1 counters, for adding to s2 later.
- * This allows delaying the multiplication by 32 to the end. */
- v_s1_sums += v_s1;
-
- /* Add the sum of each group of 8 bytes to the corresponding s1
- * counter */
- v_s1 += (__v8su)_mm256_sad_epu8(bytes, zeroes);
-
- /* Add the sum of each group of 4 products of the bytes by
- * 32...1 to the corresponding s2 counter */
- v_s2 += (__v8su)_mm256_madd_epi16((__m256i)sums, (__m256i)ones);
- } while (p != end);
-
- /* Finish the s2 counters by adding the sum of the s1 values at the
- * beginning of each segment, multiplied by the segment size (32) */
- v_s2 += (__v8su)_mm256_slli_epi32((__m256i)v_s1_sums, 5);
-
- /* Add the counters to the real s1 and s2 */
- ADLER32_FINISH_VEC_CHUNK_256(s1, s2, v_s1, v_s2);
-}
-# include "../adler32_vec_template.h"
-#endif /* AVX2 implementation */
-
-/* SSE2 implementation */
-#undef DISPATCH_SSE2
-#if !defined(DEFAULT_IMPL) && \
- (defined(__SSE2__) || (X86_CPU_FEATURES_ENABLED && \
- COMPILER_SUPPORTS_SSE2_TARGET_INTRINSICS))
-# define FUNCNAME adler32_sse2
-# define FUNCNAME_CHUNK adler32_sse2_chunk
-# define IMPL_ALIGNMENT 16
-# define IMPL_SEGMENT_SIZE 32
-/*
- * The 16-bit precision byte counters must not be allowed to undergo *signed*
- * overflow, otherwise the signed multiplications at the end (_mm_madd_epi16)
- * would behave incorrectly.
- */
-# define IMPL_MAX_CHUNK_SIZE (32 * (0x7FFF / 0xFF))
-# ifdef __SSE2__
-# define ATTRIBUTES
-# define DEFAULT_IMPL adler32_sse2
-# else
-# define ATTRIBUTES __attribute__((target("sse2")))
-# define DISPATCH 1
-# define DISPATCH_SSE2 1
-# endif
-# include <emmintrin.h>
-static forceinline ATTRIBUTES void
-adler32_sse2_chunk(const __m128i *p, const __m128i *const end, u32 *s1, u32 *s2)
-{
- const __m128i zeroes = _mm_setzero_si128();
-
- /* s1 counters: 32-bit, sum of bytes */
- __v4su v_s1 = (__v4su)zeroes;
-
- /* s2 counters: 32-bit, sum of s1 values */
- __v4su v_s2 = (__v4su)zeroes;
-
- /*
- * Thirty-two 16-bit counters for byte sums. Each accumulates the bytes
- * that eventually need to be multiplied by a number 32...1 for addition
- * into s2.
- */
- __v8hu v_byte_sums_a = (__v8hu)zeroes;
- __v8hu v_byte_sums_b = (__v8hu)zeroes;
- __v8hu v_byte_sums_c = (__v8hu)zeroes;
- __v8hu v_byte_sums_d = (__v8hu)zeroes;
-
- do {
- /* Load the next 32 bytes */
- const __m128i bytes1 = *p++;
- const __m128i bytes2 = *p++;
-
- /*
- * Accumulate the previous s1 counters into the s2 counters.
- * Logically, this really should be v_s2 += v_s1 * 32, but we
- * can do the multiplication (or left shift) later.
- */
- v_s2 += v_s1;
-
- /*
- * s1 update: use "Packed Sum of Absolute Differences" to add
- * the bytes horizontally with 8 bytes per sum. Then add the
- * sums to the s1 counters.
- */
- v_s1 += (__v4su)_mm_sad_epu8(bytes1, zeroes);
- v_s1 += (__v4su)_mm_sad_epu8(bytes2, zeroes);
-
- /*
- * Also accumulate the bytes into 32 separate counters that have
- * 16-bit precision.
- */
- v_byte_sums_a += (__v8hu)_mm_unpacklo_epi8(bytes1, zeroes);
- v_byte_sums_b += (__v8hu)_mm_unpackhi_epi8(bytes1, zeroes);
- v_byte_sums_c += (__v8hu)_mm_unpacklo_epi8(bytes2, zeroes);
- v_byte_sums_d += (__v8hu)_mm_unpackhi_epi8(bytes2, zeroes);
-
- } while (p != end);
-
- /* Finish calculating the s2 counters */
- v_s2 = (__v4su)_mm_slli_epi32((__m128i)v_s2, 5);
- v_s2 += (__v4su)_mm_madd_epi16((__m128i)v_byte_sums_a,
- (__m128i)(__v8hu){ 32, 31, 30, 29, 28, 27, 26, 25 });
- v_s2 += (__v4su)_mm_madd_epi16((__m128i)v_byte_sums_b,
- (__m128i)(__v8hu){ 24, 23, 22, 21, 20, 19, 18, 17 });
- v_s2 += (__v4su)_mm_madd_epi16((__m128i)v_byte_sums_c,
- (__m128i)(__v8hu){ 16, 15, 14, 13, 12, 11, 10, 9 });
- v_s2 += (__v4su)_mm_madd_epi16((__m128i)v_byte_sums_d,
- (__m128i)(__v8hu){ 8, 7, 6, 5, 4, 3, 2, 1 });
-
- /* Add the counters to the real s1 and s2 */
- ADLER32_FINISH_VEC_CHUNK_128(s1, s2, v_s1, v_s2);
-}
-# include "../adler32_vec_template.h"
-#endif /* SSE2 implementation */
-
-#ifdef DISPATCH
-static inline adler32_func_t
-arch_select_adler32_func(void)
-{
- u32 features = get_cpu_features();
-
-#ifdef DISPATCH_AVX512BW
- if (features & X86_CPU_FEATURE_AVX512BW)
- return adler32_avx512bw;
-#endif
-#ifdef DISPATCH_AVX2
- if (features & X86_CPU_FEATURE_AVX2)
- return adler32_avx2;
-#endif
-#ifdef DISPATCH_SSE2
- if (features & X86_CPU_FEATURE_SSE2)
- return adler32_sse2;
-#endif
- return NULL;
-}
-#endif /* DISPATCH */
-
-#endif /* LIB_X86_ADLER32_IMPL_H */
diff --git a/util/compress/libdeflate/lib/x86/cpu_features.c b/util/compress/libdeflate/lib/x86/cpu_features.c
deleted file mode 100644
index e3471d468..000000000
--- a/util/compress/libdeflate/lib/x86/cpu_features.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * x86/cpu_features.c - feature detection for x86 processors
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "../cpu_features_common.h" /* must be included first */
-#include "cpu_features.h"
-
-#if X86_CPU_FEATURES_ENABLED
-
-volatile u32 _cpu_features = 0;
-
-/* With old GCC versions we have to manually save and restore the x86_32 PIC
- * register (ebx). See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602 */
-#if defined(__i386__) && defined(__PIC__)
-# define EBX_CONSTRAINT "=&r"
-#else
-# define EBX_CONSTRAINT "=b"
-#endif
-
-/* Execute the CPUID instruction. */
-static inline void
-cpuid(u32 leaf, u32 subleaf, u32 *a, u32 *b, u32 *c, u32 *d)
-{
- __asm__(".ifnc %%ebx, %1; mov %%ebx, %1; .endif\n"
- "cpuid \n"
- ".ifnc %%ebx, %1; xchg %%ebx, %1; .endif\n"
- : "=a" (*a), EBX_CONSTRAINT (*b), "=c" (*c), "=d" (*d)
- : "a" (leaf), "c" (subleaf));
-}
-
-/* Read an extended control register. */
-static inline u64
-read_xcr(u32 index)
-{
- u32 edx, eax;
-
- /* Execute the "xgetbv" instruction. Old versions of binutils do not
- * recognize this instruction, so list the raw bytes instead. */
- __asm__ (".byte 0x0f, 0x01, 0xd0" : "=d" (edx), "=a" (eax) : "c" (index));
-
- return ((u64)edx << 32) | eax;
-}
-
-#undef BIT
-#define BIT(nr) (1UL << (nr))
-
-#define XCR0_BIT_SSE BIT(1)
-#define XCR0_BIT_AVX BIT(2)
-#define XCR0_BIT_OPMASK BIT(5)
-#define XCR0_BIT_ZMM_HI256 BIT(6)
-#define XCR0_BIT_HI16_ZMM BIT(7)
-
-#define IS_SET(reg, nr) ((reg) & BIT(nr))
-#define IS_ALL_SET(reg, mask) (((reg) & (mask)) == (mask))
-
-static const struct cpu_feature x86_cpu_feature_table[] = {
- {X86_CPU_FEATURE_SSE2, "sse2"},
- {X86_CPU_FEATURE_PCLMUL, "pclmul"},
- {X86_CPU_FEATURE_AVX, "avx"},
- {X86_CPU_FEATURE_AVX2, "avx2"},
- {X86_CPU_FEATURE_BMI2, "bmi2"},
- {X86_CPU_FEATURE_AVX512BW, "avx512bw"},
-};
-
-/* Initialize _cpu_features with bits for interesting processor features. */
-void setup_cpu_features(void)
-{
- u32 features = 0;
- u32 dummy1, dummy2, dummy3, dummy4;
- u32 max_function;
- u32 features_1, features_2, features_3, features_4;
- bool os_avx_support = false;
- bool os_avx512_support = false;
-
- /* Get maximum supported function */
- cpuid(0, 0, &max_function, &dummy2, &dummy3, &dummy4);
- if (max_function < 1)
- goto out;
-
- /* Standard feature flags */
- cpuid(1, 0, &dummy1, &dummy2, &features_2, &features_1);
-
- if (IS_SET(features_1, 26))
- features |= X86_CPU_FEATURE_SSE2;
-
- if (IS_SET(features_2, 1))
- features |= X86_CPU_FEATURE_PCLMUL;
-
- if (IS_SET(features_2, 27)) { /* OSXSAVE set? */
- u64 xcr0 = read_xcr(0);
-
- os_avx_support = IS_ALL_SET(xcr0,
- XCR0_BIT_SSE |
- XCR0_BIT_AVX);
-
- os_avx512_support = IS_ALL_SET(xcr0,
- XCR0_BIT_SSE |
- XCR0_BIT_AVX |
- XCR0_BIT_OPMASK |
- XCR0_BIT_ZMM_HI256 |
- XCR0_BIT_HI16_ZMM);
- }
-
- if (os_avx_support && IS_SET(features_2, 28))
- features |= X86_CPU_FEATURE_AVX;
-
- if (max_function < 7)
- goto out;
-
- /* Extended feature flags */
- cpuid(7, 0, &dummy1, &features_3, &features_4, &dummy4);
-
- if (os_avx_support && IS_SET(features_3, 5))
- features |= X86_CPU_FEATURE_AVX2;
-
- if (IS_SET(features_3, 8))
- features |= X86_CPU_FEATURE_BMI2;
-
- if (os_avx512_support && IS_SET(features_3, 30))
- features |= X86_CPU_FEATURE_AVX512BW;
-
-out:
- disable_cpu_features_for_testing(&features, x86_cpu_feature_table,
- ARRAY_LEN(x86_cpu_feature_table));
-
- _cpu_features = features | X86_CPU_FEATURES_KNOWN;
-}
-
-#endif /* X86_CPU_FEATURES_ENABLED */
diff --git a/util/compress/libdeflate/lib/x86/cpu_features.h b/util/compress/libdeflate/lib/x86/cpu_features.h
deleted file mode 100644
index 4c023539e..000000000
--- a/util/compress/libdeflate/lib/x86/cpu_features.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * x86/cpu_features.h - feature detection for x86 processors
- */
-
-#ifndef LIB_X86_CPU_FEATURES_H
-#define LIB_X86_CPU_FEATURES_H
-
-#include "../lib_common.h"
-
-#if (defined(__i386__) || defined(__x86_64__)) && \
- COMPILER_SUPPORTS_TARGET_FUNCTION_ATTRIBUTE
-# define X86_CPU_FEATURES_ENABLED 1
-#else
-# define X86_CPU_FEATURES_ENABLED 0
-#endif
-
-#if X86_CPU_FEATURES_ENABLED
-
-#define X86_CPU_FEATURE_SSE2 0x00000001
-#define X86_CPU_FEATURE_PCLMUL 0x00000002
-#define X86_CPU_FEATURE_AVX 0x00000004
-#define X86_CPU_FEATURE_AVX2 0x00000008
-#define X86_CPU_FEATURE_BMI2 0x00000010
-#define X86_CPU_FEATURE_AVX512BW 0x00000020
-
-#define X86_CPU_FEATURES_KNOWN 0x80000000
-
-extern volatile u32 _cpu_features;
-
-void setup_cpu_features(void);
-
-static inline u32 get_cpu_features(void)
-{
- if (_cpu_features == 0)
- setup_cpu_features();
- return _cpu_features;
-}
-
-#endif /* X86_CPU_FEATURES_ENABLED */
-
-#endif /* LIB_X86_CPU_FEATURES_H */
diff --git a/util/compress/libdeflate/lib/x86/crc32_impl.h b/util/compress/libdeflate/lib/x86/crc32_impl.h
deleted file mode 100644
index 14a686792..000000000
--- a/util/compress/libdeflate/lib/x86/crc32_impl.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * x86/crc32_impl.h - x86 implementations of CRC-32 checksum algorithm
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LIB_X86_CRC32_IMPL_H
-#define LIB_X86_CRC32_IMPL_H
-
-#include "cpu_features.h"
-
-/*
- * Include the PCLMUL/AVX implementation? Although our PCLMUL-optimized CRC-32
- * function doesn't use any AVX intrinsics specifically, it can benefit a lot
- * from being compiled for an AVX target: on Skylake, ~16700 MB/s vs. ~10100
- * MB/s. I expect this is related to the PCLMULQDQ instructions being assembled
- * in the newer three-operand form rather than the older two-operand form.
- *
- * Note: this is only needed if __AVX__ is *not* defined, since otherwise the
- * "regular" PCLMUL implementation would already be AVX enabled.
- */
-#undef DISPATCH_PCLMUL_AVX
-#if !defined(DEFAULT_IMPL) && !defined(__AVX__) && \
- X86_CPU_FEATURES_ENABLED && COMPILER_SUPPORTS_AVX_TARGET && \
- (defined(__PCLMUL__) || COMPILER_SUPPORTS_PCLMUL_TARGET_INTRINSICS)
-# define FUNCNAME crc32_pclmul_avx
-# define FUNCNAME_ALIGNED crc32_pclmul_avx_aligned
-# define ATTRIBUTES __attribute__((target("pclmul,avx")))
-# define DISPATCH 1
-# define DISPATCH_PCLMUL_AVX 1
-# include "crc32_pclmul_template.h"
-#endif
-
-/* PCLMUL implementation */
-#undef DISPATCH_PCLMUL
-#if !defined(DEFAULT_IMPL) && \
- (defined(__PCLMUL__) || (X86_CPU_FEATURES_ENABLED && \
- COMPILER_SUPPORTS_PCLMUL_TARGET_INTRINSICS))
-# define FUNCNAME crc32_pclmul
-# define FUNCNAME_ALIGNED crc32_pclmul_aligned
-# ifdef __PCLMUL__
-# define ATTRIBUTES
-# define DEFAULT_IMPL crc32_pclmul
-# else
-# define ATTRIBUTES __attribute__((target("pclmul")))
-# define DISPATCH 1
-# define DISPATCH_PCLMUL 1
-# endif
-# include "crc32_pclmul_template.h"
-#endif
-
-#ifdef DISPATCH
-static inline crc32_func_t
-arch_select_crc32_func(void)
-{
- u32 features = get_cpu_features();
-
-#ifdef DISPATCH_PCLMUL_AVX
- if ((features & X86_CPU_FEATURE_PCLMUL) &&
- (features & X86_CPU_FEATURE_AVX))
- return crc32_pclmul_avx;
-#endif
-#ifdef DISPATCH_PCLMUL
- if (features & X86_CPU_FEATURE_PCLMUL)
- return crc32_pclmul;
-#endif
- return NULL;
-}
-#endif /* DISPATCH */
-
-#endif /* LIB_X86_CRC32_IMPL_H */
diff --git a/util/compress/libdeflate/lib/x86/crc32_pclmul_template.h b/util/compress/libdeflate/lib/x86/crc32_pclmul_template.h
deleted file mode 100644
index a5eda9b87..000000000
--- a/util/compress/libdeflate/lib/x86/crc32_pclmul_template.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * x86/crc32_pclmul_template.h
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <immintrin.h>
-
-/*
- * CRC-32 folding with PCLMULQDQ.
- *
- * The basic idea is to repeatedly "fold" each 512 bits into the next 512 bits,
- * producing an abbreviated message which is congruent the original message
- * modulo the generator polynomial G(x).
- *
- * Folding each 512 bits is implemented as eight 64-bit folds, each of which
- * uses one carryless multiplication instruction. It's expected that CPUs may
- * be able to execute some of these multiplications in parallel.
- *
- * Explanation of "folding": let A(x) be 64 bits from the message, and let B(x)
- * be 95 bits from a constant distance D later in the message. The relevant
- * portion of the message can be written as:
- *
- * M(x) = A(x)*x^D + B(x)
- *
- * ... where + and * represent addition and multiplication, respectively, of
- * polynomials over GF(2). Note that when implemented on a computer, these
- * operations are equivalent to XOR and carryless multiplication, respectively.
- *
- * For the purpose of CRC calculation, only the remainder modulo the generator
- * polynomial G(x) matters:
- *
- * M(x) mod G(x) = (A(x)*x^D + B(x)) mod G(x)
- *
- * Since the modulo operation can be applied anywhere in a sequence of additions
- * and multiplications without affecting the result, this is equivalent to:
- *
- * M(x) mod G(x) = (A(x)*(x^D mod G(x)) + B(x)) mod G(x)
- *
- * For any D, 'x^D mod G(x)' will be a polynomial with maximum degree 31, i.e.
- * a 32-bit quantity. So 'A(x) * (x^D mod G(x))' is equivalent to a carryless
- * multiplication of a 64-bit quantity by a 32-bit quantity, producing a 95-bit
- * product. Then, adding (XOR-ing) the product to B(x) produces a polynomial
- * with the same length as B(x) but with the same remainder as 'A(x)*x^D +
- * B(x)'. This is the basic fold operation with 64 bits.
- *
- * Note that the carryless multiplication instruction PCLMULQDQ actually takes
- * two 64-bit inputs and produces a 127-bit product in the low-order bits of a
- * 128-bit XMM register. This works fine, but care must be taken to account for
- * "bit endianness". With the CRC version implemented here, bits are always
- * ordered such that the lowest-order bit represents the coefficient of highest
- * power of x and the highest-order bit represents the coefficient of the lowest
- * power of x. This is backwards from the more intuitive order. Still,
- * carryless multiplication works essentially the same either way. It just must
- * be accounted for that when we XOR the 95-bit product in the low-order 95 bits
- * of a 128-bit XMM register into 128-bits of later data held in another XMM
- * register, we'll really be XOR-ing the product into the mathematically higher
- * degree end of those later bits, not the lower degree end as may be expected.
- *
- * So given that caveat and the fact that we process 512 bits per iteration, the
- * 'D' values we need for the two 64-bit halves of each 128 bits of data are:
- *
- * D = (512 + 95) - 64 for the higher-degree half of each 128 bits,
- * i.e. the lower order bits in the XMM register
- *
- * D = (512 + 95) - 128 for the lower-degree half of each 128 bits,
- * i.e. the higher order bits in the XMM register
- *
- * The required 'x^D mod G(x)' values were precomputed.
- *
- * When <= 512 bits remain in the message, we finish up by folding across
- * smaller distances. This works similarly; the distance D is just different,
- * so different constant multipliers must be used. Finally, once the remaining
- * message is just 64 bits, it is reduced to the CRC-32 using Barrett reduction
- * (explained later).
- *
- * For more information see the original paper from Intel:
- * "Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction"
- * December 2009
- * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
- */
-static u32 ATTRIBUTES
-FUNCNAME_ALIGNED(u32 remainder, const __m128i *p, size_t nr_segs)
-{
- /* Constants precomputed by gen_crc32_multipliers.c. Do not edit! */
- const __v2di multipliers_4 = (__v2di){ 0x8F352D95, 0x1D9513D7 };
- const __v2di multipliers_2 = (__v2di){ 0xF1DA05AA, 0x81256527 };
- const __v2di multipliers_1 = (__v2di){ 0xAE689191, 0xCCAA009E };
- const __v2di final_multiplier = (__v2di){ 0xB8BC6765 };
- const __m128i mask32 = (__m128i)(__v4si){ 0xFFFFFFFF };
- const __v2di barrett_reduction_constants =
- (__v2di){ 0x00000001F7011641, 0x00000001DB710641 };
-
- const __m128i * const end = p + nr_segs;
- const __m128i * const end512 = p + (nr_segs & ~3);
- __m128i x0, x1, x2, x3;
-
- /*
- * Account for the current 'remainder', i.e. the CRC of the part of the
- * message already processed. Explanation: rewrite the message
- * polynomial M(x) in terms of the first part A(x), the second part
- * B(x), and the length of the second part in bits |B(x)| >= 32:
- *
- * M(x) = A(x)*x^|B(x)| + B(x)
- *
- * Then the CRC of M(x) is:
- *
- * CRC(M(x)) = CRC(A(x)*x^|B(x)| + B(x))
- * = CRC(A(x)*x^32*x^(|B(x)| - 32) + B(x))
- * = CRC(CRC(A(x))*x^(|B(x)| - 32) + B(x))
- *
- * Note: all arithmetic is modulo G(x), the generator polynomial; that's
- * why A(x)*x^32 can be replaced with CRC(A(x)) = A(x)*x^32 mod G(x).
- *
- * So the CRC of the full message is the CRC of the second part of the
- * message where the first 32 bits of the second part of the message
- * have been XOR'ed with the CRC of the first part of the message.
- */
- x0 = *p++;
- x0 ^= (__m128i)(__v4si){ remainder };
-
- if (p > end512) /* only 128, 256, or 384 bits of input? */
- goto _128_bits_at_a_time;
- x1 = *p++;
- x2 = *p++;
- x3 = *p++;
-
- /* Fold 512 bits at a time */
- for (; p != end512; p += 4) {
- __m128i y0, y1, y2, y3;
-
- y0 = p[0];
- y1 = p[1];
- y2 = p[2];
- y3 = p[3];
-
- /*
- * Note: the immediate constant for PCLMULQDQ specifies which
- * 64-bit halves of the 128-bit vectors to multiply:
- *
- * 0x00 means low halves (higher degree polynomial terms for us)
- * 0x11 means high halves (lower degree polynomial terms for us)
- */
- y0 ^= _mm_clmulepi64_si128(x0, multipliers_4, 0x00);
- y1 ^= _mm_clmulepi64_si128(x1, multipliers_4, 0x00);
- y2 ^= _mm_clmulepi64_si128(x2, multipliers_4, 0x00);
- y3 ^= _mm_clmulepi64_si128(x3, multipliers_4, 0x00);
- y0 ^= _mm_clmulepi64_si128(x0, multipliers_4, 0x11);
- y1 ^= _mm_clmulepi64_si128(x1, multipliers_4, 0x11);
- y2 ^= _mm_clmulepi64_si128(x2, multipliers_4, 0x11);
- y3 ^= _mm_clmulepi64_si128(x3, multipliers_4, 0x11);
-
- x0 = y0;
- x1 = y1;
- x2 = y2;
- x3 = y3;
- }
-
- /* Fold 512 bits => 128 bits */
- x2 ^= _mm_clmulepi64_si128(x0, multipliers_2, 0x00);
- x3 ^= _mm_clmulepi64_si128(x1, multipliers_2, 0x00);
- x2 ^= _mm_clmulepi64_si128(x0, multipliers_2, 0x11);
- x3 ^= _mm_clmulepi64_si128(x1, multipliers_2, 0x11);
- x3 ^= _mm_clmulepi64_si128(x2, multipliers_1, 0x00);
- x3 ^= _mm_clmulepi64_si128(x2, multipliers_1, 0x11);
- x0 = x3;
-
-_128_bits_at_a_time:
- while (p != end) {
- /* Fold 128 bits into next 128 bits */
- x1 = *p++;
- x1 ^= _mm_clmulepi64_si128(x0, multipliers_1, 0x00);
- x1 ^= _mm_clmulepi64_si128(x0, multipliers_1, 0x11);
- x0 = x1;
- }
-
- /* Now there are just 128 bits left, stored in 'x0'. */
-
- /*
- * Fold 128 => 96 bits. This also implicitly appends 32 zero bits,
- * which is equivalent to multiplying by x^32. This is needed because
- * the CRC is defined as M(x)*x^32 mod G(x), not just M(x) mod G(x).
- */
- x0 = _mm_srli_si128(x0, 8) ^
- _mm_clmulepi64_si128(x0, multipliers_1, 0x10);
-
- /* Fold 96 => 64 bits */
- x0 = _mm_srli_si128(x0, 4) ^
- _mm_clmulepi64_si128(x0 & mask32, final_multiplier, 0x00);
-
- /*
- * Finally, reduce 64 => 32 bits using Barrett reduction.
- *
- * Let M(x) = A(x)*x^32 + B(x) be the remaining message. The goal is to
- * compute R(x) = M(x) mod G(x). Since degree(B(x)) < degree(G(x)):
- *
- * R(x) = (A(x)*x^32 + B(x)) mod G(x)
- * = (A(x)*x^32) mod G(x) + B(x)
- *
- * Then, by the Division Algorithm there exists a unique q(x) such that:
- *
- * A(x)*x^32 mod G(x) = A(x)*x^32 - q(x)*G(x)
- *
- * Since the left-hand side is of maximum degree 31, the right-hand side
- * must be too. This implies that we can apply 'mod x^32' to the
- * right-hand side without changing its value:
- *
- * (A(x)*x^32 - q(x)*G(x)) mod x^32 = q(x)*G(x) mod x^32
- *
- * Note that '+' is equivalent to '-' in polynomials over GF(2).
- *
- * We also know that:
- *
- * / A(x)*x^32 \
- * q(x) = floor ( --------- )
- * \ G(x) /
- *
- * To compute this efficiently, we can multiply the top and bottom by
- * x^32 and move the division by G(x) to the top:
- *
- * / A(x) * floor(x^64 / G(x)) \
- * q(x) = floor ( ------------------------- )
- * \ x^32 /
- *
- * Note that floor(x^64 / G(x)) is a constant.
- *
- * So finally we have:
- *
- * / A(x) * floor(x^64 / G(x)) \
- * R(x) = B(x) + G(x)*floor ( ------------------------- )
- * \ x^32 /
- */
- x1 = x0;
- x0 = _mm_clmulepi64_si128(x0 & mask32, barrett_reduction_constants, 0x00);
- x0 = _mm_clmulepi64_si128(x0 & mask32, barrett_reduction_constants, 0x10);
- return _mm_cvtsi128_si32(_mm_srli_si128(x0 ^ x1, 4));
-}
-
-#define IMPL_ALIGNMENT 16
-#define IMPL_SEGMENT_SIZE 16
-#include "../crc32_vec_template.h"
diff --git a/util/compress/libdeflate/lib/x86/decompress_impl.h b/util/compress/libdeflate/lib/x86/decompress_impl.h
deleted file mode 100644
index de6d23631..000000000
--- a/util/compress/libdeflate/lib/x86/decompress_impl.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef LIB_X86_DECOMPRESS_IMPL_H
-#define LIB_X86_DECOMPRESS_IMPL_H
-
-#include "cpu_features.h"
-
-/* Include the BMI2-optimized version? */
-#undef DISPATCH_BMI2
-#if !defined(__BMI2__) && X86_CPU_FEATURES_ENABLED && \
- COMPILER_SUPPORTS_BMI2_TARGET
-# define FUNCNAME deflate_decompress_bmi2
-# define ATTRIBUTES __attribute__((target("bmi2")))
-# define DISPATCH 1
-# define DISPATCH_BMI2 1
-# include "../decompress_template.h"
-#endif
-
-#ifdef DISPATCH
-static inline decompress_func_t
-arch_select_decompress_func(void)
-{
- u32 features = get_cpu_features();
-
-#ifdef DISPATCH_BMI2
- if (features & X86_CPU_FEATURE_BMI2)
- return deflate_decompress_bmi2;
-#endif
- return NULL;
-}
-#endif /* DISPATCH */
-
-#endif /* LIB_X86_DECOMPRESS_IMPL_H */
diff --git a/util/compress/libdeflate/lib/x86/matchfinder_impl.h b/util/compress/libdeflate/lib/x86/matchfinder_impl.h
deleted file mode 100644
index 99fbebe8d..000000000
--- a/util/compress/libdeflate/lib/x86/matchfinder_impl.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * x86/matchfinder_impl.h - x86 implementations of matchfinder functions
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LIB_X86_MATCHFINDER_IMPL_H
-#define LIB_X86_MATCHFINDER_IMPL_H
-
-#ifdef __AVX2__
-# include <immintrin.h>
-static forceinline void
-matchfinder_init_avx2(mf_pos_t *data, size_t size)
-{
- __m256i *p = (__m256i *)data;
- __m256i v = _mm256_set1_epi16(MATCHFINDER_INITVAL);
-
- STATIC_ASSERT(MATCHFINDER_MEM_ALIGNMENT % sizeof(*p) == 0);
- STATIC_ASSERT(MATCHFINDER_SIZE_ALIGNMENT % (4 * sizeof(*p)) == 0);
- STATIC_ASSERT(sizeof(mf_pos_t) == 2);
-
- do {
- p[0] = v;
- p[1] = v;
- p[2] = v;
- p[3] = v;
- p += 4;
- size -= 4 * sizeof(*p);
- } while (size != 0);
-}
-#define matchfinder_init matchfinder_init_avx2
-
-static forceinline void
-matchfinder_rebase_avx2(mf_pos_t *data, size_t size)
-{
- __m256i *p = (__m256i *)data;
- __m256i v = _mm256_set1_epi16((u16)-MATCHFINDER_WINDOW_SIZE);
-
- STATIC_ASSERT(MATCHFINDER_MEM_ALIGNMENT % sizeof(*p) == 0);
- STATIC_ASSERT(MATCHFINDER_SIZE_ALIGNMENT % (4 * sizeof(*p)) == 0);
- STATIC_ASSERT(sizeof(mf_pos_t) == 2);
-
- do {
- /* PADDSW: Add Packed Signed Integers With Signed Saturation */
- p[0] = _mm256_adds_epi16(p[0], v);
- p[1] = _mm256_adds_epi16(p[1], v);
- p[2] = _mm256_adds_epi16(p[2], v);
- p[3] = _mm256_adds_epi16(p[3], v);
- p += 4;
- size -= 4 * sizeof(*p);
- } while (size != 0);
-}
-#define matchfinder_rebase matchfinder_rebase_avx2
-
-#elif defined(__SSE2__)
-# include <emmintrin.h>
-static forceinline void
-matchfinder_init_sse2(mf_pos_t *data, size_t size)
-{
- __m128i *p = (__m128i *)data;
- __m128i v = _mm_set1_epi16(MATCHFINDER_INITVAL);
-
- STATIC_ASSERT(MATCHFINDER_MEM_ALIGNMENT % sizeof(*p) == 0);
- STATIC_ASSERT(MATCHFINDER_SIZE_ALIGNMENT % (4 * sizeof(*p)) == 0);
- STATIC_ASSERT(sizeof(mf_pos_t) == 2);
-
- do {
- p[0] = v;
- p[1] = v;
- p[2] = v;
- p[3] = v;
- p += 4;
- size -= 4 * sizeof(*p);
- } while (size != 0);
-}
-#define matchfinder_init matchfinder_init_sse2
-
-static forceinline void
-matchfinder_rebase_sse2(mf_pos_t *data, size_t size)
-{
- __m128i *p = (__m128i *)data;
- __m128i v = _mm_set1_epi16((u16)-MATCHFINDER_WINDOW_SIZE);
-
- STATIC_ASSERT(MATCHFINDER_MEM_ALIGNMENT % sizeof(*p) == 0);
- STATIC_ASSERT(MATCHFINDER_SIZE_ALIGNMENT % (4 * sizeof(*p)) == 0);
- STATIC_ASSERT(sizeof(mf_pos_t) == 2);
-
- do {
- /* PADDSW: Add Packed Signed Integers With Signed Saturation */
- p[0] = _mm_adds_epi16(p[0], v);
- p[1] = _mm_adds_epi16(p[1], v);
- p[2] = _mm_adds_epi16(p[2], v);
- p[3] = _mm_adds_epi16(p[3], v);
- p += 4;
- size -= 4 * sizeof(*p);
- } while (size != 0);
-}
-#define matchfinder_rebase matchfinder_rebase_sse2
-#endif /* __SSE2__ */
-
-#endif /* LIB_X86_MATCHFINDER_IMPL_H */
diff --git a/util/compress/libdeflate/lib/zlib_compress.c b/util/compress/libdeflate/lib/zlib_compress.c
deleted file mode 100644
index ab0075136..000000000
--- a/util/compress/libdeflate/lib/zlib_compress.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * zlib_compress.c - compress with a zlib wrapper
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "deflate_compress.h"
-#include "unaligned.h"
-#include "zlib_constants.h"
-
-#include "libdeflate.h"
-
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_zlib_compress(struct libdeflate_compressor *c,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail)
-{
- u8 *out_next = out;
- u16 hdr;
- unsigned compression_level;
- unsigned level_hint;
- size_t deflate_size;
-
- if (out_nbytes_avail <= ZLIB_MIN_OVERHEAD)
- return 0;
-
- /* 2 byte header: CMF and FLG */
- hdr = (ZLIB_CM_DEFLATE << 8) | (ZLIB_CINFO_32K_WINDOW << 12);
- compression_level = deflate_get_compression_level(c);
- if (compression_level < 2)
- level_hint = ZLIB_FASTEST_COMPRESSION;
- else if (compression_level < 6)
- level_hint = ZLIB_FAST_COMPRESSION;
- else if (compression_level < 8)
- level_hint = ZLIB_DEFAULT_COMPRESSION;
- else
- level_hint = ZLIB_SLOWEST_COMPRESSION;
- hdr |= level_hint << 6;
- hdr |= 31 - (hdr % 31);
-
- put_unaligned_be16(hdr, out_next);
- out_next += 2;
-
- /* Compressed data */
- deflate_size = libdeflate_deflate_compress(c, in, in_nbytes, out_next,
- out_nbytes_avail - ZLIB_MIN_OVERHEAD);
- if (deflate_size == 0)
- return 0;
- out_next += deflate_size;
-
- /* ADLER32 */
- put_unaligned_be32(libdeflate_adler32(1, in, in_nbytes), out_next);
- out_next += 4;
-
- return out_next - (u8 *)out;
-}
-
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_zlib_compress_bound(struct libdeflate_compressor *c,
- size_t in_nbytes)
-{
- return ZLIB_MIN_OVERHEAD +
- libdeflate_deflate_compress_bound(c, in_nbytes);
-}
diff --git a/util/compress/libdeflate/lib/zlib_constants.h b/util/compress/libdeflate/lib/zlib_constants.h
deleted file mode 100644
index f304310c7..000000000
--- a/util/compress/libdeflate/lib/zlib_constants.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * zlib_constants.h - constants for the zlib wrapper format
- */
-
-#ifndef LIB_ZLIB_CONSTANTS_H
-#define LIB_ZLIB_CONSTANTS_H
-
-#define ZLIB_MIN_HEADER_SIZE 2
-#define ZLIB_FOOTER_SIZE 4
-#define ZLIB_MIN_OVERHEAD (ZLIB_MIN_HEADER_SIZE + ZLIB_FOOTER_SIZE)
-
-#define ZLIB_CM_DEFLATE 8
-
-#define ZLIB_CINFO_32K_WINDOW 7
-
-#define ZLIB_FASTEST_COMPRESSION 0
-#define ZLIB_FAST_COMPRESSION 1
-#define ZLIB_DEFAULT_COMPRESSION 2
-#define ZLIB_SLOWEST_COMPRESSION 3
-
-#endif /* LIB_ZLIB_CONSTANTS_H */
diff --git a/util/compress/libdeflate/lib/zlib_decompress.c b/util/compress/libdeflate/lib/zlib_decompress.c
deleted file mode 100644
index 0f6c71489..000000000
--- a/util/compress/libdeflate/lib/zlib_decompress.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * zlib_decompress.c - decompress with a zlib wrapper
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "unaligned.h"
-#include "zlib_constants.h"
-
-#include "libdeflate.h"
-
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_zlib_decompress_ex(struct libdeflate_decompressor *d,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret,
- size_t *actual_out_nbytes_ret)
-{
- const u8 *in_next = in;
- const u8 * const in_end = in_next + in_nbytes;
- u16 hdr;
- size_t actual_in_nbytes;
- size_t actual_out_nbytes;
- enum libdeflate_result result;
-
- if (in_nbytes < ZLIB_MIN_OVERHEAD)
- return LIBDEFLATE_BAD_DATA;
-
- /* 2 byte header: CMF and FLG */
- hdr = get_unaligned_be16(in_next);
- in_next += 2;
-
- /* FCHECK */
- if ((hdr % 31) != 0)
- return LIBDEFLATE_BAD_DATA;
-
- /* CM */
- if (((hdr >> 8) & 0xF) != ZLIB_CM_DEFLATE)
- return LIBDEFLATE_BAD_DATA;
-
- /* CINFO */
- if ((hdr >> 12) > ZLIB_CINFO_32K_WINDOW)
- return LIBDEFLATE_BAD_DATA;
-
- /* FDICT */
- if ((hdr >> 5) & 1)
- return LIBDEFLATE_BAD_DATA;
-
- /* Compressed data */
- result = libdeflate_deflate_decompress_ex(d, in_next,
- in_end - ZLIB_FOOTER_SIZE - in_next,
- out, out_nbytes_avail,
- &actual_in_nbytes, actual_out_nbytes_ret);
- if (result != LIBDEFLATE_SUCCESS)
- return result;
-
- if (actual_out_nbytes_ret)
- actual_out_nbytes = *actual_out_nbytes_ret;
- else
- actual_out_nbytes = out_nbytes_avail;
-
- in_next += actual_in_nbytes;
-
- /* ADLER32 */
- if (libdeflate_adler32(1, out, actual_out_nbytes) !=
- get_unaligned_be32(in_next))
- return LIBDEFLATE_BAD_DATA;
- in_next += 4;
-
- if (actual_in_nbytes_ret)
- *actual_in_nbytes_ret = in_next - (u8 *)in;
-
- return LIBDEFLATE_SUCCESS;
-}
-
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_zlib_decompress(struct libdeflate_decompressor *d,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_out_nbytes_ret)
-{
- return libdeflate_zlib_decompress_ex(d, in, in_nbytes,
- out, out_nbytes_avail,
- NULL, actual_out_nbytes_ret);
-}
diff --git a/util/compress/libdeflate/libdeflate.h b/util/compress/libdeflate/libdeflate.h
deleted file mode 100644
index cbcda0420..000000000
--- a/util/compress/libdeflate/libdeflate.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * libdeflate.h - public header for libdeflate
- */
-
-#ifndef LIBDEFLATE_H
-#define LIBDEFLATE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define LIBDEFLATE_VERSION_MAJOR 1
-#define LIBDEFLATE_VERSION_MINOR 7
-#define LIBDEFLATE_VERSION_STRING "1.7"
-
-#include <stddef.h>
-#include <stdint.h>
-
-/*
- * On Windows, if you want to link to the DLL version of libdeflate, then
- * #define LIBDEFLATE_DLL. Note that the calling convention is "stdcall".
- */
-#ifdef LIBDEFLATE_DLL
-# ifdef BUILDING_LIBDEFLATE
-# define LIBDEFLATEEXPORT LIBEXPORT
-# elif defined(_WIN32) || defined(__CYGWIN__)
-# define LIBDEFLATEEXPORT __declspec(dllimport)
-# endif
-#endif
-#ifndef LIBDEFLATEEXPORT
-# define LIBDEFLATEEXPORT
-#endif
-
-#if defined(_WIN32) && !defined(_WIN64)
-# define LIBDEFLATEAPI_ABI __stdcall
-#else
-# define LIBDEFLATEAPI_ABI
-#endif
-
-#if defined(BUILDING_LIBDEFLATE) && defined(__GNUC__) && \
- defined(_WIN32) && !defined(_WIN64)
- /*
- * On 32-bit Windows, gcc assumes 16-byte stack alignment but MSVC only 4.
- * Realign the stack when entering libdeflate to avoid crashing in SSE/AVX
- * code when called from an MSVC-compiled application.
- */
-# define LIBDEFLATEAPI_STACKALIGN __attribute__((force_align_arg_pointer))
-#else
-# define LIBDEFLATEAPI_STACKALIGN
-#endif
-
-#define LIBDEFLATEAPI LIBDEFLATEAPI_ABI LIBDEFLATEAPI_STACKALIGN
-
-/* ========================================================================== */
-/* Compression */
-/* ========================================================================== */
-
-struct libdeflate_compressor;
-
-/*
- * libdeflate_alloc_compressor() allocates a new compressor that supports
- * DEFLATE, zlib, and gzip compression. 'compression_level' is the compression
- * level on a zlib-like scale but with a higher maximum value (1 = fastest, 6 =
- * medium/default, 9 = slow, 12 = slowest). Level 0 is also supported and means
- * "no compression", specifically "create a valid stream, but only emit
- * uncompressed blocks" (this will expand the data slightly).
- *
- * The return value is a pointer to the new compressor, or NULL if out of memory
- * or if the compression level is invalid (i.e. outside the range [0, 12]).
- *
- * Note: for compression, the sliding window size is defined at compilation time
- * to 32768, the largest size permissible in the DEFLATE format. It cannot be
- * changed at runtime.
- *
- * A single compressor is not safe to use by multiple threads concurrently.
- * However, different threads may use different compressors concurrently.
- */
-LIBDEFLATEEXPORT struct libdeflate_compressor * LIBDEFLATEAPI
-libdeflate_alloc_compressor(int compression_level);
-
-/*
- * libdeflate_deflate_compress() performs raw DEFLATE compression on a buffer of
- * data. The function attempts to compress 'in_nbytes' bytes of data located at
- * 'in' and write the results to 'out', which has space for 'out_nbytes_avail'
- * bytes. The return value is the compressed size in bytes, or 0 if the data
- * could not be compressed to 'out_nbytes_avail' bytes or fewer.
- */
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_deflate_compress(struct libdeflate_compressor *compressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail);
-
-/*
- * libdeflate_deflate_compress_bound() returns a worst-case upper bound on the
- * number of bytes of compressed data that may be produced by compressing any
- * buffer of length less than or equal to 'in_nbytes' using
- * libdeflate_deflate_compress() with the specified compressor. Mathematically,
- * this bound will necessarily be a number greater than or equal to 'in_nbytes'.
- * It may be an overestimate of the true upper bound. The return value is
- * guaranteed to be the same for all invocations with the same compressor and
- * same 'in_nbytes'.
- *
- * As a special case, 'compressor' may be NULL. This causes the bound to be
- * taken across *any* libdeflate_compressor that could ever be allocated with
- * this build of the library, with any options.
- *
- * Note that this function is not necessary in many applications. With
- * block-based compression, it is usually preferable to separately store the
- * uncompressed size of each block and to store any blocks that did not compress
- * to less than their original size uncompressed. In that scenario, there is no
- * need to know the worst-case compressed size, since the maximum number of
- * bytes of compressed data that may be used would always be one less than the
- * input length. You can just pass a buffer of that size to
- * libdeflate_deflate_compress() and store the data uncompressed if
- * libdeflate_deflate_compress() returns 0, indicating that the compressed data
- * did not fit into the provided output buffer.
- */
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_deflate_compress_bound(struct libdeflate_compressor *compressor,
- size_t in_nbytes);
-
-/*
- * Like libdeflate_deflate_compress(), but stores the data in the zlib wrapper
- * format.
- */
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_zlib_compress(struct libdeflate_compressor *compressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail);
-
-/*
- * Like libdeflate_deflate_compress_bound(), but assumes the data will be
- * compressed with libdeflate_zlib_compress() rather than with
- * libdeflate_deflate_compress().
- */
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_zlib_compress_bound(struct libdeflate_compressor *compressor,
- size_t in_nbytes);
-
-/*
- * Like libdeflate_deflate_compress(), but stores the data in the gzip wrapper
- * format.
- */
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_gzip_compress(struct libdeflate_compressor *compressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail);
-
-/*
- * Like libdeflate_deflate_compress_bound(), but assumes the data will be
- * compressed with libdeflate_gzip_compress() rather than with
- * libdeflate_deflate_compress().
- */
-LIBDEFLATEEXPORT size_t LIBDEFLATEAPI
-libdeflate_gzip_compress_bound(struct libdeflate_compressor *compressor,
- size_t in_nbytes);
-
-/*
- * libdeflate_free_compressor() frees a compressor that was allocated with
- * libdeflate_alloc_compressor(). If a NULL pointer is passed in, no action is
- * taken.
- */
-LIBDEFLATEEXPORT void LIBDEFLATEAPI
-libdeflate_free_compressor(struct libdeflate_compressor *compressor);
-
-/* ========================================================================== */
-/* Decompression */
-/* ========================================================================== */
-
-struct libdeflate_decompressor;
-
-/*
- * libdeflate_alloc_decompressor() allocates a new decompressor that can be used
- * for DEFLATE, zlib, and gzip decompression. The return value is a pointer to
- * the new decompressor, or NULL if out of memory.
- *
- * This function takes no parameters, and the returned decompressor is valid for
- * decompressing data that was compressed at any compression level and with any
- * sliding window size.
- *
- * A single decompressor is not safe to use by multiple threads concurrently.
- * However, different threads may use different decompressors concurrently.
- */
-LIBDEFLATEEXPORT struct libdeflate_decompressor * LIBDEFLATEAPI
-libdeflate_alloc_decompressor(void);
-
-/*
- * Result of a call to libdeflate_deflate_decompress(),
- * libdeflate_zlib_decompress(), or libdeflate_gzip_decompress().
- */
-enum libdeflate_result {
- /* Decompression was successful. */
- LIBDEFLATE_SUCCESS = 0,
-
- /* Decompressed failed because the compressed data was invalid, corrupt,
- * or otherwise unsupported. */
- LIBDEFLATE_BAD_DATA = 1,
-
- /* A NULL 'actual_out_nbytes_ret' was provided, but the data would have
- * decompressed to fewer than 'out_nbytes_avail' bytes. */
- LIBDEFLATE_SHORT_OUTPUT = 2,
-
- /* The data would have decompressed to more than 'out_nbytes_avail'
- * bytes. */
- LIBDEFLATE_INSUFFICIENT_SPACE = 3,
-};
-
-/*
- * libdeflate_deflate_decompress() decompresses the DEFLATE-compressed stream
- * from the buffer 'in' with compressed size up to 'in_nbytes' bytes. The
- * uncompressed data is written to 'out', a buffer with size 'out_nbytes_avail'
- * bytes. If decompression succeeds, then 0 (LIBDEFLATE_SUCCESS) is returned.
- * Otherwise, a nonzero result code such as LIBDEFLATE_BAD_DATA is returned. If
- * a nonzero result code is returned, then the contents of the output buffer are
- * undefined.
- *
- * Decompression stops at the end of the DEFLATE stream (as indicated by the
- * BFINAL flag), even if it is actually shorter than 'in_nbytes' bytes.
- *
- * libdeflate_deflate_decompress() can be used in cases where the actual
- * uncompressed size is known (recommended) or unknown (not recommended):
- *
- * - If the actual uncompressed size is known, then pass the actual
- * uncompressed size as 'out_nbytes_avail' and pass NULL for
- * 'actual_out_nbytes_ret'. This makes libdeflate_deflate_decompress() fail
- * with LIBDEFLATE_SHORT_OUTPUT if the data decompressed to fewer than the
- * specified number of bytes.
- *
- * - If the actual uncompressed size is unknown, then provide a non-NULL
- * 'actual_out_nbytes_ret' and provide a buffer with some size
- * 'out_nbytes_avail' that you think is large enough to hold all the
- * uncompressed data. In this case, if the data decompresses to less than
- * or equal to 'out_nbytes_avail' bytes, then
- * libdeflate_deflate_decompress() will write the actual uncompressed size
- * to *actual_out_nbytes_ret and return 0 (LIBDEFLATE_SUCCESS). Otherwise,
- * it will return LIBDEFLATE_INSUFFICIENT_SPACE if the provided buffer was
- * not large enough but no other problems were encountered, or another
- * nonzero result code if decompression failed for another reason.
- */
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_deflate_decompress(struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_out_nbytes_ret);
-
-/*
- * Like libdeflate_deflate_decompress(), but adds the 'actual_in_nbytes_ret'
- * argument. If decompression succeeds and 'actual_in_nbytes_ret' is not NULL,
- * then the actual compressed size of the DEFLATE stream (aligned to the next
- * byte boundary) is written to *actual_in_nbytes_ret.
- */
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_deflate_decompress_ex(struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret,
- size_t *actual_out_nbytes_ret);
-
-/*
- * Like libdeflate_deflate_decompress(), but assumes the zlib wrapper format
- * instead of raw DEFLATE.
- *
- * Decompression will stop at the end of the zlib stream, even if it is shorter
- * than 'in_nbytes'. If you need to know exactly where the zlib stream ended,
- * use libdeflate_zlib_decompress_ex().
- */
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_zlib_decompress(struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_out_nbytes_ret);
-
-/*
- * Like libdeflate_zlib_decompress(), but adds the 'actual_in_nbytes_ret'
- * argument. If 'actual_in_nbytes_ret' is not NULL and the decompression
- * succeeds (indicating that the first zlib-compressed stream in the input
- * buffer was decompressed), then the actual number of input bytes consumed is
- * written to *actual_in_nbytes_ret.
- */
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_zlib_decompress_ex(struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret,
- size_t *actual_out_nbytes_ret);
-
-/*
- * Like libdeflate_deflate_decompress(), but assumes the gzip wrapper format
- * instead of raw DEFLATE.
- *
- * If multiple gzip-compressed members are concatenated, then only the first
- * will be decompressed. Use libdeflate_gzip_decompress_ex() if you need
- * multi-member support.
- */
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_gzip_decompress(struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_out_nbytes_ret);
-
-/*
- * Like libdeflate_gzip_decompress(), but adds the 'actual_in_nbytes_ret'
- * argument. If 'actual_in_nbytes_ret' is not NULL and the decompression
- * succeeds (indicating that the first gzip-compressed member in the input
- * buffer was decompressed), then the actual number of input bytes consumed is
- * written to *actual_in_nbytes_ret.
- */
-LIBDEFLATEEXPORT enum libdeflate_result LIBDEFLATEAPI
-libdeflate_gzip_decompress_ex(struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret,
- size_t *actual_out_nbytes_ret);
-
-/*
- * libdeflate_free_decompressor() frees a decompressor that was allocated with
- * libdeflate_alloc_decompressor(). If a NULL pointer is passed in, no action
- * is taken.
- */
-LIBDEFLATEEXPORT void LIBDEFLATEAPI
-libdeflate_free_decompressor(struct libdeflate_decompressor *decompressor);
-
-/* ========================================================================== */
-/* Checksums */
-/* ========================================================================== */
-
-/*
- * libdeflate_adler32() updates a running Adler-32 checksum with 'len' bytes of
- * data and returns the updated checksum. When starting a new checksum, the
- * required initial value for 'adler' is 1. This value is also returned when
- * 'buffer' is specified as NULL.
- */
-LIBDEFLATEEXPORT uint32_t LIBDEFLATEAPI
-libdeflate_adler32(uint32_t adler, const void *buffer, size_t len);
-
-
-/*
- * libdeflate_crc32() updates a running CRC-32 checksum with 'len' bytes of data
- * and returns the updated checksum. When starting a new checksum, the required
- * initial value for 'crc' is 0. This value is also returned when 'buffer' is
- * specified as NULL.
- */
-LIBDEFLATEEXPORT uint32_t LIBDEFLATEAPI
-libdeflate_crc32(uint32_t crc, const void *buffer, size_t len);
-
-/* ========================================================================== */
-/* Custom memory allocator */
-/* ========================================================================== */
-
-/*
- * Install a custom memory allocator which libdeflate will use for all memory
- * allocations. 'malloc_func' is a function that must behave like malloc(), and
- * 'free_func' is a function that must behave like free().
- *
- * There must not be any libdeflate_compressor or libdeflate_decompressor
- * structures in existence when calling this function.
- */
-LIBDEFLATEEXPORT void LIBDEFLATEAPI
-libdeflate_set_memory_allocator(void *(*malloc_func)(size_t),
- void (*free_func)(void *));
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* LIBDEFLATE_H */
diff --git a/util/compress/libdeflate/programs/benchmark.c b/util/compress/libdeflate/programs/benchmark.c
deleted file mode 100644
index 52af8dafc..000000000
--- a/util/compress/libdeflate/programs/benchmark.c
+++ /dev/null
@@ -1,696 +0,0 @@
-/*
- * benchmark.c - a compression testing and benchmark program
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "test_util.h"
-
-static const tchar *const optstring = T("0::1::2::3::4::5::6::7::8::9::C:D:eghs:VYZz");
-
-enum format {
- DEFLATE_FORMAT,
- ZLIB_FORMAT,
- GZIP_FORMAT,
-};
-
-struct compressor {
- int level;
- enum format format;
- const struct engine *engine;
- void *private;
-};
-
-struct decompressor {
- enum format format;
- const struct engine *engine;
- void *private;
-};
-
-struct engine {
- const tchar *name;
-
- bool (*init_compressor)(struct compressor *);
- size_t (*compress_bound)(struct compressor *, size_t);
- size_t (*compress)(struct compressor *, const void *, size_t,
- void *, size_t);
- void (*destroy_compressor)(struct compressor *);
-
- bool (*init_decompressor)(struct decompressor *);
- bool (*decompress)(struct decompressor *, const void *, size_t,
- void *, size_t);
- void (*destroy_decompressor)(struct decompressor *);
-};
-
-/******************************************************************************/
-
-static bool
-libdeflate_engine_init_compressor(struct compressor *c)
-{
- c->private = alloc_compressor(c->level);
- return c->private != NULL;
-}
-
-static size_t
-libdeflate_engine_compress_bound(struct compressor *c, size_t in_nbytes)
-{
- switch (c->format) {
- case ZLIB_FORMAT:
- return libdeflate_zlib_compress_bound(c->private, in_nbytes);
- case GZIP_FORMAT:
- return libdeflate_gzip_compress_bound(c->private, in_nbytes);
- default:
- return libdeflate_deflate_compress_bound(c->private, in_nbytes);
- }
-}
-
-static size_t
-libdeflate_engine_compress(struct compressor *c, const void *in,
- size_t in_nbytes, void *out, size_t out_nbytes_avail)
-{
- switch (c->format) {
- case ZLIB_FORMAT:
- return libdeflate_zlib_compress(c->private, in, in_nbytes,
- out, out_nbytes_avail);
- case GZIP_FORMAT:
- return libdeflate_gzip_compress(c->private, in, in_nbytes,
- out, out_nbytes_avail);
- default:
- return libdeflate_deflate_compress(c->private, in, in_nbytes,
- out, out_nbytes_avail);
- }
-}
-
-static void
-libdeflate_engine_destroy_compressor(struct compressor *c)
-{
- libdeflate_free_compressor(c->private);
-}
-
-static bool
-libdeflate_engine_init_decompressor(struct decompressor *d)
-{
- d->private = alloc_decompressor();
- return d->private != NULL;
-}
-
-static bool
-libdeflate_engine_decompress(struct decompressor *d, const void *in,
- size_t in_nbytes, void *out, size_t out_nbytes)
-{
- switch (d->format) {
- case ZLIB_FORMAT:
- return !libdeflate_zlib_decompress(d->private, in, in_nbytes,
- out, out_nbytes, NULL);
- case GZIP_FORMAT:
- return !libdeflate_gzip_decompress(d->private, in, in_nbytes,
- out, out_nbytes, NULL);
- default:
- return !libdeflate_deflate_decompress(d->private, in, in_nbytes,
- out, out_nbytes, NULL);
- }
-}
-
-static void
-libdeflate_engine_destroy_decompressor(struct decompressor *d)
-{
- libdeflate_free_decompressor(d->private);
-}
-
-static const struct engine libdeflate_engine = {
- .name = T("libdeflate"),
-
- .init_compressor = libdeflate_engine_init_compressor,
- .compress_bound = libdeflate_engine_compress_bound,
- .compress = libdeflate_engine_compress,
- .destroy_compressor = libdeflate_engine_destroy_compressor,
-
- .init_decompressor = libdeflate_engine_init_decompressor,
- .decompress = libdeflate_engine_decompress,
- .destroy_decompressor = libdeflate_engine_destroy_decompressor,
-};
-
-/******************************************************************************/
-
-static int
-get_libz_window_bits(enum format format)
-{
- const int windowBits = 15;
- switch (format) {
- case ZLIB_FORMAT:
- return windowBits;
- case GZIP_FORMAT:
- return windowBits + 16;
- default:
- return -windowBits;
- }
-}
-
-static bool
-libz_engine_init_compressor(struct compressor *c)
-{
- z_stream *z;
-
- if (c->level > 9) {
- msg("libz only supports up to compression level 9");
- return false;
- }
-
- z = xmalloc(sizeof(*z));
- if (z == NULL)
- return false;
-
- z->next_in = NULL;
- z->avail_in = 0;
- z->zalloc = NULL;
- z->zfree = NULL;
- z->opaque = NULL;
- if (deflateInit2(z, c->level, Z_DEFLATED,
- get_libz_window_bits(c->format),
- 8, Z_DEFAULT_STRATEGY) != Z_OK)
- {
- msg("unable to initialize deflater");
- free(z);
- return false;
- }
-
- c->private = z;
- return true;
-}
-
-static size_t
-libz_engine_compress_bound(struct compressor *c, size_t in_nbytes)
-{
- return deflateBound(c->private, in_nbytes);
-}
-
-static size_t
-libz_engine_compress(struct compressor *c, const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail)
-{
- z_stream *z = c->private;
-
- deflateReset(z);
-
- z->next_in = (void *)in;
- z->avail_in = in_nbytes;
- z->next_out = out;
- z->avail_out = out_nbytes_avail;
-
- if (deflate(z, Z_FINISH) != Z_STREAM_END)
- return 0;
-
- return out_nbytes_avail - z->avail_out;
-}
-
-static void
-libz_engine_destroy_compressor(struct compressor *c)
-{
- z_stream *z = c->private;
-
- deflateEnd(z);
- free(z);
-}
-
-static bool
-libz_engine_init_decompressor(struct decompressor *d)
-{
- z_stream *z;
-
- z = xmalloc(sizeof(*z));
- if (z == NULL)
- return false;
-
- z->next_in = NULL;
- z->avail_in = 0;
- z->zalloc = NULL;
- z->zfree = NULL;
- z->opaque = NULL;
- if (inflateInit2(z, get_libz_window_bits(d->format)) != Z_OK) {
- msg("unable to initialize inflater");
- free(z);
- return false;
- }
-
- d->private = z;
- return true;
-}
-
-static bool
-libz_engine_decompress(struct decompressor *d, const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes)
-{
- z_stream *z = d->private;
-
- inflateReset(z);
-
- z->next_in = (void *)in;
- z->avail_in = in_nbytes;
- z->next_out = out;
- z->avail_out = out_nbytes;
-
- return inflate(z, Z_FINISH) == Z_STREAM_END && z->avail_out == 0;
-}
-
-static void
-libz_engine_destroy_decompressor(struct decompressor *d)
-{
- z_stream *z = d->private;
-
- inflateEnd(z);
- free(z);
-}
-
-static const struct engine libz_engine = {
- .name = T("libz"),
-
- .init_compressor = libz_engine_init_compressor,
- .compress_bound = libz_engine_compress_bound,
- .compress = libz_engine_compress,
- .destroy_compressor = libz_engine_destroy_compressor,
-
- .init_decompressor = libz_engine_init_decompressor,
- .decompress = libz_engine_decompress,
- .destroy_decompressor = libz_engine_destroy_decompressor,
-};
-
-/******************************************************************************/
-
-static const struct engine * const all_engines[] = {
- &libdeflate_engine,
- &libz_engine,
-};
-
-#define DEFAULT_ENGINE libdeflate_engine
-
-static const struct engine *
-name_to_engine(const tchar *name)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_LEN(all_engines); i++)
- if (tstrcmp(all_engines[i]->name, name) == 0)
- return all_engines[i];
- return NULL;
-}
-
-/******************************************************************************/
-
-static bool
-compressor_init(struct compressor *c, int level, enum format format,
- const struct engine *engine)
-{
- c->level = level;
- c->format = format;
- c->engine = engine;
- return engine->init_compressor(c);
-}
-
-static size_t
-compress_bound(struct compressor *c, size_t in_nbytes)
-{
- return c->engine->compress_bound(c, in_nbytes);
-}
-
-static size_t
-do_compress(struct compressor *c, const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail)
-{
- return c->engine->compress(c, in, in_nbytes, out, out_nbytes_avail);
-}
-
-static void
-compressor_destroy(struct compressor *c)
-{
- if (c->engine != NULL)
- c->engine->destroy_compressor(c);
-}
-
-static bool
-decompressor_init(struct decompressor *d, enum format format,
- const struct engine *engine)
-{
- d->format = format;
- d->engine = engine;
- return engine->init_decompressor(d);
-}
-
-static bool
-do_decompress(struct decompressor *d, const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes)
-{
- return d->engine->decompress(d, in, in_nbytes, out, out_nbytes);
-}
-
-static void
-decompressor_destroy(struct decompressor *d)
-{
- if (d->engine != NULL)
- d->engine->destroy_decompressor(d);
-}
-
-/******************************************************************************/
-
-static void
-show_available_engines(FILE *fp)
-{
- size_t i;
-
- fprintf(fp, "Available ENGINEs are: ");
- for (i = 0; i < ARRAY_LEN(all_engines); i++) {
- fprintf(fp, "%"TS, all_engines[i]->name);
- if (i < ARRAY_LEN(all_engines) - 1)
- fprintf(fp, ", ");
- }
- fprintf(fp, ". Default is %"TS"\n", DEFAULT_ENGINE.name);
-}
-
-static void
-show_usage(FILE *fp)
-{
- fprintf(fp,
-"Usage: %"TS" [-LVL] [-C ENGINE] [-D ENGINE] [-ghVz] [-s SIZE] [FILE]...\n"
-"Benchmark DEFLATE compression and decompression on the specified FILEs.\n"
-"\n"
-"Options:\n"
-" -0 no compression\n"
-" -1 fastest (worst) compression\n"
-" -6 medium compression (default)\n"
-" -12 slowest (best) compression\n"
-" -C ENGINE compression engine\n"
-" -D ENGINE decompression engine\n"
-" -e allow chunks to be expanded (implied by -0)\n"
-" -g use gzip format instead of raw DEFLATE\n"
-" -h print this help\n"
-" -s SIZE chunk size\n"
-" -V show version and legal information\n"
-" -z use zlib format instead of raw DEFLATE\n"
-"\n", prog_invocation_name);
-
- show_available_engines(fp);
-}
-
-static void
-show_version(void)
-{
- printf(
-"libdeflate compression benchmark program v" LIBDEFLATE_VERSION_STRING "\n"
-"Copyright 2016 Eric Biggers\n"
-"\n"
-"This program is free software which may be modified and/or redistributed\n"
-"under the terms of the MIT license. There is NO WARRANTY, to the extent\n"
-"permitted by law. See the COPYING file for details.\n"
- );
-}
-
-
-/******************************************************************************/
-
-static int
-do_benchmark(struct file_stream *in, void *original_buf, void *compressed_buf,
- void *decompressed_buf, u32 chunk_size,
- bool allow_expansion, size_t compressed_buf_size,
- struct compressor *compressor,
- struct decompressor *decompressor)
-{
- u64 total_uncompressed_size = 0;
- u64 total_compressed_size = 0;
- u64 total_compress_time = 0;
- u64 total_decompress_time = 0;
- ssize_t ret;
-
- while ((ret = xread(in, original_buf, chunk_size)) > 0) {
- u32 original_size = ret;
- size_t out_nbytes_avail;
- u32 compressed_size;
- u64 start_time;
- bool ok;
-
- total_uncompressed_size += original_size;
-
- if (allow_expansion) {
- out_nbytes_avail = compress_bound(compressor,
- original_size);
- if (out_nbytes_avail > compressed_buf_size) {
- msg("%"TS": bug in compress_bound()", in->name);
- return -1;
- }
- } else {
- out_nbytes_avail = original_size - 1;
- }
-
- /* Compress the chunk of data. */
- start_time = timer_ticks();
- compressed_size = do_compress(compressor,
- original_buf,
- original_size,
- compressed_buf,
- out_nbytes_avail);
- total_compress_time += timer_ticks() - start_time;
-
- if (compressed_size) {
- /* Successfully compressed the chunk of data. */
-
- /* Decompress the data we just compressed and compare
- * the result with the original. */
- start_time = timer_ticks();
- ok = do_decompress(decompressor,
- compressed_buf, compressed_size,
- decompressed_buf, original_size);
- total_decompress_time += timer_ticks() - start_time;
-
- if (!ok) {
- msg("%"TS": failed to decompress data",
- in->name);
- return -1;
- }
-
- if (memcmp(original_buf, decompressed_buf,
- original_size) != 0)
- {
- msg("%"TS": data did not decompress to "
- "original", in->name);
- return -1;
- }
-
- total_compressed_size += compressed_size;
- } else {
- /*
- * The chunk would have compressed to more than
- * out_nbytes_avail bytes.
- */
- if (allow_expansion) {
- msg("%"TS": bug in compress_bound()", in->name);
- return -1;
- }
- total_compressed_size += original_size;
- }
- }
-
- if (ret < 0)
- return ret;
-
- if (total_uncompressed_size == 0) {
- printf("\tFile was empty.\n");
- return 0;
- }
-
- if (total_compress_time == 0)
- total_compress_time = 1;
- if (total_decompress_time == 0)
- total_decompress_time = 1;
-
- printf("\tCompressed %"PRIu64 " => %"PRIu64" bytes (%u.%03u%%)\n",
- total_uncompressed_size, total_compressed_size,
- (unsigned int)(total_compressed_size * 100 /
- total_uncompressed_size),
- (unsigned int)(total_compressed_size * 100000 /
- total_uncompressed_size % 1000));
- printf("\tCompression time: %"PRIu64" ms (%"PRIu64" MB/s)\n",
- timer_ticks_to_ms(total_compress_time),
- timer_MB_per_s(total_uncompressed_size, total_compress_time));
- printf("\tDecompression time: %"PRIu64" ms (%"PRIu64" MB/s)\n",
- timer_ticks_to_ms(total_decompress_time),
- timer_MB_per_s(total_uncompressed_size, total_decompress_time));
-
- return 0;
-}
-
-int
-tmain(int argc, tchar *argv[])
-{
- u32 chunk_size = 1048576;
- int level = 6;
- enum format format = DEFLATE_FORMAT;
- const struct engine *compress_engine = &DEFAULT_ENGINE;
- const struct engine *decompress_engine = &DEFAULT_ENGINE;
- bool allow_expansion = false;
- struct compressor compressor = { 0 };
- struct decompressor decompressor = { 0 };
- size_t compressed_buf_size;
- void *original_buf = NULL;
- void *compressed_buf = NULL;
- void *decompressed_buf = NULL;
- tchar *default_file_list[] = { NULL };
- int opt_char;
- int i;
- int ret;
-
- begin_program(argv);
-
- while ((opt_char = tgetopt(argc, argv, optstring)) != -1) {
- switch (opt_char) {
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- level = parse_compression_level(opt_char, toptarg);
- if (level < 0)
- return 1;
- break;
- case 'C':
- compress_engine = name_to_engine(toptarg);
- if (compress_engine == NULL) {
- msg("invalid compression engine: \"%"TS"\"", toptarg);
- show_available_engines(stderr);
- return 1;
- }
- break;
- case 'D':
- decompress_engine = name_to_engine(toptarg);
- if (decompress_engine == NULL) {
- msg("invalid decompression engine: \"%"TS"\"", toptarg);
- show_available_engines(stderr);
- return 1;
- }
- break;
- case 'e':
- allow_expansion = true;
- break;
- case 'g':
- format = GZIP_FORMAT;
- break;
- case 'h':
- show_usage(stdout);
- return 0;
- case 's':
- chunk_size = tstrtoul(toptarg, NULL, 10);
- if (chunk_size == 0) {
- msg("invalid chunk size: \"%"TS"\"", toptarg);
- return 1;
- }
- break;
- case 'V':
- show_version();
- return 0;
- case 'Y': /* deprecated, use '-C libz' instead */
- compress_engine = &libz_engine;
- break;
- case 'Z': /* deprecated, use '-D libz' instead */
- decompress_engine = &libz_engine;
- break;
- case 'z':
- format = ZLIB_FORMAT;
- break;
- default:
- show_usage(stderr);
- return 1;
- }
- }
-
- argc -= toptind;
- argv += toptind;
-
- if (level == 0)
- allow_expansion = true;
-
- ret = -1;
- if (!compressor_init(&compressor, level, format, compress_engine))
- goto out;
- if (!decompressor_init(&decompressor, format, decompress_engine))
- goto out;
-
- if (allow_expansion)
- compressed_buf_size = compress_bound(&compressor, chunk_size);
- else
- compressed_buf_size = chunk_size - 1;
-
- original_buf = xmalloc(chunk_size);
- compressed_buf = xmalloc(compressed_buf_size);
- decompressed_buf = xmalloc(chunk_size);
-
- ret = -1;
- if (original_buf == NULL || compressed_buf == NULL ||
- decompressed_buf == NULL)
- goto out;
-
- if (argc == 0) {
- argv = default_file_list;
- argc = ARRAY_LEN(default_file_list);
- } else {
- for (i = 0; i < argc; i++)
- if (argv[i][0] == '-' && argv[i][1] == '\0')
- argv[i] = NULL;
- }
-
- printf("Benchmarking %s compression:\n",
- format == DEFLATE_FORMAT ? "DEFLATE" :
- format == ZLIB_FORMAT ? "zlib" : "gzip");
- printf("\tCompression level: %d\n", level);
- printf("\tChunk size: %"PRIu32"\n", chunk_size);
- printf("\tCompression engine: %"TS"\n", compress_engine->name);
- printf("\tDecompression engine: %"TS"\n", decompress_engine->name);
-
- for (i = 0; i < argc; i++) {
- struct file_stream in;
-
- ret = xopen_for_read(argv[i], true, &in);
- if (ret != 0)
- goto out;
-
- printf("Processing %"TS"...\n", in.name);
-
- ret = do_benchmark(&in, original_buf, compressed_buf,
- decompressed_buf, chunk_size,
- allow_expansion, compressed_buf_size,
- &compressor, &decompressor);
- xclose(&in);
- if (ret != 0)
- goto out;
- }
- ret = 0;
-out:
- free(decompressed_buf);
- free(compressed_buf);
- free(original_buf);
- decompressor_destroy(&decompressor);
- compressor_destroy(&compressor);
- return -ret;
-}
diff --git a/util/compress/libdeflate/programs/checksum.c b/util/compress/libdeflate/programs/checksum.c
deleted file mode 100644
index 0d394723b..000000000
--- a/util/compress/libdeflate/programs/checksum.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * checksum.c - Adler-32 and CRC-32 checksumming program
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "test_util.h"
-
-static const tchar *const optstring = T("Ahs:tZ");
-
-static void
-show_usage(FILE *fp)
-{
- fprintf(fp,
-"Usage: %"TS" [-A] [-h] [-s SIZE] [-t] [-Z] [FILE]...\n"
-"Calculate Adler-32 or CRC-32 checksums of the specified FILEs.\n"
-"\n"
-"Options:\n"
-" -A use Adler-32 (default is CRC-32)\n"
-" -h print this help\n"
-" -s SIZE chunk size\n"
-" -t show checksum speed, excluding I/O\n"
-" -Z use zlib implementation instead of libdeflate\n",
- prog_invocation_name);
-}
-
-typedef u32 (*cksum_fn_t)(u32, const void *, size_t);
-
-static u32
-adler32_libdeflate(u32 adler, const void *buf, size_t len)
-{
- return libdeflate_adler32(adler, buf, len);
-}
-
-static u32
-crc32_libdeflate(u32 crc, const void *buf, size_t len)
-{
- return libdeflate_crc32(crc, buf, len);
-}
-
-static u32
-adler32_zlib(u32 adler, const void *buf, size_t len)
-{
- return adler32(adler, buf, len);
-}
-
-static u32
-crc32_zlib(u32 crc, const void *buf, size_t len)
-{
- return crc32(crc, buf, len);
-}
-
-static int
-checksum_stream(struct file_stream *in, cksum_fn_t cksum, u32 *sum,
- void *buf, size_t bufsize, u64 *size_ret, u64 *elapsed_ret)
-{
- u64 size = 0;
- u64 elapsed = 0;
-
- for (;;) {
- ssize_t ret;
- u64 start_time;
-
- ret = xread(in, buf, bufsize);
- if (ret < 0)
- return ret;
- if (ret == 0)
- break;
-
- size += ret;
- start_time = timer_ticks();
- *sum = cksum(*sum, buf, ret);
- elapsed += timer_ticks() - start_time;
- }
-
- if (elapsed == 0)
- elapsed = 1;
- *size_ret = size;
- *elapsed_ret = elapsed;
- return 0;
-}
-
-int
-tmain(int argc, tchar *argv[])
-{
- bool use_adler32 = false;
- bool use_zlib_impl = false;
- bool do_timing = false;
- void *buf;
- size_t bufsize = 131072;
- tchar *default_file_list[] = { NULL };
- cksum_fn_t cksum;
- int opt_char;
- int i;
- int ret;
-
- begin_program(argv);
-
- while ((opt_char = tgetopt(argc, argv, optstring)) != -1) {
- switch (opt_char) {
- case 'A':
- use_adler32 = true;
- break;
- case 'h':
- show_usage(stdout);
- return 0;
- case 's':
- bufsize = tstrtoul(toptarg, NULL, 10);
- if (bufsize == 0) {
- msg("invalid chunk size: \"%"TS"\"", toptarg);
- return 1;
- }
- break;
- case 't':
- do_timing = true;
- break;
- case 'Z':
- use_zlib_impl = true;
- break;
- default:
- show_usage(stderr);
- return 1;
- }
- }
-
- argc -= toptind;
- argv += toptind;
-
- if (use_adler32) {
- if (use_zlib_impl)
- cksum = adler32_zlib;
- else
- cksum = adler32_libdeflate;
- } else {
- if (use_zlib_impl)
- cksum = crc32_zlib;
- else
- cksum = crc32_libdeflate;
- }
-
- buf = xmalloc(bufsize);
- if (buf == NULL)
- return 1;
-
- if (argc == 0) {
- argv = default_file_list;
- argc = ARRAY_LEN(default_file_list);
- } else {
- for (i = 0; i < argc; i++)
- if (argv[i][0] == '-' && argv[i][1] == '\0')
- argv[i] = NULL;
- }
-
- for (i = 0; i < argc; i++) {
- struct file_stream in;
- u32 sum = cksum(0, NULL, 0);
- u64 size = 0;
- u64 elapsed = 0;
-
- ret = xopen_for_read(argv[i], true, &in);
- if (ret != 0)
- goto out;
-
- ret = checksum_stream(&in, cksum, &sum, buf, bufsize,
- &size, &elapsed);
- if (ret == 0) {
- if (do_timing) {
- printf("%08"PRIx32"\t%"TS"\t"
- "%"PRIu64" ms\t%"PRIu64" MB/s\n",
- sum, in.name, timer_ticks_to_ms(elapsed),
- timer_MB_per_s(size, elapsed));
- } else {
- printf("%08"PRIx32"\t%"TS"\t\n", sum, in.name);
- }
- }
-
- xclose(&in);
-
- if (ret != 0)
- goto out;
- }
- ret = 0;
-out:
- free(buf);
- return -ret;
-}
diff --git a/util/compress/libdeflate/programs/gzip.c b/util/compress/libdeflate/programs/gzip.c
deleted file mode 100644
index a1d6d2a0d..000000000
--- a/util/compress/libdeflate/programs/gzip.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * gzip.c - a file compression and decompression program
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "prog_util.h"
-
-#include <errno.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#ifdef _WIN32
-# include <sys/utime.h>
-#else
-# include <sys/time.h>
-# include <unistd.h>
-# include <utime.h>
-#endif
-
-struct options {
- bool to_stdout;
- bool decompress;
- bool force;
- bool keep;
- bool test;
- int compression_level;
- const tchar *suffix;
-};
-
-static const tchar *const optstring = T("1::2::3::4::5::6::7::8::9::cdfhknS:tV");
-
-static void
-show_usage(FILE *fp)
-{
- fprintf(fp,
-"Usage: %"TS" [-LEVEL] [-cdfhkV] [-S SUF] FILE...\n"
-"Compress or decompress the specified FILEs.\n"
-"\n"
-"Options:\n"
-" -1 fastest (worst) compression\n"
-" -6 medium compression (default)\n"
-" -12 slowest (best) compression\n"
-" -c write to standard output\n"
-" -d decompress\n"
-" -f overwrite existing output files\n"
-" -h print this help\n"
-" -k don't delete input files\n"
-" -S SUF use suffix SUF instead of .gz\n"
-" -t test file integrity\n"
-" -V show version and legal information\n",
- prog_invocation_name);
-}
-
-static void
-show_version(void)
-{
- printf(
-"gzip compression program v" LIBDEFLATE_VERSION_STRING "\n"
-"Copyright 2016 Eric Biggers\n"
-"\n"
-"This program is free software which may be modified and/or redistributed\n"
-"under the terms of the MIT license. There is NO WARRANTY, to the extent\n"
-"permitted by law. See the COPYING file for details.\n"
- );
-}
-
-/* Was the program invoked in decompression mode? */
-static bool
-is_gunzip(void)
-{
- if (tstrxcmp(prog_invocation_name, T("gunzip")) == 0)
- return true;
- if (tstrxcmp(prog_invocation_name, T("libdeflate-gunzip")) == 0)
- return true;
-#ifdef _WIN32
- if (tstrxcmp(prog_invocation_name, T("gunzip.exe")) == 0)
- return true;
- if (tstrxcmp(prog_invocation_name, T("libdeflate-gunzip.exe")) == 0)
- return true;
-#endif
- return false;
-}
-
-static const tchar *
-get_suffix(const tchar *path, const tchar *suffix)
-{
- size_t path_len = tstrlen(path);
- size_t suffix_len = tstrlen(suffix);
- const tchar *p;
-
- if (path_len <= suffix_len)
- return NULL;
- p = &path[path_len - suffix_len];
- if (tstrxcmp(p, suffix) == 0)
- return p;
- return NULL;
-}
-
-static bool
-has_suffix(const tchar *path, const tchar *suffix)
-{
- return get_suffix(path, suffix) != NULL;
-}
-
-static tchar *
-append_suffix(const tchar *path, const tchar *suffix)
-{
- size_t path_len = tstrlen(path);
- size_t suffix_len = tstrlen(suffix);
- tchar *suffixed_path;
-
- suffixed_path = xmalloc((path_len + suffix_len + 1) * sizeof(tchar));
- if (suffixed_path == NULL)
- return NULL;
- tmemcpy(suffixed_path, path, path_len);
- tmemcpy(&suffixed_path[path_len], suffix, suffix_len + 1);
- return suffixed_path;
-}
-
-static int
-do_compress(struct libdeflate_compressor *compressor,
- struct file_stream *in, struct file_stream *out)
-{
- const void *uncompressed_data = in->mmap_mem;
- size_t uncompressed_size = in->mmap_size;
- void *compressed_data;
- size_t actual_compressed_size;
- size_t max_compressed_size;
- int ret;
-
- max_compressed_size = libdeflate_gzip_compress_bound(compressor,
- uncompressed_size);
- compressed_data = xmalloc(max_compressed_size);
- if (compressed_data == NULL) {
- msg("%"TS": file is probably too large to be processed by this "
- "program", in->name);
- ret = -1;
- goto out;
- }
-
- actual_compressed_size = libdeflate_gzip_compress(compressor,
- uncompressed_data,
- uncompressed_size,
- compressed_data,
- max_compressed_size);
- if (actual_compressed_size == 0) {
- msg("Bug in libdeflate_gzip_compress_bound()!");
- ret = -1;
- goto out;
- }
-
- ret = full_write(out, compressed_data, actual_compressed_size);
-out:
- free(compressed_data);
- return ret;
-}
-
-static u32
-load_u32_gzip(const u8 *p)
-{
- return ((u32)p[0] << 0) | ((u32)p[1] << 8) |
- ((u32)p[2] << 16) | ((u32)p[3] << 24);
-}
-
-static int
-do_decompress(struct libdeflate_decompressor *decompressor,
- struct file_stream *in, struct file_stream *out,
- const struct options *options)
-{
- const u8 *compressed_data = in->mmap_mem;
- size_t compressed_size = in->mmap_size;
- void *uncompressed_data = NULL;
- size_t uncompressed_size;
- size_t actual_in_nbytes;
- size_t actual_out_nbytes;
- enum libdeflate_result result;
- int ret = 0;
-
- if (compressed_size < sizeof(u32)) {
- msg("%"TS": not in gzip format", in->name);
- ret = -1;
- goto out;
- }
-
- /*
- * Use the ISIZE field as a hint for the decompressed data size. It may
- * need to be increased later, however, because the file may contain
- * multiple gzip members and the particular ISIZE we happen to use may
- * not be the largest; or the real size may be >= 4 GiB, causing ISIZE
- * to overflow. In any case, make sure to allocate at least one byte.
- */
- uncompressed_size = load_u32_gzip(&compressed_data[compressed_size - 4]);
- if (uncompressed_size == 0)
- uncompressed_size = 1;
-
- do {
- if (uncompressed_data == NULL) {
- uncompressed_data = xmalloc(uncompressed_size);
- if (uncompressed_data == NULL) {
- msg("%"TS": file is probably too large to be "
- "processed by this program", in->name);
- ret = -1;
- goto out;
- }
- }
-
- result = libdeflate_gzip_decompress_ex(decompressor,
- compressed_data,
- compressed_size,
- uncompressed_data,
- uncompressed_size,
- &actual_in_nbytes,
- &actual_out_nbytes);
-
- if (result == LIBDEFLATE_INSUFFICIENT_SPACE) {
- if (uncompressed_size * 2 <= uncompressed_size) {
- msg("%"TS": file corrupt or too large to be "
- "processed by this program", in->name);
- ret = -1;
- goto out;
- }
- uncompressed_size *= 2;
- free(uncompressed_data);
- uncompressed_data = NULL;
- continue;
- }
-
- if (result != LIBDEFLATE_SUCCESS) {
- msg("%"TS": file corrupt or not in gzip format",
- in->name);
- ret = -1;
- goto out;
- }
-
- if (actual_in_nbytes == 0 ||
- actual_in_nbytes > compressed_size ||
- actual_out_nbytes > uncompressed_size) {
- msg("Bug in libdeflate_gzip_decompress_ex()!");
- ret = -1;
- goto out;
- }
-
- if (!options->test) {
- ret = full_write(out, uncompressed_data, actual_out_nbytes);
- if (ret != 0)
- goto out;
- }
-
- compressed_data += actual_in_nbytes;
- compressed_size -= actual_in_nbytes;
-
- } while (compressed_size != 0);
-out:
- free(uncompressed_data);
- return ret;
-}
-
-static int
-stat_file(struct file_stream *in, stat_t *stbuf, bool allow_hard_links)
-{
- if (tfstat(in->fd, stbuf) != 0) {
- msg("%"TS": unable to stat file", in->name);
- return -1;
- }
-
- if (!S_ISREG(stbuf->st_mode) && !in->is_standard_stream) {
- msg("%"TS" is %s -- skipping",
- in->name, S_ISDIR(stbuf->st_mode) ? "a directory" :
- "not a regular file");
- return -2;
- }
-
- if (stbuf->st_nlink > 1 && !allow_hard_links) {
- msg("%"TS" has multiple hard links -- skipping "
- "(use -f to process anyway)", in->name);
- return -2;
- }
-
- return 0;
-}
-
-static void
-restore_mode(struct file_stream *out, const stat_t *stbuf)
-{
-#ifndef _WIN32
- if (fchmod(out->fd, stbuf->st_mode) != 0)
- msg_errno("%"TS": unable to preserve mode", out->name);
-#endif
-}
-
-static void
-restore_owner_and_group(struct file_stream *out, const stat_t *stbuf)
-{
-#ifndef _WIN32
- if (fchown(out->fd, stbuf->st_uid, stbuf->st_gid) != 0) {
- msg_errno("%"TS": unable to preserve owner and group",
- out->name);
- }
-#endif
-}
-
-static void
-restore_timestamps(struct file_stream *out, const tchar *newpath,
- const stat_t *stbuf)
-{
- int ret;
-#if defined(HAVE_FUTIMENS) && defined(HAVE_STAT_NANOSECOND_PRECISION)
- struct timespec times[2] = {
- stbuf->st_atim, stbuf->st_mtim,
- };
- ret = futimens(out->fd, times);
-#elif defined(HAVE_FUTIMES) && defined(HAVE_STAT_NANOSECOND_PRECISION)
- struct timeval times[2] = {
- { stbuf->st_atim.tv_sec, stbuf->st_atim.tv_nsec / 1000, },
- { stbuf->st_mtim.tv_sec, stbuf->st_mtim.tv_nsec / 1000, },
- };
- ret = futimes(out->fd, times);
-#else
- struct tutimbuf times = {
- stbuf->st_atime, stbuf->st_mtime,
- };
- ret = tutime(newpath, &times);
-#endif
- if (ret != 0)
- msg_errno("%"TS": unable to preserve timestamps", out->name);
-}
-
-static void
-restore_metadata(struct file_stream *out, const tchar *newpath,
- const stat_t *stbuf)
-{
- restore_mode(out, stbuf);
- restore_owner_and_group(out, stbuf);
- restore_timestamps(out, newpath, stbuf);
-}
-
-static int
-decompress_file(struct libdeflate_decompressor *decompressor, const tchar *path,
- const struct options *options)
-{
- tchar *oldpath = (tchar *)path;
- tchar *newpath = NULL;
- struct file_stream in;
- struct file_stream out;
- stat_t stbuf;
- int ret;
- int ret2;
-
- if (path != NULL) {
- const tchar *suffix = get_suffix(path, options->suffix);
- if (suffix == NULL) {
- /*
- * Input file is unsuffixed. If the file doesn't exist,
- * then try it suffixed. Otherwise, if we're not
- * writing to stdout, skip the file with warning status.
- * Otherwise, go ahead and try to open the file anyway
- * (which will very likely fail).
- */
- if (tstat(path, &stbuf) != 0 && errno == ENOENT) {
- oldpath = append_suffix(path, options->suffix);
- if (oldpath == NULL)
- return -1;
- if (!options->to_stdout)
- newpath = (tchar *)path;
- } else if (!options->to_stdout) {
- msg("\"%"TS"\" does not end with the %"TS" "
- "suffix -- skipping",
- path, options->suffix);
- return -2;
- }
- } else if (!options->to_stdout) {
- /*
- * Input file is suffixed, and we're not writing to
- * stdout. Strip the suffix to get the path to the
- * output file.
- */
- newpath = xmalloc((suffix - oldpath + 1) *
- sizeof(tchar));
- if (newpath == NULL)
- return -1;
- tmemcpy(newpath, oldpath, suffix - oldpath);
- newpath[suffix - oldpath] = '\0';
- }
- }
-
- ret = xopen_for_read(oldpath, options->force || options->to_stdout,
- &in);
- if (ret != 0)
- goto out_free_paths;
-
- if (!options->force && isatty(in.fd)) {
- msg("Refusing to read compressed data from terminal. "
- "Use -f to override.\nFor help, use -h.");
- ret = -1;
- goto out_close_in;
- }
-
- ret = stat_file(&in, &stbuf, options->force || options->keep ||
- oldpath == NULL || newpath == NULL);
- if (ret != 0)
- goto out_close_in;
-
- ret = xopen_for_write(newpath, options->force, &out);
- if (ret != 0)
- goto out_close_in;
-
- /* TODO: need a streaming-friendly solution */
- ret = map_file_contents(&in, stbuf.st_size);
- if (ret != 0)
- goto out_close_out;
-
- ret = do_decompress(decompressor, &in, &out, options);
- if (ret != 0)
- goto out_close_out;
-
- if (oldpath != NULL && newpath != NULL)
- restore_metadata(&out, newpath, &stbuf);
- ret = 0;
-out_close_out:
- ret2 = xclose(&out);
- if (ret == 0)
- ret = ret2;
- if (ret != 0 && newpath != NULL)
- tunlink(newpath);
-out_close_in:
- xclose(&in);
- if (ret == 0 && oldpath != NULL && newpath != NULL && !options->keep)
- tunlink(oldpath);
-out_free_paths:
- if (newpath != path)
- free(newpath);
- if (oldpath != path)
- free(oldpath);
- return ret;
-}
-
-static int
-compress_file(struct libdeflate_compressor *compressor, const tchar *path,
- const struct options *options)
-{
- tchar *newpath = NULL;
- struct file_stream in;
- struct file_stream out;
- stat_t stbuf;
- int ret;
- int ret2;
-
- if (path != NULL && !options->to_stdout) {
- if (!options->force && has_suffix(path, options->suffix)) {
- msg("%"TS": already has %"TS" suffix -- skipping",
- path, options->suffix);
- return 0;
- }
- newpath = append_suffix(path, options->suffix);
- if (newpath == NULL)
- return -1;
- }
-
- ret = xopen_for_read(path, options->force || options->to_stdout, &in);
- if (ret != 0)
- goto out_free_newpath;
-
- ret = stat_file(&in, &stbuf, options->force || options->keep ||
- path == NULL || newpath == NULL);
- if (ret != 0)
- goto out_close_in;
-
- ret = xopen_for_write(newpath, options->force, &out);
- if (ret != 0)
- goto out_close_in;
-
- if (!options->force && isatty(out.fd)) {
- msg("Refusing to write compressed data to terminal. "
- "Use -f to override.\nFor help, use -h.");
- ret = -1;
- goto out_close_out;
- }
-
- /* TODO: need a streaming-friendly solution */
- ret = map_file_contents(&in, stbuf.st_size);
- if (ret != 0)
- goto out_close_out;
-
- ret = do_compress(compressor, &in, &out);
- if (ret != 0)
- goto out_close_out;
-
- if (path != NULL && newpath != NULL)
- restore_metadata(&out, newpath, &stbuf);
- ret = 0;
-out_close_out:
- ret2 = xclose(&out);
- if (ret == 0)
- ret = ret2;
- if (ret != 0 && newpath != NULL)
- tunlink(newpath);
-out_close_in:
- xclose(&in);
- if (ret == 0 && path != NULL && newpath != NULL && !options->keep)
- tunlink(path);
-out_free_newpath:
- free(newpath);
- return ret;
-}
-
-int
-tmain(int argc, tchar *argv[])
-{
- tchar *default_file_list[] = { NULL };
- struct options options;
- int opt_char;
- int i;
- int ret;
-
- begin_program(argv);
-
- options.to_stdout = false;
- options.decompress = is_gunzip();
- options.force = false;
- options.keep = false;
- options.test = false;
- options.compression_level = 6;
- options.suffix = T(".gz");
-
- while ((opt_char = tgetopt(argc, argv, optstring)) != -1) {
- switch (opt_char) {
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- options.compression_level =
- parse_compression_level(opt_char, toptarg);
- if (options.compression_level < 0)
- return 1;
- break;
- case 'c':
- options.to_stdout = true;
- break;
- case 'd':
- options.decompress = true;
- break;
- case 'f':
- options.force = true;
- break;
- case 'h':
- show_usage(stdout);
- return 0;
- case 'k':
- options.keep = true;
- break;
- case 'n':
- /*
- * -n means don't save or restore the original filename
- * in the gzip header. Currently this implementation
- * already behaves this way by default, so accept the
- * option as a no-op.
- */
- break;
- case 'S':
- options.suffix = toptarg;
- if (options.suffix[0] == T('\0')) {
- msg("invalid suffix");
- return 1;
- }
- break;
- case 't':
- options.test = true;
- options.decompress = true;
- options.to_stdout = true;
- /*
- * -t behaves just like the more commonly used -c
- * option, except that -t doesn't actually write
- * anything. For ease of implementation, just pretend
- * that -c was specified too.
- */
- break;
- case 'V':
- show_version();
- return 0;
- default:
- show_usage(stderr);
- return 1;
- }
- }
-
- argv += toptind;
- argc -= toptind;
-
- if (argc == 0) {
- argv = default_file_list;
- argc = ARRAY_LEN(default_file_list);
- } else {
- for (i = 0; i < argc; i++)
- if (argv[i][0] == '-' && argv[i][1] == '\0')
- argv[i] = NULL;
- }
-
- ret = 0;
- if (options.decompress) {
- struct libdeflate_decompressor *d;
-
- d = alloc_decompressor();
- if (d == NULL)
- return 1;
-
- for (i = 0; i < argc; i++)
- ret |= -decompress_file(d, argv[i], &options);
-
- libdeflate_free_decompressor(d);
- } else {
- struct libdeflate_compressor *c;
-
- c = alloc_compressor(options.compression_level);
- if (c == NULL)
- return 1;
-
- for (i = 0; i < argc; i++)
- ret |= -compress_file(c, argv[i], &options);
-
- libdeflate_free_compressor(c);
- }
-
- /*
- * If ret=0, there were no warnings or errors. Exit with status 0.
- * If ret=2, there was at least one warning. Exit with status 2.
- * Else, there was at least one error. Exit with status 1.
- */
- if (ret != 0 && ret != 2)
- ret = 1;
-
- return ret;
-}
diff --git a/util/compress/libdeflate/programs/prog_util.c b/util/compress/libdeflate/programs/prog_util.c
deleted file mode 100644
index 343828143..000000000
--- a/util/compress/libdeflate/programs/prog_util.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * prog_util.c - utility functions for programs
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "prog_util.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stdarg.h>
-#ifdef _WIN32
-# include <windows.h>
-#else
-# include <unistd.h>
-# include <sys/mman.h>
-#endif
-
-#ifndef O_BINARY
-# define O_BINARY 0
-#endif
-#ifndef O_SEQUENTIAL
-# define O_SEQUENTIAL 0
-#endif
-#ifndef O_NOFOLLOW
-# define O_NOFOLLOW 0
-#endif
-#ifndef O_NONBLOCK
-# define O_NONBLOCK 0
-#endif
-#ifndef O_NOCTTY
-# define O_NOCTTY 0
-#endif
-
-/* The invocation name of the program (filename component only) */
-const tchar *prog_invocation_name;
-
-static void
-do_msg(const char *format, bool with_errno, va_list va)
-{
- int saved_errno = errno;
-
- fprintf(stderr, "%"TS": ", prog_invocation_name);
- vfprintf(stderr, format, va);
- if (with_errno)
- fprintf(stderr, ": %s\n", strerror(saved_errno));
- else
- fprintf(stderr, "\n");
-
- errno = saved_errno;
-}
-
-/* Print a message to standard error */
-void
-msg(const char *format, ...)
-{
- va_list va;
-
- va_start(va, format);
- do_msg(format, false, va);
- va_end(va);
-}
-
-/* Print a message to standard error, including a description of errno */
-void
-msg_errno(const char *format, ...)
-{
- va_list va;
-
- va_start(va, format);
- do_msg(format, true, va);
- va_end(va);
-}
-
-/* malloc() wrapper */
-void *
-xmalloc(size_t size)
-{
- void *p = malloc(size);
- if (p == NULL && size == 0)
- p = malloc(1);
- if (p == NULL)
- msg("Out of memory");
- return p;
-}
-
-/*
- * Retrieve a pointer to the filename component of the specified path.
- *
- * Note: this does not modify the path. Therefore, it is not guaranteed to work
- * properly for directories, since a path to a directory might have trailing
- * slashes.
- */
-static const tchar *
-get_filename(const tchar *path)
-{
- const tchar *slash = tstrrchr(path, '/');
-#ifdef _WIN32
- const tchar *backslash = tstrrchr(path, '\\');
- if (backslash != NULL && (slash == NULL || backslash > slash))
- slash = backslash;
-#endif
- if (slash != NULL)
- return slash + 1;
- return path;
-}
-
-void
-begin_program(tchar *argv[])
-{
- prog_invocation_name = get_filename(argv[0]);
-
-#ifdef FREESTANDING
- /* This allows testing freestanding library builds. */
- libdeflate_set_memory_allocator(malloc, free);
-#endif
-}
-
-/* Create a copy of 'path' surrounded by double quotes */
-static tchar *
-quote_path(const tchar *path)
-{
- size_t len = tstrlen(path);
- tchar *result;
-
- result = xmalloc((1 + len + 1 + 1) * sizeof(tchar));
- if (result == NULL)
- return NULL;
- result[0] = '"';
- tmemcpy(&result[1], path, len);
- result[1 + len] = '"';
- result[1 + len + 1] = '\0';
- return result;
-}
-
-/* Open a file for reading, or set up standard input for reading */
-int
-xopen_for_read(const tchar *path, bool symlink_ok, struct file_stream *strm)
-{
- strm->mmap_token = NULL;
- strm->mmap_mem = NULL;
-
- if (path == NULL) {
- strm->is_standard_stream = true;
- strm->name = T("standard input");
- strm->fd = STDIN_FILENO;
- #ifdef _WIN32
- _setmode(strm->fd, O_BINARY);
- #endif
- return 0;
- }
-
- strm->is_standard_stream = false;
-
- strm->name = quote_path(path);
- if (strm->name == NULL)
- return -1;
-
- strm->fd = topen(path, O_RDONLY | O_BINARY | O_NONBLOCK | O_NOCTTY |
- (symlink_ok ? 0 : O_NOFOLLOW) | O_SEQUENTIAL);
- if (strm->fd < 0) {
- msg_errno("Can't open %"TS" for reading", strm->name);
- free(strm->name);
- return -1;
- }
-
-#if defined(HAVE_POSIX_FADVISE) && (O_SEQUENTIAL == 0)
- (void)posix_fadvise(strm->fd, 0, 0, POSIX_FADV_SEQUENTIAL);
-#endif
-
- return 0;
-}
-
-/* Open a file for writing, or set up standard output for writing */
-int
-xopen_for_write(const tchar *path, bool overwrite, struct file_stream *strm)
-{
- int ret = -1;
-
- strm->mmap_token = NULL;
- strm->mmap_mem = NULL;
-
- if (path == NULL) {
- strm->is_standard_stream = true;
- strm->name = T("standard output");
- strm->fd = STDOUT_FILENO;
- #ifdef _WIN32
- _setmode(strm->fd, O_BINARY);
- #endif
- return 0;
- }
-
- strm->is_standard_stream = false;
-
- strm->name = quote_path(path);
- if (strm->name == NULL)
- goto err;
-retry:
- strm->fd = topen(path, O_WRONLY | O_BINARY | O_NOFOLLOW |
- O_CREAT | O_EXCL, 0644);
- if (strm->fd < 0) {
- if (errno != EEXIST) {
- msg_errno("Can't open %"TS" for writing", strm->name);
- goto err;
- }
- if (!overwrite) {
- if (!isatty(STDERR_FILENO) || !isatty(STDIN_FILENO)) {
- msg("%"TS" already exists; use -f to overwrite",
- strm->name);
- ret = -2; /* warning only */
- goto err;
- }
- fprintf(stderr, "%"TS": %"TS" already exists; "
- "overwrite? (y/n) ",
- prog_invocation_name, strm->name);
- if (getchar() != 'y') {
- msg("Not overwriting.");
- goto err;
- }
- }
- if (tunlink(path) != 0) {
- msg_errno("Unable to delete %"TS, strm->name);
- goto err;
- }
- goto retry;
- }
-
- return 0;
-
-err:
- free(strm->name);
- return ret;
-}
-
-/* Read the full contents of a file into memory */
-static int
-read_full_contents(struct file_stream *strm)
-{
- size_t filled = 0;
- size_t capacity = 4096;
- char *buf;
- int ret;
-
- buf = xmalloc(capacity);
- if (buf == NULL)
- return -1;
- do {
- if (filled == capacity) {
- char *newbuf;
-
- if (capacity == SIZE_MAX)
- goto oom;
- capacity += MIN(SIZE_MAX - capacity, capacity);
- newbuf = realloc(buf, capacity);
- if (newbuf == NULL)
- goto oom;
- buf = newbuf;
- }
- ret = xread(strm, &buf[filled], capacity - filled);
- if (ret < 0)
- goto err;
- filled += ret;
- } while (ret != 0);
-
- strm->mmap_mem = buf;
- strm->mmap_size = filled;
- return 0;
-
-err:
- free(buf);
- return ret;
-oom:
- msg("Out of memory! %"TS" is too large to be processed by "
- "this program as currently implemented.", strm->name);
- ret = -1;
- goto err;
-}
-
-/* Map the contents of a file into memory */
-int
-map_file_contents(struct file_stream *strm, u64 size)
-{
- if (size == 0) /* mmap isn't supported on empty files */
- return read_full_contents(strm);
-
- if (size > SIZE_MAX) {
- msg("%"TS" is too large to be processed by this program",
- strm->name);
- return -1;
- }
-#ifdef _WIN32
- strm->mmap_token = CreateFileMapping(
- (HANDLE)(intptr_t)_get_osfhandle(strm->fd),
- NULL, PAGE_READONLY, 0, 0, NULL);
- if (strm->mmap_token == NULL) {
- DWORD err = GetLastError();
- if (err == ERROR_BAD_EXE_FORMAT) /* mmap unsupported */
- return read_full_contents(strm);
- msg("Unable create file mapping for %"TS": Windows error %u",
- strm->name, (unsigned int)err);
- return -1;
- }
-
- strm->mmap_mem = MapViewOfFile((HANDLE)strm->mmap_token,
- FILE_MAP_READ, 0, 0, size);
- if (strm->mmap_mem == NULL) {
- msg("Unable to map %"TS" into memory: Windows error %u",
- strm->name, (unsigned int)GetLastError());
- CloseHandle((HANDLE)strm->mmap_token);
- return -1;
- }
-#else /* _WIN32 */
- strm->mmap_mem = mmap(NULL, size, PROT_READ, MAP_SHARED, strm->fd, 0);
- if (strm->mmap_mem == MAP_FAILED) {
- strm->mmap_mem = NULL;
- if (errno == ENODEV) /* mmap isn't supported on this file */
- return read_full_contents(strm);
- if (errno == ENOMEM) {
- msg("%"TS" is too large to be processed by this "
- "program", strm->name);
- } else {
- msg_errno("Unable to map %"TS" into memory",
- strm->name);
- }
- return -1;
- }
-
-#ifdef HAVE_POSIX_MADVISE
- (void)posix_madvise(strm->mmap_mem, size, POSIX_MADV_SEQUENTIAL);
-#endif
- strm->mmap_token = strm; /* anything that's not NULL */
-
-#endif /* !_WIN32 */
- strm->mmap_size = size;
- return 0;
-}
-
-/*
- * Read from a file, returning the full count to indicate all bytes were read, a
- * short count (possibly 0) to indicate EOF, or -1 to indicate error.
- */
-ssize_t
-xread(struct file_stream *strm, void *buf, size_t count)
-{
- char *p = buf;
- size_t orig_count = count;
-
- while (count != 0) {
- ssize_t res = read(strm->fd, p, MIN(count, INT_MAX));
- if (res == 0)
- break;
- if (res < 0) {
- if (errno == EAGAIN || errno == EINTR)
- continue;
- msg_errno("Error reading from %"TS, strm->name);
- return -1;
- }
- p += res;
- count -= res;
- }
- return orig_count - count;
-}
-
-/* Write to a file, returning 0 if all bytes were written or -1 on error */
-int
-full_write(struct file_stream *strm, const void *buf, size_t count)
-{
- const char *p = buf;
-
- while (count != 0) {
- ssize_t res = write(strm->fd, p, MIN(count, INT_MAX));
- if (res <= 0) {
- msg_errno("Error writing to %"TS, strm->name);
- return -1;
- }
- p += res;
- count -= res;
- }
- return 0;
-}
-
-/* Close a file, returning 0 on success or -1 on error */
-int
-xclose(struct file_stream *strm)
-{
- int ret = 0;
-
- if (!strm->is_standard_stream) {
- if (close(strm->fd) != 0) {
- msg_errno("Error closing %"TS, strm->name);
- ret = -1;
- }
- free(strm->name);
- }
-
- if (strm->mmap_token != NULL) {
-#ifdef _WIN32
- UnmapViewOfFile(strm->mmap_mem);
- CloseHandle((HANDLE)strm->mmap_token);
-#else
- munmap(strm->mmap_mem, strm->mmap_size);
-#endif
- strm->mmap_token = NULL;
- } else {
- free(strm->mmap_mem);
- }
- strm->mmap_mem = NULL;
- strm->fd = -1;
- strm->name = NULL;
- return ret;
-}
-
-/*
- * Parse the compression level given on the command line, returning the
- * compression level on success or -1 on error
- */
-int
-parse_compression_level(tchar opt_char, const tchar *arg)
-{
- int level;
-
- if (arg == NULL)
- arg = T("");
-
- if (opt_char < '0' || opt_char > '9')
- goto invalid;
- level = opt_char - '0';
-
- if (arg[0] != '\0') {
- if (arg[0] < '0' || arg[0] > '9')
- goto invalid;
- if (arg[1] != '\0') /* Levels are at most 2 digits */
- goto invalid;
- if (level == 0) /* Don't allow arguments like "-01" */
- goto invalid;
- level = (level * 10) + (arg[0] - '0');
- }
-
- if (level < 0 || level > 12)
- goto invalid;
-
- return level;
-
-invalid:
- msg("Invalid compression level: \"%"TC"%"TS"\". "
- "Must be an integer in the range [0, 12].", opt_char, arg);
- return -1;
-}
-
-/* Allocate a new DEFLATE compressor */
-struct libdeflate_compressor *
-alloc_compressor(int level)
-{
- struct libdeflate_compressor *c;
-
- c = libdeflate_alloc_compressor(level);
- if (c == NULL) {
- msg_errno("Unable to allocate compressor with "
- "compression level %d", level);
- }
- return c;
-}
-
-/* Allocate a new DEFLATE decompressor */
-struct libdeflate_decompressor *
-alloc_decompressor(void)
-{
- struct libdeflate_decompressor *d;
-
- d = libdeflate_alloc_decompressor();
- if (d == NULL)
- msg_errno("Unable to allocate decompressor");
-
- return d;
-}
diff --git a/util/compress/libdeflate/programs/prog_util.h b/util/compress/libdeflate/programs/prog_util.h
deleted file mode 100644
index fa3f7b519..000000000
--- a/util/compress/libdeflate/programs/prog_util.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * prog_util.h - utility functions for programs
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef PROGRAMS_PROG_UTIL_H
-#define PROGRAMS_PROG_UTIL_H
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "libdeflate.h"
-
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#ifndef _WIN32
-# include <sys/types.h>
-#endif
-
-#include "../common/common_defs.h"
-
-#ifdef __GNUC__
-# define _printf(str_idx, args_idx) \
- __attribute__((format(printf, str_idx, args_idx)))
-#else
-# define _printf(str_idx, args_idx)
-#endif
-
-#ifdef _MSC_VER
-/*
- * Old versions (e.g. VS2010) of MSC have stdint.h but not the C99 header
- * inttypes.h. Work around this by defining the PRI* macros ourselves.
- */
-# define PRIu8 "hhu"
-# define PRIu16 "hu"
-# define PRIu32 "u"
-# define PRIu64 "llu"
-# define PRIi8 "hhi"
-# define PRIi16 "hi"
-# define PRIi32 "i"
-# define PRIi64 "lli"
-# define PRIx8 "hhx"
-# define PRIx16 "hx"
-# define PRIx32 "x"
-# define PRIx64 "llx"
-#else
-# include <inttypes.h>
-#endif
-
-#ifdef _WIN32
-
-/*
- * Definitions for Windows builds. Mainly, 'tchar' is defined to be the 2-byte
- * 'wchar_t' type instead of 'char'. This is the only "easy" way I know of to
- * get full Unicode support on Windows...
- */
-
-#include <wchar.h>
-int wmain(int argc, wchar_t **argv);
-# define tmain wmain
-# define tchar wchar_t
-# define _T(text) L##text
-# define T(text) _T(text)
-# define TS "ls"
-# define TC "lc"
-# define tmemcpy wmemcpy
-# define topen _wopen
-# define tstrchr wcschr
-# define tstrcmp wcscmp
-# define tstrlen wcslen
-# define tstrrchr wcsrchr
-# define tstrtoul wcstoul
-# define tstrxcmp wcsicmp
-# define tunlink _wunlink
-# define tutimbuf __utimbuf64
-# define tutime _wutime64
-# define tstat _wstat64
-# define tfstat _fstat64
-# define stat_t struct _stat64
-# ifdef _MSC_VER
-# define STDIN_FILENO 0
-# define STDOUT_FILENO 1
-# define STDERR_FILENO 2
-# define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
-# define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
-# endif
-
-#else /* _WIN32 */
-
-/* Standard definitions for everyone else */
-
-# define tmain main
-# define tchar char
-# define T(text) text
-# define TS "s"
-# define TC "c"
-# define tmemcpy memcpy
-# define topen open
-# define tstrchr strchr
-# define tstrcmp strcmp
-# define tstrlen strlen
-# define tstrrchr strrchr
-# define tstrtoul strtoul
-# define tstrxcmp strcmp
-# define tunlink unlink
-# define tutimbuf utimbuf
-# define tutime utime
-# define tstat stat
-# define tfstat fstat
-# define stat_t struct stat
-
-#endif /* !_WIN32 */
-
-extern const tchar *prog_invocation_name;
-
-void _printf(1, 2) msg(const char *fmt, ...);
-void _printf(1, 2) msg_errno(const char *fmt, ...);
-
-void *xmalloc(size_t size);
-
-void begin_program(tchar *argv[]);
-
-struct file_stream {
- int fd;
- tchar *name;
- bool is_standard_stream;
- void *mmap_token;
- void *mmap_mem;
- size_t mmap_size;
-};
-
-int xopen_for_read(const tchar *path, bool symlink_ok,
- struct file_stream *strm);
-int xopen_for_write(const tchar *path, bool force, struct file_stream *strm);
-int map_file_contents(struct file_stream *strm, u64 size);
-
-ssize_t xread(struct file_stream *strm, void *buf, size_t count);
-int full_write(struct file_stream *strm, const void *buf, size_t count);
-
-int xclose(struct file_stream *strm);
-
-int parse_compression_level(tchar opt_char, const tchar *arg);
-
-struct libdeflate_compressor *alloc_compressor(int level);
-struct libdeflate_decompressor *alloc_decompressor(void);
-
-/* tgetopt.c */
-
-extern tchar *toptarg;
-extern int toptind, topterr, toptopt;
-
-int tgetopt(int argc, tchar *argv[], const tchar *optstring);
-
-#endif /* PROGRAMS_PROG_UTIL_H */
diff --git a/util/compress/libdeflate/programs/test_checksums.c b/util/compress/libdeflate/programs/test_checksums.c
deleted file mode 100644
index 97054182f..000000000
--- a/util/compress/libdeflate/programs/test_checksums.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * test_checksums.c
- *
- * Verify that libdeflate's Adler-32 and CRC-32 functions produce the same
- * results as their zlib equivalents.
- */
-
-#include <stdlib.h>
-#include <time.h>
-
-#include "test_util.h"
-
-static unsigned int rng_seed;
-
-typedef u32 (*cksum_fn_t)(u32, const void *, size_t);
-
-static u32
-adler32_libdeflate(u32 adler, const void *buf, size_t len)
-{
- return libdeflate_adler32(adler, buf, len);
-}
-
-static u32
-crc32_libdeflate(u32 crc, const void *buf, size_t len)
-{
- return libdeflate_crc32(crc, buf, len);
-}
-
-static u32
-adler32_zlib(u32 adler, const void *buf, size_t len)
-{
- return adler32(adler, buf, len);
-}
-
-static u32
-crc32_zlib(u32 crc, const void *buf, size_t len)
-{
- return crc32(crc, buf, len);
-}
-
-static u32
-select_initial_crc(void)
-{
- if (rand() & 1)
- return 0;
- return ((u32)rand() << 16) | rand();
-}
-
-static u32
-select_initial_adler(void)
-{
- u32 lo, hi;
-
- if (rand() & 1)
- return 1;
-
- lo = (rand() % 4 == 0 ? 65520 : rand() % 65521);
- hi = (rand() % 4 == 0 ? 65520 : rand() % 65521);
- return (hi << 16) | lo;
-}
-
-static void
-test_initial_values(cksum_fn_t cksum, u32 expected)
-{
- ASSERT(cksum(0, NULL, 0) == expected);
- if (cksum != adler32_zlib) /* broken */
- ASSERT(cksum(0, NULL, 1) == expected);
- ASSERT(cksum(0, NULL, 1234) == expected);
- ASSERT(cksum(1234, NULL, 0) == expected);
- ASSERT(cksum(1234, NULL, 1234) == expected);
-}
-
-static void
-test_multipart(const u8 *buffer, size_t size, const char *name,
- cksum_fn_t cksum, u32 v, u32 expected)
-{
- size_t division = rand() % (size + 1);
- v = cksum(v, buffer, division);
- v = cksum(v, buffer + division, size - division);
- if (v != expected) {
- fprintf(stderr, "%s checksum failed multipart test\n", name);
- ASSERT(0);
- }
-}
-
-static void
-test_checksums(const void *buffer, size_t size, const char *name,
- cksum_fn_t cksum1, cksum_fn_t cksum2, u32 initial_value)
-{
- u32 v1 = cksum1(initial_value, buffer, size);
- u32 v2 = cksum2(initial_value, buffer, size);
-
- if (v1 != v2) {
- fprintf(stderr, "%s checksum mismatch\n", name);
- fprintf(stderr, "initial_value=0x%08"PRIx32", buffer=%p, "
- "size=%zu, buffer=", initial_value, buffer, size);
- for (size_t i = 0; i < MIN(size, 256); i++)
- fprintf(stderr, "%02x", ((const u8 *)buffer)[i]);
- if (size > 256)
- fprintf(stderr, "...");
- fprintf(stderr, "\n");
- ASSERT(0);
- }
-
- if ((rand() & 15) == 0) {
- test_multipart(buffer, size, name, cksum1, initial_value, v1);
- test_multipart(buffer, size, name, cksum2, initial_value, v1);
- }
-}
-
-static void
-test_crc32(const void *buffer, size_t size, u32 initial_value)
-{
- test_checksums(buffer, size, "CRC-32",
- crc32_libdeflate, crc32_zlib, initial_value);
-}
-
-static void
-test_adler32(const void *buffer, size_t size, u32 initial_value)
-{
- test_checksums(buffer, size, "Adler-32",
- adler32_libdeflate, adler32_zlib, initial_value);
-}
-
-static void test_random_buffers(u8 *buffer, u8 *guarded_buf_end,
- size_t limit, u32 num_iter)
-{
- for (u32 i = 0; i < num_iter; i++) {
- size_t start = rand() % limit;
- size_t len = rand() % (limit - start);
- u32 a0 = select_initial_adler();
- u32 c0 = select_initial_crc();
-
- for (size_t j = start; j < start + len; j++)
- buffer[j] = rand();
-
- /* Test with chosen size and alignment */
- test_adler32(&buffer[start], len, a0);
- test_crc32(&buffer[start], len, c0);
-
- /* Test with chosen size, with guard page after input buffer */
- memcpy(guarded_buf_end - len, &buffer[start], len);
- test_adler32(guarded_buf_end - len, len, a0);
- test_crc32(guarded_buf_end - len, len, c0);
- }
-}
-
-int
-tmain(int argc, tchar *argv[])
-{
- u8 *buffer = xmalloc(32768);
- u8 *guarded_buf_start, *guarded_buf_end;
-
- begin_program(argv);
-
- alloc_guarded_buffer(32768, &guarded_buf_start, &guarded_buf_end);
-
- rng_seed = time(NULL);
- srand(rng_seed);
-
- test_initial_values(adler32_libdeflate, 1);
- test_initial_values(adler32_zlib, 1);
- test_initial_values(crc32_libdeflate, 0);
- test_initial_values(crc32_zlib, 0);
-
- /* Test different buffer sizes and alignments */
- test_random_buffers(buffer, guarded_buf_end, 256, 5000);
- test_random_buffers(buffer, guarded_buf_end, 1024, 500);
- test_random_buffers(buffer, guarded_buf_end, 32768, 50);
-
- /*
- * Test Adler-32 overflow cases. For example, given all 0xFF bytes and
- * the highest possible initial (s1, s2) of (65520, 65520), then s2 if
- * stored as a 32-bit unsigned integer will overflow if > 5552 bytes are
- * processed. Implementations must make sure to reduce s2 modulo 65521
- * before that point. Also, some implementations make use of 16-bit
- * counters which can overflow earlier.
- */
- memset(buffer, 0xFF, 32768);
- for (u32 i = 0; i < 20; i++) {
- u32 initial_value;
-
- if (i == 0)
- initial_value = ((u32)65520 << 16) | 65520;
- else
- initial_value = select_initial_adler();
-
- test_adler32(buffer, 5553, initial_value);
- test_adler32(buffer, rand() % 32769, initial_value);
- buffer[rand() % 32768] = 0xFE;
- }
-
- free(buffer);
- free_guarded_buffer(guarded_buf_start, guarded_buf_end);
- return 0;
-}
diff --git a/util/compress/libdeflate/programs/test_custom_malloc.c b/util/compress/libdeflate/programs/test_custom_malloc.c
deleted file mode 100644
index 2bbb7f098..000000000
--- a/util/compress/libdeflate/programs/test_custom_malloc.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * test_custom_malloc.c
- *
- * Test libdeflate_set_memory_allocator().
- * Also test injecting allocation failures.
- */
-
-#include "test_util.h"
-
-static int malloc_count = 0;
-static int free_count = 0;
-
-static void *do_malloc(size_t size)
-{
- malloc_count++;
- return malloc(size);
-}
-
-static void *do_fail_malloc(size_t size)
-{
- malloc_count++;
- return NULL;
-}
-
-static void do_free(void *ptr)
-{
- free_count++;
- free(ptr);
-}
-
-int
-tmain(int argc, tchar *argv[])
-{
- int level;
- struct libdeflate_compressor *c;
- struct libdeflate_decompressor *d;
-
- begin_program(argv);
-
- /* Test that the custom allocator is actually used when requested. */
-
- libdeflate_set_memory_allocator(do_malloc, do_free);
- ASSERT(malloc_count == 0);
- ASSERT(free_count == 0);
-
- for (level = 0; level <= 12; level++) {
- malloc_count = free_count = 0;
- c = libdeflate_alloc_compressor(level);
- ASSERT(c != NULL);
- ASSERT(malloc_count == 1);
- ASSERT(free_count == 0);
- libdeflate_free_compressor(c);
- ASSERT(malloc_count == 1);
- ASSERT(free_count == 1);
- }
-
- malloc_count = free_count = 0;
- d = libdeflate_alloc_decompressor();
- ASSERT(d != NULL);
- ASSERT(malloc_count == 1);
- ASSERT(free_count == 0);
- libdeflate_free_decompressor(d);
- ASSERT(malloc_count == 1);
- ASSERT(free_count == 1);
-
- /* As long as we're here, also test injecting allocation failures. */
-
- libdeflate_set_memory_allocator(do_fail_malloc, do_free);
-
- for (level = 0; level <= 12; level++) {
- malloc_count = free_count = 0;
- c = libdeflate_alloc_compressor(level);
- ASSERT(c == NULL);
- ASSERT(malloc_count == 1);
- ASSERT(free_count == 0);
- }
-
- malloc_count = free_count = 0;
- d = libdeflate_alloc_decompressor();
- ASSERT(d == NULL);
- ASSERT(malloc_count == 1);
- ASSERT(free_count == 0);
-
- return 0;
-}
diff --git a/util/compress/libdeflate/programs/test_incomplete_codes.c b/util/compress/libdeflate/programs/test_incomplete_codes.c
deleted file mode 100644
index 4e441bccb..000000000
--- a/util/compress/libdeflate/programs/test_incomplete_codes.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * test_incomplete_codes.c
- *
- * Test that the decompressor accepts incomplete Huffman codes in certain
- * specific cases.
- */
-
-#include "test_util.h"
-
-static void
-verify_decompression_libdeflate(const u8 *in, size_t in_nbytes,
- u8 *out, size_t out_nbytes_avail,
- const u8 *expected_out,
- size_t expected_out_nbytes)
-{
- struct libdeflate_decompressor *d;
- enum libdeflate_result res;
- size_t actual_out_nbytes;
-
- d = libdeflate_alloc_decompressor();
- ASSERT(d != NULL);
-
- res = libdeflate_deflate_decompress(d, in, in_nbytes,
- out, out_nbytes_avail,
- &actual_out_nbytes);
- ASSERT(res == LIBDEFLATE_SUCCESS);
- ASSERT(actual_out_nbytes == expected_out_nbytes);
- ASSERT(memcmp(out, expected_out, actual_out_nbytes) == 0);
-
- libdeflate_free_decompressor(d);
-}
-
-static void
-verify_decompression_zlib(const u8 *in, size_t in_nbytes,
- u8 *out, size_t out_nbytes_avail,
- const u8 *expected_out, size_t expected_out_nbytes)
-{
- z_stream z;
- int res;
- size_t actual_out_nbytes;
-
- memset(&z, 0, sizeof(z));
- res = inflateInit2(&z, -15);
- ASSERT(res == Z_OK);
-
- z.next_in = (void *)in;
- z.avail_in = in_nbytes;
- z.next_out = (void *)out;
- z.avail_out = out_nbytes_avail;
- res = inflate(&z, Z_FINISH);
- ASSERT(res == Z_STREAM_END);
- actual_out_nbytes = out_nbytes_avail - z.avail_out;
- ASSERT(actual_out_nbytes == expected_out_nbytes);
- ASSERT(memcmp(out, expected_out, actual_out_nbytes) == 0);
-
- inflateEnd(&z);
-}
-
-static void
-verify_decompression(const u8 *in, size_t in_nbytes,
- u8 *out, size_t out_nbytes_avail,
- const u8 *expected_out, size_t expected_out_nbytes)
-{
- verify_decompression_libdeflate(in, in_nbytes, out, out_nbytes_avail,
- expected_out, expected_out_nbytes);
- verify_decompression_zlib(in, in_nbytes, out, out_nbytes_avail,
- expected_out, expected_out_nbytes);
-
-}
-
-/* Test that an empty offset code is accepted. */
-static void
-test_empty_offset_code(void)
-{
- static const u8 expected_out[] = { 'A', 'B', 'A', 'A' };
- u8 in[128];
- u8 out[128];
- struct output_bitstream os = { .next = in, .end = in + sizeof(in) };
- int i;
-
- /*
- * Generate a DEFLATE stream containing a "dynamic Huffman" block
- * containing literals, but no offsets; and having an empty offset code
- * (all codeword lengths set to 0).
- *
- * Litlen code:
- * litlensym_A freq=3 len=1 codeword= 0
- * litlensym_B freq=1 len=2 codeword=01
- * litlensym_256 (end-of-block) freq=1 len=2 codeword=11
- * Offset code:
- * (empty)
- *
- * Litlen and offset codeword lengths:
- * [0..'A'-1] = 0 presym_18
- * ['A'] = 1 presym_1
- * ['B'] = 2 presym_2
- * ['B'+1..255] = 0 presym_18 presym_18
- * [256] = 2 presym_2
- * [257] = 0 presym_0
- *
- * Precode:
- * presym_0 freq=1 len=3 codeword=011
- * presym_1 freq=1 len=3 codeword=111
- * presym_2 freq=2 len=2 codeword= 01
- * presym_18 freq=3 len=1 codeword= 0
- */
-
- ASSERT(put_bits(&os, 1, 1)); /* BFINAL: 1 */
- ASSERT(put_bits(&os, 2, 2)); /* BTYPE: DYNAMIC_HUFFMAN */
- ASSERT(put_bits(&os, 0, 5)); /* num_litlen_syms: 0 + 257 */
- ASSERT(put_bits(&os, 0, 5)); /* num_offset_syms: 0 + 1 */
- ASSERT(put_bits(&os, 14, 4)); /* num_explicit_precode_lens: 14 + 4 */
-
- /*
- * Precode codeword lengths: order is
- * [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]
- */
- for (i = 0; i < 2; i++) /* presym_{16,17}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 1, 3)); /* presym_18: len=1 */
- ASSERT(put_bits(&os, 3, 3)); /* presym_0: len=3 */
- for (i = 0; i < 11; i++) /* presym_{8,...,13}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 2, 3)); /* presym_2: len=2 */
- ASSERT(put_bits(&os, 0, 3)); /* presym_14: len=0 */
- ASSERT(put_bits(&os, 3, 3)); /* presym_1: len=3 */
-
- /* Litlen and offset codeword lengths */
- ASSERT(put_bits(&os, 0x0, 1) &&
- put_bits(&os, 54, 7)); /* presym_18, 65 zeroes */
- ASSERT(put_bits(&os, 0x7, 3)); /* presym_1 */
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x0, 1) &&
- put_bits(&os, 89, 7)); /* presym_18, 100 zeroes */
- ASSERT(put_bits(&os, 0x0, 1) &&
- put_bits(&os, 78, 7)); /* presym_18, 89 zeroes */
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x3, 3)); /* presym_0 */
-
- /* Litlen symbols */
- ASSERT(put_bits(&os, 0x0, 1)); /* litlensym_A */
- ASSERT(put_bits(&os, 0x1, 2)); /* litlensym_B */
- ASSERT(put_bits(&os, 0x0, 1)); /* litlensym_A */
- ASSERT(put_bits(&os, 0x0, 1)); /* litlensym_A */
- ASSERT(put_bits(&os, 0x3, 2)); /* litlensym_256 (end-of-block) */
-
- ASSERT(flush_bits(&os));
-
- verify_decompression(in, os.next - in, out, sizeof(out),
- expected_out, sizeof(expected_out));
-}
-
-/* Test that a litrunlen code containing only one symbol is accepted. */
-static void
-test_singleton_litrunlen_code(void)
-{
- u8 in[128];
- u8 out[128];
- struct output_bitstream os = { .next = in, .end = in + sizeof(in) };
- int i;
-
- /*
- * Litlen code:
- * litlensym_256 (end-of-block) freq=1 len=1 codeword=0
- * Offset code:
- * (empty)
- *
- * Litlen and offset codeword lengths:
- * [0..256] = 0 presym_18 presym_18
- * [256] = 1 presym_1
- * [257] = 0 presym_0
- *
- * Precode:
- * presym_0 freq=1 len=2 codeword=01
- * presym_1 freq=1 len=2 codeword=11
- * presym_18 freq=2 len=1 codeword= 0
- */
-
- ASSERT(put_bits(&os, 1, 1)); /* BFINAL: 1 */
- ASSERT(put_bits(&os, 2, 2)); /* BTYPE: DYNAMIC_HUFFMAN */
- ASSERT(put_bits(&os, 0, 5)); /* num_litlen_syms: 0 + 257 */
- ASSERT(put_bits(&os, 0, 5)); /* num_offset_syms: 0 + 1 */
- ASSERT(put_bits(&os, 14, 4)); /* num_explicit_precode_lens: 14 + 4 */
-
- /*
- * Precode codeword lengths: order is
- * [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]
- */
- for (i = 0; i < 2; i++) /* presym_{16,17}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 1, 3)); /* presym_18: len=1 */
- ASSERT(put_bits(&os, 2, 3)); /* presym_0: len=2 */
- for (i = 0; i < 13; i++) /* presym_{8,...,14}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 2, 3)); /* presym_1: len=2 */
-
- /* Litlen and offset codeword lengths */
- for (i = 0; i < 2; i++) {
- ASSERT(put_bits(&os, 0, 1) && /* presym_18, 128 zeroes */
- put_bits(&os, 117, 7));
- }
- ASSERT(put_bits(&os, 0x3, 2)); /* presym_1 */
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_0 */
-
- /* Litlen symbols */
- ASSERT(put_bits(&os, 0x0, 1)); /* litlensym_256 (end-of-block) */
-
- ASSERT(flush_bits(&os));
-
- verify_decompression(in, os.next - in, out, sizeof(out), in, 0);
-}
-
-/* Test that an offset code containing only one symbol is accepted. */
-static void
-test_singleton_offset_code(void)
-{
- static const u8 expected_out[] = { 255, 255, 255, 255 };
- u8 in[128];
- u8 out[128];
- struct output_bitstream os = { .next = in, .end = in + sizeof(in) };
- int i;
-
- ASSERT(put_bits(&os, 1, 1)); /* BFINAL: 1 */
- ASSERT(put_bits(&os, 2, 2)); /* BTYPE: DYNAMIC_HUFFMAN */
-
- /*
- * Litlen code:
- * litlensym_255 freq=1 len=1 codeword= 0
- * litlensym_256 (end-of-block) freq=1 len=2 codeword=01
- * litlensym_257 (len 3) freq=1 len=2 codeword=11
- * Offset code:
- * offsetsym_0 (offset 0) freq=1 len=1 codeword=0
- *
- * Litlen and offset codeword lengths:
- * [0..254] = 0 presym_{18,18}
- * [255] = 1 presym_1
- * [256] = 1 presym_2
- * [257] = 1 presym_2
- * [258] = 1 presym_1
- *
- * Precode:
- * presym_1 freq=2 len=2 codeword=01
- * presym_2 freq=2 len=2 codeword=11
- * presym_18 freq=2 len=1 codeword= 0
- */
-
- ASSERT(put_bits(&os, 1, 5)); /* num_litlen_syms: 1 + 257 */
- ASSERT(put_bits(&os, 0, 5)); /* num_offset_syms: 0 + 1 */
- ASSERT(put_bits(&os, 14, 4)); /* num_explicit_precode_lens: 14 + 4 */
- /*
- * Precode codeword lengths: order is
- * [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]
- */
- for (i = 0; i < 2; i++) /* presym_{16,17}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 1, 3)); /* presym_18: len=1 */
- for (i = 0; i < 12; i++) /* presym_{0,...,13}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 2, 3)); /* presym_2: len=2 */
- ASSERT(put_bits(&os, 0, 3)); /* presym_14: len=0 */
- ASSERT(put_bits(&os, 2, 3)); /* presym_1: len=2 */
-
- /* Litlen and offset codeword lengths */
- ASSERT(put_bits(&os, 0x0, 1) && /* presym_18, 128 zeroes */
- put_bits(&os, 117, 7));
- ASSERT(put_bits(&os, 0x0, 1) && /* presym_18, 127 zeroes */
- put_bits(&os, 116, 7));
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_1 */
- ASSERT(put_bits(&os, 0x3, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x3, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_1 */
-
- /* Literal */
- ASSERT(put_bits(&os, 0x0, 1)); /* litlensym_255 */
-
- /* Match */
- ASSERT(put_bits(&os, 0x3, 2)); /* litlensym_257 */
- ASSERT(put_bits(&os, 0x0, 1)); /* offsetsym_0 */
-
- /* End of block */
- ASSERT(put_bits(&os, 0x1, 2)); /* litlensym_256 */
-
- ASSERT(flush_bits(&os));
-
- verify_decompression(in, os.next - in, out, sizeof(out),
- expected_out, sizeof(expected_out));
-}
-
-/* Test that an offset code containing only one symbol is accepted, even if that
- * symbol is not symbol 0. The codeword should be '0' in either case. */
-static void
-test_singleton_offset_code_notsymzero(void)
-{
- static const u8 expected_out[] = { 254, 255, 254, 255, 254 };
- u8 in[128];
- u8 out[128];
- struct output_bitstream os = { .next = in, .end = in + sizeof(in) };
- int i;
-
- ASSERT(put_bits(&os, 1, 1)); /* BFINAL: 1 */
- ASSERT(put_bits(&os, 2, 2)); /* BTYPE: DYNAMIC_HUFFMAN */
-
- /*
- * Litlen code:
- * litlensym_254 len=2 codeword=00
- * litlensym_255 len=2 codeword=10
- * litlensym_256 (end-of-block) len=2 codeword=01
- * litlensym_257 (len 3) len=2 codeword=11
- * Offset code:
- * offsetsym_1 (offset 2) len=1 codeword=0
- *
- * Litlen and offset codeword lengths:
- * [0..253] = 0 presym_{18,18}
- * [254] = 2 presym_2
- * [255] = 2 presym_2
- * [256] = 2 presym_2
- * [257] = 2 presym_2
- * [258] = 0 presym_0
- * [259] = 1 presym_1
- *
- * Precode:
- * presym_0 len=2 codeword=00
- * presym_1 len=2 codeword=10
- * presym_2 len=2 codeword=01
- * presym_18 len=2 codeword=11
- */
-
- ASSERT(put_bits(&os, 1, 5)); /* num_litlen_syms: 1 + 257 */
- ASSERT(put_bits(&os, 1, 5)); /* num_offset_syms: 1 + 1 */
- ASSERT(put_bits(&os, 14, 4)); /* num_explicit_precode_lens: 14 + 4 */
- /*
- * Precode codeword lengths: order is
- * [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]
- */
- for (i = 0; i < 2; i++) /* presym_{16,17}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 2, 3)); /* presym_18: len=2 */
- ASSERT(put_bits(&os, 2, 3)); /* presym_0: len=2 */
- for (i = 0; i < 11; i++) /* presym_{8,...,13}: len=0 */
- ASSERT(put_bits(&os, 0, 3));
- ASSERT(put_bits(&os, 2, 3)); /* presym_2: len=2 */
- ASSERT(put_bits(&os, 0, 3)); /* presym_14: len=0 */
- ASSERT(put_bits(&os, 2, 3)); /* presym_1: len=2 */
-
- /* Litlen and offset codeword lengths */
- ASSERT(put_bits(&os, 0x3, 2) && /* presym_18, 128 zeroes */
- put_bits(&os, 117, 7));
- ASSERT(put_bits(&os, 0x3, 2) && /* presym_18, 126 zeroes */
- put_bits(&os, 115, 7));
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x1, 2)); /* presym_2 */
- ASSERT(put_bits(&os, 0x0, 2)); /* presym_0 */
- ASSERT(put_bits(&os, 0x2, 2)); /* presym_1 */
-
- /* Literals */
- ASSERT(put_bits(&os, 0x0, 2)); /* litlensym_254 */
- ASSERT(put_bits(&os, 0x2, 2)); /* litlensym_255 */
-
- /* Match */
- ASSERT(put_bits(&os, 0x3, 2)); /* litlensym_257 */
- ASSERT(put_bits(&os, 0x0, 1)); /* offsetsym_1 */
-
- /* End of block */
- ASSERT(put_bits(&os, 0x1, 2)); /* litlensym_256 */
-
- ASSERT(flush_bits(&os));
-
- verify_decompression(in, os.next - in, out, sizeof(out),
- expected_out, sizeof(expected_out));
-}
-
-int
-tmain(int argc, tchar *argv[])
-{
- begin_program(argv);
-
- test_empty_offset_code();
- test_singleton_litrunlen_code();
- test_singleton_offset_code();
- test_singleton_offset_code_notsymzero();
-
- return 0;
-}
diff --git a/util/compress/libdeflate/programs/test_litrunlen_overflow.c b/util/compress/libdeflate/programs/test_litrunlen_overflow.c
deleted file mode 100644
index 7a9d5b1fe..000000000
--- a/util/compress/libdeflate/programs/test_litrunlen_overflow.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * test_litrunlen_overflow.c
- *
- * Regression test for commit f2f0df727444 ("deflate_compress: fix corruption
- * with long literal run"). Try to compress a file longer than 65535 bytes
- * where no 2-byte sequence (3 would be sufficient) is repeated <= 32768 bytes
- * apart, and the distribution of bytes remains constant throughout, and yet not
- * all bytes are used so the data is still slightly compressible. There will be
- * no matches in this data, but the compressor should still output a compressed
- * block, and this block should contain more than 65535 consecutive literals,
- * which triggered the bug.
- *
- * Note: on random data, this situation is extremely unlikely if the compressor
- * uses all matches it finds, since random data will on average have a 3-byte
- * match every (256**3)/32768 = 512 bytes.
- */
-
-#include "test_util.h"
-
-int
-tmain(int argc, tchar *argv[])
-{
- const size_t data_size = 2 * 250 * 251;
- u8 *orig_data, *compressed_data, *decompressed_data;
- int i, stride, multiple, j = 0;
- struct libdeflate_decompressor *d;
- static const int levels[] = { 3, 6, 12 };
-
- begin_program(argv);
-
- orig_data = xmalloc(data_size);
- compressed_data = xmalloc(data_size);
- decompressed_data = xmalloc(data_size);
-
- for (i = 0; i < 2; i++) {
- for (stride = 1; stride < 251; stride++) {
- for (multiple = 0; multiple < 251; multiple++)
- orig_data[j++] = (stride * multiple) % 251;
- }
- }
- ASSERT(j == data_size);
-
- d = libdeflate_alloc_decompressor();
- ASSERT(d != NULL);
-
- for (i = 0; i < ARRAY_LEN(levels); i++) {
- struct libdeflate_compressor *c;
- size_t csize;
- enum libdeflate_result res;
-
- c = libdeflate_alloc_compressor(levels[i]);
- ASSERT(c != NULL);
-
- csize = libdeflate_deflate_compress(c, orig_data, data_size,
- compressed_data, data_size);
- ASSERT(csize > 0 && csize < data_size);
-
- res = libdeflate_deflate_decompress(d, compressed_data, csize,
- decompressed_data,
- data_size, NULL);
- ASSERT(res == LIBDEFLATE_SUCCESS);
- ASSERT(memcmp(orig_data, decompressed_data, data_size) == 0);
-
- libdeflate_free_compressor(c);
- }
-
- libdeflate_free_decompressor(d);
- free(orig_data);
- free(compressed_data);
- free(decompressed_data);
- return 0;
-}
diff --git a/util/compress/libdeflate/programs/test_slow_decompression.c b/util/compress/libdeflate/programs/test_slow_decompression.c
deleted file mode 100644
index d5ac26245..000000000
--- a/util/compress/libdeflate/programs/test_slow_decompression.c
+++ /dev/null
@@ -1,472 +0,0 @@
-/*
- * test_slow_decompression.c
- *
- * Test how quickly libdeflate decompresses degenerate/malicious compressed data
- * streams that start new Huffman blocks extremely frequently.
- */
-
-#include "test_util.h"
-
-/*
- * Generate a DEFLATE stream containing all empty "static Huffman" blocks.
- *
- * libdeflate used to decompress this very slowly (~1000x slower than typical
- * data), but now it's much faster (only ~2x slower than typical data) because
- * now it skips rebuilding the decode tables for the static Huffman codes when
- * they're already loaded into the decompressor.
- */
-static void
-generate_empty_static_huffman_blocks(u8 *p, size_t len)
-{
- struct output_bitstream os = { .next = p, .end = p + len };
-
- while (put_bits(&os, 0, 1) && /* BFINAL: 0 */
- put_bits(&os, 1, 2) && /* BTYPE: STATIC_HUFFMAN */
- put_bits(&os, 0, 7)) /* litlensym_256 (end-of-block) */
- ;
-}
-
-static bool
-generate_empty_dynamic_huffman_block(struct output_bitstream *os)
-{
- int i;
-
- if (!put_bits(os, 0, 1)) /* BFINAL: 0 */
- return false;
- if (!put_bits(os, 2, 2)) /* BTYPE: DYNAMIC_HUFFMAN */
- return false;
-
- /*
- * Write a minimal Huffman code, then the end-of-block symbol.
- *
- * Litlen code:
- * litlensym_256 (end-of-block) freq=1 len=1 codeword=0
- * Offset code:
- * offsetsym_0 (unused) freq=0 len=1 codeword=0
- *
- * Litlen and offset codeword lengths:
- * [0..255] = 0 presym_{18,18}
- * [256] = 1 presym_1
- * [257] = 1 presym_1
- *
- * Precode:
- * presym_1 freq=2 len=1 codeword=0
- * presym_18 freq=2 len=1 codeword=1
- */
-
- if (!put_bits(os, 0, 5)) /* num_litlen_syms: 0 + 257 */
- return false;
- if (!put_bits(os, 0, 5)) /* num_offset_syms: 0 + 1 */
- return false;
- if (!put_bits(os, 14, 4)) /* num_explicit_precode_lens: 14 + 4 */
- return false;
- /*
- * Precode codeword lengths: order is
- * [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]
- */
- for (i = 0; i < 2; i++) { /* presym_{16,17}: len=0 */
- if (!put_bits(os, 0, 3))
- return false;
- }
- if (!put_bits(os, 1, 3)) /* presym_18: len=1 */
- return false;
- for (i = 0; i < 14; i++) { /* presym_{0,...,14}: len=0 */
- if (!put_bits(os, 0, 3))
- return false;
- }
- if (!put_bits(os, 1, 3)) /* presym_1: len=1 */
- return false;
-
- /* Litlen and offset codeword lengths */
- for (i = 0; i < 2; i++) {
- if (!put_bits(os, 1, 1) || /* presym_18, 128 zeroes */
- !put_bits(os, 117, 7))
- return false;
- }
- if (!put_bits(os, 0, 1)) /* presym_1 */
- return false;
- if (!put_bits(os, 0, 1)) /* presym_1 */
- return false;
- /* Done writing the Huffman codes */
-
- return put_bits(os, 0, 1); /* litlensym_256 (end-of-block) */
-}
-
-/*
- * Generate a DEFLATE stream containing all empty "dynamic Huffman" blocks.
- *
- * This is the worst known case currently, being ~100x slower to decompress than
- * typical data.
- */
-static void
-generate_empty_dynamic_huffman_blocks(u8 *p, size_t len)
-{
- struct output_bitstream os = { .next = p, .end = p + len };
-
- while (generate_empty_dynamic_huffman_block(&os))
- ;
-}
-
-#define NUM_ITERATIONS 100
-
-static u64
-do_test_libdeflate(const char *input_type, const u8 *in, size_t in_nbytes,
- u8 *out, size_t out_nbytes_avail)
-{
- struct libdeflate_decompressor *d;
- enum libdeflate_result res;
- u64 t;
- int i;
-
- d = libdeflate_alloc_decompressor();
- ASSERT(d != NULL);
-
- t = timer_ticks();
- for (i = 0; i < NUM_ITERATIONS; i++) {
- res = libdeflate_deflate_decompress(d, in, in_nbytes, out,
- out_nbytes_avail, NULL);
- ASSERT(res == LIBDEFLATE_BAD_DATA ||
- res == LIBDEFLATE_INSUFFICIENT_SPACE);
- }
- t = timer_ticks() - t;
-
- printf("[%s, libdeflate]: %"PRIu64" KB/s\n", input_type,
- timer_KB_per_s((u64)in_nbytes * NUM_ITERATIONS, t));
-
- libdeflate_free_decompressor(d);
- return t;
-}
-
-static u64
-do_test_zlib(const char *input_type, const u8 *in, size_t in_nbytes,
- u8 *out, size_t out_nbytes_avail)
-{
- z_stream z;
- int res;
- u64 t;
- int i;
-
- memset(&z, 0, sizeof(z));
- res = inflateInit2(&z, -15);
- ASSERT(res == Z_OK);
-
- t = timer_ticks();
- for (i = 0; i < NUM_ITERATIONS; i++) {
- inflateReset(&z);
- z.next_in = (void *)in;
- z.avail_in = in_nbytes;
- z.next_out = out;
- z.avail_out = out_nbytes_avail;
- res = inflate(&z, Z_FINISH);
- ASSERT(res == Z_BUF_ERROR || res == Z_DATA_ERROR);
- }
- t = timer_ticks() - t;
-
- printf("[%s, zlib ]: %"PRIu64" KB/s\n", input_type,
- timer_KB_per_s((u64)in_nbytes * NUM_ITERATIONS, t));
-
- inflateEnd(&z);
- return t;
-}
-
-/*
- * Test case from https://github.com/ebiggers/libdeflate/issues/33
- * with the gzip header and footer removed to leave just the DEFLATE stream
- */
-static const u8 orig_repro[3962] =
- "\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a"
- "\x6a\x6a\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20"
- "\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28"
- "\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11"
- "\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48"
- "\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80"
- "\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00"
- "\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea"
- "\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea"
- "\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48"
- "\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20"
- "\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00"
- "\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11"
- "\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x63"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92"
- "\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00"
- "\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48"
- "\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20"
- "\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00"
- "\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea"
- "\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48"
- "\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11"
- "\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00"
- "\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11"
- "\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63"
- "\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea"
- "\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x92\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a"
- "\x6a\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80"
- "\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00"
- "\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00"
- "\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x92\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a\x6a"
- "\x6a\x6a\x6a\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00"
- "\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80"
- "\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00"
- "\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04"
- "\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20"
- "\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28"
- "\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00"
- "\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04"
- "\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00"
- "\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28"
- "\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00"
- "\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x63\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04"
- "\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00"
- "\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28"
- "\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00"
- "\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04"
- "\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00"
- "\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28"
- "\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x92\x63\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00"
- "\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92"
- "\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00"
- "\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x63\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00"
- "\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80"
- "\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00"
- "\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92"
- "\x63\x00\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00"
- "\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04"
- "\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00\x20"
- "\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x00\xea\x04\x48\x00\x20\x80\x28"
- "\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1a\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00"
- "\xea\x04\x48\x00\x20\x80\x28\x00\x00\x11\x00\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b"
- "\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x1b\x92\x63\x00\x04\xea\x48\x00\x20"
- "\x80\x28\x00\x00\x11\x1b\x1b\x1b\x1b\x92\x63\x00\xea\x04\x48\x00"
- "\x20\x80\x28\x00\x00\x11\x00\x00\x01\x04\x00\x3f\x00\x00\x00\x00"
- "\x28\xf7\xff\x00\xff\xff\xff\xff\x00\x00";
-
-int
-tmain(int argc, tchar *argv[])
-{
- u8 in[4096];
- u8 out[10000];
- u64 t, tz;
-
- begin_program(argv);
-
- begin_performance_test();
-
- /* static huffman case */
- generate_empty_static_huffman_blocks(in, sizeof(in));
- t = do_test_libdeflate("static huffman", in, sizeof(in),
- out, sizeof(out));
- tz = do_test_zlib("static huffman", in, sizeof(in), out, sizeof(out));
- /*
- * libdeflate is faster than zlib in this case, e.g.
- * [static huffman, libdeflate]: 215861 KB/s
- * [static huffman, zlib ]: 73651 KB/s
- */
- putchar('\n');
- ASSERT(t < tz);
-
- /* dynamic huffman case */
- generate_empty_dynamic_huffman_blocks(in, sizeof(in));
- t = do_test_libdeflate("dynamic huffman", in, sizeof(in),
- out, sizeof(out));
- tz = do_test_zlib("dynamic huffman", in, sizeof(in), out, sizeof(out));
- /*
- * libdeflate is slower than zlib in this case, though not super bad.
- * [dynamic huffman, libdeflate]: 6277 KB/s
- * [dynamic huffman, zlib ]: 10419 KB/s
- * FIXME: make it faster.
- */
- putchar('\n');
- ASSERT(t < 4 * tz);
-
- /* original reproducer */
- t = do_test_libdeflate("original repro", orig_repro, sizeof(orig_repro),
- out, sizeof(out));
- tz = do_test_zlib("original repro", orig_repro, sizeof(orig_repro),
- out, sizeof(out));
- ASSERT(t < tz);
-
- return 0;
-}
diff --git a/util/compress/libdeflate/programs/test_trailing_bytes.c b/util/compress/libdeflate/programs/test_trailing_bytes.c
deleted file mode 100644
index 92609ff46..000000000
--- a/util/compress/libdeflate/programs/test_trailing_bytes.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * test_trailing_bytes.c
- *
- * Test that decompression correctly stops at the end of the first DEFLATE,
- * zlib, or gzip stream, and doesn't process any additional trailing bytes.
- */
-
-#include "test_util.h"
-
-static const struct {
- size_t (LIBDEFLATEAPI *compress)(
- struct libdeflate_compressor *compressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail);
- enum libdeflate_result (LIBDEFLATEAPI *decompress)(
- struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_out_nbytes_ret);
- enum libdeflate_result (LIBDEFLATEAPI *decompress_ex)(
- struct libdeflate_decompressor *decompressor,
- const void *in, size_t in_nbytes,
- void *out, size_t out_nbytes_avail,
- size_t *actual_in_nbytes_ret,
- size_t *actual_out_nbytes_ret);
-} codecs[] = {
- {
- .compress = libdeflate_deflate_compress,
- .decompress = libdeflate_deflate_decompress,
- .decompress_ex = libdeflate_deflate_decompress_ex,
- }, {
- .compress = libdeflate_zlib_compress,
- .decompress = libdeflate_zlib_decompress,
- .decompress_ex = libdeflate_zlib_decompress_ex,
- }, {
- .compress = libdeflate_gzip_compress,
- .decompress = libdeflate_gzip_decompress,
- .decompress_ex = libdeflate_gzip_decompress_ex,
- }
-};
-
-int
-tmain(int argc, tchar *argv[])
-{
- const size_t original_nbytes = 32768;
- const size_t compressed_nbytes_total = 32768;
- /*
- * Don't use the full buffer for compressed data, because we want to
- * test whether decompression can deal with additional trailing bytes.
- *
- * Note: we can't use a guarded buffer (i.e. a buffer where the byte
- * after compressed_nbytes is unmapped) because the decompressor may
- * read a few bytes beyond the end of the stream (but ultimately not
- * actually use those bytes) as long as they are within the buffer.
- */
- const size_t compressed_nbytes_avail = 30000;
- size_t i;
- u8 *original;
- u8 *compressed;
- u8 *decompressed;
- struct libdeflate_compressor *c;
- struct libdeflate_decompressor *d;
- size_t compressed_nbytes;
- enum libdeflate_result res;
- size_t actual_compressed_nbytes;
- size_t actual_decompressed_nbytes;
-
- begin_program(argv);
-
- ASSERT(compressed_nbytes_avail < compressed_nbytes_total);
-
- /* Prepare some dummy data to compress */
- original = xmalloc(original_nbytes);
- ASSERT(original != NULL);
- for (i = 0; i < original_nbytes; i++)
- original[i] = (i % 123) + (i % 1023);
-
- compressed = xmalloc(compressed_nbytes_total);
- ASSERT(compressed != NULL);
- memset(compressed, 0, compressed_nbytes_total);
-
- decompressed = xmalloc(original_nbytes);
- ASSERT(decompressed != NULL);
-
- c = libdeflate_alloc_compressor(6);
- ASSERT(c != NULL);
-
- d = libdeflate_alloc_decompressor();
- ASSERT(d != NULL);
-
- for (i = 0; i < ARRAY_LEN(codecs); i++) {
- compressed_nbytes = codecs[i].compress(c, original,
- original_nbytes,
- compressed,
- compressed_nbytes_avail);
- ASSERT(compressed_nbytes > 0);
- ASSERT(compressed_nbytes <= compressed_nbytes_avail);
-
- /* Test decompress() of stream that fills the whole buffer */
- actual_decompressed_nbytes = 0;
- memset(decompressed, 0, original_nbytes);
- res = codecs[i].decompress(d, compressed, compressed_nbytes,
- decompressed, original_nbytes,
- &actual_decompressed_nbytes);
- ASSERT(res == LIBDEFLATE_SUCCESS);
- ASSERT(actual_decompressed_nbytes == original_nbytes);
- ASSERT(memcmp(decompressed, original, original_nbytes) == 0);
-
- /* Test decompress_ex() of stream that fills the whole buffer */
- actual_compressed_nbytes = actual_decompressed_nbytes = 0;
- memset(decompressed, 0, original_nbytes);
- res = codecs[i].decompress_ex(d, compressed, compressed_nbytes,
- decompressed, original_nbytes,
- &actual_compressed_nbytes,
- &actual_decompressed_nbytes);
- ASSERT(res == LIBDEFLATE_SUCCESS);
- ASSERT(actual_compressed_nbytes == compressed_nbytes);
- ASSERT(actual_decompressed_nbytes == original_nbytes);
- ASSERT(memcmp(decompressed, original, original_nbytes) == 0);
-
- /* Test decompress() of stream with trailing bytes */
- actual_decompressed_nbytes = 0;
- memset(decompressed, 0, original_nbytes);
- res = codecs[i].decompress(d, compressed,
- compressed_nbytes_total,
- decompressed, original_nbytes,
- &actual_decompressed_nbytes);
- ASSERT(res == LIBDEFLATE_SUCCESS);
- ASSERT(actual_decompressed_nbytes == original_nbytes);
- ASSERT(memcmp(decompressed, original, original_nbytes) == 0);
-
- /* Test decompress_ex() of stream with trailing bytes */
- actual_compressed_nbytes = actual_decompressed_nbytes = 0;
- memset(decompressed, 0, original_nbytes);
- res = codecs[i].decompress_ex(d, compressed,
- compressed_nbytes_total,
- decompressed, original_nbytes,
- &actual_compressed_nbytes,
- &actual_decompressed_nbytes);
- ASSERT(res == LIBDEFLATE_SUCCESS);
- ASSERT(actual_compressed_nbytes == compressed_nbytes);
- ASSERT(actual_decompressed_nbytes == original_nbytes);
- ASSERT(memcmp(decompressed, original, original_nbytes) == 0);
- }
-
- free(original);
- free(compressed);
- free(decompressed);
- libdeflate_free_compressor(c);
- libdeflate_free_decompressor(d);
- return 0;
-}
diff --git a/util/compress/libdeflate/programs/test_util.c b/util/compress/libdeflate/programs/test_util.c
deleted file mode 100644
index 20e7c217f..000000000
--- a/util/compress/libdeflate/programs/test_util.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * test_util.c - utility functions for test programs
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _WIN32
-/* for MAP_ANONYMOUS or MAP_ANON, which unfortunately aren't part of POSIX... */
-# undef _POSIX_C_SOURCE
-# ifdef __APPLE__
-# define _DARWIN_C_SOURCE
-# elif defined(__linux__)
-# define _GNU_SOURCE
-# endif
-#endif
-
-#include "test_util.h"
-
-#include <fcntl.h>
-#include <time.h>
-#ifdef _WIN32
-# include <windows.h>
-#else
-# include <unistd.h>
-# include <sys/mman.h>
-# include <sys/time.h>
-#endif
-
-#ifndef MAP_ANONYMOUS
-# define MAP_ANONYMOUS MAP_ANON
-#endif
-
-/* Abort with an error message */
-_noreturn void
-assertion_failed(const char *expr, const char *file, int line)
-{
- msg("Assertion failed: %s at %s:%d", expr, file, line);
- abort();
-}
-
-void
-begin_performance_test(void)
-{
- /* Skip performance tests by default, since they can be flaky. */
- if (getenv("INCLUDE_PERF_TESTS") == NULL)
- exit(0);
-}
-
-static size_t
-get_page_size(void)
-{
-#ifdef _WIN32
- SYSTEM_INFO info;
-
- GetSystemInfo(&info);
- return info.dwPageSize;
-#else
- return sysconf(_SC_PAGESIZE);
-#endif
-}
-
-/* Allocate a buffer with guard pages */
-void
-alloc_guarded_buffer(size_t size, u8 **start_ret, u8 **end_ret)
-{
- const size_t pagesize = get_page_size();
- const size_t nr_pages = (size + pagesize - 1) / pagesize;
- u8 *base_addr;
- u8 *start, *end;
-#ifdef _WIN32
- DWORD oldProtect;
-#endif
-
- *start_ret = NULL;
- *end_ret = NULL;
-
-#ifdef _WIN32
- /* Allocate buffer and guard pages with no access. */
- base_addr = VirtualAlloc(NULL, (nr_pages + 2) * pagesize,
- MEM_COMMIT | MEM_RESERVE, PAGE_NOACCESS);
- if (!base_addr) {
- msg("Unable to allocate memory (VirtualAlloc): Windows error %u",
- (unsigned int)GetLastError());
- ASSERT(0);
- }
- start = base_addr + pagesize;
- end = start + (nr_pages * pagesize);
-
- /* Grant read+write access to just the buffer. */
- if (!VirtualProtect(start, end - start, PAGE_READWRITE, &oldProtect)) {
- msg("Unable to protect memory (VirtualProtect): Windows error %u",
- (unsigned int)GetLastError());
- VirtualFree(base_addr, 0, MEM_RELEASE);
- ASSERT(0);
- }
-#else
- /* Allocate buffer and guard pages. */
- base_addr = mmap(NULL, (nr_pages + 2) * pagesize, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
- if (base_addr == (u8 *)MAP_FAILED) {
- msg_errno("Unable to allocate memory (anonymous mmap)");
- ASSERT(0);
- }
- start = base_addr + pagesize;
- end = start + (nr_pages * pagesize);
-
- /* Unmap the guard pages. */
- munmap(base_addr, pagesize);
- munmap(end, pagesize);
-#endif
- *start_ret = start;
- *end_ret = end;
-}
-
-/* Free a buffer that was allocated by alloc_guarded_buffer() */
-void
-free_guarded_buffer(u8 *start, u8 *end)
-{
- if (!start)
- return;
-#ifdef _WIN32
- VirtualFree(start - get_page_size(), 0, MEM_RELEASE);
-#else
- munmap(start, end - start);
-#endif
-}
-
-/*
- * Return the number of timer ticks that have elapsed since some unspecified
- * point fixed at the start of program execution
- */
-u64
-timer_ticks(void)
-{
-#ifdef _WIN32
- LARGE_INTEGER count;
-
- QueryPerformanceCounter(&count);
- return count.QuadPart;
-#elif defined(HAVE_CLOCK_GETTIME)
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return (1000000000 * (u64)ts.tv_sec) + ts.tv_nsec;
-#else
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- return (1000000 * (u64)tv.tv_sec) + tv.tv_usec;
-#endif
-}
-
-/*
- * Return the number of timer ticks per second
- */
-static u64
-timer_frequency(void)
-{
-#ifdef _WIN32
- LARGE_INTEGER freq;
-
- QueryPerformanceFrequency(&freq);
- return freq.QuadPart;
-#elif defined(HAVE_CLOCK_GETTIME)
- return 1000000000;
-#else
- return 1000000;
-#endif
-}
-
-/*
- * Convert a number of elapsed timer ticks to milliseconds
- */
-u64 timer_ticks_to_ms(u64 ticks)
-{
- return ticks * 1000 / timer_frequency();
-}
-
-/*
- * Convert a byte count and a number of elapsed timer ticks to MB/s
- */
-u64 timer_MB_per_s(u64 bytes, u64 ticks)
-{
- return bytes * timer_frequency() / ticks / 1000000;
-}
-
-/*
- * Convert a byte count and a number of elapsed timer ticks to KB/s
- */
-u64 timer_KB_per_s(u64 bytes, u64 ticks)
-{
- return bytes * timer_frequency() / ticks / 1000;
-}
-
-bool
-put_bits(struct output_bitstream *os, machine_word_t bits, int num_bits)
-{
- os->bitbuf |= bits << os->bitcount;
- os->bitcount += num_bits;
- while (os->bitcount >= 8) {
- if (os->next == os->end)
- return false;
- *os->next++ = os->bitbuf;
- os->bitcount -= 8;
- os->bitbuf >>= 8;
- }
- return true;
-}
-
-bool
-flush_bits(struct output_bitstream *os)
-{
- while (os->bitcount > 0) {
- if (os->next == os->end)
- return false;
- *os->next++ = os->bitbuf;
- os->bitcount -= 8;
- os->bitbuf >>= 8;
- }
- os->bitcount = 0;
- return true;
-}
diff --git a/util/compress/libdeflate/programs/test_util.h b/util/compress/libdeflate/programs/test_util.h
deleted file mode 100644
index e10c154a2..000000000
--- a/util/compress/libdeflate/programs/test_util.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * test_util.h - utility functions for test programs
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef PROGRAMS_TEST_UTIL_H
-#define PROGRAMS_TEST_UTIL_H
-
-#include "prog_util.h"
-
-#include <zlib.h> /* for comparison purposes */
-
-#ifdef __GNUC__
-# define _noreturn __attribute__((noreturn))
-#else
-# define _noreturn
-#endif
-
-void _noreturn
-assertion_failed(const char *expr, const char *file, int line);
-
-#define ASSERT(expr) { if (unlikely(!(expr))) \
- assertion_failed(#expr, __FILE__, __LINE__); }
-
-void begin_performance_test(void);
-
-void alloc_guarded_buffer(size_t size, u8 **start_ret, u8 **end_ret);
-void free_guarded_buffer(u8 *start, u8 *end);
-
-u64 timer_ticks(void);
-u64 timer_ticks_to_ms(u64 ticks);
-u64 timer_MB_per_s(u64 bytes, u64 ticks);
-u64 timer_KB_per_s(u64 bytes, u64 ticks);
-
-struct output_bitstream {
- machine_word_t bitbuf;
- int bitcount;
- u8 *next;
- u8 *end;
-};
-
-bool put_bits(struct output_bitstream *os, machine_word_t bits, int num_bits);
-bool flush_bits(struct output_bitstream *os);
-
-#endif /* PROGRAMS_TEST_UTIL_H */
diff --git a/util/compress/libdeflate/programs/tgetopt.c b/util/compress/libdeflate/programs/tgetopt.c
deleted file mode 100644
index 868600d97..000000000
--- a/util/compress/libdeflate/programs/tgetopt.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * tgetopt.c - portable replacement for GNU getopt()
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "prog_util.h"
-
-tchar *toptarg;
-int toptind = 1, topterr = 1, toptopt;
-
-/*
- * This is a simple implementation of getopt(). It can be compiled with either
- * 'char' or 'wchar_t' as the character type.
- *
- * Do *not* use this implementation if you need any of the following features,
- * as they are not supported:
- * - Long options
- * - Option-related arguments retained in argv, not nulled out
- * - '+' and '-' characters in optstring
- */
-int
-tgetopt(int argc, tchar *argv[], const tchar *optstring)
-{
- static tchar empty[1];
- static tchar *nextchar;
- static bool done;
-
- if (toptind == 1) {
- /* Starting to scan a new argument vector */
- nextchar = NULL;
- done = false;
- }
-
- while (!done && (nextchar != NULL || toptind < argc)) {
- if (nextchar == NULL) {
- /* Scanning a new argument */
- tchar *arg = argv[toptind++];
- if (arg[0] == '-' && arg[1] != '\0') {
- if (arg[1] == '-' && arg[2] == '\0') {
- /* All args after "--" are nonoptions */
- argv[toptind - 1] = NULL;
- done = true;
- } else {
- /* Start of short option characters */
- nextchar = &arg[1];
- }
- }
- } else {
- /* More short options in previous arg */
- tchar opt = *nextchar;
- tchar *p = tstrchr(optstring, opt);
- if (p == NULL) {
- if (topterr)
- msg("invalid option -- '%"TC"'", opt);
- toptopt = opt;
- return '?';
- }
- /* 'opt' is a valid short option character */
- nextchar++;
- toptarg = NULL;
- if (*(p + 1) == ':') {
- /* 'opt' can take an argument */
- if (*nextchar != '\0') {
- /* Optarg is in same argv argument */
- toptarg = nextchar;
- nextchar = empty;
- } else if (toptind < argc && *(p + 2) != ':') {
- /* Optarg is next argv argument */
- argv[toptind - 1] = NULL;
- toptarg = argv[toptind++];
- } else if (*(p + 2) != ':') {
- if (topterr && *optstring != ':') {
- msg("option requires an "
- "argument -- '%"TC"'", opt);
- }
- toptopt = opt;
- opt = (*optstring == ':') ? ':' : '?';
- }
- }
- if (*nextchar == '\0') {
- argv[toptind - 1] = NULL;
- nextchar = NULL;
- }
- return opt;
- }
- }
-
- /* Done scanning. Move all nonoptions to the end, set optind to the
- * index of the first nonoption, and return -1. */
- toptind = argc;
- while (--argc > 0)
- if (argv[argc] != NULL)
- argv[--toptind] = argv[argc];
- done = true;
- return -1;
-}
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/Makefile b/util/compress/libdeflate/scripts/afl-fuzz/Makefile
deleted file mode 100644
index c819797ba..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-SRC := $(wildcard */*.c)
-EXE := $(SRC:.c=)
-
-CFLAGS := -O2 -s
-LDLIBS := -ldeflate
-LDFLAGS := -L../..
-CPPFLAGS := -I../..
-
-all:$(EXE)
-
-clean:
- rm -f $(EXE)
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/fuzz.c b/util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/fuzz.c
deleted file mode 100644
index d65d17e05..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/fuzz.c
+++ /dev/null
@@ -1,40 +0,0 @@
-#include <assert.h>
-#include <libdeflate.h>
-#include <string.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-
-int main(int argc, char **argv)
-{
- struct libdeflate_decompressor *d;
- struct libdeflate_compressor *c;
- int ret;
- int fd = open(argv[1], O_RDONLY);
- struct stat stbuf;
- assert(fd >= 0);
- ret = fstat(fd, &stbuf);
- assert(!ret);
-
- char in[stbuf.st_size];
- ret = read(fd, in, sizeof in);
- assert(ret == sizeof in);
-
- c = libdeflate_alloc_compressor(6);
- d = libdeflate_alloc_decompressor();
-
- char out[sizeof(in)];
- char checkarray[sizeof(in)];
-
- size_t csize = libdeflate_deflate_compress(c, in,sizeof in, out, sizeof out);
- if (csize) {
- enum libdeflate_result res;
- res = libdeflate_deflate_decompress(d, out, csize, checkarray, sizeof in, NULL);
- assert(!res);
- assert(!memcmp(in, checkarray, sizeof in));
- }
-
- libdeflate_free_compressor(c);
- libdeflate_free_decompressor(d);
- return 0;
-}
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/inputs/0 b/util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/inputs/0
deleted file mode 100644
index 875bce73a..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/deflate_compress/inputs/0
+++ /dev/null
Binary files differ
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/fuzz.c b/util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/fuzz.c
deleted file mode 100644
index 8cc4ce55c..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/fuzz.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <assert.h>
-#include <libdeflate.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-
-int main(int argc, char **argv)
-{
- struct libdeflate_decompressor *d;
- int ret;
- int fd = open(argv[1], O_RDONLY);
- struct stat stbuf;
- assert(fd >= 0);
- ret = fstat(fd, &stbuf);
- assert(!ret);
-
- char in[stbuf.st_size];
- ret = read(fd, in, sizeof in);
- assert(ret == sizeof in);
-
- char out[sizeof(in) * 3];
-
- d = libdeflate_alloc_decompressor();
-
- libdeflate_deflate_decompress(d, in, sizeof in, out, sizeof out, NULL);
- libdeflate_free_decompressor(d);
- return 0;
-}
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/inputs/0 b/util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/inputs/0
deleted file mode 100644
index 19e3a346e..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/deflate_decompress/inputs/0
+++ /dev/null
@@ -1,3 +0,0 @@
-uŽ1
-Â@EgÅBl5
-‚°VÅÒè6j—«X{i=•èl=€àΟ¬Ñlóßü™?tíÐç½D í¨ò=¯GÑ% ¾©—2xÔ‡7eðD½ÓÐs[ÔиUkÅ÷q¹ |R/åêµùë®°*F¢Mzš¼v°•`ÐÇórÐ1ªóB÷,lDuYj#0<ÅÕž2È0hE`¹øI°ÿìW \ No newline at end of file
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/fuzz.c b/util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/fuzz.c
deleted file mode 100644
index aec50804c..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/fuzz.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <assert.h>
-#include <libdeflate.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-
-int main(int argc, char **argv)
-{
- struct libdeflate_decompressor *d;
- int ret;
- int fd = open(argv[1], O_RDONLY);
- struct stat stbuf;
- assert(fd >= 0);
- ret = fstat(fd, &stbuf);
- assert(!ret);
-
- char in[stbuf.st_size];
- ret = read(fd, in, sizeof in);
- assert(ret == sizeof in);
-
- char out[sizeof(in) * 3];
-
- d = libdeflate_alloc_decompressor();
-
- libdeflate_gzip_decompress(d, in, sizeof in, out, sizeof out, NULL);
- libdeflate_free_decompressor(d);
- return 0;
-}
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/inputs/0 b/util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/inputs/0
deleted file mode 100644
index 813c75359..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/gzip_decompress/inputs/0
+++ /dev/null
Binary files differ
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/prepare_for_fuzz.sh b/util/compress/libdeflate/scripts/afl-fuzz/prepare_for_fuzz.sh
deleted file mode 100755
index 06911c18b..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/prepare_for_fuzz.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-set -e
-
-make -C ../../ clean
-make clean
-AFL_HARDEN=1 make CC=afl-gcc -C ../../
-AFL_HARDEN=1 make CC=afl-gcc
-
-for dir in $(find . -mindepth 1 -maxdepth 1 -type d); do
- rm -rf /tmp/$dir
- cp -va $dir /tmp/$dir
- mkdir -p /tmp/$dir/outputs
-done
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/fuzz.c b/util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/fuzz.c
deleted file mode 100644
index 797343bbf..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/fuzz.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <assert.h>
-#include <libdeflate.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-
-int main(int argc, char **argv)
-{
- struct libdeflate_decompressor *d;
- int ret;
- int fd = open(argv[1], O_RDONLY);
- struct stat stbuf;
- assert(fd >= 0);
- ret = fstat(fd, &stbuf);
- assert(!ret);
-
- char in[stbuf.st_size];
- ret = read(fd, in, sizeof in);
- assert(ret == sizeof in);
-
- char out[sizeof(in) * 3];
-
- d = libdeflate_alloc_decompressor();
-
- libdeflate_zlib_decompress(d, in, sizeof in, out, sizeof out, NULL);
- libdeflate_free_decompressor(d);
- return 0;
-}
diff --git a/util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/inputs/0 b/util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/inputs/0
deleted file mode 100644
index 292e9726d..000000000
--- a/util/compress/libdeflate/scripts/afl-fuzz/zlib_decompress/inputs/0
+++ /dev/null
@@ -1,3 +0,0 @@
-xœuŽ1
-Â@EgÅBl5
-‚°VÅÒè6j—«X{i=•èl=€àΟ¬Ñlóßü™?tíÐç½D í¨ò=¯GÑ% ¾©—2xÔ‡7eðD½ÓÐs[ÔиUkÅ÷q¹ |R/åêµùë®°*F¢Mzš¼v°•`ÐÇórÐ1ªóB÷,lDuYj#0<ÅÕž2È0hE`¹øI°ÿìWÂ-© \ No newline at end of file
diff --git a/util/compress/libdeflate/scripts/android_build.sh b/util/compress/libdeflate/scripts/android_build.sh
deleted file mode 100755
index 204ead2dc..000000000
--- a/util/compress/libdeflate/scripts/android_build.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-
-set -eu -o pipefail
-
-API_LEVEL=28
-ARCH=arm64
-CFLAGS=
-ENABLE_CRC=false
-ENABLE_CRYPTO=false
-NDKDIR=$HOME/android-ndk-r21d
-
-usage() {
- cat << EOF
-Usage: $0 [OPTION]... -- [MAKE_TARGET]...
-Build libdeflate for Android.
-
- --api-level=LEVEL Android API level to target (default: $API_LEVEL)
- --arch=ARCH Architecture: arm32|arm64 (default: $ARCH)
- --enable-crc Enable crc instructions
- --enable-crypto Enable crypto instructions
- --ndkdir=NDKDIR Android NDK directory (default: $NDKDIR)
-EOF
-}
-if ! options=$(getopt -o '' \
- -l 'api-level:,arch:,enable-crc,enable-crypto,help,ndkdir:' -- "$@"); then
- usage 1>&2
- exit 1
-fi
-
-eval set -- "$options"
-
-while [ $# -gt 0 ]; do
- case "$1" in
- --api-level)
- API_LEVEL="$2"
- shift
- ;;
- --arch)
- ARCH="$2"
- shift
- ;;
- --enable-crc)
- ENABLE_CRC=true
- ;;
- --enable-crypto)
- ENABLE_CRYPTO=true
- ;;
- --help)
- usage
- exit 0
- ;;
- --ndkdir)
- NDKDIR="$2"
- shift
- ;;
- --)
- shift
- break
- ;;
- *)
- echo 1>&2 "Unknown option \"$1\""
- usage 1>&2
- exit 1
- esac
- shift
-done
-
-BINDIR=$NDKDIR/toolchains/llvm/prebuilt/linux-x86_64/bin/
-
-case "$ARCH" in
-arm|arm32|aarch32)
- CC=$BINDIR/armv7a-linux-androideabi$API_LEVEL-clang
- if $ENABLE_CRC || $ENABLE_CRYPTO; then
- CFLAGS="-march=armv8-a"
- if $ENABLE_CRC; then
- CFLAGS+=" -mcrc"
- else
- CFLAGS+=" -mnocrc"
- fi
- if $ENABLE_CRYPTO; then
- CFLAGS+=" -mfpu=crypto-neon-fp-armv8"
- else
- CFLAGS+=" -mfpu=neon"
- fi
- fi
- ;;
-arm64|aarch64)
- CC=$BINDIR/aarch64-linux-android$API_LEVEL-clang
- features=""
- if $ENABLE_CRC; then
- features+="+crc"
- fi
- if $ENABLE_CRYPTO; then
- features+="+crypto"
- fi
- if [ -n "$features" ]; then
- CFLAGS="-march=armv8-a$features"
- fi
- ;;
-*)
- echo 1>&2 "Unknown architecture: \"$ARCH\""
- usage 1>&2
- exit 1
-esac
-
-cmd=(make "-j$(grep -c processor /proc/cpuinfo)" "CC=$CC" "CFLAGS=$CFLAGS" "$@")
-echo "${cmd[*]}"
-"${cmd[@]}"
diff --git a/util/compress/libdeflate/scripts/android_tests.sh b/util/compress/libdeflate/scripts/android_tests.sh
deleted file mode 100755
index fe70ce906..000000000
--- a/util/compress/libdeflate/scripts/android_tests.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-#
-# Test libdeflate on a connected arm64 Android device.
-# Requires the Android NDK (release 19 or later) and adb.
-
-set -eu -o pipefail
-cd "$(dirname "$0")/.."
-
-if [ $# -ne 0 ]; then
- echo 1>&2 "Usage: $0"
- exit 2
-fi
-
-# Use NDKDIR if specified in environment, else use default value.
-: "${NDKDIR:=$HOME/android-ndk-r21d}"
-if [ ! -e "$NDKDIR" ]; then
- cat 1>&2 << EOF
-Android NDK was not found in NDKDIR=$NDKDIR! Set the
-environmental variable NDKDIR to the location of your Android NDK installation.
-EOF
- exit 1
-fi
-
-CLEANUP_CMDS=()
-cleanup() {
- for cmd in "${CLEANUP_CMDS[@]}"; do
- eval "$cmd"
- done
-}
-trap cleanup EXIT
-
-# Use TESTDATA if specified in environment, else generate it.
-if [ -z "${TESTDATA:-}" ]; then
- # Generate default TESTDATA file.
- TESTDATA=$(mktemp -t libdeflate_testdata.XXXXXXXXXX)
- export TESTDATA
- CLEANUP_CMDS+=("rm -f '$TESTDATA'")
- find . '(' -name '*.c' -o -name '*.h' -o -name '*.sh' ')' \
- -exec cat '{}' ';' | head -c 1000000 > "$TESTDATA"
-fi
-
-TMPDIR=$(mktemp -d -t libdeflate_test.XXXXXXXXX)
-CLEANUP_CMDS+=("rm -r '$TMPDIR'")
-
-android_build_and_test() {
- echo "Running Android tests with $*"
-
- ./scripts/android_build.sh --ndkdir="$NDKDIR" "$@" \
- all test_programs > /dev/null
- adb push "$TESTDATA" ./scripts/exec_tests.sh benchmark test_* \
- /data/local/tmp/ > /dev/null
-
- # Note: adb shell always returns 0, even if the shell command fails...
- adb shell "cd /data/local/tmp && WRAPPER= TESTDATA=$(basename "$TESTDATA") sh exec_tests.sh" \
- > "$TMPDIR/adb.out"
- if ! grep -q "exec_tests finished successfully" "$TMPDIR/adb.out"; then
- echo 1>&2 "Android test failure! adb shell output:"
- cat "$TMPDIR/adb.out"
- exit 1
- fi
-}
-
-for arch in arm32 arm64; do
- android_build_and_test --arch=$arch
- android_build_and_test --arch=$arch --enable-crc
- android_build_and_test --arch=$arch --enable-crypto
- android_build_and_test --arch=$arch --enable-crc --enable-crypto
-done
-echo "Android tests passed"
diff --git a/util/compress/libdeflate/scripts/checksum_benchmarks.sh b/util/compress/libdeflate/scripts/checksum_benchmarks.sh
deleted file mode 100755
index 23b5984eb..000000000
--- a/util/compress/libdeflate/scripts/checksum_benchmarks.sh
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/bin/bash
-
-set -eu -o pipefail
-
-have_cpu_feature() {
- local feature="$1"
- local tag
- case $ARCH in
- arm*|aarch*)
- tag="Features"
- ;;
- *)
- tag="flags"
- ;;
- esac
- grep -q "^$tag"$'[ \t]'"*:.*\<$feature\>" /proc/cpuinfo
-}
-
-make_and_test() {
- # Build the checksum program and tests. Set the special test support
- # flag to get support for LIBDEFLATE_DISABLE_CPU_FEATURES.
- make "$@" TEST_SUPPORT__DO_NOT_USE=1 checksum test_checksums > /dev/null
-
- # Run the checksum tests, for good measure. (This isn't actually part
- # of the benchmarking.)
- ./test_checksums > /dev/null
-}
-
-__do_benchmark() {
- local impl="$1" speed
- shift
- local flags=("$@")
-
- speed=$(./checksum "${CKSUM_FLAGS[@]}" "${flags[@]}" -t "$FILE" | \
- grep -o '[0-9]\+ MB/s' | grep -o '[0-9]\+')
- printf "%-45s%-10s\n" "$CKSUM_NAME ($impl)" "$speed"
-}
-
-do_benchmark() {
- local impl="$1"
-
- if [ "$impl" = zlib ]; then
- __do_benchmark "$impl" "-Z"
- else
- make_and_test CFLAGS="${EXTRA_CFLAGS[*]}"
- __do_benchmark "libdeflate, $impl"
- if [ "$ARCH" = x86_64 ]; then
- make_and_test CFLAGS="-m32 ${EXTRA_CFLAGS[*]}"
- __do_benchmark "libdeflate, $impl, 32-bit"
- fi
- fi
-}
-
-sort_by_speed() {
- awk '{print $NF, $0}' | sort -nr | cut -f2- -d' '
-}
-
-disable_cpu_feature() {
- local name="$1"
- shift
- local extra_cflags=("$@")
-
- LIBDEFLATE_DISABLE_CPU_FEATURES+=",$name"
- EXTRA_CFLAGS+=("${extra_cflags[@]}")
-}
-
-cleanup() {
- if $USING_TMPFILE; then
- rm "$FILE"
- fi
-}
-
-ARCH="$(uname -m)"
-USING_TMPFILE=false
-
-if (( $# > 1 )); then
- echo "Usage: $0 [FILE]" 1>&2
- exit 1
-fi
-
-trap cleanup EXIT
-
-if (( $# == 0 )); then
- # Generate default test data file.
- FILE=$(mktemp -t checksum_testdata.XXXXXXXXXX)
- USING_TMPFILE=true
- echo "Generating 100 MB test file: $FILE"
- head -c 100000000 /dev/urandom > "$FILE"
-else
- FILE="$1"
-fi
-
-cat << EOF
-Method Speed (MB/s)
------- ------------
-EOF
-
-# CRC-32
-CKSUM_NAME="CRC-32"
-CKSUM_FLAGS=()
-EXTRA_CFLAGS=()
-export LIBDEFLATE_DISABLE_CPU_FEATURES=""
-{
-case $ARCH in
-i386|x86_64)
- if have_cpu_feature pclmulqdq && have_cpu_feature avx; then
- do_benchmark "PCLMUL/AVX"
- disable_cpu_feature "avx" "-mno-avx"
- fi
- if have_cpu_feature pclmulqdq; then
- do_benchmark "PCLMUL"
- disable_cpu_feature "pclmul" "-mno-pclmul"
- fi
- ;;
-arm*|aarch*)
- if have_cpu_feature crc32; then
- do_benchmark "ARM"
- disable_cpu_feature "crc32" "-march=armv8-a+nocrc"
- fi
- if have_cpu_feature pmull; then
- do_benchmark "PMULL"
- disable_cpu_feature "pmull" "-march=armv8-a+nocrc+nocrypto"
- fi
- ;;
-esac
-do_benchmark "generic"
-do_benchmark "zlib"
-} | sort_by_speed
-
-# Adler-32
-CKSUM_NAME="Adler-32"
-CKSUM_FLAGS=(-A)
-EXTRA_CFLAGS=()
-export LIBDEFLATE_DISABLE_CPU_FEATURES=""
-echo
-{
-case $ARCH in
-i386|x86_64)
- if have_cpu_feature avx512bw; then
- do_benchmark "AVX-512BW"
- disable_cpu_feature "avx512bw" "-mno-avx512bw"
- fi
- if have_cpu_feature avx2; then
- do_benchmark "AVX2"
- disable_cpu_feature "avx2" "-mno-avx2"
- fi
- if have_cpu_feature sse2; then
- do_benchmark "SSE2"
- disable_cpu_feature "sse2" "-mno-sse2"
- fi
- ;;
-arm*)
- if have_cpu_feature neon; then
- do_benchmark "NEON"
- disable_cpu_feature "neon" "-mfpu=vfpv3"
- fi
- ;;
-aarch*)
- if have_cpu_feature asimd; then
- do_benchmark "NEON"
- disable_cpu_feature "neon" "-march=armv8-a+nosimd"
- fi
- ;;
-esac
-do_benchmark "generic"
-do_benchmark "zlib"
-} | sort_by_speed
diff --git a/util/compress/libdeflate/scripts/detect.sh b/util/compress/libdeflate/scripts/detect.sh
deleted file mode 100755
index 93064b8b2..000000000
--- a/util/compress/libdeflate/scripts/detect.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/sh
-
-set -eu
-
-# Use CC if specified in environment, else default to "cc".
-: "${CC:=cc}"
-
-# Use CFLAGS if specified in environment.
-: "${CFLAGS:=}"
-
-echo "/* THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT. */"
-echo "#ifndef CONFIG_H"
-echo "#define CONFIG_H"
-
-program_compiles() {
- echo "$1" | $CC $CFLAGS -Wno-error -x c - -o /dev/null > /dev/null 2>&1
-}
-
-check_function() {
- funcname=$1
- macro="HAVE_$(echo "$funcname" | tr '[:lower:]' '[:upper:]')"
-
- echo
- echo "/* Is the $funcname() function available? */"
- if program_compiles "int main() { $funcname(); }"; then
- echo "#define $macro 1"
- else
- echo "/* $macro is not set */"
- fi
-}
-
-have_stat_field() {
- program_compiles "#include <sys/types.h>
- #include <sys/stat.h>
- int main() { struct stat st; st.$1; }"
-}
-
-check_stat_nanosecond_precision() {
- echo
- echo "/* Does stat() provide nanosecond-precision timestamps? */"
- if have_stat_field st_atim; then
- echo "#define HAVE_STAT_NANOSECOND_PRECISION 1"
- elif have_stat_field st_atimespec; then
- # Nonstandard field names used by OS X and older BSDs
- echo "#define HAVE_STAT_NANOSECOND_PRECISION 1"
- echo "#define st_atim st_atimespec"
- echo "#define st_mtim st_mtimespec"
- echo "#define st_ctim st_ctimespec"
- else
- echo "/* HAVE_STAT_NANOSECOND_PRECISION is not set */"
- fi
-}
-
-check_function clock_gettime
-check_function futimens
-check_function futimes
-check_function posix_fadvise
-check_function posix_madvise
-
-check_stat_nanosecond_precision
-
-echo
-echo "#endif /* CONFIG_H */"
diff --git a/util/compress/libdeflate/scripts/exec_tests.sh b/util/compress/libdeflate/scripts/exec_tests.sh
deleted file mode 100644
index c748e423e..000000000
--- a/util/compress/libdeflate/scripts/exec_tests.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-#
-# Helper script used by run_tests.sh and android_tests.sh,
-# not intended to be run directly
-#
-
-set -eu
-
-run_cmd() {
- echo "$WRAPPER $*"
- $WRAPPER "$@" > /dev/null
-}
-
-for prog in ./test_*; do
- run_cmd "$prog"
-done
-
-for format in '' '-g' '-z'; do
- for ref_impl in '' '-Y' '-Z'; do
- run_cmd ./benchmark $format $ref_impl "$TESTDATA"
- done
-done
-for level in 0 1 3 7 9; do
- for ref_impl in '' '-Y'; do
- run_cmd ./benchmark -$level $ref_impl "$TESTDATA"
- done
-done
-for level in 0 1 3 7 9 12; do
- for ref_impl in '' '-Z'; do
- run_cmd ./benchmark -$level $ref_impl "$TESTDATA"
- done
-done
-
-echo "exec_tests finished successfully" # Needed for 'adb shell'
diff --git a/util/compress/libdeflate/scripts/gen_crc32_multipliers.c b/util/compress/libdeflate/scripts/gen_crc32_multipliers.c
deleted file mode 100644
index c607d73f8..000000000
--- a/util/compress/libdeflate/scripts/gen_crc32_multipliers.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * gen_crc32_multipliers.c
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <inttypes.h>
-#include <stdio.h>
-
-/* generator polynomial G(x) */
-#define CRCPOLY 0xEDB88320 /* G(x) without x^32 term */
-#define CRCPOLY_FULL (((uint64_t)CRCPOLY << 1) | 1) /* G(x) */
-
-/* Compute x^D mod G(x) */
-static uint32_t
-compute_multiplier(int D)
-{
- /* Start with x^0 mod G(x) */
- uint32_t remainder = 0x80000000;
-
- /* Each iteration, 'remainder' becomes x^i mod G(x) */
- for (int i = 1; i <= D; i++)
- remainder = (remainder >> 1) ^ ((remainder & 1) ? CRCPOLY : 0);
-
- /* Now 'remainder' is x^D mod G(x) */
- return remainder;
-}
-
-/* Compute floor(x^64 / G(x)) */
-static uint64_t
-compute_barrett_reduction_constant(void)
-{
- uint64_t quotient = 0;
- uint64_t dividend = 0x1;
-
- for (int i = 0; i < 64 - 32 + 1; i++) {
- if ((dividend >> i) & 1) {
- quotient |= (uint64_t)1 << i;
- dividend ^= CRCPOLY_FULL << i;
- }
- }
-
- return quotient;
-}
-
-/*
- * This program computes the constant multipliers needed for carryless
- * multiplication accelerated CRC-32. It assumes 128-bit vectors divided into
- * two 64-bit halves which are multiplied separately with different 32-bit
- * multipliers, producing two 95-bit products. For a given number of 128-bit
- * vectors per iteration, the program outputs a pair of multipliers, one for
- * each 64-bit half.
- *
- * Careful: all polynomials are "bit-reversed", meaning that the low-order bits
- * have the highest degree and the high-order bits have the lowest degree!
- */
-int
-main(void)
-{
- printf("\t/* Constants precomputed by gen_crc32_multipliers.c. "
- "Do not edit! */\n");
-
- /* High and low multipliers for each needed vector count */
- for (int order = 2; order >= 0; order--) {
- int vecs_per_iteration = 1 << order;
- int right = (128 * vecs_per_iteration) + 95;
- printf("\tconst __v2di multipliers_%d = (__v2di)"
- "{ 0x%08"PRIX32", 0x%08"PRIX32" };\n",
- vecs_per_iteration,
- compute_multiplier(right - 64) /* higher degree half */,
- compute_multiplier(right - 128) /* lower degree half */);
- }
-
- /* Multiplier for final 96 => 64 bit fold */
- printf("\tconst __v2di final_multiplier = (__v2di){ 0x%08"PRIX32" };\n",
- compute_multiplier(63));
-
- /* 32-bit mask */
- printf("\tconst __m128i mask32 = (__m128i)(__v4si){ 0xFFFFFFFF };\n");
-
- /* Constants for final 64 => 32 bit reduction */
- printf("\tconst __v2di barrett_reduction_constants =\n"
- "\t\t\t(__v2di){ 0x%016"PRIX64", 0x%016"PRIX64" };\n",
- compute_barrett_reduction_constant(), CRCPOLY_FULL);
-
- return 0;
-}
diff --git a/util/compress/libdeflate/scripts/gen_crc32_table.c b/util/compress/libdeflate/scripts/gen_crc32_table.c
deleted file mode 100644
index ab39376a7..000000000
--- a/util/compress/libdeflate/scripts/gen_crc32_table.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * gen_crc32_table.c - a program for CRC-32 table generation
- *
- * Originally public domain; changes after 2016-09-07 are copyrighted.
- *
- * Copyright 2016 Eric Biggers
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <stdint.h>
-#include <stdio.h>
-
-static uint32_t crc32_table[0x800];
-
-static uint32_t
-crc32_update_bit(uint32_t remainder, uint8_t next_bit)
-{
- return (remainder >> 1) ^
- (((remainder ^ next_bit) & 1) ? 0xEDB88320 : 0);
-}
-
-static uint32_t
-crc32_update_byte(uint32_t remainder, uint8_t next_byte)
-{
- for (int j = 0; j < 8; j++, next_byte >>= 1)
- remainder = crc32_update_bit(remainder, next_byte & 1);
- return remainder;
-}
-
-static void
-print_256_entries(const uint32_t *entries)
-{
- for (size_t i = 0; i < 256 / 4; i++) {
- printf("\t");
- for (size_t j = 0; j < 4; j++) {
- printf("0x%08x,", entries[i * 4 + j]);
- if (j != 3)
- printf(" ");
- }
- printf("\n");
- }
-}
-
-int
-main(void)
-{
- /* crc32_table[i] for 0 <= i < 0x100 is the CRC-32 of byte i. */
- for (int i = 0; i < 0x100; i++)
- crc32_table[i] = crc32_update_byte(0, i);
-
- /* crc32_table[i] for 0x100 <= i < 0x800 is the CRC-32 of byte i % 0x100
- * followed by i / 0x100 zero bytes. */
- for (int i = 0x100; i < 0x800; i++)
- crc32_table[i] = crc32_update_byte(crc32_table[i - 0x100], 0);
-
- printf("/*\n");
- printf(" * crc32_table.h - data table to accelerate CRC-32 computation\n");
- printf(" *\n");
- printf(" * THIS FILE WAS AUTOMATICALLY GENERATED "
- "BY gen_crc32_table.c. DO NOT EDIT.\n");
- printf(" */\n");
- printf("\n");
- printf("#include <stdint.h>\n");
- printf("\n");
- printf("static const uint32_t crc32_table[] = {\n");
- print_256_entries(&crc32_table[0x000]);
- printf("#if defined(CRC32_SLICE4) || defined(CRC32_SLICE8)\n");
- print_256_entries(&crc32_table[0x100]);
- print_256_entries(&crc32_table[0x200]);
- print_256_entries(&crc32_table[0x300]);
- printf("#endif /* CRC32_SLICE4 || CRC32_SLICE8 */\n");
- printf("#if defined(CRC32_SLICE8)\n");
- print_256_entries(&crc32_table[0x400]);
- print_256_entries(&crc32_table[0x500]);
- print_256_entries(&crc32_table[0x600]);
- print_256_entries(&crc32_table[0x700]);
- printf("#endif /* CRC32_SLICE8 */\n");
- printf("};\n");
- return 0;
-}
diff --git a/util/compress/libdeflate/scripts/gzip_tests.sh b/util/compress/libdeflate/scripts/gzip_tests.sh
deleted file mode 100755
index 58fe325bb..000000000
--- a/util/compress/libdeflate/scripts/gzip_tests.sh
+++ /dev/null
@@ -1,490 +0,0 @@
-#!/bin/bash
-#
-# Test script for libdeflate's gzip and gunzip programs.
-#
-# To run, you must set GZIP and GUNZIP in the environment to the absolute paths
-# to the gzip and gunzip programs to test. All tests should pass regardless of
-# whether the GNU versions or the libdeflate versions, or a combination, of
-# these programs are used.
-#
-# The environmental variable TESTDATA must also be set to a file containing
-# test data.
-#
-
-set -eu -o pipefail
-
-export -n GZIP GUNZIP TESTDATA
-
-TMPDIR="$(mktemp -d)"
-CURRENT_TEST=
-
-cleanup() {
- if [ -n "$CURRENT_TEST" ]; then
- echo "TEST FAILED: \"$CURRENT_TEST\""
- fi
- rm -rf -- "$TMPDIR"
-}
-
-trap cleanup EXIT
-
-TESTDATA="$(readlink -f "$TESTDATA")"
-cd "$TMPDIR"
-
-begin_test() {
- CURRENT_TEST="$1"
- rm -rf -- "${TMPDIR:?}"/*
- cp "$TESTDATA" file
-}
-
-gzip() {
- $GZIP "$@"
-}
-
-gunzip() {
- $GUNZIP "$@"
-}
-
-assert_status() {
- local expected_status="$1"
- local expected_msg="$2"
- shift 2
- (
- set +e
- { eval "$*" > /dev/null; } 2>&1
- local actual_status=$?
- if [ "$actual_status" != "$expected_status" ]; then
- echo 1>&2 "Command '$*' exited with status" \
- "$actual_status but expected status" \
- "$expected_status"
- exit 1
- fi
- exit 0
- ) > command_output
- if ! grep -E -q "$expected_msg" command_output; then
- echo 1>&2 "Expected output of command '$*' to match regex" \
- "'$expected_msg'"
- echo 1>&2 "Actual output was:"
- echo 1>&2 "---------------------------------------------------"
- cat 1>&2 command_output
- echo 1>&2 "---------------------------------------------------"
- return 1
- fi
-}
-
-assert_error() {
- assert_status 1 "$@"
-}
-
-assert_warning() {
- assert_status 2 "$@"
-}
-
-assert_skipped() {
- assert_warning '\<(ignored|skipping|unchanged)\>' "$@"
-}
-
-assert_equals() {
- local expected="$1"
- local actual="$2"
-
- if [ "$expected" != "$actual" ]; then
- echo 1>&2 "Expected '$expected', but got '$actual'"
- return 1
- fi
-}
-
-# Get the filesystem type.
-FSTYPE=$(df -T . | tail -1 | awk '{print $2}')
-
-# If gzip or gunzip is the GNU version, require that it supports the '-k'
-# option. This option was added in v1.6, released in 2013.
-check_version_prereq() {
- local prog=$1
-
- if ! echo | { $prog -k || true; } |& grep -q 'invalid option'; then
- return 0
- fi
- if ! $prog -V |& grep -q 'Free Software Foundation'; then
- echo 1>&2 "Unexpected case: not GNU $prog, but -k option is invalid"
- exit 1
- fi
- echo "GNU $prog is too old; skipping gzip/gunzip tests"
- exit 0
-}
-check_version_prereq gzip
-check_version_prereq gunzip
-
-begin_test 'Basic compression and decompression works'
-cp file orig
-gzip file
-[ ! -e file ] && [ -e file.gz ]
-gunzip file.gz
-[ -e file ] && [ ! -e file.gz ]
-cmp file orig
-
-
-begin_test 'gzip -d is gunzip'
-cp file orig
-gzip file
-gzip -d file.gz
-cmp file orig
-
-
-begin_test '-k (keep original file) works'
-cp file orig
-gzip -k file
-cmp file orig
-rm file
-cp file.gz orig.gz
-gunzip -k file.gz
-cmp file.gz orig.gz
-
-
-begin_test '-c (write to stdout) works'
-cp file orig
-gzip -k file
-gzip -c file > 2.gz
-cmp file orig
-cmp file.gz 2.gz
-gunzip -c 2.gz > file
-cmp file.gz 2.gz
-cmp file orig
-
-
-# Note: in some of the commands below, we intentionally use 'cat file | gzip'
-# rather than 'gzip < file', in order to test the use of a pipe. This produces
-# a shellcheck warning about 'cat' being unnecessary. Suppress that warning by
-# using { cat file; true; }.
-begin_test 'Reading from stdin works'
-gzip < file > 1.gz
-gzip - < file > 2.gz
-{ cat file; true; } | gzip > 3.gz
-{ cat file; true; } | gzip - > 4.gz
-cmp file <(gunzip < 1.gz)
-cmp file <(gunzip - < 2.gz)
-cmp file <({ cat 3.gz; true; } | gunzip)
-cmp file <({ cat 4.gz; true; } | gunzip -)
-
-
-begin_test '-n option is accepted'
-gzip -n file
-gunzip -n file.gz
-
-
-begin_test 'can specify multiple options'
-gzip -fk1 file
-cmp <(gzip -c -1 file) file.gz
-gunzip -kfd file.gz
-
-
-begin_test 'Compression levels'
-if [ "$GZIP" = /bin/gzip ]; then
- assert_error '\<invalid option\>' gzip -10
- max_level=9
-else
- for level in 13 99999 1a; do
- assert_error '\<Invalid compression level\>' gzip -$level
- done
- max_level=12
-fi
-for level in $(seq 1 $max_level); do
- gzip -c "-$level" file > "file$level"
- cmp file <(gunzip -c "file$level")
-done
-rm file command_output
-
-
-begin_test 'Overwriting output file requires -f'
-cp file orig
-echo -n > file.gz
-gzip -c file > 2.gz
-assert_warning 'already exists' gzip file </dev/null
-cmp file.gz /dev/null
-gzip -f file
-cmp 2.gz file.gz
-echo -n > file
-assert_warning 'already exists' gunzip file.gz </dev/null
-gunzip -f file.gz
-cmp file orig
-
-
-begin_test 'Nonexistent input file fails, even with -f'
-for prog in 'gzip' 'gzip -f' 'gunzip' 'gunzip -f'; do
- assert_error 'No such file or directory' "$prog" NONEXISTENT
-done
-
-
-begin_test 'Compressing already-suffixed file requires -f or -c'
-gzip file
-gzip -c file.gz > c.gz
-gzip file.gz 2>&1 >/dev/null | grep -q 'already has .gz suffix'
-[ -e file.gz ] && [ ! -e file.gz.gz ]
-gzip -f file.gz
-[ ! -e file.gz ] && [ -e file.gz.gz ]
-cmp file.gz.gz c.gz
-
-
-begin_test 'Decompressing unsuffixed file only works with -c'
-gzip file && mv file.gz file
-assert_skipped gunzip file
-assert_skipped gunzip -f file
-gunzip -c file > orig
-mv file file.gz && gunzip file.gz && cmp file orig
-
-
-begin_test '... unless there is a corresponding suffixed file'
-cp file orig
-gzip file
-[ ! -e file ] && [ -e file.gz ]
-gunzip -c file > tmp
-cmp tmp orig
-rm tmp
-ln -s NONEXISTENT file
-gunzip -c file > tmp
-cmp tmp orig
-rm tmp file
-gunzip file
-[ -e file ] && [ ! -e file.gz ]
-cmp file orig
-
-
-begin_test 'Directory is skipped, even with -f'
-mkdir dir
-mkdir dir.gz
-for opt in '' '-f' '-c'; do
- assert_skipped gzip $opt dir
-done
-#assert_skipped gzip dir.gz # XXX: GNU gzip warns, libdeflate gzip no-ops
-for opt in '' '-f' '-c'; do
- for name in dir dir.gz; do
- assert_skipped gunzip $opt $name
- done
-done
-
-
-begin_test '(gzip) symlink is rejected without -f or -c'
-ln -s file symlink1
-ln -s file symlink2
-assert_error 'Too many levels of symbolic links' gzip symlink1
-[ -e file ] && [ -e symlink1 ] && [ ! -e symlink1.gz ]
-gzip -f symlink1
-[ -e file ] && [ ! -e symlink1 ] && [ -e symlink1.gz ]
-gzip -c symlink2 > /dev/null
-
-
-begin_test '(gunzip) symlink is rejected without -f or -c'
-gzip file
-ln -s file.gz symlink1.gz
-ln -s file.gz symlink2.gz
-assert_error 'Too many levels of symbolic links' gunzip symlink1
-[ -e file.gz ] && [ -e symlink1.gz ] && [ ! -e symlink1 ]
-gunzip -f symlink1.gz
-[ -e file.gz ] && [ ! -e symlink1.gz ] && [ -e symlink1 ]
-gunzip -c symlink2.gz > /dev/null
-
-
-begin_test 'FIFO is skipped, even with -f'
-mkfifo foo
-mkfifo foo.gz
-assert_skipped gzip foo
-assert_skipped gzip -f foo
-#assert_skipped gzip -c foo # XXX: works with GNU gzip, not libdeflate's
-assert_skipped gunzip foo.gz
-assert_skipped gunzip -f foo.gz
-#assert_skipped gunzip -c foo.gz # XXX: works with GNU gzip, not libdeflate's
-
-
-begin_test '(gzip) overwriting symlink does not follow symlink'
-echo a > a
-echo b > b
-gzip a
-ln -s a.gz b.gz
-gzip -f b
-gunzip a.gz
-cmp <(echo a) a
-
-
-begin_test '(gunzip) overwriting symlink does not follow symlink'
-echo a > a
-echo b > b
-gzip b
-ln -s a b
-gunzip -f b.gz
-cmp <(echo a) a
-cmp <(echo b) b
-
-
-begin_test '(gzip) hard linked file skipped without -f or -c'
-cp file orig
-ln file link
-assert_equals 2 "$(stat -c %h file)"
-assert_skipped gzip file
-gzip -c file > /dev/null
-assert_equals 2 "$(stat -c %h file)"
-gzip -f file
-assert_equals 1 "$(stat -c %h link)"
-assert_equals 1 "$(stat -c %h file.gz)"
-cmp link orig
-# XXX: GNU gzip skips hard linked files with -k, libdeflate's doesn't
-
-
-begin_test '(gunzip) hard linked file skipped without -f or -c'
-gzip file
-ln file.gz link.gz
-cp file.gz orig.gz
-assert_equals 2 "$(stat -c %h file.gz)"
-assert_skipped gunzip file.gz
-gunzip -c file.gz > /dev/null
-assert_equals 2 "$(stat -c %h file.gz)"
-gunzip -f file
-assert_equals 1 "$(stat -c %h link.gz)"
-assert_equals 1 "$(stat -c %h file)"
-cmp link.gz orig.gz
-
-
-begin_test 'Multiple files'
-cp file file2
-gzip file file2
-[ ! -e file ] && [ ! -e file2 ] && [ -e file.gz ] && [ -e file2.gz ]
-gunzip file.gz file2.gz
-[ -e file ] && [ -e file2 ] && [ ! -e file.gz ] && [ ! -e file2.gz ]
-
-
-begin_test 'Multiple files, continue on warning'
-mkdir 1
-cp file 2
-assert_skipped gzip 1 2
-[ ! -e 1.gz ]
-cmp file <(gunzip -c 2.gz)
-rmdir 1
-mkdir 1.gz
-assert_skipped gunzip 1.gz 2.gz
-[ ! -e 1 ]
-cmp 2 file
-
-
-if (( $(id -u) != 0 )); then
- begin_test 'Multiple files, continue on error'
- cp file 1
- cp file 2
- chmod a-r 1
- assert_error 'Permission denied' gzip 1 2
- [ ! -e 1.gz ]
- cmp file <(gunzip -c 2.gz)
- rm -f 1
- cp 2.gz 1.gz
- chmod a-r 1.gz
- assert_error 'Permission denied' gunzip 1.gz 2.gz
- [ ! -e 1 ]
- cmp 2 file
-fi
-
-
-begin_test 'Compressing empty file'
-echo -n > empty
-gzip empty
-gunzip empty.gz
-cmp /dev/null empty
-
-
-begin_test 'Decompressing malformed file'
-echo -n > foo.gz
-assert_error '\<(not in gzip format|unexpected end of file)\>' \
- gunzip foo.gz
-echo 1 > foo.gz
-assert_error '\<not in gzip format\>' gunzip foo.gz
-echo abcdefgh > foo.gz
-assert_error '\<not in gzip format\>' gunzip foo.gz
-echo -ne '\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4b\x4c\x4a\x4e\x49\x24\x16\x73\x01\x00\x6c\x5b\xa2\x62\x2e\x00\x00\x00' \
- > foo.gz
-assert_error '\<(not in gzip format|crc error)\>' gunzip foo.gz
-
-
-for suf in .foo foo .blaaaaaaaaaaaaaaaargh; do
- begin_test "Custom suffix: $suf"
- gzip -S $suf file
- [ ! -e file ] && [ ! -e file.gz ] && [ -e file$suf ]
- assert_skipped gunzip file$suf
- gunzip -S $suf file$suf
- [ -e file ] && [ ! -e file.gz ] && [ ! -e file$suf ]
-done
-# DIFFERENCE: GNU gzip lower cases suffix, we don't
-
-
-begin_test 'Empty suffix is rejected'
-assert_error '\<invalid suffix\>' gzip -S '""' file
-assert_error '\<invalid suffix\>' gunzip -S '""' file
-
-
-begin_test 'Timestamps and mode are preserved'
-if [ "$FSTYPE" = shiftfs ]; then
- # In Travis CI, the filesystem (shiftfs) only supports seconds precision
- # timestamps. Nanosecond precision still sometimes seems to work,
- # probably due to caching, but it is unreliable.
- format='%a;%X;%Y'
-else
- format='%a;%x;%y'
-fi
-chmod 777 file
-orig_stat="$(stat -c "$format" file)"
-gzip file
-sleep 1
-gunzip file.gz
-assert_equals "$orig_stat" "$(stat -c "$format" file)"
-
-
-begin_test 'Decompressing multi-member gzip file'
-cat file file > orig
-gzip -c file > file.gz
-gzip -c file >> file.gz
-gunzip -f file.gz
-cmp file orig
-
-
-begin_test 'Decompressing multi-member gzip file (final member smaller)'
-echo 'hello world' > hello
-cat file hello > orig
-gzip -c file > file.gz
-gzip -c hello >> file.gz
-gunzip -f file.gz
-cmp file orig
-
-
-begin_test 'Help option'
-gzip -h 2>&1 | grep -q 'Usage'
-gunzip -h 2>&1 | grep -q 'Usage'
-
-
-begin_test 'Incorrect usage'
-for prog in gzip gunzip; do
- for opt in '--invalid-option' '-0'; do
- assert_error '\<(unrecognized|invalid) option\>' $prog $opt
- done
-done
-
-
-begin_test '-t (test) option works'
-good_files=(
-'H4sIAAAAAAAAA3PMSVTITVTIzi9JVABTIJ5jzpGZelwAX+86ehsAAAA='
-'H4sIAAAAAAAAAwvJSFUoLM1MzlZIKsovz1NIy69QyCrNLShWyC9LLVIoAUrnJFZVKqTkp+txAQBqzFDrLQAAAA==')
-bad_files=(
-'H4sIAO1YYmAAA3PMSVTITVTIzi9JVABTIJ5jzpGZelwAX+46ehsAAAA='
-'H4sIAO1YYmAAA3PMSVTITVTIzi85VABTIJ5jzpGZelwAX+86ehsAAAA='
-'H4sIAAAAAAAAA3PMSVTITVTIzi9JVABTIJ5jzpGZelwAX+86ehsBAAA='
-'H4sIAAAAAAAAAwvJSFUoLM1MzlZIKsovz1NIy69QyCrNLShWyC9LLVIogUrnJFZVKqTkp+txAQBqzFDrLQAAAA=='
-'H4sIAAAAAAAAAwvJSFUoLM1MzlZIKsovz1NIy69QyCrNLShWyC9L')
-for contents in "${good_files[@]}"; do
- echo "$contents" | base64 -d | gzip -t
-done
-for contents in "${bad_files[@]}"; do
- echo "$contents" | base64 -d > file
- assert_error '\<invalid compressed data|file corrupt|unexpected end of file|Out of memory\>' \
- gzip -t file
-done
-
-
-begin_test 'Version information'
-gzip -V | grep -q Copyright
-gunzip -V | grep -q Copyright
-
-CURRENT_TEST=
diff --git a/util/compress/libdeflate/scripts/make-windows-releases.sh b/util/compress/libdeflate/scripts/make-windows-releases.sh
deleted file mode 100755
index 1c143e106..000000000
--- a/util/compress/libdeflate/scripts/make-windows-releases.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-set -eu -o pipefail
-
-for arch in 'i686' 'x86_64'; do
- make clean
- make -j CC=${arch}-w64-mingw32-gcc CFLAGS="-Werror" all \
- benchmark.exe checksum.exe
- dir=libdeflate-$(git describe --tags | tr -d v)-windows-${arch}-bin
- rm -rf "$dir" "$dir.zip"
- mkdir "$dir"
- cp libdeflate.{dll,lib,def} libdeflatestatic.lib libdeflate.h ./*.exe \
- "$dir"
- ${arch}-w64-mingw32-strip "$dir/libdeflate.dll" "$dir"/*.exe
- for file in COPYING NEWS; do
- sed < $file > "$dir/${file}.txt" -e 's/$/\r/g'
- done
- sed < README.md > "$dir/README.md" -e 's/$/\r/g'
- (cd "$dir" && zip -r "../${dir}.zip" .)
-done
diff --git a/util/compress/libdeflate/scripts/msc_test.bat b/util/compress/libdeflate/scripts/msc_test.bat
deleted file mode 100755
index f5e44bd09..000000000
--- a/util/compress/libdeflate/scripts/msc_test.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-nmake /f Makefile.msc clean
-nmake /f Makefile.msc
-copy /y *.exe j:\exe\
diff --git a/util/compress/libdeflate/scripts/pgo_build.sh b/util/compress/libdeflate/scripts/pgo_build.sh
deleted file mode 100755
index 2eb2b2311..000000000
--- a/util/compress/libdeflate/scripts/pgo_build.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-# Try gcc profile-guided optimizations
-
-set -eu
-
-MAKE="make -j$(grep -c processor /proc/cpuinfo)"
-DATAFILE="$HOME/data/silesia"
-
-$MAKE benchmark > /dev/null
-echo "====================="
-echo "Original performance:"
-echo "---------------------"
-./benchmark "$@" "$DATAFILE"
-
-$MAKE CFLAGS=-fprofile-generate LDFLAGS=-fprofile-generate benchmark > /dev/null
-./benchmark "$@" "$DATAFILE" > /dev/null
-$MAKE CFLAGS=-fprofile-use benchmark > /dev/null
-rm -f {lib,programs}/*.gcda
-echo "=========================="
-echo "PGO-optimized performance:"
-echo "--------------------------"
-./benchmark "$@" "$DATAFILE"
diff --git a/util/compress/libdeflate/scripts/produce_gzip_benchmark_table.sh b/util/compress/libdeflate/scripts/produce_gzip_benchmark_table.sh
deleted file mode 100755
index 03fc927e5..000000000
--- a/util/compress/libdeflate/scripts/produce_gzip_benchmark_table.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-set -eu -o pipefail
-topdir="$(dirname "$0")/.."
-
-do_benchmark() {
- "$topdir/benchmark" -g -s "$(stat -c %s "$file")" "$@" "$file" \
- | grep Compressed | cut -f 4 -d ' '
-}
-
-echo "File | zlib -6 | zlib -9 | libdeflate -6 | libdeflate -9 | libdeflate -12"
-echo "-----|---------|---------|---------------|---------------|---------------"
-
-for file in "$@"; do
- echo -n "$(basename "$file")"
- results=()
- results+=("$(do_benchmark -Y -6)")
- results+=("$(do_benchmark -Y -9)")
- results+=("$(do_benchmark -6)")
- results+=("$(do_benchmark -9)")
- results+=("$(do_benchmark -12)")
- best=2000000000
- for result in "${results[@]}"; do
- if (( result < best)); then
- best=$result
- fi
- done
- for result in "${results[@]}"; do
- if (( result == best )); then
- em="**"
- else
- em=""
- fi
- echo -n " | ${em}${result}${em}"
- done
- echo
-done
diff --git a/util/compress/libdeflate/scripts/run_tests.sh b/util/compress/libdeflate/scripts/run_tests.sh
deleted file mode 100755
index bdfd139b2..000000000
--- a/util/compress/libdeflate/scripts/run_tests.sh
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/bin/bash
-#
-# Test script for libdeflate
-
-set -eu -o pipefail
-cd "$(dirname "$0")/.."
-
-if [ $# -ne 0 ]; then
- echo 1>&2 "Usage: $0"
- exit 2
-fi
-
-# Use CC if specified in environment, else default to "cc".
-: "${CC:=cc}"
-
-# Use CFLAGS if specified in environment.
-: "${CFLAGS:=}"
-
-CLEANUP_CMDS=()
-cleanup() {
- for cmd in "${CLEANUP_CMDS[@]}"; do
- eval "$cmd"
- done
-}
-trap cleanup EXIT
-
-# Use TESTDATA if specified in environment, else generate it.
-if [ -z "${TESTDATA:-}" ]; then
- # Generate default TESTDATA file.
- TESTDATA=$(mktemp -t libdeflate_testdata.XXXXXXXXXX)
- export TESTDATA
- CLEANUP_CMDS+=("rm -f '$TESTDATA'")
- find . '(' -name '*.c' -o -name '*.h' -o -name '*.sh' ')' \
- -exec cat '{}' ';' | head -c 1000000 > "$TESTDATA"
-fi
-
-TMPDIR=$(mktemp -d -t libdeflate_test.XXXXXXXXX)
-CLEANUP_CMDS+=("rm -r '$TMPDIR'")
-
-MAKE="make -j$(getconf _NPROCESSORS_ONLN)"
-
-CC_VERSION=$($CC --version | head -1)
-
-ARCH=$(uname -m)
-
-for skip in SKIP_FREESTANDING SKIP_VALGRIND SKIP_UBSAN SKIP_ASAN SKIP_CFI \
- SKIP_SHARED_LIB; do
- if [ "${!skip:-}" = "1" ]; then
- eval $skip=true
- else
- eval $skip=false
- fi
-done
-
-###############################################################################
-
-INDENT=0
-
-log() {
- echo -n "[$(date)] "
- head -c $(( INDENT * 4 )) /dev/zero | tr '\0' ' '
- echo "$@"
-}
-
-begin() {
- log "$@"
- (( INDENT++ )) || true
-}
-
-end() {
- (( INDENT-- )) || true
-}
-
-run_cmd() {
- log "$@"
- "$@" > /dev/null
-}
-
-fail() {
- echo 1>&2 "$@"
- exit 1
-}
-
-file_count() {
- local dir=$1
-
- find "$dir" -type f -o -type l | wc -l
-}
-
-cflags_supported() {
- # -Werror is needed here in order for old versions of clang to reject
- # invalid options.
- echo 'int main(void){ return 0; }' \
- | $CC $CFLAGS "$@" -Werror -x c - -o /dev/null 2>/dev/null
-}
-
-valgrind_version_at_least() {
- local want_vers=$1
- local vers
-
- vers=$(valgrind --version | grep -E -o '[0-9\.]+' | head -1)
-
- [ "$want_vers" = "$(echo -e "$vers\n$want_vers" | sort -V | head -1)" ]
-}
-
-build_and_run_tests() {
- local quick=false
- if [ "${1:-}" = "--quick" ]; then
- quick=true
- shift
- fi
-
- begin "CC=$CC CFLAGS=\"$CFLAGS\" WRAPPER=\"$WRAPPER\" $*"
-
- # Build libdeflate, including the test programs. Set the special test
- # support flag to get support for LIBDEFLATE_DISABLE_CPU_FEATURES.
- $MAKE "$@" TEST_SUPPORT__DO_NOT_USE=1 all test_programs > /dev/null
-
- # When not using -march=native, run the tests multiple times with
- # different combinations of CPU features disabled. This is needed to
- # test all variants of dynamically-dispatched code.
- #
- # For now, we aren't super exhausive in which combinations of features
- # we test disabling. We just disable the features roughly in order from
- # newest to oldest for each architecture, cumulatively. In practice,
- # that's good enough to cover all the code.
- local features=('')
- if ! [[ "$CFLAGS" =~ "-march=native" ]] && ! $quick; then
- case "$ARCH" in
- i386|x86_64)
- features+=(avx512bw avx2 avx bmi2 pclmul sse2)
- ;;
- arm*|aarch*)
- features+=(crc32 pmull neon)
- ;;
- esac
- fi
- local disable_str=""
- local feature
- for feature in "${features[@]}"; do
- if [ -n "$feature" ]; then
- if [ -n "$disable_str" ]; then
- disable_str+=","
- fi
- disable_str+="$feature"
- fi
- log "Using LIBDEFLATE_DISABLE_CPU_FEATURES=$disable_str"
- LIBDEFLATE_DISABLE_CPU_FEATURES="$disable_str" \
- sh ./scripts/exec_tests.sh > /dev/null
- done
- end
-}
-
-verify_freestanding_build() {
- # It is expected that sanitizer builds link to external functions.
- if [[ "$CFLAGS" =~ "-fsanitize" ]]; then
- return 0
- fi
- log "Verifying that freestanding build is really freestanding"
- if nm libdeflate.so | grep -q ' U '; then
- echo 1>&2 "Freestanding lib links to external functions!:"
- nm libdeflate.so | grep ' U '
- return 1
- fi
- if ldd libdeflate.so | grep -q -v '\<statically linked\>'; then
- echo 1>&2 "Freestanding lib links to external libraries!:"
- ldd libdeflate.so
- return 1
- fi
-}
-
-gzip_tests() {
- local gzips=("$PWD/gzip")
- local gunzips=("$PWD/gunzip")
- if [ "${1:-}" != "--quick" ]; then
- gzips+=(/bin/gzip)
- gunzips+=(/bin/gunzip)
- fi
- local gzip gunzip
-
- begin "Running gzip program tests with CC=\"$CC\" CFLAGS=\"$CFLAGS\""
- $MAKE gzip gunzip > /dev/null
- for gzip in "${gzips[@]}"; do
- for gunzip in "${gunzips[@]}"; do
- log "GZIP=$gzip, GUNZIP=$gunzip"
- GZIP="$gzip" GUNZIP="$gunzip" TESTDATA="$TESTDATA" \
- ./scripts/gzip_tests.sh
- done
- done
- end
-}
-
-do_run_tests() {
- build_and_run_tests "$@"
- if [ "${1:-}" != "--quick" ]; then
- if $SKIP_FREESTANDING; then
- log "Skipping freestanding build tests due to SKIP_FREESTANDING=1"
- else
- build_and_run_tests FREESTANDING=1
- verify_freestanding_build
- fi
- fi
- gzip_tests "$@"
-}
-
-check_symbol_prefixes() {
- log "Checking that all global symbols are prefixed with \"libdeflate_\""
- $MAKE libdeflate.a > /dev/null
- if nm libdeflate.a | grep ' T ' | grep -E -v " _?libdeflate_"; then
- fail "Some global symbols aren't prefixed with \"libdeflate_\""
- fi
- log "Checking that all exported symbols are prefixed with \"libdeflate\""
- $MAKE libdeflate.so > /dev/null
- if nm libdeflate.so | grep ' T ' \
- | grep -E -v " (libdeflate_|_init\>|_fini\>)"; then
- fail "Some exported symbols aren't prefixed with \"libdeflate_\""
- fi
-}
-
-test_use_shared_lib() {
- if $SKIP_SHARED_LIB; then
- log "Skipping USE_SHARED_LIB=1 tests due to SKIP_SHARED_LIB=1"
- return
- fi
- log "Testing USE_SHARED_LIB=1"
- $MAKE gzip > /dev/null
- if ldd gzip | grep -q 'libdeflate.so'; then
- fail "Binary should be statically linked by default"
- fi
- $MAKE USE_SHARED_LIB=1 all check > /dev/null
- ldd gzip > "$TMPDIR/ldd.out"
- if ! grep -q 'libdeflate.so' "$TMPDIR/ldd.out"; then
- cat 1>&2 "$TMPDIR/ldd.out"
- fail "Binary isn't dynamically linked"
- fi
- rm "$TMPDIR/ldd.out"
-}
-
-install_uninstall_tests() {
- local shell
-
- begin "Testing 'make install' and 'make uninstall'"
- for shell in '/bin/bash' '/bin/dash'; do
- log "Trying SHELL=$shell"
- $MAKE SHELL=$shell clean > /dev/null
- $MAKE SHELL=$shell DESTDIR="$TMPDIR/inst" install > /dev/null
- if (( "$(file_count "$TMPDIR/inst")" == 0 )); then
- fail "'make install' didn't install any files"
- fi
- make SHELL=$shell DESTDIR="$TMPDIR/inst" uninstall > /dev/null
- if (( "$(file_count "$TMPDIR/inst")" != 0 )); then
- fail "'make uninstall' didn't uninstall all files"
- fi
- rm -r "$TMPDIR/inst"
- done
- end
-}
-
-run_tests() {
- export WRAPPER="" # no wrapper by default; overridden by valgrind tests
- local cflags
-
- begin "Running tests"
- do_run_tests
- end
-
- cflags=("-O3")
- if cflags_supported "${cflags[@]}" "-march=native"; then
- cflags+=("-march=native")
- fi
- begin "Running tests with ${cflags[*]}"
- CFLAGS="$CFLAGS ${cflags[*]}" do_run_tests
- end
-
- # Need valgrind 3.9.0 for '--errors-for-leak-kinds=all'
- # Need valgrind 3.12.0 for armv8 crypto and crc instructions
- if $SKIP_VALGRIND; then
- log "Skipping valgrind tests due to SKIP_VALGRIND=1"
- elif valgrind_version_at_least 3.12.0; then
- begin "Running tests with Valgrind"
- WRAPPER="valgrind --quiet --error-exitcode=100 --leak-check=full --errors-for-leak-kinds=all" \
- do_run_tests --quick
- end
- fi
-
- cflags=("-fsanitize=undefined" "-fno-sanitize-recover=undefined")
- if $SKIP_UBSAN; then
- log "Skipping UBSAN tests due to SKIP_UBSAN=1"
- elif cflags_supported "${cflags[@]}"; then
- begin "Running tests with UBSAN"
- CFLAGS="$CFLAGS ${cflags[*]}" do_run_tests --quick
- end
- else
- log "Skipping UBSAN tests because compiler ($CC_VERSION) doesn't support UBSAN"
- fi
-
- cflags=("-fsanitize=address" "-fno-sanitize-recover=address")
- if $SKIP_ASAN; then
- log "Skipping ASAN tests due to SKIP_ASAN=1"
- elif cflags_supported "${cflags[@]}"; then
- begin "Running tests with ASAN"
- CFLAGS="$CFLAGS ${cflags[*]}" do_run_tests --quick
- end
- else
- log "Skipping ASAN tests because compiler ($CC_VERSION) doesn't support ASAN"
- fi
-
- cflags=("-fsanitize=cfi" "-fno-sanitize-recover=cfi" "-flto"
- "-fvisibility=hidden")
- if $SKIP_CFI; then
- log "Skipping CFI tests due to SKIP_CFI=1"
- elif cflags_supported "${cflags[@]}"; then
- begin "Running tests with CFI"
- CFLAGS="$CFLAGS ${cflags[*]}" AR=llvm-ar do_run_tests --quick
- end
- else
- log "Skipping CFI tests because compiler ($CC_VERSION) doesn't support CFI"
- fi
-
- install_uninstall_tests
- check_symbol_prefixes
- test_use_shared_lib
-}
-
-###############################################################################
-
-log "Starting libdeflate tests"
-run_tests
-log "All tests passed!"
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index ed2ac8897..410bfb0c1 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -177,7 +177,7 @@ func Retry(fn func() error) (err error) {
return LoggedRetry(fn, logging.Base())
}
-// getDecoratedLogger retruns a decorated logger that includes the readonly true/false, caller and extra fields.
+// getDecoratedLogger returns a decorated logger that includes the readonly true/false, caller and extra fields.
func (db *Accessor) getDecoratedLogger(fn idemFn, extras ...interface{}) logging.Logger {
log := db.logger().With("readonly", db.readOnly)
_, file, line, ok := runtime.Caller(3)
diff --git a/util/db/initialize.go b/util/db/initialize.go
new file mode 100644
index 000000000..1662a17b4
--- /dev/null
+++ b/util/db/initialize.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+)
+
+// Migration is used to upgrade a database from one version to the next.
+// The Migration slice is ordered and must contain all prior migrations
+// in order to determine which need to be called.
+type Migration func(ctx context.Context, tx *sql.Tx, newDatabase bool) error
+
+// Initialize creates or upgrades a DB accessor in a new atomic context.
+// The Migration slice is ordered and must contain all prior migrations
+// in order to determine which need to be called.
+func Initialize(accessor Accessor, migrations []Migration) error {
+ return accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return InitializeWithContext(ctx, tx, migrations)
+ })
+}
+
+// InitializeWithContext creates or upgrades a DB accessor.
+func InitializeWithContext(ctx context.Context, tx *sql.Tx, migrations []Migration) error {
+ // check current database version
+ dbVersion, err := GetUserVersion(ctx, tx)
+ if err != nil {
+ return ErrUnableToRead
+ }
+
+ version := int32(len(migrations))
+
+ // if database version is greater than supported by current binary, write a warning. This would keep the existing
+ // fallback behavior where we could use an older binary iff the schema happen to be backward compatible.
+ if dbVersion > version {
+ return MakeErrUnknownVersion(dbVersion, version)
+ }
+
+ // if database is not up to date run migration functions.
+ if dbVersion < version {
+ var newDatabase bool
+ for i := dbVersion; i < version; i++ {
+ err = migrations[i](ctx, tx, newDatabase)
+ if err != nil && err != ErrNoOpMigration {
+ return MakeErrUpgradeFailure(dbVersion, i)
+ }
+
+ // Something like this is used by the account DB to conditionally skip things.
+ if i == 0 && err != ErrNoOpMigration {
+ newDatabase = true
+ }
+
+ // update version
+ _, err = SetUserVersion(ctx, tx, i+1)
+ if err != nil {
+ return MakeErrUpgradeFailure(dbVersion, i)
+ }
+ }
+ }
+
+ return nil
+}
+
+// ErrUnableToRead is returned when the accessor cannot be read.
+var ErrUnableToRead = errors.New("unable to read database")
+
+// ErrNoOpMigration is returned when there was no work for the migration to perform.
+var ErrNoOpMigration = errors.New("migration no-op")
+
+// ErrUnknownVersion is returned when a migration to the current version is not available.
+type ErrUnknownVersion struct {
+ CurrentVersion int32
+ SupportedVersion int32
+}
+
+// Error implements the error interface.
+func (err *ErrUnknownVersion) Error() string {
+ return fmt.Sprintf("database schema version is %d, but algod only supports up to %d", err.CurrentVersion, err.SupportedVersion)
+}
+
+// MakeErrUnknownVersion makes an ErrUnknownVersion.
+func MakeErrUnknownVersion(currentVersion, supportedVersion int32) *ErrUnknownVersion {
+ return &ErrUnknownVersion{
+ CurrentVersion: currentVersion,
+ SupportedVersion: supportedVersion,
+ }
+}
+
+// ErrUpgradeFailure is returned when a migration returns an error.
+type ErrUpgradeFailure struct {
+ SchemaVersionFrom int32
+ SchemaVersionTo int32
+}
+
+// Error implements the error interface.
+func (err *ErrUpgradeFailure) Error() string {
+ return fmt.Sprintf("failed to upgrade database from schema %d to %d", err.SchemaVersionFrom, err.SchemaVersionTo)
+}
+
+// MakeErrUpgradeFailure makes an ErrUpgradeFailure.
+func MakeErrUpgradeFailure(from, to int32) *ErrUpgradeFailure {
+ return &ErrUpgradeFailure{
+ SchemaVersionFrom: from,
+ SchemaVersionTo: to,
+ }
+}
diff --git a/util/db/initialize_test.go b/util/db/initialize_test.go
new file mode 100644
index 000000000..46f2ec941
--- /dev/null
+++ b/util/db/initialize_test.go
@@ -0,0 +1,246 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// A few migrations functions to mix and match in tests.
+var (
+ createFoo = func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ _, err := tx.Exec(`CREATE TABLE foo (field INTEGER)`)
+ return err
+ }
+
+ addToFoo = func(amount int) Migration {
+ return func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ _, err := tx.Exec(`INSERT INTO foo (field) VALUES(?)`, amount)
+ return err
+ }
+ }
+
+ returnError = func(err error) Migration {
+ return func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ return err
+ }
+ }
+
+ // Check the sum of the field column.
+ verifyFoo = func(expected int) func(t *testing.T, ctx context.Context, tx *sql.Tx) {
+ return func(t *testing.T, ctx context.Context, tx *sql.Tx) {
+ var field int
+ err := tx.QueryRow(`SELECT COALESCE(SUM(field), 0) FROM foo`).Scan(&field)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, field)
+ }
+ }
+)
+
+func TestInitialize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testcases := []struct {
+ name string
+ migrations []Migration
+ expectedVersion int32
+ verify func(t *testing.T, ctx context.Context, tx *sql.Tx)
+ expectedError error
+ }{
+ {
+ name: "Simple",
+ migrations: []Migration{
+ createFoo,
+ },
+ expectedVersion: 1,
+ verify: verifyFoo(0),
+ },
+ {
+ name: "Multiple",
+ migrations: []Migration{
+ createFoo,
+ addToFoo(1),
+ addToFoo(10),
+ addToFoo(100),
+ addToFoo(1000),
+ },
+ expectedVersion: 5,
+ verify: verifyFoo(1111),
+ },
+ {
+ name: "Error + rollback",
+ migrations: []Migration{
+ createFoo,
+ addToFoo(1),
+ returnError(errors.New("did not finish")),
+ addToFoo(10),
+ },
+ expectedVersion: 0,
+ verify: nil,
+ expectedError: MakeErrUpgradeFailure(0, 2),
+ },
+ }
+
+ for _, testcase := range testcases {
+ testcase := testcase
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ // Setup
+ accessor, err := MakeAccessor("test_"+testcase.name, false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ err = Initialize(accessor, testcase.migrations)
+
+ // Check error.
+ if testcase.expectedError == nil {
+ require.NoError(t, err)
+ } else {
+ require.EqualError(t, err, testcase.expectedError.Error())
+ }
+
+ // Check results.
+ accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ version, err := GetUserVersion(ctx, tx)
+ assert.NoError(t, err)
+ assert.Equal(t, testcase.expectedVersion, version)
+
+ if testcase.verify != nil {
+ testcase.verify(t, ctx, tx)
+ }
+ return nil
+ })
+ })
+ }
+}
+
+func TestReadOnlyError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ expiredContext, expiredContextCancelFunc := context.WithCancel(context.Background())
+ expiredContextCancelFunc()
+ err := InitializeWithContext(expiredContext, nil, []Migration{createFoo})
+
+ require.EqualError(t, err, ErrUnableToRead.Error())
+}
+
+func TestUnknownVersionError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accessor, err := MakeAccessor("test-unknown-version", false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ migrations := []Migration{
+ createFoo,
+ addToFoo(1),
+ }
+
+ // Initialize to version 2
+ err = Initialize(accessor, migrations)
+ require.NoError(t, err)
+
+ // Initialize with only version 1
+ err = Initialize(accessor, []Migration{createFoo})
+ require.EqualError(t, err, MakeErrUnknownVersion(2, 1).Error())
+}
+
+func TestNewDBFlag(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var captureNewDB bool
+ newDBCheck := func(ctx context.Context, tx *sql.Tx, newDatabase bool) error {
+ captureNewDB = newDatabase
+ return nil
+ }
+
+ testcases := []struct {
+ name string
+ migrations []Migration
+ expectedNewDB bool
+ }{
+ {
+ name: "no-op-migration-0",
+ migrations: []Migration{
+ returnError(ErrNoOpMigration),
+ newDBCheck,
+ },
+ expectedNewDB: false,
+ },
+ {
+ name: "regular-migration",
+ migrations: []Migration{
+ newDBCheck,
+ newDBCheck,
+ },
+ expectedNewDB: true,
+ },
+ }
+
+ for _, testcase := range testcases {
+ t.Run(testcase.name, func(t *testing.T) {
+ accessor, err := MakeAccessor("test_"+testcase.name, false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ err = Initialize(accessor, testcase.migrations)
+ require.NoError(t, err)
+
+ require.Equal(t, testcase.expectedNewDB, captureNewDB)
+ })
+ }
+}
+
+func TestResumeUpgrading(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accessor, err := MakeAccessor("test-resume", false, true)
+ require.NoError(t, err)
+ defer accessor.Close()
+
+ // Initialize to version 2
+ migrations := []Migration{
+ createFoo,
+ addToFoo(1),
+ }
+ err = Initialize(accessor, migrations)
+ require.NoError(t, err)
+
+ // Re-initialize and upgrade to version 4
+ migrations = []Migration{
+ createFoo,
+ addToFoo(1),
+ addToFoo(10),
+ addToFoo(100),
+ }
+ err = Initialize(accessor, migrations)
+ require.NoError(t, err)
+
+ accessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ verifyFoo(111)(t, ctx, tx)
+ return nil
+ })
+}
diff --git a/util/s3/s3Helper.go b/util/s3/s3Helper.go
index a997f20a1..236911370 100644
--- a/util/s3/s3Helper.go
+++ b/util/s3/s3Helper.go
@@ -199,11 +199,6 @@ func makeS3Session(credentials *credentials.Credentials, bucket string) (helper
return
}
-// GetLatestUpdateVersion returns the latest version details for the 'node' package
-func (helper *Helper) GetLatestUpdateVersion(channel string) (maxVersion uint64, maxVersionName string, err error) {
- return helper.GetUpdateVersion(channel, 0)
-}
-
// GetLatestPackageVersion returns the latest version details for a given package name (eg node, install, tools)
func (helper *Helper) GetLatestPackageVersion(channel string, packageName string) (maxVersion uint64, maxVersionName string, err error) {
return helper.GetPackageVersion(channel, packageName, 0)
@@ -214,12 +209,6 @@ func (helper *Helper) GetLatestPackageFilesVersion(channel string, packagePrefix
return helper.GetPackageFilesVersion(channel, packagePrefix, 0)
}
-// GetUpdateVersion ensures the specified version is present and returns the name of the file, if found
-// Or if specificVersion == 0, returns the name of the file with the max version
-func (helper *Helper) GetUpdateVersion(channel string, specificVersion uint64) (maxVersion uint64, maxVersionName string, err error) {
- return helper.GetPackageVersion(channel, "node", specificVersion)
-}
-
// DownloadFile downloads the specified file to the provided Writer
func (helper *Helper) DownloadFile(name string, writer io.WriterAt) error {
downloader := s3manager.NewDownloader(helper.session)
diff --git a/util/timers/deadlineMonitor.go b/util/timers/deadlineMonitor.go
deleted file mode 100644
index 823d3d8cc..000000000
--- a/util/timers/deadlineMonitor.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-// Package timers provides a Clock abstraction useful for simulating timeouts.
-package timers
-
-import (
- "time"
-)
-
-// MonotonicDeadlineMonitor is a concerete implementation of the DeadlineMonitor interface
-type MonotonicDeadlineMonitor struct {
- clock WallClock
- expiration time.Duration
- expired bool
-}
-
-// MakeMonotonicDeadlineMonitor creates an instance of the MonotonicDeadlineMonitor type, implementing DeadlineMonitor
-func MakeMonotonicDeadlineMonitor(clock WallClock, expiration time.Duration) *MonotonicDeadlineMonitor {
- return &MonotonicDeadlineMonitor{
- clock: clock,
- expiration: expiration,
- }
-}
-
-// Expired return true if the deadline has passed, or false otherwise.
-func (m *MonotonicDeadlineMonitor) Expired() bool {
- if m.expired {
- return true
- }
- if m.clock.Since() >= m.expiration {
- m.expired = true
- }
- return m.expired
-}
diff --git a/util/timers/interface.go b/util/timers/interface.go
index 7e4393ab0..aec9a424c 100644
--- a/util/timers/interface.go
+++ b/util/timers/interface.go
@@ -41,24 +41,3 @@ type Clock interface {
// the same timeouts as the original Clock.
Decode([]byte) (Clock, error)
}
-
-// WallClock extends the Clock interface by providing a referencial timing, allowing to create
-// timed events that are differential.
-type WallClock interface {
- Clock
-
- // Since returns the time spent between the last time the clock was zeroed out and the current
- // wall clock time.
- Since() time.Duration
-
- // DeadlineMonitorAt returns a DeadlineMonitor that expires after the provided delta time from zero has passed.
- //
- // DeadlineMonitorAt must be called after Zero; otherwise, the context's behavior is undefined.
- DeadlineMonitorAt(at time.Duration) DeadlineMonitor
-}
-
-// DeadlineMonitor test to see if the deadline it was created for has been reached yet or not.
-type DeadlineMonitor interface {
- // Expired return true if the deadline has passed, or false otherwise.
- Expired() bool
-}
diff --git a/util/timers/monotonic.go b/util/timers/monotonic.go
index afe33ae35..2788a6caa 100644
--- a/util/timers/monotonic.go
+++ b/util/timers/monotonic.go
@@ -30,7 +30,7 @@ type Monotonic struct {
}
// MakeMonotonicClock creates a new monotonic clock with a given zero point.
-func MakeMonotonicClock(zero time.Time) WallClock {
+func MakeMonotonicClock(zero time.Time) Clock {
return &Monotonic{
zero: zero,
}
@@ -86,14 +86,3 @@ func (m *Monotonic) Decode(data []byte) (Clock, error) {
func (m *Monotonic) String() string {
return time.Time(m.zero).String()
}
-
-// Since returns the time that has passed between the time the clock was last zeroed out and now
-func (m *Monotonic) Since() time.Duration {
- return time.Since(m.zero)
-}
-
-// DeadlineMonitorAt returns a DeadlineMonitor that expires after the provided delta time from zero has passed.
-// The method must be called after Zero; otherwise, the context's behavior is undefined.
-func (m *Monotonic) DeadlineMonitorAt(at time.Duration) DeadlineMonitor {
- return MakeMonotonicDeadlineMonitor(m, at)
-}