summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2022-11-01 17:35:39 -0400
committerGitHub <noreply@github.com>2022-11-01 17:35:39 -0400
commit31a1099c3e0914a37ae3b286a96584e241d617cc (patch)
tree4266053499b63eb1fdf3c3f99e62a5f33f7cb613
parentafd8b05b9a8fa9b49a37159d975fc2367da72fcd (diff)
parent2c0dd9eb758036291228ab0855abbe938f57203b (diff)
Merge pull request #4730 from Algo-devops-service/relbeta3.12.0v3.12.0-beta
-rw-r--r--.golangci.yml41
-rw-r--r--agreement/actions.go23
-rw-r--r--agreement/actiontype_string.go2
-rw-r--r--agreement/asyncVoteVerifier.go8
-rw-r--r--agreement/bundle.go6
-rw-r--r--agreement/cadaver.go2
-rw-r--r--agreement/cryptoVerifier.go19
-rw-r--r--agreement/cryptoVerifier_test.go20
-rw-r--r--agreement/demux.go10
-rw-r--r--agreement/errors.go23
-rw-r--r--agreement/events.go25
-rw-r--r--agreement/events_test.go88
-rw-r--r--agreement/eventtype_string.go2
-rw-r--r--agreement/message.go12
-rw-r--r--agreement/message_test.go50
-rw-r--r--agreement/msgp_gen.go5659
-rw-r--r--agreement/msgp_gen_test.go1560
-rw-r--r--agreement/persistence.go104
-rw-r--r--agreement/persistence_test.go141
-rw-r--r--agreement/player.go5
-rw-r--r--agreement/player_permutation_test.go22
-rw-r--r--agreement/player_test.go20
-rw-r--r--agreement/proposalManager.go5
-rw-r--r--agreement/proposalStore.go8
-rw-r--r--agreement/proposalTable.go12
-rw-r--r--agreement/proposalTable_test.go69
-rw-r--r--agreement/proposalTracker.go4
-rw-r--r--agreement/proposalTrackerContract.go2
-rw-r--r--agreement/pseudonode.go4
-rw-r--r--agreement/router.go13
-rw-r--r--agreement/service.go4
-rw-r--r--agreement/sort.go84
-rw-r--r--agreement/sort_test.go60
-rw-r--r--agreement/voteAggregator.go4
-rw-r--r--agreement/voteAggregator_test.go2
-rw-r--r--agreement/voteAuxiliary.go2
-rw-r--r--agreement/voteTracker.go11
-rw-r--r--agreement/voteTrackerContract.go2
-rw-r--r--buildnumber.dat2
-rw-r--r--catchup/ledgerFetcher.go8
-rw-r--r--cmd/catchpointdump/file.go68
-rw-r--r--cmd/catchpointdump/net.go5
-rw-r--r--cmd/dispenser/index.html.tpl80
-rw-r--r--cmd/dispenser/server.go62
-rw-r--r--cmd/goal/README.md147
-rw-r--r--cmd/goal/application.go236
-rw-r--r--cmd/goal/box.go118
-rw-r--r--cmd/goal/commands.go13
-rw-r--r--cmd/goal/examples/boxes.teal60
-rw-r--r--cmd/goal/examples/clear.teal2
-rw-r--r--cmd/goal/formatting.go13
-rw-r--r--cmd/goal/formatting_test.go71
-rw-r--r--cmd/goal/interact.go6
-rw-r--r--cmd/goal/messages.go3
-rw-r--r--cmd/netgoal/README.md52
-rw-r--r--cmd/netgoal/generate.go57
-rw-r--r--cmd/opdoc/tmLanguage.go12
-rw-r--r--cmd/pingpong/README.md8
-rw-r--r--cmd/pingpong/runCmd.go42
-rw-r--r--cmd/tealdbg/localLedger.go4
-rw-r--r--components/mocks/mockCatchpointCatchupAccessor.go4
-rw-r--r--config/config_test.go6
-rw-r--r--config/consensus.go41
-rw-r--r--config/localTemplate.go10
-rw-r--r--config/local_defaults.go4
-rw-r--r--config/version.go2
-rw-r--r--daemon/algod/api/algod.oas2.json204
-rw-r--r--daemon/algod/api/algod.oas3.yml242
-rw-r--r--daemon/algod/api/client/restClient.go20
-rw-r--r--daemon/algod/api/server/common/handlers.go3
-rw-r--r--daemon/algod/api/server/v2/account.go14
-rw-r--r--daemon/algod/api/server/v2/dryrun.go4
-rw-r--r--daemon/algod/api/server/v2/dryrun_test.go83
-rw-r--r--daemon/algod/api/server/v2/errors.go1
-rw-r--r--daemon/algod/api/server/v2/generated/private/routes.go317
-rw-r--r--daemon/algod/api/server/v2/generated/private/types.go31
-rw-r--r--daemon/algod/api/server/v2/generated/routes.go527
-rw-r--r--daemon/algod/api/server/v2/generated/types.go45
-rw-r--r--daemon/algod/api/server/v2/handlers.go107
-rw-r--r--daemon/algod/api/server/v2/handlers_test.go41
-rw-r--r--daemon/algod/api/server/v2/test/handlers_resources_test.go12
-rw-r--r--daemon/algod/api/server/v2/utils.go7
-rw-r--r--daemon/algod/api/swagger.go (renamed from ledger/internal/export_test.go)17
-rw-r--r--data/account/participationRegistry.go2
-rw-r--r--data/account/registeryDbOps.go2
-rw-r--r--data/accountManager.go6
-rw-r--r--data/basics/msgp_gen.go152
-rw-r--r--data/basics/userBalance.go24
-rw-r--r--data/basics/userBalance_test.go97
-rw-r--r--data/pools/transactionPool.go2
-rw-r--r--data/transactions/application.go23
-rw-r--r--data/transactions/application_test.go9
-rw-r--r--data/transactions/json_test.go96
-rw-r--r--data/transactions/logic/README.md111
-rw-r--r--data/transactions/logic/README_in.md71
-rw-r--r--data/transactions/logic/TEAL_opcodes.md122
-rw-r--r--data/transactions/logic/assembler.go177
-rw-r--r--data/transactions/logic/assembler_test.go197
-rw-r--r--data/transactions/logic/backwardCompat_test.go2
-rw-r--r--data/transactions/logic/box.go318
-rw-r--r--data/transactions/logic/box_test.go602
-rw-r--r--data/transactions/logic/debugger_test.go4
-rw-r--r--data/transactions/logic/doc.go77
-rw-r--r--data/transactions/logic/doc_test.go4
-rw-r--r--data/transactions/logic/eval.go397
-rw-r--r--data/transactions/logic/evalAppTxn_test.go33
-rw-r--r--data/transactions/logic/evalCrypto_test.go34
-rw-r--r--data/transactions/logic/evalStateful_test.go285
-rw-r--r--data/transactions/logic/eval_test.go623
-rw-r--r--data/transactions/logic/export_test.go14
-rw-r--r--data/transactions/logic/fields.go37
-rw-r--r--data/transactions/logic/fields_string.go15
-rw-r--r--data/transactions/logic/fields_test.go64
-rw-r--r--data/transactions/logic/frames.go7
-rw-r--r--data/transactions/logic/frames_test.go2
-rw-r--r--data/transactions/logic/langspec.json140
-rw-r--r--data/transactions/logic/ledger_test.go179
-rw-r--r--data/transactions/logic/opcodes.go22
-rw-r--r--data/transactions/logic/pairing.go3
-rw-r--r--data/transactions/logic/parsing.go105
-rw-r--r--data/transactions/logic/parsing_test.go139
-rw-r--r--data/transactions/logic/teal.tmLanguage.json8
-rw-r--r--data/transactions/msgp_gen.go1641
-rw-r--r--data/transactions/msgp_gen_test.go60
-rw-r--r--data/transactions/teal_test.go2
-rw-r--r--data/transactions/transaction.go21
-rw-r--r--data/transactions/transaction_test.go59
-rw-r--r--data/txHandler.go2
-rw-r--r--data/txHandler_test.go320
-rw-r--r--data/txntest/txn.go50
-rwxr-xr-xdocker/releases/build_releases.sh2
-rw-r--r--gen/generate.go41
-rw-r--r--gen/generate_test.go125
-rw-r--r--gen/resources/genesis-balance.json290
-rw-r--r--gen/resources/genesis-base.json290
-rw-r--r--gen/walletData.go28
-rw-r--r--go.mod5
-rw-r--r--go.sum5
-rw-r--r--installer/config.json.example4
-rw-r--r--ledger/accountdb.go388
-rw-r--r--ledger/accountdb_test.go945
-rw-r--r--ledger/acctonline.go2
-rw-r--r--ledger/acctupdates.go369
-rw-r--r--ledger/acctupdates_test.go325
-rw-r--r--ledger/apply/application_test.go2
-rw-r--r--ledger/apply/payment.go9
-rw-r--r--ledger/apptxn_test.go (renamed from ledger/internal/apptxn_test.go)803
-rw-r--r--ledger/boxtxn_test.go663
-rw-r--r--ledger/catchpointtracker.go150
-rw-r--r--ledger/catchpointtracker_test.go2
-rw-r--r--ledger/catchpointwriter.go226
-rw-r--r--ledger/catchpointwriter_test.go466
-rw-r--r--ledger/catchupaccessor.go57
-rw-r--r--ledger/catchupaccessor_test.go40
-rw-r--r--ledger/double_test.go (renamed from ledger/internal/double_test.go)44
-rw-r--r--ledger/eval_simple_test.go545
-rw-r--r--ledger/evalindexer.go10
-rw-r--r--ledger/evalindexer_test.go5
-rw-r--r--ledger/internal/appcow.go25
-rw-r--r--ledger/internal/appcow_test.go96
-rw-r--r--ledger/internal/applications.go266
-rw-r--r--ledger/internal/applications_test.go365
-rw-r--r--ledger/internal/cow.go31
-rw-r--r--ledger/internal/cow_test.go6
-rw-r--r--ledger/internal/eval.go111
-rw-r--r--ledger/internal/eval_blackbox_test.go1256
-rw-r--r--ledger/internal/eval_test.go8
-rw-r--r--ledger/internal/prefetcher/prefetcher.go170
-rw-r--r--ledger/internal/prefetcher/prefetcher_alignment_test.go128
-rw-r--r--ledger/internal/prefetcher/prefetcher_test.go141
-rw-r--r--ledger/ledger.go17
-rw-r--r--ledger/ledger_test.go8
-rw-r--r--ledger/ledgercore/accountdata.go19
-rw-r--r--ledger/ledgercore/statedelta.go140
-rw-r--r--ledger/lrukv.go132
-rw-r--r--ledger/lrukv_test.go240
-rw-r--r--ledger/msgp_gen.go655
-rw-r--r--ledger/msgp_gen_test.go80
-rw-r--r--ledger/persistedkvs.go143
-rw-r--r--ledger/persistedkvs_test.go175
-rw-r--r--ledger/persistedresources_list.go2
-rw-r--r--ledger/simple_test.go187
-rw-r--r--ledger/testing/consensusRange.go106
-rw-r--r--ledger/testing/consensusRange_test.go58
-rw-r--r--ledger/testing/randomAccounts.go5
-rw-r--r--ledger/tracker.go6
-rw-r--r--ledger/trackerdb.go16
-rw-r--r--ledger/txnbench_test.go (renamed from ledger/internal/txnbench_test.go)7
-rw-r--r--libgoal/libgoal.go19
-rw-r--r--libgoal/transactions.go31
-rw-r--r--logging/telemetryspec/event.go21
-rw-r--r--logging/usage.go10
-rw-r--r--network/wsNetwork.go2
-rw-r--r--network/wsPeer.go12
-rw-r--r--network/wsPeer_test.go1
-rw-r--r--node/node.go9
-rw-r--r--protocol/codec_tester.go13
-rw-r--r--protocol/consensus.go7
-rw-r--r--rpcs/txService.go2
-rw-r--r--scripts/buildtools/versions2
-rwxr-xr-xscripts/dump_genesis.sh3
-rw-r--r--shared/pingpong/accounts.go106
-rw-r--r--shared/pingpong/config.go8
-rw-r--r--shared/pingpong/pingpong.go20
-rw-r--r--stateproof/builder.go8
-rw-r--r--test/commandandcontrol/cc_agent/main.go6
-rw-r--r--test/commandandcontrol/cc_client/main.go2
-rw-r--r--test/commandandcontrol/cc_service/main.go4
-rw-r--r--test/e2e-go/features/accountPerf/sixMillion_test.go6
-rw-r--r--test/e2e-go/features/transactions/accountv2_test.go321
-rw-r--r--test/e2e-go/features/transactions/app_pages_test.go10
-rw-r--r--test/e2e-go/features/transactions/application_test.go2
-rw-r--r--test/e2e-go/restAPI/restClient_test.go415
-rw-r--r--test/e2e-go/upgrades/application_support_test.go8
-rw-r--r--test/heapwatch/block_history.py19
-rw-r--r--test/heapwatch/block_history_plot.py9
-rw-r--r--test/heapwatch/metrics_delta.py49
-rwxr-xr-xtest/scripts/e2e_subs/box-search.sh135
-rw-r--r--test/scripts/e2e_subs/tealprogs/boxes.teal60
-rw-r--r--test/testdata/configs/config-v24.json3
-rw-r--r--test/testdata/configs/config-v25.json108
-rwxr-xr-xtest/testdata/deployednettemplates/generate-recipe/generate_network.py16
-rw-r--r--test/testdata/deployednettemplates/recipes/README.md19
-rw-r--r--test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/alphanet/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/README.md6
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json1
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json1
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json15
-rw-r--r--test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json3
-rw-r--r--test/testdata/deployednettemplates/recipes/mmnet/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/network-partition/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario2/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/Makefile2
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/Makefile2
-rw-r--r--util/db/dbutil.go11
241 files changed, 24058 insertions, 5208 deletions
diff --git a/.golangci.yml b/.golangci.yml
index 271c682e5..62490feca 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -33,6 +33,43 @@ linters-settings:
- (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).error
- (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
- (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warn
+ # We do this 121 times and never check the error.
+ - (*github.com/spf13/cobra.Command).MarkFlagRequired
+ govet:
+ settings:
+ printf:
+ # Comma-separated list of print function names to check (in addition to default, see `go tool vet help printf`).
+ # Default: []
+ funcs:
+ - (github.com/algorand/go-algorand/logging.Logger).Debugf
+ - (github.com/algorand/go-algorand/logging.Logger).Infof
+ - (github.com/algorand/go-algorand/logging.Logger).Warnf
+ - (github.com/algorand/go-algorand/logging.Logger).Errorf
+ - (github.com/algorand/go-algorand/logging.Logger).Fatalf
+ - (github.com/algorand/go-algorand/logging.Logger).Panicf
+ - (github.com/algorand/go-algorand/logging.Logger).Debugln
+ - (github.com/algorand/go-algorand/logging.Logger).Infoln
+ - (github.com/algorand/go-algorand/logging.Logger).Warnln
+ - (github.com/algorand/go-algorand/logging.Logger).Errorln
+ - (github.com/algorand/go-algorand/logging.Logger).Fatalln
+ - (github.com/algorand/go-algorand/logging.Logger).Panicln
+ - (github.com/algorand/go-algorand/logging.Logger).Debug
+ - (github.com/algorand/go-algorand/logging.Logger).Info
+ - (github.com/algorand/go-algorand/logging.Logger).Warn
+ - (github.com/algorand/go-algorand/logging.Logger).Error
+ - (github.com/algorand/go-algorand/logging.Logger).Fatal
+ - (github.com/algorand/go-algorand/logging.Logger).Panic
+ - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
+ - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).errorf
+ - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).lineErrorf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportInfof
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportInfoln
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnln
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportWarnRawln
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorf
+ - (github.com/algorand/go-algorand/cmd/goal/main).reportErrorln
issues:
# Work our way back over time to be clean against all these
@@ -69,9 +106,9 @@ issues:
- path: _test\.go
linters:
- errcheck
- - gofmt
+ # - gofmt
- gosimple
- - govet
+ # - govet
- ineffassign
- misspell
- nolintlint
diff --git a/agreement/actions.go b/agreement/actions.go
index 779d9d467..ca33c18db 100644
--- a/agreement/actions.go
+++ b/agreement/actions.go
@@ -26,8 +26,7 @@ import (
)
//go:generate stringer -type=actionType
-//msgp:ignore actionType
-type actionType int
+type actionType uint8
const (
noop actionType = iota
@@ -103,7 +102,7 @@ type networkAction struct {
UnauthenticatedVotes []unauthenticatedVote
- Err serializableError
+ Err *serializableError
}
func (a networkAction) t() actionType {
@@ -181,7 +180,7 @@ type cryptoAction struct {
Period period
Step step
Pinned bool
- TaskIndex int
+ TaskIndex uint64
}
func (a cryptoAction) t() actionType {
@@ -388,7 +387,7 @@ func (a pseudonodeAction) do(ctx context.Context, s *Service) {
case nil:
// no error.
persistCompleteEvents := s.persistState(persistStateDone)
- // we want to place there two one after the other. That way, the second would not get executed up until the first one is complete.
+ // we want to place these two one after the other. That way, the second would not get executed up until the first one is complete.
s.demux.prioritize(persistCompleteEvents)
s.demux.prioritize(voteEvents)
default:
@@ -403,12 +402,12 @@ func (a pseudonodeAction) do(ctx context.Context, s *Service) {
}
}
-func ignoreAction(e messageEvent, err serializableError) action {
- return networkAction{T: ignore, Err: err, h: e.Input.MessageHandle}
+func ignoreAction(e messageEvent, err *serializableError) action {
+ return networkAction{T: ignore, Err: err, h: e.Input.messageHandle}
}
-func disconnectAction(e messageEvent, err serializableError) action {
- return networkAction{T: disconnect, Err: err, h: e.Input.MessageHandle}
+func disconnectAction(e messageEvent, err *serializableError) action {
+ return networkAction{T: disconnect, Err: err, h: e.Input.messageHandle}
}
func broadcastAction(tag protocol.Tag, o interface{}) action {
@@ -427,7 +426,7 @@ func broadcastAction(tag protocol.Tag, o interface{}) action {
}
func relayAction(e messageEvent, tag protocol.Tag, o interface{}) action {
- a := networkAction{T: relay, h: e.Input.MessageHandle, Tag: tag}
+ a := networkAction{T: relay, h: e.Input.messageHandle, Tag: tag}
// TODO would be good to have compiler check this (and related) type switch
// by specializing one method per type
switch tag {
@@ -441,7 +440,7 @@ func relayAction(e messageEvent, tag protocol.Tag, o interface{}) action {
return a
}
-func verifyVoteAction(e messageEvent, r round, p period, taskIndex int) action {
+func verifyVoteAction(e messageEvent, r round, p period, taskIndex uint64) action {
return cryptoAction{T: verifyVote, M: e.Input, Round: r, Period: p, TaskIndex: taskIndex}
}
@@ -479,7 +478,7 @@ type checkpointAction struct {
Round round
Period period
Step step
- Err serializableError
+ Err *serializableError
done chan error // an output channel to let the pseudonode that we're done processing. We don't want to serialize that, since it's not needed in recovery/autopsy
}
diff --git a/agreement/actiontype_string.go b/agreement/actiontype_string.go
index c27b9138b..9272ec2cf 100644
--- a/agreement/actiontype_string.go
+++ b/agreement/actiontype_string.go
@@ -31,7 +31,7 @@ const _actionType_name = "noopignorebroadcastrelaydisconnectbroadcastVotesverify
var _actionType_index = [...]uint8{0, 4, 10, 19, 24, 34, 48, 58, 71, 83, 89, 100, 106, 112, 120, 129, 139}
func (i actionType) String() string {
- if i < 0 || i >= actionType(len(_actionType_index)-1) {
+ if i >= actionType(len(_actionType_index)-1) {
return "actionType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _actionType_name[_actionType_index[i]:_actionType_index[i+1]]
diff --git a/agreement/asyncVoteVerifier.go b/agreement/asyncVoteVerifier.go
index cb26b440e..877c92dfb 100644
--- a/agreement/asyncVoteVerifier.go
+++ b/agreement/asyncVoteVerifier.go
@@ -29,7 +29,7 @@ type asyncVerifyVoteRequest struct {
l LedgerReader
uv *unauthenticatedVote
uev *unauthenticatedEquivocationVote
- index int
+ index uint64
message message
// a channel that holds the response
@@ -39,7 +39,7 @@ type asyncVerifyVoteRequest struct {
type asyncVerifyVoteResponse struct {
v vote
ev equivocationVote
- index int
+ index uint64
message message
err error
cancelled bool
@@ -131,7 +131,7 @@ func (avv *AsyncVoteVerifier) executeEqVoteVerification(task interface{}) interf
}
}
-func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
+func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader, uv unauthenticatedVote, index uint64, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
@@ -151,7 +151,7 @@ func (avv *AsyncVoteVerifier) verifyVote(verctx context.Context, l LedgerReader,
return nil
}
-func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index int, message message, out chan<- asyncVerifyVoteResponse) error {
+func (avv *AsyncVoteVerifier) verifyEqVote(verctx context.Context, l LedgerReader, uev unauthenticatedEquivocationVote, index uint64, message message, out chan<- asyncVerifyVoteResponse) error {
select {
case <-avv.ctx.Done(): // if we're quitting, don't enqueue the request
// case <-verctx.Done(): DO NOT DO THIS! otherwise we will lose the vote (and forget to clean up)!
diff --git a/agreement/bundle.go b/agreement/bundle.go
index 211f67b5b..de297110e 100644
--- a/agreement/bundle.go
+++ b/agreement/bundle.go
@@ -202,7 +202,8 @@ func (b unauthenticatedBundle) verifyAsync(ctx context.Context, l LedgerReader,
rv := rawVote{Sender: auth.Sender, Round: b.Round, Period: b.Period, Step: b.Step, Proposal: b.Proposal}
uv := unauthenticatedVote{R: rv, Cred: auth.Cred, Sig: auth.Sig}
- avv.verifyVote(ctx, l, uv, i, message{}, results)
+
+ avv.verifyVote(ctx, l, uv, uint64(i), message{}, results) //nolint:errcheck // verifyVote will call EnqueueBacklog, which blocks until the verify task is queued, or returns an error when ctx.Done(), which we are already checking
}
// create verification requests for equivocation votes
@@ -222,7 +223,8 @@ func (b unauthenticatedBundle) verifyAsync(ctx context.Context, l LedgerReader,
Proposals: auth.Proposals,
Sigs: auth.Sigs,
}
- avv.verifyEqVote(ctx, l, uev, i, message{}, results)
+ avv.verifyEqVote(ctx, l, uev, uint64(i), message{}, results) //nolint:errcheck // verifyVote will call EnqueueBacklog, which blocks until the verify task is queued, or returns an error when ctx.Done(), which we are already checking
+
}
return func() (bundle, error) {
diff --git a/agreement/cadaver.go b/agreement/cadaver.go
index 7b0cb8e76..d3f626ada 100644
--- a/agreement/cadaver.go
+++ b/agreement/cadaver.go
@@ -123,7 +123,7 @@ func (c *cadaver) trySetup() bool {
if c.out.bytesWritten >= c.fileSizeTarget {
err := c.out.Close()
if err != nil {
- logging.Base().Warn("unable to close cadaver file : %v", err)
+ logging.Base().Warnf("unable to close cadaver file : %v", err)
}
err = os.Rename(c.filename(), c.filename()+".archive")
if err != nil {
diff --git a/agreement/cryptoVerifier.go b/agreement/cryptoVerifier.go
index cf6c466e5..ca4bceb66 100644
--- a/agreement/cryptoVerifier.go
+++ b/agreement/cryptoVerifier.go
@@ -82,37 +82,41 @@ type (
Quit()
}
+ //msgp:ignore cryptoVoteRequest
cryptoVoteRequest struct {
message // the message we would like to verify.
- TaskIndex int // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
+ TaskIndex uint64 // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
Round round // The round that we're going to test against.
Period period // The period associated with the message we're going to test.
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
+ //msgp:ignore cryptoProposalRequest
cryptoProposalRequest struct {
message // the message we would like to verify.
- TaskIndex int // Caller specific number that would be passed back in the cryptoResult.TaskIndex field
+ TaskIndex uint64 // Caller specific number that would be passed back in the cryptoResult.TaskIndex field
Round round // The round that we're going to test against.
Period period // The period associated with the message we're going to test.
Pinned bool // A flag that is set if this is a pinned value for the given round.
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
+ //msgp:ignore cryptoBundleRequest
cryptoBundleRequest struct {
message // the message we would like to verify.
- TaskIndex int // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
+ TaskIndex uint64 // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
Round round // The round that we're going to test against.
Period period // The period associated with the message we're going to test.
Certify bool // A flag that set if this is a cert bundle.
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
+ //msgp:ignore cryptoResult
cryptoResult struct {
message
- Err serializableError
- TaskIndex int // the TaskIndex that was passed to the cryptoVerifier during the Verify call on the cryptoRequest.TaskIndex
- Cancelled bool // whether the corresponding request was cancelled before verification completed
+ Err *serializableError
+ TaskIndex uint64 // the TaskIndex that was passed to the cryptoVerifier during the Verify call on the cryptoRequest.TaskIndex
+ Cancelled bool // whether the corresponding request was cancelled before verification completed
}
// A poolCryptoVerifier uses asynchronous goroutines to implement cryptoVerifier.
@@ -146,9 +150,10 @@ type (
out chan cryptoResult
}
+ //msgp:ignore bundleFuture
bundleFuture struct {
message
- index int
+ index uint64
wait func() (bundle, error)
ctx context.Context
}
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index a42ffd9b0..21b78c601 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -93,7 +93,7 @@ func makeMessage(msgHandle int, tag protocol.Tag, sender basics.Address, l Ledge
}
return message{
- MessageHandle: MessageHandle(msgHandle),
+ messageHandle: MessageHandle(msgHandle),
Tag: tag,
UnauthenticatedVote: makeUnauthenticatedVote(l, sender, selection, voting, Round, Period, Step, proposal),
}
@@ -103,13 +103,13 @@ func makeMessage(msgHandle int, tag protocol.Tag, sender basics.Address, l Ledge
Block: e,
}
return message{
- MessageHandle: MessageHandle(msgHandle),
+ messageHandle: MessageHandle(msgHandle),
Tag: tag,
UnauthenticatedProposal: payload,
}
default: // protocol.VoteBundleTag
return message{
- MessageHandle: MessageHandle(msgHandle),
+ messageHandle: MessageHandle(msgHandle),
Tag: tag,
UnauthenticatedBundle: unauthenticatedBundle{
Round: Round,
@@ -180,9 +180,9 @@ func TestCryptoVerifierBuffers(t *testing.T) {
for _, msgType := range msgTypes {
for i := getSelectorCapacity(msgType) * 5; i > 0; i-- {
msg := <-verifier.Verified(msgType)
- _, has := usedMsgIDs[msg.MessageHandle]
+ _, has := usedMsgIDs[msg.messageHandle]
assert.True(t, has)
- delete(usedMsgIDs, msg.MessageHandle)
+ delete(usedMsgIDs, msg.messageHandle)
}
assert.False(t, verifier.ChannelFull(msgType))
assert.Zero(t, len(verifier.Verified(msgType)))
@@ -230,8 +230,8 @@ func TestCryptoVerifierBuffers(t *testing.T) {
}
msgIDMutex.Lock()
defer msgIDMutex.Unlock()
- _, has := usedMsgIDs[msg.MessageHandle]
- delete(usedMsgIDs, msg.MessageHandle)
+ _, has := usedMsgIDs[msg.messageHandle]
+ delete(usedMsgIDs, msg.messageHandle)
return assert.True(t, has)
}
@@ -333,7 +333,7 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) {
c := verifier.Verified(protocol.ProposalPayloadTag)
request := cryptoProposalRequest{
message: message{
- MessageHandle: MessageHandle(0),
+ messageHandle: MessageHandle(0),
Tag: protocol.ProposalPayloadTag,
UnauthenticatedProposal: proposals[0].unauthenticatedProposal,
},
@@ -402,11 +402,11 @@ func TestCryptoVerifierVerificationFailures(t *testing.T) {
cryptoVerifier := makeCryptoVerifier(nil, nil, voteVerifier, logging.TestingLog(t))
defer cryptoVerifier.Quit()
- cryptoVerifier.VerifyVote(context.Background(), cryptoVoteRequest{message: message{Tag: protocol.AgreementVoteTag}, Round: basics.Round(8), TaskIndex: 14})
+ cryptoVerifier.VerifyVote(context.Background(), cryptoVoteRequest{message: message{Tag: protocol.AgreementVoteTag}, Round: basics.Round(8), TaskIndex: uint64(14)})
// read the failed response from VerifiedVotes:
votesout := cryptoVerifier.VerifiedVotes()
voteResponse := <-votesout
require.Equal(t, context.Canceled, voteResponse.err)
require.True(t, voteResponse.cancelled)
- require.Equal(t, 14, voteResponse.index)
+ require.Equal(t, uint64(14), voteResponse.index)
}
diff --git a/agreement/demux.go b/agreement/demux.go
index 7379590d5..ad51038b4 100644
--- a/agreement/demux.go
+++ b/agreement/demux.go
@@ -140,11 +140,11 @@ func (d *demux) tokenizeMessages(ctx context.Context, net Network, tag protocol.
var msg message
switch tag {
case protocol.AgreementVoteTag:
- msg = message{MessageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedVote: o.(unauthenticatedVote)}
+ msg = message{messageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedVote: o.(unauthenticatedVote)}
case protocol.VoteBundleTag:
- msg = message{MessageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedBundle: o.(unauthenticatedBundle)}
+ msg = message{messageHandle: raw.MessageHandle, Tag: tag, UnauthenticatedBundle: o.(unauthenticatedBundle)}
case protocol.ProposalPayloadTag:
- msg = message{MessageHandle: raw.MessageHandle, Tag: tag, CompoundMessage: o.(compoundMessage)}
+ msg = message{messageHandle: raw.MessageHandle, Tag: tag, CompoundMessage: o.(compoundMessage)}
default:
err := fmt.Errorf("bad message tag: %v", tag)
d.UpdateEventsQueue(fmt.Sprintf("Tokenizing-%s", tag), 0)
@@ -167,7 +167,7 @@ func (d *demux) tokenizeMessages(ctx context.Context, net Network, tag protocol.
}
// verifyVote enqueues a vote message to be verified.
-func (d *demux) verifyVote(ctx context.Context, m message, taskIndex int, r round, p period) {
+func (d *demux) verifyVote(ctx context.Context, m message, taskIndex uint64, r round, p period) {
d.UpdateEventsQueue(eventQueueCryptoVerifierVote, 1)
d.monitor.inc(cryptoVerifierCoserviceType)
d.crypto.VerifyVote(ctx, cryptoVoteRequest{message: m, TaskIndex: taskIndex, Round: r, Period: p})
@@ -367,7 +367,7 @@ func setupCompoundMessage(l LedgerReader, m message) (res externalEvent) {
return
}
- tailmsg := message{MessageHandle: m.MessageHandle, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: compound.Proposal}
+ tailmsg := message{messageHandle: m.messageHandle, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: compound.Proposal}
synthetic := messageEvent{T: payloadPresent, Input: tailmsg}
proto, err := l.ConsensusVersion(ParamsRound(synthetic.ConsensusRound()))
synthetic = synthetic.AttachConsensusVersion(ConsensusVersionView{Err: makeSerErr(err), Version: proto}).(messageEvent)
diff --git a/agreement/errors.go b/agreement/errors.go
index 606e433b6..eae272b9f 100644
--- a/agreement/errors.go
+++ b/agreement/errors.go
@@ -16,38 +16,39 @@
package agreement
-import "fmt"
+import (
+ "fmt"
+)
// serializableError, or state machine error, is a serializable error that
// is correctly written to cadaver files.
-type serializableErrorUnderlying string
-type serializableError = *serializableErrorUnderlying
+type serializableError string
// implement error interface
-func (e serializableErrorUnderlying) Error() string {
+func (e serializableError) Error() string {
return string(e)
}
-func (e serializableErrorUnderlying) String() string {
+func (e serializableError) String() string {
return e.Error()
}
// makeSerErrStr returns an serializableError that formats as the given text.
-func makeSerErrStr(text string) serializableError {
- s := serializableErrorUnderlying(text)
+func makeSerErrStr(text string) *serializableError {
+ s := serializableError(text)
return &s
}
-func makeSerErrf(format string, a ...interface{}) serializableError {
- s := serializableErrorUnderlying(fmt.Sprintf(format, a...))
+func makeSerErrf(format string, a ...interface{}) *serializableError {
+ s := serializableError(fmt.Sprintf(format, a...))
return &s
}
// makeSerErr returns an serializableError that formats as the given error.
-func makeSerErr(err error) serializableError {
+func makeSerErr(err error) *serializableError {
if err == nil {
return nil
}
- s := serializableErrorUnderlying(err.Error())
+ s := serializableError(err.Error())
return &s
}
diff --git a/agreement/events.go b/agreement/events.go
index 61176872a..f418cc38f 100644
--- a/agreement/events.go
+++ b/agreement/events.go
@@ -43,7 +43,9 @@ type event interface {
// A ConsensusVersionView is a view of the consensus version as read from a
// LedgerReader, associated with some round.
type ConsensusVersionView struct {
- Err serializableError
+ _struct struct{} `codec:","`
+
+ Err *serializableError
Version protocol.ConsensusVersion
}
@@ -69,8 +71,7 @@ type externalEvent interface {
// type of the implementing struct.
//
//go:generate stringer -type=eventType
-//msgp:ignore eventType
-type eventType int
+type eventType uint8
const (
// none is returned by state machines which have no event to return
@@ -255,6 +256,7 @@ func (e emptyEvent) AttachConsensusVersion(v ConsensusVersionView) externalEvent
}
type messageEvent struct {
+ _struct struct{} `codec:","`
// {vote,bundle,payload}{Present,Verified}
T eventType
@@ -263,10 +265,10 @@ type messageEvent struct {
// Err is set if cryptographic verification was attempted and failed for
// Input.
- Err serializableError
+ Err *serializableError
// TaskIndex is optionally set to track a message as it is processed
// through cryptographic verification.
- TaskIndex int
+ TaskIndex uint64
// Tail is an optionally-set field which specifies an unauthenticated
// proposal which should be processed after Input is processed. Tail is
@@ -314,12 +316,15 @@ func (e messageEvent) AttachConsensusVersion(v ConsensusVersionView) externalEve
// freshnessData is bundled with filterableMessageEvent
// to allow for delegated freshness computation
type freshnessData struct {
+ _struct struct{} `codec:","`
+
PlayerRound round
PlayerPeriod period
PlayerStep step
PlayerLastConcluding step
}
+//msgp:ignore filterableMessageEvent
type filterableMessageEvent struct {
messageEvent
@@ -534,7 +539,7 @@ type payloadProcessedEvent struct {
// Err is set to be the reason the proposal payload was rejected in
// payloadRejected.
- Err serializableError
+ Err *serializableError
}
func (e payloadProcessedEvent) t() eventType {
@@ -558,7 +563,7 @@ type filteredEvent struct {
// Err is the reason cryptographic verification failed and is set for
// events {proposal,vote,bundle}Malformed.
- Err serializableError
+ Err *serializableError
}
func (e filteredEvent) t() eventType {
@@ -623,6 +628,7 @@ func (e pinnedValueEvent) ComparableStr() string {
}
type thresholdEvent struct {
+ _struct struct{} `codec:","`
// {{soft,cert,next}Threshold, none}
T eventType
@@ -818,6 +824,7 @@ func (e nextThresholdStatusRequestEvent) ComparableStr() string {
}
type nextThresholdStatusEvent struct {
+ _struct struct{} `codec:","`
// the result of a nextThresholdStatusRequest. Contains two bits of information,
// capturing four cases:
// Bottom = false, Proposal = unset/bottom --> received no next value thresholds
@@ -910,8 +917,8 @@ type checkpointEvent struct {
Round round
Period period
Step step
- Err serializableError // the error that was generated while storing the state to disk; nil on success.
- done chan error // an output channel to let the pseudonode that we're done processing. We don't want to serialize that, since it's not needed in recovery/autopsy.
+ Err *serializableError // the error that was generated while storing the state to disk; nil on success.
+ done chan error // an output channel to let the pseudonode that we're done processing. We don't want to serialize that, since it's not needed in recovery/autopsy.
}
func (e checkpointEvent) t() eventType {
diff --git a/agreement/events_test.go b/agreement/events_test.go
new file mode 100644
index 000000000..243dd0508
--- /dev/null
+++ b/agreement/events_test.go
@@ -0,0 +1,88 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "encoding/base64"
+ "testing"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+// TestSerializableErrorBackwardCompatibility ensures the Err field of type serializableError can be
+// properly decoded from ConsensusVersionView.
+// This test is only needed for agreement state serialization switch from reflection to msgp.
+func TestSerializableErrorBackwardCompatibility(t *testing.T) {
+
+ encodedEmpty, err := base64.StdEncoding.DecodeString("gqNFcnLAp1ZlcnNpb26jdjEw")
+ require.NoError(t, err)
+
+ encoded, err := base64.StdEncoding.DecodeString("gqNFcnKndGVzdGVycqdWZXJzaW9uo3YxMA==")
+ require.NoError(t, err)
+
+ // run on master f57a276 to get the encoded data for above
+ // cv := ConsensusVersionView{
+ // Err: nil,
+ // Version: protocol.ConsensusV10,
+ // }
+
+ // result := protocol.EncodeReflect(&cv)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ // se := serializableErrorUnderlying("testerr")
+ // cv = ConsensusVersionView{
+ // Err: &se,
+ // Version: protocol.ConsensusV10,
+ // }
+
+ // result = protocol.EncodeReflect(&cv)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ cvEmpty := ConsensusVersionView{
+ Err: nil,
+ Version: protocol.ConsensusV10,
+ }
+
+ se := serializableError("testerr")
+ cv := ConsensusVersionView{
+ Err: &se,
+ Version: protocol.ConsensusV10,
+ }
+
+ cv1 := ConsensusVersionView{}
+ err = protocol.Decode(encodedEmpty, &cv1)
+ require.NoError(t, err)
+
+ cv2 := ConsensusVersionView{}
+ err = protocol.DecodeReflect(encodedEmpty, &cv2)
+ require.NoError(t, err)
+
+ require.Equal(t, cv1, cv2)
+ require.Equal(t, cvEmpty, cv2)
+
+ cv1 = ConsensusVersionView{}
+ err = protocol.Decode(encoded, &cv1)
+ require.NoError(t, err)
+
+ cv2 = ConsensusVersionView{}
+ err = protocol.DecodeReflect(encoded, &cv2)
+ require.NoError(t, err)
+
+ require.Equal(t, cv1, cv2)
+ require.Equal(t, cv, cv2)
+}
diff --git a/agreement/eventtype_string.go b/agreement/eventtype_string.go
index f8ee701d8..9da84c1b9 100644
--- a/agreement/eventtype_string.go
+++ b/agreement/eventtype_string.go
@@ -54,7 +54,7 @@ const _eventType_name = "nonevotePresentpayloadPresentbundlePresentvoteVerifiedp
var _eventType_index = [...]uint16{0, 4, 15, 29, 42, 54, 69, 83, 100, 107, 118, 131, 144, 157, 176, 192, 204, 217, 231, 246, 261, 277, 293, 308, 322, 334, 342, 351, 362, 372, 389, 405, 431, 450, 471, 485, 501, 510, 523, 540}
func (i eventType) String() string {
- if i < 0 || i >= eventType(len(_eventType_index)-1) {
+ if i >= eventType(len(_eventType_index)-1) {
return "eventType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _eventType_name[_eventType_index[i]:_eventType_index[i+1]]
diff --git a/agreement/message.go b/agreement/message.go
index 33a6d23ce..a1f6a8c80 100644
--- a/agreement/message.go
+++ b/agreement/message.go
@@ -18,12 +18,20 @@ package agreement
import (
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/msgp/msgp"
)
// A message represents an internal message which is passed between components
// of the agreement service.
type message struct {
- MessageHandle
+ _struct struct{} `codec:","`
+
+ // this field is for backwards compatibility with crash state serialized using go-codec prior to explicit unexport.
+ // should be removed after the next consensus update.
+ MessageHandle msgp.Raw
+ // explicitly unexport this field since we can't define serializers for interface{} type
+ // the only implementation of this is gossip.messageMetadata which doesn't have exported fields to serialize.
+ messageHandle MessageHandle
Tag protocol.Tag
@@ -46,6 +54,8 @@ type message struct {
// These messages are concatenated as an optimization which prevents proposals
// from being dropped.
type compoundMessage struct {
+ _struct struct{} `codec:","`
+
Vote unauthenticatedVote
Proposal unauthenticatedProposal
}
diff --git a/agreement/message_test.go b/agreement/message_test.go
index 88c4b504b..76209a5f9 100644
--- a/agreement/message_test.go
+++ b/agreement/message_test.go
@@ -17,6 +17,7 @@
package agreement
import (
+ "encoding/base64"
"testing"
"github.com/stretchr/testify/require"
@@ -24,7 +25,9 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/committee"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
@@ -80,3 +83,50 @@ func BenchmarkVoteDecoding(b *testing.B) {
decodeVote(msgBytes)
}
}
+
+// TestMessageBackwardCompatibility ensures the MessageHandle field can be
+// properly decoded from message.
+// This test is only needed for agreement state serialization switch from reflection to msgp.
+func TestMessageBackwardCompatibility(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type messageMetadata struct {
+ raw network.IncomingMessage
+ }
+
+ encoded, err := base64.StdEncoding.DecodeString("iaZCdW5kbGWAr0NvbXBvdW5kTWVzc2FnZYKoUHJvcG9zYWyApFZvdGWArU1lc3NhZ2VIYW5kbGWAqFByb3Bvc2FsgKNUYWeiUFC1VW5hdXRoZW50aWNhdGVkQnVuZGxlgLdVbmF1dGhlbnRpY2F0ZWRQcm9wb3NhbICzVW5hdXRoZW50aWNhdGVkVm90ZYCkVm90ZYA=")
+ require.NoError(t, err)
+
+ // run on master f57a276 to get the encoded data for above
+ // msg := message{
+ // MessageHandle: &messageMetadata{raw: network.IncomingMessage{Tag: protocol.Tag("mytag"), Data: []byte("some data")}},
+ // Tag: protocol.ProposalPayloadTag,
+ // }
+
+ // result := protocol.EncodeReflect(&msg)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ // messages for all rounds after this change should not have MessageHandle set, so clearing it out and re-encoding/decoding should yield this
+ targetMessage := message{
+ Tag: protocol.ProposalPayloadTag,
+ }
+
+ var m1, m2, m3, m4 message
+ // Both msgp and reflection should decode the message containing old MessageHandle successfully
+ err = protocol.Decode(encoded, &m1)
+ require.NoError(t, err)
+ err = protocol.DecodeReflect(encoded, &m2)
+ require.NoError(t, err)
+ // after setting MessageHandle to nil both should re-encode and decode to same values
+ m1.MessageHandle = nil
+ m2.MessageHandle = nil
+ e1 := protocol.Encode(&m1)
+ e2 := protocol.EncodeReflect(&m2)
+ require.Equal(t, e1, e2)
+ err = protocol.DecodeReflect(e1, &m3)
+ require.NoError(t, err)
+ err = protocol.Decode(e2, &m4)
+ require.NoError(t, err)
+ require.Equal(t, m3, m4)
+ require.Equal(t, m3, targetMessage)
+}
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index 4581b07a9..3c396226b 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -22,6 +22,30 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// ConsensusVersionView
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// actionType
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
+//
+// blockAssembler
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// bundle
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -30,6 +54,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// compoundMessage
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// diskState
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// equivocationVote
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -46,6 +86,46 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// eventType
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
+//
+// freshnessData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// message
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// messageEvent
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// nextThresholdStatusEvent
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// period
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -54,6 +134,22 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
+// periodRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// player
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// proposal
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -62,6 +158,54 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// proposalManager
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalSeeker
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalStore
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalTable
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalTracker
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// proposalTrackerContract
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// proposalValue
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -70,6 +214,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// proposalVoteCounter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// proposerSeed
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -86,6 +238,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// rootRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// roundRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// seedInput
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -102,13 +270,13 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// serializableErrorUnderlying
-// |-----> MarshalMsg
-// |-----> CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> Msgsize
-// |-----> MsgIsZero
+// serializableError
+// |-----> MarshalMsg
+// |-----> CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> Msgsize
+// |-----> MsgIsZero
//
// step
// |-----> MarshalMsg
@@ -118,6 +286,22 @@ import (
// |-----> Msgsize
// |-----> MsgIsZero
//
+// stepRouter
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// thresholdEvent
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// transmittedPayload
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -166,6 +350,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// voteAggregator
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// voteAuthenticator
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -174,6 +366,38 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// voteTracker
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// voteTrackerContract
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// voteTrackerPeriod
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// voteTrackerRound
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// MarshalMsg implements msgp.Marshaler
func (z *Certificate) MarshalMsg(b []byte) (o []byte) {
@@ -518,6 +742,407 @@ func (z *Certificate) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *ConsensusVersionView) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Err"
+ o = append(o, 0x82, 0xa3, 0x45, 0x72, 0x72)
+ if (*z).Err == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendString(o, string(*(*z).Err))
+ }
+ // string "Version"
+ o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ o = (*z).Version.MarshalMsg(o)
+ return
+}
+
+func (_ *ConsensusVersionView) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ConsensusVersionView)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ConsensusVersionView) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0003 string
+ zb0003, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0003)
+ }
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Version.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Version")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = ConsensusVersionView{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Err":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0004 string
+ zb0004, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0004)
+ }
+ }
+ case "Version":
+ bts, err = (*z).Version.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Version")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ConsensusVersionView) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ConsensusVersionView)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ConsensusVersionView) Msgsize() (s int) {
+ s = 1 + 4
+ if (*z).Err == nil {
+ s += msgp.NilSize
+ } else {
+ s += msgp.StringPrefixSize + len(string(*(*z).Err))
+ }
+ s += 8 + (*z).Version.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ConsensusVersionView) MsgIsZero() bool {
+ return ((*z).Err == nil) && ((*z).Version.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z actionType) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendUint8(o, uint8(z))
+ return
+}
+
+func (_ actionType) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(actionType)
+ if !ok {
+ _, ok = (z).(*actionType)
+ }
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *actionType) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 uint8
+ zb0001, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = actionType(zb0001)
+ }
+ o = bts
+ return
+}
+
+func (_ *actionType) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*actionType)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z actionType) Msgsize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z actionType) MsgIsZero() bool {
+ return z == 0
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *blockAssembler) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 5
+ // string "Assembled"
+ o = append(o, 0x85, 0xa9, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Assembled)
+ // string "Authenticators"
+ o = append(o, 0xae, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73)
+ if (*z).Authenticators == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Authenticators)))
+ }
+ for zb0001 := range (*z).Authenticators {
+ o = (*z).Authenticators[zb0001].MarshalMsg(o)
+ }
+ // string "Filled"
+ o = append(o, 0xa6, 0x46, 0x69, 0x6c, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Filled)
+ // string "Payload"
+ o = append(o, 0xa7, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64)
+ o = (*z).Payload.MarshalMsg(o)
+ // string "Pipeline"
+ o = append(o, 0xa8, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65)
+ o = (*z).Pipeline.MarshalMsg(o)
+ return
+}
+
+func (_ *blockAssembler) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*blockAssembler)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *blockAssembler) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = (*z).Pipeline.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pipeline")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Filled")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = (*z).Payload.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Payload")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ (*z).Assembled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assembled")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Authenticators")
+ return
+ }
+ if zb0005 {
+ (*z).Authenticators = nil
+ } else if (*z).Authenticators != nil && cap((*z).Authenticators) >= zb0004 {
+ (*z).Authenticators = ((*z).Authenticators)[:zb0004]
+ } else {
+ (*z).Authenticators = make([]vote, zb0004)
+ }
+ for zb0001 := range (*z).Authenticators {
+ bts, err = (*z).Authenticators[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Authenticators", zb0001)
+ return
+ }
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = blockAssembler{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Pipeline":
+ bts, err = (*z).Pipeline.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pipeline")
+ return
+ }
+ case "Filled":
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Filled")
+ return
+ }
+ case "Payload":
+ bts, err = (*z).Payload.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Payload")
+ return
+ }
+ case "Assembled":
+ (*z).Assembled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assembled")
+ return
+ }
+ case "Authenticators":
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Authenticators")
+ return
+ }
+ if zb0007 {
+ (*z).Authenticators = nil
+ } else if (*z).Authenticators != nil && cap((*z).Authenticators) >= zb0006 {
+ (*z).Authenticators = ((*z).Authenticators)[:zb0006]
+ } else {
+ (*z).Authenticators = make([]vote, zb0006)
+ }
+ for zb0001 := range (*z).Authenticators {
+ bts, err = (*z).Authenticators[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Authenticators", zb0001)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *blockAssembler) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*blockAssembler)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *blockAssembler) Msgsize() (s int) {
+ s = 1 + 9 + (*z).Pipeline.Msgsize() + 7 + msgp.BoolSize + 8 + (*z).Payload.Msgsize() + 10 + msgp.BoolSize + 15 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).Authenticators {
+ s += (*z).Authenticators[zb0001].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *blockAssembler) MsgIsZero() bool {
+ return ((*z).Pipeline.MsgIsZero()) && ((*z).Filled == false) && ((*z).Payload.MsgIsZero()) && ((*z).Assembled == false) && (len((*z).Authenticators) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *bundle) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -775,6 +1400,368 @@ func (z *bundle) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+// NOTE(review): msgp-generated codec for compoundMessage (two fields: Vote,
+// Proposal). Field keys are emitted as precomputed msgpack bytes; regenerate
+// with msgp rather than hand-editing if the struct changes.
+func (z *compoundMessage) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Proposal"
+ o = append(o, 0x82, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ // string "Vote"
+ o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).Vote.MarshalMsg(o)
+ return
+}
+
+// CanMarshalMsg reports whether the given value is a *compoundMessage.
+func (_ *compoundMessage) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*compoundMessage)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *compoundMessage) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ // A TypeError means the value was encoded as a flat array rather than a map;
+ // decode the fields positionally in canonical order (Vote, Proposal).
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = compoundMessage{}
+ }
+ // Map encoding: fields may appear in any order; unknown keys are
+ // rejected via msgp.ErrNoField below.
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Vote":
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// CanUnmarshalMsg reports whether the given value is a *compoundMessage.
+func (_ *compoundMessage) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*compoundMessage)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *compoundMessage) Msgsize() (s int) {
+ s = 1 + 5 + (*z).Vote.Msgsize() + 9 + (*z).Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *compoundMessage) MsgIsZero() bool {
+ return ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero())
+}
+
+// NOTE(review): msgp-generated codec for diskState (Router/Player/Clock raw
+// bytes plus ActionTypes/Actions slices). Regenerate with msgp rather than
+// hand-editing if the struct changes.
+// MarshalMsg implements msgp.Marshaler
+func (z *diskState) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 5
+ // string "ActionTypes"
+ o = append(o, 0x85, 0xab, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73)
+ // nil slice encodes as msgpack nil; non-nil encodes as an array header.
+ if (*z).ActionTypes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ActionTypes)))
+ }
+ for zb0001 := range (*z).ActionTypes {
+ o = msgp.AppendUint8(o, uint8((*z).ActionTypes[zb0001]))
+ }
+ // string "Actions"
+ o = append(o, 0xa7, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73)
+ if (*z).Actions == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Actions)))
+ }
+ for zb0002 := range (*z).Actions {
+ o = msgp.AppendBytes(o, (*z).Actions[zb0002])
+ }
+ // string "Clock"
+ o = append(o, 0xa5, 0x43, 0x6c, 0x6f, 0x63, 0x6b)
+ o = msgp.AppendBytes(o, (*z).Clock)
+ // string "Player"
+ o = append(o, 0xa6, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72)
+ o = msgp.AppendBytes(o, (*z).Player)
+ // string "Router"
+ o = append(o, 0xa6, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72)
+ o = msgp.AppendBytes(o, (*z).Router)
+ return
+}
+
+// CanMarshalMsg reports whether the given value is a *diskState.
+func (_ *diskState) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*diskState)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *diskState) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ // A TypeError means the value was encoded as a flat array rather than a map;
+ // decode the fields positionally in canonical order.
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Router, bts, err = msgp.ReadBytesBytes(bts, (*z).Router)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Router")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Player, bts, err = msgp.ReadBytesBytes(bts, (*z).Player)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Player")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Clock, bts, err = msgp.ReadBytesBytes(bts, (*z).Clock)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Clock")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ActionTypes")
+ return
+ }
+ // Reuse the existing backing array when it is large enough.
+ if zb0006 {
+ (*z).ActionTypes = nil
+ } else if (*z).ActionTypes != nil && cap((*z).ActionTypes) >= zb0005 {
+ (*z).ActionTypes = ((*z).ActionTypes)[:zb0005]
+ } else {
+ (*z).ActionTypes = make([]actionType, zb0005)
+ }
+ for zb0001 := range (*z).ActionTypes {
+ {
+ var zb0007 uint8
+ zb0007, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ActionTypes", zb0001)
+ return
+ }
+ (*z).ActionTypes[zb0001] = actionType(zb0007)
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Actions")
+ return
+ }
+ if zb0009 {
+ (*z).Actions = nil
+ } else if (*z).Actions != nil && cap((*z).Actions) >= zb0008 {
+ (*z).Actions = ((*z).Actions)[:zb0008]
+ } else {
+ (*z).Actions = make([][]byte, zb0008)
+ }
+ for zb0002 := range (*z).Actions {
+ (*z).Actions[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).Actions[zb0002])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Actions", zb0002)
+ return
+ }
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = diskState{}
+ }
+ // Map encoding: fields may appear in any order; unknown keys are
+ // rejected via msgp.ErrNoField below.
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Router":
+ (*z).Router, bts, err = msgp.ReadBytesBytes(bts, (*z).Router)
+ if err != nil {
+ err = msgp.WrapError(err, "Router")
+ return
+ }
+ case "Player":
+ (*z).Player, bts, err = msgp.ReadBytesBytes(bts, (*z).Player)
+ if err != nil {
+ err = msgp.WrapError(err, "Player")
+ return
+ }
+ case "Clock":
+ (*z).Clock, bts, err = msgp.ReadBytesBytes(bts, (*z).Clock)
+ if err != nil {
+ err = msgp.WrapError(err, "Clock")
+ return
+ }
+ case "ActionTypes":
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ActionTypes")
+ return
+ }
+ if zb0011 {
+ (*z).ActionTypes = nil
+ } else if (*z).ActionTypes != nil && cap((*z).ActionTypes) >= zb0010 {
+ (*z).ActionTypes = ((*z).ActionTypes)[:zb0010]
+ } else {
+ (*z).ActionTypes = make([]actionType, zb0010)
+ }
+ for zb0001 := range (*z).ActionTypes {
+ {
+ var zb0012 uint8
+ zb0012, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ActionTypes", zb0001)
+ return
+ }
+ (*z).ActionTypes[zb0001] = actionType(zb0012)
+ }
+ }
+ case "Actions":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Actions")
+ return
+ }
+ if zb0014 {
+ (*z).Actions = nil
+ } else if (*z).Actions != nil && cap((*z).Actions) >= zb0013 {
+ (*z).Actions = ((*z).Actions)[:zb0013]
+ } else {
+ (*z).Actions = make([][]byte, zb0013)
+ }
+ for zb0002 := range (*z).Actions {
+ (*z).Actions[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).Actions[zb0002])
+ if err != nil {
+ err = msgp.WrapError(err, "Actions", zb0002)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// CanUnmarshalMsg reports whether the given value is a *diskState.
+func (_ *diskState) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*diskState)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *diskState) Msgsize() (s int) {
+ s = 1 + 7 + msgp.BytesPrefixSize + len((*z).Router) + 7 + msgp.BytesPrefixSize + len((*z).Player) + 6 + msgp.BytesPrefixSize + len((*z).Clock) + 12 + msgp.ArrayHeaderSize + (len((*z).ActionTypes) * (msgp.Uint8Size)) + 8 + msgp.ArrayHeaderSize
+ for zb0002 := range (*z).Actions {
+ s += msgp.BytesPrefixSize + len((*z).Actions[zb0002])
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *diskState) MsgIsZero() bool {
+ return (len((*z).Router) == 0) && (len((*z).Player) == 0) && (len((*z).Clock) == 0) && (len((*z).ActionTypes) == 0) && (len((*z).Actions) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *equivocationVote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -1307,6 +2294,972 @@ func (z *equivocationVoteAuthenticator) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+// MarshalMsg encodes the eventType as a single msgpack uint8.
+func (z eventType) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendUint8(o, uint8(z))
+ return
+}
+
+// CanMarshalMsg reports whether the given value is an eventType (by value or
+// pointer); marshaling works with either form.
+func (_ eventType) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(eventType)
+ if !ok {
+ _, ok = (z).(*eventType)
+ }
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *eventType) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 uint8
+ zb0001, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = eventType(zb0001)
+ }
+ o = bts
+ return
+}
+
+// CanUnmarshalMsg reports whether the given value is a *eventType.
+func (_ *eventType) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*eventType)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z eventType) Msgsize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z eventType) MsgIsZero() bool {
+ return z == 0
+}
+
+// NOTE(review): msgp-generated codec for freshnessData (PlayerRound,
+// PlayerPeriod, PlayerStep, PlayerLastConcluding). Regenerate with msgp
+// rather than hand-editing if the struct changes.
+// MarshalMsg implements msgp.Marshaler
+func (z *freshnessData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "PlayerLastConcluding"
+ o = append(o, 0x84, 0xb4, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67)
+ o = msgp.AppendUint64(o, uint64((*z).PlayerLastConcluding))
+ // string "PlayerPeriod"
+ o = append(o, 0xac, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = msgp.AppendUint64(o, uint64((*z).PlayerPeriod))
+ // string "PlayerRound"
+ o = append(o, 0xab, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ o = (*z).PlayerRound.MarshalMsg(o)
+ // string "PlayerStep"
+ o = append(o, 0xaa, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).PlayerStep))
+ return
+}
+
+// CanMarshalMsg reports whether the given value is a *freshnessData.
+func (_ *freshnessData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*freshnessData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *freshnessData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ // A TypeError means the value was encoded as a flat array rather than a map;
+ // decode the fields positionally in canonical order.
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).PlayerRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerRound")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint64
+ zb0003, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerPeriod")
+ return
+ }
+ (*z).PlayerPeriod = period(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerStep")
+ return
+ }
+ (*z).PlayerStep = step(zb0004)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0005 uint64
+ zb0005, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PlayerLastConcluding")
+ return
+ }
+ (*z).PlayerLastConcluding = step(zb0005)
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = freshnessData{}
+ }
+ // Map encoding: fields may appear in any order; unknown keys are
+ // rejected via msgp.ErrNoField below.
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "PlayerRound":
+ bts, err = (*z).PlayerRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerRound")
+ return
+ }
+ case "PlayerPeriod":
+ {
+ var zb0006 uint64
+ zb0006, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerPeriod")
+ return
+ }
+ (*z).PlayerPeriod = period(zb0006)
+ }
+ case "PlayerStep":
+ {
+ var zb0007 uint64
+ zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerStep")
+ return
+ }
+ (*z).PlayerStep = step(zb0007)
+ }
+ case "PlayerLastConcluding":
+ {
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PlayerLastConcluding")
+ return
+ }
+ (*z).PlayerLastConcluding = step(zb0008)
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// CanUnmarshalMsg reports whether the given value is a *freshnessData.
+func (_ *freshnessData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*freshnessData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *freshnessData) Msgsize() (s int) {
+ s = 1 + 12 + (*z).PlayerRound.Msgsize() + 13 + msgp.Uint64Size + 11 + msgp.Uint64Size + 21 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *freshnessData) MsgIsZero() bool {
+ return ((*z).PlayerRound.MsgIsZero()) && ((*z).PlayerPeriod == 0) && ((*z).PlayerStep == 0) && ((*z).PlayerLastConcluding == 0)
+}
+
+// NOTE(review): msgp-generated codec for message. The embedded
+// CompoundMessage is inlined as a nested size-2 map rather than delegated to
+// compoundMessage's own codec. Regenerate with msgp rather than hand-editing.
+// MarshalMsg implements msgp.Marshaler
+func (z *message) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 9
+ // string "Bundle"
+ o = append(o, 0x89, 0xa6, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).Bundle.MarshalMsg(o)
+ // string "CompoundMessage"
+ o = append(o, 0xaf, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65)
+ // map header, size 2
+ // string "Proposal"
+ o = append(o, 0x82, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).CompoundMessage.Proposal.MarshalMsg(o)
+ // string "Vote"
+ o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).CompoundMessage.Vote.MarshalMsg(o)
+ // string "MessageHandle"
+ o = append(o, 0xad, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).MessageHandle.MarshalMsg(o)
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ // string "Tag"
+ o = append(o, 0xa3, 0x54, 0x61, 0x67)
+ o = (*z).Tag.MarshalMsg(o)
+ // string "UnauthenticatedBundle"
+ o = append(o, 0xb5, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).UnauthenticatedBundle.MarshalMsg(o)
+ // string "UnauthenticatedProposal"
+ o = append(o, 0xb7, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).UnauthenticatedProposal.MarshalMsg(o)
+ // string "UnauthenticatedVote"
+ o = append(o, 0xb3, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).UnauthenticatedVote.MarshalMsg(o)
+ // string "Vote"
+ o = append(o, 0xa4, 0x56, 0x6f, 0x74, 0x65)
+ o = (*z).Vote.MarshalMsg(o)
+ return
+}
+
+// CanMarshalMsg reports whether the given value is a *message.
+func (_ *message) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*message)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *message) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ // A TypeError means the value was encoded as a flat array rather than a map;
+ // decode the fields positionally in canonical order.
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).MessageHandle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MessageHandle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Tag.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Tag")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Bundle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).UnauthenticatedVote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnauthenticatedVote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).UnauthenticatedProposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnauthenticatedProposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).UnauthenticatedBundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnauthenticatedBundle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ // The nested CompoundMessage may itself be map- or array-encoded.
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ if zb0004 {
+ (*z).CompoundMessage = compoundMessage{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ switch string(field) {
+ case "Vote":
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "CompoundMessage")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = message{}
+ }
+ // Map encoding: fields may appear in any order; unknown keys are
+ // rejected via msgp.ErrNoField below.
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "MessageHandle":
+ bts, err = (*z).MessageHandle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MessageHandle")
+ return
+ }
+ case "Tag":
+ bts, err = (*z).Tag.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Tag")
+ return
+ }
+ case "Vote":
+ bts, err = (*z).Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ case "Bundle":
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Bundle")
+ return
+ }
+ case "UnauthenticatedVote":
+ bts, err = (*z).UnauthenticatedVote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnauthenticatedVote")
+ return
+ }
+ case "UnauthenticatedProposal":
+ bts, err = (*z).UnauthenticatedProposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnauthenticatedProposal")
+ return
+ }
+ case "UnauthenticatedBundle":
+ bts, err = (*z).UnauthenticatedBundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnauthenticatedBundle")
+ return
+ }
+ case "CompoundMessage":
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "struct-from-array", "Vote")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ if zb0006 {
+ (*z).CompoundMessage = compoundMessage{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ switch string(field) {
+ case "Vote":
+ bts, err = (*z).CompoundMessage.Vote.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "Vote")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).CompoundMessage.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "CompoundMessage")
+ return
+ }
+ }
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// CanUnmarshalMsg reports whether the given value is a *message.
+func (_ *message) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*message)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *message) Msgsize() (s int) {
+ s = 1 + 14 + (*z).MessageHandle.Msgsize() + 4 + (*z).Tag.Msgsize() + 5 + (*z).Vote.Msgsize() + 9 + (*z).Proposal.Msgsize() + 7 + (*z).Bundle.Msgsize() + 20 + (*z).UnauthenticatedVote.Msgsize() + 24 + (*z).UnauthenticatedProposal.Msgsize() + 22 + (*z).UnauthenticatedBundle.Msgsize() + 16 + 1 + 5 + (*z).CompoundMessage.Vote.Msgsize() + 9 + (*z).CompoundMessage.Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *message) MsgIsZero() bool {
+ return ((*z).MessageHandle.MsgIsZero()) && ((*z).Tag.MsgIsZero()) && ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).UnauthenticatedVote.MsgIsZero()) && ((*z).UnauthenticatedProposal.MsgIsZero()) && ((*z).UnauthenticatedBundle.MsgIsZero()) && (((*z).CompoundMessage.Vote.MsgIsZero()) && ((*z).CompoundMessage.Proposal.MsgIsZero()))
+}
+
+// NOTE(review): msgp-generated codec for messageEvent. Err and Tail are
+// pointers encoded as msgpack nil when absent; Tail is recursive (a
+// *messageEvent), so decoding recurses via its own UnmarshalMsg.
+// MarshalMsg implements msgp.Marshaler
+func (z *messageEvent) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 7
+ // string "Cancelled"
+ o = append(o, 0x87, 0xa9, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Cancelled)
+ // string "Err"
+ o = append(o, 0xa3, 0x45, 0x72, 0x72)
+ if (*z).Err == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendString(o, string(*(*z).Err))
+ }
+ // string "Input"
+ o = append(o, 0xa5, 0x49, 0x6e, 0x70, 0x75, 0x74)
+ o = (*z).Input.MarshalMsg(o)
+ // string "Proto"
+ o = append(o, 0xa5, 0x50, 0x72, 0x6f, 0x74, 0x6f)
+ o = (*z).Proto.MarshalMsg(o)
+ // string "T"
+ o = append(o, 0xa1, 0x54)
+ o = msgp.AppendUint8(o, uint8((*z).T))
+ // string "Tail"
+ o = append(o, 0xa4, 0x54, 0x61, 0x69, 0x6c)
+ if (*z).Tail == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = (*z).Tail.MarshalMsg(o)
+ }
+ // string "TaskIndex"
+ o = append(o, 0xa9, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ o = msgp.AppendUint64(o, (*z).TaskIndex)
+ return
+}
+
+// CanMarshalMsg reports whether the given value is a *messageEvent.
+func (_ *messageEvent) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*messageEvent)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *messageEvent) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ // A TypeError means the value was encoded as a flat array rather than a map;
+ // decode the fields positionally in canonical order.
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint8
+ zb0003, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "T")
+ return
+ }
+ (*z).T = eventType(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Input.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Input")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0004 string
+ zb0004, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0004)
+ }
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).TaskIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TaskIndex")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Tail = nil
+ } else {
+ if (*z).Tail == nil {
+ (*z).Tail = new(messageEvent)
+ }
+ bts, err = (*z).Tail.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Tail")
+ return
+ }
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Cancelled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cancelled")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proto")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = messageEvent{}
+ }
+ // Map encoding: fields may appear in any order; unknown keys are
+ // rejected via msgp.ErrNoField below.
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "T":
+ {
+ var zb0005 uint8
+ zb0005, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "T")
+ return
+ }
+ (*z).T = eventType(zb0005)
+ }
+ case "Input":
+ bts, err = (*z).Input.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Input")
+ return
+ }
+ case "Err":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Err = nil
+ } else {
+ if (*z).Err == nil {
+ (*z).Err = new(serializableError)
+ }
+ {
+ var zb0006 string
+ zb0006, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Err")
+ return
+ }
+ *(*z).Err = serializableError(zb0006)
+ }
+ }
+ case "TaskIndex":
+ (*z).TaskIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TaskIndex")
+ return
+ }
+ case "Tail":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Tail = nil
+ } else {
+ if (*z).Tail == nil {
+ (*z).Tail = new(messageEvent)
+ }
+ bts, err = (*z).Tail.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Tail")
+ return
+ }
+ }
+ case "Cancelled":
+ (*z).Cancelled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cancelled")
+ return
+ }
+ case "Proto":
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proto")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// CanUnmarshalMsg reports whether the given value is a *messageEvent.
+func (_ *messageEvent) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*messageEvent)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *messageEvent) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 6 + (*z).Input.Msgsize() + 4
+ if (*z).Err == nil {
+ s += msgp.NilSize
+ } else {
+ s += msgp.StringPrefixSize + len(string(*(*z).Err))
+ }
+ s += 10 + msgp.Uint64Size + 5
+ // Recursive bound: a non-nil Tail adds its own full Msgsize.
+ if (*z).Tail == nil {
+ s += msgp.NilSize
+ } else {
+ s += (*z).Tail.Msgsize()
+ }
+ s += 10 + msgp.BoolSize + 6 + (*z).Proto.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *messageEvent) MsgIsZero() bool {
+ return ((*z).T == 0) && ((*z).Input.MsgIsZero()) && ((*z).Err == nil) && ((*z).TaskIndex == 0) && ((*z).Tail == nil) && ((*z).Cancelled == false) && ((*z).Proto.MsgIsZero())
+}
+
+// NOTE(review): msgp-generated codec for nextThresholdStatusEvent (two
+// fields: Bottom, Proposal). Regenerate with msgp rather than hand-editing.
+// MarshalMsg implements msgp.Marshaler
+func (z *nextThresholdStatusEvent) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Bottom"
+ o = append(o, 0x82, 0xa6, 0x42, 0x6f, 0x74, 0x74, 0x6f, 0x6d)
+ o = msgp.AppendBool(o, (*z).Bottom)
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ return
+}
+
+// CanMarshalMsg reports whether the given value is a *nextThresholdStatusEvent.
+func (_ *nextThresholdStatusEvent) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*nextThresholdStatusEvent)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *nextThresholdStatusEvent) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ // A TypeError means the value was encoded as a flat array rather than a map;
+ // decode the fields positionally in canonical order (Bottom, Proposal).
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Bottom")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = nextThresholdStatusEvent{}
+ }
+ // Map encoding: fields may appear in any order; unknown keys are
+ // rejected via msgp.ErrNoField below.
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Bottom":
+ (*z).Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Bottom")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// CanUnmarshalMsg reports whether the given value is a *nextThresholdStatusEvent.
+func (_ *nextThresholdStatusEvent) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*nextThresholdStatusEvent)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *nextThresholdStatusEvent) Msgsize() (s int) {
+ s = 1 + 7 + msgp.BoolSize + 9 + (*z).Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *nextThresholdStatusEvent) MsgIsZero() bool {
+ return ((*z).Bottom == false) && ((*z).Proposal.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z period) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint64(o, uint64(z))
@@ -1353,6 +3306,487 @@ func (z period) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *periodRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "Children"
+ o = append(o, 0x84, 0xa8, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e)
+ if (*z).Children == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Children)))
+ }
+ zb0001_keys := make([]step, 0, len((*z).Children))
+ for zb0001 := range (*z).Children {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortStep(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Children[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ // string "ProposalTracker"
+ o = append(o, 0xaf, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72)
+ o = (*z).ProposalTracker.MarshalMsg(o)
+ // string "ProposalTrackerContract"
+ o = append(o, 0xb7, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74)
+ o = (*z).ProposalTrackerContract.MarshalMsg(o)
+ // string "VoteTrackerPeriod"
+ o = append(o, 0xb1, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = (*z).VoteTrackerPeriod.MarshalMsg(o)
+ return
+}
+
+func (_ *periodRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*periodRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *periodRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).ProposalTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalTracker")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).VoteTrackerPeriod.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerPeriod")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).ProposalTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalTrackerContract")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if zb0006 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[step]*stepRouter, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 step
+ var zb0002 *stepRouter
+ zb0005--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(stepRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = periodRouter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ProposalTracker":
+ bts, err = (*z).ProposalTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalTracker")
+ return
+ }
+ case "VoteTrackerPeriod":
+ bts, err = (*z).VoteTrackerPeriod.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerPeriod")
+ return
+ }
+ case "ProposalTrackerContract":
+ bts, err = (*z).ProposalTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalTrackerContract")
+ return
+ }
+ case "Children":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if zb0008 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[step]*stepRouter, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 step
+ var zb0002 *stepRouter
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(stepRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *periodRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*periodRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *periodRouter) Msgsize() (s int) {
+ s = 1 + 16 + (*z).ProposalTracker.Msgsize() + 18 + (*z).VoteTrackerPeriod.Msgsize() + 24 + (*z).ProposalTrackerContract.Msgsize() + 9 + msgp.MapHeaderSize
+ if (*z).Children != nil {
+ for zb0001, zb0002 := range (*z).Children {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize()
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *periodRouter) MsgIsZero() bool {
+ return ((*z).ProposalTracker.MsgIsZero()) && ((*z).VoteTrackerPeriod.MsgIsZero()) && ((*z).ProposalTrackerContract.MsgIsZero()) && (len((*z).Children) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *player) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 8
+ // string "Deadline"
+ o = append(o, 0x88, 0xa8, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65)
+ o = msgp.AppendDuration(o, (*z).Deadline)
+ // string "FastRecoveryDeadline"
+ o = append(o, 0xb4, 0x46, 0x61, 0x73, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65)
+ o = msgp.AppendDuration(o, (*z).FastRecoveryDeadline)
+ // string "LastConcluding"
+ o = append(o, 0xae, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67)
+ o = msgp.AppendUint64(o, uint64((*z).LastConcluding))
+ // string "Napping"
+ o = append(o, 0xa7, 0x4e, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67)
+ o = msgp.AppendBool(o, (*z).Napping)
+ // string "Pending"
+ o = append(o, 0xa7, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67)
+ o = (*z).Pending.MarshalMsg(o)
+ // string "Period"
+ o = append(o, 0xa6, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = msgp.AppendUint64(o, uint64((*z).Period))
+ // string "Round"
+ o = append(o, 0xa5, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ o = (*z).Round.MarshalMsg(o)
+ // string "Step"
+ o = append(o, 0xa4, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).Step))
+ return
+}
+
+func (_ *player) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*player)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *player) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint64
+ zb0003, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Period")
+ return
+ }
+ (*z).Period = period(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Step")
+ return
+ }
+ (*z).Step = step(zb0004)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0005 uint64
+ zb0005, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastConcluding")
+ return
+ }
+ (*z).LastConcluding = step(zb0005)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Deadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Deadline")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Napping, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Napping")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).FastRecoveryDeadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "FastRecoveryDeadline")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Pending.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = player{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Round":
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "Period":
+ {
+ var zb0006 uint64
+ zb0006, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Period")
+ return
+ }
+ (*z).Period = period(zb0006)
+ }
+ case "Step":
+ {
+ var zb0007 uint64
+ zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Step")
+ return
+ }
+ (*z).Step = step(zb0007)
+ }
+ case "LastConcluding":
+ {
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastConcluding")
+ return
+ }
+ (*z).LastConcluding = step(zb0008)
+ }
+ case "Deadline":
+ (*z).Deadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Deadline")
+ return
+ }
+ case "Napping":
+ (*z).Napping, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Napping")
+ return
+ }
+ case "FastRecoveryDeadline":
+ (*z).FastRecoveryDeadline, bts, err = msgp.ReadDurationBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "FastRecoveryDeadline")
+ return
+ }
+ case "Pending":
+ bts, err = (*z).Pending.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *player) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*player)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *player) Msgsize() (s int) {
+ s = 1 + 6 + (*z).Round.Msgsize() + 7 + msgp.Uint64Size + 5 + msgp.Uint64Size + 15 + msgp.Uint64Size + 9 + msgp.DurationSize + 8 + msgp.BoolSize + 21 + msgp.DurationSize + 8 + (*z).Pending.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *player) MsgIsZero() bool {
+ return ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).LastConcluding == 0) && ((*z).Deadline == 0) && ((*z).Napping == false) && ((*z).FastRecoveryDeadline == 0) && ((*z).Pending.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -2243,6 +4677,1048 @@ func (z *proposal) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *proposalManager) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 0
+ o = append(o, 0x80)
+ return
+}
+
+func (_ *proposalManager) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalManager)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalManager) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = proposalManager{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalManager) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalManager)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalManager) Msgsize() (s int) {
+ s = 1
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalManager) MsgIsZero() bool {
+ return true
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalSeeker) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Filled"
+ o = append(o, 0x83, 0xa6, 0x46, 0x69, 0x6c, 0x6c, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Filled)
+ // string "Frozen"
+ o = append(o, 0xa6, 0x46, 0x72, 0x6f, 0x7a, 0x65, 0x6e)
+ o = msgp.AppendBool(o, (*z).Frozen)
+ // string "Lowest"
+ o = append(o, 0xa6, 0x4c, 0x6f, 0x77, 0x65, 0x73, 0x74)
+ o = (*z).Lowest.MarshalMsg(o)
+ return
+}
+
+func (_ *proposalSeeker) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalSeeker)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalSeeker) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Lowest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Lowest")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Filled")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Frozen, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Frozen")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = proposalSeeker{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Lowest":
+ bts, err = (*z).Lowest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Lowest")
+ return
+ }
+ case "Filled":
+ (*z).Filled, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Filled")
+ return
+ }
+ case "Frozen":
+ (*z).Frozen, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Frozen")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalSeeker) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalSeeker)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalSeeker) Msgsize() (s int) {
+ s = 1 + 7 + (*z).Lowest.Msgsize() + 7 + msgp.BoolSize + 7 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalSeeker) MsgIsZero() bool {
+ return ((*z).Lowest.MsgIsZero()) && ((*z).Filled == false) && ((*z).Frozen == false)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalStore) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Assemblers"
+ o = append(o, 0x83, 0xaa, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x72, 0x73)
+ if (*z).Assemblers == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Assemblers)))
+ }
+ zb0003_keys := make([]proposalValue, 0, len((*z).Assemblers))
+ for zb0003 := range (*z).Assemblers {
+ zb0003_keys = append(zb0003_keys, zb0003)
+ }
+ sort.Sort(SortProposalValue(zb0003_keys))
+ for _, zb0003 := range zb0003_keys {
+ zb0004 := (*z).Assemblers[zb0003]
+ _ = zb0004
+ o = zb0003.MarshalMsg(o)
+ o = zb0004.MarshalMsg(o)
+ }
+ // string "Pinned"
+ o = append(o, 0xa6, 0x50, 0x69, 0x6e, 0x6e, 0x65, 0x64)
+ o = (*z).Pinned.MarshalMsg(o)
+ // string "Relevant"
+ o = append(o, 0xa8, 0x52, 0x65, 0x6c, 0x65, 0x76, 0x61, 0x6e, 0x74)
+ if (*z).Relevant == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Relevant)))
+ }
+ zb0001_keys := make([]period, 0, len((*z).Relevant))
+ for zb0001 := range (*z).Relevant {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortPeriod(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Relevant[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ return
+}
+
+func (_ *proposalStore) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalStore)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalStore) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Relevant")
+ return
+ }
+ if zb0008 {
+ (*z).Relevant = nil
+ } else if (*z).Relevant == nil {
+ (*z).Relevant = make(map[period]proposalValue, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 period
+ var zb0002 proposalValue
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Relevant")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Relevant", zb0001)
+ return
+ }
+ (*z).Relevant[zb0001] = zb0002
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).Pinned.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pinned")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assemblers")
+ return
+ }
+ if zb0010 {
+ (*z).Assemblers = nil
+ } else if (*z).Assemblers == nil {
+ (*z).Assemblers = make(map[proposalValue]blockAssembler, zb0009)
+ }
+ for zb0009 > 0 {
+ var zb0003 proposalValue
+ var zb0004 blockAssembler
+ zb0009--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assemblers")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Assemblers", zb0003)
+ return
+ }
+ (*z).Assemblers[zb0003] = zb0004
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0006 {
+ (*z) = proposalStore{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Relevant":
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Relevant")
+ return
+ }
+ if zb0012 {
+ (*z).Relevant = nil
+ } else if (*z).Relevant == nil {
+ (*z).Relevant = make(map[period]proposalValue, zb0011)
+ }
+ for zb0011 > 0 {
+ var zb0001 period
+ var zb0002 proposalValue
+ zb0011--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Relevant")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Relevant", zb0001)
+ return
+ }
+ (*z).Relevant[zb0001] = zb0002
+ }
+ case "Pinned":
+ bts, err = (*z).Pinned.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pinned")
+ return
+ }
+ case "Assemblers":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assemblers")
+ return
+ }
+ if zb0014 {
+ (*z).Assemblers = nil
+ } else if (*z).Assemblers == nil {
+ (*z).Assemblers = make(map[proposalValue]blockAssembler, zb0013)
+ }
+ for zb0013 > 0 {
+ var zb0003 proposalValue
+ var zb0004 blockAssembler
+ zb0013--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assemblers")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Assemblers", zb0003)
+ return
+ }
+ (*z).Assemblers[zb0003] = zb0004
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalStore) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalStore)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalStore) Msgsize() (s int) {
+ s = 1 + 9 + msgp.MapHeaderSize
+ if (*z).Relevant != nil {
+ for zb0001, zb0002 := range (*z).Relevant {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
+ }
+ }
+ s += 7 + (*z).Pinned.Msgsize() + 11 + msgp.MapHeaderSize
+ if (*z).Assemblers != nil {
+ for zb0003, zb0004 := range (*z).Assemblers {
+ _ = zb0003
+ _ = zb0004
+ s += 0 + zb0003.Msgsize() + zb0004.Msgsize()
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalStore) MsgIsZero() bool {
+ return (len((*z).Relevant) == 0) && ((*z).Pinned.MsgIsZero()) && (len((*z).Assemblers) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalTable) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0003Len := uint32(2)
+ var zb0003Mask uint8 /* 3 bits */
+ if len((*z).Pending) == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x1
+ }
+ if (*z).PendingNext == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x2
+ }
+ // variable map header, size zb0003Len
+ o = append(o, 0x80|uint8(zb0003Len))
+ if zb0003Len != 0 {
+ if (zb0003Mask & 0x1) == 0 { // if not empty
+ // string "Pending"
+ o = append(o, 0xa7, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67)
+ if (*z).Pending == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Pending)))
+ }
+ zb0001_keys := make([]uint64, 0, len((*z).Pending))
+ for zb0001 := range (*z).Pending {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortUint64(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Pending[zb0001]
+ _ = zb0002
+ o = msgp.AppendUint64(o, zb0001)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ }
+ if (zb0003Mask & 0x2) == 0 { // if not empty
+ // string "PendingNext"
+ o = append(o, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4e, 0x65, 0x78, 0x74)
+ o = msgp.AppendUint64(o, (*z).PendingNext)
+ }
+ }
+ return
+}
+
+func (_ *proposalTable) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTable)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalTable) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending")
+ return
+ }
+ if zb0006 {
+ (*z).Pending = nil
+ } else if (*z).Pending == nil {
+ (*z).Pending = make(map[uint64]*messageEvent, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 uint64
+ var zb0002 *messageEvent
+ zb0005--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(messageEvent)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Pending", zb0001)
+ return
+ }
+ }
+ (*z).Pending[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).PendingNext, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PendingNext")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = proposalTable{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Pending":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending")
+ return
+ }
+ if zb0008 {
+ (*z).Pending = nil
+ } else if (*z).Pending == nil {
+ (*z).Pending = make(map[uint64]*messageEvent, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 uint64
+ var zb0002 *messageEvent
+ zb0007--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(messageEvent)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Pending", zb0001)
+ return
+ }
+ }
+ (*z).Pending[zb0001] = zb0002
+ }
+ case "PendingNext":
+ (*z).PendingNext, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PendingNext")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalTable) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTable)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalTable) Msgsize() (s int) {
+ s = 1 + 8 + msgp.MapHeaderSize
+ if (*z).Pending != nil {
+ for zb0001, zb0002 := range (*z).Pending {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + msgp.Uint64Size
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ s += 12 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalTable) MsgIsZero() bool {
+ return (len((*z).Pending) == 0) && ((*z).PendingNext == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalTracker) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Duplicate"
+ o = append(o, 0x83, 0xa9, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
+ if (*z).Duplicate == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Duplicate)))
+ }
+ zb0001_keys := make([]basics.Address, 0, len((*z).Duplicate))
+ for zb0001 := range (*z).Duplicate {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortAddress(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Duplicate[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = msgp.AppendBool(o, zb0002)
+ }
+ // string "Freezer"
+ o = append(o, 0xa7, 0x46, 0x72, 0x65, 0x65, 0x7a, 0x65, 0x72)
+ o = (*z).Freezer.MarshalMsg(o)
+ // string "Staging"
+ o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67)
+ o = (*z).Staging.MarshalMsg(o)
+ return
+}
+
+func (_ *proposalTracker) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTracker)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Duplicate")
+ return
+ }
+ if zb0006 {
+ (*z).Duplicate = nil
+ } else if (*z).Duplicate == nil {
+ (*z).Duplicate = make(map[basics.Address]bool, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 basics.Address
+ var zb0002 bool
+ zb0005--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Duplicate")
+ return
+ }
+ zb0002, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Duplicate", zb0001)
+ return
+ }
+ (*z).Duplicate[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Freezer.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Freezer")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Staging.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Staging")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = proposalTracker{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Duplicate":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Duplicate")
+ return
+ }
+ if zb0008 {
+ (*z).Duplicate = nil
+ } else if (*z).Duplicate == nil {
+ (*z).Duplicate = make(map[basics.Address]bool, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 basics.Address
+ var zb0002 bool
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Duplicate")
+ return
+ }
+ zb0002, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Duplicate", zb0001)
+ return
+ }
+ (*z).Duplicate[zb0001] = zb0002
+ }
+ case "Freezer":
+ bts, err = (*z).Freezer.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Freezer")
+ return
+ }
+ case "Staging":
+ bts, err = (*z).Staging.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Staging")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalTracker) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTracker)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalTracker) Msgsize() (s int) {
+ s = 1 + 10 + msgp.MapHeaderSize
+ if (*z).Duplicate != nil {
+ for zb0001, zb0002 := range (*z).Duplicate {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + msgp.BoolSize
+ }
+ }
+ s += 8 + (*z).Freezer.Msgsize() + 8 + (*z).Staging.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalTracker) MsgIsZero() bool {
+ return (len((*z).Duplicate) == 0) && ((*z).Freezer.MsgIsZero()) && ((*z).Staging.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *proposalTrackerContract) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "Froze"
+ o = append(o, 0x84, 0xa5, 0x46, 0x72, 0x6f, 0x7a, 0x65)
+ o = msgp.AppendBool(o, (*z).Froze)
+ // string "SawCertThreshold"
+ o = append(o, 0xb0, 0x53, 0x61, 0x77, 0x43, 0x65, 0x72, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64)
+ o = msgp.AppendBool(o, (*z).SawCertThreshold)
+ // string "SawOneVote"
+ o = append(o, 0xaa, 0x53, 0x61, 0x77, 0x4f, 0x6e, 0x65, 0x56, 0x6f, 0x74, 0x65)
+ o = msgp.AppendBool(o, (*z).SawOneVote)
+ // string "SawSoftThreshold"
+ o = append(o, 0xb0, 0x53, 0x61, 0x77, 0x53, 0x6f, 0x66, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64)
+ o = msgp.AppendBool(o, (*z).SawSoftThreshold)
+ return
+}
+
+func (_ *proposalTrackerContract) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTrackerContract)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalTrackerContract) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).SawOneVote, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SawOneVote")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Froze, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Froze")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).SawSoftThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SawSoftThreshold")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).SawCertThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SawCertThreshold")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = proposalTrackerContract{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "SawOneVote":
+ (*z).SawOneVote, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SawOneVote")
+ return
+ }
+ case "Froze":
+ (*z).Froze, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Froze")
+ return
+ }
+ case "SawSoftThreshold":
+ (*z).SawSoftThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SawSoftThreshold")
+ return
+ }
+ case "SawCertThreshold":
+ (*z).SawCertThreshold, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SawCertThreshold")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalTrackerContract) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalTrackerContract)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalTrackerContract) Msgsize() (s int) {
+ s = 1 + 11 + msgp.BoolSize + 6 + msgp.BoolSize + 17 + msgp.BoolSize + 17 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalTrackerContract) MsgIsZero() bool {
+ return ((*z).SawOneVote == false) && ((*z).Froze == false) && ((*z).SawSoftThreshold == false) && ((*z).SawCertThreshold == false)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *proposalValue) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -2426,6 +5902,185 @@ func (z *proposalValue) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *proposalVoteCounter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Count"
+ o = append(o, 0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).Count)
+ // string "Votes"
+ o = append(o, 0xa5, 0x56, 0x6f, 0x74, 0x65, 0x73)
+ if (*z).Votes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Votes)))
+ }
+ zb0001_keys := make([]basics.Address, 0, len((*z).Votes))
+ for zb0001 := range (*z).Votes {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortAddress(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Votes[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ return
+}
+
+func (_ *proposalVoteCounter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalVoteCounter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *proposalVoteCounter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Count, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Count")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Votes")
+ return
+ }
+ if zb0006 {
+ (*z).Votes = nil
+ } else if (*z).Votes == nil {
+ (*z).Votes = make(map[basics.Address]vote, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0005--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Votes")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Votes", zb0001)
+ return
+ }
+ (*z).Votes[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = proposalVoteCounter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Count":
+ (*z).Count, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Count")
+ return
+ }
+ case "Votes":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Votes")
+ return
+ }
+ if zb0008 {
+ (*z).Votes = nil
+ } else if (*z).Votes == nil {
+ (*z).Votes = make(map[basics.Address]vote, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Votes")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Votes", zb0001)
+ return
+ }
+ (*z).Votes[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *proposalVoteCounter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*proposalVoteCounter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *proposalVoteCounter) Msgsize() (s int) {
+ s = 1 + 6 + msgp.Uint64Size + 6 + msgp.MapHeaderSize
+ if (*z).Votes != nil {
+ for zb0001, zb0002 := range (*z).Votes {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *proposalVoteCounter) MsgIsZero() bool {
+ return ((*z).Count == 0) && (len((*z).Votes) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *proposerSeed) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@@ -2751,6 +6406,740 @@ func (z *rawVote) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *rootRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Children"
+ o = append(o, 0x83, 0xa8, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e)
+ if (*z).Children == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Children)))
+ }
+ zb0001_keys := make([]round, 0, len((*z).Children))
+ for zb0001 := range (*z).Children {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortRound(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Children[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ // string "ProposalManager"
+ o = append(o, 0xaf, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72)
+ // map header, size 0
+ o = append(o, 0x80)
+ // string "VoteAggregator"
+ o = append(o, 0xae, 0x56, 0x6f, 0x74, 0x65, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72)
+ // map header, size 0
+ o = append(o, 0x80)
+ return
+}
+
+func (_ *rootRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*rootRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *rootRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ if zb0006 {
+ (*z).ProposalManager = proposalManager{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalManager")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ if zb0007 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0007)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ if zb0008 {
+ (*z).VoteAggregator = voteAggregator{}
+ }
+ for zb0007 > 0 {
+ zb0007--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteAggregator")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if zb0010 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[round]*roundRouter, zb0009)
+ }
+ for zb0009 > 0 {
+ var zb0001 round
+ var zb0002 *roundRouter
+ zb0009--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(roundRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = rootRouter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ProposalManager":
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ if zb0011 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0011)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ if zb0012 {
+ (*z).ProposalManager = proposalManager{}
+ }
+ for zb0011 > 0 {
+ zb0011--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalManager")
+ return
+ }
+ }
+ }
+ }
+ case "VoteAggregator":
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ if zb0013 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0013)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ if zb0014 {
+ (*z).VoteAggregator = voteAggregator{}
+ }
+ for zb0013 > 0 {
+ zb0013--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "VoteAggregator")
+ return
+ }
+ }
+ }
+ }
+ case "Children":
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if zb0016 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[round]*roundRouter, zb0015)
+ }
+ for zb0015 > 0 {
+ var zb0001 round
+ var zb0002 *roundRouter
+ zb0015--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(roundRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *rootRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*rootRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *rootRouter) Msgsize() (s int) {
+ s = 1 + 16 + 1 + 15 + 1 + 9 + msgp.MapHeaderSize
+ if (*z).Children != nil {
+ for zb0001, zb0002 := range (*z).Children {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize()
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *rootRouter) MsgIsZero() bool {
+ return (len((*z).Children) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *roundRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Children"
+ o = append(o, 0x83, 0xa8, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e)
+ if (*z).Children == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Children)))
+ }
+ zb0001_keys := make([]period, 0, len((*z).Children))
+ for zb0001 := range (*z).Children {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortPeriod(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Children[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ if zb0002 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ // string "ProposalStore"
+ o = append(o, 0xad, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x65)
+ o = (*z).ProposalStore.MarshalMsg(o)
+ // string "VoteTrackerRound"
+ o = append(o, 0xb0, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ // map header, size 2
+ // string "Freshest"
+ o = append(o, 0x82, 0xa8, 0x46, 0x72, 0x65, 0x73, 0x68, 0x65, 0x73, 0x74)
+ o = (*z).VoteTrackerRound.Freshest.MarshalMsg(o)
+ // string "Ok"
+ o = append(o, 0xa2, 0x4f, 0x6b)
+ o = msgp.AppendBool(o, (*z).VoteTrackerRound.Ok)
+ return
+}
+
+func (_ *roundRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*roundRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *roundRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).ProposalStore.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProposalStore")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "struct-from-array", "Freshest")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "struct-from-array", "Ok")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ if zb0006 {
+ (*z).VoteTrackerRound = voteTrackerRound{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ switch string(field) {
+ case "Freshest":
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "Freshest")
+ return
+ }
+ case "Ok":
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound", "Ok")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerRound")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if zb0008 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[period]*periodRouter, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 period
+ var zb0002 *periodRouter
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(periodRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = roundRouter{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ProposalStore":
+ bts, err = (*z).ProposalStore.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProposalStore")
+ return
+ }
+ case "VoteTrackerRound":
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ if zb0009 > 0 {
+ zb0009--
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "struct-from-array", "Freshest")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "struct-from-array", "Ok")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0009)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ if zb0010 {
+ (*z).VoteTrackerRound = voteTrackerRound{}
+ }
+ for zb0009 > 0 {
+ zb0009--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ switch string(field) {
+ case "Freshest":
+ bts, err = (*z).VoteTrackerRound.Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "Freshest")
+ return
+ }
+ case "Ok":
+ (*z).VoteTrackerRound.Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound", "Ok")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerRound")
+ return
+ }
+ }
+ }
+ }
+ case "Children":
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if zb0012 {
+ (*z).Children = nil
+ } else if (*z).Children == nil {
+ (*z).Children = make(map[period]*periodRouter, zb0011)
+ }
+ for zb0011 > 0 {
+ var zb0001 period
+ var zb0002 *periodRouter
+ zb0011--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ zb0002 = nil
+ } else {
+ if zb0002 == nil {
+ zb0002 = new(periodRouter)
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Children", zb0001)
+ return
+ }
+ }
+ (*z).Children[zb0001] = zb0002
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *roundRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*roundRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *roundRouter) Msgsize() (s int) {
+ s = 1 + 14 + (*z).ProposalStore.Msgsize() + 17 + 1 + 9 + (*z).VoteTrackerRound.Freshest.Msgsize() + 3 + msgp.BoolSize + 9 + msgp.MapHeaderSize
+ if (*z).Children != nil {
+ for zb0001, zb0002 := range (*z).Children {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize()
+ if zb0002 == nil {
+ s += msgp.NilSize
+ } else {
+ s += zb0002.Msgsize()
+ }
+ }
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *roundRouter) MsgIsZero() bool {
+ return ((*z).ProposalStore.MsgIsZero()) && (((*z).VoteTrackerRound.Freshest.MsgIsZero()) && ((*z).VoteTrackerRound.Ok == false)) && (len((*z).Children) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *seedInput) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
@@ -3023,22 +7412,22 @@ func (z *selector) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z serializableErrorUnderlying) MarshalMsg(b []byte) (o []byte) {
+func (z serializableError) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
-func (_ serializableErrorUnderlying) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(serializableErrorUnderlying)
+func (_ serializableError) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(serializableError)
if !ok {
- _, ok = (z).(*serializableErrorUnderlying)
+ _, ok = (z).(*serializableError)
}
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *serializableErrorUnderlying) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *serializableError) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
@@ -3046,25 +7435,25 @@ func (z *serializableErrorUnderlying) UnmarshalMsg(bts []byte) (o []byte, err er
err = msgp.WrapError(err)
return
}
- (*z) = serializableErrorUnderlying(zb0001)
+ (*z) = serializableError(zb0001)
}
o = bts
return
}
-func (_ *serializableErrorUnderlying) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*serializableErrorUnderlying)
+func (_ *serializableError) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*serializableError)
return ok
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z serializableErrorUnderlying) Msgsize() (s int) {
+func (z serializableError) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MsgIsZero returns whether this is a zero value
-func (z serializableErrorUnderlying) MsgIsZero() bool {
+func (z serializableError) MsgIsZero() bool {
return z == ""
}
@@ -3115,6 +7504,337 @@ func (z step) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *stepRouter) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "VoteTracker"
+ o = append(o, 0x82, 0xab, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72)
+ o = (*z).VoteTracker.MarshalMsg(o)
+ // string "VoteTrackerContract"
+ o = append(o, 0xb3, 0x56, 0x6f, 0x74, 0x65, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74)
+ o = (*z).VoteTrackerContract.MarshalMsg(o)
+ return
+}
+
+func (_ *stepRouter) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*stepRouter)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *stepRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTracker")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteTrackerContract")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = stepRouter{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "VoteTracker":
+ bts, err = (*z).VoteTracker.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTracker")
+ return
+ }
+ case "VoteTrackerContract":
+ bts, err = (*z).VoteTrackerContract.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteTrackerContract")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *stepRouter) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*stepRouter)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *stepRouter) Msgsize() (s int) {
+ s = 1 + 12 + (*z).VoteTracker.Msgsize() + 20 + (*z).VoteTrackerContract.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *stepRouter) MsgIsZero() bool {
+ return ((*z).VoteTracker.MsgIsZero()) && ((*z).VoteTrackerContract.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *thresholdEvent) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 7
+ // string "Bundle"
+ o = append(o, 0x87, 0xa6, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65)
+ o = (*z).Bundle.MarshalMsg(o)
+ // string "Period"
+ o = append(o, 0xa6, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64)
+ o = msgp.AppendUint64(o, uint64((*z).Period))
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Proposal.MarshalMsg(o)
+ // string "Proto"
+ o = append(o, 0xa5, 0x50, 0x72, 0x6f, 0x74, 0x6f)
+ o = (*z).Proto.MarshalMsg(o)
+ // string "Round"
+ o = append(o, 0xa5, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ o = (*z).Round.MarshalMsg(o)
+ // string "Step"
+ o = append(o, 0xa4, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).Step))
+ // string "T"
+ o = append(o, 0xa1, 0x54)
+ o = msgp.AppendUint8(o, uint8((*z).T))
+ return
+}
+
+func (_ *thresholdEvent) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*thresholdEvent)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *thresholdEvent) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint8
+ zb0003, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "T")
+ return
+ }
+ (*z).T = eventType(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Period")
+ return
+ }
+ (*z).Period = period(zb0004)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0005 uint64
+ zb0005, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Step")
+ return
+ }
+ (*z).Step = step(zb0005)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Bundle")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Proto")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = thresholdEvent{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "T":
+ {
+ var zb0006 uint8
+ zb0006, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "T")
+ return
+ }
+ (*z).T = eventType(zb0006)
+ }
+ case "Round":
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "Period":
+ {
+ var zb0007 uint64
+ zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Period")
+ return
+ }
+ (*z).Period = period(zb0007)
+ }
+ case "Step":
+ {
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Step")
+ return
+ }
+ (*z).Step = step(zb0008)
+ }
+ case "Proposal":
+ bts, err = (*z).Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proposal")
+ return
+ }
+ case "Bundle":
+ bts, err = (*z).Bundle.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Bundle")
+ return
+ }
+ case "Proto":
+ bts, err = (*z).Proto.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Proto")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *thresholdEvent) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*thresholdEvent)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *thresholdEvent) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 6 + (*z).Round.Msgsize() + 7 + msgp.Uint64Size + 5 + msgp.Uint64Size + 9 + (*z).Proposal.Msgsize() + 7 + (*z).Bundle.Msgsize() + 6 + (*z).Proto.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *thresholdEvent) MsgIsZero() bool {
+ return ((*z).T == 0) && ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).Proto.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -5885,6 +10605,84 @@ func (z *vote) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *voteAggregator) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 0
+ o = append(o, 0x80)
+ return
+}
+
+func (_ *voteAggregator) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteAggregator)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteAggregator) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteAggregator{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteAggregator) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteAggregator)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteAggregator) Msgsize() (s int) {
+ s = 1
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteAggregator) MsgIsZero() bool {
+ return true
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *voteAuthenticator) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -6023,3 +10821,830 @@ func (z *voteAuthenticator) Msgsize() (s int) {
func (z *voteAuthenticator) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Cred.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTracker) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "Counts"
+ o = append(o, 0x84, 0xa6, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73)
+ if (*z).Counts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Counts)))
+ }
+ zb0003_keys := make([]proposalValue, 0, len((*z).Counts))
+ for zb0003 := range (*z).Counts {
+ zb0003_keys = append(zb0003_keys, zb0003)
+ }
+ sort.Sort(SortProposalValue(zb0003_keys))
+ for _, zb0003 := range zb0003_keys {
+ zb0004 := (*z).Counts[zb0003]
+ _ = zb0004
+ o = zb0003.MarshalMsg(o)
+ o = zb0004.MarshalMsg(o)
+ }
+ // string "Equivocators"
+ o = append(o, 0xac, 0x45, 0x71, 0x75, 0x69, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73)
+ if (*z).Equivocators == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Equivocators)))
+ }
+ zb0005_keys := make([]basics.Address, 0, len((*z).Equivocators))
+ for zb0005 := range (*z).Equivocators {
+ zb0005_keys = append(zb0005_keys, zb0005)
+ }
+ sort.Sort(SortAddress(zb0005_keys))
+ for _, zb0005 := range zb0005_keys {
+ zb0006 := (*z).Equivocators[zb0005]
+ _ = zb0006
+ o = zb0005.MarshalMsg(o)
+ o = zb0006.MarshalMsg(o)
+ }
+ // string "EquivocatorsCount"
+ o = append(o, 0xb1, 0x45, 0x71, 0x75, 0x69, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
+ o = msgp.AppendUint64(o, (*z).EquivocatorsCount)
+ // string "Voters"
+ o = append(o, 0xa6, 0x56, 0x6f, 0x74, 0x65, 0x72, 0x73)
+ if (*z).Voters == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Voters)))
+ }
+ zb0001_keys := make([]basics.Address, 0, len((*z).Voters))
+ for zb0001 := range (*z).Voters {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortAddress(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Voters[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = zb0002.MarshalMsg(o)
+ }
+ return
+}
+
+func (_ *voteTracker) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTracker)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Voters")
+ return
+ }
+ if zb0010 {
+ (*z).Voters = nil
+ } else if (*z).Voters == nil {
+ (*z).Voters = make(map[basics.Address]vote, zb0009)
+ }
+ for zb0009 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0009--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Voters")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Voters", zb0001)
+ return
+ }
+ (*z).Voters[zb0001] = zb0002
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Counts")
+ return
+ }
+ if zb0012 {
+ (*z).Counts = nil
+ } else if (*z).Counts == nil {
+ (*z).Counts = make(map[proposalValue]proposalVoteCounter, zb0011)
+ }
+ for zb0011 > 0 {
+ var zb0003 proposalValue
+ var zb0004 proposalVoteCounter
+ zb0011--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Counts")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Counts", zb0003)
+ return
+ }
+ (*z).Counts[zb0003] = zb0004
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Equivocators")
+ return
+ }
+ if zb0014 {
+ (*z).Equivocators = nil
+ } else if (*z).Equivocators == nil {
+ (*z).Equivocators = make(map[basics.Address]equivocationVote, zb0013)
+ }
+ for zb0013 > 0 {
+ var zb0005 basics.Address
+ var zb0006 equivocationVote
+ zb0013--
+ bts, err = zb0005.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Equivocators")
+ return
+ }
+ bts, err = zb0006.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Equivocators", zb0005)
+ return
+ }
+ (*z).Equivocators[zb0005] = zb0006
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ (*z).EquivocatorsCount, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "EquivocatorsCount")
+ return
+ }
+ }
+ if zb0007 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0007)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0008 {
+ (*z) = voteTracker{}
+ }
+ for zb0007 > 0 {
+ zb0007--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Voters":
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Voters")
+ return
+ }
+ if zb0016 {
+ (*z).Voters = nil
+ } else if (*z).Voters == nil {
+ (*z).Voters = make(map[basics.Address]vote, zb0015)
+ }
+ for zb0015 > 0 {
+ var zb0001 basics.Address
+ var zb0002 vote
+ zb0015--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Voters")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Voters", zb0001)
+ return
+ }
+ (*z).Voters[zb0001] = zb0002
+ }
+ case "Counts":
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Counts")
+ return
+ }
+ if zb0018 {
+ (*z).Counts = nil
+ } else if (*z).Counts == nil {
+ (*z).Counts = make(map[proposalValue]proposalVoteCounter, zb0017)
+ }
+ for zb0017 > 0 {
+ var zb0003 proposalValue
+ var zb0004 proposalVoteCounter
+ zb0017--
+ bts, err = zb0003.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Counts")
+ return
+ }
+ bts, err = zb0004.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Counts", zb0003)
+ return
+ }
+ (*z).Counts[zb0003] = zb0004
+ }
+ case "Equivocators":
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Equivocators")
+ return
+ }
+ if zb0020 {
+ (*z).Equivocators = nil
+ } else if (*z).Equivocators == nil {
+ (*z).Equivocators = make(map[basics.Address]equivocationVote, zb0019)
+ }
+ for zb0019 > 0 {
+ var zb0005 basics.Address
+ var zb0006 equivocationVote
+ zb0019--
+ bts, err = zb0005.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Equivocators")
+ return
+ }
+ bts, err = zb0006.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Equivocators", zb0005)
+ return
+ }
+ (*z).Equivocators[zb0005] = zb0006
+ }
+ case "EquivocatorsCount":
+ (*z).EquivocatorsCount, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "EquivocatorsCount")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTracker) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTracker)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTracker) Msgsize() (s int) {
+ s = 1 + 7 + msgp.MapHeaderSize
+ if (*z).Voters != nil {
+ for zb0001, zb0002 := range (*z).Voters {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + zb0002.Msgsize()
+ }
+ }
+ s += 7 + msgp.MapHeaderSize
+ if (*z).Counts != nil {
+ for zb0003, zb0004 := range (*z).Counts {
+ _ = zb0003
+ _ = zb0004
+ s += 0 + zb0003.Msgsize() + zb0004.Msgsize()
+ }
+ }
+ s += 13 + msgp.MapHeaderSize
+ if (*z).Equivocators != nil {
+ for zb0005, zb0006 := range (*z).Equivocators {
+ _ = zb0005
+ _ = zb0006
+ s += 0 + zb0005.Msgsize() + zb0006.Msgsize()
+ }
+ }
+ s += 18 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTracker) MsgIsZero() bool {
+ return (len((*z).Voters) == 0) && (len((*z).Counts) == 0) && (len((*z).Equivocators) == 0) && ((*z).EquivocatorsCount == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTrackerContract) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 3
+ // string "Emitted"
+ o = append(o, 0x83, 0xa7, 0x45, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64)
+ o = msgp.AppendBool(o, (*z).Emitted)
+ // string "Step"
+ o = append(o, 0xa4, 0x53, 0x74, 0x65, 0x70)
+ o = msgp.AppendUint64(o, uint64((*z).Step))
+ // string "StepOk"
+ o = append(o, 0xa6, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x6b)
+ o = msgp.AppendBool(o, (*z).StepOk)
+ return
+}
+
+func (_ *voteTrackerContract) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerContract)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTrackerContract) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ {
+ var zb0003 uint64
+ zb0003, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Step")
+ return
+ }
+ (*z).Step = step(zb0003)
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).StepOk, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StepOk")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Emitted, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Emitted")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteTrackerContract{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Step":
+ {
+ var zb0004 uint64
+ zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Step")
+ return
+ }
+ (*z).Step = step(zb0004)
+ }
+ case "StepOk":
+ (*z).StepOk, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StepOk")
+ return
+ }
+ case "Emitted":
+ (*z).Emitted, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Emitted")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTrackerContract) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerContract)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTrackerContract) Msgsize() (s int) {
+ s = 1 + 5 + msgp.Uint64Size + 7 + msgp.BoolSize + 8 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTrackerContract) MsgIsZero() bool {
+ return ((*z).Step == 0) && ((*z).StepOk == false) && ((*z).Emitted == false)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTrackerPeriod) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 1
+ // string "Cached"
+ o = append(o, 0x81, 0xa6, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64)
+ // map header, size 2
+ // string "Bottom"
+ o = append(o, 0x82, 0xa6, 0x42, 0x6f, 0x74, 0x74, 0x6f, 0x6d)
+ o = msgp.AppendBool(o, (*z).Cached.Bottom)
+ // string "Proposal"
+ o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c)
+ o = (*z).Cached.Proposal.MarshalMsg(o)
+ return
+}
+
+func (_ *voteTrackerPeriod) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerPeriod)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTrackerPeriod) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "struct-from-array", "Bottom")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ if zb0004 {
+ (*z).Cached = nextThresholdStatusEvent{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ switch string(field) {
+ case "Bottom":
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "Bottom")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Cached")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteTrackerPeriod{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Cached":
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "struct-from-array", "Bottom")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ zb0005--
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "struct-from-array", "Proposal")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ if zb0006 {
+ (*z).Cached = nextThresholdStatusEvent{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ switch string(field) {
+ case "Bottom":
+ (*z).Cached.Bottom, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "Bottom")
+ return
+ }
+ case "Proposal":
+ bts, err = (*z).Cached.Proposal.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Cached", "Proposal")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Cached")
+ return
+ }
+ }
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTrackerPeriod) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerPeriod)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTrackerPeriod) Msgsize() (s int) {
+ s = 1 + 7 + 1 + 7 + msgp.BoolSize + 9 + (*z).Cached.Proposal.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTrackerPeriod) MsgIsZero() bool {
+ return (((*z).Cached.Bottom == false) && ((*z).Cached.Proposal.MsgIsZero()))
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *voteTrackerRound) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "Freshest"
+ o = append(o, 0x82, 0xa8, 0x46, 0x72, 0x65, 0x73, 0x68, 0x65, 0x73, 0x74)
+ o = (*z).Freshest.MarshalMsg(o)
+ // string "Ok"
+ o = append(o, 0xa2, 0x4f, 0x6b)
+ o = msgp.AppendBool(o, (*z).Ok)
+ return
+}
+
+func (_ *voteTrackerRound) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerRound)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *voteTrackerRound) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Freshest")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Ok")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = voteTrackerRound{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Freshest":
+ bts, err = (*z).Freshest.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Freshest")
+ return
+ }
+ case "Ok":
+ (*z).Ok, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Ok")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *voteTrackerRound) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*voteTrackerRound)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *voteTrackerRound) Msgsize() (s int) {
+ s = 1 + 9 + (*z).Freshest.Msgsize() + 3 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *voteTrackerRound) MsgIsZero() bool {
+ return ((*z).Freshest.MsgIsZero()) && ((*z).Ok == false)
+}
diff --git a/agreement/msgp_gen_test.go b/agreement/msgp_gen_test.go
index 0231cc28a..99053ca4c 100644
--- a/agreement/msgp_gen_test.go
+++ b/agreement/msgp_gen_test.go
@@ -74,6 +74,126 @@ func BenchmarkUnmarshalCertificate(b *testing.B) {
}
}
+func TestMarshalUnmarshalConsensusVersionView(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := ConsensusVersionView{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingConsensusVersionView(t *testing.T) {
+ protocol.RunEncodingTest(t, &ConsensusVersionView{})
+}
+
+func BenchmarkMarshalMsgConsensusVersionView(b *testing.B) {
+ v := ConsensusVersionView{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgConsensusVersionView(b *testing.B) {
+ v := ConsensusVersionView{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalConsensusVersionView(b *testing.B) {
+ v := ConsensusVersionView{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalblockAssembler(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := blockAssembler{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingblockAssembler(t *testing.T) {
+ protocol.RunEncodingTest(t, &blockAssembler{})
+}
+
+func BenchmarkMarshalMsgblockAssembler(b *testing.B) {
+ v := blockAssembler{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgblockAssembler(b *testing.B) {
+ v := blockAssembler{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalblockAssembler(b *testing.B) {
+ v := blockAssembler{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalbundle(t *testing.T) {
partitiontest.PartitionTest(t)
v := bundle{}
@@ -134,6 +254,126 @@ func BenchmarkUnmarshalbundle(b *testing.B) {
}
}
+func TestMarshalUnmarshalcompoundMessage(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := compoundMessage{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingcompoundMessage(t *testing.T) {
+ protocol.RunEncodingTest(t, &compoundMessage{})
+}
+
+func BenchmarkMarshalMsgcompoundMessage(b *testing.B) {
+ v := compoundMessage{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgcompoundMessage(b *testing.B) {
+ v := compoundMessage{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalcompoundMessage(b *testing.B) {
+ v := compoundMessage{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshaldiskState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := diskState{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingdiskState(t *testing.T) {
+ protocol.RunEncodingTest(t, &diskState{})
+}
+
+func BenchmarkMarshalMsgdiskState(b *testing.B) {
+ v := diskState{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgdiskState(b *testing.B) {
+ v := diskState{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshaldiskState(b *testing.B) {
+ v := diskState{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalequivocationVote(t *testing.T) {
partitiontest.PartitionTest(t)
v := equivocationVote{}
@@ -254,6 +494,366 @@ func BenchmarkUnmarshalequivocationVoteAuthenticator(b *testing.B) {
}
}
+func TestMarshalUnmarshalfreshnessData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := freshnessData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingfreshnessData(t *testing.T) {
+ protocol.RunEncodingTest(t, &freshnessData{})
+}
+
+func BenchmarkMarshalMsgfreshnessData(b *testing.B) {
+ v := freshnessData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgfreshnessData(b *testing.B) {
+ v := freshnessData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalfreshnessData(b *testing.B) {
+ v := freshnessData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalmessage(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := message{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingmessage(t *testing.T) {
+ protocol.RunEncodingTest(t, &message{})
+}
+
+func BenchmarkMarshalMsgmessage(b *testing.B) {
+ v := message{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgmessage(b *testing.B) {
+ v := message{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalmessage(b *testing.B) {
+ v := message{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalmessageEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := messageEvent{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingmessageEvent(t *testing.T) {
+ protocol.RunEncodingTest(t, &messageEvent{})
+}
+
+func BenchmarkMarshalMsgmessageEvent(b *testing.B) {
+ v := messageEvent{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgmessageEvent(b *testing.B) {
+ v := messageEvent{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalmessageEvent(b *testing.B) {
+ v := messageEvent{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalnextThresholdStatusEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := nextThresholdStatusEvent{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingnextThresholdStatusEvent(t *testing.T) {
+ protocol.RunEncodingTest(t, &nextThresholdStatusEvent{})
+}
+
+func BenchmarkMarshalMsgnextThresholdStatusEvent(b *testing.B) {
+ v := nextThresholdStatusEvent{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgnextThresholdStatusEvent(b *testing.B) {
+ v := nextThresholdStatusEvent{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalnextThresholdStatusEvent(b *testing.B) {
+ v := nextThresholdStatusEvent{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalperiodRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := periodRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingperiodRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &periodRouter{})
+}
+
+func BenchmarkMarshalMsgperiodRouter(b *testing.B) {
+ v := periodRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgperiodRouter(b *testing.B) {
+ v := periodRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalperiodRouter(b *testing.B) {
+ v := periodRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalplayer(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := player{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingplayer(t *testing.T) {
+ protocol.RunEncodingTest(t, &player{})
+}
+
+func BenchmarkMarshalMsgplayer(b *testing.B) {
+ v := player{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgplayer(b *testing.B) {
+ v := player{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalplayer(b *testing.B) {
+ v := player{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalproposal(t *testing.T) {
partitiontest.PartitionTest(t)
v := proposal{}
@@ -314,6 +914,366 @@ func BenchmarkUnmarshalproposal(b *testing.B) {
}
}
+func TestMarshalUnmarshalproposalManager(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalManager{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalManager(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalManager{})
+}
+
+func BenchmarkMarshalMsgproposalManager(b *testing.B) {
+ v := proposalManager{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalManager(b *testing.B) {
+ v := proposalManager{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalManager(b *testing.B) {
+ v := proposalManager{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalSeeker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalSeeker{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalSeeker(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalSeeker{})
+}
+
+func BenchmarkMarshalMsgproposalSeeker(b *testing.B) {
+ v := proposalSeeker{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalSeeker(b *testing.B) {
+ v := proposalSeeker{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalSeeker(b *testing.B) {
+ v := proposalSeeker{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalStore(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalStore{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalStore(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalStore{})
+}
+
+func BenchmarkMarshalMsgproposalStore(b *testing.B) {
+ v := proposalStore{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalStore(b *testing.B) {
+ v := proposalStore{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalStore(b *testing.B) {
+ v := proposalStore{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalTable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalTable{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalTable(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalTable{})
+}
+
+func BenchmarkMarshalMsgproposalTable(b *testing.B) {
+ v := proposalTable{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalTable(b *testing.B) {
+ v := proposalTable{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalTable(b *testing.B) {
+ v := proposalTable{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalTracker{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalTracker(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalTracker{})
+}
+
+func BenchmarkMarshalMsgproposalTracker(b *testing.B) {
+ v := proposalTracker{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalTracker(b *testing.B) {
+ v := proposalTracker{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalTracker(b *testing.B) {
+ v := proposalTracker{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalproposalTrackerContract(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalTrackerContract(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalTrackerContract{})
+}
+
+func BenchmarkMarshalMsgproposalTrackerContract(b *testing.B) {
+ v := proposalTrackerContract{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalTrackerContract(b *testing.B) {
+ v := proposalTrackerContract{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalTrackerContract(b *testing.B) {
+ v := proposalTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalproposalValue(t *testing.T) {
partitiontest.PartitionTest(t)
v := proposalValue{}
@@ -374,6 +1334,66 @@ func BenchmarkUnmarshalproposalValue(b *testing.B) {
}
}
+func TestMarshalUnmarshalproposalVoteCounter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := proposalVoteCounter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingproposalVoteCounter(t *testing.T) {
+ protocol.RunEncodingTest(t, &proposalVoteCounter{})
+}
+
+func BenchmarkMarshalMsgproposalVoteCounter(b *testing.B) {
+ v := proposalVoteCounter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgproposalVoteCounter(b *testing.B) {
+ v := proposalVoteCounter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalproposalVoteCounter(b *testing.B) {
+ v := proposalVoteCounter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalproposerSeed(t *testing.T) {
partitiontest.PartitionTest(t)
v := proposerSeed{}
@@ -494,6 +1514,126 @@ func BenchmarkUnmarshalrawVote(b *testing.B) {
}
}
+func TestMarshalUnmarshalrootRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := rootRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingrootRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &rootRouter{})
+}
+
+func BenchmarkMarshalMsgrootRouter(b *testing.B) {
+ v := rootRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgrootRouter(b *testing.B) {
+ v := rootRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalrootRouter(b *testing.B) {
+ v := rootRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalroundRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := roundRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingroundRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &roundRouter{})
+}
+
+func BenchmarkMarshalMsgroundRouter(b *testing.B) {
+ v := roundRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgroundRouter(b *testing.B) {
+ v := roundRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalroundRouter(b *testing.B) {
+ v := roundRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalseedInput(t *testing.T) {
partitiontest.PartitionTest(t)
v := seedInput{}
@@ -614,6 +1754,126 @@ func BenchmarkUnmarshalselector(b *testing.B) {
}
}
+func TestMarshalUnmarshalstepRouter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := stepRouter{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingstepRouter(t *testing.T) {
+ protocol.RunEncodingTest(t, &stepRouter{})
+}
+
+func BenchmarkMarshalMsgstepRouter(b *testing.B) {
+ v := stepRouter{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgstepRouter(b *testing.B) {
+ v := stepRouter{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalstepRouter(b *testing.B) {
+ v := stepRouter{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalthresholdEvent(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := thresholdEvent{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingthresholdEvent(t *testing.T) {
+ protocol.RunEncodingTest(t, &thresholdEvent{})
+}
+
+func BenchmarkMarshalMsgthresholdEvent(b *testing.B) {
+ v := thresholdEvent{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgthresholdEvent(b *testing.B) {
+ v := thresholdEvent{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalthresholdEvent(b *testing.B) {
+ v := thresholdEvent{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshaltransmittedPayload(t *testing.T) {
partitiontest.PartitionTest(t)
v := transmittedPayload{}
@@ -974,6 +2234,66 @@ func BenchmarkUnmarshalvote(b *testing.B) {
}
}
+func TestMarshalUnmarshalvoteAggregator(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteAggregator{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteAggregator(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteAggregator{})
+}
+
+func BenchmarkMarshalMsgvoteAggregator(b *testing.B) {
+ v := voteAggregator{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteAggregator(b *testing.B) {
+ v := voteAggregator{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteAggregator(b *testing.B) {
+ v := voteAggregator{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalvoteAuthenticator(t *testing.T) {
partitiontest.PartitionTest(t)
v := voteAuthenticator{}
@@ -1033,3 +2353,243 @@ func BenchmarkUnmarshalvoteAuthenticator(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshalvoteTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTracker{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTracker(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTracker{})
+}
+
+func BenchmarkMarshalMsgvoteTracker(b *testing.B) {
+ v := voteTracker{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTracker(b *testing.B) {
+ v := voteTracker{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTracker(b *testing.B) {
+ v := voteTracker{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalvoteTrackerContract(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTrackerContract(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTrackerContract{})
+}
+
+func BenchmarkMarshalMsgvoteTrackerContract(b *testing.B) {
+ v := voteTrackerContract{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTrackerContract(b *testing.B) {
+ v := voteTrackerContract{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTrackerContract(b *testing.B) {
+ v := voteTrackerContract{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalvoteTrackerPeriod(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTrackerPeriod{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTrackerPeriod(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTrackerPeriod{})
+}
+
+func BenchmarkMarshalMsgvoteTrackerPeriod(b *testing.B) {
+ v := voteTrackerPeriod{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTrackerPeriod(b *testing.B) {
+ v := voteTrackerPeriod{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTrackerPeriod(b *testing.B) {
+ v := voteTrackerPeriod{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalvoteTrackerRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := voteTrackerRound{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingvoteTrackerRound(t *testing.T) {
+ protocol.RunEncodingTest(t, &voteTrackerRound{})
+}
+
+func BenchmarkMarshalMsgvoteTrackerRound(b *testing.B) {
+ v := voteTrackerRound{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgvoteTrackerRound(b *testing.B) {
+ v := voteTrackerRound{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalvoteTrackerRound(b *testing.B) {
+ v := voteTrackerRound{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/agreement/persistence.go b/agreement/persistence.go
index aef2a0f60..497e4b9af 100644
--- a/agreement/persistence.go
+++ b/agreement/persistence.go
@@ -33,10 +33,14 @@ import (
// diskState represents the state required by the agreement protocol to be persistent.
type diskState struct {
- Router, Player, Clock []byte
+ _struct struct{} `codec:","`
- ActionTypes []actionType
- Actions [][]byte
+ Router []byte
+ Player []byte
+ Clock []byte
+
+ ActionTypes []actionType `codec:"ActionTypes,allocbound=-"`
+ Actions [][]byte `codec:"Actions,allocbound=-"`
}
func persistent(as []action) bool {
@@ -49,17 +53,30 @@ func persistent(as []action) bool {
}
// encode serializes the current state into a byte array.
-func encode(t timers.Clock, rr rootRouter, p player, a []action) []byte {
+func encode(t timers.Clock, rr rootRouter, p player, a []action, reflect bool) (raw []byte) {
var s diskState
- s.Router = protocol.EncodeReflect(rr)
- s.Player = protocol.EncodeReflect(p)
+ if reflect {
+ s.Router = protocol.EncodeReflect(rr)
+ s.Player = protocol.EncodeReflect(p)
+ } else {
+ s.Router = protocol.Encode(&rr)
+ s.Player = protocol.Encode(&p)
+ }
s.Clock = t.Encode()
- for _, act := range a {
- s.ActionTypes = append(s.ActionTypes, act.t())
- s.Actions = append(s.Actions, protocol.EncodeReflect(act))
+ s.ActionTypes = make([]actionType, len(a))
+ s.Actions = make([][]byte, len(a))
+ for i, act := range a {
+ s.ActionTypes[i] = act.t()
+
+ // still use reflection for actions since action is an interface and we can't define marshaller methods on it
+ s.Actions[i] = protocol.EncodeReflect(act)
}
- raw := protocol.EncodeReflect(s)
- return raw
+ if reflect {
+ raw = protocol.EncodeReflect(s)
+ } else {
+ raw = protocol.Encode(&s)
+ }
+ return
}
// persist atomically writes state to the crash database.
@@ -177,17 +194,28 @@ func restore(log logging.Logger, crash db.Accessor) (raw []byte, err error) {
// decode process the incoming raw bytes array and attempt to reconstruct the agreement state objects.
//
// In all decoding errors, it returns the error code in err
-func decode(raw []byte, t0 timers.Clock, log serviceLogger) (t timers.Clock, rr rootRouter, p player, a []action, err error) {
+func decode(raw []byte, t0 timers.Clock, log serviceLogger, reflect bool) (t timers.Clock, rr rootRouter, p player, a []action, err error) {
var t2 timers.Clock
var rr2 rootRouter
var p2 player
a2 := []action{}
var s diskState
-
- err = protocol.DecodeReflect(raw, &s)
- if err != nil {
- log.Errorf("decode (agreement): error decoding retrieved state (len = %v): %v", len(raw), err)
- return
+ if reflect {
+ err = protocol.DecodeReflect(raw, &s)
+ if err != nil {
+ log.Errorf("decode (agreement): error decoding retrieved state (len = %v): %v", len(raw), err)
+ return
+ }
+ } else {
+ err = protocol.Decode(raw, &s)
+ if err != nil {
+ log.Warnf("decode (agreement): error decoding retrieved state using msgp (len = %v): %v. Trying reflection", len(raw), err)
+ err = protocol.DecodeReflect(raw, &s)
+ if err != nil {
+ log.Errorf("decode (agreement): error decoding using either reflection or msgp): %v", err)
+ return
+ }
+ }
}
t2, err = t0.Decode(s.Clock)
@@ -195,19 +223,43 @@ func decode(raw []byte, t0 timers.Clock, log serviceLogger) (t timers.Clock, rr
return
}
- err = protocol.DecodeReflect(s.Player, &p2)
- if err != nil {
- return
- }
+ if reflect {
+ err = protocol.DecodeReflect(s.Player, &p2)
+ if err != nil {
+ return
+ }
- rr2 = makeRootRouter(p2)
- err = protocol.DecodeReflect(s.Router, &rr2)
- if err != nil {
- return
+ rr2 = makeRootRouter(p2)
+ err = protocol.DecodeReflect(s.Router, &rr2)
+ if err != nil {
+ return
+ }
+ } else {
+ err = protocol.Decode(s.Player, &p2)
+ if err != nil {
+ log.Warnf("decode (agreement): failed to decode Player using msgp (len = %v): %v. Trying reflection", len(s.Player), err)
+ err = protocol.DecodeReflect(s.Player, &p2)
+ if err != nil {
+ log.Errorf("decode (agreement): failed to decode Player using either reflection or msgp: %v", err)
+ return
+ }
+ }
+ rr2 = makeRootRouter(p2)
+ err = protocol.Decode(s.Router, &rr2)
+ if err != nil {
+ log.Warnf("decode (agreement): failed to decode Router using msgp (len = %v): %v. Trying reflection", len(s.Router), err)
+ rr2 = makeRootRouter(p2)
+ err = protocol.DecodeReflect(s.Router, &rr2)
+ if err != nil {
+ log.Errorf("decode (agreement): failed to decode Router using either reflection or msgp: %v", err)
+ return
+ }
+ }
}
for i := range s.Actions {
act := zeroAction(s.ActionTypes[i])
+ // always use reflection for actions since action is an interface and we can't define unmarshaller methods on it
err = protocol.DecodeReflect(s.Actions[i], &act)
if err != nil {
return
@@ -308,7 +360,7 @@ func (p *asyncPersistenceLoop) loop(ctx context.Context) {
// sanity check; we check it after the fact, since it's not expected to ever happen.
// performance-wise, it takes approximitly 300000ns to execute, and we don't want it to
// block the persist operation.
- _, _, _, _, derr := decode(s.raw, s.clock, p.log)
+ _, _, _, _, derr := decode(s.raw, s.clock, p.log, false)
if derr != nil {
p.log.Errorf("could not decode own encoded disk state: %v", derr)
}
diff --git a/agreement/persistence_test.go b/agreement/persistence_test.go
index 94221f7be..7a4ec3db4 100644
--- a/agreement/persistence_test.go
+++ b/agreement/persistence_test.go
@@ -25,7 +25,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-algorand/util/timers"
@@ -34,17 +36,17 @@ import (
func TestAgreementSerialization(t *testing.T) {
partitiontest.PartitionTest(t)
- // todo : we need to deserialize some more meaningfull state.
+ // todo : we need to deserialize some more meaningful state.
clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
status := player{Round: 350, Step: soft, Deadline: time.Duration(23) * time.Second}
router := makeRootRouter(status)
- a := []action{}
+ a := []action{checkpointAction{}, disconnectAction(messageEvent{}, nil)}
- encodedBytes := encode(clock, router, status, a)
+ encodedBytes := encode(clock, router, status, a, false)
t0 := timers.MakeMonotonicClock(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC))
log := makeServiceLogger(logging.Base())
- clock2, router2, status2, a2, err := decode(encodedBytes, t0, log)
+ clock2, router2, status2, a2, err := decode(encodedBytes, t0, log, false)
require.NoError(t, err)
require.Equalf(t, clock, clock2, "Clock wasn't serialized/deserialized correctly")
require.Equalf(t, router, router2, "Router wasn't serialized/deserialized correctly")
@@ -53,7 +55,7 @@ func TestAgreementSerialization(t *testing.T) {
}
func BenchmarkAgreementSerialization(b *testing.B) {
- // todo : we need to deserialize some more meaningfull state.
+ // todo : we need to deserialize some more meaningful state.
b.SkipNow()
clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
@@ -63,12 +65,12 @@ func BenchmarkAgreementSerialization(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
- encode(clock, router, status, a)
+ encode(clock, router, status, a, false)
}
}
func BenchmarkAgreementDeserialization(b *testing.B) {
- // todo : we need to deserialize some more meaningfull state.
+ // todo : we need to deserialize some more meaningful state.
b.SkipNow()
clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
@@ -76,12 +78,12 @@ func BenchmarkAgreementDeserialization(b *testing.B) {
router := makeRootRouter(status)
a := []action{}
- encodedBytes := encode(clock, router, status, a)
+ encodedBytes := encode(clock, router, status, a, false)
t0 := timers.MakeMonotonicClock(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC))
log := makeServiceLogger(logging.Base())
b.ResetTimer()
for n := 0; n < b.N; n++ {
- decode(encodedBytes, t0, log)
+ decode(encodedBytes, t0, log, false)
}
}
@@ -163,3 +165,124 @@ func BenchmarkAgreementPersistenceRecovery(b *testing.B) {
restore(serviceLogger{Logger: logging.Base()}, accessor)
}
}
+
+func randomizeDiskState() (rr rootRouter, p player) {
+ p2, err := protocol.RandomizeObject(&player{})
+ if err != nil {
+ return
+ }
+ rr2, err := protocol.RandomizeObject(&rootRouter{})
+ if err != nil {
+ return
+ }
+ p = *(p2.(*player))
+ rr = *(rr2.(*rootRouter))
+ return
+}
+
+func TestRandomizedEncodingFullDiskState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ for i := 0; i < 5000; i++ {
+ router, player := randomizeDiskState()
+ a := []action{}
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ log := makeServiceLogger(logging.Base())
+ e1 := encode(clock, router, player, a, true)
+ e2 := encode(clock, router, player, a, false)
+ require.Equalf(t, e1, e2, "msgp and go-codec encodings differ: len(msgp)=%v, len(reflect)=%v", len(e1), len(e2))
+ _, rr1, p1, _, err1 := decode(e1, clock, log, true)
+ _, rr2, p2, _, err2 := decode(e1, clock, log, false)
+ require.NoErrorf(t, err1, "reflect decoding failed")
+ require.NoErrorf(t, err2, "msgp decoding failed")
+ require.Equalf(t, rr1, rr2, "rootRouters decoded differently")
+ require.Equalf(t, p1, p2, "players decoded differently")
+ }
+
+}
+
+func BenchmarkRandomizedEncode(b *testing.B) {
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ router, player := randomizeDiskState()
+ a := []action{}
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ encode(clock, router, player, a, false)
+ }
+}
+
+func BenchmarkRandomizedDecode(b *testing.B) {
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ router, player := randomizeDiskState()
+ a := []action{}
+ ds := encode(clock, router, player, a, false)
+ log := makeServiceLogger(logging.Base())
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ decode(ds, clock, log, false)
+ }
+}
+
+func TestEmptyMapDeserialization(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var rr, rr1 rootRouter
+ rr.Children = make(map[basics.Round]*roundRouter)
+ e := protocol.Encode(&rr)
+ err := protocol.Decode(e, &rr1)
+ require.NoError(t, err)
+ require.NotNil(t, rr1.Children)
+
+ var v, v1 voteTracker
+ v.Equivocators = make(map[basics.Address]equivocationVote)
+ ve := protocol.Encode(&v)
+ err = protocol.Decode(ve, &v1)
+ require.NoError(t, err)
+ require.NotNil(t, v1.Equivocators)
+}
+
+func TestDecodeFailures(t *testing.T) {
+ clock := timers.MakeMonotonicClock(time.Date(2015, 1, 2, 5, 6, 7, 8, time.UTC))
+ ce := clock.Encode()
+ log := makeServiceLogger(logging.Base())
+ player := player{Round: 350, Step: soft, Deadline: time.Duration(23) * time.Second}
+ router := makeRootRouter(player)
+ pe := protocol.Encode(&player)
+ re := protocol.Encode(&router)
+
+ // diskState decoding failure
+ {
+ type diskState struct {
+ UnexpectedDiskField int64
+ }
+ uds := diskState{UnexpectedDiskField: 5}
+ udse := protocol.EncodeReflect(uds)
+ _, _, _, _, err := decode(udse, clock, log, false)
+ require.ErrorContains(t, err, "UnexpectedDiskField")
+
+ }
+
+ // player decoding failure
+ {
+ type player struct {
+ UnexpectedPlayerField int64
+ }
+ p := player{UnexpectedPlayerField: 3}
+ pe := protocol.EncodeReflect(p)
+ ds := diskState{Player: pe, Router: re, Clock: ce}
+ dse := protocol.EncodeReflect(ds)
+ _, _, _, _, err := decode(dse, clock, log, false)
+ require.ErrorContains(t, err, "UnexpectedPlayerField")
+ }
+
+ // router decoding failure
+ {
+ type rootRouter struct {
+ UnexpectedRouterField int64
+ }
+ router := rootRouter{UnexpectedRouterField: 5}
+ re := protocol.EncodeReflect(router)
+ ds := diskState{Player: pe, Router: re, Clock: ce}
+ dse := protocol.EncodeReflect(ds)
+ _, _, _, _, err := decode(dse, clock, log, false)
+ require.ErrorContains(t, err, "UnexpectedRouterField")
+ }
+}
diff --git a/agreement/player.go b/agreement/player.go
index 2add5711e..cc29240aa 100644
--- a/agreement/player.go
+++ b/agreement/player.go
@@ -26,6 +26,7 @@ import (
// The player implements the top-level state machine functionality of the
// agreement protocol.
type player struct {
+ _struct struct{} `codec:","`
// Round, Period, and Step hold the current round, period, and step of
// the player state machine.
Round round
@@ -391,7 +392,7 @@ func (p *player) enterRound(r routerHandle, source event, target round) []action
if e.t() == payloadPipelined {
e := e.(payloadProcessedEvent)
- msg := message{MessageHandle: 0, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: e.UnauthenticatedPayload} // TODO do we want to keep around the original handle?
+ msg := message{messageHandle: 0, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: e.UnauthenticatedPayload} // TODO do we want to keep around the original handle?
a := verifyPayloadAction(messageEvent{T: payloadPresent, Input: msg}, p.Round, e.Period, e.Pinned)
actions = append(actions, a)
}
@@ -570,7 +571,7 @@ func (p *player) handleMessageEvent(r routerHandle, e messageEvent) (actions []a
}
// relay as the proposer
- if e.Input.MessageHandle == nil {
+ if e.Input.messageHandle == nil {
var uv unauthenticatedVote
switch ef.t() {
case payloadPipelined, payloadAccepted:
diff --git a/agreement/player_permutation_test.go b/agreement/player_permutation_test.go
index 251a27622..d7dcf9add 100644
--- a/agreement/player_permutation_test.go
+++ b/agreement/player_permutation_test.go
@@ -69,7 +69,7 @@ func getPlayerPermutation(t *testing.T, n int) (plyr *player, pMachine ioAutomat
plyr.Pending.push(&messageEvent{
T: payloadPresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedProposal: payload.u(),
},
})
@@ -161,7 +161,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -172,7 +172,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: votePresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedVote: vvote.u(),
},
Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
@@ -182,7 +182,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -193,7 +193,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -205,7 +205,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: votePresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedVote: vvote.u(),
},
Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
@@ -214,7 +214,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: payloadPresent,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedProposal: payload.u(),
},
}
@@ -222,7 +222,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: payloadVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
UnauthenticatedProposal: payload.u(),
Proposal: *payload,
},
@@ -278,7 +278,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -290,7 +290,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
e = messageEvent{
T: voteVerified,
Input: message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Vote: vvote,
UnauthenticatedVote: vvote.u(),
},
@@ -303,7 +303,7 @@ func getMessageEventPermutation(t *testing.T, n int, helper *voteMakerHelper) (e
Input: message{
Bundle: bundle{},
UnauthenticatedBundle: unauthenticatedBundle{},
- MessageHandle: "uniquemalformedBundle",
+ messageHandle: "uniquemalformedBundle",
},
Err: errTestVerifyFailed,
}
diff --git a/agreement/player_test.go b/agreement/player_test.go
index ecd871bc1..3e3cf8167 100644
--- a/agreement/player_test.go
+++ b/agreement/player_test.go
@@ -1877,7 +1877,7 @@ func TestPlayerPropagatesProposalPayload(t *testing.T) {
require.NoError(t, panicErr)
m := message{
- MessageHandle: "msghandle",
+ messageHandle: "msghandle",
UnauthenticatedProposal: payload.u(),
}
inMsg = messageEvent{
@@ -1952,7 +1952,7 @@ func TestPlayerPropagatesProposalPayloadFutureRound(t *testing.T) {
require.NoError(t, panicErr)
m := message{
- MessageHandle: "msghandle",
+ messageHandle: "msghandle",
UnauthenticatedProposal: payload.u(),
}
inMsg = messageEvent{
@@ -2269,7 +2269,7 @@ func TestPlayerDisconnectsFromMalformedProposalVote(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == disconnect && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == disconnect && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2287,7 +2287,7 @@ func TestPlayerIgnoresMalformedPayload(t *testing.T) {
// check ignore on malformed payloads
m := message{
- MessageHandle: "uniquemessage",
+ messageHandle: "uniquemessage",
Proposal: proposal{},
UnauthenticatedProposal: unauthenticatedProposal{},
}
@@ -2308,7 +2308,7 @@ func TestPlayerIgnoresMalformedPayload(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == ignore && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == ignore && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2329,7 +2329,7 @@ func TestPlayerDisconnectsFromMalformedVotes(t *testing.T) {
m := message{
Vote: vv,
UnauthenticatedVote: vv.u(),
- MessageHandle: "uniquemalformedvote",
+ messageHandle: "uniquemalformedvote",
}
inMsg := messageEvent{
T: voteVerified,
@@ -2348,7 +2348,7 @@ func TestPlayerDisconnectsFromMalformedVotes(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == disconnect && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == disconnect && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2368,7 +2368,7 @@ func TestPlayerDisconnectsFromMalformedBundles(t *testing.T) {
m := message{
Bundle: bundle{},
UnauthenticatedBundle: unauthenticatedBundle{},
- MessageHandle: "uniquemalformedBundle",
+ messageHandle: "uniquemalformedBundle",
}
inMsg := messageEvent{
Err: verifyError,
@@ -2387,7 +2387,7 @@ func TestPlayerDisconnectsFromMalformedBundles(t *testing.T) {
return false
}
act := wrapper.action.(networkAction)
- if act.T == disconnect && act.h == m.MessageHandle && act.Err != nil {
+ if act.T == disconnect && act.h == m.messageHandle && act.Err != nil {
return true
}
return false
@@ -2524,7 +2524,7 @@ func TestPlayerRequestsPipelinedPayloadVerification(t *testing.T) {
require.NoError(t, panicErr)
m := message{
UnauthenticatedProposal: payloadTwo.u(),
- MessageHandle: "r2",
+ messageHandle: "r2",
}
inMsg = messageEvent{
T: payloadPresent,
diff --git a/agreement/proposalManager.go b/agreement/proposalManager.go
index 8cf03b32f..5abe5f052 100644
--- a/agreement/proposalManager.go
+++ b/agreement/proposalManager.go
@@ -28,7 +28,10 @@ import (
// payload{Present,Verified}, roundInterruption, {soft,cert,next}Threshold.
// It returns the following type(s) of event: none, vote{Filtered,Malformed},
// payload{Pipelined,Rejected,Accepted}, and proposal{Accepted,Committable}.
-type proposalManager struct{}
+
+type proposalManager struct {
+ _struct struct{} `codec:","`
+}
func (m *proposalManager) T() stateMachineTag {
return proposalMachine
diff --git a/agreement/proposalStore.go b/agreement/proposalStore.go
index e375ef92f..841dc91b9 100644
--- a/agreement/proposalStore.go
+++ b/agreement/proposalStore.go
@@ -37,6 +37,7 @@ var proposalAlreadyAssembledCounter = metrics.MakeCounter(
// Once a proposal is successfully validated, it is stored by the
// blockAssembler.
type blockAssembler struct {
+ _struct struct{} `codec:","`
// Pipeline contains a proposal which has not yet been validated. The
// proposal might be inside the cryptoVerifier, or it might be a
// pipelined proposal from the next round.
@@ -53,7 +54,7 @@ type blockAssembler struct {
// for a given proposal-value. When a proposal payload is relayed by
// the state machine, a matching can be concatenated with the vote to
// ensure that peers do not drop the proposal payload.
- Authenticators []vote
+ Authenticators []vote `codec:"Authenticators,allocbound=-"`
}
// pipeline adds the given unvalidated proposal to the blockAssembler, returning
@@ -120,11 +121,12 @@ func (a blockAssembler) trim(p period) blockAssembler {
// It returns the following type(s) of event: none, voteFiltered,
// proposal{Accepted,Committable}, and payload{Pipelined,Rejected}.
type proposalStore struct {
+ _struct struct{} `codec:","`
// Relevant contains a current collection of important proposal-values
// in the round. Relevant is indexed by period, and the proposalValue is
// the last one reported by the corresponding proposalMachinePeriod.
// Each corresponding proposal is tracked in Assemblers.
- Relevant map[period]proposalValue
+ Relevant map[period]proposalValue `codec:"Relevant,allocbound=-"`
// Pinned contains the extra proposal-value, not tracked in Relevant,
// for which a certificate may have formed (i.e., vbar in the spec).
// The proposal corresponding to Pinned is tracked in Assemblers.
@@ -132,7 +134,7 @@ type proposalStore struct {
// Assemblers contains the set of proposal-values currently tracked and
// held by the proposalStore.
- Assemblers map[proposalValue]blockAssembler
+ Assemblers map[proposalValue]blockAssembler `codec:"Assemblers,allocbound=-"`
}
func (store *proposalStore) T() stateMachineTag {
diff --git a/agreement/proposalTable.go b/agreement/proposalTable.go
index 79448f403..b3ef71ac6 100644
--- a/agreement/proposalTable.go
+++ b/agreement/proposalTable.go
@@ -19,22 +19,24 @@ package agreement
// A proposalTable stores proposals which need to be authenticated
// after their prior votes have been processed.
type proposalTable struct {
- Pending map[int]*messageEvent
- PendingNext int
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Pending map[uint64]*messageEvent `codec:"Pending,allocbound=-"`
+ PendingNext uint64
}
// push adds a proposal to the proposalTable.
-func (t *proposalTable) push(e *messageEvent) int {
+func (t *proposalTable) push(e *messageEvent) uint64 {
t.PendingNext++
if t.Pending == nil {
- t.Pending = make(map[int]*messageEvent)
+ t.Pending = make(map[uint64]*messageEvent)
}
t.Pending[t.PendingNext] = e
return t.PendingNext
}
// pop takes a proposal from the proposalTable.
-func (t *proposalTable) pop(taskIndex int) *messageEvent {
+func (t *proposalTable) pop(taskIndex uint64) *messageEvent {
res := t.Pending[taskIndex]
delete(t.Pending, taskIndex)
return res
diff --git a/agreement/proposalTable_test.go b/agreement/proposalTable_test.go
new file mode 100644
index 000000000..af81305fb
--- /dev/null
+++ b/agreement/proposalTable_test.go
@@ -0,0 +1,69 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "encoding/base64"
+ "testing"
+
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// This test is only necessary for transition to msgp encoding
+// of the player state machine for agreement persistence
+func TestProposalTableMsgpEncoding(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type messageMetadata struct {
+ raw network.IncomingMessage
+ }
+ encoded, err := base64.StdEncoding.DecodeString("gqdQZW5kaW5ngQGHqUNhbmNlbGxlZMKjRXJywKVJbnB1dImmQnVuZGxlgK9Db21wb3VuZE1lc3NhZ2WCqFByb3Bvc2FsgKRWb3RlgK1NZXNzYWdlSGFuZGxlgKhQcm9wb3NhbICjVGFnolBQtVVuYXV0aGVudGljYXRlZEJ1bmRsZYC3VW5hdXRoZW50aWNhdGVkUHJvcG9zYWyAs1VuYXV0aGVudGljYXRlZFZvdGWApFZvdGWApVByb3RvgqNFcnLAp1ZlcnNpb26goVQApFRhaWzAqVRhc2tJbmRleD+rUGVuZGluZ05leHQB")
+ require.NoError(t, err)
+
+ // run on master a3e90ad to get the encoded data for above
+ // pt := proposalTable{}
+ // msg := messageEvent{
+ // Input: message{
+ // Tag: protocol.ProposalPayloadTag,
+ // MessageHandle: &messageMetadata{raw: network.IncomingMessage{Tag: protocol.Tag("mytag"), Data: []byte("some data")}},
+ // },
+ // TaskIndex: 63}
+ // pt.push(&msg)
+ // result := protocol.EncodeReflect(&pt)
+ // fmt.Println(base64.StdEncoding.EncodeToString(result))
+
+ var ptMsgp, ptReflect proposalTable
+ err = protocol.Decode(encoded, &ptMsgp)
+ require.NoError(t, err)
+ err = protocol.DecodeReflect(encoded, &ptReflect)
+ require.NoError(t, err)
+
+ msgMsgp := ptMsgp.pop(ptMsgp.PendingNext)
+ msgReflect := ptReflect.pop(ptReflect.PendingNext)
+
+ // After setting MessageHandle to nil they should be the same
+ msgMsgp.Input.MessageHandle = nil
+ msgReflect.Input.MessageHandle = nil
+ require.Equal(t, msgMsgp, msgReflect)
+ // Check that the other fields we have manually set are still the same
+ require.Equal(t, msgMsgp.Input.Tag, protocol.ProposalPayloadTag)
+ require.Equal(t, msgMsgp.TaskIndex, uint64(63))
+
+}
diff --git a/agreement/proposalTracker.go b/agreement/proposalTracker.go
index c76c5c9fd..e3efcd372 100644
--- a/agreement/proposalTracker.go
+++ b/agreement/proposalTracker.go
@@ -25,6 +25,7 @@ import (
// A proposalSeeker finds the vote with the lowest credential until freeze() is
// called.
type proposalSeeker struct {
+ _struct struct{} `codec:","`
// Lowest contains the vote with the lowest credential seen so far.
Lowest vote
// Filled is set if any vote has been seen.
@@ -66,10 +67,11 @@ func (s proposalSeeker) freeze() proposalSeeker {
// It returns the following type(s) of event: voteFiltered, proposalAccepted, readStaging,
// and proposalFrozen.
type proposalTracker struct {
+ _struct struct{} `codec:","`
// Duplicate holds the set of senders which has been seen by the
// proposalTracker. A duplicate proposal-vote or an equivocating
// proposal-vote is dropped by a proposalTracker.
- Duplicate map[basics.Address]bool
+ Duplicate map[basics.Address]bool `codec:"Duplicate,allocbound=-"`
// Freezer holds a proposalSeeker, which seeks the proposal-vote with
// the lowest credential seen by the proposalTracker.
Freezer proposalSeeker
diff --git a/agreement/proposalTrackerContract.go b/agreement/proposalTrackerContract.go
index c33feb841..bbe4911c2 100644
--- a/agreement/proposalTrackerContract.go
+++ b/agreement/proposalTrackerContract.go
@@ -21,6 +21,8 @@ import (
)
type proposalTrackerContract struct {
+ _struct struct{} `codec:","`
+
SawOneVote bool
Froze bool
SawSoftThreshold bool
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index 06a91b210..6075e8ebf 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -387,7 +387,7 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru
asyncVerifyingVotes := len(unverifiedVotes)
for i, uv := range unverifiedVotes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, uint64(i), msg, results)
if err != nil {
orderedResults[i].err = err
t.node.log.Infof("pseudonode.makeVotes: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
@@ -515,7 +515,7 @@ func (t pseudonodeProposalsTask) execute(verifier *AsyncVoteVerifier, quit chan
asyncVerifyingVotes := len(votes)
for i, uv := range votes {
msg := message{Tag: protocol.AgreementVoteTag, UnauthenticatedVote: uv}
- err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, i, msg, results)
+ err := verifier.verifyVote(context.TODO(), t.node.ledger, uv, uint64(i), msg, results)
if err != nil {
cryptoOutputs[i].err = err
t.node.log.Infof("pseudonode.makeProposals: failed to enqueue vote verification for (%d, %d): %v", t.round, t.period, err)
diff --git a/agreement/router.go b/agreement/router.go
index fd0638a01..f6f0ea010 100644
--- a/agreement/router.go
+++ b/agreement/router.go
@@ -66,6 +66,8 @@ type router interface {
}
type rootRouter struct {
+ _struct struct{} `codec:","`
+
root actor // playerMachine (not restored: explicitly set on construction)
proposalRoot listener // proposalMachine
voteRoot listener // voteMachine
@@ -73,20 +75,24 @@ type rootRouter struct {
ProposalManager proposalManager
VoteAggregator voteAggregator
- Children map[round]*roundRouter
+ Children map[round]*roundRouter `codec:"Children,allocbound=-"`
}
type roundRouter struct {
+ _struct struct{} `codec:","`
+
proposalRoot listener // proposalMachineRound
voteRoot listener // voteMachineRound
ProposalStore proposalStore
VoteTrackerRound voteTrackerRound
- Children map[period]*periodRouter
+ Children map[period]*periodRouter `codec:"Children,allocbound=-"`
}
type periodRouter struct {
+ _struct struct{} `codec:","`
+
proposalRoot listener // proposalMachinePeriod
voteRoot listener // voteMachinePeriod
@@ -95,10 +101,11 @@ type periodRouter struct {
ProposalTrackerContract proposalTrackerContract
- Children map[step]*stepRouter
+ Children map[step]*stepRouter `codec:"Children,allocbound=-"`
}
type stepRouter struct {
+ _struct struct{} `codec:","`
voteRoot listener // voteMachineStep
VoteTracker voteTracker
diff --git a/agreement/service.go b/agreement/service.go
index 346234950..00b192b5c 100644
--- a/agreement/service.go
+++ b/agreement/service.go
@@ -191,7 +191,7 @@ func (s *Service) mainLoop(input <-chan externalEvent, output chan<- []action, r
var err error
raw, err := restore(s.log, s.Accessor)
if err == nil {
- clock, router, status, a, err = decode(raw, s.Clock, s.log)
+ clock, router, status, a, err = decode(raw, s.Clock, s.log, false)
if err != nil {
reset(s.log, s.Accessor)
} else {
@@ -246,7 +246,7 @@ func (s *Service) mainLoop(input <-chan externalEvent, output chan<- []action, r
// usage semantics : caller should ensure to call this function only when we have participation
// keys for the given voting round.
func (s *Service) persistState(done chan error) (events <-chan externalEvent) {
- raw := encode(s.Clock, s.persistRouter, s.persistStatus, s.persistActions)
+ raw := encode(s.Clock, s.persistRouter, s.persistStatus, s.persistActions, false)
return s.persistenceLoop.Enqueue(s.Clock, s.persistStatus.Round, s.persistStatus.Period, s.persistStatus.Step, raw, done)
}
diff --git a/agreement/sort.go b/agreement/sort.go
new file mode 100644
index 000000000..21fa3fc27
--- /dev/null
+++ b/agreement/sort.go
@@ -0,0 +1,84 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "bytes"
+
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+// These types are defined to satisfy the sort interface used by msgp to ensure canonical encoding of maps in msgpack format.
+
+// SortAddress is re-exported from basics since the sort interface for basics.Address is already defined there
+//msgp:sort basics.Address SortAddress
+type SortAddress = basics.SortAddress
+
+// SortUint64 is re-exported from basics since the sort interface is already defined there,
+// ensuring canonical encoding of maps in msgpack format.
+type SortUint64 = basics.SortUint64
+
+// SortStep defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortStep
+//msgp:sort step SortStep
+type SortStep []step
+
+func (a SortStep) Len() int { return len(a) }
+func (a SortStep) Less(i, j int) bool { return a[i] < a[j] }
+func (a SortStep) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// SortPeriod defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortPeriod
+//msgp:sort period SortPeriod
+type SortPeriod []period
+
+func (a SortPeriod) Len() int { return len(a) }
+func (a SortPeriod) Less(i, j int) bool { return a[i] < a[j] }
+func (a SortPeriod) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// SortRound defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortRound
+//msgp:sort round SortRound
+type SortRound []round
+
+func (a SortRound) Len() int { return len(a) }
+func (a SortRound) Less(i, j int) bool { return a[i] < a[j] }
+func (a SortRound) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// SortProposalValue defines SortInterface used by msgp to consistently sort maps with this type as key.
+//msgp:ignore SortProposalValue
+//msgp:sort proposalValue SortProposalValue
+type SortProposalValue []proposalValue
+
+func (a SortProposalValue) Len() int { return len(a) }
+func (a SortProposalValue) Less(i, j int) bool {
+ if a[i].OriginalPeriod != a[j].OriginalPeriod {
+ return a[i].OriginalPeriod < a[j].OriginalPeriod
+ }
+ cmp := bytes.Compare(a[i].OriginalProposer[:], a[j].OriginalProposer[:])
+ if cmp != 0 {
+ return cmp < 0
+ }
+ cmp = bytes.Compare(a[i].BlockDigest[:], a[j].BlockDigest[:])
+ if cmp != 0 {
+ return cmp < 0
+ }
+ cmp = bytes.Compare(a[i].EncodingDigest[:], a[j].EncodingDigest[:])
+ return cmp < 0
+}
+
+func (a SortProposalValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/agreement/sort_test.go b/agreement/sort_test.go
new file mode 100644
index 000000000..8240e5eff
--- /dev/null
+++ b/agreement/sort_test.go
@@ -0,0 +1,60 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package agreement
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSortProposalValueLess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // initialize a new digest with all bytes being 'a'
+ d1 := new(crypto.Digest)
+ for i := range d1 {
+ d1[i] = byte('a')
+ }
+ p1 := proposalValue{
+ OriginalPeriod: 1,
+ OriginalProposer: basics.Address(*d1),
+ BlockDigest: *d1,
+ EncodingDigest: *d1,
+ }
+ sp := SortProposalValue{p1, p1}
+ // They are both equal so Less should return false regardless of order
+ require.Falsef(t, sp.Less(0, 1), "%v < %v is true for equal values", sp[0], sp[1])
+ require.Falsef(t, sp.Less(1, 0), "%v < %v is true for equal values", sp[1], sp[0])
+
+ // working our way backwards from the order of checks in sortProposalValue.Less()
+ // the test is tied to the implementation because it defines what the canonical order of checks is
+ sp[1].EncodingDigest[3] = byte('b')
+ require.Truef(t, sp.Less(0, 1), "expected %v < % v", sp[0], sp[1])
+ sp[0].BlockDigest[3] = byte('b')
+ require.Falsef(t, sp.Less(0, 1), "expected %v >= %v", sp[0], sp[1])
+ sp[1].BlockDigest[3] = byte('c')
+ require.Truef(t, sp.Less(0, 1), "expected %v < %v", sp[0], sp[1])
+ sp[0].OriginalProposer[3] = byte('b')
+ require.Falsef(t, sp.Less(0, 1), "expected %v >= %v", sp[0], sp[1])
+ sp[1].OriginalProposer[3] = byte('c')
+ require.Truef(t, sp.Less(0, 1), "expected %v < %v", sp[0], sp[1])
+ sp[0].OriginalPeriod = 2
+ require.Falsef(t, sp.Less(0, 1), "expected %v >= %v", sp[0], sp[1])
+}
diff --git a/agreement/voteAggregator.go b/agreement/voteAggregator.go
index 057f9d14f..196e91a3b 100644
--- a/agreement/voteAggregator.go
+++ b/agreement/voteAggregator.go
@@ -29,7 +29,9 @@ import (
// bundlePresent, and bundleVerified.
// It returns the following type(s) of event: none, vote{Filtered,Malformed},
// bundle{Filtered,Malformed}, and {soft,cert,next}Threshold.
-type voteAggregator struct{}
+type voteAggregator struct {
+ _struct struct{} `codec:","`
+}
func (agg *voteAggregator) T() stateMachineTag {
return voteMachine
diff --git a/agreement/voteAggregator_test.go b/agreement/voteAggregator_test.go
index f23bd4d56..8795a0b36 100644
--- a/agreement/voteAggregator_test.go
+++ b/agreement/voteAggregator_test.go
@@ -900,7 +900,7 @@ func TestVoteAggregatorOldVote(t *testing.T) {
results := make(chan asyncVerifyVoteResponse, len(uvs))
for i, uv := range uvs {
- avv.verifyVote(context.Background(), ledger, uv, i, message{}, results)
+ avv.verifyVote(context.Background(), ledger, uv, uint64(i), message{}, results)
result := <-results
require.True(t, result.cancelled)
}
diff --git a/agreement/voteAuxiliary.go b/agreement/voteAuxiliary.go
index 0c6e85c47..d99a01139 100644
--- a/agreement/voteAuxiliary.go
+++ b/agreement/voteAuxiliary.go
@@ -19,6 +19,7 @@ package agreement
// A voteTrackerPeriod is a voteMachinePeriod which indicates whether a
// next-threshold of votes was observed for a some value in a period.
type voteTrackerPeriod struct {
+ _struct struct{} `codec:","`
// Make it explicit that we are serializing player fields for crash recovery;
// we should probably adopt this convention over the rest of player at some point.
Cached nextThresholdStatusEvent
@@ -99,6 +100,7 @@ func (t *voteTrackerPeriod) handle(r routerHandle, p player, e event) event {
// It returns the following type(s) of event: none and
// {soft,cert,next}Threshold, and freshestBundle
type voteTrackerRound struct {
+ _struct struct{} `codec:","`
// Freshest holds the freshest thresholdEvent seen this round.
Freshest thresholdEvent
// Ok is set if any thresholdEvent has been seen.
diff --git a/agreement/voteTracker.go b/agreement/voteTracker.go
index d0f717abd..394584015 100644
--- a/agreement/voteTracker.go
+++ b/agreement/voteTracker.go
@@ -27,8 +27,10 @@ import (
)
type proposalVoteCounter struct {
+ _struct struct{} `codec:","`
+
Count uint64
- Votes map[basics.Address]vote
+ Votes map[basics.Address]vote `codec:"Votes,allocbound=-"`
}
// A voteTracker is a voteMachineStep which handles duplication and
@@ -40,20 +42,21 @@ type proposalVoteCounter struct {
// It returns the following type(s) of event: none and
// {soft,cert,next}Threshold.
type voteTracker struct {
+ _struct struct{} `codec:","`
// Voters holds the set of voters which have voted in the current step.
// It is used to track whether a voter has equivocated.
- Voters map[basics.Address]vote
+ Voters map[basics.Address]vote `codec:"Voters,allocbound=-"`
// Counts holds the weighted sum of the votes for a given proposal.
// it also hold the individual votes.
// preconditions :
// Any proposalValue in Counts is gurenteed to contain at least one vote
- Counts map[proposalValue]proposalVoteCounter
+ Counts map[proposalValue]proposalVoteCounter `codec:"Counts,allocbound=-"`
// Equivocators holds the set of voters which have already equivocated
// once. Future votes from these voters are dropped and not
// propagated.
- Equivocators map[basics.Address]equivocationVote
+ Equivocators map[basics.Address]equivocationVote `codec:"Equivocators,allocbound=-"`
// EquivocatorsCount holds the number of equivocating votes which count
// for any proposal-value.
diff --git a/agreement/voteTrackerContract.go b/agreement/voteTrackerContract.go
index ca90a74a8..ad1585e6d 100644
--- a/agreement/voteTrackerContract.go
+++ b/agreement/voteTrackerContract.go
@@ -42,6 +42,8 @@ import (
// Trace properties
// - voteFilterRequest is idempotent
type voteTrackerContract struct {
+ _struct struct{} `codec:","`
+
Step step
StepOk bool
diff --git a/buildnumber.dat b/buildnumber.dat
index 0cfbf0888..573541ac9 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-2
+0
diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go
index fa965a154..30c5ccb3b 100644
--- a/catchup/ledgerFetcher.go
+++ b/catchup/ledgerFetcher.go
@@ -39,8 +39,8 @@ import (
var errNoLedgerForRound = errors.New("no ledger available for given round")
const (
- // maxCatchpointFileChunkSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk.
- maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk * basics.MaxEncodedAccountDataSize
+ // maxCatchpointFileChunkSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk and one account with max resources.
+ maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk*(ledger.MaxEncodedBaseAccountDataSize+ledger.MaxEncodedKVDataSize) + ledger.ResourcesPerCatchpointFileChunk*ledger.MaxEncodedBaseResourceDataSize
// defaultMinCatchpointFileDownloadBytesPerSecond defines the worst-case scenario download speed we expect to get while downloading a catchpoint file
defaultMinCatchpointFileDownloadBytesPerSecond = 20 * 1024
// catchpointFileStreamReadSize defines the number of bytes we would attempt to read at each iteration from the incoming http data stream
@@ -146,10 +146,12 @@ func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPee
"writing balances to disk took %d seconds, "+
"writing creatables to disk took %d seconds, "+
"writing hashes to disk took %d seconds, "+
+ "writing kv pairs to disk took %d seconds, "+
"total duration is %d seconds",
downloadProgress.BalancesWriteDuration/time.Second,
downloadProgress.CreatablesWriteDuration/time.Second,
downloadProgress.HashesWriteDuration/time.Second,
+ downloadProgress.KVWriteDuration/time.Second,
writeDuration/time.Second)
}
@@ -191,5 +193,5 @@ func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPee
}
func (lf *ledgerFetcher) processBalancesBlock(ctx context.Context, sectionName string, bytes []byte, downloadProgress *ledger.CatchpointCatchupAccessorProgress) error {
- return lf.accessor.ProgressStagingBalances(ctx, sectionName, bytes, downloadProgress)
+ return lf.accessor.ProcessStagingBalances(ctx, sectionName, bytes, downloadProgress)
}
diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go
index d0e181670..ec2a3ccd8 100644
--- a/cmd/catchpointdump/file.go
+++ b/cmd/catchpointdump/file.go
@@ -21,6 +21,7 @@ import (
"bufio"
"context"
"database/sql"
+ "encoding/base64"
"encoding/json"
"fmt"
"io"
@@ -34,6 +35,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
@@ -128,6 +130,10 @@ var fileCmd = &cobra.Command{
if err != nil {
reportErrorf("Unable to print account database : %v", err)
}
+ err = printKeyValueStore("./ledger.tracker.sqlite", outFile)
+ if err != nil {
+ reportErrorf("Unable to print key value store : %v", err)
+ }
}
},
}
@@ -176,7 +182,7 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc
return fileHeader, err
}
}
- err = catchupAccessor.ProgressStagingBalances(ctx, header.Name, balancesBlockBytes, &downloadProgress)
+ err = catchupAccessor.ProcessStagingBalances(ctx, header.Name, balancesBlockBytes, &downloadProgress)
if err != nil {
return fileHeader, err
}
@@ -380,7 +386,65 @@ func printAccountsDatabase(databaseName string, fileHeader ledger.CatchpointFile
}
// increase the deadline warning to disable the warning message.
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(5*time.Second))
+ _, _ = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(5*time.Second))
return err
})
}
+
+func printKeyValue(writer *bufio.Writer, key, value []byte) {
+ var pretty string
+ ai, rest, err := logic.SplitBoxKey(string(key))
+ if err == nil {
+ pretty = fmt.Sprintf("box(%d, %s)", ai, base64.StdEncoding.EncodeToString([]byte(rest)))
+ } else {
+ pretty = base64.StdEncoding.EncodeToString(key)
+ }
+
+ fmt.Fprintf(writer, "%s : %v\n", pretty, base64.StdEncoding.EncodeToString(value))
+}
+
+func printKeyValueStore(databaseName string, outFile *os.File) error {
+ fmt.Printf("\n")
+ printDumpingCatchpointProgressLine(0, 50, 0)
+ lastProgressUpdate := time.Now()
+ progress := uint64(0)
+ defer printDumpingCatchpointProgressLine(0, 0, 0)
+
+ fileWriter := bufio.NewWriterSize(outFile, 1024*1024)
+ defer fileWriter.Flush()
+
+ dbAccessor, err := db.MakeAccessor(databaseName, true, false)
+ if err != nil || dbAccessor.Handle == nil {
+ return err
+ }
+
+ return dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var rowsCount int64
+ err := tx.QueryRow("SELECT count(*) from catchpointkvstore").Scan(&rowsCount)
+ if err != nil {
+ return err
+ }
+
+ // ordered to make dumps more "diffable"
+ rows, err := tx.Query("SELECT key, value FROM catchpointkvstore order by key")
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+ for rows.Next() {
+ progress++
+ var key []byte
+ var value []byte
+ err := rows.Scan(&key, &value)
+ if err != nil {
+ return err
+ }
+ printKeyValue(fileWriter, key, value)
+ if time.Since(lastProgressUpdate) > 50*time.Millisecond {
+ lastProgressUpdate = time.Now()
+ printDumpingCatchpointProgressLine(int(float64(progress)*50.0/float64(rowsCount)), 50, int64(progress))
+ }
+ }
+ return nil
+ })
+}
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 9073ece66..8c1b056af 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -357,6 +357,11 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
if err != nil {
return err
}
+ err = printKeyValueStore("./ledger.tracker.sqlite", outFile)
+ if err != nil {
+ return err
+ }
+
}
return nil
}
diff --git a/cmd/dispenser/index.html.tpl b/cmd/dispenser/index.html.tpl
new file mode 100644
index 000000000..ec53973b8
--- /dev/null
+++ b/cmd/dispenser/index.html.tpl
@@ -0,0 +1,80 @@
+<!DOCTYPE html>
+ <head>
+ <title>Algorand dispenser</title>
+ <script src='https://www.google.com/recaptcha/api.js'>
+ </script>
+ <script src="https://code.jquery.com/jquery-3.3.1.min.js"
+ integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
+ crossorigin="anonymous">
+ </script>
+ <script>
+ var ADDRESS_REGEX = /[A-Z0-9]{58}/
+
+ function sanitize(string) {
+ const entityMap = {
+ '&': '&amp;',
+ '<': '&lt;',
+ '>': '&gt;',
+ '"': '&quot;',
+ "'": '&#39;',
+ '/': '&#x2F;',
+ '`': '&#x60;',
+ '=': '&#x3D;'
+ };
+ return String(string).replace(/[&<>"'`=\/]/g, function (s) {
+ return entityMap[s];
+ });
+ }
+
+ function loadparam() {
+ const queryString = window.location.search;
+ const urlParams = new URLSearchParams(queryString);
+ const account = sanitize(urlParams.get('account'))
+
+ if (ADDRESS_REGEX.test(account)) {
+ $('#target').val(account);
+ }
+ }
+
+ function onload() {
+ loadparam();
+ $('#dispense').click(function(e) {
+ var recaptcha = grecaptcha.getResponse();
+ var target = sanitize($('#target').val());
+
+ if (ADDRESS_REGEX.test(target)) {
+ $('#status').html('Sending request..');
+ var req = $.post('/dispense', {
+ recaptcha: recaptcha,
+ target: target,
+ }, function(data) {
+ $('#status').text('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
+ }).fail(function() {
+ $('#status').text('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
+ });
+ }
+ else {
+ $('#status').text('Please enter a valid Algorand address')
+ }
+ });
+ }
+ </script>
+ </head>
+ <body onload="onload()">
+ <h1>Algorand dispenser</h1>
+ <div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}">
+ </div>
+ <div>
+ <p>The dispensed Algos have no monetary value and should only be used to test applications.</p>
+ <p>This service is gracefully provided to enable development on the Algorand blockchain test networks.</p>
+ <p>Please do not abuse it by requesting more Algos than needed.</p>
+ </div>
+ <div>
+ <input id="target" placeholder="target address" size="80">
+ <button id="dispense">Dispense</button>
+ </div>
+ <div>
+ Status: <span id="status"></span>
+ </div>
+ </body>
+</html>
diff --git a/cmd/dispenser/server.go b/cmd/dispenser/server.go
index d4ec0b5b8..7890ab8f4 100644
--- a/cmd/dispenser/server.go
+++ b/cmd/dispenser/server.go
@@ -17,6 +17,9 @@
package main
import (
+ _ "embed"
+ "html"
+
// "bytes"
"encoding/json"
"flag"
@@ -63,61 +66,8 @@ type dispenserSiteConfig struct {
topPage string
}
-const topPageTemplate = `
-<html>
- <head>
- <title>Algorand dispenser</title>
- <script src='https://www.google.com/recaptcha/api.js'>
- </script>
- <script src="https://code.jquery.com/jquery-3.3.1.min.js"
- integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
- crossorigin="anonymous">
- </script>
- <script>
- function loadparam() {
- const queryString = window.location.search;
- const urlParams = new URLSearchParams(queryString);
- $('#target').val(urlParams.get('account'));
- }
-
- function onload() {
- loadparam();
- $('#dispense').click(function(e) {
- var recaptcha = grecaptcha.getResponse();
- var target = $('#target').val();
-
- $('#status').html('Sending request..');
- var req = $.post('/dispense', {
- recaptcha: recaptcha,
- target: target,
- }, function(data) {
- $('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
- }).fail(function() {
- $('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
- });
- });
- }
- </script>
- </head>
- <body onload="onload()">
- <h1>Algorand dispenser</h1>
- <div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}">
- </div>
- <div>
- <p>The dispensed Algos have no monetary value and should only be used to test applications.</p>
- <p>This service is gracefully provided to enable development on the Algorand blockchain test networks.</p>
- <p>Please do not abuse it by requesting more Algos than needed.</p>
- </div>
- <div>
- <input id="target" placeholder="target address" size="80">
- <button id="dispense">Dispense</button>
- </div>
- <div>
- Status: <span id="status"></span>
- </div>
- </body>
-</html>
-`
+//go:embed index.html.tpl
+var topPageTemplate string
func getConfig(r *http.Request) dispenserSiteConfig {
return configMap[r.Host]
@@ -190,7 +140,7 @@ func dispense(w http.ResponseWriter, r *http.Request) {
return
}
- target := targets[0]
+ target := html.EscapeString(targets[0])
c, ok := client[r.Host]
if !ok {
diff --git a/cmd/goal/README.md b/cmd/goal/README.md
new file mode 100644
index 000000000..2ecebf65b
--- /dev/null
+++ b/cmd/goal/README.md
@@ -0,0 +1,147 @@
+# Example `goal` Snippets
+
+Unless otherwise noted, it is assumed that the working directory
+begins at the top level of the `go-algorand` repo.
+
+It is also assumed that the main README's installation instructions have been followed and `make install` run so that the `goal` executable has been rebuilt from the same source as this example and is available in the shell environment.
+You can run `ls -l $(which goal)` after your `make install` and look at the installation time as a sanity check.
+
+Finally, all the `goal` commands assume that `${ALGORAND_DATA}` has been set. See the first Q/A for how this is done.
+
+## Starting a Single Node Dev Network
+
+### Q: Having just completed a new build in go-algorand, how do I get a single node dev network up, with algos in an easily accessible wallet from goal?
+
+### A:
+
+```sh
+# set this to where you want to keep the network files (and data dirs will go beneath)
+NETWORKS=~/networks
+
+# create a networks directory if you don't already have it
+mkdir -p ${NETWORKS}
+
+# set this to "name" your network
+NAME=niftynetwork
+
+# assuming here that you are currently working out of the root directory of the go-algorand repo
+goal network create -n ${NAME} -r ${NETWORKS}/${NAME} -t ./test/testdata/nettemplates/OneNodeFuture.json
+
+# after the next command and for the rest of the README, we assume that `${ALGORAND_DATA}` is set
+export ALGORAND_DATA=${NETWORKS}/${NAME}/Primary
+echo $ALGORAND_DATA
+
+# start the network
+goal node start
+
+# see if it worked (run a few times, note block increasing)
+goal node status
+sleep 4 # assuming you're copy/pasting this entire block
+goal node status
+sleep 4
+goal node status
+
+# find the account with all the money
+goal account list
+
+# put it in a variable
+ACCOUNT=`goal account list | awk '{print $2}'`
+echo $ACCOUNT
+
+# send some money from the account to itself
+goal clerk send --to ${ACCOUNT} --from ${ACCOUNT} --amount 10
+```
+
+## Creating Applications
+
+### Q: How do I use goal to create an app?
+
+### A:
+Here's an example with the following assumptions:
+* all the setup is as in the first question
+* the approval program (which tests box functionality) has relative path `cmd/goal/examples/boxes.teal`
+* the clear program has relative path `cmd/goal/examples/clear.teal`
+* there are no local or global storage requirements
+
+```sh
+TEALDIR=cmd/goal/examples
+echo $TEALDIR
+
+# create the app and TAKE NOTE of its "app index"
+goal app create --creator ${ACCOUNT} --approval-prog ${TEALDIR}/boxes.teal --clear-prog ${TEALDIR}/clear.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0
+```
+
+For the following questions, you'll need to use the app index. That will be shown in the last line printed. EG:
+
+```sh
+Attempting to create app (approval size 125, hash RKWO3VXBKQXF77PC6EHRLFXD4YTJYTJTGPTPWQ46YH5ESGPZ5JIA; clear size 3, hash IS4FW6ZCRMQRTDIINAVAQHD2GK6DXUNQHQ52IQGZEVPP4OEU56QA)
+Issued transaction from account ECRQFXZ7P3PLNK6QLIEVX7AXU6NTVQZHFUSEXTXMBKKOA2NTIV4PCX7XNY, txid SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA (fee 1000)
+Transaction SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA still pending as of round 12
+Transaction SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA still pending as of round 13
+Transaction SZK3U7AARMPQSZUICZIGYRLC7UDXJCVPV34JCBN5LIBXMF635UKA committed in round 14
+Created app with app index 2
+```
+
+## Funding App-Accounts
+
+### Q: How do I fund the app account so that it can satisfy its boxes min-balance requirement and allow for box creation?
+
+### A:
+Assuming you followed the previous step, and that the _app index_ is 2:
+
+```sh
+# store the app index for later usage
+APPID=2
+echo $APPID
+
+# store the app's account address into a variable
+APP_ACCOUNT=`goal app info --app-id ${APPID} | grep "Application account" | awk '{print $3}'`
+echo $APP_ACCOUNT
+
+# fund the app's account (here we're being very conservative and sending 10 algos)
+goal clerk send --to ${APP_ACCOUNT} --from ${ACCOUNT} --amount 10000000
+
+# verify the balance of the app's account
+goal account balance --address ${APP_ACCOUNT}
+```
+
+## Application Boxes in `goal`
+
+### Q: How do I use boxes in goal? In particular, I'd like to make a goal app call which:
+* accesses a particular box for a particular app
+* stores an ABI type as its contents
+
+### A:
+Here's an example with the following assumptions:
+
+* the caller's account is given by `${ACCOUNT}` (see first answer)
+* the program used is `boxes.teal` referenced above. In particular:
+ * it routes to box subroutines using the app argument at index 0 as the method signifier
+* the app id has been stored in `${APPID}` (see the previous answer)
+* the box referenced in the first non-create app-call has name `greatBox`
+* another referenced box is named `an_ABI_box`
+ * this second box is provided contents `[2,3,5]` of ABI-type `(uint8,uint8,uint8)`
+
+```sh
+# create a box with a simple non-ABI name. Note how the `--box` flag needs to be set so as to refer to the box being touched
+goal app call --from $ACCOUNT --app-id ${APPID} --box "str:greatBox" --app-arg "str:create" --app-arg "str:greatBox"
+
+# create another box
+goal app call --from ${ACCOUNT} --app-id ${APPID} --box "str:an_ABI_box" --app-arg "str:create" --app-arg "str:an_ABI_box"
+
+# set the contents to ABI type `(uint8,uint8,uint8)` with value `[2,3,5]`
+goal app call --from ${ACCOUNT} --app-id ${APPID} --box "str:an_ABI_box" --app-arg "str:set" --app-arg "str:an_ABI_box" --app-arg "abi:(uint8,uint8,uint8):[2,3,5]"
+```
+
+### Q: How do I search for boxes in goal?
+
+### A:
+Assuming you followed the previous step to create `greatBox` and `an_ABI_box`:
+
+```sh
+# get all boxes for a given app
+goal app box list --app-id ${APPID}
+
+# get the box details for a given box
+goal app box info --app-id ${APPID} --name "str:an_ABI_box"
+```
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 58ac78f75..97006d29f 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -19,9 +19,6 @@ package main
import (
"bytes"
"crypto/sha512"
- "encoding/base32"
- "encoding/base64"
- "encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
@@ -72,6 +69,7 @@ var (
// platform seems not so far-fetched?
foreignApps []string
foreignAssets []string
+ appBoxes []string // parse these as we do app args, with optional number and comma in front
appStrAccounts []string
appArgs []string
@@ -98,8 +96,9 @@ func init() {
appCmd.PersistentFlags().StringArrayVar(&appArgs, "app-arg", nil, "Args to encode for application transactions (all will be encoded to a byte slice). For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.")
appCmd.PersistentFlags().StringSliceVar(&foreignApps, "foreign-app", nil, "Indexes of other apps whose global state is read in this transaction")
appCmd.PersistentFlags().StringSliceVar(&foreignAssets, "foreign-asset", nil, "Indexes of assets whose parameters are read in this transaction")
+ appCmd.PersistentFlags().StringArrayVar(&appBoxes, "box", nil, "Boxes that may be accessed by this transaction. Use the same form as app-arg to name the box, preceded by an optional app-id and comma. No app-id indicates the box is accessible by the app being called.")
appCmd.PersistentFlags().StringSliceVar(&appStrAccounts, "app-account", nil, "Accounts that may be accessed from application logic")
- appCmd.PersistentFlags().StringVarP(&appInputFilename, "app-input", "i", "", "JSON file containing encoded arguments and inputs (mutually exclusive with app-arg-b64 and app-account)")
+ appCmd.PersistentFlags().StringVarP(&appInputFilename, "app-input", "i", "", "JSON file containing encoded arguments and inputs (mutually exclusive with app-arg, app-account, foreign-app, foreign-asset, and box)")
appCmd.PersistentFlags().StringVar(&approvalProgFile, "approval-prog", "", "(Uncompiled) TEAL assembly program filename for approving/rejecting transactions")
appCmd.PersistentFlags().StringVar(&clearProgFile, "clear-prog", "", "(Uncompiled) TEAL assembly program filename for updating application state when a user clears their local state")
@@ -199,16 +198,51 @@ func panicIfErr(err error) {
}
}
-type appCallArg struct {
- Encoding string `codec:"encoding"`
- Value string `codec:"value"`
+func newAppCallBytes(arg string) logic.AppCallBytes {
+ appBytes, err := logic.NewAppCallBytes(arg)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ return appBytes
}
type appCallInputs struct {
- Accounts []string `codec:"accounts"`
- ForeignApps []uint64 `codec:"foreignapps"`
- ForeignAssets []uint64 `codec:"foreignassets"`
- Args []appCallArg `codec:"args"`
+ Accounts []string `codec:"accounts"`
+ ForeignApps []uint64 `codec:"foreignapps"`
+ ForeignAssets []uint64 `codec:"foreignassets"`
+ Boxes []boxRef `codec:"boxes"`
+ Args []logic.AppCallBytes `codec:"args"`
+}
+
+type boxRef struct {
+ appID uint64 `codec:"app"`
+ name logic.AppCallBytes `codec:"name"`
+}
+
+// newBoxRef parses a command-line box ref, which is an optional appId, a comma,
+// and then the same format as an app call arg.
+func newBoxRef(arg string) boxRef {
+ parts := strings.SplitN(arg, ":", 2)
+ if len(parts) != 2 {
+ reportErrorf("box refs should be of the form '[<app>,]encoding:value'")
+ }
+ encoding := parts[0] // tentative, may be <app>,<encoding>
+ value := parts[1]
+ parts = strings.SplitN(encoding, ",", 2)
+ appID := uint64(0)
+ if len(parts) == 2 {
+ // There was a comma in the part before the ":"
+ encoding = parts[1]
+ var err error
+ appID, err = strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ reportErrorf("Could not parse app id in box ref: %v", err)
+ }
+ }
+ return boxRef{
+ appID: appID,
+ name: newAppCallBytes(encoding + ":" + value),
+ }
}
func stringsToUint64(strs []string) []uint64 {
@@ -223,78 +257,60 @@ func stringsToUint64(strs []string) []uint64 {
return out
}
-func getForeignAssets() []uint64 {
- return stringsToUint64(foreignAssets)
-}
-
-func getForeignApps() []uint64 {
- return stringsToUint64(foreignApps)
+func stringsToBoxRefs(strs []string) []boxRef {
+ out := make([]boxRef, len(strs))
+ for i, brstr := range strs {
+ out[i] = newBoxRef(brstr)
+ }
+ return out
}
-func parseAppArg(arg appCallArg) (rawValue []byte, parseErr error) {
- switch arg.Encoding {
- case "str", "string":
- rawValue = []byte(arg.Value)
- case "int", "integer":
- num, err := strconv.ParseUint(arg.Value, 10, 64)
- if err != nil {
- parseErr = fmt.Errorf("Could not parse uint64 from string (%s): %v", arg.Value, err)
- return
- }
- ibytes := make([]byte, 8)
- binary.BigEndian.PutUint64(ibytes, num)
- rawValue = ibytes
- case "addr", "address":
- addr, err := basics.UnmarshalChecksumAddress(arg.Value)
- if err != nil {
- parseErr = fmt.Errorf("Could not unmarshal checksummed address from string (%s): %v", arg.Value, err)
- return
- }
- rawValue = addr[:]
- case "b32", "base32", "byte base32":
- data, err := base32.StdEncoding.DecodeString(arg.Value)
- if err != nil {
- parseErr = fmt.Errorf("Could not decode base32-encoded string (%s): %v", arg.Value, err)
- return
- }
- rawValue = data
- case "b64", "base64", "byte base64":
- data, err := base64.StdEncoding.DecodeString(arg.Value)
+func translateBoxRefs(input []boxRef, foreignApps []uint64) []transactions.BoxRef {
+ output := make([]transactions.BoxRef, len(input))
+ for i, tbr := range input {
+ rawName, err := tbr.name.Raw()
if err != nil {
- parseErr = fmt.Errorf("Could not decode base64-encoded string (%s): %v", arg.Value, err)
- return
- }
- rawValue = data
- case "abi":
- typeAndValue := strings.SplitN(arg.Value, ":", 2)
- if len(typeAndValue) != 2 {
- parseErr = fmt.Errorf("Could not decode abi string (%s): should split abi-type and abi-value with colon", arg.Value)
- return
+ reportErrorf("Could not decode box name %s: %v", tbr.name, err)
}
- abiType, err := abi.TypeOf(typeAndValue[0])
- if err != nil {
- parseErr = fmt.Errorf("Could not decode abi type string (%s): %v", typeAndValue[0], err)
- return
+
+ index := uint64(0)
+ if tbr.appID != 0 {
+ found := false
+ for a, id := range foreignApps {
+ if tbr.appID == id {
+ index = uint64(a + 1)
+ found = true
+ break
+ }
+ }
+ // Check appIdx after the foreignApps check. If the user actually
+ // put the appIdx in foreignApps, and then used the appIdx here
+ // (rather than 0), then maybe they really want to use it in the
+ // transaction as the full number. Though it's hard to see why.
+ if !found && tbr.appID == appIdx {
+ index = 0
+ found = true
+ }
+ if !found {
+ reportErrorf("Box ref with appId (%d) not in foreign-apps", tbr.appID)
+ }
}
- value, err := abiType.UnmarshalFromJSON([]byte(typeAndValue[1]))
- if err != nil {
- parseErr = fmt.Errorf("Could not decode abi value string (%s):%v ", typeAndValue[1], err)
- return
+ output[i] = transactions.BoxRef{
+ Index: index,
+ Name: rawName,
}
- return abiType.Encode(value)
- default:
- parseErr = fmt.Errorf("Unknown encoding: %s", arg.Encoding)
}
- return
+ return output
}
-func parseAppInputs(inputs appCallInputs) (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
+func parseAppInputs(inputs appCallInputs) (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) {
accounts = inputs.Accounts
foreignApps = inputs.ForeignApps
foreignAssets = inputs.ForeignAssets
+ boxes = translateBoxRefs(inputs.Boxes, foreignApps)
args = make([][]byte, len(inputs.Args))
for i, arg := range inputs.Args {
- rawValue, err := parseAppArg(arg)
+ rawValue, err := arg.Raw()
if err != nil {
reportErrorf("Could not decode input at index %d: %v", i, err)
}
@@ -303,7 +319,7 @@ func parseAppInputs(inputs appCallInputs) (args [][]byte, accounts []string, for
return
}
-func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
+func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) {
var inputs appCallInputs
f, err := os.Open(appInputFilename)
if err != nil {
@@ -319,49 +335,31 @@ func processAppInputFile() (args [][]byte, accounts []string, foreignApps []uint
return parseAppInputs(inputs)
}
-// filterEmptyStrings filters out empty string parsed in by StringArrayVar
-// this function is added to support abi argument parsing
-// since parsing of `appArg` diverted from `StringSliceVar` to `StringArrayVar`
-func filterEmptyStrings(strSlice []string) []string {
- var newStrSlice []string
-
- for _, str := range strSlice {
- if len(str) > 0 {
- newStrSlice = append(newStrSlice, str)
- }
- }
- return newStrSlice
-}
-
-func getAppInputs() (args [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) {
- if (appArgs != nil || appStrAccounts != nil || foreignApps != nil) && appInputFilename != "" {
- reportErrorf("Cannot specify both command-line arguments/accounts and JSON input filename")
- }
+func getAppInputs() (args [][]byte, accounts []string, apps []uint64, assets []uint64, boxes []transactions.BoxRef) {
if appInputFilename != "" {
+ if appArgs != nil || appStrAccounts != nil || foreignApps != nil || foreignAssets != nil {
+ reportErrorf("Cannot specify both command-line arguments/resources and JSON input filename")
+ }
return processAppInputFile()
}
- var encodedArgs []appCallArg
-
- // we need to filter out empty strings from appArgs first, caused by change to `StringArrayVar`
- newAppArgs := filterEmptyStrings(appArgs)
+ // we need to ignore empty strings from appArgs because app-arg was
+ previously a StringSliceVar, which also does that, and some tests depend
+ // on it. appArgs became `StringArrayVar` in order to support abi arguments
+ // which contain commas.
- for _, arg := range newAppArgs {
- encodingValue := strings.SplitN(arg, ":", 2)
- if len(encodingValue) != 2 {
- reportErrorf("all arguments should be of the form 'encoding:value'")
- }
- encodedArg := appCallArg{
- Encoding: encodingValue[0],
- Value: encodingValue[1],
+ var encodedArgs []logic.AppCallBytes
+ for _, arg := range appArgs {
+ if len(arg) > 0 {
+ encodedArgs = append(encodedArgs, newAppCallBytes(arg))
}
- encodedArgs = append(encodedArgs, encodedArg)
}
inputs := appCallInputs{
Accounts: appStrAccounts,
- ForeignApps: getForeignApps(),
- ForeignAssets: getForeignAssets(),
+ ForeignApps: stringsToUint64(foreignApps),
+ ForeignAssets: stringsToUint64(foreignAssets),
+ Boxes: stringsToBoxRefs(appBoxes),
Args: encodedArgs,
}
@@ -450,14 +448,14 @@ var createAppCmd = &cobra.Command{
// Parse transaction parameters
approvalProg, clearProg := mustParseProgArgs()
onCompletionEnum := mustParseOnCompletion(onCompletion)
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
switch onCompletionEnum {
case transactions.CloseOutOC, transactions.ClearStateOC:
reportWarnf("'--on-completion %s' may be ill-formed for 'goal app create'", onCompletion)
}
- tx, err := client.MakeUnsignedAppCreateTx(onCompletionEnum, approvalProg, clearProg, globalSchema, localSchema, appArgs, appAccounts, foreignApps, foreignAssets, extraPages)
+ tx, err := client.MakeUnsignedAppCreateTx(onCompletionEnum, approvalProg, clearProg, globalSchema, localSchema, appArgs, appAccounts, foreignApps, foreignAssets, boxes, extraPages)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -530,9 +528,9 @@ var updateAppCmd = &cobra.Command{
// Parse transaction parameters
approvalProg, clearProg := mustParseProgArgs()
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppUpdateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, approvalProg, clearProg)
+ tx, err := client.MakeUnsignedAppUpdateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes, approvalProg, clearProg)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -600,9 +598,9 @@ var optInAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppOptInTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppOptInTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -670,9 +668,9 @@ var closeOutAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppCloseOutTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppCloseOutTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -740,9 +738,9 @@ var clearAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppClearStateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppClearStateTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -810,9 +808,9 @@ var callAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppNoOpTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppNoOpTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -880,9 +878,9 @@ var deleteAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgs, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgs, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
- tx, err := client.MakeUnsignedAppDeleteTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets)
+ tx, err := client.MakeUnsignedAppDeleteTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, boxes)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
@@ -1254,7 +1252,7 @@ var methodAppCmd = &cobra.Command{
dataDir, client := getDataDirAndClient()
// Parse transaction parameters
- appArgsParsed, appAccounts, foreignApps, foreignAssets := getAppInputs()
+ appArgsParsed, appAccounts, foreignApps, foreignAssets, boxes := getAppInputs()
if len(appArgsParsed) > 0 {
reportErrorf("--arg and --app-arg are mutually exclusive, do not use --app-arg")
}
@@ -1371,7 +1369,7 @@ var methodAppCmd = &cobra.Command{
}
appCallTxn, err := client.MakeUnsignedApplicationCallTx(
- appIdx, applicationArgs, appAccounts, foreignApps, foreignAssets,
+ appIdx, applicationArgs, appAccounts, foreignApps, foreignAssets, boxes,
onCompletionEnum, approvalProg, clearProg, globalSchema, localSchema, extraPages)
if err != nil {
diff --git a/cmd/goal/box.go b/cmd/goal/box.go
new file mode 100644
index 000000000..850be9ae5
--- /dev/null
+++ b/cmd/goal/box.go
@@ -0,0 +1,118 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+var boxName string
+var maxBoxes uint64
+
+func init() {
+ appCmd.AddCommand(appBoxCmd)
+
+ appBoxCmd.AddCommand(appBoxInfoCmd)
+ appBoxCmd.AddCommand(appBoxListCmd)
+ appBoxCmd.PersistentFlags().Uint64Var(&appIdx, "app-id", 0, "Application ID")
+ appBoxCmd.MarkFlagRequired("app-id")
+
+ appBoxInfoCmd.Flags().StringVarP(&boxName, "name", "n", "", "Application box name. Use the same form as app-arg to name the box.")
+ appBoxInfoCmd.MarkFlagRequired("name")
+
+ appBoxListCmd.Flags().Uint64VarP(&maxBoxes, "max", "m", 0, "Maximum number of boxes to list. 0 means no limit.")
+}
+
+var appBoxCmd = &cobra.Command{
+ Use: "box",
+ Short: "Read application box data",
+ Args: cobra.ArbitraryArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ // If no arguments passed, we should fallback to help
+ cmd.HelpFunc()(cmd, args)
+ },
+}
+
+var appBoxInfoCmd = &cobra.Command{
+ Use: "info",
+ Short: "Retrieve information about an application box.",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ _, client := getDataDirAndClient()
+
+ // Ensure box name is specified
+ if boxName == "" {
+ reportErrorf(errorMissingBoxName)
+ }
+
+ // Get box info
+ box, err := client.GetApplicationBoxByName(appIdx, boxName)
+ if err != nil {
+ if strings.Contains(err.Error(), `{"message":"box not found"}`) {
+ reportErrorf("No box found for appid %d with name %s", appIdx, boxName)
+ }
+ reportErrorf(errorRequestFail, err)
+ }
+
+ // Print inputted box name, but check that it matches found box name first
+ // This reduces confusion of potentially receiving a different box name representation
+ boxNameBytes, err := newAppCallBytes(boxName).Raw()
+ if err != nil {
+ reportErrorf(errorInvalidBoxName, boxName, err)
+ }
+ if !bytes.Equal(box.Name, boxNameBytes) {
+ reportErrorf(errorBoxNameMismatch, box.Name, boxNameBytes)
+ }
+ reportInfof("Name: %s", boxName)
+
+ // Print box value
+ reportInfof("Value: %s", encodeBytesAsAppCallBytes(box.Value))
+ },
+}
+
+var appBoxListCmd = &cobra.Command{
+ Use: "list",
+ Short: "List all application boxes belonging to an application",
+ Long: "List all application boxes belonging to an application.\n" +
+ "For printable strings, the box name is formatted as 'str:hello'\n" +
+ "For everything else, the box name is formatted as 'b64:A=='. ",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ _, client := getDataDirAndClient()
+
+ // Get app boxes
+ boxesRes, err := client.ApplicationBoxes(appIdx, maxBoxes)
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+ boxes := boxesRes.Boxes
+
+ // Error if no boxes found
+ if len(boxes) == 0 {
+ reportErrorf("No boxes found for appid %d", appIdx)
+ }
+
+ // Print app boxes
+ for _, descriptor := range boxes {
+ encodedName := encodeBytesAsAppCallBytes(descriptor.Name)
+ reportInfof("%s", encodedName)
+ }
+ },
+}
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index 8fb4550c0..5440c04ec 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -17,6 +17,7 @@
package main
import (
+ "flag"
"fmt"
"io"
"os"
@@ -552,13 +553,23 @@ func reportErrorln(args ...interface{}) {
}
fmt.Fprintln(os.Stderr, line)
}
- os.Exit(1)
+ exit(1)
}
func reportErrorf(format string, args ...interface{}) {
reportErrorln(fmt.Sprintf(format, args...))
}
+func exit(code int) {
+ if flag.Lookup("test.v") == nil {
+ // normal run
+ os.Exit(code)
+ } else {
+ // testing run. panic, so we can require.Panic
+ panic(code)
+ }
+}
+
// writeFile is a wrapper of os.WriteFile which considers the special
// case of stdout filename
func writeFile(filename string, data []byte, perm os.FileMode) error {
diff --git a/cmd/goal/examples/boxes.teal b/cmd/goal/examples/boxes.teal
new file mode 100644
index 000000000..45e284d6b
--- /dev/null
+++ b/cmd/goal/examples/boxes.teal
@@ -0,0 +1,60 @@
+// WARNING: THIS IS NOT A PRODUCTION QUALITY PROGRAM - FOR TEST PURPOSES ONLY
+
+#pragma version 8
+ txn ApplicationID
+ bz end
+ txn ApplicationArgs 0 // [arg[0]] // fails if no args && app already exists
+ byte "create" // [arg[0], "create"] // create box named arg[1]
+ == // [arg[0]=?="create"]
+ bz del // "create" ? continue : goto del
+ int 24 // [24]
+ txn NumAppArgs // [24, NumAppArgs]
+ int 2 // [24, NumAppArgs, 2]
+ == // [24, NumAppArgs=?=2]
+ bnz default // WARNING: Assumes that when "create" provided, NumAppArgs >= 3
+ pop // get rid of 24 // NumAppArgs != 2
+ txn ApplicationArgs 2 // [arg[2]] // ERROR when NumAppArgs == 1
+ btoi // [btoi(arg[2])]
+default: // [24] // NumAppArgs >= 3
+ txn ApplicationArgs 1 // [24, arg[1]]
+ swap // [arg[1], 24]
+ box_create // [] // boxes: arg[1] -> [24]byte
+ assert
+ b end
+del: // delete box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "delete" // [arg[0], "delete"]
+ == // [arg[0]=?="delete"]
+ bz set // "delete" ? continue : goto set
+ txn ApplicationArgs 1 // [arg[1]]
+ box_del // del boxes[arg[1]]
+ assert
+ b end
+set: // put arg[1] at start of box arg[0] ... so actually a _partial_ "set"
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "set" // [arg[0], "set"]
+ == // [arg[0]=?="set"]
+ bz test // "set" ? continue : goto test
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ box_replace // [] // boxes: arg[1] -> replace(boxes[arg[1]], 0, arg[2])
+ b end
+test: // fail unless arg[2] is the prefix of box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "check" // [arg[0], "check"]
+ == // [arg[0]=?="check"]
+ bz bad // "check" ? continue : goto bad
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ len // [arg[1], 0, len(arg[2])]
+ box_extract // [ boxes[arg[1]][0:len(arg[2])] ]
+ txn ApplicationArgs 2 // [ boxes[arg[1]][0:len(arg[2])], arg[2] ]
+ == // [ boxes[arg[1]][0:len(arg[2])]=?=arg[2] ]
+ assert // boxes[arg[1]].startwith(arg[2]) ? pop : ERROR
+ b end
+bad: // arg[0] ∉ {"create", "delete", "set", "check"}
+ err
+end:
+ int 1
diff --git a/cmd/goal/examples/clear.teal b/cmd/goal/examples/clear.teal
new file mode 100644
index 000000000..2b67f1fa6
--- /dev/null
+++ b/cmd/goal/examples/clear.teal
@@ -0,0 +1,2 @@
+#pragma version 8
+int 1
diff --git a/cmd/goal/formatting.go b/cmd/goal/formatting.go
index 06bfcad85..ed4b3116e 100644
--- a/cmd/goal/formatting.go
+++ b/cmd/goal/formatting.go
@@ -17,6 +17,7 @@
package main
import (
+ "encoding/base64"
"unicode"
"unicode/utf8"
@@ -190,9 +191,19 @@ func heuristicFormatVal(val basics.TealValue) basics.TealValue {
}
func heuristicFormat(state map[string]basics.TealValue) map[string]basics.TealValue {
- result := make(map[string]basics.TealValue)
+ result := make(map[string]basics.TealValue, len(state))
for k, v := range state {
result[heuristicFormatKey(k)] = heuristicFormatVal(v)
}
return result
}
+
+// Encode bytes as an app call bytes string.
+// Will use `str:` if the string is printable, otherwise `b64:`.
+func encodeBytesAsAppCallBytes(value []byte) string {
+ if isPrintable, _ := unicodePrintable(string(value)); isPrintable {
+ return "str:" + string(value)
+ }
+
+ return "b64:" + base64.StdEncoding.EncodeToString(value)
+}
diff --git a/cmd/goal/formatting_test.go b/cmd/goal/formatting_test.go
index bc3bce670..9915597d7 100644
--- a/cmd/goal/formatting_test.go
+++ b/cmd/goal/formatting_test.go
@@ -42,3 +42,74 @@ func TestUnicodePrintable(t *testing.T) {
require.Equalf(t, testElement.printableString, printableString, "test string:%s", testElement.testString)
}
}
+
+func TestNewAppCallBytes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ acb := newAppCallBytes("int:3")
+ require.Equal(t, "int", acb.Encoding)
+ require.Equal(t, "3", acb.Value)
+ _, err := acb.Raw()
+ require.NoError(t, err)
+
+ require.Panics(t, func() { newAppCallBytes("hello") })
+}
+
+func TestNewBoxRef(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ br := newBoxRef("str:hello")
+ require.EqualValues(t, 0, br.appID)
+ require.Equal(t, "str", br.name.Encoding)
+ require.Equal(t, "hello", br.name.Value)
+
+ require.Panics(t, func() { newBoxRef("1,hello") })
+ require.Panics(t, func() { newBoxRef("hello") })
+
+ br = newBoxRef("2,str:hello")
+ require.EqualValues(t, 2, br.appID)
+ require.Equal(t, "str", br.name.Encoding)
+ require.Equal(t, "hello", br.name.Value)
+}
+
+func TestStringsToBoxRefs(t *testing.T) {
+ brs := stringsToBoxRefs([]string{"77,str:hello", "55,int:6", "int:88"})
+ require.EqualValues(t, 77, brs[0].appID)
+ require.EqualValues(t, 55, brs[1].appID)
+ require.EqualValues(t, 0, brs[2].appID)
+
+ tbrs := translateBoxRefs(brs, []uint64{55, 77})
+ require.EqualValues(t, 2, tbrs[0].Index)
+ require.EqualValues(t, 1, tbrs[1].Index)
+ require.EqualValues(t, 0, tbrs[2].Index)
+
+ require.Panics(t, func() { translateBoxRefs(stringsToBoxRefs([]string{"addr:88"}), nil) })
+ translateBoxRefs(stringsToBoxRefs([]string{"addr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ"}), nil)
+ // if we're here, that didn't panic/exit
+
+ tbrs = translateBoxRefs(brs, []uint64{77, 55})
+ require.EqualValues(t, 1, tbrs[0].Index)
+ require.EqualValues(t, 2, tbrs[1].Index)
+ require.EqualValues(t, 0, tbrs[2].Index)
+
+ require.Panics(t, func() { translateBoxRefs(brs, []uint64{55, 78}) })
+ require.Panics(t, func() { translateBoxRefs(brs, []uint64{51, 77}) })
+}
+
+func TestBytesToAppCallBytes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ testCases := []struct {
+ input []byte
+ expected string
+ }{
+ {[]byte("unicode"), "str:unicode"},
+ {[]byte{1, 2, 3, 4}, "b64:AQIDBA=="},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.expected, func(t *testing.T) {
+ acb := encodeBytesAsAppCallBytes(tc.input)
+ require.Equal(t, tc.expected, acb)
+ })
+ }
+}
diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go
index 45fb5bf8a..11255a4a3 100644
--- a/cmd/goal/interact.go
+++ b/cmd/goal/interact.go
@@ -513,7 +513,7 @@ var appExecuteCmd = &cobra.Command{
var inputs appCallInputs
for _, arg := range proc.Args {
- var callArg appCallArg
+ var callArg logic.AppCallBytes
callArg.Encoding = arg.Kind
if !procFlags.Changed(arg.Name) && arg.Default != "" {
@@ -565,7 +565,7 @@ var appExecuteCmd = &cobra.Command{
appArgs := make([][]byte, len(inputs.Args))
for i, arg := range inputs.Args {
- rawValue, err := parseAppArg(arg)
+ rawValue, err := arg.Raw()
if err != nil {
reportErrorf("Could not parse argument corresponding to '%s': %v", proc.Args[i].Name, err)
}
@@ -586,7 +586,7 @@ var appExecuteCmd = &cobra.Command{
localSchema = header.Query.Local.ToStateSchema()
globalSchema = header.Query.Global.ToStateSchema()
}
- tx, err := client.MakeUnsignedApplicationCallTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, onCompletion, approvalProg, clearProg, globalSchema, localSchema, 0)
+ tx, err := client.MakeUnsignedApplicationCallTx(appIdx, appArgs, appAccounts, foreignApps, foreignAssets, nil, onCompletion, approvalProg, clearProg, globalSchema, localSchema, 0)
if err != nil {
reportErrorf("Cannot create application txn: %v", err)
}
diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go
index 35ec43efa..98f8e215d 100644
--- a/cmd/goal/messages.go
+++ b/cmd/goal/messages.go
@@ -100,6 +100,9 @@ const (
errorMarshalingState = "failed to encode state: %s"
errorApprovProgArgsRequired = "Exactly one of --approval-prog or --approval-prog-raw is required"
errorClearProgArgsRequired = "Exactly one of --clear-prog or --clear-prog-raw is required"
+ errorMissingBoxName = "Box --name is required"
+ errorInvalidBoxName = "Failed to parse box name %s. It must have the same form as app-arg. Error: %s"
+ errorBoxNameMismatch = "Inputted box name %s does not match box name %s received from algod"
// Clerk
infoTxIssued = "Sent %d MicroAlgos from account %s to address %s, transaction ID: %s. Fee set to %d"
diff --git a/cmd/netgoal/README.md b/cmd/netgoal/README.md
new file mode 100644
index 000000000..8b1ebc870
--- /dev/null
+++ b/cmd/netgoal/README.md
@@ -0,0 +1,52 @@
+# Netgoal
+
+## netgoal generate
+`--participation-host-machines (-N)` and `--npn-host-machines (-X)` are optional parameters and they default to `--participation-algod-nodes (-n)` and `--npn-algod-nodes (-x)` respectively, i.e. defaults to a machine per algod node.
+
+### Long-Form Flags Example
+- Wallets: The command below will generate 100 wallets for the 100 participation algod nodes. By default each npn gets one wallet. If there are more wallets than nodes, they will be split across the participation algod nodes.
+- Relays: 8 Relays and 8 machines to host the relays will be generated
+- Participation Nodes: 100 participation algod nodes will be distributed across 20 host machines.
+- Non-Participation Nodes (NPNs): 10 non-participation algod nodes will be distributed across 5 host machines.
+
+```
+netgoal generate -t net -r /tmp/wat -o net.json --wallets 100 --relays 8 --participation-host-machines 20 --participation-algod-nodes 100 --npn-host-machines 5 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+```
+
+### Short-Form Flags Example
+The following will result in the same outcome as the command above.
+```
+netgoal generate -t net -r /tmp/wat -o net.json -w 100 -R 8 -N 20 -n 100 -X 5 -x 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+```
+## Flags
+```
+netgoal generate -h
+
+Usage:
+ netgoal generate [flags]
+
+Flags:
+ --bal stringArray Application Count
+ -h, --help help for generate
+ --naccounts uint Account count (default 31)
+ --napps uint Application Count (default 7)
+ --nassets uint Asset count (default 5)
+ --node-template string json for one node
+ --non-participating-node-template string json for non participating node
+ -x, --npn-algod-nodes int Total non-participation algod nodes to generate
+ -X, --npn-host-machines int Host machines to generate for non-participation algod nodes, default=npn-algod-nodes
+ --ntxns uint Transaction count (default 17)
+ -o, --outputfile string Output filename
+ -n, --participation-algod-nodes int Total participation algod nodes to generate (default -1)
+ -N, --participation-host-machines int Host machines to generate for participation algod nodes, default=participation-algod-nodes (default -1)
+ --relay-template string json for a relay node
+ -R, --relays int Relays to generate (default -1)
+ --rounds uint Number of rounds (default 13)
+ -t, --template string Template to generate
+ --wallet-name string Source wallet name
+ -w, --wallets int Wallets to generate (default -1)
+
+Global Flags:
+ -m, --modifier string Override Genesis Version Modifier (eg 'v1')
+ -r, --rootdir string Root directory for the private network directories
+```
diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go
index 725b5e5cf..0eed602bf 100644
--- a/cmd/netgoal/generate.go
+++ b/cmd/netgoal/generate.go
@@ -37,10 +37,10 @@ import (
var outputFilename string
var templateToGenerate string
var relaysToGenerate int
-var nodesToGenerate int
-var nodeHostsToGenerate int
-var nonPartnodesToGenerate int
-var nonPartnodesHostsToGenerate int
+var participationAlgodNodes int
+var participationHostMachines int
+var npnAlgodNodes int
+var npnHostMachines int
var walletsToGenerate int
var nodeTemplatePath string
var nonParticipatingNodeTemplatePath string
@@ -63,10 +63,10 @@ func init() {
generateCmd.Flags().IntVarP(&walletsToGenerate, "wallets", "w", -1, "Wallets to generate")
generateCmd.Flags().IntVarP(&relaysToGenerate, "relays", "R", -1, "Relays to generate")
- generateCmd.Flags().IntVarP(&nodeHostsToGenerate, "node-hosts", "N", -1, "Node-hosts to generate, default=nodes")
- generateCmd.Flags().IntVarP(&nodesToGenerate, "nodes", "n", -1, "Nodes to generate")
- generateCmd.Flags().IntVarP(&nonPartnodesToGenerate, "non-participating-nodes", "X", 0, "Non participating nodes to generate")
- generateCmd.Flags().IntVarP(&nonPartnodesHostsToGenerate, "non-participating-nodes-hosts", "H", 0, "Non participating nodes hosts to generate")
+ generateCmd.Flags().IntVarP(&participationAlgodNodes, "participation-algod-nodes", "n", -1, "Total participation algod nodes to generate")
+ generateCmd.Flags().IntVarP(&participationHostMachines, "participation-host-machines", "N", -1, "Host machines to generate for participation algod nodes, default=participation-algod-nodes")
+ generateCmd.Flags().IntVarP(&npnAlgodNodes, "npn-algod-nodes", "x", 0, "Total non-participation algod nodes to generate")
+ generateCmd.Flags().IntVarP(&npnHostMachines, "npn-host-machines", "X", 0, "Host machines to generate for non-participation algod nodes, default=npn-algod-nodes")
generateCmd.Flags().StringVarP(&nodeTemplatePath, "node-template", "", "", "json for one node")
generateCmd.Flags().StringVarP(&nonParticipatingNodeTemplatePath, "non-participating-node-template", "", "", "json for non participating node")
generateCmd.Flags().StringVarP(&relayTemplatePath, "relay-template", "", "", "json for a relay node")
@@ -149,24 +149,27 @@ template modes for -t:`,
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
- err = generateWalletGenesis(outputFilename, walletsToGenerate, nonPartnodesToGenerate)
+ err = generateWalletGenesis(outputFilename, walletsToGenerate, npnAlgodNodes)
case "net", "network", "goalnet":
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
- if nodesToGenerate < 0 {
+ if participationAlgodNodes < 0 {
reportErrorf("must specify number of nodes with -n")
}
- if nodeHostsToGenerate < 0 {
- nodeHostsToGenerate = nodesToGenerate
+ if participationHostMachines < 0 {
+ participationHostMachines = participationAlgodNodes
+ }
+ if (npnAlgodNodes >= 0) && (npnHostMachines == 0) {
+ npnHostMachines = npnAlgodNodes
}
if relaysToGenerate < 0 {
reportErrorf("must specify number of relays with -R")
}
if templateType == "goalnet" {
- err = generateNetworkGoalTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate)
+ err = generateNetworkGoalTemplate(outputFilename, walletsToGenerate, relaysToGenerate, participationAlgodNodes, npnAlgodNodes)
} else {
- err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodeHostsToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate, nonPartnodesToGenerate, baseNode, baseNonParticipatingNode, baseRelay)
+ err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, participationHostMachines, participationAlgodNodes, npnHostMachines, npnAlgodNodes, baseNode, baseNonParticipatingNode, baseRelay)
}
case "otwt":
err = generateNetworkTemplate(outputFilename, 1000, 10, 20, 100, 0, 0, baseNode, baseNonParticipatingNode, baseRelay)
@@ -234,9 +237,9 @@ func pickNodeConfig(alt []remote.NodeConfig, name string) remote.NodeConfig {
return alt[0]
}
-func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes, npnHosts int) error {
+func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes, npnNodes int) error {
template := netdeploy.NetworkTemplate{}
- template.Nodes = make([]remote.NodeConfigGoal, 0, relays+nodes+npnHosts)
+ template.Nodes = make([]remote.NodeConfigGoal, 0, relays+nodes+npnNodes)
template.Genesis = generateWalletGenesisData(walletsToGenerate, 0)
for i := 0; i < relays; i++ {
name := "relay" + strconv.Itoa(i+1)
@@ -257,7 +260,7 @@ func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes
template.Nodes = append(template.Nodes, newNode)
}
- for i := 0; i < npnHosts; i++ {
+ for i := 0; i < npnNodes; i++ {
name := "nonParticipatingNode" + strconv.Itoa(i+1)
newNode := remote.NodeConfigGoal{
Name: name,
@@ -286,8 +289,8 @@ func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes
}
}
- if npnHosts > 0 {
- for walletIndex < npnHosts {
+ if npnNodes > 0 {
+ for walletIndex < npnNodes {
for nodei, node := range template.Nodes {
if node.Name[0:4] != "nonP" {
continue
@@ -298,11 +301,11 @@ func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes
}
template.Nodes[nodei].Wallets = append(template.Nodes[nodei].Wallets, wallet)
walletIndex++
- if walletIndex >= npnHosts {
+ if walletIndex >= npnNodes {
break
}
}
- if walletIndex >= npnHosts {
+ if walletIndex >= npnNodes {
break
}
}
@@ -478,18 +481,18 @@ func saveGoalTemplateToDisk(template netdeploy.NetworkTemplate, filename string)
return err
}
-func generateWalletGenesisData(wallets, npnHosts int) gen.GenesisData {
+func generateWalletGenesisData(wallets, npnNodes int) gen.GenesisData {
ratZero := big.NewRat(int64(0), int64(1))
ratHundred := big.NewRat(int64(100), int64(1))
data := gen.DefaultGenesis
- totalWallets := wallets + npnHosts
+ totalWallets := wallets + npnNodes
data.Wallets = make([]gen.WalletData, totalWallets)
participatingNodeStake := big.NewRat(int64(100), int64(wallets))
nonParticipatingNodeStake := ratZero
- if npnHosts > 0 {
+ if npnNodes > 0 {
// split participating and non participating stake evenly
participatingNodeStake = big.NewRat(int64(50), int64(wallets))
- nonParticipatingNodeStake = big.NewRat(int64(50), int64(npnHosts))
+ nonParticipatingNodeStake = big.NewRat(int64(50), int64(npnNodes))
}
stake := ratZero
@@ -519,8 +522,8 @@ func generateWalletGenesisData(wallets, npnHosts int) gen.GenesisData {
return data
}
-func generateWalletGenesis(filename string, wallets, npnHosts int) error {
- data := generateWalletGenesisData(wallets, npnHosts)
+func generateWalletGenesis(filename string, wallets, npnNodes int) error {
+ data := generateWalletGenesisData(wallets, npnNodes)
return saveGenesisDataToDisk(data, filename)
}
diff --git a/cmd/opdoc/tmLanguage.go b/cmd/opdoc/tmLanguage.go
index 7c193d01e..6501babc0 100644
--- a/cmd/opdoc/tmLanguage.go
+++ b/cmd/opdoc/tmLanguage.go
@@ -169,6 +169,7 @@ func buildSyntaxHighlight() *tmLanguage {
},
},
}
+ var allAccess []string
var allArithmetics []string
var keys []string
@@ -192,11 +193,8 @@ func buildSyntaxHighlight() *tmLanguage {
Name: "keyword.other.teal",
Match: fmt.Sprintf("^(%s)\\b", strings.Join(loading, "|")),
})
- case "State Access":
- keywords.Patterns = append(keywords.Patterns, pattern{
- Name: "keyword.other.unit.teal",
- Match: fmt.Sprintf("^(%s)\\b", strings.Join(names, "|")),
- })
+ case "State Access", "Box Access":
+ allAccess = append(allAccess, names...)
// For these, accumulate into allArithmetics,
// and only add to keyword.Patterns later, when all
// have been collected.
@@ -231,6 +229,10 @@ func buildSyntaxHighlight() *tmLanguage {
}
}
keywords.Patterns = append(keywords.Patterns, pattern{
+ Name: "keyword.other.unit.teal",
+ Match: fmt.Sprintf("^(%s)\\b", strings.Join(allAccess, "|")),
+ })
+ keywords.Patterns = append(keywords.Patterns, pattern{
Name: "keyword.operator.teal",
Match: fmt.Sprintf("^(%s)\\b", strings.Join(allArithmetics, "|")),
})
diff --git a/cmd/pingpong/README.md b/cmd/pingpong/README.md
new file mode 100644
index 000000000..738e57d49
--- /dev/null
+++ b/cmd/pingpong/README.md
@@ -0,0 +1,8 @@
+# PingPong usage
+
+Example:
+`pingpong run -d {node data directory} --numapp 10 --numboxread 4 --tps 200 --refresh 1800 --numaccounts 500 --duration 120`
+
+Note: if you don't set the `--duration` parameter the test will continue running until it's stopped externally.
+
+`pingpong run -h` will describe each CLI parameter. \ No newline at end of file
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index d49cea843..4e516423c 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -59,6 +59,20 @@ var teal string
var groupSize uint32
var numAsset uint32
var numApp uint32
+
+/*
+Note on box workloads:
+
+two different box workloads are supported in order to exercise different
+portions of the performance critical codepath while keeping the app programs
+relatively simple. The BoxUpdate workload updates the content of the boxes
+during every app call, to verify that box manipulation is performant. The BoxRead
+workload only reads the box contents, which requires every box read to work its
+way through the in memory state deltas, into the box cache, and potentially all the
+way to the database.
+*/
+var numBoxUpdate uint32
+var numBoxRead uint32
var numAppOptIn uint32
var appProgOps uint32
var appProgHashes uint32
@@ -105,6 +119,8 @@ func init() {
runCmd.Flags().Uint32Var(&groupSize, "groupsize", 1, "The number of transactions in each group")
runCmd.Flags().Uint32Var(&numAsset, "numasset", 0, "The number of assets each account holds")
runCmd.Flags().Uint32Var(&numApp, "numapp", 0, "The total number of apps to create")
+ runCmd.Flags().Uint32Var(&numBoxUpdate, "numboxupdate", 0, "The total number of boxes each app holds, where boxes are updated each app call. Only one of numboxupdate and numboxread can be set")
+ runCmd.Flags().Uint32Var(&numBoxRead, "numboxread", 0, "The total number of boxes each app holds, where boxes are only read each app call. Only one of numboxupdate and numboxread can be set.")
runCmd.Flags().Uint32Var(&numAppOptIn, "numappoptin", 0, "The number of apps each account opts in to")
runCmd.Flags().Uint32Var(&appProgOps, "appprogops", 0, "The approximate number of TEAL operations to perform in each ApplicationCall transaction")
runCmd.Flags().Uint32Var(&appProgHashes, "appproghashes", 0, "The number of hashes to include in the Application")
@@ -360,6 +376,32 @@ var runCmd = &cobra.Command{
cfg.AppLocalKeys = appProgLocalKeys
}
+ // verify and set numBoxUpdate
+ if numBoxUpdate != 0 && numApp == 0 {
+ reportErrorf("If number of boxes is nonzero then number of apps must also be nonzero")
+ }
+
+ if numBoxUpdate <= 8 {
+ cfg.NumBoxUpdate = numBoxUpdate
+ } else {
+ reportErrorf("Invalid number of boxes: %d, (valid number: 0 - 8)\n", numBoxUpdate)
+ }
+
+ // verify and set numBoxRead
+ if numBoxRead != 0 && numApp == 0 {
+ reportErrorf("If number of boxes is nonzero then number of apps must also be nonzero")
+ }
+
+ if numBoxRead != 0 && numBoxUpdate != 0 {
+ reportErrorf("Only one of numboxread or numboxupdate can be nonzero")
+ }
+
+ if numBoxRead <= 8 {
+ cfg.NumBoxRead = numBoxRead
+ } else {
+ reportErrorf("Invalid number of boxes: %d, (valid number: 0 - 8)\n", numBoxRead)
+ }
+
if rekey {
cfg.Rekey = rekey
if !cfg.RandomLease && !cfg.RandomNote && !cfg.RandomizeFee && !cfg.RandomizeAmt {
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index c0a6cd723..030dcd853 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -321,6 +321,10 @@ func (l *localLedger) LookupApplication(rnd basics.Round, addr basics.Address, a
return result, nil
}
+func (l *localLedger) LookupKv(rnd basics.Round, name string) ([]byte, error) {
+ return nil, fmt.Errorf("boxes not implemented in debugger")
+}
+
func (l *localLedger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
ad := l.balances[addr]
// Clear RewardsBase since tealdbg has no idea about rewards level so the underlying calculation with reward will fail.
diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go
index c92113d70..a9ecb02d4 100644
--- a/components/mocks/mockCatchpointCatchupAccessor.go
+++ b/components/mocks/mockCatchpointCatchupAccessor.go
@@ -52,8 +52,8 @@ func (m *MockCatchpointCatchupAccessor) ResetStagingBalances(ctx context.Context
return nil
}
-// ProgressStagingBalances deserialize the given bytes as a temporary staging balances
-func (m *MockCatchpointCatchupAccessor) ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *ledger.CatchpointCatchupAccessorProgress) (err error) {
+// ProcessStagingBalances deserialize the given bytes as a temporary staging balances
+func (m *MockCatchpointCatchupAccessor) ProcessStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *ledger.CatchpointCatchupAccessorProgress) (err error) {
return nil
}
diff --git a/config/config_test.go b/config/config_test.go
index 1e1915faa..4434cc3ae 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -302,7 +302,7 @@ func TestConfigMigrateFromDisk(t *testing.T) {
a.NoError(err)
modified, err := migrate(c)
a.NoError(err)
- a.Equal(defaultLocal, modified)
+ a.Equal(defaultLocal, modified, "config-v%d.json", configVersion)
}
cNext := Local{Version: getLatestConfigVersion() + 1}
@@ -486,7 +486,7 @@ func TestLocalStructTags(t *testing.T) {
localType := reflect.TypeOf(Local{})
versionField, ok := localType.FieldByName("Version")
- require.True(t, true, ok)
+ require.True(t, ok)
ver := 0
versionTags := []string{}
for {
@@ -503,7 +503,7 @@ func TestLocalStructTags(t *testing.T) {
if field.Tag == "" {
require.Failf(t, "Field is missing versioning information", "Field Name: %s", field.Name)
}
- // the field named "Version" is tested separatly in TestLocalVersionField, so we'll be skipping
+ // the field named "Version" is tested separately in TestLocalVersionField, so we'll be skipping
// it on this test.
if field.Name == "Version" {
continue
diff --git a/config/consensus.go b/config/consensus.go
index 71b54daa7..dfba6c372 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -271,7 +271,7 @@ type ConsensusParams struct {
// be read in the transaction
MaxAppTxnForeignAssets int
- // maximum number of "foreign references" (accounts, asa, app)
+ // maximum number of "foreign references" (accounts, asa, app, boxes)
// that can be attached to a single app call.
MaxAppTotalTxnReferences int
@@ -331,6 +331,26 @@ type ConsensusParams struct {
// []byte values stored in LocalState or GlobalState key/value stores
SchemaBytesMinBalance uint64
+ // Maximum length of a box (Does not include name/key length. That is capped by MaxAppKeyLen)
+ MaxBoxSize uint64
+
+ // Minimum Balance Requirement (MBR) per box created (this accounts for a
+ // bit of overhead used to store the box bytes)
+ BoxFlatMinBalance uint64
+
+ // MBR per byte of box storage. MBR is incremented by BoxByteMinBalance * (len(name)+len(value))
+ BoxByteMinBalance uint64
+
+ // Number of box references allowed
+ MaxAppBoxReferences int
+
+ // Amount added to a txgroup's box I/O budget per box ref supplied.
+ // For reads: the sum of the sizes of all boxes in the group must be less than I/O budget
+ // For writes: the sum of the sizes of all boxes created or written must be less than I/O budget
+ // In both cases, what matters is the sizes of the boxes touched, not the
+ // number of times they are touched, or the size of the touches.
+ BytesPerBoxReference uint64
+
// maximum number of total key/value pairs allowed by a given
// LocalStateSchema (and therefore allowed in LocalState)
MaxLocalSchemaEntries uint64
@@ -1208,12 +1228,27 @@ func initConsensusProtocols() {
v33.ApprovedUpgrades[protocol.ConsensusV35] = 10000
v34.ApprovedUpgrades[protocol.ConsensusV35] = 10000
+ v36 := v35
+ v36.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Boxes (unlimited global storage)
+ v36.LogicSigVersion = 8
+ v36.MaxBoxSize = 32768
+ v36.BoxFlatMinBalance = 2500
+ v36.BoxByteMinBalance = 400
+ v36.MaxAppBoxReferences = 8
+ v36.BytesPerBoxReference = 1024
+
+ Consensus[protocol.ConsensusV36] = v36
+
+ v35.ApprovedUpgrades[protocol.ConsensusV36] = 140000
+
// ConsensusFuture is used to test features that are implemented
// but not yet released in a production protocol version.
- vFuture := v35
+ vFuture := v36
vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- vFuture.LogicSigVersion = 8 // When moving this to a release, put a new higher LogicSigVersion here
+ vFuture.LogicSigVersion = 9 // When moving this to a release, put a new higher LogicSigVersion here
Consensus[protocol.ConsensusFuture] = vFuture
diff --git a/config/localTemplate.go b/config/localTemplate.go
index ed6eb4493..ba9d97db7 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -41,7 +41,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitely, otherwise, only the most recent blocks
@@ -452,6 +452,14 @@ type Local struct {
// MaxAcctLookback sets the maximum lookback range for account states,
// i.e. the ledger can answer account states questions for the range Latest-MaxAcctLookback...Latest
MaxAcctLookback uint64 `version[23]:"4"`
+
+ // EnableUsageLog enables 10Hz log of CPU and RAM usage.
+ // Also adds 'algod_ram_usage` (number of bytes in use) to /metrics
+ EnableUsageLog bool `version[24]:"false"`
+
+ // MaxAPIBoxPerApplication defines the maximum total number of boxes per application that will be returned
+ // in GetApplicationBoxes REST API responses.
+ MaxAPIBoxPerApplication uint64 `version[25]:"100000"`
}
// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 2aa46eef1..487149da9 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 24,
+ Version: 25,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 7,
@@ -70,6 +70,7 @@ var defaultLocal = Local{
EnableRequestLogger: false,
EnableRuntimeMetrics: false,
EnableTopAccountsReporting: false,
+ EnableUsageLog: false,
EnableVerbosedTransactionSyncLogging: false,
EndpointAddress: "127.0.0.1:0",
FallbackDNSResolverAddress: "",
@@ -84,6 +85,7 @@ var defaultLocal = Local{
LogArchiveMaxAge: "",
LogArchiveName: "node.archive.log",
LogSizeLimit: 1073741824,
+ MaxAPIBoxPerApplication: 100000,
MaxAPIResourcesPerAccount: 100000,
MaxAcctLookback: 4,
MaxCatchpointDownloadDuration: 7200000000000,
diff --git a/config/version.go b/config/version.go
index 0927cc14b..d35a252ad 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 11
+const VersionMinor = 12
// Version is the type holding our full version information.
type Version struct {
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 6744d6149..b2ae1f949 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -1499,6 +1499,145 @@
}
]
},
+ "/v2/applications/{application-id}/boxes": {
+ "get": {
+ "description": "Given an application ID, return all Box names. No particular ordering is guaranteed. Request fails when client or server-side configured limits prevent returning all Box names.",
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get all box names for a given application.",
+ "operationId": "GetApplicationBoxes",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "An application identifier",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "Max number of box names to return. If max is not set, or max == 0, returns all box-names.",
+ "name": "max",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "$ref": "#/responses/BoxesResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+ "parameters": [
+ {
+ "type": "integer",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ }
+ ]
+ },
+ "/v2/applications/{application-id}/box": {
+ "get": {
+ "description": "Given an application ID and box name, it returns the box name and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get box information for a given application.",
+ "operationId": "GetApplicationBoxByName",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "An application identifier",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "A box name, in the goal app call arg form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "name": "name",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "$ref": "#/responses/BoxResponse"
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Box Not Found",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ },
+ "parameters": [
+ {
+ "type": "integer",
+ "name": "application-id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "name": "name",
+ "in": "query",
+ "required": true
+ }
+ ]
+ },
"/v2/assets/{asset-id}": {
"get": {
"description": "Given an asset ID, it returns asset information including creator, name, total supply and special addresses.",
@@ -1934,6 +2073,14 @@
"description": "The count of all assets (AssetParams objects) created by this account.",
"type": "integer"
},
+ "total-boxes": {
+ "description": "\\[tbx\\] The number of existing boxes created by this account's app.",
+ "type": "integer"
+ },
+ "total-box-bytes": {
+ "description": "\\[tbxb\\] The total number of bytes used by this account's app's box keys and values.",
+ "type": "integer"
+ },
"participation": {
"$ref": "#/definitions/AccountParticipation"
},
@@ -2596,6 +2743,40 @@
}
}
},
+ "Box": {
+ "description": "Box name and its content.",
+ "type": "object",
+ "required": [
+ "name",
+ "value"
+ ],
+ "properties": {
+ "name": {
+ "description": "\\[name\\] box name, base64 encoded",
+ "type": "string",
+ "format": "byte"
+ },
+ "value": {
+ "description": "\\[value\\] box value, base64 encoded.",
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ },
+ "BoxDescriptor": {
+ "description": "Box descriptor describes a Box.",
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "description": "Base64 encoded box name",
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ },
"Version": {
"description": "algod version information.",
"type": "object",
@@ -3393,6 +3574,29 @@
"$ref": "#/definitions/Application"
}
},
+ "BoxesResponse": {
+ "description": "Box names of an application",
+ "schema": {
+ "type": "object",
+ "required": [
+ "boxes"
+ ],
+ "properties": {
+ "boxes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/BoxDescriptor"
+ }
+ }
+ }
+ }
+ },
+ "BoxResponse": {
+ "description": "Box information",
+ "schema": {
+ "$ref": "#/definitions/Box"
+ }
+ },
"AssetResponse": {
"description": "Asset information",
"schema": {
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 0a15f51bb..f1c6da8ce 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -350,6 +350,37 @@
},
"description": "Encoded block object."
},
+ "BoxResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Box"
+ }
+ }
+ },
+ "description": "Box information"
+ },
+ "BoxesResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "boxes": {
+ "items": {
+ "$ref": "#/components/schemas/BoxDescriptor"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "boxes"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Box names of an application"
+ },
"CatchpointAbortResponse": {
"content": {
"application/json": {
@@ -895,6 +926,14 @@
"description": "The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.",
"type": "integer"
},
+ "total-box-bytes": {
+ "description": "\\[tbxb\\] The total number of bytes used by this account's app's box keys and values.",
+ "type": "integer"
+ },
+ "total-boxes": {
+ "description": "\\[tbx\\] The number of existing boxes created by this account's app.",
+ "type": "integer"
+ },
"total-created-apps": {
"description": "The count of all apps (AppParams objects) created by this account.",
"type": "integer"
@@ -1201,6 +1240,43 @@
],
"type": "object"
},
+ "Box": {
+ "description": "Box name and its content.",
+ "properties": {
+ "name": {
+ "description": "\\[name\\] box name, base64 encoded",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "value": {
+ "description": "\\[value\\] box value, base64 encoded.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "value"
+ ],
+ "type": "object"
+ },
+ "BoxDescriptor": {
+ "description": "Box descriptor describes a Box.",
+ "properties": {
+ "name": {
+ "description": "Base64 encoded box name",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ],
+ "type": "object"
+ },
"BuildVersion": {
"properties": {
"branch": {
@@ -2501,6 +2577,172 @@
"summary": "Get application information."
}
},
+ "/v2/applications/{application-id}/box": {
+ "get": {
+ "description": "Given an application ID and box name, it returns the box name and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "operationId": "GetApplicationBoxByName",
+ "parameters": [
+ {
+ "description": "An application identifier",
+ "in": "path",
+ "name": "application-id",
+ "required": true,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "description": "A box name, in the goal app call arg form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "in": "query",
+ "name": "name",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Box"
+ }
+ }
+ },
+ "description": "Box information"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Box Not Found"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get box information for a given application."
+ }
+ },
+ "/v2/applications/{application-id}/boxes": {
+ "get": {
+ "description": "Given an application ID, return all Box names. No particular ordering is guaranteed. Request fails when client or server-side configured limits prevent returning all Box names.",
+ "operationId": "GetApplicationBoxes",
+ "parameters": [
+ {
+ "description": "An application identifier",
+ "in": "path",
+ "name": "application-id",
+ "required": true,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "description": "Max number of box names to return. If max is not set, or max == 0, returns all box-names.",
+ "in": "query",
+ "name": "max",
+ "schema": {
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "boxes": {
+ "items": {
+ "$ref": "#/components/schemas/BoxDescriptor"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "boxes"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Box names of an application"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get all box names for a given application."
+ }
+ },
"/v2/assets/{asset-id}": {
"get": {
"description": "Given an asset ID, it returns asset information including creator, name, total supply and special addresses.",
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index 2bece32a6..a68017787 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -374,6 +374,26 @@ func (client RestClient) AccountInformation(address string) (response v1.Account
return
}
+type applicationBoxesParams struct {
+ Max uint64 `url:"max,omitempty"`
+}
+
+// ApplicationBoxes gets the BoxesResponse associated with the passed application ID
+func (client RestClient) ApplicationBoxes(appID uint64, maxBoxNum uint64) (response generatedV2.BoxesResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/applications/%d/boxes", appID), applicationBoxesParams{maxBoxNum})
+ return
+}
+
+type applicationBoxByNameParams struct {
+ Name string `url:"name"`
+}
+
+// GetApplicationBoxByName gets the BoxResponse associated with the passed application ID and box name
+func (client RestClient) GetApplicationBoxByName(appID uint64, name string) (response generatedV2.BoxResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/applications/%d/box", appID), applicationBoxByNameParams{name})
+ return
+}
+
// AccountInformationV2 gets the AccountData associated with the passed address
func (client RestClient) AccountInformationV2(address string, includeCreatables bool) (response generatedV2.Account, err error) {
var infoParams accountInformationParams
diff --git a/daemon/algod/api/server/common/handlers.go b/daemon/algod/api/server/common/handlers.go
index c0d2f43a9..e32cb2645 100644
--- a/daemon/algod/api/server/common/handlers.go
+++ b/daemon/algod/api/server/common/handlers.go
@@ -23,6 +23,7 @@ import (
"github.com/labstack/echo/v4"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/daemon/algod/api"
"github.com/algorand/go-algorand/daemon/algod/api/server/lib"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
)
@@ -66,7 +67,7 @@ func SwaggerJSON(ctx lib.ReqContext, context echo.Context) {
w := context.Response().Writer
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
- w.Write([]byte(lib.SwaggerSpecJSON))
+ _, _ = w.Write([]byte(api.SwaggerSpecJSONEmbed))
}
// HealthCheck is an httpHandler for route GET /health
diff --git a/daemon/algod/api/server/v2/account.go b/daemon/algod/api/server/v2/account.go
index 31725f13a..cfe4bdf61 100644
--- a/daemon/algod/api/server/v2/account.go
+++ b/daemon/algod/api/server/v2/account.go
@@ -136,6 +136,8 @@ func AccountDataToAccount(
TotalAppsOptedIn: uint64(len(appsLocalState)),
AppsTotalSchema: &totalAppSchema,
AppsTotalExtraPages: numOrNil(totalExtraPages),
+ TotalBoxes: numOrNil(record.TotalBoxes),
+ TotalBoxBytes: numOrNil(record.TotalBoxBytes),
MinBalance: minBalance.Raw,
}, nil
}
@@ -330,6 +332,16 @@ func AccountToAccountData(a *generated.Account) (basics.AccountData, error) {
totalExtraPages = uint32(*a.AppsTotalExtraPages)
}
+ var totalBoxes uint64
+ if a.TotalBoxes != nil {
+ totalBoxes = *a.TotalBoxes
+ }
+
+ var totalBoxBytes uint64
+ if a.TotalBoxBytes != nil {
+ totalBoxBytes = *a.TotalBoxBytes
+ }
+
status, err := basics.UnmarshalStatus(a.Status)
if err != nil {
return basics.AccountData{}, err
@@ -350,6 +362,8 @@ func AccountToAccountData(a *generated.Account) (basics.AccountData, error) {
AppParams: appParams,
TotalAppSchema: totalSchema,
TotalExtraAppPages: totalExtraPages,
+ TotalBoxes: totalBoxes,
+ TotalBoxBytes: totalBoxBytes,
}
if a.AuthAddr != nil {
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index 3079a71aa..c827c2857 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -330,6 +330,10 @@ func (dl *dryrunLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx
return result, nil
}
+func (dl *dryrunLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return nil, fmt.Errorf("boxes not implemented in dry run")
+}
+
func (dl *dryrunLedger) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
switch ctype {
case basics.AssetCreatable:
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index cfa87cb98..4a314608f 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -388,7 +388,7 @@ func checkAppCallResponse(t *testing.T, response *generated.DryrunResponse, msg
if response.Txns[idx].AppCallMessages != nil {
messages := *response.Txns[idx].AppCallMessages
assert.GreaterOrEqual(t, len(messages), 1)
- assert.Equal(t, msg, messages[len(messages)-1])
+ assert.Contains(t, messages[len(messages)-1], msg)
}
}
}
@@ -1092,8 +1092,8 @@ int 1`)
require.NoError(t, err)
approval := ops.Program
ops, err = logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
var appIdx basics.AppIndex = 1
creator := randomAddress()
sender := randomAddress()
@@ -1369,9 +1369,9 @@ int 1`)
},
Accounts: []generated.Account{
{
- Address: sender.String(),
- Status: "Online",
- Amount: 10000000,
+ Address: (appIdx + 2).Address().String(),
+ Status: "Online",
+ AmountWithoutPendingRewards: 105_000,
},
},
}
@@ -1379,29 +1379,19 @@ int 1`)
var response generated.DryrunResponse
doDryrunRequest(&dr, &response)
require.Empty(t, response.Error)
- require.Equal(t, 3, len(response.Txns))
+ require.Len(t, response.Txns, 3)
for i, txn := range response.Txns {
messages := *txn.AppCallMessages
- require.GreaterOrEqual(t, len(messages), 1)
- cost := int64(*txn.BudgetConsumed) - int64(*txn.BudgetAdded)
- require.NotNil(t, cost)
- require.Equal(t, expectedCosts[i], cost)
- require.Equal(t, expectedBudgetAdded[i], *txn.BudgetAdded)
- statusMatches := false
- costExceedFound := false
- for _, msg := range messages {
- if strings.Contains(msg, "cost budget exceeded") {
- costExceedFound = true
- }
- if msg == test.msg {
- statusMatches = true
- }
- }
+ require.Contains(t, messages, test.msg, "Wrong result") // PASS or REJECT
+
if test.msg == "REJECT" {
- require.True(t, costExceedFound, "budget error not found in messages")
+ require.Contains(t, messages[2], "cost budget exceeded", "Failed for a surprise reason")
}
- require.True(t, statusMatches, "expected status not found in messages")
+
+ cost := int64(*txn.BudgetConsumed) - int64(*txn.BudgetAdded)
+ require.Equal(t, expectedCosts[i], cost, "txn %d cost", i)
+ require.Equal(t, expectedBudgetAdded[i], *txn.BudgetAdded, "txn %d added", i)
}
})
}
@@ -1434,8 +1424,8 @@ int 1`
approval := ops.Program
ops, err = logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
a.NoError(err)
@@ -1493,8 +1483,8 @@ int 0
require.NoError(t, err)
approval := ops.Program
ops, err = logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
var appIdx basics.AppIndex = 1
creator := randomAddress()
rewardBase := uint64(10000000)
@@ -1561,8 +1551,8 @@ int 1
require.NoError(t, err)
ops, err := logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
a.NoError(err)
@@ -1638,8 +1628,8 @@ int 1`)
require.NoError(t, err)
ops, err := logic.AssembleString("int 1")
- clst := ops.Program
require.NoError(t, err)
+ clst := ops.Program
sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
a.NoError(err)
@@ -1742,7 +1732,7 @@ func TestDryrunCheckEvalDeltasReturned(t *testing.T) {
// Test that a PASS and REJECT dryrun both return the dryrun evaldelta.
for i := range []int{0, 1} {
- ops, _ := logic.AssembleString(fmt.Sprintf(`
+ ops, err := logic.AssembleString(fmt.Sprintf(`
#pragma version 6
txna ApplicationArgs 0
txna ApplicationArgs 1
@@ -1752,6 +1742,7 @@ txna ApplicationArgs 0
int %d
app_local_put
int %d`, expectedUint, i))
+ require.NoError(t, err)
dr.ProtocolVersion = string(dryrunProtoVersion)
dr.Txns = []transactions.SignedTxn{
@@ -1803,5 +1794,41 @@ int %d`, expectedUint, i))
logResponse(t, &response)
}
}
+}
+
+// TestDryrunEarlyExit is a regression test. Ensures that we no longer exit so
+// early in eval() that problems are caused by the debugState being nil.
+func TestDryrunEarlyExit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var dr DryrunRequest
+ var response generated.DryrunResponse
+
+ ops, err := logic.AssembleString("#pragma version 5 \n int 1")
+ require.NoError(t, err)
+ dr.ProtocolVersion = string(dryrunProtoVersion)
+
+ dr.Txns = []transactions.SignedTxn{
+ txntest.Txn{
+ ApplicationID: 1,
+ Type: protocol.ApplicationCallTx,
+ }.SignedTxn(),
+ }
+ dr.Apps = []generated.Application{{
+ Id: 1,
+ Params: generated.ApplicationParams{
+ ApprovalProgram: ops.Program,
+ },
+ }}
+ dr.Accounts = []generated.Account{{
+ Status: "Online",
+ Address: basics.Address{}.String(),
+ }}
+ doDryrunRequest(&dr, &response)
+ checkAppCallPass(t, &response)
+ ops.Program[0] = 100 // version too high
+ doDryrunRequest(&dr, &response)
+ checkAppCallResponse(t, &response, "program version 100 greater than max")
}
diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go
index f20df6f2f..aa53f101b 100644
--- a/daemon/algod/api/server/v2/errors.go
+++ b/daemon/algod/api/server/v2/errors.go
@@ -21,6 +21,7 @@ var (
errAssetDoesNotExist = "asset does not exist"
errAccountAppDoesNotExist = "account application info not found"
errAccountAssetDoesNotExist = "account asset info not found"
+ errBoxDoesNotExist = "box not found"
errFailedLookingUpLedger = "failed to retrieve information from the ledger"
errFailedLookingUpTransactionPool = "failed to retrieve information from the transaction pool"
errFailedRetrievingNodeStatus = "failed retrieving node status"
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index 2603a9022..5e613f023 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -311,163 +311,166 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka/ar8uOGM5NeuVbX1nWI5WV0cR2Up2bvP9iUYsmcGKxJgAFDSxKf/",
- "/QoNgARJcIZ6rHKp80+2hng0Go1Gv/FlkoqiFBy4VpODL5OSSlqABol/0TQVFdcJy8xfGahUslIzwScH",
- "/htRWjK+mkwnzPxaUr2eTCecFtC0Mf2nEwm/VUxCNjnQsoLpRKVrKKgZWG9K07oe6SpZicQNcWiHOD6a",
- "XG/5QLNMglJ9KH/k+YYwnuZVBkRLyhVNzSdFLpleE71mirjOhHEiOBCxJHrdakyWDPJMzfwif6tAboJV",
- "usmHl3TdgJhIkUMfzjeiWDAOHiqogao3hGhBMlhiozXVxMxgYPUNtSAKqEzXZCnkDlAtECG8wKticvBx",
- "ooBnIHG3UmAX+N+lBPgdEk3lCvTk8zS2uKUGmWhWRJZ27LAvQVW5VgTb4hpX7AI4Mb1m5IdKabIAQjn5",
- "8O0b8vz589dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeYrISnPkrr9h2/f4PynboFjW1GlIH5YDs0X",
- "cnw0tADfMUJCjGtY4T60qN/0iByK5ucFLIWEkXtiG9/rpoTz/6G7klKdrkvBuI7sC8GvxH6O8rCg+zYe",
- "VgPQal8aTEkz6Me95PXnL/vT/b3rv3w8TP7L/fny+fXI5b+px92BgWjDtJISeLpJVhIonpY15X18fHD0",
- "oNaiyjOyphe4+bRAVu/6EtPXss4LmleGTlgqxWG+EopQR0YZLGmVa+InJhXPDZsyozlqJ0yRUooLlkE2",
- "Ndz3cs3SNUmpskNgO3LJ8tzQYKUgG6K1+Oq2HKbrECUGrlvhAxf0/y4ymnXtwARcITdI0lwoSLTYcT35",
- "G4fyjIQXSnNXqZtdVuRsDQQnNx/sZYu444am83xDNO5rRqgilPiraUrYkmxERS5xc3J2jv3dagzWCmKQ",
- "hpvTukfN4R1CXw8ZEeQthMiBckSeP3d9lPElW1USFLlcg167O0+CKgVXQMTiX5Bqs+3/4/TH90RI8gMo",
- "RVdwQtNzAjwV2fAeu0ljN/i/lDAbXqhVSdPz+HWds4JFQP6BXrGiKgivigVIs1/+ftCCSNCV5EMA2RF3",
- "0FlBr/qTnsmKp7i5zbQtQc2QElNlTjczcrwkBb36+97UgaMIzXNSAs8YXxF9xQeFNDP3bvASKSqejZBh",
- "tNmw4NZUJaRsySAj9ShbIHHT7IKH8ZvB00hWATh+kEFw6ll2gMPhKkIz5uiaL6SkKwhIZkZ+cpwLv2px",
- "DrxmcGSxwU+lhAsmKlV3GoARp94uXnOhISklLFmExk4dOgz3sG0cey2cgJMKrinjkBnOi0ALDZYTDcIU",
- "TLhdmelf0Quq4NWLoQu8+Tpy95eiu+tbd3zUbmOjxB7JyL1ovroDGxebWv1HKH/h3IqtEvtzbyPZ6sxc",
- "JUuW4zXzL7N/Hg2VQibQQoS/eBRbcaorCQef+FPzF0nIqaY8ozIzvxT2px+qXLNTtjI/5fand2LF0lO2",
- "GkBmDWtUm8Juhf3HjBdnx/oqqjS8E+K8KsMFpS2tdLEhx0dDm2zHvClhHtaqbKhVnF15TeOmPfRVvZED",
- "QA7irqSm4TlsJBhoabrEf66WSE90KX83/5RlbnrrchlDraFjd9+ibcDZDA7LMmcpNUj84D6br4YJgNUS",
- "aNNijhfqwZcAxFKKEqRmdlBalkkuUponSlONI/2HhOXkYPKXeWNcmdvuah5M/s70OsVORh61Mk5Cy/IG",
- "Y5wYuUZtYRaGQeMnZBOW7aFExLjdRENKzLDgHC4o17NGH2nxg/oAf3QzNfi2oozFd0e/GkQ4sQ0XoKx4",
- "axs+UiRAPUG0EkQrSpurXCzqHx4flmWDQfx+WJYWHygaAkOpC66Y0uoJLp82Jymc5/hoRr4Lx0Y5W/B8",
- "Yy4HK2qYu2Hpbi13i9WGI7eGZsRHiuB2CjkzW+PRYGT4+6A41BnWIjdSz05aMY3/4dqGZGZ+H9X5z0Fi",
- "IW6HiQu1KIc5q8DgL4Hm8rhDOX3CcbacGTns9r0d2ZhR4gRzK1rZup923C14rFF4KWlpAXRf7F3KOGpg",
- "tpGF9Y7cdCSji8IcnOGA1hCqW5+1nechCgmSQgeGb3KRnv+DqvU9nPmFH6t//HAasgaagSRrqtazSUzK",
- "CI9XM9qYI2YaovZOFsFUs3qJ97W8HUvLqKbB0hy8cbHEoh77IdMDGdFdfsT/0JyYz+ZsG9Zvh52RM2Rg",
- "yh5n50HIjCpvFQQ7k2mAJgZBCqu9E6N13wjKN83k8X0atUdvrcHA7ZBbhFl6Yw48XAh5uyPRoXVOGiMn",
- "oWbUgCNMOzuLTasycfiJGEpsg85AjV9pOyV3h4/hqoWFU03/DVhQZtT7wEJ7oPvGgihKlsM9nNd1lBMZ",
- "zfX5M3L6j8OX+89+efbyleEapRQrSQuy2GhQ5LFTGIjSmxye9FeGInuV6/jor15401h73Ng4SlQyhYKW",
- "/aGsyc3ey7YZMe36WGujGVddAzjmWJ6BYS8W7cRakw1oR0yZa79Y3MtmDCEsa2bJiIMkg53EdNPlNdNs",
- "wiXKjazuQ78CKYWMGH3wiGmRijy5AKmYiNjvT1wL4lp4mavs/m6hJZdUETM32iMrjrdchLL0FUfQmIZC",
- "7ZIZ7NBnV7zBjRuQSkk3PfTb9UZW5+Ydsy9t5HvzliIlyERfcZLBolq1xPOlFAWhJMOOeHG8Y6u1Du7R",
- "EynE8t4lqugssSXhByuF5KZPXxZ5LzIwumCl7oG9N4M12DOUE+KMLkSlCSVcZICKY6XijH/A+4huD/TW",
- "6PAu0WsrWCzAKCkprcxqq5KgL6JHi03HhKaWihJEjRow1tZWdtvKTmc9W7kEmhnlBTgRC2cRdbZaXCRF",
- "R4r2rNNdOxF1rgVXKUUKShml06oSO0Hz7SxZ6i14QsAR4HoWogRZUnlLYLXQNN8BKLaJgVvLic6M3Id6",
- "3PTbNrA7ebiNVBq901KBEUrNgctBwxAKR+LkAiSaU/+t++cnue32VeVAsIMTrc5Ygeorp1woSAXPVHSw",
- "nCqd7Dq2plFL/jMrCE5K7KTiwAMmlHdUaWtUZzxDXcCyG5zH2lbMFMMAD16BZuSf/e3XHzs1fJKrStVX",
- "oarKUkgNWWwNHK62zPUeruq5xDIYu75vtSCVgl0jD2EpGN8hy67EIojq2vbkvE79xaGFxtwDmygqW0A0",
- "iNgGyKlvFWA3dPgOAGIUx7onEg5THcqpvczTidKiLM3500nF635DaDq1rQ/1T03bPnFR3fD1TICZXXuY",
- "HOSXFrPW1b+mRmjHkUlBz83dhCK4tf73YTaHMVGMp5Bso3xzLE9Nq/AI7DikA9qPCyYKZuscjg79Rolu",
- "kAh27MLQggdUsRMqNUtZiZLE97C5d8GqO0HUakUy0JQZ9SD4YIWsMuxPrDunO+btBK1RUnMf/J7YHFlO",
- "zhReGG3gz2GD5usTGydwFkQX3IOkGBnVnG7KCQLqvY/mQg6bwBVNdb4x15xew4ZcggSiqkXBtLaBH21B",
- "UosyCQeIWiS2zOhsQtbH7ndgjJHqFIcKltffiunEii3b4TvrCC4tdDiBqRQiH+Ee6CEjCsEo9wEphdl1",
- "5uKMfDCKp6QWkE6IQYNgzTwfqRaacQXkf4mKpJSjAFZpqG8EIZHN4vVrZjAXWD2ncxQ0GIIcCrByJX55",
- "+rS78KdP3Z4zRZZw6YPzTMMuOp4+RS3pRCjdOlz3oKKb43Yc4e1oqjEXhZPhujxlt6HajTxmJ086g9f2",
- "HXOmlHKEa5Z/ZwbQOZlXY9Ye0sg4Iz2OO8oKEwwdWzfuO3pJ/z06fDN0DLr+xIFvqfk45F4y8lW+uQc+",
- "bQciEkoJCk9VqJco+1Usw/hNd+zURmko+qq97frLgGDzwYsFPSlT8JxxSArBYRNNWWAcfsCPsd72ZA90",
- "Rh471LcrNrXg74DVnmcMFd4Vv7jbASmf1H7Ve9j87rgdq04YuYpaKeQloSTNGeqsgistq1R/4hSl4uAs",
- "R0z9XtYf1pPe+CZxxSyiN7mhPnGqDA5rWTlqnlxCRAv+FsCrS6parUDpjnywBPjEXSvGScWZxrkKs1+J",
- "3bASJNrbZ7ZlQTdkSXNU634HKcii0u0bEwPslDZalzUxmWmIWH7iVJMcjAb6A+NnVzicj2PzNMNBXwp5",
- "XmNhFj0PK+CgmEriLonv7Fd0Ybrlr507E7Md7GdrRDHjN1F4Gw2tCP7//fg/Dz4eJv9Fk9/3ktf/bf75",
- "y4vrJ097Pz67/vvf/0/7p+fXf3/yn/8R2ykPeyz8y0F+fOSkyeMjFBka41IP9gezOBSMJ1EiO1sDKRjH",
- "KOIObZHHRvDxBPSkMVO5Xf/E9RU3hHRBc5ZRfTty6LK43lm0p6NDNa2N6CiQfq2fYy7dlUhKmp6jR2+y",
- "YnpdLWapKOZeip6vRC1RzzMKheD4LZvTks1VCen8Yn/HlX4HfkUi7KrDZG8tEPT9gfGQTTRZuihMPHnL",
- "iluiqJQzUmJEkvfLiOW0Dsu16XgHBGM219Q7Fd2fz16+mkybWMv6u9HU7dfPkTPBsqtYRG0GVzFJzR01",
- "PGKPFCnpRoGO8yGEPeqCsn6LcNgCjIiv1qx8eJ6jNFvEeaWP83Aa3xU/5jYAw5xENM9unNVHLB8ebi0B",
- "Mij1Opam05I5sFWzmwAdl0opxQXwKWEzmHU1rmwFyjvDcqBLTBdBE6MYE7dWnwNLaJ4qAqyHCxml1sTo",
- "B8Vkx/evpxMnRqh7l+zdwDG4unPWtlj/txbk0Xdvz8jcsV71yAZ326GDcNyIJcNFnLWcbYab2eREG93+",
- "iX/iR7BknJnvB594RjWdL6hiqZpXCuQ3NKc8hdlKkAMfxHZENf3EezLbYP5wED5IymqRs5Sch7J1Q542",
- "J6w/wqdPHw3H//Tpc89z05eE3VRR/mInSC6ZXotKJy7pJZFwSWUWAV3VSQ84sk1Z2zbrlLixLSt2STVu",
- "/DjPo2WpusHP/eWXZW6WH5ChcqG9ZsuI0kJ6qcaIOhYa3N/3wl0Mkl76jKlKgSK/FrT8yLj+TJJP1d7e",
- "cyCtaOBfnfBgaHJTQsvmdavg7K69CxduNSS40pImJV2Bii5fAy1x91HyLtC6mucEu7WikH1ACw7VLMDj",
- "Y3gDLBw3jqjExZ3aXj57Ob4E/IRbiG2MuNE4LW67X0Fc8q23qxPb3NulSq8Tc7ajq1KGxP3O1EmNKyNk",
- "eU+SYituDoHL/1wASdeQnkOGqWhQlHozbXX3zkonsnrWwZRN2bRRhZhXhObBBZCqzKgT6infdBM8FGjt",
- "s1o+wDlszkSTlnSTjI52goEaOqhIqYF0aYg1PLZujO7mO8c3BlWXpY/Tx4BNTxYHNV34PsMH2Yq893CI",
- "Y0TRCoAfQgSVEURY4h9AwS0Wasa7E+nHlmf0lYW9+SIZnp73E9ekUcOc8zpcDcb12+8FYP63uFRkQY3c",
- "Llzqsg2iD7hYpegKBiTk0EI7MlS9ZdXFQXbde9GbTiy7F1rvvomCbBsnZs1RSgHzxZAKKjOdkAU/k3UC",
- "4ApmBCuSOIQtchST6mgJy3SobFnKbYmFIdDiBAySNwKHB6ONkVCyWVPls6ox+dyf5VEywL8xKWRbKuBx",
- "4G0PMszrRD/Pc7vntKdduoRAnwXoU/9C1XJEGp+R8DEALLYdgqMAlEEOK7tw29gTSpOg0myQgePH5TJn",
- "HEgSc9xTpUTKbFp8c824OcDIx08JscZkMnqEGBkHYKNzCwcm70V4NvnqJkByl2BD/djoFgv+hnjYpQ3N",
- "MiKPKA0LZ3wgqM5zAOqiPer7qxNzhMMQxqfEsLkLmhs25zS+ZpBeRhqKrZ38M+defTIkzm6x5duL5UZr",
- "slfRbVYTykwe6LhAtwXi7aJEbAsU4svZsmpcDd2lY6YeuL6HcPU4yGW7FQAdTb+p+uQ0v50aWvtu7t9k",
- "DUufNjnaPqo0RvtD9BPdpQH89U0QdfbZSfe6jirpbbdrO/EukJ9irNickb6vo+9RUZADSsRJS4JIzmMe",
- "MCPYA7LbU98t0NwxvY/yzZPAly9hxZSGxhZtbiXvXHlo2xzFqgJCLIdXp0u5NOv7IETNo23aKnZsLfPB",
- "V3AhNCRLJpVO0JAfXYJp9K1CjfJb0zQuKLSjBWyBHZbFeQNOew6bJGN5FadXN+/3R2ba97URRlWLc9ig",
- "OAg0XZMFFoSKxhBtmdqGmW1d8Du74Hf03tY77jSYpmZiacilPcef5Fx0OO82dhAhwBhx9HdtEKVbGCRe",
- "/EeQ61h6WCA02MOZmYazbabH3mHK/Ng7oy8sFMN3lB0pupZAW966CoY+EqPuMR3UU+qnPAycAVqWLLvq",
- "GALtqIPqIr2Rtu8T1TtYwN11g+3AQGD0i0XVSlDtmgSNdGsrY/FwbbNRmDlrVw4IGUI4FVO+rmMfUYa0",
- "sfjYLlydAc2/h83Ppi0uZ3I9ndzNbhjDtRtxB65P6u2N4hk93NaO1HID3BDltCyluKB54qyrQ6QpxYUj",
- "TWzujbEPzOriNryzt4fvThz419NJmgOVSS0qDK4K25V/mlXZ8gcDB8TXjTMKj5fZrSgZbH6dsx1aZC/X",
- "4Gp0BdJor5hIY20PjqKz0C7jgTY77a3OMWCXuMVBAGXtH2hsV9Y90HYJ0AvKcm808tAOBMXg4sZVpIly",
- "hXCAO7sWAg9Rcq/spne646ejoa4dPCmca0sVscIWylNE8K7/2IiQaItCUi0olgKxJoE+c+JVkZjjl6ic",
- "pXEDI18oQxzcOo5MY4KNB4RRM2LFBvyQvGLBWKaZGqHodoAM5ogi05eVGcLdQrgKxxVnv1VAWAZcm08S",
- "T2XnoGLtFWdq7l+nRnboz+UGtubpZvi7yBhhGZzujYdAbBcwQjdVD9yjWmX2C63NMeaHwB5/A293OGPv",
- "StziqXb04ajZxgCu2+6msCBxn/8ZwrDF63ZXQ/bKq6vHMzBHtLoxU8lSit8hruehehwJufeFfxiGePwO",
- "fBbJXOqymNq60xRpbmYf3O4h6Sa0QrU99ANUjzsf+KSwAok3z1Jut9oWG20FesUJJgzOnNvxG4JxMPcC",
- "WnN6uaCx8ixGyDAwHTbez5YhWQviO3vcO5s3c7WYZiRwpNZtmU1GK0E22TD9xOdbCgx22tGiQiMZINWG",
- "MsHUOr9yJSLDVPyScluz1vSzR8n1VmCNX6bXpZCYSqriNu8MUlbQPC45ZIj9duptxlbMVmytFAQlQd1A",
- "ttS1pSJXVtX6lxvUHC/J3jQoOux2I2MXTLFFDthi37ZYUIWcvDZE1V3M8oDrtcLmz0Y0X1c8k5DptbKI",
- "VYLUQh2qN7XnZgH6EoCTPWy3/5o8Rp+VYhfwxGDR3c+Tg/3XaHS1f+zFLgBXmnkbN8mQnfzTsZM4HaPT",
- "zo5hGLcbdRZNjLT19IcZ15bTZLuOOUvY0vG63WepoJyuIB4mUeyAyfbF3URDWgcvPLPFoJWWYkOYjs8P",
- "mhr+NBDEbdifBYOkoiiYLpxnQ4nC0FNT79NO6oezlaVdqSYPl/+IDsLS+0c6SuTDGk3t/RZbNbpx39MC",
- "2midEmrzh3PWuO59ATly7KsQYO2qumSVxY2ZyywdxRz05C9JKRnXqFhUepn8jaRrKmlq2N9sCNxk8epF",
- "pF5Xu0QPvxngD453CQrkRRz1coDsvQzh+pLHXPCkMBwle9IkTQSnctCTGY8W8xy9Gyy4feixQpkZJRkk",
- "t6pFbjTg1HciPL5lwDuSYr2eG9HjjVf24JRZyTh50Mrs0E8f3jkpoxAyVpOmOe5O4pCgJYMLDFyLb5IZ",
- "8457IfNRu3AX6P9Yz4MXOQOxzJ/lmCLwTcXy7OcmCaxT8lBSnq6jdv+F6fhLU3y7XrI9x9ESKGvKOeTR",
- "4eyd+Yu/WyO3/7/E2HkKxke27ZYytMvtLK4BvA2mB8pPaNDLdG4mCLHazoqpoy7zlcgIztPU22iorF+d",
- "MShX9lsFSscyDPCDjfxA+47RC2y1LAI8Q6l6Rr6zj+esgbTKAaA0y4oqt6nlkK1AOsNjVeaCZlNixjl7",
- "e/iO2FltH1tC1lbrWqEw115FR68PivOMiyH01WDj8c3jx9kecGlWrTRW51CaFmUsF820OPMNMOEttHWi",
- "mBdiZ0aOrIStvPxmJzH0sGSyMJJpPZrl8UgT5j9a03SNomuLmwyT/Pgyc54qVfDeQF03uK6vg+fOwO0q",
- "zdlCc1MijH5xyZR9MwUuoJ3+VueCOtXJp8O1lycrzi2lRHn0tlzl26DdA2cd2t4cGoWsg/gbCi62SuNN",
- "q+6dYq9owYpuCb/eQwM2BaquB+vfwkopF5ylWC4ieKWlBtm9vzLGVzCiskbXGOWPuDuhkcMVLRxYhxM5",
- "LA6WEvSM0CGub6wMvppNtdRh/9T40MeaarICrRxng2zq6186ewnjCly9JHyKJ+CTQrb8L8ghoy69pDb9",
- "3pCMMHZ+QAD+1nx779QjDCo9ZxwFIYc2F79qLRr4PIQ20hPTZCVAufW0EwjVR9Nnhrl0GVx9nvnnJHAM",
- "674wy7a+uv5Qh95z5zxlpu0b09ZWTmh+boUp2kkPy9JNOlwdNSoP6Cs+iOCIBybxJvAAufX44WhbyG2r",
- "yx3vU0NocIEOOyjxHu4RRl0ptFMa+YLmlaUobEFsqEs0YZrxCBjvGIfmsZPIBZFGrwTcGDyvA/1UKqm2",
- "IuAonnYGNEcvXYyhKe1MtHcdqrPBiBJco59jeBubIqcDjKNu0AhulG/qN1YMdQfCxBt83Mkhsl+yFKUq",
- "J0RlGHbcKWIaYxyGcfsyye0LoH8M+jKR7a4ltSfnJjfRUCbZospWoBOaZbFCc9/gV4JfSVah5ABXkFZ1",
- "oa6yJCmWYGjXpOhTm5soFVxVxZa5fIM7TpeKmBz9HidQPq66GXxGkP0a1nv09uTD2zeHZ2+P7H1h1HKb",
- "SmZkbgmFYYhGj1UajOhcKSC/hmj8Ffv92llwHMygeHGEaMMCyp4QMaB+scF/Y8W0hgnI+dRvHNXlHejY",
- "8cbifXuknnBujl6i2CoZjwm8+u6Ojmbq253Hpv+9HshcrNqAPHCa+zZmHO5RjA2/NfdbmAXeqxBnb8A6",
- "SRtjqIR/BwG12zq9sM088cbtlYxD231d0n679WS4OP0U7+iBSMoguZ9aMcA6g4biKdPB8F+qXRaOpmQr",
- "p8SK8rERbDCGrWRvn/uMGsKGAjBs/IX53Os9ToDtqQM49laE+siePkDf+7BBUlLmPJ0Ns+hj1gUY90O+",
- "x4QeNhvcXYQL28VBYiuJVwgfrrPR1NbAa6AUijVVLWOlw0eGlZxh9e+gTkh/LO/TvYBUG6E+8FVJgJtU",
- "DTGTBQ8dfK23MaB+1NE3rszGttoa/fqlO5hNLwMgyGKxtR9n4ytJHNYRCegnxacGVsDdWwPt2N7REYbL",
- "JaSaXezIuPin0VKbaP6p12PtQzZBAgarI9b8A8M3VK8bgLYlRGyFJ6g/dWdwhuKtz2HzSJEWNUSLUU49",
- "z7tNojJiALlDYkhEqJjHzxrenBOGqZoyEAvew267Q1PyZbAKeJA/dMu5PEkSGuYUbZnyQsQ091Fzma43",
- "yrTD4KuhpIx+Hd5hQegIyx6r+gWH+gXhQKshx/1yUJcuURrzY2pbs0+ZBuV/88lwdhb7MnVTpxwt+5dU",
- "Zr5FVFX1WnCy5T7qZVL4GrJdoJf1zKyJh+rHzkcKjGDUW5oLxfgqGQodbIcgha/aoaMVrwMscIxwLUG6",
- "9wm0f/g70cLHT22DYxsq3Atst0GCGizqZYEbTLX/0NQSwDKJ1D777pzI4QKN3koNdDLI+B+ecxuy39jv",
- "Pljcl8kboZE7ek12puz7SDimekgMqX5J3G25Owj9Nlov49y+V6Ni6f/coDK0HpdSZFVqL+jwYDQ2hrHF",
- "NbawkqjCmPZX2ZP9cyw18y5I6TmHzdzK3+ma8qbmT/tYWxHKriFIoe3s9r0aBOK6T76yC1jdC5x/pFI9",
- "nZRC5MmAufi4X8WgewbOWXoOGTF3h48hGagETh6jlbL2B16uNz5rvyyBQ/ZkRohRy4tSb7xrsF2QszM5",
- "f6S3zX+Fs2aVLSzi9P3ZJx4Pf8KSH/KO/M0Ps52rKTDM745T2UF2lAm4GqigIOllpC7+2LcaI866bq3y",
- "hqgsFDEp5ZY5o6POd1/nj5B+UKx7u/YTppT7rM9USGs6QmnJG3S6wssPjUVoXNlw32EHeKFSHBQO99zI",
- "gfMHxwj9UCMlWMogJbSWv0vP9k+M1nwp2CKFEchmmcoWDBN9oTIwoqg3tW0ijue+CQPzxwXHmhp904dC",
- "UyLWpQwJx5xLeUHzhzdfYGGBQ8SHe/0mvtBQ/w2RbFGpbhet8I6OmjvQde9van6C5pZ/gtmjqA3YDeXs",
- "qHXBdl9nDusn0Zzkonm4AYcklzimNRrvvyILF5FaSkiZYp1g/UtfMq9W97CCbPMo0nb9ctc6fxb6DmTs",
- "FARRkvdN+S0t8H5oIGyO6B/MVAZObpTKY9TXI4sI/mI8KkwN3XFdnLesybacYSeaQ0i4Z6ty4Ma+oVW5",
- "n/Q6dnm4Drx0KgX9dY6+rVu4jVzUzdrGukT6yB32ZOjFGE9GvPSa6Y6uFIsQrFtIEFTy6/6vRMISC5ML",
- "8vQpTvD06dQ1/fVZ+7M5zk+fRsW4B3OitN4PdvPGKObnoeg/G+E2EGja2Y+K5dkuwmiFDTePBGBg7C8u",
- "wPoPeabgF2tP7R9VV+D5Ju7b7iYgYiJrbU0eTBUEBI+IBXbdZtEXnhWklWR6g3nf3vzGfonW0/muttg7",
- "j0+dKejuPi3Ooa4c0Nj3K+Vv1++EfRG6MDI1Os81vhj19ooWZQ7uoPz90eKv8PxvL7K95/t/Xfxt7+Ve",
- "Ci9evt7bo69f0P3Xz/fh2d9evtiD/eWr14tn2bMXzxYvnr149fJ1+vzF/uLFq9d/fWT4kAHZAjrxWUaT",
- "/4lveSSHJ8fJmQG2wQktWf1QnCFjX0acpngSoaAsnxz4n/67P2GzVBTN8P7XiUtimKy1LtXBfH55eTkL",
- "u8xXaNBLtKjS9dzP03+g6+S4DrC2ibG4ozZ21pACbqojhUP89uHt6Rk5PDmeNQQzOZjszfZm+/j8Tgmc",
- "lmxyMHmOP+HpWeO+zx2xTQ6+XE8n8zXQHP1f5o8CtGSp/6Qu6WoFcubqqZufLp7NvSgx/+KMmdfbvs3D",
- "0oTzLy2bb7ajJ1Zvm3/xScnbW7eyfp2tO+gwEorhKe0TtPMvKMoO/j53CQjxj6hSWFqdewdGvGVrNV/0",
- "Fcuuuz3ci5DzL80Trdf2MOcQc1fYuH0avOg6JUwTuhASs3Z1ujbn16cLMtV+0bcmxuPMEKHp9aZ+rjao",
- "lHTwsSeN24GIHwlPrCHH5kC1Zmp4ppYVhMV76huh1b65Fz7uJa8/f9mf7u9d/8Xwfffny+fXI/2Ob5rX",
- "bk9rpj6y4WfMtUMLKp6zZ3t7d3jM6ZCHT+/iJgVvhkVf4K7KpBjSst1WdQYiNTJ25AR1hh947/PFDVe8",
- "1c7TivKJvM3wDc2IT2XBufcfbu5jjl5fw3+JvV+up5OXD7n6Y25InuYEWwZJ3v2t/4mfc3HJfUsjDFRF",
- "QeXGH2PVYgr+EWq8cuhKodVPsguqYfIZzcqxmMcB5qI0vQVzOTW9vjKXh2IuuEn3wVzaA90zc3l2wwP+",
- "51/xV3b6Z2Onp5bdjWenTpSz2ZJz+35kI+H13hJYQTRtExMo6baHobsc9jvQvXeuJ3dkMX/Yk9f/f5+T",
- "F3svHg6CdiHs72FD3gtNvkX765/0zI47PtskoY5mlGU9IrfsH5T+RmSbLRgq1Kp0GU4RuWTBuAG5f7v0",
- "X1bsvUN9Dhtiw4+8m5mLDHry0PUdecCf9snsrzzkKw+RdvrnDzf9KcgLlgI5g6IUkkqWb8hPvM5Pv71a",
- "l2XR0O720e/xNKONpCKDFfDEMaxkIbKNr03YGvAcrIW7J6jMv7QLjFsr2qBZ6gh/r9897AO92JDjo54E",
- "Y7t1Oe03G2za0RgjOmEXxK2aYZcXDShj28jcLGQlNLFYyNyivjKer4znTsLL6MMTk1+i2oQ35HTv5Kkv",
- "1BIrZUR1f+oxOscfelzvZaP7+kxMf7Eh8JCR4IPN1eqi+StL+MoS7sYSvoPIYcRT65hEhOhuY+ntMwiM",
- "9s26z/RgFIRvXuVUEgVjzRSHOKIzTjwEl3hoJS2KK6ujUU7gitlAx8iG3a/e9pXFfWVxfyKv1W5G0xZE",
- "bqzpnMOmoGWt36h1pTNxaQscRrki1v6nuSsUjEGYdUCHFsQP0CTVkh9dQYJ8g5GnLDNinGYFGJGq5nWm",
- "s0+VaGKizQjNe80rxnECZBU4i62ITYN0NQWp4PZ1046vzUH23uqEMSb7WwXI0RxuHIyTacvZ4rYxUn/6",
- "zvJX3zdyvcWWjlRhw8X7wRr1+6Wtv+eXlOlkKaRLZUX09TtroPnc1fnq/NrUrOh9wUIcwY9BYEf813n9",
- "XkP0YzeyJfbVRYz4Rk3oWhgKhhtcB4F9/Gz2Ccv9ur1vIpsO5nPM/1oLpeeT6+mXTtRT+PFzvTVf6mvZ",
- "bdH15+v/GwAA//9MwM7Ji70AAA==",
+ "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka/aoc+4Yz8iu7VlXqO9lysro4jstSsnef7UswZM8MViTAAKA0E5/+",
+ "9ys0ABIkwRnqscqmPv9ka4hHo9Fo9BufJ6koSsGBazU5/DwpqaQFaJD4F01TUXGdsMz8lYFKJSs1E3xy",
+ "6L8RpSXjq8l0wsyvJdXryXTCaQFNG9N/OpHwW8UkZJNDLSuYTlS6hoKagfW2NK3rkTbJSiRuiCM7xMnx",
+ "5GrHB5plEpTqQ/kjz7eE8TSvMiBaUq5oaj4pcsn0mug1U8R1JowTwYGIJdHrVmOyZJBnauYX+VsFchus",
+ "0k0+vKSrBsREihz6cL4SxYJx8FBBDVS9IUQLksESG62pJmYGA6tvqAVRQGW6Jksh94BqgQjhBV4Vk8MP",
+ "EwU8A4m7lQK7wP8uJcDvkGgqV6Ann6axxS01yESzIrK0E4d9CarKtSLYFte4YhfAiek1Iz9USpMFEMrJ",
+ "+29fkadPn74wCymo1pA5IhtcVTN7uCbbfXI4yagG/7lPazRfCUl5ltTt33/7Cuc/dQsc24oqBfHDcmS+",
+ "kJPjoQX4jhESYlzDCvehRf2mR+RQND8vYCkkjNwT2/hONyWc/w/dlZTqdF0KxnVkXwh+JfZzlIcF3Xfx",
+ "sBqAVvvSYEqaQT8cJC8+fX48fXxw9ZcPR8l/uT+fP70aufxX9bh7MBBtmFZSAk+3yUoCxdOypryPj/eO",
+ "HtRaVHlG1vQCN58WyOpdX2L6WtZ5QfPK0AlLpTjKV0IR6sgogyWtck38xKTiuWFTZjRH7YQpUkpxwTLI",
+ "pob7Xq5ZuiYpVXYIbEcuWZ4bGqwUZEO0Fl/djsN0FaLEwHUjfOCC/n2R0axrDyZgg9wgSXOhINFiz/Xk",
+ "bxzKMxJeKM1dpa53WZGzNRCc3Hywly3ijhuazvMt0bivGaGKUOKvpilhS7IVFbnEzcnZOfZ3qzFYK4hB",
+ "Gm5O6x41h3cIfT1kRJC3ECIHyhF5/tz1UcaXbFVJUORyDXrt7jwJqhRcARGLf0Kqzbb/r9Mf3xIhyQ+g",
+ "FF3BO5qeE+CpyIb32E0au8H/qYTZ8EKtSpqex6/rnBUsAvIPdMOKqiC8KhYgzX75+0ELIkFXkg8BZEfc",
+ "Q2cF3fQnPZMVT3Fzm2lbgpohJabKnG5n5GRJCrr55mDqwFGE5jkpgWeMr4je8EEhzcy9H7xEiopnI2QY",
+ "bTYsuDVVCSlbMshIPcoOSNw0++Bh/HrwNJJVAI4fZBCcepY94HDYRGjGHF3zhZR0BQHJzMhPjnPhVy3O",
+ "gdcMjiy2+KmUcMFEpepOAzDi1LvFay40JKWEJYvQ2KlDh+Eeto1jr4UTcFLBNWUcMsN5EWihwXKiQZiC",
+ "CXcrM/0rekEVfP1s6AJvvo7c/aXo7vrOHR+129gosUcyci+ar+7AxsWmVv8Ryl84t2KrxP7c20i2OjNX",
+ "yZLleM380+yfR0OlkAm0EOEvHsVWnOpKwuFH/sj8RRJyqinPqMzML4X96Ycq1+yUrcxPuf3pjVix9JSt",
+ "BpBZwxrVprBbYf8x48XZsd5ElYY3QpxXZbigtKWVLrbk5Hhok+2Y1yXMo1qVDbWKs43XNK7bQ2/qjRwA",
+ "chB3JTUNz2ErwUBL0yX+s1kiPdGl/N38U5a56a3LZQy1ho7dfYu2AWczOCrLnKXUIPG9+2y+GiYAVkug",
+ "TYs5XqiHnwMQSylKkJrZQWlZJrlIaZ4oTTWO9B8SlpPDyV/mjXFlbrureTD5G9PrFDsZedTKOAkty2uM",
+ "8c7INWoHszAMGj8hm7BsDyUixu0mGlJihgXncEG5njX6SIsf1Af4g5upwbcVZSy+O/rVIMKJbbgAZcVb",
+ "2/CBIgHqCaKVIFpR2lzlYlH/8NVRWTYYxO9HZWnxgaIhMJS6YMOUVg9x+bQ5SeE8J8cz8l04NsrZgudb",
+ "czlYUcPcDUt3a7lbrDYcuTU0Iz5QBLdTyJnZGo8GI8PfBcWhzrAWuZF69tKKafx31zYkM/P7qM5/DhIL",
+ "cTtMXKhFOcxZBQZ/CTSXrzqU0yccZ8uZkaNu35uRjRklTjA3opWd+2nH3YHHGoWXkpYWQPfF3qWMowZm",
+ "G1lYb8lNRzK6KMzBGQ5oDaG68Vnbex6ikCApdGB4mYv0/O9Ure/gzC/8WP3jh9OQNdAMJFlTtZ5NYlJG",
+ "eLya0cYcMdMQtXeyCKaa1Uu8q+XtWVpGNQ2W5uCNiyUW9dgPmR7IiO7yI/6H5sR8NmfbsH477IycIQNT",
+ "9jg7D0JmVHmrINiZTAM0MQhSWO2dGK37WlC+aiaP79OoPXptDQZuh9wicIfE5s6PwUuxicHwUmx6R0Bs",
+ "QN0FfZhxUIzUUKgR8B07yATuv0MflZJu+0jGsccg2SzQiK4KTwMPb3wzS2N5PVoIeTPu02ErnDT2ZELN",
+ "qAHznXaQhE2rMnGkGLFJ2QadgRoX3m6m0R0+hrEWFk41/RdgQZlR7wIL7YHuGguiKFkOd0D66yjTX1AF",
+ "T5+Q078fPX/85Jcnz782JFlKsZK0IIutBkW+croZUXqbw8P+ylA7qnIdH/3rZ94K2R43No4SlUyhoGV/",
+ "KGvdtCKQbUZMuz7W2mjGVdcAjjmcZ2A4uUU7sYZ7A9oxU0bCKhZ3shlDCMuaWTLiIMlgLzFdd3nNNNtw",
+ "iXIrq7tQZUFKISP2NTxiWqQiTy5AKiYirpJ3rgVxLbx4W3Z/t9CSS6qImRtNvxVHgSJCWXrDx/N9O/TZ",
+ "hje42cn57Xojq3PzjtmXNvK9JVGREmSiN5xksKhWLU1oKUVBKMmwI97Rb9hqrQOR5Z0UYnnnt3Z0ltiS",
+ "8IMV+HLTpy/2vRUZGLW7UnfA3pvBGuwZyglxRhei0oQSLjJAHb1SccY/4OhFDxM6xnR4l+i1leEWYPTB",
+ "lFZmtVVJ0O3To8WmY0JTS0UJokYN2MVrh4ZtZaezTsRcAs2MngiciIUzPjuzOC6Sos9Ke9bprp2I5tyC",
+ "q5QiBaWMfm+1tr2g+XaWLPUOPCHgCHA9C1GCLKm8IbBaaJrvARTbxMCtRXJnse9DPW76XRvYnTzcRiqN",
+ "im+pwMj/5sDloGEIhSNxcgESLdf/0v3zk9x0+6pyIK7EiVZnrEBLAadcKEgFz1R0sJwqnew7tqZRS/4z",
+ "KwhOSuyk4sAD1qo3VGnrv2A8Q7XLshucx5qxzBTDAA9egWbkn/3t1x87NXySq0rVV6GqylJIDVlsDRw2",
+ "O+Z6C5t6LrEMxq7vWy1IpWDfyENYCsZ3yLIrsQiiujbzOQdff3FoDDP3wDaKyhYQDSJ2AXLqWwXYDX3r",
+ "A4AYHb3uiYTDVIdyaof+dKK0KEtz/nRS8brfEJpObesj/VPTtk9cVDd8PRNgZtceJgf5pcWsjapYUyO0",
+ "48ikoOfmbkIR3Dpa+jCbw5goxlNIdlG+OZanplV4BPYc0gHtx8VtBbN1DkeHfqNEN0gEe3ZhaMEDqtg7",
+ "KjVLWYmSxPewvXPBqjtB1EBIMtCUGfUg+GCFrDLsT6znrDvmzQStUVJzH/ye2BxZTs4UXhht4M9hi56C",
+ "dzYk4ywI5LgDSTEyqjndlBME1Dt6zYUcNoENTXW+NdecXsOWXIIEoqpFwbS2MTZtQVKLMgkHiFokdszo",
+ "zG82nMHvwBh74CkOFSyvvxXTiRVbdsN31hFcWuhwAlMpRD7CE9NDRhSCUZ4aUgqz68yFdPm4H09JLSCd",
+ "EIO215p5PlAtNOMKyP8RFUkpRwGs0lDfCEIim8Xr18xgLrB6TueTaTAEORRg5Ur88uhRd+GPHrk9Z4os",
+ "4dLHQZqGXXQ8eoRa0juhdOtw3YGKbo7bSYS3o6nGXBROhuvylP0+ATfymJ181xm8tu+YM6WUI1yz/Fsz",
+ "gM7J3IxZe0gj4/whOO4oK0wwdGzduO/okP7X6PDN0DHo+hMHbrzm45Anz8hX+fYO+LQdiEgoJSg8VaFe",
+ "ouxXsQxDZd2xU1uloeir9rbrLwOCzXsvFvSkTMFzxiEpBIdtNDuEcfgBP8Z625M90Bl57FDfrtjUgr8D",
+ "VnueMVR4W/zibgek/K52Yd/B5nfH7Vh1wiBh1EohLwklac5QZxVcaVml+iOnKBUHZzli6vey/rCe9Mo3",
+ "iStmEb3JDfWRU3Tz1LJy1Dy5hIgW/C2AV5dUtVqB0h35YAnwkbtWjJOKM41zFWa/ErthJUi0t89sy4Ju",
+ "yZLmqNb9DlKQRaXbNybGMipttC5rYjLTELH8yKkmORgN9AfGzzY4nA8Z9DTDQV8KeV5jYRY9DyvgoJhK",
+ "4i6J7+xX9Ba75a+d5xgTS+xna0Qx4zcBj1sNrWSJ//vVfx5+OEr+iya/HyQv/sf80+dnVw8f9X58cvXN",
+ "N/+v/dPTq28e/ud/xHbKwx6LtHOQnxw7afLkGEWGxrjUg/3eLA4F40mUyM7WQArGMWC7Q1vkKyP4eAJ6",
+ "2Jip3K5/5HrDDSFd0JxlVN+MHLosrncW7enoUE1rIzoKpF/rp5j3fCWSkqbn6NGbrJheV4tZKoq5l6Ln",
+ "K1FL1POMQiE4fsvmtGRzVUI6v3i850q/Bb8iEXbVYbI3Fgj6/sB4dCyaLF3AK568ZcUtUVTKGSkx+Mv7",
+ "ZcRyWkdA28zHQ4LhsWvqnYruzyfPv55Mm7DW+rvR1O3XT5EzwbJNLHg5g01MUnNHDY/YA0VKulWg43wI",
+ "YY+6oKzfIhy2ACPiqzUr75/nKM0WcV7pQ2qcxrfhJ9zGupiTiObZrbP6iOX9w60lQAalXscyoloyB7Zq",
+ "dhOg41IppbgAPiVsBrOuxpWtQHlnWA50iZk5aGIUY0IE63NgCc1TRYD1cCGj1JoY/aCY7Pj+1XTixAh1",
+ "55K9GzgGV3fO2hbr/9aCPPju9RmZO9arHtg4ejt0EPkcsWS44L6Ws81wM5sHahMJPvKP/BiWjDPz/fAj",
+ "z6im8wVVLFXzSoF8SXPKU5itBDn08YLHVNOPvCezDaZqB5GapKwWOUvJeShbN+Rp0+/6I3z8+MFw/I8f",
+ "P/U8N31J2E0V5S92guSS6bWodOLyixIJl1RmEdBVnV+CI9vswF2zTokb27Jil7/kxo/zPFqWqhtn3l9+",
+ "WeZm+QEZKhdFbbaMKC2kl2qMqGOhwf19K9zFIOmlT06rFCjya0HLD4zrTyT5WB0cPAXSCrz+1QkPhia3",
+ "JbRsXjeKg+/au3DhVkOCjZY0KekKVHT5GmiJu4+Sd4HW1Twn2K0V8O0DWnCoZgEeH8MbYOG4dvAqLu7U",
+ "9vKJ4vEl4CfcQmxjxI3GaXHT/QpCwG+8XZ0w8t4uVXqdmLMdXZUyJO53ps4fXRkhy3uSFFtxcwhcqu0C",
+ "SLqG9BwyzPqDotTbaau7d1Y6kdWzDqZsdqwN4MQULjQPLoBUZUadUE/5tptLo0Brn0D0Hs5heyaaDLDr",
+ "JM+0cznU0EFFSg2kS0Os4bF1Y3Q33zm+MX69LH1KBMbGerI4rOnC9xk+yFbkvYNDHCOKVq7BECKojCDC",
+ "Ev8ACm6wUDPerUg/tjyjryzszRdJpvW8n7gmjRrmnNfhajCFwn4vAFPtxaUiC2rkduGyxG2+QsDFKkVX",
+ "MCAhhxbakVkBLasuDrLv3ovedGLZvdB6900UZNs4MWuOUgqYL4ZUUJnphCz4mawTAFcwI1j8xSFskaOY",
+ "VEdLWKZDZctSbqtZDIEWJ2CQvBE4PBhtjISSzZoqn8COef7+LI+SAf6F+Te7si5PAm97kMxf51R6nts9",
+ "pz3t0uVe+oRLn2UZqpYjMiaNhI8BYLHtEBwFoAxyWNmF28aeUJpcoGaDDBw/Lpc540CSmOOeKiVSZisQ",
+ "NNeMmwOMfPyIEGtMJqNHiJFxADY6t3Bg8laEZ5OvrgMkd7lM1I+NbrHgb4iHXdrQLCPyiNKwcMYHguo8",
+ "B6Au2qO+vzoxRzgMYXxKDJu7oLlhc07jawbpJf+h2NpJ9XPu1YdD4uwOW769WK61JnsV3WQ1oczkgY4L",
+ "dDsgXohNYuOuoxLvYrMw9B6NVsMo8NjBtGmWDxRZiA267PFqwfolag8sw3B4MAINf8MU0iv2G7rNLTC7",
+ "pt0tTcWoUCHJOHNeTS5D4sSYqQckmCFy+SrInLwRAB1jR1NjzCm/e5XUtnjSv8ybW23aVATwgbWx4z90",
+ "hKK7NIC/vhWmznV815VYonaKtue5neYZiJAxojdsou/u6TuVFOSASkHSEqKS85gT0Og2gDfOqe8WGC8w",
+ "mZTy7cMgnEHCiikNjTneXMzev3Tf5kmKNSyEWA6vTpdyadb3Xoj6mrJJ0tixtcx7X8GF0JAsmVQ6QV9G",
+ "dAmm0bcKlepvTdO4rNQOmLDlnFgW5w047Tlsk4zlVZxe3bzfH5tp39YsUVUL5LeME6Dpmiyw/Fg0jGrH",
+ "1DbSbueC39gFv6F3tt5xp8E0NRNLQy7tOf4k56LDeXexgwgBxoijv2uDKN3BIFH2OYZcxzLkArnJHs7M",
+ "NJztsr72DlPmx94bgGKhGL6j7EjRtQQGg52rYOgmMmIJ00H1rn7Wx8AZoGXJsk3HFmpHHdSY6bUMHr4s",
+ "QgcLuLtusD0YCOyescBiCapdAaMR8G0dtlYC6mwUZs7adSpChhBOxZSvItpHlCFtFBX34eoMaP49bH82",
+ "bXE5k6vp5Ham0xiu3Yh7cP2u3t4ontHJb01pLU/INVFOy1KKC5onzsA8RJpSXDjSxObeHn3PrC5uxjx7",
+ "ffTmnQP/ajpJc6AyqUWFwVVhu/JPsypbbGPggPgqhUbn8zK7FSWDza8rBIRG6cs1uIpwgTTaK13TOByC",
+ "o+iM1Mt4rNFek7Pzjdgl7vCRQFm7SBrznfWQtL0i9IKy3NvNPLQDcUG4uHH1j6JcIRzg1t6VwEmW3Cm7",
+ "6Z3u+OloqGsPTwrn2lGzrrBlGRURvOtCNyIkmuOQVAuKhWesVaTPnHhVoCUhUTlL4zZWvlCGOLj1nZnG",
+ "BBsPCKNmxIoNuGJ5xYKxTDM1QtHtABnMEUWmL2I0hLuFcPW0K85+q4CwDLg2nySeys5BxUo/ztrev06N",
+ "7NCfyw1sLfTN8LeRMcKiS90bD4HYLWCEnroeuMe1yuwXWlukzA+BS+IaDv9wxt6VuMNZ7+jDUbMNg1y3",
+ "PW5h+es+/zOEYUsl7q+97ZVXV/1pYI5oLW2mkqUUv0Ncz0P1OJJ14MtMMYxy+R34LJK81WUxtXWnKQne",
+ "zD643UPSTWiFagcpDFA97nzglsN6N95CTbndalvathXrFieYMD51bsdvCMbB3IvpzenlgsaKARkhw8B0",
+ "1DiAW7Z0LYjv7HHvzP7MVf6akcCXXLdlNh+vBNkkBPVzv28oMNhpR4sKjWSAVBvKBFPr/8uViAxT8UvK",
+ "bYVk088eJddbgTV+mV6XQmI2rYqb/TNIWUHzuOSQpX0Tb8ZWzNYHrhQEBWjdQLawuqUiV8TXutgb1Jws",
+ "ycE0KHHtdiNjF0yxRQ7Y4rFtsaAKOXltiKq7mOUB12uFzZ+MaL6ueCYh02tlEasEqYU6VG9q59UC9CUA",
+ "JwfY7vEL8hW67RS7gIcGi+5+nhw+foFGV/vHQewCcIXAd3GTDNnJPxw7idMx+i3tGIZxu1Fn0dxQ+3rD",
+ "MOPacZps1zFnCVs6Xrf/LBWU0xXEI0WKPTDZvribaEjr4IVntvS40lJsCdPx+UFTw58G4tgN+7NgkFQU",
+ "BdOFc+4oURh6aqrL2kn9cLaOuSsM5uHyH9FHWnoXUUeJvF+jqb3fYqtGT/ZbWkAbrVNCbQp1zproBV+u",
+ "kJz4QgxYKa0ukGZxY+YyS0cxB4MZlqSUjGtULCq9TP5G0jWVNDXsbzYEbrL4+lmkOly7ShG/HuD3jncJ",
+ "CuRFHPVygOy9DOH6kq+44ElhOEr2sMkbCU7loDM37rYb8h3uHnqsUGZGSQbJrWqRGw049a0Ij+8Y8Jak",
+ "WK/nWvR47ZXdO2VWMk4etDI79NP7N07KKISMleVpjruTOCRoyeACY/fim2TGvOVeyHzULtwG+j/W8+BF",
+ "zkAs82c5pgi8FBHt1FcsrC3pLlY9Yh0YOqbmgyGDhRtqStrV4e7f6eeNz33nk/niYcU/usD+wVuKSPYr",
+ "GNjEoHJldDuz+nvg/6bkpdiM3dTOCfEb+2+AmihKKpZnPzf5nZ3CoJLydB31Zy1Mx1+aJwzqxdn7KVrd",
+ "aE05hzw6nJUFf/EyY0Sq/acYO0/B+Mi23VqldrmdxTWAt8H0QPkJDXqZzs0EIVbbCW91QHW+EhnBeZpS",
+ "Og337Ne4DSoR/laB0rHkIfxgg7rQbmn0XVsIjwDPUFucke/sE2RrIK1KH6ilsaLKbdUIyFYgnUG9KnNB",
+ "sykx45y9PnpD7Ky2jy3EbQvxrVBJaa+iY68K6m6NCw/2NbXjqQvjx9kdS21WrTQW3lGaFmUszdS0OPMN",
+ "MJc1tOGj+hJiZ0aOreaovF5iJzH0sGSyMBpXPZqVXZAmzH+0pukaVbIWSx0m+fEVJD1VquDVlrr6el06",
+ "C8+dgdsVkbQ1JKdEGL35kin78hRcQDuztU7zdiYBn+naXp6sOLeUEpU9dpUhuAnaPXA2UMOb+aOQdRB/",
+ "TYHcFmC9bkHNU+wVrUXTrc7Ze67FZjfWVbX9i4Ip5YKzFCvBxK5m94rVGB/YiKI5XSOrP+LuhEYOV7Qm",
+ "aB0m57A4WCXUM0KHuL4RPvhqNtVSh/1T43NJa6rJCrRynA2yqS9t6+yAjCtwpdDwQbOATwrZ8isih4y6",
+ "qpPapXFNMsK0mAHF7lvz7a1T+zFe/JxxFPAd2lxourXU4SM72mgFTJOVAOXW084NVh9MnxmmyWaw+TTz",
+ "j/LgGNYtZ5ZtfdD9oY68R9p5gE3bV6atLYrS/NyKQLaTHpWlm3S48HFUHtAbPojgiGcx8a6dALn1+OFo",
+ "O8htZygJ3qeG0OACHdFQ4j3cI4y6CHCnwLwRWi1FYQtiQ7iitRAYj4DxhnFonoyKXBBp9ErAjcHzOtBP",
+ "pZJqKwKO4mlnQHP0PscYmtLO9XDboTobjCjBNfo5hrexqV88wDjqBo3gRvm2fqnKUHcgTLzCJ/IcIvvV",
+ "iFGqckJUhhkFnfrEMcZhGLevgN6+APrHoC8T2e5aUntyrnMTDSWJLqpsBTqhWRarIfkSvxL8SrIKJQfY",
+ "QFrVNfjKkqRYXaVdbqZPbW6iVHBVFTvm8g1uOV0qYnL0W5xA+ZSJZvAZQfZrWO/x63fvX786Ont9bO8L",
+ "RVRls0SNzC2hMAxxRk640mBE50oB+TVE46/Y79fOguNgBnXJI0Qb1kb3hIi5Most/hurkzdMQC5W5NrR",
+ "ij4wBDteW7xvj9QTzs3RSxRbJeMxgVff7dHRTH2z89j0v9MDmYtVG5B7rmCxixmHexRjw6/N/RYWeOgV",
+ "f7Q3YF1/AWMDhX9NBrXbOnO4zTzxxu1Vg0SfVP1axW47yfC7E1O8owcihIO6HdSKAdbJORQnnA6GtVPt",
+ "Euw0JTs55WDSkg0ysulJ9tHkqIF3KLDIxhWZz73e4wTYnjqAY+9EqI9Y6wP0vQ+HJSVlzoPfMIs+Zl3g",
+ "/LBVc9ehaza4uwgXjj5oWIwX/x8uodOUzcFroBSKNQVrY68CjAyXOsPC/kEJoP5YPlbhAlJthPrABysB",
+ "rlMQyEwWvGHypZTOgPpRR5W5Cjq7yub0SxPvYTa9zJYgO8uWdZ2NLxJzVEfaoP8fXxFZAXfPiLRj1kdH",
+ "zi6XkGp2sSeT6B9GS22yVKZej7XPgQWJRayOxPTPtF9TvW4A2pXosxOeoLTcrcEZyiM4h+0DRVrUEK0z",
+ "O/U87yY1CBADyB0SQyJCxTzZ1vDmnItM1ZSBWPCRI7Y7NNWcBgv8B3lxN5zLkyShYa7cjikvRExzHzWX",
+ "6XqtDFIMKhxKNuqX2B4WhI6xormqH2ep32EPtBpy0q/0dulqIGDeV21r9tUQQPnffJKnncW+7988QYCW",
+ "/UsqM98iqqp6LTjZcR/1MoR8eegu0Mt6ZtbE+fVzQiK1gzCaM82FYnyVDIXEtkPrwrdBMYAArwOsXY5w",
+ "LUG6p0fQhJwLBYkWPi5wFxy7UOHesbwJEtRgvT4L3GAVjfdNmRCsgEqxagZ1wRHhAo3eSg10MijmMTzn",
+ "LmS/st99EoSvgDlCI3f0muytxuEjPJnqITGk+iVxt+X+5IqbaL2Mc/sUlYpV9uAGlaH1uJQiq1J7QYcH",
+ "o7ExjK2bs4OVRBXGtL/KnuyfYxWpN0Gq2jls51b+TteUN+W82sfailB2DUFqeGe379QgENd98pVdwOpO",
+ "4PwjlerppBQiTwbMxSf9AiXdM3DO0nPIiLk7fGzUQJF/8hVaKWt/4OV66wtylCVwyB7OCDFqeVHqrXcN",
+ "tmvtdibnD/Su+Tc4a1bZmkFO35995PGwPqzmI2/J3/wwu7maAsP8bjmVHWRP+YvNQHEUSS8jT16MffE2",
+ "4qzrPkPQEJWFIial3DAXetT57uv8EdIP6vDv1n7CUglNDJa0piOUlrxBpyu8/NBYhMa9COA77AEvVIqD",
+ "NwE8N3Lg/MGBUj/USAmWMkgJreXv07P9Q801Xwq2SGFkvVmmLVxjneztfQmMKOpVbZuI47lvwsC6CIJj",
+ "rZi+6UOhKRFLzoaEY86lvKD5/ZsvsGDGEeLDPWwVX2io/4ZItqhUN4tWeENHzR3ounc3NX+H5pZ/gNmj",
+ "qA3YDeXsqPVbDL6EJJZGoznJRfMmCw5JLnFMazR+/DVZuEjrUkLKFOskoVz6api1uofFoZv3znbrl/vW",
+ "+bPQtyBjpyCIkrxtKutpgfdDA2FzRP9gpjJwcqNUHqO+HllE8BfjUWHK857r4rxlTbaVSjvRHELCHVuV",
+ "Azf2Na3K/WTuscvDdeClUynor3P0bd3CbeSibtY21iXSR+6u8mtjPBnxqoqmO7pSLEKwJClBUMmvj38l",
+ "Epb45oAgjx7hBI8eTV3TX5+0P5vj/OhRVIy7NydK62lwN2+MYn4eiv6zEW4Dgaad/ahYnu0jjFbYcPP+",
+ "BwbG/uISB/6QF0h+sfbU/lF1tduv477tbgIiJrLW1uTBVEFA8IhYYNdtFn28XUFaSaa3WM/Am9/YL9E6",
+ "Ud/VFnvn8akzYN3dp8U51BUxGvt+pfzt+p2wj70XRqZG57nGx+Beb2hR5uAOyjcPFn+Fp397lh08ffzX",
+ "xd8Onh+k8Oz5i4MD+uIZffzi6WN48rfnzw7g8fLrF4sn2ZNnTxbPnjz7+vmL9Omzx4tnX7/46wPDhwzI",
+ "FtCJz56b/G98pic5eneSnBlgG5zQktVvQBoy9i8E0BRPIhSU5ZND/9P/9CdsloqiGd7/OnHJOZO11qU6",
+ "nM8vLy9nYZf5Cg16iRZVup77efpv7707qQOsbcI37qiNnTWkgJvqSOEIv71/fXpGjt6dzBqCmRxODmYH",
+ "s8f4slYJnJZscjh5ij/h6Vnjvs8dsU0OP19NJ/M10Bz9X+aPArRkqf+kLulqBXLmnkowP108mXtRYv7Z",
+ "GTOvdn2bh1VH559bNt9sT0+sSjj/7JPtd7duZbM7W3fQYSQUu5rNF5jDM7YpqKDx8FLsq9XzzygiD/4+",
+ "d4kN8Y+oqtgzMPeOkXjLFpY+642BtdPDPSI7/9y86nxlmUQOMTeIzQegwSPQU8I0oQshMctdp2vDF3x6",
+ "LVPtR8BrIj/JDHGbXq/qF66DymKHH3pSvh2I+JGQExgybw5qa6aGF2tZQVjsqr5pWu2b++bDQfLi0+fH",
+ "08cHV38x94n78/nTq5H+zFfNA9mn9WUxsuEnzE1Fyyye3ycHB7d4/+2Ih6914yYFzwxGH+2vyqQY0t7d",
+ "VnUGIjUy9uTQdYYfeCL42TVXvNN+1Ioeijzn8pJmxKfI4NyP72/uE47eZMPXib23rqaT5/e5+hNuSJ7m",
+ "BFsGRRH6W/8TP+fikvuWRsioioLKrT/GqsUU/Lv1eJXRlUJromQXVMPkE5qrY7GUA8xFaXoD5nJqen1h",
+ "LvfFXHCT7oK5tAe6Y+by5JoH/M+/4i/s9M/GTk8tuxvPTp0oZ7Mw5/bJ2UbC6z0/soJoOigmZtJdb8l3",
+ "Oex3oHtP409uyWL+sFfy/3ufk2cHz+4Pgnbh+O9hS94KTb5Fu+6f9MyOOz67JKGOZpRlPSK37B+Ufimy",
+ "7Q4MFWpVusypiFyyYNyA3L9d+o+x9p6uP4ctsWFN3n3NRQY9eejqljzgT/vK/hce8oWHSDv90/ub/hTk",
+ "BUuBnEFRCkkly7fkJ17nvd9crcuyaMh4++j3eJrRRlKRwQp44hhWshDZ1tfybA14DtZy3hNU5p/bBfmt",
+ "FW3QLHWMv9dPpfaBXmzJyXFPgrHdupz25RabdjTGiE7YBXGnZtjlRQPK2C4yNwtZCU0sFjK3qC+M5wvj",
+ "uZXwMvrwxOSXqDbhDTndO3nqC8DESn9R3Z96jM7xhx7XO9novj4T019saD1kJPhgc8C6aP7CEr6whNux",
+ "hO8gchjx1DomESG6m1h6+wwCo4iz7rNWGF3hm1c5lUTBWDPFEY7ojBP3wSXuW0mL4srqaJQ3L/9FNuxu",
+ "9bYvLO4Li/sTea32M5q2IHJtTecctgUta/1GrSudiUtbODHKFfGtDJq7wtoY3FkHimhB/ABNsi750RU6",
+ "yLcY0coyI8ZpVoARqWpeZzr7FIwm1tqM0DzxvmIcJ0BWgbPYCvI0SINTkApuH0Tu+NocZG+tThhjsr9V",
+ "gBzN4cbBOJm2nC1uGyP12m8tf/V9I1c7bOlIFTYMvR+sUT953Pp7fkmZTpZCuhRZRF+/swaaz139sM6v",
+ "TS2M3hcs8BH8GAR2xH+d1++bRD92I2ZiX13EiG/UhMSFIWa4wXVw2YdPZp+wPLbb+yZi6nA+x7yytVB6",
+ "Prmafu5EU4UfP9Vb87m+lt0WXX26+v8BAAD///6xMH4pwwAA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index 70a6da158..39c96a0c2 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -87,6 +87,12 @@ type Account struct {
// The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.
TotalAssetsOptedIn uint64 `json:"total-assets-opted-in"`
+ // \[tbxb\] The total number of bytes used by this account's app's box keys and values.
+ TotalBoxBytes *uint64 `json:"total-box-bytes,omitempty"`
+
+ // \[tbx\] The number of existing boxes created by this account's app.
+ TotalBoxes *uint64 `json:"total-boxes,omitempty"`
+
// The count of all apps (AppParams objects) created by this account.
TotalCreatedApps uint64 `json:"total-created-apps"`
@@ -259,6 +265,23 @@ type AssetParams struct {
UrlB64 *[]byte `json:"url-b64,omitempty"`
}
+// Box defines model for Box.
+type Box struct {
+
+ // \[name\] box name, base64 encoded
+ Name []byte `json:"name"`
+
+ // \[value\] box value, base64 encoded.
+ Value []byte `json:"value"`
+}
+
+// BoxDescriptor defines model for BoxDescriptor.
+type BoxDescriptor struct {
+
+ // Base64 encoded box name
+ Name []byte `json:"name"`
+}
+
// BuildVersion defines model for BuildVersion.
type BuildVersion struct {
Branch string `json:"branch"`
@@ -643,6 +666,14 @@ type BlockResponse struct {
Cert *map[string]interface{} `json:"cert,omitempty"`
}
+// BoxResponse defines model for BoxResponse.
+type BoxResponse Box
+
+// BoxesResponse defines model for BoxesResponse.
+type BoxesResponse struct {
+ Boxes []BoxDescriptor `json:"boxes"`
+}
+
// CatchpointAbortResponse defines model for CatchpointAbortResponse.
type CatchpointAbortResponse struct {
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index 7a5720857..54a0802ea 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -32,6 +32,12 @@ type ServerInterface interface {
// Get application information.
// (GET /v2/applications/{application-id})
GetApplicationByID(ctx echo.Context, applicationId uint64) error
+ // Get box information for a given application.
+ // (GET /v2/applications/{application-id}/box)
+ GetApplicationBoxByName(ctx echo.Context, applicationId uint64, params GetApplicationBoxByNameParams) error
+ // Get all box names for a given application.
+ // (GET /v2/applications/{application-id}/boxes)
+ GetApplicationBoxes(ctx echo.Context, applicationId uint64, params GetApplicationBoxesParams) error
// Get asset information.
// (GET /v2/assets/{asset-id})
GetAssetByID(ctx echo.Context, assetId uint64) error
@@ -327,6 +333,94 @@ func (w *ServerInterfaceWrapper) GetApplicationByID(ctx echo.Context) error {
return err
}
+// GetApplicationBoxByName converts echo context to params.
+func (w *ServerInterfaceWrapper) GetApplicationBoxByName(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ "name": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
+ }
+
+ var err error
+ // ------------- Path parameter "application-id" -------------
+ var applicationId uint64
+
+ err = runtime.BindStyledParameter("simple", false, "application-id", ctx.Param("application-id"), &applicationId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
+ }
+
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetApplicationBoxByNameParams
+ // ------------- Required query parameter "name" -------------
+ if paramValue := ctx.QueryParam("name"); paramValue != "" {
+
+ } else {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Query argument name is required, but not found"))
+ }
+
+ err = runtime.BindQueryParameter("form", true, true, "name", ctx.QueryParams(), &params.Name)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter name: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetApplicationBoxByName(ctx, applicationId, params)
+ return err
+}
+
+// GetApplicationBoxes converts echo context to params.
+func (w *ServerInterfaceWrapper) GetApplicationBoxes(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ "max": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
+ }
+
+ var err error
+ // ------------- Path parameter "application-id" -------------
+ var applicationId uint64
+
+ err = runtime.BindStyledParameter("simple", false, "application-id", ctx.Param("application-id"), &applicationId)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter application-id: %s", err))
+ }
+
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetApplicationBoxesParams
+ // ------------- Optional query parameter "max" -------------
+ if paramValue := ctx.QueryParam("max"); paramValue != "" {
+
+ }
+
+ err = runtime.BindQueryParameter("form", true, false, "max", ctx.QueryParams(), &params.Max)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter max: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetApplicationBoxes(ctx, applicationId, params)
+ return err
+}
+
// GetAssetByID converts echo context to params.
func (w *ServerInterfaceWrapper) GetAssetByID(ctx echo.Context) error {
@@ -868,6 +962,8 @@ func RegisterHandlers(router interface {
router.GET("/v2/accounts/:address/assets/:asset-id", wrapper.AccountAssetInformation, m...)
router.GET("/v2/accounts/:address/transactions/pending", wrapper.GetPendingTransactionsByAddress, m...)
router.GET("/v2/applications/:application-id", wrapper.GetApplicationByID, m...)
+ router.GET("/v2/applications/:application-id/box", wrapper.GetApplicationBoxByName, m...)
+ router.GET("/v2/applications/:application-id/boxes", wrapper.GetApplicationBoxes, m...)
router.GET("/v2/assets/:asset-id", wrapper.GetAssetByID, m...)
router.GET("/v2/blocks/:round", wrapper.GetBlock, m...)
router.GET("/v2/blocks/:round/hash", wrapper.GetBlockHash, m...)
@@ -890,217 +986,226 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9aXPctrLoX8Gbe6u83OFI3nKPVZW6T7GdHL1jOy5bJ3eJ/GIM2TODIxLgAUBpJn7+",
- "76/QAEiQBGeoxbKd6JOtIZZGo9HoHR8nqShKwYFrNTn4OCmppAVokPgXTVNRcZ2wzPyVgUolKzUTfHLg",
- "vxGlJePLyXTCzK8l1avJdMJpAU0b0386kfDPiknIJgdaVjCdqHQFBTUD601pWtcjrZOlSNwQh3aIo+eT",
- "T1s+0CyToFQfyp95viGMp3mVAdGSckVT80mRc6ZXRK+YIq4zYZwIDkQsiF61GpMFgzxTM7/If1YgN8Eq",
- "3eTDS/rUgJhIkUMfzmeimDMOHiqogao3hGhBMlhgoxXVxMxgYPUNtSAKqExXZCHkDlAtECG8wKticvDr",
- "RAHPQOJupcDO8L8LCfA7JJrKJejJ+2lscQsNMtGsiCztyGFfgqpyrQi2xTUu2RlwYnrNyKtKaTIHQjl5",
- "++Mz8ujRo6dmIQXVGjJHZIOramYP12S7Tw4mGdXgP/dpjeZLISnPkrr92x+f4fzv3ALHtqJKQfywHJov",
- "5Oj50AJ8xwgJMa5hifvQon7TI3Iomp/nsBASRu6JbXytmxLO/0V3JaU6XZWCcR3ZF4Jfif0c5WFB9208",
- "rAag1b40mJJm0F/3k6fvPz6YPtj/9C+/Hib/4/588ujTyOU/q8fdgYFow7SSEni6SZYSKJ6WFeV9fLx1",
- "9KBWosozsqJnuPm0QFbv+hLT17LOM5pXhk5YKsVhvhSKUEdGGSxolWviJyYVzw2bMqM5aidMkVKKM5ZB",
- "NjXc93zF0hVJqbJDYDtyzvLc0GClIBuitfjqthymTyFKDFyXwgcu6OtFRrOuHZiANXKDJM2FgkSLHdeT",
- "v3Eoz0h4oTR3lbrYZUWOV0BwcvPBXraIO25oOs83ROO+ZoQqQom/mqaELchGVOQcNydnp9jfrcZgrSAG",
- "abg5rXvUHN4h9PWQEUHeXIgcKEfk+XPXRxlfsGUlQZHzFeiVu/MkqFJwBUTM/wGpNtv+f979/JoISV6B",
- "UnQJb2h6SoCnIhveYzdp7Ab/hxJmwwu1LGl6Gr+uc1awCMiv6JoVVUF4VcxBmv3y94MWRIKuJB8CyI64",
- "g84Kuu5PeiwrnuLmNtO2BDVDSkyVOd3MyNGCFHT9/f7UgaMIzXNSAs8YXxK95oNCmpl7N3iJFBXPRsgw",
- "2mxYcGuqElK2YJCRepQtkLhpdsHD+MXgaSSrABw/yCA49Sw7wOGwjtCMObrmCynpEgKSmZG/O86FX7U4",
- "BV4zODLf4KdSwhkTlao7DcCIU28Xr7nQkJQSFixCY+8cOgz3sG0cey2cgJMKrinjkBnOi0ALDZYTDcIU",
- "TLhdmelf0XOq4LvHQxd483Xk7i9Ed9e37vio3cZGiT2SkXvRfHUHNi42tfqPUP7CuRVbJvbn3kay5bG5",
- "ShYsx2vmH2b/PBoqhUyghQh/8Si25FRXEg5O+H3zF0nIO015RmVmfinsT6+qXLN3bGl+yu1PL8WSpe/Y",
- "cgCZNaxRbQq7FfYfM16cHet1VGl4KcRpVYYLSlta6XxDjp4PbbId86KEeVirsqFWcbz2msZFe+h1vZED",
- "QA7irqSm4SlsJBhoabrAf9YLpCe6kL+bf8oyN711uYih1tCxu2/RNuBsBodlmbOUGiS+dZ/NV8MEwGoJ",
- "tGmxhxfqwccAxFKKEqRmdlBalkkuUponSlONI/2rhMXkYPIve41xZc92V3vB5C9Nr3fYycijVsZJaFle",
- "YIw3Rq5RW5iFYdD4CdmEZXsoETFuN9GQEjMsOIczyvWs0Uda/KA+wL+6mRp8W1HG4rujXw0inNiGc1BW",
- "vLUN7ygSoJ4gWgmiFaXNZS7m9Q93D8uywSB+PyxLiw8UDYGh1AVrprS6h8unzUkK5zl6PiM/hWOjnC14",
- "vjGXgxU1zN2wcLeWu8Vqw5FbQzPiHUVwO4Wcma3xaDAy/HVQHOoMK5EbqWcnrZjGf3VtQzIzv4/q/G2Q",
- "WIjbYeJCLcphziow+EugudztUE6fcJwtZ0YOu30vRzZmlDjBXIpWtu6nHXcLHmsUnktaWgDdF3uXMo4a",
- "mG1kYb0iNx3J6KIwB2c4oDWE6tJnbed5iEKCpNCB4YdcpKd/pWp1DWd+7sfqHz+chqyAZiDJiqrVbBKT",
- "MsLj1Yw25oiZhqi9k3kw1axe4nUtb8fSMqppsDQHb1wssajHfsj0QEZ0l5/xPzQn5rM524b122Fn5BgZ",
- "mLLH2XkQMqPKWwXBzmQaoIlBkMJq78Ro3ReC8lkzeXyfRu3RC2swcDvkFmGW3pgDD+dCXu5IdGidk8bI",
- "SagZNeAI087OYtOqTBx+IoYS26AzUONX2k7J3eFjuGph4Z2mnwELyox6HVhoD3TdWBBFyXK4hvO6inIi",
- "o7k+ekje/fXwyYOHvz188p3hGqUUS0kLMt9oUOSuUxiI0psc7vVXhiJ7lev46N899qax9rixcZSoZAoF",
- "LftDWZObvZdtM2La9bHWRjOuugZwzLE8BsNeLNqJtSYb0J4zZa79Yn4tmzGEsKyZJSMOkgx2EtNFl9dM",
- "swmXKDeyug79CqQUMmL0wSOmRSry5AykYiJiv3/jWhDXwstcZfd3Cy05p4qYudEeWXG85SKUpdccQWMa",
- "CrVLZrBDH695gxs3IJWSbnrot+uNrM7NO2Zf2sj35i1FSpCJXnOSwbxatsTzhRQFoSTDjnhxvGTLlQ7u",
- "0TdSiMW1S1TRWWJLwg9WCslNn74s8lpkYHTBSl0De28Ga7BnKCfEGZ2LShNKuMgAFcdKxRn/gPcR3R7o",
- "rdHhXaJXVrCYg1FSUlqZ1VYlQV9EjxabjglNLRUliBo1YKytrey2lZ3OerZyCTQzygtwIubOIupstbhI",
- "io4U7Vmnu3Yi6lwLrlKKFJQySqdVJXaC5ttZstRb8ISAI8D1LEQJsqDyksBqoWm+A1BsEwO3lhOdGbkP",
- "9bjpt21gd/JwG6k0eqelAiOUmgOXg4YhFI7EyRlINKd+1v3zk1x2+6pyINjBiVbHrED1lVMuFKSCZyo6",
- "WE6VTnYdW9OoJf+ZFQQnJXZSceABE8pLqrQ1qjOeoS5g2Q3OY20rZophgAevQDPyL/7264+dGj7JVaXq",
- "q1BVZSmkhiy2Bg7rLXO9hnU9l1gEY9f3rRakUrBr5CEsBeM7ZNmVWARRXduenNepvzi00Jh7YBNFZQuI",
- "BhHbAHnnWwXYDR2+A4AYxbHuiYTDVIdyai/zdKK0KEtz/nRS8brfEJre2daH+u9N2z5xUd3w9UyAmV17",
- "mBzk5xaz1tW/okZox5FJQU/N3YQiuLX+92E2hzFRjKeQbKN8cyzfmVbhEdhxSAe0HxdMFMzWORwd+o0S",
- "3SAR7NiFoQUPqGJvqNQsZSVKEn+DzbULVt0JolYrkoGmzKgHwQcrZJVhf2LdOd0xLydojZKa++D3xObI",
- "cnKm8MJoA38KGzRfv7FxAsdBdME1SIqRUc3pppwgoN77aC7ksAmsaarzjbnm9Ao25BwkEFXNC6a1Dfxo",
- "C5JalEk4QNQisWVGZxOyPna/A2OMVO9wqGB5/a2YTqzYsh2+447g0kKHE5hKIfIR7oEeMqIQjHIfkFKY",
- "XWcuzsgHo3hKagHphBg0CNbM845qoRlXQP5bVCSlHAWwSkN9IwiJbBavXzODucDqOZ2joMEQ5FCAlSvx",
- "y/373YXfv+/2nCmygHMfnGcadtFx/z5qSW+E0q3DdQ0qujluRxHejqYac1E4Ga7LU3Ybqt3IY3byTWfw",
- "2r5jzpRSjnDN8q/MADoncz1m7SGNjDPS47ijrDDB0LF1476jl/Tz6PDN0DHo+hMHvqXm45B7ychX+eYa",
- "+LQdiEgoJSg8VaFeouxXsQjjN92xUxuloeir9rbrbwOCzVsvFvSkTMFzxiEpBIdNNGWBcXiFH2O97cke",
- "6Iw8dqhvV2xqwd8Bqz3PGCq8Kn5xtwNSflP7Va9h87vjdqw6YeQqaqWQl4SSNGeoswqutKxSfcIpSsXB",
- "WY6Y+r2sP6wnPfNN4opZRG9yQ51wqgwOa1k5ap5cQEQL/hHAq0uqWi5B6Y58sAA44a4V46TiTONchdmv",
- "xG5YCRLt7TPbsqAbsqA5qnW/gxRkXun2jYkBdkobrcuamMw0RCxOONUkB6OBvmL8eI3D+Tg2TzMc9LmQ",
- "pzUWZtHzsAQOiqkk7pL4yX5FF6Zb/sq5MzHbwX62RhQzfhOFt9HQiuD/v3f/4+DXw+R/aPL7fvL03/be",
- "f3z86d793o8PP33//f9r//To0/f3/uNfYzvlYY+FfznIj547afLoOYoMjXGpB/uNWRwKxpMokR2vgBSM",
- "YxRxh7bIXSP4eAK615ip3K6fcL3mhpDOaM4yqi9HDl0W1zuL9nR0qKa1ER0F0q/1fcyluxRJSdNT9OhN",
- "lkyvqvksFcWel6L3lqKWqPcyCoXg+C3boyXbUyWke2cPdlzpV+BXJMKuOkz20gJB3x8YD9lEk6WLwsST",
- "t6i4JYpKOSMlRiR5v4xYTOuwXJuOd0AwZnNFvVPR/fnwyXeTaRNrWX83mrr9+j5yJli2jkXUZrCOSWru",
- "qOERu6NISTcKdJwPIexRF5T1W4TDFmBEfLVi5c3zHKXZPM4rfZyH0/jW/IjbAAxzEtE8u3FWH7G4ebi1",
- "BMig1KtYmk5L5sBWzW4CdFwqpRRnwKeEzWDW1biyJSjvDMuBLjBdBE2MYkzcWn0OLKF5qgiwHi5klFoT",
- "ox8Ukx3f/zSdODFCXbtk7waOwdWds7bF+r+1IHd+enFM9hzrVXdscLcdOgjHjVgyXMRZy9lmuJlNTrTR",
- "7Sf8hD+HBePMfD844RnVdG9OFUvVXqVA/kBzylOYLQU58EFsz6mmJ7wnsw3mDwfhg6Ss5jlLyWkoWzfk",
- "aXPC+iOcnPxqOP7Jyfue56YvCbupovzFTpCcM70SlU5c0ksi4ZzKLAK6qpMecGSbsrZt1ilxY1tW7JJq",
- "3PhxnkfLUnWDn/vLL8vcLD8gQ+VCe82WEaWF9FKNEXUsNLi/r4W7GCQ99xlTlQJFPhS0/JVx/Z4kJ9X+",
- "/iMgrWjgD054MDS5KaFl87pUcHbX3oULtxoSrLWkSUmXoKLL10BL3H2UvAu0ruY5wW6tKGQf0IJDNQvw",
- "+BjeAAvHhSMqcXHvbC+fvRxfAn7CLcQ2RtxonBaX3a8gLvnS29WJbe7tUqVXiTnb0VUpQ+J+Z+qkxqUR",
- "srwnSbElN4fA5X/OgaQrSE8hw1Q0KEq9mba6e2elE1k962DKpmzaqELMK0Lz4BxIVWbUCfWUb7oJHgq0",
- "9lktb+EUNseiSUu6SEZHO8FADR1UpNRAujTEGh5bN0Z3853jG4Oqy9LH6WPApieLg5oufJ/hg2xF3ms4",
- "xDGiaAXADyGCyggiLPEPoOASCzXjXYn0Y8sz+src3nyRDE/P+4lr0qhhznkdrgbj+u33AjD/W5wrMqdG",
- "bhcuddkG0QdcrFJ0CQMScmihHRmq3rLq4iC77r3oTScW3Qutd99EQbaNE7PmKKWA+WJIBZWZTsiCn8k6",
- "AXAFM4IVSRzC5jmKSXW0hGU6VLYs5bbEwhBocQIGyRuBw4PRxkgo2ayo8lnVmHzuz/IoGeAzJoVsSwU8",
- "CrztQYZ5nejneW73nPa0S5cQ6LMAfepfqFqOSOMzEj4GgMW2Q3AUgDLIYWkXbht7QmkSVJoNMnD8vFjk",
- "jANJYo57qpRImU2Lb64ZNwcY+fg+IdaYTEaPECPjAGx0buHA5LUIzyZfXgRI7hJsqB8b3WLB3xAPu7Sh",
- "WUbkEaVh4YwPBNV5DkBdtEd9f3VijnAYwviUGDZ3RnPD5pzG1wzSy0hDsbWTf+bcq/eGxNkttnx7sVxo",
- "TfYqusxqQpnJAx0X6LZAvF2UiG2BQnw5W1aNq6G7dMzUA9f3EK7uBrlslwKgo+k3VZ+c5rdTQ2vfzf2b",
- "rGHp0yZH20eVxmh/iH6iuzSAv74Jos4+e9O9rqNKetvt2k68C+SnGCs2Z6Tv6+h7VBTkgBJx0pIgktOY",
- "B8wI9oDs9p3vFmjumN5H+eZe4MuXsGRKQ2OLNreSd67ctG2OYlUBIRbDq9OlXJj1vRWi5tE2bRU7tpZ5",
- "4ys4ExqSBZNKJ2jIjy7BNPpRoUb5o2kaFxTa0QK2wA7L4rwBpz2FTZKxvIrTq5v3b8/NtK9rI4yq5qew",
- "QXEQaLoicywIFY0h2jK1DTPbuuCXdsEv6bWtd9xpME3NxNKQS3uOb+RcdDjvNnYQIcAYcfR3bRClWxgk",
- "XvzPIdex9LBAaLCHMzMNZ9tMj73DlPmxd0ZfWCiG7yg7UnQtgba8dRUMfSRG3WM6qKfUT3kYOAO0LFm2",
- "7hgC7aiD6iK9kLbvE9U7WMDddYPtwEBg9ItF1UpQ7ZoEjXRrK2PxcG2zUZg5blcOCBlCOBVTvq5jH1GG",
- "tLH42C5cHQPN/wabX0xbXM7k03RyNbthDNduxB24flNvbxTP6OG2dqSWG+CCKKdlKcUZzRNnXR0iTSnO",
- "HGlic2+MvWFWF7fhHb84fPnGgf9pOklzoDKpRYXBVWG78ptZlS1/MHBAfN04o/B4md2KksHm1znboUX2",
- "fAWuRlcgjfaKiTTW9uAoOgvtIh5os9Pe6hwDdolbHARQ1v6BxnZl3QNtlwA9oyz3RiMP7UBQDC5uXEWa",
- "KFcIB7iyayHwECXXym56pzt+Ohrq2sGTwrm2VBErbKE8RQTv+o+NCIm2KCTVgmIpEGsS6DMnXhWJOX6J",
- "ylkaNzDyuTLEwa3jyDQm2HhAGDUjVmzAD8krFoxlmqkRim4HyGCOKDJ9WZkh3M2Fq3BccfbPCgjLgGvz",
- "SeKp7BxUrL3iTM3969TIDv253MDWPN0MfxUZIyyD073xEIjtAkbopuqB+7xWmf1Ca3OM+SGwx1/A2x3O",
- "2LsSt3iqHX04arYxgKu2uyksSNznf4YwbPG63dWQvfLq6vEMzBGtbsxUspDid4jreageR0LufeEfhiEe",
- "vwOfRTKXuiymtu40RZqb2Qe3e0i6Ca1QbQ/9ANXjzgc+KaxA4s2zlNuttsVGW4FecYIJgzP37PgNwTiY",
- "ewGtOT2f01h5FiNkGJgOG+9ny5CsBfGdPe6dzZu5WkwzEjhS67bMJqOVIJtsmH7i8yUFBjvtaFGhkQyQ",
- "akOZYGqdX7kSkWEqfk65rVlr+tmj5HorsMYv0+tcSEwlVXGbdwYpK2gelxwyxH479TZjS2YrtlYKgpKg",
- "biBb6tpSkSurav3LDWqOFmR/GhQddruRsTOm2DwHbPHAtphThZy8NkTVXczygOuVwuYPRzRfVTyTkOmV",
- "sohVgtRCHao3tedmDvocgJN9bPfgKbmLPivFzuCewaK7nycHD56i0dX+sR+7AFxp5m3cJEN28p+OncTp",
- "GJ12dgzDuN2os2hipK2nP8y4tpwm23XMWcKWjtftPksF5XQJ8TCJYgdMti/uJhrSOnjhmS0GrbQUG8J0",
- "fH7Q1PCngSBuw/4sGCQVRcF04TwbShSGnpp6n3ZSP5ytLO1KNXm4/Ed0EJbeP9JRIm/WaGrvt9iq0Y37",
- "mhbQRuuUUJs/nLPGde8LyJEjX4UAa1fVJassbsxcZuko5qAnf0FKybhGxaLSi+QvJF1RSVPD/mZD4Cbz",
- "7x5H6nW1S/TwiwF+43iXoECexVEvB8jeyxCuL7nLBU8Kw1Gye03SRHAqBz2Z8Wgxz9G7wYLbhx4rlJlR",
- "kkFyq1rkRgNOfSXC41sGvCIp1uu5ED1eeGU3TpmVjJMHrcwO/f3tSydlFELGatI0x91JHBK0ZHCGgWvx",
- "TTJjXnEvZD5qF64C/Zf1PHiRMxDL/FmOKQI/VCzPfmmSwDolDyXl6Spq95+bjr81xbfrJdtzHC2BsqKc",
- "Qx4dzt6Zv/m7NXL7/0OMnadgfGTbbilDu9zO4hrA22B6oPyEBr1M52aCEKvtrJg66jJfiozgPE29jYbK",
- "+tUZg3Jl/6xA6ViGAX6wkR9o3zF6ga2WRYBnKFXPyE/28ZwVkFY5AJRmWVHlNrUcsiVIZ3isylzQbErM",
- "OMcvDl8SO6vtY0vI2mpdSxTm2qvo6PVBcZ5xMYS+Gmw8vnn8ONsDLs2qlcbqHErToozlopkWx74BJryF",
- "tk4U80LszMhzK2ErL7/ZSQw9LJgsjGRaj2Z5PNKE+Y/WNF2h6NriJsMkP77MnKdKFbw3UNcNruvr4Lkz",
- "cLtKc7bQ3JQIo1+cM2XfTIEzaKe/1bmgTnXy6XDt5cmKc0spUR69LVf5Mmj3wFmHtjeHRiHrIP6Cgout",
- "0njRqnvvsFe0YEW3hF/voQGbAlXXg/VvYaWUC85SLBcRvNJSg+zeXxnjKxhRWaNrjPJH3J3QyOGKFg6s",
- "w4kcFgdLCXpG6BDXN1YGX82mWuqwf2p86GNFNVmCVo6zQTb19S+dvYRxBa5eEj7FE/BJIVv+F+SQUZde",
- "Upt+L0hGGDs/IAD/aL69duoRBpWeMo6CkEObi1+1Fg18HkIb6YlpshSg3HraCYTqV9Nnhrl0Gazfz/xz",
- "EjiGdV+YZVtfXX+oQ++5c54y0/aZaWsrJzQ/t8IU7aSHZekmHa6OGpUH9JoPIjjigUm8CTxAbj1+ONoW",
- "ctvqcsf71BAanKHDDkq8h3uEUVcK7ZRGPqN5ZSkKWxAb6hJNmGY8AsZLxqF57CRyQaTRKwE3Bs/rQD+V",
- "SqqtCDiKpx0DzdFLF2NoSjsT7VWH6mwwogTX6OcY3samyOkA46gbNIIb5Zv6jRVD3YEw8Qwfd3KI7Jcs",
- "RanKCVEZhh13ipjGGIdh3L5McvsC6B+Dvkxku2tJ7cm5yE00lEk2r7Il6IRmWazQ3A/4leBXklUoOcAa",
- "0qou1FWWJMUSDO2aFH1qcxOlgquq2DKXb3DF6VIRk6Nf4wTKx1U3g88Isl/Dep+/ePP2xbPD4xfP7X1h",
- "1HKbSmZkbgmFYYhGj1UajOhcKSAfQjR+wH4fOguOgxkUL44QbVhA2RMiBtTPN/hvrJjWMAE5n/qFo7q8",
- "Ax07Xli8b4/UE87N0UsUWybjMYFX39XR0Ux9ufPY9L/WA5mLZRuQG05z38aMwz2KseEX5n4Ls8B7FeLs",
- "DVgnaWMMlfDvIKB2W6cXtpkn3ri9knFou69L2m+3ngwXp5/iHT0QSRkk91MrBlhn0FA8ZToY/ku1y8LR",
- "lGzllFhRPjaCDcawleztc59RQ9hQAIaNvzCfe73HCbA9dQDH3opQH9nTB+hvPmyQlJQ5T2fDLPqYdQHG",
- "/ZDvMaGHzQZ3F+HCdnGQ2EriFcKH62w0tTXwGiiFYk1Vy1jp8JFhJcdY/TuoE9Ify/t0zyDVRqgPfFUS",
- "4CJVQ8xkwUMHt/U2BtSPOvrGldnYVlujX790B7PpZQAEWSy29uNsfCWJwzoiAf2k+NTAErh7a6Ad2zs6",
- "wnCxgFSzsx0ZF/9ptNQmmn/q9Vj7kE2QgMHqiDX/wPAF1esGoG0JEVvhCepPXRmcoXjrU9jcUaRFDdFi",
- "lFPP8y6TqIwYQO6QGBIRKubxs4Y354RhqqYMxIL3sNvu0JR8GawCHuQPXXIuT5KEhjlFW6Y8EzHNfdRc",
- "puuFMu0w+GooKaNfh3dYEHqOZY9V/YJD/YJwoNWQo345qHOXKI35MbWt2adMg/K/+WQ4O4t9mbqpU46W",
- "/XMqM98iqqp6LTjZch/1Mil8Ddku0It6ZtbEQ/Vj5yMFRjDqLc2FYnyZDIUOtkOQwlft0NGK1wEWOEa4",
- "FiDd+wTaP/ydaOHjp7bBsQ0V7gW2yyBBDRb1ssANptq/bWoJYJlEap99d07kcIFGb6UGOhlk/A/PuQ3Z",
- "z+x3Hyzuy+SN0MgdvSY7U/Z9JBxTPSSGVL8g7rbcHYR+Ga2XcW7fq1Gx9H9uUBlaj0spsiq1F3R4MBob",
- "w9jiGltYSVRhTPur7Mn+OZaaeRmk9JzCZs/K3+mK8qbmT/tYWxHKriFIoe3s9rUaBOK6T760C1heC5xf",
- "UqmeTkoh8mTAXHzUr2LQPQOnLD2FjJi7w8eQDFQCJ3fRSln7A89XG5+1X5bAIbs3I8So5UWpN9412C7I",
- "2Zmc39Hb5l/jrFllC4s4fX92wuPhT1jyQ16Rv/lhtnM1BYb5XXEqO8iOMgHrgQoKkp5H6uKPfasx4qzr",
- "1ipviMpCEZNSLpkzOup893X+COkHxbq3az9hSrnP+kyFtKYjlJa8QacrvLxqLELjyob7DjvAC5XioHC4",
- "50YOnC8cI/SqRkqwlEFKaC1/l57tnxit+VKwRQojkM0ylS0YJvpCZWBEUc9q20Qcz30TBuaPC441Nfqm",
- "D4WmRKxLGRKOOZfyjOY3b77AwgKHiA/3+k18oaH+GyLZolJdLlrhJR01d6DrXt/U/A2aW/4TzB5FbcBu",
- "KGdHrQu2+zpzWD+J5iQXzcMNOCQ5xzGt0fjBd2TuIlJLCSlTrBOsf+5L5tXqHlaQbR5F2q5f7lrnL0Jf",
- "gYydgiBK8ropv6UF3g8NhM0R/cJMZeDkRqk8Rn09sojgL8ajwtTQHdfFacuabMsZdqI5hIRrtioHbuwL",
- "WpX7Sa9jl4frwEunUtBf5+jbuoXbyEXdrG2sS6SP3GFPhp6P8WTES6+Z7uhKsQjBuoUEQSUfHnwgEhZY",
- "mFyQ+/dxgvv3p67ph4ftz+Y4378fFeNuzInSej/YzRujmF+Gov9shNtAoGlnPyqWZ7sIoxU23DwSgIGx",
- "v7kA6y/yTMFv1p7aP6quwPNF3LfdTUDERNbamjyYKggIHhEL7LrNoi88K0gryfQG8769+Y39Fq2n81Nt",
- "sXcenzpT0N19WpxCXTmgse9Xyt+uPwn7InRhZGp0nmt8MerFmhZlDu6gfH9n/u/w6C+Ps/1HD/59/pf9",
- "J/spPH7ydH+fPn1MHzx99AAe/uXJ4314sPju6fxh9vDxw/njh4+/e/I0ffT4wfzxd0///Y7hQwZkC+jE",
- "ZxlN/gvf8kgO3xwlxwbYBie0ZPVDcYaMfRlxmuJJhIKyfHLgf/rf/oTNUlE0w/tfJy6JYbLSulQHe3vn",
- "5+ezsMveEg16iRZVutrz8/Qf6HpzVAdY28RY3FEbO2tIATfVkcIhfnv74t0xOXxzNGsIZnIw2Z/tzx7g",
- "8zslcFqyycHkEf6Ep2eF+77niG1y8PHTdLK3Apqj/8v8UYCWLPWf1DldLkHOXD1189PZwz0vSux9dMbM",
- "T2bUZSwj3oaKB/HB/TLjzjGC8TY2FLxVtlO5KpLTupirszXwDCN4rX3QsLYaWUdZU7jtqGFUPn3d1vM5",
- "+DXyXs2CLSvZedqyjiJwlZ6ZIvZhdUmcSvOGpqdhlCwS5D8rkJuGYBwrCwvR+MKbLpa2UMuyHXjWiEmx",
- "R/Bi9dpxZrPPAaXWfoWGE2lZQQhJw1cNr9xPnr7/+OQvnyYjAEEnlwJMU/xA8/yDfXsU1ugp8In+LpFz",
- "GikyieLxtLFTY4dmm6YYOVd/DeuI123a8dofuODwYWgbHGDRfaB5bhoKDrE9eI+JdEgJeIge7u9f2wME",
- "dYqCjb+rR/EkcYmB+hzGfoq8TubfIRh4muzxNS60Hblz5eV2h+st+geaYW1nUNou5cE3u5Qjjn5mw/GJ",
- "vdE+TSdPvuG9OeKG59CcYMsgS71/i/ydn3Jxzn1LI81URUHlBmWVoAB9KJV+Gryt9sJiuXsfW17I7Ep3",
- "Wa9O+NHzHdfbHTXEFPvlmzq1eM33utos+qlcwWFYM6XVvRn5KeyNjBmzIW2uYSV58xpnKcUZywyLdQEc",
- "vmhEA9sdFSaKRi/bwLR7e+9+1nv3sG11aNX/iQHTIvGtMPXCFK568fWj0ztPqVzqqZKg6u8laid+1nru",
- "HaVv8IHsEQz2FndDj4sPiDcBvLWk067W/Pn5rtXfgmuidR98Rq78jQtrr2hu6CRYbidTzhbFuhXi/jRC",
- "XB25Zl9EwzqQ28Q6LPa+99HXMLsGUc7VcBshxIWabtA3qLF1t8Mp7s1sQbKwzeXYgYtC2ymeYWW5W8Hs",
- "cwtm/ZKMMTCaQntfThhDGFZNzcaLPEPWemLhQrUlv1Hp60+MrEFxy0C6W9C6BG/sCVGOE382nvmHFJ4c",
- "0m7Fpj+12GQDv7cITq16qS5LYFh2Au0Sw2yCcySrQGFwsh19SpSQLla2lExIpjdTwjjJwJw99BgKiaVh",
- "tKx4ag39dgrg+N9Xh/+FeQqvDv+LfE/2p7UIhpnzkeltJGhbBvoJdD/gWf2wOazFga2y0FcjYBzXSApS",
- "EULUa+FLniLSCrr+fghla+tXjIlnBV1Ptkoi029HWryq0NRJwexTERb+4QSd/v55vnb8rSKwpqnON4Ti",
- "/bOxiSKqmjf1StvihhZlEg4QjXHbMqN//SuWrX7REOBIYSF8ZWs7fMed2o4tdLgcUnxqb7dg0kNGFILL",
- "SXm3u/vN7m5fLCWlMGeaYeGq5j7xd1ULyOYNKAfuQHbDjPy3qDDYxT5xCrGi6zgDZoL4OZ0AGmQB5/jA",
- "bI2d+/e7C79/3+05U2QB58hBKceGXXTcv/8HEFnXda1rSrjgCccXOM+ABBFyt3LrVy23Ptl/9M2u5h3I",
- "M5YCOYaiFJJKlm/I33ldHPBqYnnNcyoelGvcyn96aVWNFB2I71fyXXd900w3kmEryzYwIdQPJTtdedq8",
- "tGR0eSzq5gvlqKl3nWDgn/Wq2P2Y9hwrs5iQHnhwftgcPR8jl38jjtDRxUUj91p8bz73DRCNp3l7M/E0",
- "45jp4/3HNwdBuAuvhSY/ornsM7P0z2o7iJNVwGwu7FFpPCYha3FZ61uZijmhU1cQHiuUb0idVWr4iWWE",
- "9kmoPtcwM4zlF1+xfX6nWThKl1303vKFW75wJb7QJaiGI2A2pNr7iK6CkB30jiRmc/2BXIyBv0WKwjtc",
- "BFmATlcuS7STFhNhK75G8TBP2faSzzX7/xDoSInMsNgXvjAzMns8SOBDpxfICPH97Ovwmc9sgTUA6vrT",
- "/sEqdOcw/4ZD/XyDe+SGKR9z7pNJzS5eCMpnzeT9NB1Ey3X4DG8RfDEE95jaC5cIb4+XW8QfISrdP7WQ",
- "kNeiyVV25Zf/iGaPz3kjf+4FvRYcrF/aSKyWFm9dkLW4gG/eIVJ8kQrreHTP6MdFhz3/UMtW+eGv9pmU",
- "rTLEmIvZTPZN3s5/jT4V17pAzNpmO5Prm9HG8F3T0NYSbVcR/YKKxRdhlV+htvElmNHNcA88pJ6FuBuf",
- "j+YnWK3F0uleXRtyiLnEy+2OZjRa1AFX0Qq5c8gFX6qvk8ts2/g4XiIEUBcijlcb/vMdy2dYCIYLX3PR",
- "lQZSjKdg3xjyz3EWTCkXFvh4/y83B6FmhS+nxsOsyC/MOD6nB+kmXT5YS7ku1eWDr6LlsxXLOqVng9o9",
- "Q/ytFaT1Ua9Z9mk3nwsc/RdkcYwHLC70TNGyBCovz9t2RxIdd2Y8eh6GuLaq99Z1lSKgGBRdMAjr3yYj",
- "DUGYKy0W7sqquAW0fg7ccgAXfyoW09rPbe5usTggJ/w+USv65MHD3x4++c7/+fDJdwOmLDOPK93SN2Y1",
- "A5nPdpgxFq0/bsRWW5CukXdw01t5sR2aTli2jpbqbMrxh+fCuY2RT9xRpKSbwQq/5Y7nBMJhm6cFbr6e",
- "ndJsHn8d22sk9Zt1R/yHWue0RddcFf7bZwQG4qwCJmIIrXlPoMb69qcFtkiBHbKsa7jftL7YhMHbW8wj",
- "T3YulC8qoOovpTcmqDYC9wJJGy1fThbEcrLTwClcvwJq5GlVlaWQuj7dajZKTIPByJxQShskXCeEpVSn",
- "q6rc+4j/wfpMn5pKSPaR2z3rrt4mh72zLa41ENmO2VS2bJcEcy50sSCvWCrFIRYadjeG2igNRS8o1XX9",
- "bdvzqdHbRfCccUgKwWPVxH7Gr6/wY7SUMQY3DnTGMNOhvt2Hq1vwd8BqzzOGuV0Vv1+JFnwlw0xntRLK",
- "OpkDTR9I/81pab0l0xyT1s97H1t/uqgS11KtKp2J86AvKkn23I/xOgfliMebjWvlolPWV5EMlKGub8+Q",
- "E+AhRtr110g9qKDo9GBJqD+paWfBeNYhEpTeUnEGUtVKv/QBILf2nT+OfWf8vgecq1K7mFWlrlcqeC0y",
- "sOO2S4XG8gO5yMCVV+wLA7W8E1eb/c3QtOsoMimtlitNqpJoEVOZmo4JTS3/tA9JqV0v79hW/oWJMyA0",
- "l0CzDZkDcCLmZtHtF8wIVfjOmde7nFQXf0CmgauUIgWlIEvqJ8l3gFYXrUQtTW/BEwKOANezECXIgspL",
- "AmvFm+2A6k7STg1uHQniJJg+1OOm37aB3cnDbaQSiL9F0ewiijIHZ3iJoHAkTtAuwD7z/vlJLrt9VYkP",
- "20eeQLJfj1mBlzCnXChIBc/U8ENlu44tFuMP1qLMCoKTEn0v3Aw8oBS8pEq/dQbj8D2XoOi/mWLLy2pD",
- "BafNyL/U5aZ7Y6eGX3JVqbomtdMTIYutgcN6y1yvYV3PhcZ4P3atiGpBKgW7Rh7CUjC+Q5YKn0rTgakd",
- "S/L3F4dFDqhTIvuobAHRIGIbIO98qwC7oRl4ABB8qLoM1QD3Lk8D11yIHCi39jxRlub86aTidb8hNL2z",
- "rQ/135u2feJyyeHI1zMBKjQSOMjPLWYVRpCvqCIODlLQU2dHWLoc7T7M5jAm6LdLtlG+OZbvTKvwCOw4",
- "pF2FNTz+rXPWORwd+o0S3SAR7NiFoQXHVOSvQva/qDzbdS58xmCGtokgEK9mHalw75wynSyEdG9n0oUG",
- "GdFvO9WWKdPKGa6sBU8L548jOIJjKG4c9wZjU67KJbhaEHyRBbP7/ZwKM9WPQo4K4m7HRVCmScU185Wy",
- "zHmrZcyvT4m+lZ5vpedb6flWer6Vnm+l51vp+VZ6/tzS85cKnk48n/bxK7GCG2TyTUr4txbrLdpIIKY6",
- "JcGI6OYcb83W0EBzXBDL8XIthRpM+8bH2ZSoZAokNdMxTsqcGmkI1toXHyNzquC7x/Xjpq5khHuezfAa",
- "0+DRQ/Lur4c+mmrlon7abe+6om1E6U0O91xWW/1+kk9vA24w6LLbqNd+Uhf1ZoX5BcuBKIOrF9j6OZxB",
- "biR5G6hBjC7S146OgebPHG52KEetF3LMaB+mLZ3Moa2gZfAMJa6VKkIx8q7zwM2C5mr4hRs7XkHLWPm2",
- "mk9btQlZww8i23TI3ezaHm5gm9CbmCrGqdxEgiV75N0jDS0M83GE1df7Pl175F+faPtktovC4u9Kq+ih",
- "3Ebl0ZC3esN6Q9mwy0WHTqLPu3XjvCY1gGNCHQw9+z0hb22/L5vqgxC5I9Zw5q/G8dxuWTMNbGsEKsd6",
- "vtW8HI/46OnFsz81hJ1VKeCL2Y7i1olptASeON6SzEW2SVqcqX3BZExRpaCY775kQtaIh6m+V8yX7VfQ",
- "l7khngeL28ZuQ3pYJ463DjBeG8w6ju3W2MIRHecNMP65ue8QhwxBII71xHTnbnXqC/KzZprNLU+75WnB",
- "aexc9oy7OOouE5ldjqfJjaz4MDt7sYa0MvOGh/SuumdYFmJ0rVuW+wzm1XJpBPa+FRqrJON4zcPkN83l",
- "7HLHMriLEYcdvC4tdNWs+O5wfcYRhAPfFZIspajKe7ZmPd+ggbMoKd94p4bR/Isqtzi0lTyul4faKObY",
- "S8PeuDZsl3vjzW+B9cndou3fLVrIOVXuxVnISMUxkTOW67DuPIK8G+PHa95w4K1PJNv1Rlbn5h3D/f0u",
- "u8jG2pFTgkz0mtsD1TpMLqfCntzZbfmsP8eN8Ma+DTHAYPv5AQ1D2H0xyIBl4c3QKabsr4Y2P31Lz8PS",
- "zNclNI7X1leAd2KtvUYqTxsxUgqapVShUYODPhfy9DPLknp9FLEiI5j4gkA/O25cWQwcd5RI2U5I9Vp5",
- "Ncd0bsG/dG2MJvPp0BVkamHj1rD7RzHs/uAPnyKUSHrePZzWh4NncgSboud6zaNcaq+0LxANxS+HqeW2",
- "5bVGYvSGbwdkBO//WIcy5CWhJM0ZupsFV1pWqT7hFB1awcL6xfhrN92wKPXMN4n7VCMuTzfUCTdC1YLU",
- "bq6oSLWAiAP7RwAvsalquQSlO5x4AXDCXSvGScWZxrkKlkqR2Iwkc10bjj6zLQu6IQuao0f2d5CCzI0S",
- "EValRveQ0izPXXSImYaIxQmnmuRgmP4rZgQ6M5z3INQRT5buaizEk5GXwEExlcStsz/Zr5jv65bvvQDo",
- "rLCffZ7eTSf6ethZNgj50XP3YsTRcywC3sSF9GC/sWCBgvEkSmTmxnfxVV3aIneNjOcJ6F4TYeJ2/YQb",
- "YVoLgoye6suRQ9ep2zuL9nR0qKa1ER3fr1/r+1i1wqVIjMpIl+b3JdOraj5LRbHnqxjuLUVd0XAvo1AI",
- "jt+yPVqyPVVCunf2YId8cAV+RSLs6vbm/gMlEQV0YE5LvfH4Ml537wfu5Wt4oOvrfpVrZ8Dp7RtYt29g",
- "3b6SdPsG1u3u3r6BdftC1O0LUX/WF6JmWyVEVyVw55stumfapERCameuGXjYrPW6S98ryfSMkOOV4f/U",
- "3AFwBpLmJKXKCkbcxj0XWBtRVWkKkB2c8KQFia2IaCa+2/zXqrkn1f7+IyD797p9rN0i4Lz9viiq4id0",
- "NZHvycnkZNIbSUIhzsCVlMbmWYXhL7bXzmH/Vz3uz7K3dQXdWOPKipYlmGtNVYsFS5lFeS6MMrAUnWht",
- "LvALSAOcrdFGmLbPaiE+McrdxcRQVwApJnT37/ej4O2YXQ/odCtp3WjtxT+ugL2NT/U37Pp44Naxewzx",
- "lmXcBMv44kzjD/TCxu1jGl/ZgkJHauu1rKsU5ikhZQuWxuxOXkay5mTDm3EESCvJ9AZvOFqy307B/P+9",
- "4eMK5Jm//CqZTw4mK63Lg709fM9yJZTem5irqfmmOh/N/UCXdgR3uZSSneFbOO8//f8AAAD//xjSW+CP",
- "MgEA",
+ "H4sIAAAAAAAC/+y9e3fbOJIo/lXw0+45eawoO8+d+Jw++3PidI93knRO7O6dnXZuByJLEsYUwAFAW+rc",
+ "fPd7UABIkAQl+hHn0f4rsYhnoVCod30cpWJZCA5cq9Hex1FBJV2CBol/0TQVJdcJy8xfGahUskIzwUd7",
+ "/htRWjI+H41HzPxaUL0YjUecLqFuY/qPRxL+VTIJ2WhPyxLGI5UuYEnNwHpdmNbVSKtkLhI3xL4d4vBg",
+ "9GnDB5plEpTqrvJnnq8J42leZkC0pFzR1HxS5JzpBdELpojrTBgnggMRM6IXjcZkxiDP1MRv8l8lyHWw",
+ "Szd5/5Y+1UtMpMihu84XYjllHPyqoFpUdSBEC5LBDBstqCZmBrNW31ALooDKdEFmQm5Zql1EuF7g5XK0",
+ "99tIAc9A4mmlwM7wvzMJ8Ackmso56NH7cWxzMw0y0WwZ2dqhg74EVeZaEWyLe5yzM+DE9JqQ16XSZAqE",
+ "cvLuxxfk0aNHz8xGllRryByS9e6qnj3ck+0+2htlVIP/3MU1ms+FpDxLqvbvfnyB8x+5DQ5tRZWC+GXZ",
+ "N1/I4UHfBnzHCAoxrmGO59DAftMjcinqn6cwExIGnoltfK2HEs7/RU8lpTpdFIJxHTkXgl+J/RylYUH3",
+ "TTSsWkCjfWEgJc2gv+0mz95/fDB+sPvp337bT/7h/nzy6NPA7b+oxt0CgWjDtJQSeLpO5hIo3pYF5V14",
+ "vHP4oBaizDOyoGd4+HSJpN71JaavJZ1nNC8NnrBUiv18LhShDo0ymNEy18RPTEqeGzJlRnPYTpgihRRn",
+ "LINsbKjv+YKlC5JSZYfAduSc5bnBwVJB1odr8d1tuEyfQpCYdV0KHrihrxcY9b62QAJWSA2SNBcKEi22",
+ "PE/+xaE8I+GDUr9V6mKPFTleAMHJzQf72CLsuMHpPF8TjeeaEaoIJf5pGhM2I2tRknM8nJydYn+3GwO1",
+ "JTFAw8NpvKPm8vaBrwOMCPCmQuRAOQLP37suyPiMzUsJipwvQC/cmydBFYIrIGL6T0i1Ofb/Pvr5DRGS",
+ "vAal6Bze0vSUAE9F1n/GbtLYC/5PJcyBL9W8oOlp/LnO2ZJFlvyartiyXBJeLqcgzXn590ELIkGXkvct",
+ "yI64Bc+WdNWd9FiWPMXDradtMGoGlZgqcrqekMMZWdLVD7tjtxxFaJ6TAnjG+JzoFe9l0szc25eXSFHy",
+ "bAAPo82BBa+mKiBlMwYZqUbZsBI3zbb1MH6x9dScVbAcP0jvcqpZtiyHwyqCM+bqmi+koHMIUGZCfnGU",
+ "C79qcQq8InBkusZPhYQzJkpVdepZI069mb3mQkNSSJixCI4dOXAY6mHbOPK6dAxOKrimjENmKC8uWmiw",
+ "lKh3TcGEm4WZ7hM9pQqePu57wOuvA09/JtqnvvHEB502NkrslYy8i+aru7BxtqnRf4DwF86t2DyxP3cO",
+ "ks2PzVMyYzk+M/805+fBUCokAg1A+IdHsTmnupSwd8Lvm79IQo405RmVmfllaX96XeaaHbG5+Sm3P70S",
+ "c5YesXkPMKu1RqUp7La0/5jx4uRYr6JCwyshTssi3FDakEqna3J40HfIdsyLIuZ+JcqGUsXxyksaF+2h",
+ "V9VB9iyyF3YFNQ1PYS3BrJamM/xnNUN8ojP5h/mnKHLTWxezGGgNHrv3FnUDTmewXxQ5S6kB4jv32Xw1",
+ "RACslEDrFjv4oO59DJZYSFGA1MwOSosiyUVK80RpqnGkf5cwG+2N/m2nVq7s2O5qJ5j8lel1hJ0MP2p5",
+ "nIQWxQXGeGv4GrWBWBgCjZ+QTFiyhxwR4/YQDSoxQ4JzOKNcT2p5pEEPqgv8m5uphrdlZSy8W/JVL8CJ",
+ "bTgFZdlb2/COIgHoCYKVIFiR25znYlr9cHe/KGoI4vf9orDwQNYQGHJdsGJKq3u4fVrfpHCew4MJ+Skc",
+ "G/lswfO1eRwsq2Hehpl7tdwrVimO3B7qEe8ogscp5MQcjQeD4eGvA+NQZliI3HA9W3HFNP6raxuimfl9",
+ "UOdvA8VC2PYjF0pRDnJWgMFfAsnlbgtzuojjdDkTst/uezm0MaPEEeZSuLLxPO24G+BYgfBc0sIu0H2x",
+ "bynjKIHZRnatV6SmAwlddM3BHQ5wDVd16bu29T5EV4Ko0FrD81ykp3+lanENd37qx+peP5yGLIBmIMmC",
+ "qsVkFOMywutVjzbkipmGKL2TaTDVpNridW1vy9YyqmmwNbfeOFtiQY/9kOiBjMguP+N/aE7MZ3O3Dem3",
+ "w07IMRIwZa+zsyBkRpS3AoKdyTRAFYMgSyu9EyN1X2iVL+rJ4+c06IxeWoWBOyG3CTwhsbr2a/BcrGJr",
+ "eC5WnSsgVqCuAz/MOMhGaliqAes7cCsTeP4OfFRKuu4CGcceAmSzQcO6KrwNPHzxzSy15nV/KuTlqE+L",
+ "rHBS65MJNaMGxHfcAhI2LYvEoWJEJ2UbtAaqTXibiUZ7+BjEGlA40vQzQEGZUa8DCs2BrhsKYlmwHK4B",
+ "9RdRoj+lCh49JEd/3X/y4OHvD588NShZSDGXdEmmaw2K3HWyGVF6ncO97s5QOipzHR/96WOvhWyOGxtH",
+ "iVKmsKRFdyir3bQskG1GTLsu1Jpgxl1XCxxyOY/BUHILdmIV92ZpB0wZDms5vZbD6ANYVs+SEbeSDLYi",
+ "00W3V0+zDrco17K8DlEWpBQyol/DK6ZFKvLkDKRiImIqeetaENfCs7dF+3e7WnJOFTFzo+q35MhQRDBL",
+ "r/hwum+HPl7xGjYbKb/db2R3bt4h59IEvtckKlKATPSKkwym5bwhCc2kWBJKMuyIb/QrNl/ogGV5K4WY",
+ "XfurHZ0ltiX8YBm+3PTpsn1vRAZG7C7VNZD3erAaegZzQpjRqSg1oYSLDFBGL1Wc8PcYetHChIYxHb4l",
+ "emF5uCkYeTClpdltWRA0+3Rwse6Y0NRiUYKgUT168cqgYVvZ6awRMZdAMyMnAidi6pTPTi2Om6Ros9Ke",
+ "dLpnJyI5N9ZVSJGCUka+t1Lb1qX5dhYt9QY44cJxwdUsRAkyo/KSi9VC03zLQrFNbLkVS+409t1VD5t+",
+ "0wG2Jw+PkUoj4lssMPy/uXA5aOgD4UCYnIFEzfVnPT8/yWWPryx6/Eoca3XMlqgp4JQLBangmYoOllOl",
+ "k23X1jRq8H9mB8FNid1UHLhHW/WKKm3tF4xnKHZZcoPzWDWWmaJ/wb1PoBn5V//6dcdODZ3kqlTVU6jK",
+ "ohBSQxbbA4fVhrnewKqaS8yCsav3VgtSKtg2ch+UgvEdsOxOLICortR8zsDX3Rwqw8w7sI6CsrGIGhCb",
+ "FnLkWwXQDW3rPQsxMnrVExGHqRbmVAb98UhpURTm/umk5FW/PjAd2db7+pe6bRe5qK7peibAzK79mtzK",
+ "zy1krVfFghqmHUcmS3pq3iZkwa2hpbtmcxkTxXgKySbMN9fyyLQKr8CWS9oj/Ti/rWC21uVo4W8U6XqR",
+ "YMsp9G24RxR7S6VmKSuQk/gbrK+dsWpPEFUQkgw0ZUY8CD5YJqsI+xNrOWuPeTlGaxDX3F1+h22ObCdn",
+ "Ch+M5uJPYY2WgrfWJeM4cOS4Bk4xMqq53ZQTXKg39JoHOWwCK5rqfG2eOb2ANTkHCUSV0yXT2vrYNBlJ",
+ "LYokHCCqkdgwo1O/WXcGfwJD9IFHOFSwve5RjEeWbdm8vuMW49IAh2OYCiHyAZaYDjCiKxhkqSGFMKfO",
+ "nEuX9/vxmNRYpGNiUPdaEc87qgFm3AH5X1GSlHJkwEoN1YsgJJJZfH7NDOYBq+Z0NpkaQpDDEixfiV/u",
+ "329v/P59d+ZMkRmcez9I07ANjvv3UUp6K5RuXK5rENHNdTuM0HZU1ZiHwvFwbZqy3SbgRh5ykm9bg1f6",
+ "HXOnlHKIa7Z/ZQLQupmrIXsPcWSYPQTHHaSFCYaO7RvPHQ3Sn0eGr4eOra47cWDGqz/2WfIMf5Wvr4FO",
+ "24GIhEKCwlsVyiXKfhWz0FXWXTu1VhqWXdHedv29h7F559mCDpcpeM44JEvBYR2NDmEcXuPHWG97s3s6",
+ "I43t69tmmxrrby2rOc8QLLwqfPG0A1R+W5mwr+Hw2+O2tDqhkzBKpZAXhJI0ZyizCq60LFN9wilyxcFd",
+ "jqj6Pa/fLye98E3igllEbnJDnXCKZp6KV46qJ2cQkYJ/BPDikirnc1C6xR/MAE64a8U4KTnTONfSnFdi",
+ "D6wAifr2iW25pGsyozmKdX+AFGRa6uaLib6MShupy6qYzDREzE441SQHI4G+Zvx4hcN5l0GPMxz0uZCn",
+ "FRQm0fswBw6KqSRukvjJfkVrsdv+wlmOMbDEfrZKFDN+7fC41tAIlvg/d/9r77f95B80+WM3efYfO+8/",
+ "Pv50737nx4effvjh/zZ/evTph3v/9e+xk/Jrj3nauZUfHjhu8vAAWYZaudRZ+41pHJaMJ1EkO14AWTKO",
+ "Dtst3CJ3DePjEeheraZyp37C9YobRDqjOcuovhw6tElc5y7a29HCmsZBtARIv9f3Mev5XCQFTU/Rojea",
+ "M70op5NULHc8F70zFxVHvZNRWAqO37IdWrAdVUC6c/Zgy5N+BXpFIuSqRWQvzRB07YFx71hUWTqHV7x5",
+ "s5JbpCiVU1Ki85e3y4jZuPKAtpGPewTdYxfUGxXdnw+fPB2Na7fW6ruR1O3X95E7wbJVzHk5g1WMU3NX",
+ "Da/YHUUKulag43QI1x41QVm7RTjsEgyLrxasuHmaozSbxmmld6lxEt+KH3Lr62JuIqpn107rI2Y3v24t",
+ "ATIo9CIWEdXgObBVfZoALZNKIcUZ8DFhE5i0Ja5sDsobw3KgM4zMQRWjGOIiWN0Di2geKwKohxsZJNbE",
+ "8AfZZEf3P41Hjo1Q187Zu4Fj62rPWeli/d9akDs/vTwmO470qjvWj94OHXg+RzQZzrmvYWwz1MzGgdpA",
+ "ghN+wg9gxjgz3/dOeEY13ZlSxVK1UyqQz2lOeQqTuSB73l/wgGp6wjs8W2+oduCpSYpymrOUnIa8dY2e",
+ "NvyuO8LJyW+G4p+cvO9YbrqcsJsqSl/sBMk50wtR6sTFFyUSzqnMIktXVXwJjmyjAzfNOiZubEuKXfyS",
+ "Gz9O82hRqLafeXf7RZGb7QdoqJwXtTkyorSQnqsxrI5dDZ7vG+EeBknPfXBaqUCRD0ta/Ma4fk+Sk3J3",
+ "9xGQhuP1B8c8GJxcF9DQeV3KD76t78KNWwkJVlrSpKBzUNHta6AFnj5y3kvUruY5wW4Nh2/v0IJD1Rvw",
+ "8Og/ALuOCzuv4uaObC8fKB7fAn7CI8Q2ht2ojRaXPa/ABfzSx9VyI++cUqkXibnb0V0pg+L+ZKr40blh",
+ "srwlSbE5N5fAhdpOgaQLSE8hw6g/WBZ6PW5098ZKx7J60sGUjY61DpwYwoXqwSmQssioY+opX7djaRRo",
+ "7QOI3sEprI9FHQF2keCZZiyH6ruoiKkBd2mQNby2boz24TvDN/qvF4UPiUDfWI8WexVe+D79F9myvNdw",
+ "iWNI0Yg16AMElRFAWOTvAcElNmrGuxLqx7Zn5JWpffkiwbSe9hPXpBbDnPE63A2GUNjvS8BQe3GuyJQa",
+ "vl24KHEbrxBQsVLROfRwyKGGdmBUQEOri4Nse/eiL52YtR+0znsTXbJtnJg9RzEFzBeDKijMtFwW/EzW",
+ "CIA7mBBM/uIANs2RTaq8JSzRobKhKbfZLPqWFkdgkLxmOPwymhAJOZsFVT6AHeP8/V0exAN8xvibTVGX",
+ "h4G1PQjmr2IqPc1t39OOdOliL33ApY+yDEXLARGThsNHB7DYcQiODFAGOcztxm1jjyh1LFB9QGYdP89m",
+ "OeNAkpjhniolUmYzENTPjJsDDH98nxCrTCaDR4ihcbBsNG7hwOSNCO8mn19kkdzFMlE/NprFgr8h7nZp",
+ "XbMMyyMKQ8IZ73Gq8xSAOm+P6v1q+RzhMITxMTFk7ozmhsw5ia8epBP8h2xrK9TPmVfv9bGzG3T59mG5",
+ "0J7sU3SZ3YQ8k190nKHbsOKpWCXW7zrK8U5XU4PvUW819AKPXUwbZnlHkalYockenxbMX6K2rKV/HX4Z",
+ "gYS/YgrxFfv1veZ2MZum3cxNxbBQIco4dV6FLn3sxJCpeziYPnS5G0ROXmoBLWVHnWPMCb9bhdQme9J9",
+ "zOtXbVxnBPCOtbHr33eFoqfUA7+uFqaKdXzb5liieoqm5bkZ5hmwkDGkN2Sia+7pGpUU5IBCQdJgopLT",
+ "mBHQyDaAL86R7xYoLzCYlPL1vcCdQcKcKQ21Ot48zN6+dNPqSYo5LISY9e9OF3Jm9vdOiOqZskHS2LGx",
+ "zRvfwZnQkMyYVDpBW0Z0C6bRjwqF6h9N0ziv1HSYsOmcWBanDTjtKayTjOVlHF/dvH87MNO+qUiiKqdI",
+ "bxknQNMFmWL6sagb1Yaprafdxg2/sht+Ra9tv8Nug2lqJpYGXZpzfCP3okV5N5GDCALGkKN7ar0g3UAg",
+ "kfc5gFzHIuQCvslezsw0nGzSvnYuU+bH3uqAYlfR/0bZkaJ7CRQGG3fB0Exk2BKmg+xd3aiPnjtAi4Jl",
+ "q5Yu1I7aKzHTCyk8fFqEFhTwdN1gWyAQ6D1jjsUSVDMDRs3g2zxsjQDUySDIHDfzVIQEIZyKKZ9FtAso",
+ "g9rIKm6D1THQ/G+w/tW0xe2MPo1HV1OdxmDtRtwC67fV8UbhjEZ+q0prWEIuCHJaFFKc0TxxCuY+1JTi",
+ "zKEmNvf66BsmdXE15vHL/Vdv3fI/jUdpDlQmFavQuytsV3wzu7LJNnouiM9SaGQ+z7NbVjI4/CpDQKiU",
+ "Pl+AywgXcKOd1DW1wSG4ik5JPYv7Gm1VOTvbiN3iBhsJFJWJpFbfWQtJ0ypCzyjLvd7Mr7bHLwg3Nyz/",
+ "UZQqhANc2boSGMmSayU3ndsdvx01dm2hSeFcG3LWLW1aRkUEb5vQDQuJ6jhE1SXFxDNWK9IlTrxcoiYh",
+ "UTlL4zpWPlUGObi1nZnGBBv3MKNmxJL1mGJ5yYKxTDM1QNBtLTKYIwpMn8SoD3ZT4fJpl5z9qwTCMuDa",
+ "fJJ4K1sXFTP9OG179zk1vEN3Ljew1dDXw1+FxwiTLrVfPFzEZgYjtNR1lntQicx+o5VGyvwQmCQuYPAP",
+ "Z+w8iRuM9Q4/HDZbN8hF0+IWpr/u0j+DGDZV4vbc2154ddmfeuaI5tJmKplJ8QfE5TwUjyNRBz7NFEMv",
+ "lz+ATyLBW20SU2l36pTg9ey9x93H3YRaqKaTQg/W48kHZjnMd+M11JTbo7apbRu+bnGECf1Td+z4NcK4",
+ "NXd8enN6PqWxZECGyTBr2q8NwA1duhbEd/awd2p/5jJ/TUhgS67aMhuPV4CsA4K6sd+XZBjstINZhZoz",
+ "QKwNeYKxtf/lSkSGKfk55TZDsulnr5LrrcAqv0yvcyExmlbF1f4ZpGxJ8zjnkKVdFW/G5szmBy4VBAlo",
+ "3UA2sbrFIpfE15rYa9AczsjuOEhx7U4jY2dMsWkO2OKBbTGlCil5pYiqupjtAdcLhc0fDmi+KHkmIdML",
+ "ZQGrBKmYOhRvKuPVFPQ5ACe72O7BM3IXzXaKncE9A0X3Po/2HjxDpav9Yzf2ALhE4JuoSYbk5H8cOYnj",
+ "Mdot7RiGcLtRJ9HYUFu9oZ9wbbhNtuuQu4QtHa3bfpeWlNM5xD1FllvWZPviaaIirQUXntnU40pLsSZM",
+ "x+cHTQ196vFjN+TPLoOkYrlkeumMO0osDT7V2WXtpH44m8fcJQbz6/If0UZaeBNRS4i8WaWpfd9iu0ZL",
+ "9hu6hCZYx4TaEOqc1d4LPl0hOfSJGDBTWpUgzcLGzGW2jmwOOjPMSCEZ1yhYlHqW/IWkCyppasjfpG+5",
+ "yfTp40h2uGaWIn6xhd843CUokGdx0MsetPc8hOtL7nLBk6WhKNm9Om4kuJW9xty42a7Pdrh56KFMmRkl",
+ "6UW3soFuNKDUV0I8vmHAK6JitZ8L4eOFd3bjmFnKOHrQ0pzQL+9eOS5jKWQsLU993R3HIUFLBmfouxc/",
+ "JDPmFc9C5oNO4Sqr/7KWB89yBmyZv8sxQeC5iEinPmNhpUl3vuoR7UDfNTUfDBpM3VBj0swOd/NGP698",
+ "7hqfzBe/VvyjvdgvfKQIZL+DnkMMMldGjzOrvgf2b0qei9XQQ23dEH+wXwFooiApWZ79Wsd3thKDSsrT",
+ "RdSeNTUdf69LGFSbs+9TNLvRgnIOeXQ4ywv+7nnGCFf7TzF0niXjA9u2c5Xa7bY2Vy+8uUy/KD+hAS/T",
+ "uZkghGoz4K1yqM7nIiM4T51Kp6ae3Ry3QSbCf5WgdCx4CD9Ypy7UWxp51ybCI8AzlBYn5CdbgmwBpJHp",
+ "A6U0tixzmzUCsjlIp1Avi1zQbEzMOMcv918RO6vtYxNx20R8cxRSmrto6auCvFvD3IN9Tu146MLwcTb7",
+ "UptdK42Jd5SmyyIWZmpaHPsGGMsa6vBRfAmhMyEHVnJUXi6xkxh8mDG5NBJXNZrlXRAnzH+0pukCRbIG",
+ "Se1H+eEZJD1WqqBqS5V9vUqdhffOrNslkbQ5JMdEGLn5nClbeQrOoBnZWoV5O5WAj3Rtbk+WnFtMifIe",
+ "m9IQXAbsfnHWUcOr+aMrawH+ggy5TcB60YSaR9grmoumnZ2zU67FRjdWWbV9RcGUcsFZiplgYk+zq2I1",
+ "xAY2IGlOW8nqr7i7oZHLFc0JWrnJOSj2Zgn1hNABrquED76aQ7XYYf/UWC5pQTWZg1aOskE29qltnR6Q",
+ "cQUuFRoWNAvopJANuyJSyKipOqlMGhdEIwyL6RHsfjTf3jixH/3FTxlHBt+BzbmmW00dFtnRRipgmswF",
+ "KLefZmyw+s30mWCYbAar9xNflAfHsGY5s21rg+4Ote8t0s4CbNq+MG1tUpT654YHsp10vyjcpP2Jj6P8",
+ "gF7xXgBHLIuJN+0EwK3GD0fbgG4bXUnwPTWIBmdoiIYC3+EOYlRJgFsJ5g3TajEKWxDrwhXNhcB4ZBmv",
+ "GIe6ZFTkgUijTwIeDN7Xnn4qlVRbFnAQTTsGmqP1OUbQlHamh6sO1TpgBAnu0c/Rf4x1/uIewlE1qBk3",
+ "ytdVpSqD3QEz8QJL5DlAdrMRI1flmKgMIwpa+YljhMMQbp8BvfkAdK9Blyey3bWk9uZc5CXqCxKdltkc",
+ "dEKzLJZD8jl+JfiVZCVyDrCCtKxy8BUFSTG7SjPdTBfb3ESp4KpcbpjLN7jidKmI8dFvcALlQybqwScE",
+ "ya8hvQcv3757+WL/+OWBfS8UUaWNEjU8t4SlIYgTcsiVBsM6lwrIhxCMH7Dfh9aG48sM8pJHkDbMje4R",
+ "EWNlpmv8N5Ynrx+BnK/Ihb0VvWMIdrwwe98cqcOcm6uXKDZPhkMCn76rg6Oe+nL3se5/rRcyF/PmQm44",
+ "g8UmYhyeUYwMvzTvW5jgoZP80b6AVf4F9A0UvpoMSrdV5HCTeOKL28kGiTapqlrFZj1Jf92JMb7RPR7C",
+ "Qd4OatkAa+Ts8xNOe93aqXYBdpqSjZSyN2jJOhnZ8CRbNDmq4O1zLLJ+ReZzp/cwBrYjDuDYGwHqPda6",
+ "C/qbd4clBWXOgl8Tiy5kneN8v1Zz06WrD7i9CeeO3qtYjCf/70+hU6fNwWegEIrVCWtjVQEGuksdY2L/",
+ "IAVQdyzvq3AGqTZMfWCDlQAXSQhkJgtqmNym0ukRPyqvMpdBZ1PanG5q4i3EphPZEkRn2bSuk+FJYvYr",
+ "Txu0/2MVkTlwV0ak6bM+2HN2NoNUs7MtkUT/Y6TUOkpl7OVYWw4sCCxilSemL9N+QfG6XtCmQJ+N6wlS",
+ "y115OX1xBKewvqNIAxuieWbHnuZdJgcBQgCpQ2JQRKiYJdsq3pxxkakKMxAK3nPEdoc6m1Nvgv8gLu6S",
+ "c3mUJDSMldsw5ZmISe6D5jJdLxRBik6FfcFG3RTb/YzQAWY0V1VxlqoOeyDVkMNuprdzlwMB474qXbPP",
+ "hgDK/+aDPO0str5/XYIANfvnVGa+RVRU9VJwsuE96kQI+fTQ7UXPqplZ7efXjQmJ5A5Cb840F4rxedLn",
+ "Ett0rQtrg6IDAT4HmLsc1zUD6UqPoAo5FwoSLbxf4KZ1bAKFq2N5GSCo3nx9dnG9WTTe1WlCMAMqxawZ",
+ "1DlHhBs0cis1q5NBMo/+OTcB+4X97oMgfAbMARK5w9dkazYO7+HJVAeIIdbPiHsttwdXXEbqZZzbUlQq",
+ "ltmDG1CG2uNCiqxM7QMdXoxaxzA0b84GUhIVGNPuLju8f45ZpF4FoWqnsN6x/He6oLxO59W81paFsnsI",
+ "QsNbp32tCoG47JPP7Qbm17LOLylUj0eFEHnSoy4+7CYoad+BU5aeQkbM2+F9o3qS/JO7qKWs7IHni7VP",
+ "yFEUwCG7NyHEiOXLQq+9abCZa7c1Ob+jN82/wlmz0uYMcvL+5ITH3fowm4+8In3zw2ymagoM8bviVHaQ",
+ "LekvVj3JUSQ9j5S8GFrxNmKsa5chqJHKriLGpVwyFnrQ/e7K/BHUD/Lwb5Z+wlQJtQ+WtKoj5Ja8QqfN",
+ "vLyuNULDKgL4DluWFwrFQU0AT43ccr6wo9TrCijBVnoxobH9bXK2L9Rc0aXgiBR61ptt2sQ11sjePJdA",
+ "iaJeVLqJOJy7KgzMiyA45orpqj4UqhIx5WyIOOZeyjOa37z6AhNm7CM8XGGr+EZD+TcEsgWlupy3wis6",
+ "aO5A1r2+qflbVLf8D5gziuqA3VBOj1rVYvApJDE1Gs1JLuqaLDgkOccxrdL4wVMydZ7WhYSUKdYKQjn3",
+ "2TArcQ+TQ9f1zjbLl9v2+avQV0BjJyCIgrypM+tpge9DvcL6in5hotJzc6NYHsO+DlpE4BejUWHI85bn",
+ "4rShTbaZSlveHELCNWuVAzP2BbXK3WDuodvDfeCjUyro7nPwa92AbeShrvc21CTSBe6m9GtDLBnxrIqm",
+ "O5pSLEAwJSnBpZIPDz4QCTOsOSDI/fs4wf37Y9f0w8PmZ3Od79+PsnE3ZkRplAZ388Yw5tc+7z/r4dbj",
+ "aNo6j5Ll2TbEaLgN1/U/0DH2dxc48EUqkPxu9andq+pyt1/EfNs+BARMZK+NyYOpAofgAb7ArtskWrxd",
+ "QVpKpteYz8Cr39jv0TxRP1Uae2fxqSJg3dunxSlUGTFq/X6p/Ov6k7DF3peGp0bjucZicC9XdFnk4C7K",
+ "D3em/wmP/vI423304D+nf9l9spvC4yfPdnfps8f0wbNHD+DhX5483oUHs6fPpg+zh48fTh8/fPz0ybP0",
+ "0eMH08dPn/3nHUOHzJLtQkc+em70dyzTk+y/PUyOzWJrmNCCVTUgDRr7CgE0xZsIS8ry0Z7/6f/3N2yS",
+ "imU9vP915IJzRgutC7W3s3N+fj4Ju+zMUaGXaFGmix0/T7f23tvDysHaBnzjiVrfWYMKeKgOFfbx27uX",
+ "R8dk/+3hpEaY0d5od7I7eYCVtQrgtGCjvdEj/AlvzwLPfcch22jv46fxaGcBNEf7l/ljCVqy1H9S53Q+",
+ "BzlxpRLMT2cPdzwrsfPRKTM/mVHnsUwP1lU88A/uVhBwhhH0t7Gu4I2MvMoliB1XeZqdroFn6MFr9YOG",
+ "tFXAOszqhISHNaHyaRlsnqq93yKlqGZsXspW1drKi8AlcWeK/PfRz2+IkMSJNG9pehp6ySJC/qsEua4R",
+ "xpGyMMGSz6nrfGmXal40Hc9qNilW3zJWigFnNuccYGplV6gpkZYlhCup6aqhlbvJs/cfn/zl02jAQtDI",
+ "pQDDbz/QPP9gywrDCi0FPoGFC1AeR/LHIns8rvXU2KE+pjF6zlVfwxIBVZumv/YHLjh86DsGt7DoOdA8",
+ "Nw0Fh9gZvMcAUcQEvEQPd3evrbZIFaJg/e+qUTxKXGKgLoWxnyKFB32JkZ6qg4+vcaNNz50rb7c9XGfT",
+ "z2mGadtBabuVB9/sVg452pkNxSf2Rfs0Hj35hs/mkBuaQ3OCLYPsC91X5Bd+ysU59y0NN1Mul1SukVcJ",
+ "akuEXOmn3tdqJ8yDvfOxYYXMrvSWdUoAHB5sed7uqD6i2E1L1kqzbb5XWZTRTuVyiWNeZ3VvQn4KeyNh",
+ "xihfG0NbSl4X2i2kOGOZIbHOgcMnQ6nXdkeFAdDRxzZQ7d6+u5/13d1vah0aea1ii2mg+MY1ddwUrvrw",
+ "db3TW1WSLlWFKMhmfYmcoJ+1VENL6OutfT+AwN7Crgd2fexNsN6K02lmIf/8dNfKb8Ez0XgPPiNV/saZ",
+ "tdc0N3gSbLcVKWeTvd0ycX8aJq7yXLPFDjG/6Sa2DosY7Hz0ufmugZVzuQkHMHGhpBv0DXLH3W1RinsT",
+ "m2gvbHM5cuC80LayZ5gx8ZYx+9yMWTfVaGwZdQLJL8eM4RoWdS7Si1QYbJQOuVDO1G+U+/oTA6uX3TIr",
+ "3c5oXYI2dpgoR4k/G838LpknB7RbtulPzTZZx+8NjFMjD7CLEujnnUC7wDAb4ByJKlDonGxHHxMlpPOV",
+ "LSQTkun1mDBOMjB3Dy2GQmJqGC1LnlpFv50COP739f7fMU7h9f7fyQ9kd1yxYBg5H5neeoI2eaCfQHcd",
+ "ntXz9X7FDmzkhb4aBuO4AlIQihCCXgufyheBtqSrH/pAtrJ2xRh7tqSr0UZOZPztcItXZZpaIZhdLMLE",
+ "P5yg0d9X3mz63yoCK5rqfE0ovj9rGyiiymmdh7fJbmhRJOEAUR+3DTP6qnaxaPWLugBHEgth9bjN6ztu",
+ "5SxtgMPFkGIVze2MSQcY0RVcjsu7Pd1v9nS7bCkphLnTDBNX1e+Jf6sai6xrm7nl9kQ3TMj/ihKdXWz1",
+ "YogVE8AZMBLEz+kY0CAKOMfa0RV07t9vb/z+fXfmTJEZnCMFpRwbtsFx//53wLKuqhzulHDBE47Fdc+A",
+ "BB5yt3zrV823Ptl99M3u5gjkGUuBHMOyEJJKlq/JL7xKDng1tryiOSUP0jVupD+dsKqaiw7Y9yvZrtu2",
+ "aaZrzrARZRuoEKoa6E5WHtcVxIwsj0ndfKIcNfamE3T8s1YVex7jjmFlEmPSAwvO8/XhwRC+/BsxhA5O",
+ "Lhp51+Jn87lfgKg/zbub8acZRkwf7z6+uRWEp/BGaPIjqss+M0n/rLqDOFoNJDY7U5vRfBPB4S2KgzSg",
+ "zlQekB8sCBNmQ7cuwnddGd4wI/a9CfF501XFQTgaOhc0r/O3UTm3nQz5Mvsjd/yfezj+nQn5UUjDbyib",
+ "Yk27EiHkDuN678HDR49dE0nPbSBBu9306eO9/R9+cM3qLPlW/Ow0V1ruLSDPhevgCHx3XPNh7+//+4/J",
+ "ZHJnK6UUq+frNzYl5ddCLrumkfDg+07rGz+kmO7CpQrdCrobcTV9LlZRwi5Wtw/LF3tYDPS/iwdl2kQj",
+ "Z8SprPd19NjgB8begIs8MV7XiarO6p2YkDfCpboqcyqtytlVxJqXVFKuAbKJR0Iyw5w2mNonzRlwbYR9",
+ "rPEjE8UysBlC5qVE7cISi2BLOMPYS5weTV2NFWyn4aC+Zvpt5PVamzGtXmAt3Ja9vtlXGcM6OkLiTz90",
+ "FPdTsUoqwFxKBX29xvAK2QbFNTYLgWwN/MKxh2iwasbGFnSkzaoDf26i/M3y2Rbd3cFuJ4oX9mOq/ZRC",
+ "gd7litooylt2zJYXw3pXa1LlcjG8mWd84tTLzDBUSv+KvWK2OmNEpcE2eG/v5600fiUq0UaomiJgDhK1",
+ "8xEddEJy0LmSmEPhO3LsC7ycpFh6NydBZqDThcvN0gpGj5AVXxmkn6Zsqgt73YwGHlE3MX2YYhfrlQ7M",
+ "2RSkzUBXM5AR5PvZZ782n9kMM29VVV98+WN0omK+ImBVDNCVTGXKR3r6FC7mFC+0yhf15F0eCcFyHZ56",
+ "twC+GIA7RO2lrzyHEHOb+B5iQb10l5A3os4Q5IqefI/Gxs/5In/uDb0RHKw3qOFYLS7eOv5V7AIqzBEo",
+ "PjWcFSnwretjHXZ8ecSN/MNfbXHCjTzEkIfZTPZNvs5/jRYebzwgZm+TrSmt6tGG0F3T0Gbwb+bu/4KC",
+ "xRchlV+htPEliNHNUA+8pJ6EuBefD6YnmCPR4ulOlZG9j7jEi1wMJjRaVGEO0boUU8gFn6uvk8psOvg4",
+ "XCIIUJX/iNf4+PNdyxeYfpELn+ncJeRUjKdgK3titR+myJIp5YJxHu/+5eZWqNnSJzHmYS6SL0w4Pqff",
+ "1k06WmEFkypBrlerRovWoNmmmbi1zpjZR98aoREf9Ypln7bTucC99oIkjvGAxIX+YLQogMrL07bt5p3j",
+ "1oyHB2FgWaNmRpXNNLIUA6ILhj78x2igIggzFImZe7JKbhfqM686CuCivsRsXPmGmLdbzPbICb9P1II+",
+ "efDw94dPnvo/Hz552qPKMvO4hIldZVY9kPlshxmi0fp+4ySajHQFvL2bPsqLndB4xLJVNEF+XQQrvBfO",
+ "dQXpxB1FCrruratRbCniFQ5bF/S6+SzSSrPpIiryeImkqhR9yJ9XMqdNdexqX90W7+qJbgiIiEG0uopX",
+ "BfXNBb02cIEttKwqJ920vFgHn9pXzANPth6UL8qg6i8lNyYoNgL3DEkTLF+OF8QiDuPAKFzV3kfnjbIo",
+ "hNTV7VaTQWwa9PrDh1xaL+I6JiylOl2Uxc5H/A9mRf1U5x/NzZWUO9ZcvYkPO7ItrjX8z45Z55NvJuJ1",
+ "JnQxI69ZKsU+lvdwL4ZaKw3LTiiY6/p7T5i/T23efV0EzxmHZCl4LIfvz/j1NX6MFhDBkKKezhjc1de3",
+ "Rd+a628tqznPEOJ2Vfh+JVLwlRQzrd1KKKoQalR9IP7Xt6VRwbG+Jo2fdz42/nReJa6lWpQ6E+dBXxSS",
+ "7L0fYnUOioAMVxtXwkWrmIYiGSiDXd+eIieAQwy1q6+RLKxBqZfeRKx/UtXOjPGshSTIvaXiDKSqhH7p",
+ "HUBu9Tvfj35n+LkHlKtU24hVqa6XK3gjMrDjNhP0x7JycJGBS2reZQYqficuNvuXoW7XEmRSWs4XmpQF",
+ "0SImMtUdE5pa+mnLt6pt9S5tK1/X7QwIzSXQbE2mAJyIqdl0s24woQpdrr3c5bi6eNnGel2FFCkoBVni",
+ "U5VsW1qVKh6lNL0BTrhwXHA1C1GCzKi85GIte7N5oboVKl8tt/IEcRxMd9XDpt90gO3Jw2OkEoh/RVHt",
+ "IpZFDk7xEgHhQJigXoB95vPzk1z2+MoiMaQ7UnjUfj1mS3yEOeVCQSp4pvrLA2+7tlgCK9iLMjsIbkrs",
+ "puLAPULBK6r0O6cwDqsoBqW2zBQb6hn3lXkxI/9aFXnpjJ0aeslVqapKME5OhCy2Bw6rDXO9gVU1Fyrj",
+ "/diVIKoFKRVsG7kPSsH4DlgqLFCsA1U7FsLqbg5Ti1EnRHZB2VhEDYhNCznyrQLohmrgnoUwVQO6qjra",
+ "xJypEDlQbvV5oijM/dNJyat+fWA6sq339S912y5yuZRMSNczASpUEriVn1vIKvQgX1BF3DrIkp46PcLc",
+ "ZUbqrtlcxgTtdskmzDfX8si0Cq/AlkvaFljD69+4Z63L0cLfKNL1IsGWU+jbcExE/iZjL9rGhc/ozNBU",
+ "EQTs1aTFFe6cU6aTmZCuYj2daZAR+bZV44Qy7aM2rAZPC2ePIziCIyhuHFf5vI5qdmll7BJ8ajNz+t2Y",
+ "CjPVj0IOcuJu+kVQpknJNcuD8LOKx/z6hOhb7vmWe77lnm+551vu+ZZ7vuWeb7nnz809fynn6cTTae+/",
+ "EktzR26jq78rj8TqpnuWH4UEw6K7NET93tUaaI4bYjk+roVQvWHfWBJZiVKmQFIzHeOkyKnhhmClqzQY",
+ "zdxJPlGbK4qM6ZiogkcPydFf97031cJ5/TTb3nWpkonS6xzuuai2qmqpD28DbiDootuol358ugyXF4Tl",
+ "QJSB1UtsfQBnkBtO3jpqECOLdKWjY6D5CwebLcJRoy6lGe3DuCGTObAtaREUf8e9UkUoet61ykrOaK76",
+ "60ra8Za0iGWsqOi0FZuQNDwX2bqF7ubUdvAAm4he+1QxTuU64izZQe8OamhhiI9DrK7c9+naPf+6SNtF",
+ "s20YFuNcJKjopdyE5VGXt+rAOkNZt8tZC0+iRZXbfl6jaoFDXB0MPvszIe9svy8b6oMrclespsxfjeG5",
+ "2bIiGtjWMFSO9HyrcTke8NHbi3d/bBA7K1MgTCviMG6VmEZz4ImjLclUZOukQZmaD0zGFFUKltPtj0xI",
+ "Gl1mNfeumC+bn6Av80IcBJvbRG5DfFgljrb2EF7rzDqM7FbQwhEd5Q0g/rmpbx+FDJdAHOmJyc7tmjAX",
+ "pGf1NOtbmnZL04Lb2HrsGXd+1G0iMrkcTZNrWfJ+cvZyBWlp5g0v6V11z5AshOhKNzT3GUzL+RwzxHW0",
+ "0FibBMdjgn8hKme3O5TAXQw57OBVaqGrRsW3h+sSjsAd+K6QZC5FWdyzlaL4GhWcy4LytTdqGMl/WeYW",
+ "hjaTx/XSUOvF3LViocEBlWv9erm3Xv0WaJ/cK9r83YKFnFNF7PlCRkqOgZyxWIcVH56dzg59vOI1Bd6Y",
+ "n87uN7I7N+8Q6u9P2Xk2VoacAmSiV9xeqGZ2SBtTYW/u5DZ91p/jRXhrK7L1ENhufEBNELY/DDIgWfgy",
+ "tEqY+KehSU/f0fOwIMp1MY3DpfUF4JtYSa+Rei+GjZSCZilVqNTgoM+FPP3MvKReHUa0yFVG7Uh03LC0",
+ "GDjuIJayGZDqpfJyiuHcgn/p3Bh15NO+S8jUgMatYvd7Uew+95dPEYppxluX09pw8E4OIFP0XK94lErt",
+ "FLbuZ5//chhablteqydGZ/imQ0ZQddMalCEvCPUZklPBlZZlqk84RYNWsLFuCazKTNfPSr3wTeI21YjJ",
+ "0w11wikm0K3MXFGWagYRA/aPAJ5jU+V8Dkq3KPEM4IS7VoyTkjONcy1ZKkViI5LMc20o+sS2XNI1mdEc",
+ "LbJ/gBRkaoSIsBYMmoeUZnnuvEPMNETMTjjVJAdD9F8zw9CZ4bwFofJ4snhXQSEejDwHDoqpJK6d/cl+",
+ "xXhft31vBUBjhf3s4/RuOtDXr51lvSs/PHB12g4PsPRO7RfSWfuNOQssGU+iSGZefOdf1cYtctfweB6B",
+ "7tUeJu7UT7hhprUgSOipvhw6tI26nbtob0cLaxoH0bL9+r2+j2UrnIvEiIx0bn6fM70op5NULHd8FsOd",
+ "uagyGu5kFJaC47dshxZsRxWQ7pw92MIfXIFekQi5un25v6MgogAPzG2pDh6T9LfPvuddvoayuF93Ldyt",
+ "Dqe3lWdvK8/e1ia9rTx7e7q3lWdv67Le1mX9s9ZlnWzkEF2WwK01W3RHtUmJhNTOXBHwsFmjukvXKsn0",
+ "hJDjhaH/1LwBcAaS5iSlCpTPNY+JMOcLTVSZpgDZ3glPGiuxGRHNxHfr/1ox96Tc3X0EZPdeu4/VWwSU",
+ "t9sXWVX8hKYm8gM5GZ2MOiNJWIozcCmlsXlWovuL7bV12P+vGvdn2Tm6JV1b5cqCFgWYZ02VsxlLmQV5",
+ "LowwMBctb20u8AtIszibo40wbYvZIjzRy92XHHMJkGJMd/d9Pwxqx2wroNPOpHWjuRe/XwZ7E53qHtj1",
+ "0cCNY3cI4i3JuAmS8cWJxndUYeO2mMZXtqHQkNqolnWVxDwFpGzG0pjeyfNIVp1saDOOAGkpmV7jC0cL",
+ "9vspmP+/N3TcFte0j18p89HeaKF1sbezg1XkF0LpnZF5mupvqvXRvA90bkdwj0sh2RnWwnn/6f8FAAD/",
+ "/yBFSh9LQwEA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index b52d1286f..e11e7750e 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -87,6 +87,12 @@ type Account struct {
// The count of all assets that have been opted in, equivalent to the count of AssetHolding objects held by this account.
TotalAssetsOptedIn uint64 `json:"total-assets-opted-in"`
+ // \[tbxb\] The total number of bytes used by this account's app's box keys and values.
+ TotalBoxBytes *uint64 `json:"total-box-bytes,omitempty"`
+
+ // \[tbx\] The number of existing boxes created by this account's app.
+ TotalBoxes *uint64 `json:"total-boxes,omitempty"`
+
// The count of all apps (AppParams objects) created by this account.
TotalCreatedApps uint64 `json:"total-created-apps"`
@@ -259,6 +265,23 @@ type AssetParams struct {
UrlB64 *[]byte `json:"url-b64,omitempty"`
}
+// Box defines model for Box.
+type Box struct {
+
+ // \[name\] box name, base64 encoded
+ Name []byte `json:"name"`
+
+ // \[value\] box value, base64 encoded.
+ Value []byte `json:"value"`
+}
+
+// BoxDescriptor defines model for BoxDescriptor.
+type BoxDescriptor struct {
+
+ // Base64 encoded box name
+ Name []byte `json:"name"`
+}
+
// BuildVersion defines model for BuildVersion.
type BuildVersion struct {
Branch string `json:"branch"`
@@ -643,6 +666,14 @@ type BlockResponse struct {
Cert *map[string]interface{} `json:"cert,omitempty"`
}
+// BoxResponse defines model for BoxResponse.
+type BoxResponse Box
+
+// BoxesResponse defines model for BoxesResponse.
+type BoxesResponse struct {
+ Boxes []BoxDescriptor `json:"boxes"`
+}
+
// CatchpointAbortResponse defines model for CatchpointAbortResponse.
type CatchpointAbortResponse struct {
@@ -869,6 +900,20 @@ type GetPendingTransactionsByAddressParams struct {
Format *string `json:"format,omitempty"`
}
+// GetApplicationBoxByNameParams defines parameters for GetApplicationBoxByName.
+type GetApplicationBoxByNameParams struct {
+
+ // A box name, in the goal app call arg form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.
+ Name string `json:"name"`
+}
+
+// GetApplicationBoxesParams defines parameters for GetApplicationBoxes.
+type GetApplicationBoxesParams struct {
+
+ // Max number of box names to return. If max is not set, or max == 0, returns all box-names.
+ Max *uint64 `json:"max,omitempty"`
+}
+
// GetBlockParams defines parameters for GetBlock.
type GetBlockParams struct {
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index dadaf5263..dcf3841f6 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -51,8 +51,14 @@ import (
"github.com/algorand/go-codec/codec"
)
-const maxTealSourceBytes = 1e5
-const maxTealDryrunBytes = 1e5
+// max compiled teal program is currently 8k
+// but we allow for comments, spacing, and repeated consts
+// in the source teal, allow up to 200kb
+const maxTealSourceBytes = 200_000
+
+// With the ability to hold unlimited assets DryrunRequests can
+// become quite large, allow up to 1mb
+const maxTealDryrunBytes = 1_000_000
// Handlers is an implementation to the V2 route handler interface defined by the generated code.
type Handlers struct {
@@ -65,6 +71,8 @@ type Handlers struct {
type LedgerForAPI interface {
LookupAccount(round basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, basics.MicroAlgos, error)
LookupLatest(addr basics.Address) (basics.AccountData, basics.Round, basics.MicroAlgos, error)
+ LookupKv(round basics.Round, key string) ([]byte, error)
+ LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error)
ConsensusParams(r basics.Round) (config.ConsensusParams, error)
Latest() basics.Round
LookupAsset(rnd basics.Round, addr basics.Address, aidx basics.AssetIndex) (ledgercore.AssetResource, error)
@@ -356,7 +364,7 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params
}
totalResults := record.TotalAssets + record.TotalAssetParams + record.TotalAppLocalStates + record.TotalAppParams
if totalResults > maxResults {
- v2.Log.Info("MaxAccountAPIResults limit %d exceeded, total results %d", maxResults, totalResults)
+ v2.Log.Infof("MaxAccountAPIResults limit %d exceeded, total results %d", maxResults, totalResults)
extraData := map[string]interface{}{
"max-results": maxResults,
"total-assets-opted-in": record.TotalAssets,
@@ -461,6 +469,8 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres
NumUint: record.TotalAppSchema.NumUint,
},
AppsTotalExtraPages: numOrNil(uint64(record.TotalExtraAppPages)),
+ TotalBoxes: numOrNil(record.TotalBoxes),
+ TotalBoxBytes: numOrNil(record.TotalBoxBytes),
MinBalance: record.MinBalance(&consensus).Raw,
}
response := generated.AccountResponse(account)
@@ -1192,6 +1202,97 @@ func (v2 *Handlers) GetApplicationByID(ctx echo.Context, applicationID uint64) e
return ctx.JSON(http.StatusOK, response)
}
+func applicationBoxesMaxKeys(requestedMax uint64, algodMax uint64) uint64 {
+ if requestedMax == 0 {
+ if algodMax == 0 {
+ return math.MaxUint64 // unlimited results when both requested and algod max are 0
+ }
+ return algodMax + 1 // API limit dominates. Increments by 1 to test if more than max supported results exist.
+ }
+
+ if requestedMax <= algodMax || algodMax == 0 {
+ return requestedMax // requested limit dominates
+ }
+
+ return algodMax + 1 // API limit dominates. Increments by 1 to test if more than max supported results exist.
+}
+
+// GetApplicationBoxes returns the box names of an application
+// (GET /v2/applications/{application-id}/boxes)
+func (v2 *Handlers) GetApplicationBoxes(ctx echo.Context, applicationID uint64, params generated.GetApplicationBoxesParams) error {
+ appIdx := basics.AppIndex(applicationID)
+ ledger := v2.Node.LedgerForAPI()
+ lastRound := ledger.Latest()
+ keyPrefix := logic.MakeBoxKey(appIdx, "")
+
+ requestedMax, algodMax := nilToZero(params.Max), v2.Node.Config().MaxAPIBoxPerApplication
+ max := applicationBoxesMaxKeys(requestedMax, algodMax)
+
+ if max != math.MaxUint64 {
+ record, _, _, err := ledger.LookupAccount(ledger.Latest(), appIdx.Address())
+ if err != nil {
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+ if record.TotalBoxes > max {
+ return ctx.JSON(http.StatusBadRequest, generated.ErrorResponse{
+ Message: "Result limit exceeded",
+ Data: &map[string]interface{}{
+ "max-api-box-per-application": algodMax,
+ "max": requestedMax,
+ "total-boxes": record.TotalBoxes,
+ },
+ })
+ }
+ }
+
+ boxKeys, err := ledger.LookupKeysByPrefix(lastRound, keyPrefix, math.MaxUint64)
+ if err != nil {
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+
+ prefixLen := len(keyPrefix)
+ responseBoxes := make([]generated.BoxDescriptor, len(boxKeys))
+ for i, boxKey := range boxKeys {
+ responseBoxes[i] = generated.BoxDescriptor{
+ Name: []byte(boxKey[prefixLen:]),
+ }
+ }
+ response := generated.BoxesResponse{Boxes: responseBoxes}
+ return ctx.JSON(http.StatusOK, response)
+}
+
+// GetApplicationBoxByName returns the value of an application's box
+// (GET /v2/applications/{application-id}/box)
+func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID uint64, params generated.GetApplicationBoxByNameParams) error {
+ appIdx := basics.AppIndex(applicationID)
+ ledger := v2.Node.LedgerForAPI()
+ lastRound := ledger.Latest()
+
+ encodedBoxName := params.Name
+ boxNameBytes, err := logic.NewAppCallBytes(encodedBoxName)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ boxName, err := boxNameBytes.Raw()
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ value, err := ledger.LookupKv(lastRound, logic.MakeBoxKey(appIdx, string(boxName)))
+ if err != nil {
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+ if value == nil {
+ return notFound(ctx, errors.New(errBoxDoesNotExist), errBoxDoesNotExist, v2.Log)
+ }
+
+ response := generated.BoxResponse{
+ Name: boxName,
+ Value: value,
+ }
+ return ctx.JSON(http.StatusOK, response)
+}
+
// GetAssetByID returns application information by app idx.
// (GET /v2/assets/{asset-id})
func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error {
diff --git a/daemon/algod/api/server/v2/handlers_test.go b/daemon/algod/api/server/v2/handlers_test.go
new file mode 100644
index 000000000..98635897d
--- /dev/null
+++ b/daemon/algod/api/server/v2/handlers_test.go
@@ -0,0 +1,41 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package v2
+
+import (
+ "math"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestApplicationBoxesMaxKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Response size limited by request supplied value.
+ require.Equal(t, uint64(5), applicationBoxesMaxKeys(5, 7))
+ require.Equal(t, uint64(5), applicationBoxesMaxKeys(5, 0))
+
+ // Response size limited by algod max.
+ require.Equal(t, uint64(2), applicationBoxesMaxKeys(5, 1))
+ require.Equal(t, uint64(2), applicationBoxesMaxKeys(0, 1))
+
+ // Response size _not_ limited
+ require.Equal(t, uint64(math.MaxUint64), applicationBoxesMaxKeys(0, 0))
+}
diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go
index b7b52eeda..0b61be0bb 100644
--- a/daemon/algod/api/server/v2/test/handlers_resources_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go
@@ -42,6 +42,7 @@ import (
type mockLedger struct {
accounts map[basics.Address]basics.AccountData
+ kvstore map[string][]byte
latest basics.Round
blocks []bookkeeping.Block
}
@@ -61,6 +62,17 @@ func (l *mockLedger) LookupLatest(addr basics.Address) (basics.AccountData, basi
return ad, l.latest, basics.MicroAlgos{Raw: 0}, nil
}
+func (l *mockLedger) LookupKv(round basics.Round, key string) ([]byte, error) {
+ if value, ok := l.kvstore[key]; ok {
+ return value, nil
+ }
+ return nil, fmt.Errorf("Key %v does not exist", key)
+}
+
+func (l *mockLedger) LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error) {
+ panic("not implemented")
+}
+
func (l *mockLedger) ConsensusParams(r basics.Round) (config.ConsensusParams, error) {
return config.Consensus[protocol.ConsensusFuture], nil
}
diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go
index fc9dbe2f4..93f95c2aa 100644
--- a/daemon/algod/api/server/v2/utils.go
+++ b/daemon/algod/api/server/v2/utils.go
@@ -90,6 +90,13 @@ func byteOrNil(data []byte) *[]byte {
return &data
}
+func nilToZero(numPtr *uint64) uint64 {
+ if numPtr == nil {
+ return 0
+ }
+ return *numPtr
+}
+
func computeCreatableIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, payset []transactions.SignedTxnWithAD) (cidx *uint64) {
// Compute transaction index in block
offset := -1
diff --git a/ledger/internal/export_test.go b/daemon/algod/api/swagger.go
index a772d53b9..e20506e2c 100644
--- a/ledger/internal/export_test.go
+++ b/daemon/algod/api/swagger.go
@@ -14,15 +14,12 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package api
-// Export for testing only. See
-// https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd for a
-// nice explanation. tl;dr: Since some of our testing is in logic_test package,
-// we export some extra things to make testing easier there. But we do it in a
-// _test.go file, so they are only exported during testing.
+import _ "embed" // for embedding purposes
-// In order to generate a block
-func (eval *BlockEvaluator) SetGenerate(g bool) {
- eval.generate = g
-}
+// SwaggerSpecJSONEmbed is a string that is pulled from algod.oas2.json via go-embed
+// for use with the GET /swagger.json endpoint
+//
+//go:embed algod.oas2.json
+var SwaggerSpecJSONEmbed string
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index e1c82a892..c8892513e 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -1015,7 +1015,7 @@ func (db *participationDB) Flush(timeout time.Duration) error {
// Close attempts to flush with db.flushTimeout, then waits for the write queue for another db.flushTimeout.
func (db *participationDB) Close() {
if err := db.Flush(db.flushTimeout); err != nil {
- db.log.Warnf("participationDB unhandled error during Close/Flush: %w", err)
+ db.log.Warnf("participationDB unhandled error during Close/Flush: %v", err)
}
db.store.Close()
diff --git a/data/account/registeryDbOps.go b/data/account/registeryDbOps.go
index 282008eb7..f83055d3d 100644
--- a/data/account/registeryDbOps.go
+++ b/data/account/registeryDbOps.go
@@ -85,7 +85,7 @@ func (d deleteStateProofKeysOp) apply(db *participationDB) error {
})
if err != nil {
- db.log.Warnf("participationDB unable to delete stateProof key: %w", err)
+ db.log.Warnf("participationDB unable to delete stateProof key: %v", err)
}
return err
}
diff --git a/data/accountManager.go b/data/accountManager.go
index aa5064e09..39998a09d 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -67,7 +67,7 @@ func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participati
if part.OverlapsInterval(rnd, rnd) {
partRndSecrets, err := manager.registry.GetForRound(part.ParticipationID, rnd)
if err != nil {
- manager.log.Warnf("error while loading round secrets from participation registry: %w", err)
+ manager.log.Warnf("error while loading round secrets from participation registry: %v", err)
continue
}
out = append(out, partRndSecrets)
@@ -198,7 +198,7 @@ func (manager *AccountManager) DeleteOldKeys(latestHdr bookkeeping.BlockHeader,
// Delete expired records from participation registry.
if err := manager.registry.DeleteExpired(latestHdr.Round, agreementProto); err != nil {
- manager.log.Warnf("error while deleting expired records from participation registry: %w", err)
+ manager.log.Warnf("error while deleting expired records from participation registry: %v", err)
}
}
@@ -212,6 +212,6 @@ func (manager *AccountManager) Record(account basics.Address, round basics.Round
// This function updates a cache in the ParticipationRegistry, we must call Flush to persist the changes.
err := manager.registry.Record(account, round, participationType)
if err != nil {
- manager.log.Warnf("node.Record: Account %v not able to record participation (%d) on round %d: %w", account, participationType, round, err)
+ manager.log.Warnf("node.Record: Account %v not able to record participation (%d) on round %d: %v", account, participationType, round, err)
}
}
diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go
index 53bcc659f..c39ca5833 100644
--- a/data/basics/msgp_gen.go
+++ b/data/basics/msgp_gen.go
@@ -201,8 +201,8 @@ import (
func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0009Len := uint32(17)
- var zb0009Mask uint32 /* 18 bits */
+ zb0009Len := uint32(19)
+ var zb0009Mask uint32 /* 20 bits */
if (*z).MicroAlgos.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x2
@@ -247,30 +247,38 @@ func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
zb0009Len--
zb0009Mask |= 0x800
}
- if (*z).TotalExtraAppPages == 0 {
+ if (*z).TotalBoxes == 0 {
zb0009Len--
zb0009Mask |= 0x1000
}
- if ((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0) {
+ if (*z).TotalBoxBytes == 0 {
zb0009Len--
zb0009Mask |= 0x2000
}
- if (*z).VoteID.MsgIsZero() {
+ if (*z).TotalExtraAppPages == 0 {
zb0009Len--
zb0009Mask |= 0x4000
}
- if (*z).VoteFirstValid == 0 {
+ if ((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0) {
zb0009Len--
zb0009Mask |= 0x8000
}
- if (*z).VoteKeyDilution == 0 {
+ if (*z).VoteID.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x10000
}
- if (*z).VoteLastValid == 0 {
+ if (*z).VoteFirstValid == 0 {
zb0009Len--
zb0009Mask |= 0x20000
}
+ if (*z).VoteKeyDilution == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x40000
+ }
+ if (*z).VoteLastValid == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x80000
+ }
// variable map header, size zb0009Len
o = msgp.AppendMapHeader(o, zb0009Len)
if zb0009Len != 0 {
@@ -414,11 +422,21 @@ func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
o = (*z).StateProofID.MarshalMsg(o)
}
if (zb0009Mask & 0x1000) == 0 { // if not empty
+ // string "tbx"
+ o = append(o, 0xa3, 0x74, 0x62, 0x78)
+ o = msgp.AppendUint64(o, (*z).TotalBoxes)
+ }
+ if (zb0009Mask & 0x2000) == 0 { // if not empty
+ // string "tbxb"
+ o = append(o, 0xa4, 0x74, 0x62, 0x78, 0x62)
+ o = msgp.AppendUint64(o, (*z).TotalBoxBytes)
+ }
+ if (zb0009Mask & 0x4000) == 0 { // if not empty
// string "teap"
o = append(o, 0xa4, 0x74, 0x65, 0x61, 0x70)
o = msgp.AppendUint32(o, (*z).TotalExtraAppPages)
}
- if (zb0009Mask & 0x2000) == 0 { // if not empty
+ if (zb0009Mask & 0x8000) == 0 { // if not empty
// string "tsch"
o = append(o, 0xa4, 0x74, 0x73, 0x63, 0x68)
// omitempty: check for empty values
@@ -445,22 +463,22 @@ func (z *AccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).TotalAppSchema.NumUint)
}
}
- if (zb0009Mask & 0x4000) == 0 { // if not empty
+ if (zb0009Mask & 0x10000) == 0 { // if not empty
// string "vote"
o = append(o, 0xa4, 0x76, 0x6f, 0x74, 0x65)
o = (*z).VoteID.MarshalMsg(o)
}
- if (zb0009Mask & 0x8000) == 0 { // if not empty
+ if (zb0009Mask & 0x20000) == 0 { // if not empty
// string "voteFst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x46, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).VoteFirstValid))
}
- if (zb0009Mask & 0x10000) == 0 { // if not empty
+ if (zb0009Mask & 0x40000) == 0 { // if not empty
// string "voteKD"
o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x4b, 0x44)
o = msgp.AppendUint64(o, (*z).VoteKeyDilution)
}
- if (zb0009Mask & 0x20000) == 0 { // if not empty
+ if (zb0009Mask & 0x80000) == 0 { // if not empty
// string "voteLst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x4c, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).VoteLastValid))
@@ -876,6 +894,22 @@ func (z *AccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
if zb0009 > 0 {
+ zb0009--
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxes")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxBytes")
+ return
+ }
+ }
+ if zb0009 > 0 {
err = msgp.ErrTooManyArrayFields(zb0009)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
@@ -1252,6 +1286,18 @@ func (z *AccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalExtraAppPages")
return
}
+ case "tbx":
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxes")
+ return
+ }
+ case "tbxb":
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxBytes")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -1304,13 +1350,13 @@ func (z *AccountData) Msgsize() (s int) {
s += 0 + zb0007.Msgsize() + zb0008.Msgsize()
}
}
- s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size
+ s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *AccountData) MsgIsZero() bool {
- return ((*z).Status == 0) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).StateProofID.MsgIsZero()) && ((*z).VoteFirstValid == 0) && ((*z).VoteLastValid == 0) && ((*z).VoteKeyDilution == 0) && (len((*z).AssetParams) == 0) && (len((*z).Assets) == 0) && ((*z).AuthAddr.MsgIsZero()) && (len((*z).AppLocalStates) == 0) && (len((*z).AppParams) == 0) && (((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0)) && ((*z).TotalExtraAppPages == 0)
+ return ((*z).Status == 0) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).StateProofID.MsgIsZero()) && ((*z).VoteFirstValid == 0) && ((*z).VoteLastValid == 0) && ((*z).VoteKeyDilution == 0) && (len((*z).AssetParams) == 0) && (len((*z).Assets) == 0) && ((*z).AuthAddr.MsgIsZero()) && (len((*z).AppLocalStates) == 0) && (len((*z).AppParams) == 0) && (((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0)) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalBoxes == 0) && ((*z).TotalBoxBytes == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -2899,8 +2945,8 @@ func (z *AssetParams) MsgIsZero() bool {
func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0009Len := uint32(18)
- var zb0009Mask uint32 /* 20 bits */
+ zb0009Len := uint32(20)
+ var zb0009Mask uint32 /* 22 bits */
if (*z).Addr.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x4
@@ -2949,30 +2995,38 @@ func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
zb0009Len--
zb0009Mask |= 0x2000
}
- if (*z).AccountData.TotalExtraAppPages == 0 {
+ if (*z).AccountData.TotalBoxes == 0 {
zb0009Len--
zb0009Mask |= 0x4000
}
- if ((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0) {
+ if (*z).AccountData.TotalBoxBytes == 0 {
zb0009Len--
zb0009Mask |= 0x8000
}
- if (*z).AccountData.VoteID.MsgIsZero() {
+ if (*z).AccountData.TotalExtraAppPages == 0 {
zb0009Len--
zb0009Mask |= 0x10000
}
- if (*z).AccountData.VoteFirstValid == 0 {
+ if ((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0) {
zb0009Len--
zb0009Mask |= 0x20000
}
- if (*z).AccountData.VoteKeyDilution == 0 {
+ if (*z).AccountData.VoteID.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x40000
}
- if (*z).AccountData.VoteLastValid == 0 {
+ if (*z).AccountData.VoteFirstValid == 0 {
zb0009Len--
zb0009Mask |= 0x80000
}
+ if (*z).AccountData.VoteKeyDilution == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x100000
+ }
+ if (*z).AccountData.VoteLastValid == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x200000
+ }
// variable map header, size zb0009Len
o = msgp.AppendMapHeader(o, zb0009Len)
if zb0009Len != 0 {
@@ -3121,11 +3175,21 @@ func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
o = (*z).AccountData.StateProofID.MarshalMsg(o)
}
if (zb0009Mask & 0x4000) == 0 { // if not empty
+ // string "tbx"
+ o = append(o, 0xa3, 0x74, 0x62, 0x78)
+ o = msgp.AppendUint64(o, (*z).AccountData.TotalBoxes)
+ }
+ if (zb0009Mask & 0x8000) == 0 { // if not empty
+ // string "tbxb"
+ o = append(o, 0xa4, 0x74, 0x62, 0x78, 0x62)
+ o = msgp.AppendUint64(o, (*z).AccountData.TotalBoxBytes)
+ }
+ if (zb0009Mask & 0x10000) == 0 { // if not empty
// string "teap"
o = append(o, 0xa4, 0x74, 0x65, 0x61, 0x70)
o = msgp.AppendUint32(o, (*z).AccountData.TotalExtraAppPages)
}
- if (zb0009Mask & 0x8000) == 0 { // if not empty
+ if (zb0009Mask & 0x20000) == 0 { // if not empty
// string "tsch"
o = append(o, 0xa4, 0x74, 0x73, 0x63, 0x68)
// omitempty: check for empty values
@@ -3152,22 +3216,22 @@ func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).AccountData.TotalAppSchema.NumUint)
}
}
- if (zb0009Mask & 0x10000) == 0 { // if not empty
+ if (zb0009Mask & 0x40000) == 0 { // if not empty
// string "vote"
o = append(o, 0xa4, 0x76, 0x6f, 0x74, 0x65)
o = (*z).AccountData.VoteID.MarshalMsg(o)
}
- if (zb0009Mask & 0x20000) == 0 { // if not empty
+ if (zb0009Mask & 0x80000) == 0 { // if not empty
// string "voteFst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x46, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).AccountData.VoteFirstValid))
}
- if (zb0009Mask & 0x40000) == 0 { // if not empty
+ if (zb0009Mask & 0x100000) == 0 { // if not empty
// string "voteKD"
o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x4b, 0x44)
o = msgp.AppendUint64(o, (*z).AccountData.VoteKeyDilution)
}
- if (zb0009Mask & 0x80000) == 0 { // if not empty
+ if (zb0009Mask & 0x200000) == 0 { // if not empty
// string "voteLst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x4c, 0x73, 0x74)
o = msgp.AppendUint64(o, uint64((*z).AccountData.VoteLastValid))
@@ -3591,6 +3655,22 @@ func (z *BalanceRecord) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
if zb0009 > 0 {
+ zb0009--
+ (*z).AccountData.TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxes")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ (*z).AccountData.TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxBytes")
+ return
+ }
+ }
+ if zb0009 > 0 {
err = msgp.ErrTooManyArrayFields(zb0009)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
@@ -3973,6 +4053,18 @@ func (z *BalanceRecord) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalExtraAppPages")
return
}
+ case "tbx":
+ (*z).AccountData.TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxes")
+ return
+ }
+ case "tbxb":
+ (*z).AccountData.TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxBytes")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -4025,13 +4117,13 @@ func (z *BalanceRecord) Msgsize() (s int) {
s += 0 + zb0007.Msgsize() + zb0008.Msgsize()
}
}
- s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size
+ s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *BalanceRecord) MsgIsZero() bool {
- return ((*z).Addr.MsgIsZero()) && ((*z).AccountData.Status == 0) && ((*z).AccountData.MicroAlgos.MsgIsZero()) && ((*z).AccountData.RewardsBase == 0) && ((*z).AccountData.RewardedMicroAlgos.MsgIsZero()) && ((*z).AccountData.VoteID.MsgIsZero()) && ((*z).AccountData.SelectionID.MsgIsZero()) && ((*z).AccountData.StateProofID.MsgIsZero()) && ((*z).AccountData.VoteFirstValid == 0) && ((*z).AccountData.VoteLastValid == 0) && ((*z).AccountData.VoteKeyDilution == 0) && (len((*z).AccountData.AssetParams) == 0) && (len((*z).AccountData.Assets) == 0) && ((*z).AccountData.AuthAddr.MsgIsZero()) && (len((*z).AccountData.AppLocalStates) == 0) && (len((*z).AccountData.AppParams) == 0) && (((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0)) && ((*z).AccountData.TotalExtraAppPages == 0)
+ return ((*z).Addr.MsgIsZero()) && ((*z).AccountData.Status == 0) && ((*z).AccountData.MicroAlgos.MsgIsZero()) && ((*z).AccountData.RewardsBase == 0) && ((*z).AccountData.RewardedMicroAlgos.MsgIsZero()) && ((*z).AccountData.VoteID.MsgIsZero()) && ((*z).AccountData.SelectionID.MsgIsZero()) && ((*z).AccountData.StateProofID.MsgIsZero()) && ((*z).AccountData.VoteFirstValid == 0) && ((*z).AccountData.VoteLastValid == 0) && ((*z).AccountData.VoteKeyDilution == 0) && (len((*z).AccountData.AssetParams) == 0) && (len((*z).AccountData.Assets) == 0) && ((*z).AccountData.AuthAddr.MsgIsZero()) && (len((*z).AccountData.AppLocalStates) == 0) && (len((*z).AccountData.AppParams) == 0) && (((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0)) && ((*z).AccountData.TotalExtraAppPages == 0) && ((*z).AccountData.TotalBoxes == 0) && ((*z).AccountData.TotalBoxBytes == 0)
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go
index 2b0a2699e..06d7c4b6b 100644
--- a/data/basics/userBalance.go
+++ b/data/basics/userBalance.go
@@ -42,20 +42,16 @@ const (
// These two accounts also have additional Algo transfer restrictions.
NotParticipating
- // MaxEncodedAccountDataSize is a rough estimate for the worst-case scenario we're going to have of the account data and address serialized.
- // this number is verified by the TestEncodedAccountDataSize function.
- MaxEncodedAccountDataSize = 850000
-
// encodedMaxAssetsPerAccount is the decoder limit of number of assets stored per account.
// it's being verified by the unit test TestEncodedAccountAllocationBounds to align
// with config.Consensus[protocol.ConsensusCurrentVersion].MaxAssetsPerAccount; note that the decoded
// parameter is used only for protecting the decoder against malicious encoded account data stream.
- // protocol-specific constains would be tested once the decoding is complete.
+	// protocol-specific constraints would be tested once the decoding is complete.
encodedMaxAssetsPerAccount = 1024
// EncodedMaxAppLocalStates is the decoder limit for number of opted-in apps in a single account.
// It is verified in TestEncodedAccountAllocationBounds to align with
- // config.Consensus[protocol.ConsensusCurrentVersion].MaxppsOptedIn
+ // config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsOptedIn
EncodedMaxAppLocalStates = 64
// EncodedMaxAppParams is the decoder limit for number of created apps in a single account.
@@ -228,6 +224,12 @@ type AccountData struct {
// TotalExtraAppPages stores the extra length in pages (MaxAppProgramLen bytes per page)
// requested for app program by this account
TotalExtraAppPages uint32 `codec:"teap"`
+
+	// TotalBoxes is the total number of boxes associated with this account, which implies it is an app account.
+ TotalBoxes uint64 `codec:"tbx"`
+
+ // TotalBoxBytes stores the sum of all len(keys) and len(values) of Boxes
+ TotalBoxBytes uint64 `codec:"tbxb"`
}
// AppLocalState stores the LocalState associated with an application. It also
@@ -475,6 +477,7 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) (res MicroAlgos)
u.TotalAppSchema,
uint64(len(u.AppParams)), uint64(len(u.AppLocalStates)),
uint64(u.TotalExtraAppPages),
+ u.TotalBoxes, u.TotalBoxBytes,
)
}
@@ -487,6 +490,7 @@ func MinBalance(
totalAppSchema StateSchema,
totalAppParams uint64, totalAppLocalStates uint64,
totalExtraAppPages uint64,
+ totalBoxes uint64, totalBoxBytes uint64,
) (res MicroAlgos) {
var min uint64
@@ -514,6 +518,14 @@ func MinBalance(
extraAppProgramLenCost := MulSaturate(proto.AppFlatParamsMinBalance, totalExtraAppPages)
min = AddSaturate(min, extraAppProgramLenCost)
+ // Base MinBalance for each created box
+ boxBaseCost := MulSaturate(proto.BoxFlatMinBalance, totalBoxes)
+ min = AddSaturate(min, boxBaseCost)
+
+ // Per byte MinBalance for boxes
+ boxByteCost := MulSaturate(proto.BoxByteMinBalance, totalBoxBytes)
+ min = AddSaturate(min, boxByteCost)
+
res.Raw = min
return res
}
diff --git a/data/basics/userBalance_test.go b/data/basics/userBalance_test.go
index 050ea2882..347dadfe6 100644
--- a/data/basics/userBalance_test.go
+++ b/data/basics/userBalance_test.go
@@ -131,102 +131,6 @@ func getSampleAccountData() AccountData {
}
}
-func TestEncodedAccountDataSize(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- maxStateSchema := StateSchema{
- NumUint: 0x1234123412341234,
- NumByteSlice: 0x1234123412341234,
- }
- ad := getSampleAccountData()
- ad.TotalAppSchema = maxStateSchema
-
- // TODO after applications enabled: change back to protocol.ConsensusCurrentVersion
- currentConsensusParams := config.Consensus[protocol.ConsensusFuture]
-
- for assetCreatorAssets := 0; assetCreatorAssets < currentConsensusParams.MaxAssetsPerAccount; assetCreatorAssets++ {
- ap := AssetParams{
- Total: 0x1234123412341234,
- Decimals: 0x12341234,
- DefaultFrozen: true,
- UnitName: makeString(currentConsensusParams.MaxAssetUnitNameBytes),
- AssetName: makeString(currentConsensusParams.MaxAssetNameBytes),
- URL: makeString(currentConsensusParams.MaxAssetURLBytes),
- Manager: Address(crypto.Hash([]byte{1, byte(assetCreatorAssets)})),
- Reserve: Address(crypto.Hash([]byte{2, byte(assetCreatorAssets)})),
- Freeze: Address(crypto.Hash([]byte{3, byte(assetCreatorAssets)})),
- Clawback: Address(crypto.Hash([]byte{4, byte(assetCreatorAssets)})),
- }
- copy(ap.MetadataHash[:], makeString(32))
- ad.AssetParams[AssetIndex(0x1234123412341234-assetCreatorAssets)] = ap
- }
-
- for assetHolderAssets := 0; assetHolderAssets < currentConsensusParams.MaxAssetsPerAccount; assetHolderAssets++ {
- ah := AssetHolding{
- Amount: 0x1234123412341234,
- Frozen: true,
- }
- ad.Assets[AssetIndex(0x1234123412341234-assetHolderAssets)] = ah
- }
-
- maxProg := []byte(makeString(config.MaxAvailableAppProgramLen))
- maxGlobalState := make(TealKeyValue, currentConsensusParams.MaxGlobalSchemaEntries)
- maxLocalState := make(TealKeyValue, currentConsensusParams.MaxLocalSchemaEntries)
-
- for globalKey := uint64(0); globalKey < currentConsensusParams.MaxGlobalSchemaEntries; globalKey++ {
- prefix := fmt.Sprintf("%d|", globalKey)
- padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
- maxKey := prefix + padding
- maxValue := TealValue{
- Type: TealBytesType,
- Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
- }
- maxGlobalState[maxKey] = maxValue
- }
-
- for localKey := uint64(0); localKey < currentConsensusParams.MaxLocalSchemaEntries; localKey++ {
- prefix := fmt.Sprintf("%d|", localKey)
- padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
- maxKey := prefix + padding
- maxValue := TealValue{
- Type: TealBytesType,
- Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
- }
- maxLocalState[maxKey] = maxValue
- }
- maxAppsCreate := currentConsensusParams.MaxAppsCreated
- if maxAppsCreate == 0 {
- maxAppsCreate = config.Consensus[protocol.ConsensusV30].MaxAppsCreated
- }
- for appCreatorApps := 0; appCreatorApps < maxAppsCreate; appCreatorApps++ {
- ap := AppParams{
- ApprovalProgram: maxProg,
- ClearStateProgram: maxProg,
- GlobalState: maxGlobalState,
- StateSchemas: StateSchemas{
- LocalStateSchema: maxStateSchema,
- GlobalStateSchema: maxStateSchema,
- },
- }
- ad.AppParams[AppIndex(0x1234123412341234-appCreatorApps)] = ap
- }
-
- maxAppsOptedIn := currentConsensusParams.MaxAppsOptedIn
- if maxAppsOptedIn == 0 {
- maxAppsOptedIn = config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
- }
- for appHolderApps := 0; appHolderApps < maxAppsOptedIn; appHolderApps++ {
- ls := AppLocalState{
- KeyValue: maxLocalState,
- Schema: maxStateSchema,
- }
- ad.AppLocalStates[AppIndex(0x1234123412341234-appHolderApps)] = ls
- }
-
- encoded := ad.MarshalMsg(nil)
- require.GreaterOrEqual(t, MaxEncodedAccountDataSize, len(encoded))
-}
-
func TestEncodedAccountAllocationBounds(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -248,6 +152,7 @@ func TestEncodedAccountAllocationBounds(t *testing.T) {
if proto.MaxGlobalSchemaEntries > EncodedMaxKeyValueEntries {
require.Failf(t, "proto.MaxGlobalSchemaEntries > encodedMaxKeyValueEntries", "protocol version = %s", protoVer)
}
+ // There is no protocol limit to the number of Boxes per account, so that allocbound is not checked.
}
}
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index d5df86811..36726e744 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -745,7 +745,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
case transactions.TxnDeadError:
asmStats.InvalidCount++
stats.ExpiredCount++
- case transactions.MinFeeError:
+ case transactions.MinFeeError, *ledgercore.LeaseInLedgerError:
asmStats.InvalidCount++
stats.RemovedInvalidCount++
pool.log.Infof("Cannot re-add pending transaction to pool: %v", err)
diff --git a/data/transactions/application.go b/data/transactions/application.go
index 0db7e72b9..70fd774df 100644
--- a/data/transactions/application.go
+++ b/data/transactions/application.go
@@ -46,6 +46,12 @@ const (
// can contain. Its value is verified against consensus parameters in
// TestEncodedAppTxnAllocationBounds
encodedMaxForeignAssets = 32
+
+ // encodedMaxBoxes sets the allocation bound for the maximum
+ // number of Boxes that a transaction decoded off of the wire
+ // can contain. Its value is verified against consensus parameters in
+ // TestEncodedAppTxnAllocationBounds
+ encodedMaxBoxes = 32
)
// OnCompletion is an enum representing some layer 1 side effect that an
@@ -115,6 +121,12 @@ type ApplicationCallTxnFields struct {
// by the executing ApprovalProgram or ClearStateProgram.
ForeignApps []basics.AppIndex `codec:"apfa,allocbound=encodedMaxForeignApps"`
+ // Boxes are the boxes that can be accessed by this transaction (and others
+ // in the same group). The Index in the BoxRef is the slot of ForeignApps
+ // that the name is associated with (shifted by 1, so 0 indicates "current
+ // app")
+ Boxes []BoxRef `codec:"apbx,allocbound=encodedMaxBoxes"`
+
// ForeignAssets are asset IDs for assets whose AssetParams
// (and since v4, Holdings) may be read by the executing
// ApprovalProgram or ClearStateProgram.
@@ -155,6 +167,14 @@ type ApplicationCallTxnFields struct {
// method below!
}
+// BoxRef names a box by an Index into the ForeignApps array of the transaction
+// (with 0 indicating the called app itself) and the box Name.
+type BoxRef struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Index uint64 `codec:"i"`
+ Name []byte `codec:"n"`
+}
+
// Empty indicates whether or not all the fields in the
// ApplicationCallTxnFields are zeroed out
func (ac *ApplicationCallTxnFields) Empty() bool {
@@ -176,6 +196,9 @@ func (ac *ApplicationCallTxnFields) Empty() bool {
if ac.ForeignAssets != nil {
return false
}
+ if ac.Boxes != nil {
+ return false
+ }
if ac.LocalStateSchema != (basics.StateSchema{}) {
return false
}
diff --git a/data/transactions/application_test.go b/data/transactions/application_test.go
index a076e4b8d..64777185c 100644
--- a/data/transactions/application_test.go
+++ b/data/transactions/application_test.go
@@ -33,7 +33,7 @@ func TestApplicationCallFieldsNotChanged(t *testing.T) {
af := ApplicationCallTxnFields{}
s := reflect.ValueOf(&af).Elem()
- if s.NumField() != 12 {
+ if s.NumField() != 13 {
t.Errorf("You added or removed a field from transactions.ApplicationCallTxnFields. " +
"Please ensure you have updated the Empty() method and then " +
"fix this test")
@@ -76,6 +76,10 @@ func TestApplicationCallFieldsEmpty(t *testing.T) {
a.False(ac.Empty())
ac.LocalStateSchema = basics.StateSchema{}
+ ac.Boxes = make([]BoxRef, 1)
+ a.False(ac.Empty())
+
+ ac.Boxes = nil
ac.GlobalStateSchema = basics.StateSchema{NumUint: 1}
a.False(ac.Empty())
@@ -115,6 +119,9 @@ func TestEncodedAppTxnAllocationBounds(t *testing.T) {
if proto.MaxAppTxnForeignAssets > encodedMaxForeignAssets {
require.Failf(t, "proto.MaxAppTxnForeignAssets > encodedMaxForeignAssets", "protocol version = %s", protoVer)
}
+ if proto.MaxAppBoxReferences > encodedMaxBoxes {
+ require.Failf(t, "proto.MaxAppBoxReferences > encodedMaxBoxes", "protocol version = %s", protoVer)
+ }
}
}
diff --git a/data/transactions/json_test.go b/data/transactions/json_test.go
new file mode 100644
index 000000000..547224fb6
--- /dev/null
+++ b/data/transactions/json_test.go
@@ -0,0 +1,96 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package transactions_test
+
+/* These tests are pretty low-value now. They test something very basic about
+   our codec for encoding []byte as base64 strings in json. The tests were
+ written when BoxRef contained a string instead of []byte. When that was true,
+ these tests were more important because there was work that had to be done to
+ make it happen (implement MarshalJSON and UnmarshalJSON) */
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+func decode(t *testing.T, data string, v interface{}) {
+ t.Helper()
+ err := protocol.DecodeJSON([]byte(data), v)
+ require.NoErrorf(t, err, "Cannot decode %s", data)
+}
+
+func compact(data []byte) string {
+ return strings.ReplaceAll(strings.ReplaceAll(string(data), " ", ""), "\n", "")
+}
+
+// TestJsonMarshal ensures that BoxRef names are b64 encoded, since they may contain non-printable bytes.
+func TestJsonMarshal(t *testing.T) {
+ marshal := protocol.EncodeJSON(transactions.BoxRef{Index: 4, Name: []byte("joe")})
+ require.Equal(t, `{"i":4,"n":"am9l"}`, compact(marshal))
+
+ marshal = protocol.EncodeJSON(transactions.BoxRef{Index: 0, Name: []byte("joe")})
+ require.Equal(t, `{"n":"am9l"}`, compact(marshal))
+
+ marshal = protocol.EncodeJSON(transactions.BoxRef{Index: 1, Name: []byte("")})
+ require.Equal(t, `{"i":1}`, compact(marshal))
+
+ marshal = protocol.EncodeJSON(transactions.BoxRef{Index: 0, Name: []byte("")})
+ require.Equal(t, `{}`, compact(marshal))
+}
+
+// TestJsonUnmarshal ensures that BoxRef unmarshaling expects b64 names
+func TestJsonUnmarshal(t *testing.T) {
+ var br transactions.BoxRef
+
+ decode(t, `{"i":4,"n":"am9l"}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 4, Name: []byte("joe")}, br)
+
+ br = transactions.BoxRef{}
+ decode(t, `{"n":"am9l"}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 0, Name: []byte("joe")}, br)
+
+ br = transactions.BoxRef{}
+ decode(t, `{"i":4}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 4, Name: nil}, br)
+
+ br = transactions.BoxRef{}
+ decode(t, `{}`, &br)
+ require.Equal(t, transactions.BoxRef{Index: 0, Name: nil}, br)
+}
+
+// TestTxnJson tests a few more things about how our Transactions get JSON
+// encoded. These things could change without breaking the protocol, should stay
+// the same for the sake of REST API compatibility.
+func TestTxnJson(t *testing.T) {
+ txn := txntest.Txn{
+ Sender: basics.Address{0x01, 0x02, 0x03},
+ }
+ marshal := protocol.EncodeJSON(txn.Txn())
+ require.Contains(t, compact(marshal), `"snd":"AEBA`)
+
+ txn = txntest.Txn{
+ Boxes: []transactions.BoxRef{{Index: 3, Name: []byte("john")}},
+ }
+ marshal = protocol.EncodeJSON(txn.Txn())
+ require.Contains(t, compact(marshal), `"apbx":[{"i":3,"n":"am9obg=="}]`)
+}
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index e577ea93f..850d02a3e 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -13,12 +13,13 @@ application call transactions.
Programs have read-only access to the transaction they are attached
to, the other transactions in their atomic transaction group, and a
few global values. In addition, _Smart Contracts_ have access to
-limited state that is global to the application and per-account local
-state for each account that has opted-in to the application. For both
-types of program, approval is signaled by finishing with the stack
-containing a single non-zero uint64 value, though `return` can be used
-to signal an early approval which approves based only upon the top
-stack value being a non-zero uint64 value.
+limited state that is global to the application, per-account local
+state for each account that has opted-in to the application, and
+additional per-application arbitrary state in named _boxes_. For both types of
+program, approval is signaled by finishing with the stack containing a
+single non-zero uint64 value, though `return` can be used to signal an
+early approval which approves based only upon the top stack value
+being a non-zero uint64 value.
## The Stack
@@ -29,8 +30,15 @@ arguments from it and pushing results to it. Some operations have
_immediate_ arguments that are encoded directly into the instruction,
rather than coming from the stack.
-The maximum stack depth is 1000. If the stack depth is
-exceeded or if a byte-array element exceed 4096 bytes, the program fails.
+The maximum stack depth is 1000. If the stack depth is exceeded or if
+a byte-array element exceeds 4096 bytes, the program fails. If an
+opcode is documented to access a position in the stack that does not
+exist, the operation fails. Most often, this is an attempt to access
+an element below the stack -- the simplest example is an operation
+like `concat` which expects two arguments on the stack. If the stack
+has fewer than two elements, the operation fails. Some operations, like
+`frame_dig` and `proto` could fail because of an attempt to access
+above the current stack.
## Scratch Space
@@ -38,7 +46,9 @@ In addition to the stack there are 256 positions of scratch
space. Like stack values, scratch locations may be uint64s or
byte-arrays. Scratch locations are initialized as uint64 zero. Scratch
space is accessed by the `load(s)` and `store(s)` opcodes which move
-data from or to scratch space, respectively.
+data from or to scratch space, respectively. Application calls may
+inspect the final scratch space of earlier application calls in the
+same group using `gload(s)(s)`
## Versions
@@ -116,11 +126,13 @@ while being evaluated. If the program exceeds its budget, it fails.
Smart Contracts are executed in ApplicationCall transactions. Like
Smart Signatures, contracts indicate success by leaving a single
-non-zero integer on the stack. A failed Smart Contract call is not a
-valid transaction, thus not written to the blockchain. Nodes maintain
-a list of transactions that would succeed, given the current state of
-the blockchain, called the transaction pool. Nodes draw from the pool
-if they are called upon to propose a block.
+non-zero integer on the stack. A failed Smart Contract call to an
+ApprovalProgram is not a valid transaction, thus not written to the
+blockchain. An ApplicationCall with OnComplete set to ClearState
+invokes the ClearStateProgram, rather than the usual
+ApprovalProgram. If the ClearStateProgram fails, application state
+changes are rolled back, but the transaction still succeeds, and the
+Sender's local state for the called application is removed.
Smart Contracts have access to everything a Smart Signature may access
(see previous section), as well as the ability to examine blockchain
@@ -134,14 +146,15 @@ blockchain.
Smart contracts have limits on their execution cost (700, consensus
parameter MaxAppProgramCost). Before v4, this was a static limit on
-the cost of all the instructions in the program. Since then, the cost
+the cost of all the instructions in the program. Starting in v4, the cost
is tracked dynamically during execution and must not exceed
MaxAppProgramCost. Beginning with v5, programs costs are pooled and
tracked dynamically across app executions in a group. If `n`
application invocations appear in a group, then the total execution
-cost of such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
+cost of all such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
application calls become possible, and each such call increases the
-pooled budget by MaxAppProgramCost.
+pooled budget by MaxAppProgramCost at the time the inner group is submitted
+with `itxn_submit`.
Executions of the ClearStateProgram are more stringent, in order to
ensure that applications may be closed out, but that applications also
@@ -158,7 +171,7 @@ ClearStateProgram fails, and the app's state _is cleared_.
Smart contracts have limits on the amount of blockchain state they
may examine. Opcodes may only access blockchain resources such as
-Accounts, Assets, and contract state if the given resource is
+Accounts, Assets, Boxes, and contract state if the given resource is
_available_.
* A resource in the "foreign array" fields of the ApplicationCall
@@ -181,6 +194,14 @@ _available_.
* Since v7, the account associated with any contract present in the
`txn.ForeignApplications` field is _available_.
+ * A Box is _available_ to an Approval Program if _any_ transaction in
+ the same group contains a box reference (`txn.Boxes`) that denotes
+ the box. A box reference contains an index `i`, and name `n`. The
+ index refers to the `ith` application in the transaction's
+ ForeignApplications array, with the usual convention that 0
+ indicates the application ID of the app called by that
+ transaction. No box is ever _available_ to a ClearStateProgram.
+
## Constants
Constants can be pushed onto the stack in two different ways:
@@ -383,6 +404,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| `intc_2` | constant 2 from intcblock |
| `intc_3` | constant 3 from intcblock |
| `pushint uint` | immediate UINT |
+| `pushints uint ...` | push sequence of immediate uints to stack in the order they appear (first uint being deepest) |
| `bytecblock bytes ...` | prepare block of byte-array constants for use by bytec |
| `bytec i` | Ith constant from bytecblock |
| `bytec_0` | constant 0 from bytecblock |
@@ -390,6 +412,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| `bytec_2` | constant 2 from bytecblock |
| `bytec_3` | constant 3 from bytecblock |
| `pushbytes bytes` | immediate BYTES |
+| `pushbytess bytes ...` | push sequences of immediate byte arrays to stack (first byte array being deepest) |
| `bzero` | zero filled byte-array of length A |
| `arg n` | Nth LogicSig argument |
| `arg_0` | LogicSig argument 0 |
@@ -567,11 +590,20 @@ App fields used in the `app_params_get` opcode.
Account fields used in the `acct_params_get` opcode.
-| Index | Name | Type | Notes |
-| - | ------ | -- | --------- |
-| 0 | AcctBalance | uint64 | Account balance in microalgos |
-| 1 | AcctMinBalance | uint64 | Minimum required blance for account, in microalgos |
-| 2 | AcctAuthAddr | []byte | Address the account is rekeyed to. |
+| Index | Name | Type | In | Notes |
+| - | ------ | -- | - | --------- |
+| 0 | AcctBalance | uint64 | | Account balance in microalgos |
+| 1 | AcctMinBalance | uint64 | | Minimum required balance for account, in microalgos |
+| 2 | AcctAuthAddr | []byte | | Address the account is rekeyed to. |
+| 3 | AcctTotalNumUint | uint64 | v8 | The total number of uint64 values allocated by this account in Global and Local States. |
+| 4 | AcctTotalNumByteSlice | uint64 | v8 | The total number of byte array values allocated by this account in Global and Local States. |
+| 5 | AcctTotalExtraAppPages | uint64 | v8 | The number of extra app code pages used by this account. |
+| 6 | AcctTotalAppsCreated | uint64 | v8 | The number of existing apps created by this account. |
+| 7 | AcctTotalAppsOptedIn | uint64 | v8 | The number of apps this account is opted into. |
+| 8 | AcctTotalAssetsCreated | uint64 | v8 | The number of existing ASAs created by this account. |
+| 9 | AcctTotalAssets | uint64 | v8 | The number of ASAs held by this account (including ASAs this account created). |
+| 10 | AcctTotalBoxes | uint64 | v8 | The number of existing boxes created by this account's app. |
+| 11 | AcctTotalBoxBytes | uint64 | v8 | The total number of bytes used by this account's app's box keys and values. |
### Flow Control
@@ -584,16 +616,16 @@ Account fields used in the `acct_params_get` opcode.
| `b target` | branch unconditionally to TARGET |
| `return` | use A as success value; end |
| `pop` | discard A |
-| `popn n` | Remove N values from the top of the stack |
+| `popn n` | remove N values from the top of the stack |
| `dup` | duplicate A |
| `dup2` | duplicate A and B |
| `dupn n` | duplicate A, N times |
| `dig n` | Nth value from the top of the stack. dig 0 is equivalent to dup |
-| `bury n` | Replace the Nth value from the top of the stack. bury 0 fails. |
+| `bury n` | replace the Nth value from the top of the stack with A. bury 0 fails. |
| `cover n` | remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N. |
| `uncover n` | remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N. |
| `frame_dig i` | Nth (signed) value from the frame pointer. |
-| `frame_bury i` | Replace the Nth (signed) value from the frame pointer in the stack |
+| `frame_bury i` | replace the Nth (signed) value from the frame pointer in the stack with A |
| `swap` | swaps A and B on stack |
| `select` | selects one of two values based on top-of-stack: B if C != 0, else A |
| `assert` | immediately fail unless A is a non-zero number |
@@ -601,13 +633,14 @@ Account fields used in the `acct_params_get` opcode.
| `proto a r` | Prepare top call frame for a retsub that will assume A args and R return values. |
| `retsub` | pop the top instruction from the call stack and branch to it |
| `switch target ...` | branch to the Ath label. Continue at following instruction if index A exceeds the number of labels. |
+| `match target ...` | given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found. |
### State Access
| Opcode | Description |
| - | -- |
-| `balance` | get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. |
-| `min_balance` | get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. |
+| `balance` | balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit` |
+| `min_balance` | minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change. |
| `app_opted_in` | 1 if account A is opted in to application B, else 0 |
| `app_local_get` | local state of the key B in the current application in account A |
| `app_local_get_ex` | X is the local state of application B, key C in account A. Y is 1 if key existed, else 0 |
@@ -624,6 +657,28 @@ Account fields used in the `acct_params_get` opcode.
| `log` | write A to log state of the current application |
| `block f` | field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive) |
+### Box Access
+
+All box related opcodes fail immediately if used in a
+ClearStateProgram. This behavior is meant to discourage Smart Contract
+authors from depending upon the availability of boxes in a ClearState
+transaction, as accounts using ClearState are under no requirement to
+furnish appropriate Box References. Authors would do well to keep the
+same issue in mind with respect to the availability of Accounts,
+Assets, and Apps, though State Access opcodes _are_ allowed in
+ClearState programs because the current application and sender account
+are sure to be _available_.
+
+| Opcode | Description |
+| - | -- |
+| `box_create` | create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1 |
+| `box_extract` | read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size. |
+| `box_replace` | write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size. |
+| `box_del` | delete box named A if it exists. Return 1 if A existed, 0 otherwise |
+| `box_len` | X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0. |
+| `box_get` | X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0. |
+| `box_put` | replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist |
+
### Inner Transactions
The following opcodes allow for "inner transactions". Inner
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index c323b51de..6dfcfe966 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -13,12 +13,13 @@ application call transactions.
Programs have read-only access to the transaction they are attached
to, the other transactions in their atomic transaction group, and a
few global values. In addition, _Smart Contracts_ have access to
-limited state that is global to the application and per-account local
-state for each account that has opted-in to the application. For both
-types of program, approval is signaled by finishing with the stack
-containing a single non-zero uint64 value, though `return` can be used
-to signal an early approval which approves based only upon the top
-stack value being a non-zero uint64 value.
+limited state that is global to the application, per-account local
+state for each account that has opted-in to the application, and
+additional per-application arbitrary state in named _boxes_. For both types of
+program, approval is signaled by finishing with the stack containing a
+single non-zero uint64 value, though `return` can be used to signal an
+early approval which approves based only upon the top stack value
+being a non-zero uint64 value.
## The Stack
@@ -29,8 +30,15 @@ arguments from it and pushing results to it. Some operations have
_immediate_ arguments that are encoded directly into the instruction,
rather than coming from the stack.
-The maximum stack depth is 1000. If the stack depth is
-exceeded or if a byte-array element exceed 4096 bytes, the program fails.
+The maximum stack depth is 1000. If the stack depth is exceeded or if
+a byte-array element exceeds 4096 bytes, the program fails. If an
+opcode is documented to access a position in the stack that does not
+exist, the operation fails. Most often, this is an attempt to access
+an element below the stack -- the simplest example is an operation
+like `concat` which expects two arguments on the stack. If the stack
+has fewer than two elements, the operation fails. Some operations, like
+`frame_dig` and `proto` could fail because of an attempt to access
+above the current stack.
## Scratch Space
@@ -38,7 +46,9 @@ In addition to the stack there are 256 positions of scratch
space. Like stack values, scratch locations may be uint64s or
byte-arrays. Scratch locations are initialized as uint64 zero. Scratch
space is accessed by the `load(s)` and `store(s)` opcodes which move
-data from or to scratch space, respectively.
+data from or to scratch space, respectively. Application calls may
+inspect the final scratch space of earlier application calls in the
+same group using `gload(s)(s)`
## Versions
@@ -116,11 +126,13 @@ while being evaluated. If the program exceeds its budget, it fails.
Smart Contracts are executed in ApplicationCall transactions. Like
Smart Signatures, contracts indicate success by leaving a single
-non-zero integer on the stack. A failed Smart Contract call is not a
-valid transaction, thus not written to the blockchain. Nodes maintain
-a list of transactions that would succeed, given the current state of
-the blockchain, called the transaction pool. Nodes draw from the pool
-if they are called upon to propose a block.
+non-zero integer on the stack. A failed Smart Contract call to an
+ApprovalProgram is not a valid transaction, thus not written to the
+blockchain. An ApplicationCall with OnComplete set to ClearState
+invokes the ClearStateProgram, rather than the usual
+ApprovalProgram. If the ClearStateProgram fails, application state
+changes are rolled back, but the transaction still succeeds, and the
+Sender's local state for the called application is removed.
Smart Contracts have access to everything a Smart Signature may access
(see previous section), as well as the ability to examine blockchain
@@ -134,14 +146,15 @@ blockchain.
Smart contracts have limits on their execution cost (700, consensus
parameter MaxAppProgramCost). Before v4, this was a static limit on
-the cost of all the instructions in the program. Since then, the cost
+the cost of all the instructions in the program. Starting in v4, the cost
is tracked dynamically during execution and must not exceed
MaxAppProgramCost. Beginning with v5, programs costs are pooled and
tracked dynamically across app executions in a group. If `n`
application invocations appear in a group, then the total execution
-cost of such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
+cost of all such calls must not exceed `n`*MaxAppProgramCost. In v6, inner
application calls become possible, and each such call increases the
-pooled budget by MaxAppProgramCost.
+pooled budget by MaxAppProgramCost at the time the inner group is submitted
+with `itxn_submit`.
Executions of the ClearStateProgram are more stringent, in order to
ensure that applications may be closed out, but that applications also
@@ -158,7 +171,7 @@ ClearStateProgram fails, and the app's state _is cleared_.
Smart contracts have limits on the amount of blockchain state they
may examine. Opcodes may only access blockchain resources such as
-Accounts, Assets, and contract state if the given resource is
+Accounts, Assets, Boxes, and contract state if the given resource is
_available_.
* A resource in the "foreign array" fields of the ApplicationCall
@@ -181,6 +194,14 @@ _available_.
* Since v7, the account associated with any contract present in the
`txn.ForeignApplications` field is _available_.
+ * A Box is _available_ to an Approval Program if _any_ transaction in
+ the same group contains a box reference (`txn.Boxes`) that denotes
+ the box. A box reference contains an index `i`, and name `n`. The
+ index refers to the `ith` application in the transaction's
+ ForeignApplications array, with the usual convention that 0
+ indicates the application ID of the app called by that
+ transaction. No box is ever _available_ to a ClearStateProgram.
+
## Constants
Constants can be pushed onto the stack in two different ways:
@@ -317,6 +338,20 @@ Account fields used in the `acct_params_get` opcode.
@@ State_Access.md @@
+### Box Access
+
+All box related opcodes fail immediately if used in a
+ClearStateProgram. This behavior is meant to discourage Smart Contract
+authors from depending upon the availability of boxes in a ClearState
+transaction, as accounts using ClearState are under no requirement to
+furnish appropriate Box References. Authors would do well to keep the
+same issue in mind with respect to the availability of Accounts,
+Assets, and Apps, though State Access opcodes _are_ allowed in
+ClearState programs because the current application and sender account
+are sure to be _available_.
+
+@@ Box_Access.md @@
+
### Inner Transactions
The following opcodes allow for "inner transactions". Inner
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index d093c0823..cd2bd5842 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -242,7 +242,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
## intcblock uint ...
-- Opcode: 0x20 {varuint length} [{varuint value}, ...]
+- Opcode: 0x20 {varuint count} [{varuint value}, ...]
- Stack: ... &rarr; ...
- prepare block of uint64 constants for use by intc
@@ -280,7 +280,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
## bytecblock bytes ...
-- Opcode: 0x26 {varuint length} [({varuint value length} bytes), ...]
+- Opcode: 0x26 {varuint count} [({varuint value length} bytes), ...]
- Stack: ... &rarr; ...
- prepare block of byte-array constants for use by bytec
@@ -614,14 +614,14 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
- Opcode: 0x45 {uint8 depth}
- Stack: ..., A &rarr; ...
-- Replace the Nth value from the top of the stack. bury 0 fails.
+- replace the Nth value from the top of the stack with A. bury 0 fails.
- Availability: v8
## popn n
- Opcode: 0x46 {uint8 stack depth}
- Stack: ..., [N items] &rarr; ...
-- Remove N values from the top of the stack
+- remove N values from the top of the stack
- Availability: v8
## dupn n
@@ -834,7 +834,7 @@ Almost all smart contracts should use simpler and smaller methods (such as the [
- Opcode: 0x60
- Stack: ..., A &rarr; ..., uint64
-- get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.
+- balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`
- Availability: v2
- Mode: Application
@@ -1013,18 +1013,27 @@ params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag
`acct_params` Fields:
-| Index | Name | Type | Notes |
-| - | ------ | -- | --------- |
-| 0 | AcctBalance | uint64 | Account balance in microalgos |
-| 1 | AcctMinBalance | uint64 | Minimum required blance for account, in microalgos |
-| 2 | AcctAuthAddr | []byte | Address the account is rekeyed to. |
+| Index | Name | Type | In | Notes |
+| - | ------ | -- | - | --------- |
+| 0 | AcctBalance | uint64 | | Account balance in microalgos |
+| 1 | AcctMinBalance | uint64 | | Minimum required balance for account, in microalgos |
+| 2 | AcctAuthAddr | []byte | | Address the account is rekeyed to. |
+| 3 | AcctTotalNumUint | uint64 | v8 | The total number of uint64 values allocated by this account in Global and Local States. |
+| 4 | AcctTotalNumByteSlice | uint64 | v8 | The total number of byte array values allocated by this account in Global and Local States. |
+| 5 | AcctTotalExtraAppPages | uint64 | v8 | The number of extra app code pages used by this account. |
+| 6 | AcctTotalAppsCreated | uint64 | v8 | The number of existing apps created by this account. |
+| 7 | AcctTotalAppsOptedIn | uint64 | v8 | The number of apps this account is opted into. |
+| 8 | AcctTotalAssetsCreated | uint64 | v8 | The number of existing ASAs created by this account. |
+| 9 | AcctTotalAssets | uint64 | v8 | The number of ASAs held by this account (including ASAs this account created). |
+| 10 | AcctTotalBoxes | uint64 | v8 | The number of existing boxes created by this account's app. |
+| 11 | AcctTotalBoxBytes | uint64 | v8 | The total number of bytes used by this account's app's box keys and values. |
## min_balance
- Opcode: 0x78
- Stack: ..., A &rarr; ..., uint64
-- get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.
+- minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.
- Availability: v3
- Mode: Application
@@ -1048,6 +1057,24 @@ pushbytes args are not added to the bytecblock during assembly processes
pushint args are not added to the intcblock during assembly processes
+## pushbytess bytes ...
+
+- Opcode: 0x82 {varuint count} [({varuint value length} bytes), ...]
+- Stack: ... &rarr; ..., [N items]
+- push sequences of immediate byte arrays to stack (first byte array being deepest)
+- Availability: v8
+
+pushbytess args are not added to the bytecblock during assembly processes
+
+## pushints uint ...
+
+- Opcode: 0x83 {varuint count} [{varuint value}, ...]
+- Stack: ... &rarr; ..., [N items]
+- push sequence of immediate uints to stack in the order they appear (first uint being deepest)
+- Availability: v8
+
+pushints args are not added to the intcblock during assembly processes
+
## ed25519verify_bare
- Opcode: 0x84
@@ -1094,7 +1121,7 @@ Fails unless the last instruction executed was a `callsub`.
- Opcode: 0x8c {int8 frame slot}
- Stack: ..., A &rarr; ...
-- Replace the Nth (signed) value from the frame pointer in the stack
+- replace the Nth (signed) value from the frame pointer in the stack with A
- Availability: v8
## switch target ...
@@ -1104,6 +1131,15 @@ Fails unless the last instruction executed was a `callsub`.
- branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.
- Availability: v8
+## match target ...
+
+- Opcode: 0x8e {uint8 branch count} [{int16 branch offset, big-endian}, ...]
+- Stack: ..., [A1, A2, ..., AN], B &rarr; ...
+- given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.
+- Availability: v8
+
+`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.
+
## shl
- Opcode: 0x90
@@ -1378,6 +1414,68 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
- Availability: v6
- Mode: Application
+## box_create
+
+- Opcode: 0xb9
+- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
+- create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1
+- Availability: v8
+- Mode: Application
+
+Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.
+
+## box_extract
+
+- Opcode: 0xba
+- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
+- read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.
+- Availability: v8
+- Mode: Application
+
+## box_replace
+
+- Opcode: 0xbb
+- Stack: ..., A: []byte, B: uint64, C: []byte &rarr; ...
+- write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.
+- Availability: v8
+- Mode: Application
+
+## box_del
+
+- Opcode: 0xbc
+- Stack: ..., A: []byte &rarr; ..., uint64
+- delete box named A if it exists. Return 1 if A existed, 0 otherwise
+- Availability: v8
+- Mode: Application
+
+## box_len
+
+- Opcode: 0xbd
+- Stack: ..., A: []byte &rarr; ..., X: uint64, Y: uint64
+- X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.
+- Availability: v8
+- Mode: Application
+
+## box_get
+
+- Opcode: 0xbe
+- Stack: ..., A: []byte &rarr; ..., X: []byte, Y: uint64
+- X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.
+- Availability: v8
+- Mode: Application
+
+For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`
+
+## box_put
+
+- Opcode: 0xbf
+- Stack: ..., A: []byte, B: []byte &rarr; ...
+- replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist
+- Availability: v8
+- Mode: Application
+
+For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`
+
## txnas f
- Opcode: 0xc0 {uint8 transaction field index}
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index fd3c79bf4..e25e806be 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -500,7 +500,7 @@ func (ops *OpStream) ByteLiteral(val []byte) {
func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("int needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
// After backBranchEnabledVersion, control flow is confusing, so if there's
@@ -548,7 +548,7 @@ func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
// Explicit invocation of const lookup and push
func asmIntC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("intc operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
constIndex, err := byteImm(args[0], "constant")
if err != nil {
@@ -559,7 +559,7 @@ func asmIntC(ops *OpStream, spec *OpSpec, args []string) error {
}
func asmByteC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("bytec operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
constIndex, err := byteImm(args[0], "constant")
if err != nil {
@@ -571,7 +571,7 @@ func asmByteC(ops *OpStream, spec *OpSpec, args []string) error {
func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.errorf("%s needs one argument", spec.Name)
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
val, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
@@ -583,16 +583,23 @@ func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.Write(scratch[:vlen])
return nil
}
+
+func asmPushInts(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ _, err := asmIntImmArgs(ops, args)
+ return err
+}
+
func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
- return ops.errorf("%s operation needs byte literal argument", spec.Name)
+ return ops.errorf("%s needs byte literal argument", spec.Name)
}
val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
}
if len(args) != consumed {
- return ops.errorf("%s operation with extraneous argument", spec.Name)
+ return ops.errorf("%s with extraneous argument", spec.Name)
}
ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
@@ -602,6 +609,12 @@ func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
+func asmPushBytess(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ _, err := asmByteImmArgs(ops, args)
+ return err
+}
+
func base32DecodeAnyPadding(x string) (val []byte, err error) {
val, err = base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(x)
if err != nil {
@@ -751,7 +764,7 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte "this is a string\n"
func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
- return ops.errorf("%s operation needs byte literal argument", spec.Name)
+ return ops.errorf("%s needs byte literal argument", spec.Name)
}
// After backBranchEnabledVersion, control flow is confusing, so if there's
@@ -781,7 +794,7 @@ func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
return ops.error(err)
}
if len(args) != consumed {
- return ops.errorf("%s operation with extraneous argument", spec.Name)
+ return ops.errorf("%s with extraneous argument", spec.Name)
}
ops.ByteLiteral(val)
return nil
@@ -812,8 +825,7 @@ func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
return ops.error("Unable to parse method signature")
}
-func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
- ops.pending.WriteByte(spec.Opcode)
+func asmIntImmArgs(ops *OpStream, args []string) ([]uint64, error) {
ivals := make([]uint64, len(args))
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
@@ -825,9 +837,17 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
}
l = binary.PutUvarint(scratch[:], cu)
ops.pending.Write(scratch[:l])
- if !ops.known.deadcode {
- ivals[i] = cu
- }
+ ivals[i] = cu
+ }
+
+ return ivals, nil
+}
+
+func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ ivals, err := asmIntImmArgs(ops, args)
+ if err != nil {
+ return err
}
if !ops.known.deadcode {
// If we previously processed an `int`, we thought we could insert our
@@ -843,8 +863,7 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
- ops.pending.WriteByte(spec.Opcode)
+func asmByteImmArgs(ops *OpStream, args []string) ([][]byte, error) {
bvals := make([][]byte, 0, len(args))
rest := args
for len(rest) > 0 {
@@ -854,7 +873,7 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// intcblock, but parseBinaryArgs would have
// to return a useful consumed value even in
// the face of errors. Hard.
- return ops.error(err)
+ return nil, ops.error(err)
}
bvals = append(bvals, val)
rest = rest[consumed:]
@@ -867,6 +886,17 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.Write(scratch[:l])
ops.pending.Write(bv)
}
+
+ return bvals, nil
+}
+
+func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.pending.WriteByte(spec.Opcode)
+ bvals, err := asmByteImmArgs(ops, args)
+ if err != nil {
+ return err
+ }
+
if !ops.known.deadcode {
// If we previously processed a pseudo `byte`, we thought we could
// insert our own bytecblock, but now we see a manual one.
@@ -884,7 +914,7 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// parses base32-with-checksum account address strings into a byte literal
func asmAddr(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("addr operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
addr, err := basics.UnmarshalChecksumAddress(args[0])
if err != nil {
@@ -896,7 +926,7 @@ func asmAddr(ops *OpStream, spec *OpSpec, args []string) error {
func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("arg operation needs one argument")
+ return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
}
val, err := byteImm(args[0], "argument")
if err != nil {
@@ -921,7 +951,7 @@ func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
- return ops.error("branch operation needs label argument")
+ return ops.errorf("%s needs a single label argument", spec.Name)
}
ops.referToLabel(ops.pending.Len()+1, args[0], ops.pending.Len()+spec.Size)
@@ -1454,6 +1484,24 @@ func typeDupN(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, err
return nil, copies, nil
}
+func typePushBytess(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ types := make(StackTypes, len(args))
+ for i := range types {
+ types[i] = StackBytes
+ }
+
+ return nil, types, nil
+}
+
+func typePushInts(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+ types := make(StackTypes, len(args))
+ for i := range types {
+ types[i] = StackUint64
+ }
+
+ return nil, types, nil
+}
+
func joinIntsOnOr(singularTerminator string, list ...int) string {
if len(list) == 1 {
switch list[0] {
@@ -2519,7 +2567,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
out += fmt.Sprintf("0x%s // %s", hex.EncodeToString(constant), guessByteFormat(constant))
pc = int(end)
case immInts:
- intc, nextpc, err := parseIntcblock(dis.program, pc)
+ intc, nextpc, err := parseIntImmArgs(dis.program, pc)
if err != nil {
return "", err
}
@@ -2533,7 +2581,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
}
pc = nextpc
case immBytess:
- bytec, nextpc, err := parseBytecBlock(dis.program, pc)
+ bytec, nextpc, err := parseByteImmArgs(dis.program, pc)
if err != nil {
return "", err
}
@@ -2590,13 +2638,13 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
return out, nil
}
-var errShortIntcblock = errors.New("intcblock ran past end of program")
-var errTooManyIntc = errors.New("intcblock with too many items")
+var errShortIntImmArgs = errors.New("const int list ran past end of program")
+var errTooManyIntc = errors.New("const int list with too many items")
-func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err error) {
+func parseIntImmArgs(program []byte, pos int) (intc []uint64, nextpc int, err error) {
numInts, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
- err = fmt.Errorf("could not decode intcblock size at pc=%d", pos)
+ err = fmt.Errorf("could not decode length of int list at pc=%d", pos)
return
}
pos += bytesUsed
@@ -2607,7 +2655,7 @@ func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err err
intc = make([]uint64, numInts)
for i := uint64(0); i < numInts; i++ {
if pos >= len(program) {
- err = errShortIntcblock
+ err = errShortIntImmArgs
return
}
intc[i], bytesUsed = binary.Uvarint(program[pos:])
@@ -2621,38 +2669,19 @@ func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err err
return
}
-func checkIntConstBlock(cx *EvalContext) error {
- pos := cx.pc + 1
- numInts, bytesUsed := binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode intcblock size at pc=%d", pos)
- }
- pos += bytesUsed
- if numInts > uint64(len(cx.program)) {
- return errTooManyIntc
- }
- //intc = make([]uint64, numInts)
- for i := uint64(0); i < numInts; i++ {
- if pos >= len(cx.program) {
- return errShortIntcblock
- }
- _, bytesUsed = binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos)
- }
- pos += bytesUsed
- }
- cx.nextpc = pos
- return nil
+func checkIntImmArgs(cx *EvalContext) error {
+ var err error
+ _, cx.nextpc, err = parseIntImmArgs(cx.program, cx.pc+1)
+ return err
}
-var errShortBytecblock = errors.New("bytecblock ran past end of program")
-var errTooManyItems = errors.New("bytecblock with too many items")
+var errShortByteImmArgs = errors.New("const bytes list ran past end of program")
+var errTooManyItems = errors.New("const bytes list with too many items")
-func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err error) {
+func parseByteImmArgs(program []byte, pos int) (bytec [][]byte, nextpc int, err error) {
numItems, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
- err = fmt.Errorf("could not decode bytecblock size at pc=%d", pos)
+ err = fmt.Errorf("could not decode length of bytes list at pc=%d", pos)
return
}
pos += bytesUsed
@@ -2663,7 +2692,7 @@ func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err e
bytec = make([][]byte, numItems)
for i := uint64(0); i < numItems; i++ {
if pos >= len(program) {
- err = errShortBytecblock
+ err = errShortByteImmArgs
return
}
itemLen, bytesUsed := binary.Uvarint(program[pos:])
@@ -2673,12 +2702,12 @@ func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err e
}
pos += bytesUsed
if pos >= len(program) {
- err = errShortBytecblock
+ err = errShortByteImmArgs
return
}
end := uint64(pos) + itemLen
if end > uint64(len(program)) || end < uint64(pos) {
- err = errShortBytecblock
+ err = errShortByteImmArgs
return
}
bytec[i] = program[pos : pos+int(itemLen)]
@@ -2688,38 +2717,10 @@ func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err e
return
}
-func checkByteConstBlock(cx *EvalContext) error {
- pos := cx.pc + 1
- numItems, bytesUsed := binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode bytecblock size at pc=%d", pos)
- }
- pos += bytesUsed
- if numItems > uint64(len(cx.program)) {
- return errTooManyItems
- }
- //bytec = make([][]byte, numItems)
- for i := uint64(0); i < numItems; i++ {
- if pos >= len(cx.program) {
- return errShortBytecblock
- }
- itemLen, bytesUsed := binary.Uvarint(cx.program[pos:])
- if bytesUsed <= 0 {
- return fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos)
- }
- pos += bytesUsed
- if pos >= len(cx.program) {
- return errShortBytecblock
- }
- end := uint64(pos) + itemLen
- if end > uint64(len(cx.program)) || end < uint64(pos) {
- return errShortBytecblock
- }
- //bytec[i] = program[pos : pos+int(itemLen)]
- pos += int(itemLen)
- }
- cx.nextpc = pos
- return nil
+func checkByteImmArgs(cx *EvalContext) error {
+ var err error
+ _, cx.nextpc, err = parseByteImmArgs(cx.program, cx.pc+1)
+ return err
}
func parseSwitch(program []byte, pos int) (targets []int, nextpc int, err error) {
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 1adf9b450..5da1708b9 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -369,6 +369,16 @@ pushint 1
gitxnas 0 Logs
`
+const boxNonsense = `
+ box_create
+ box_extract
+ box_replace
+ box_del
+ box_len
+ box_put
+ box_get
+`
+
const randomnessNonsense = `
pushint 0xffff
block BlkTimestamp
@@ -407,7 +417,15 @@ switch_label1:
pushint 1
`
-const v8Nonsense = v7Nonsense + switchNonsense + frameNonsense
+const matchNonsense = `
+match_label0:
+pushints 1 2 1
+match match_label0 match_label1
+match_label1:
+pushbytess "1" "2" "1"
+`
+
+const v8Nonsense = v7Nonsense + switchNonsense + frameNonsense + matchNonsense + boxNonsense
const v9Nonsense = v8Nonsense + pairingNonsense
@@ -418,11 +436,14 @@ const randomnessCompiled = "81ffff03d101d000"
const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984" +
randomnessCompiled + "800243218001775c0280018881015d"
+const boxCompiled = "b9babbbcbdbfbe"
+
const switchCompiled = "81018d02fff800008101"
+const matchCompiled = "83030102018e02fff500008203013101320131"
-const v8Compiled = v7Compiled + switchCompiled + frameCompiled
+const v8Compiled = v7Compiled + switchCompiled + frameCompiled + matchCompiled + boxCompiled
-const v9Compiled = v7Compiled + pairingCompiled
+const v9Compiled = v8Compiled + pairingCompiled
var nonsense = map[uint64]string{
1: v1Nonsense,
@@ -445,6 +466,7 @@ var compiled = map[uint64]string{
6: "06" + v6Compiled,
7: "07" + v7Compiled,
8: "08" + v8Compiled,
+ 9: "09" + v9Compiled,
}
func pseudoOp(opcode string) bool {
@@ -487,7 +509,9 @@ func TestAssemble(t *testing.T) {
// check that compilation is stable over
// time. we must assemble to the same bytes
// this month that we did last month.
- expectedBytes, _ := hex.DecodeString(compiled[v])
+ bytecode, ok := compiled[v]
+ require.True(t, ok, "Need v%d bytecode", v)
+ expectedBytes, _ := hex.DecodeString(bytecode)
require.NotEmpty(t, expectedBytes)
// the hex is for convenience if the program has been changed. the
// hex string can be copy pasted back in as a new expected result.
@@ -863,8 +887,8 @@ func TestAssembleBytes(t *testing.T) {
expectedOptimizedConsts := "018006616263646566"
bad := [][]string{
- {"byte", "...operation needs byte literal argument"},
- {`byte "john" "doe"`, "...operation with extraneous argument"},
+ {"byte", "...needs byte literal argument"},
+ {`byte "john" "doe"`, "...with extraneous argument"},
}
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
@@ -1651,17 +1675,40 @@ func TestConstantArgs(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- testProg(t, "int", v, Expect{1, "int needs one argument"})
- testProg(t, "intc", v, Expect{1, "intc operation needs one argument"})
- testProg(t, "byte", v, Expect{1, "byte operation needs byte literal argument"})
- testProg(t, "bytec", v, Expect{1, "bytec operation needs one argument"})
- testProg(t, "addr", v, Expect{1, "addr operation needs one argument"})
+ testProg(t, "int", v, Expect{1, "int needs one immediate argument, was given 0"})
+ testProg(t, "int 1 2", v, Expect{1, "int needs one immediate argument, was given 2"})
+ testProg(t, "intc", v, Expect{1, "intc needs one immediate argument, was given 0"})
+ testProg(t, "intc hi bye", v, Expect{1, "intc needs one immediate argument, was given 2"})
+ testProg(t, "byte", v, Expect{1, "byte needs byte literal argument"})
+ testProg(t, "bytec", v, Expect{1, "bytec needs one immediate argument, was given 0"})
+ testProg(t, "bytec 1 x", v, Expect{1, "bytec needs one immediate argument, was given 2"})
+ testProg(t, "addr", v, Expect{1, "addr needs one immediate argument, was given 0"})
+ testProg(t, "addr x y", v, Expect{1, "addr needs one immediate argument, was given 2"})
}
for v := uint64(3); v <= AssemblerMaxVersion; v++ {
- testProg(t, "pushint", v, Expect{1, "pushint needs one argument"})
- testProg(t, "pushbytes", v, Expect{1, "pushbytes operation needs byte literal argument"})
+ testProg(t, "pushint", v, Expect{1, "pushint needs one immediate argument, was given 0"})
+ testProg(t, "pushint 3 4", v, Expect{1, "pushint needs one immediate argument, was given 2"})
+ testProg(t, "pushbytes", v, Expect{1, "pushbytes needs byte literal argument"})
+ }
+}
+
+func TestBranchArgs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ testProg(t, "b", v, Expect{1, "b needs a single label argument"})
+ testProg(t, "b lab1 lab2", v, Expect{1, "b needs a single label argument"})
+ testProg(t, "int 1; bz", v, Expect{1, "bz needs a single label argument"})
+ testProg(t, "int 1; bz a b", v, Expect{1, "bz needs a single label argument"})
+ testProg(t, "int 1; bnz", v, Expect{1, "bnz needs a single label argument"})
+ testProg(t, "int 1; bnz c d", v, Expect{1, "bnz needs a single label argument"})
}
+ for v := uint64(4); v <= AssemblerMaxVersion; v++ {
+ testProg(t, "callsub", v, Expect{1, "callsub needs a single label argument"})
+ testProg(t, "callsub one two", v, Expect{1, "callsub needs a single label argument"})
+ }
}
func TestAssembleDisassembleErrors(t *testing.T) {
@@ -2347,13 +2394,13 @@ func TestErrShortBytecblock(t *testing.T) {
text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8`
ops := testProg(t, text, 1)
- _, _, err := parseIntcblock(ops.Program, 1)
- require.Equal(t, err, errShortIntcblock)
+ _, _, err := parseIntImmArgs(ops.Program, 1)
+ require.Equal(t, err, errShortIntImmArgs)
var cx EvalContext
cx.program = ops.Program
- err = checkIntConstBlock(&cx)
- require.Equal(t, err, errShortIntcblock)
+ err = checkIntImmArgs(&cx)
+ require.Equal(t, err, errShortIntImmArgs)
}
func TestMethodWarning(t *testing.T) {
@@ -2890,3 +2937,119 @@ int 1
`
testProg(t, source, AssemblerMaxVersion)
}
+
+func TestAssembleMatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // fail when target doesn't correspond to existing label
+ source := `
+ pushints 1 1 1
+ match label1 label2
+ label1:
+ `
+ testProg(t, source, AssemblerMaxVersion, NewExpect(3, "reference to undefined label \"label2\""))
+
+ // No labels is pretty degenerate, but ok, I suppose. It's just a no-op
+ testProg(t, `
+int 0
+match
+int 1
+`, AssemblerMaxVersion)
+
+ // confirm arg limit
+ source = `
+ pushints 1 2 1
+ match label1 label2
+ label1:
+ label2:
+ `
+ ops := testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 12) // ver (1) + pushints (5) + opcode (1) + length (1) + labels (2*2)
+
+ // confirm byte array args are assembled successfully
+ source = `
+ pushbytess "1" "2" "1"
+ match label1 label2
+ label1:
+ label2:
+ `
+ testProg(t, source, AssemblerMaxVersion)
+
+ var labels []string
+ for i := 0; i < 255; i++ {
+ labels = append(labels, fmt.Sprintf("label%d", i))
+ }
+
+ // test that 255 labels is ok
+ source = fmt.Sprintf(`
+ pushint 1
+ match %s
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 515) // ver (1) + pushint (2) + opcode (1) + length (1) + labels (2*255)
+
+ // 256 is too many
+ source = fmt.Sprintf(`
+ pushint 1
+ match %s extra
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ testProg(t, source, AssemblerMaxVersion, Expect{3, "match cannot take more than 255 labels"})
+
+ // allow duplicate label reference
+ source = `
+ pushint 1
+ match label1 label1
+ label1:
+ `
+ testProg(t, source, AssemblerMaxVersion)
+}
+
+func TestAssemblePushConsts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // allow empty const int list
+ source := `pushints`
+ testProg(t, source, AssemblerMaxVersion)
+
+ // allow empty const bytes list
+ source = `pushbytess`
+ testProg(t, source, AssemblerMaxVersion)
+
+ // basic test
+ source = `pushints 1 2 3`
+ ops := testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 6) // ver (1) + pushints (5)
+ source = `pushbytess "1" "2" "33"`
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 10) // ver (1) + pushbytess (9)
+
+ // 256 increases size of encoded length to two bytes
+ valsStr := make([]string, 256)
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("%d", 1)
+ }
+ source = fmt.Sprintf(`pushints %s`, strings.Join(valsStr, " "))
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 260) // ver (1) + opcode (1) + len (2) + ints (256)
+
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("\"%d\"", 1)
+ }
+ source = fmt.Sprintf(`pushbytess %s`, strings.Join(valsStr, " "))
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 516) // ver (1) + opcode (1) + len (2) + bytess (512)
+
+ // enforce correct types
+ source = `pushints "1" "2" "3"`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, `strconv.ParseUint: parsing "\"1\"": invalid syntax`})
+ source = `pushbytess 1 2 3`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, "byte arg did not parse: 1"})
+ source = `pushints 6 4; concat`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, "concat arg 1 wanted type []byte got uint64"})
+ source = `pushbytess "x" "y"; +`
+ testProg(t, source, AssemblerMaxVersion, Expect{1, "+ arg 1 wanted type uint64 got []byte"})
+}
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index 13a19ddaa..04fff1828 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -480,7 +480,7 @@ func TestBackwardCompatAssemble(t *testing.T) {
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- testLogic(t, source, v, defaultEvalParams(nil))
+ testLogic(t, source, v, defaultEvalParams())
})
}
}
diff --git a/data/transactions/logic/box.go b/data/transactions/logic/box.go
new file mode 100644
index 000000000..6f2e9ccd9
--- /dev/null
+++ b/data/transactions/logic/box.go
@@ -0,0 +1,318 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+)
+
+const (
+ boxCreate = iota
+ boxRead
+ boxWrite
+ boxDelete
+)
+
+func (cx *EvalContext) availableBox(name string, operation int, createSize uint64) ([]byte, bool, error) {
+ if cx.txn.Txn.OnCompletion == transactions.ClearStateOC {
+ return nil, false, fmt.Errorf("boxes may not be accessed from ClearState program")
+ }
+
+ dirty, ok := cx.available.boxes[boxRef{cx.appID, name}]
+ if !ok {
+ return nil, false, fmt.Errorf("invalid Box reference %v", name)
+ }
+
+ // Since the box is in cx.available, we know this GetBox call is cheap. It
+ // will go (at most) to the cowRoundBase. Knowledge about existence
+ // simplifies write budget tracking, then we return the info to avoid yet
+ // another call to GetBox which most ops need anyway.
+ content, exists, err := cx.Ledger.GetBox(cx.appID, name)
+ if err != nil {
+ return nil, false, err
+ }
+
+ switch operation {
+ case boxCreate:
+ if exists {
+ if createSize != uint64(len(content)) {
+ return nil, false, fmt.Errorf("box size mismatch %d %d", uint64(len(content)), createSize)
+ }
+ // Since it exists, we have no dirty work to do. The weird case of
+ // box_put, which seems like a combination of create and write, is
+ // properly handled because box_put already used boxWrite to declare
+ // the intent to write (and track dirtiness).
+ return content, exists, nil
+ }
+ fallthrough // If it doesn't exist, a create is like write
+ case boxWrite:
+ writeSize := createSize
+ if exists {
+ writeSize = uint64(len(content))
+ }
+ if !dirty {
+ cx.available.dirtyBytes += writeSize
+ }
+ dirty = true
+ case boxDelete:
+ if dirty {
+ cx.available.dirtyBytes -= uint64(len(content))
+ }
+ dirty = false
+ case boxRead:
+ /* nothing to do */
+ }
+ cx.available.boxes[boxRef{cx.appID, name}] = dirty
+
+ if cx.available.dirtyBytes > cx.ioBudget {
+ return nil, false, fmt.Errorf("write budget (%d) exceeded %d", cx.ioBudget, cx.available.dirtyBytes)
+ }
+ return content, exists, nil
+}
+
+func argCheck(cx *EvalContext, name string, size uint64) error {
+ // Enforce length rules. Currently these are the same as enforced by
+ // ledger. If these were ever to change in proto, we would need to isolate
+ // changes to different program versions. (so a v7 app could not see a
+ // bigger box than expected, for example)
+ if len(name) == 0 {
+ return fmt.Errorf("box names may not be zero length")
+ }
+ if len(name) > cx.Proto.MaxAppKeyLen {
+ return fmt.Errorf("name too long: length was %d, maximum is %d", len(name), cx.Proto.MaxAppKeyLen)
+ }
+ if size > cx.Proto.MaxBoxSize {
+ return fmt.Errorf("box size too large: %d, maximum is %d", size, cx.Proto.MaxBoxSize)
+ }
+ return nil
+}
+
+func opBoxCreate(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // size
+ prev := last - 1 // name
+
+ name := string(cx.stack[prev].Bytes)
+ size := cx.stack[last].Uint
+
+ err := argCheck(cx, name, size)
+ if err != nil {
+ return err
+ }
+ _, exists, err := cx.availableBox(name, boxCreate, size)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ appAddr := cx.getApplicationAddress(cx.appID)
+ err = cx.Ledger.NewBox(cx.appID, name, make([]byte, size), appAddr)
+ if err != nil {
+ return err
+ }
+ }
+
+ cx.stack[prev] = boolToSV(!exists)
+ cx.stack = cx.stack[:last]
+ return err
+}
+
+func opBoxExtract(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // length
+ prev := last - 1 // start
+ pprev := prev - 1 // name
+
+ name := string(cx.stack[pprev].Bytes)
+ start := cx.stack[prev].Uint
+ length := cx.stack[last].Uint
+
+ err := argCheck(cx, name, basics.AddSaturate(start, length))
+ if err != nil {
+ return err
+ }
+ contents, exists, err := cx.availableBox(name, boxRead, 0)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return fmt.Errorf("no such box %#v", name)
+ }
+
+ bytes, err := extractCarefully(contents, start, length)
+ cx.stack[pprev].Bytes = bytes
+ cx.stack = cx.stack[:prev]
+ return err
+}
+
+func opBoxReplace(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // replacement
+ prev := last - 1 // start
+ pprev := prev - 1 // name
+
+ replacement := cx.stack[last].Bytes
+ start := cx.stack[prev].Uint
+ name := string(cx.stack[pprev].Bytes)
+
+ err := argCheck(cx, name, basics.AddSaturate(start, uint64(len(replacement))))
+ if err != nil {
+ return err
+ }
+
+ contents, exists, err := cx.availableBox(name, boxWrite, 0 /* size is already known */)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return fmt.Errorf("no such box %#v", name)
+ }
+
+ bytes, err := replaceCarefully(contents, replacement, start)
+ if err != nil {
+ return err
+ }
+ cx.stack = cx.stack[:pprev]
+ return cx.Ledger.SetBox(cx.appID, name, bytes)
+}
+
+func opBoxDel(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // name
+ name := string(cx.stack[last].Bytes)
+
+ err := argCheck(cx, name, 0)
+ if err != nil {
+ return err
+ }
+ _, exists, err := cx.availableBox(name, boxDelete, 0)
+ if err != nil {
+ return err
+ }
+ if exists {
+ appAddr := cx.getApplicationAddress(cx.appID)
+ _, err := cx.Ledger.DelBox(cx.appID, name, appAddr)
+ if err != nil {
+ return err
+ }
+ }
+ cx.stack[last] = boolToSV(exists)
+ return nil
+}
+
+func opBoxLen(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // name
+ name := string(cx.stack[last].Bytes)
+
+ err := argCheck(cx, name, 0)
+ if err != nil {
+ return err
+ }
+ contents, exists, err := cx.availableBox(name, boxRead, 0)
+ if err != nil {
+ return err
+ }
+
+ cx.stack[last] = stackValue{Uint: uint64(len(contents))}
+ cx.stack = append(cx.stack, boolToSV(exists))
+ return nil
+}
+
+func opBoxGet(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // name
+ name := string(cx.stack[last].Bytes)
+
+ err := argCheck(cx, name, 0)
+ if err != nil {
+ return err
+ }
+ contents, exists, err := cx.availableBox(name, boxRead, 0)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ contents = []byte{}
+ }
+ cx.stack[last].Bytes = contents // Will rightly panic if too big
+ cx.stack = append(cx.stack, boolToSV(exists))
+ return nil
+}
+
+func opBoxPut(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // value
+ prev := last - 1 // name
+
+ value := cx.stack[last].Bytes
+ name := string(cx.stack[prev].Bytes)
+
+ err := argCheck(cx, name, uint64(len(value)))
+ if err != nil {
+ return err
+ }
+
+ // This boxWrite usage requires the size, because the box may not exist.
+ contents, exists, err := cx.availableBox(name, boxWrite, uint64(len(value)))
+ if err != nil {
+ return err
+ }
+
+ cx.stack = cx.stack[:prev]
+
+ if exists {
+ /* the replacement must match existing size */
+ if len(contents) != len(value) {
+ return fmt.Errorf("attempt to box_put wrong size %d != %d", len(contents), len(value))
+ }
+ return cx.Ledger.SetBox(cx.appID, name, value)
+ }
+
+ /* The box did not exist, so create it. */
+ appAddr := cx.getApplicationAddress(cx.appID)
+ return cx.Ledger.NewBox(cx.appID, name, value, appAddr)
+}
+
+const boxPrefix = "bx:"
+const boxPrefixLength = len(boxPrefix)
+const boxNameIndex = boxPrefixLength + 8 // len("bx:") + 8 (appIdx, big-endian)
+
+// MakeBoxKey creates the key that a box named `name` under app `appIdx` should use.
+func MakeBoxKey(appIdx basics.AppIndex, name string) string {
+ /* This format is chosen so that a simple indexing scheme on the key would
+ allow for quick lookups of all the boxes of a certain app, or even all
+ the boxes of a certain app with a certain prefix.
+
+ The "bx:" prefix is so that the kvstore might be usable for things
+ besides boxes.
+ */
+ key := make([]byte, boxNameIndex+len(name))
+ copy(key, boxPrefix)
+ binary.BigEndian.PutUint64(key[boxPrefixLength:], uint64(appIdx))
+ copy(key[boxNameIndex:], name)
+ return string(key)
+}
+
+// SplitBoxKey extracts an appid and box name from a string that was created by MakeBoxKey()
+func SplitBoxKey(key string) (basics.AppIndex, string, error) {
+ if len(key) < boxNameIndex {
+ return 0, "", fmt.Errorf("SplitBoxKey() cannot extract AppIndex as key (%s) too short (length=%d)", key, len(key))
+ }
+ if key[:boxPrefixLength] != boxPrefix {
+ return 0, "", fmt.Errorf("SplitBoxKey() illegal app box prefix in key (%s). Expected prefix '%s'", key, boxPrefix)
+ }
+ keyBytes := []byte(key)
+ app := basics.AppIndex(binary.BigEndian.Uint64(keyBytes[boxPrefixLength:boxNameIndex]))
+ return app, key[boxNameIndex:], nil
+}
diff --git a/data/transactions/logic/box_test.go b/data/transactions/logic/box_test.go
new file mode 100644
index 000000000..515f0ad69
--- /dev/null
+++ b/data/transactions/logic/box_test.go
@@ -0,0 +1,602 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic_test
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBoxNewDel(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ for _, size := range []int{24, 0} {
+ t.Run(fmt.Sprintf("box size=%d", size), func(t *testing.T) {
+ createSelf := fmt.Sprintf(`byte "self"; int %d; box_create;`, size)
+ createOther := fmt.Sprintf(`byte "other"; int %d; box_create;`, size)
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+
+ logic.TestApp(t, createSelf, ep)
+ ledger.DelBoxes(888, "self")
+
+ logic.TestApp(t, createSelf+`assert;`+createSelf+`!`, ep)
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, createSelf+`assert;`+createOther, ep)
+ ledger.DelBoxes(888, "self")
+
+ logic.TestApp(t, createSelf+`assert; byte "self"; box_del`, ep)
+ logic.TestApp(t, `byte "self"; box_del; !`, ep)
+ logic.TestApp(t, createSelf+`assert
+ byte "self"; box_del; assert
+ byte "self"; box_del; !`, ep)
+ ledger.DelBoxes(888, "self")
+
+ logic.TestApp(t, fmt.Sprintf(
+ `byte "self"; box_get; !; assert; pop
+ byte "self"; int %d; bzero; box_put; int 1`, size), ep)
+ })
+ }
+
+}
+
+func TestBoxNewBad(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+ logic.TestApp(t, `byte "self"; int 999; box_create`, ep, "write budget")
+
+ // In test proto, you get 100 I/O budget per boxref
+ ten := [10]transactions.BoxRef{}
+ txn.Boxes = append(txn.Boxes, ten[:]...) // write budget is now 11*100 = 1100
+ logic.TestApp(t, `byte "self"; int 999; box_create`, ep)
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, `byte "self"; int 1000; box_create`, ep)
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, `byte "self"; int 1001; box_create`, ep, "box size too large")
+
+ logic.TestApp(t, `byte "unknown"; int 1000; box_create`, ep, "invalid Box reference")
+
+ long := strings.Repeat("x", 65)
+ txn.Boxes = []transactions.BoxRef{{Name: []byte(long)}}
+ logic.TestApp(t, fmt.Sprintf(`byte "%s"; int 1000; box_create`, long), ep, "name too long")
+
+ txn.Boxes = []transactions.BoxRef{{Name: []byte("")}} // irrelevant, zero check comes first anyway
+ logic.TestApp(t, `byte ""; int 1000; box_create`, ep, "zero length")
+}
+
+func TestBoxReadWrite(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+ // extract some bytes until past the end, confirm the beginning is
+ // zeros, and confirm when it fails.
+ logic.TestApp(t, `byte "self"; int 4; box_create; assert
+ byte "self"; int 1; int 2; box_extract;
+ byte 0x0000; ==; assert;
+ byte "self"; int 1; int 3; box_extract;
+ byte 0x000000; ==; assert;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x00000000; ==; assert;
+ int 1`, ep)
+
+ logic.TestApp(t, `byte "self"; int 1; int 4; box_extract;
+ byte 0x00000000; ==`, ep, "extraction end 5")
+
+ // Replace some bytes until past the end, confirm when it fails.
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x00303100; ==`, ep)
+ logic.TestApp(t, `byte "self"; int 1; byte 0x303132; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x00303132; ==`, ep)
+ logic.TestApp(t, `byte "self"; int 1; byte 0x30313233; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x0030313233; ==`, ep, "replacement end 5")
+
+ // Replace with different byte in different place.
+ logic.TestApp(t, `byte "self"; int 0; byte 0x4444; box_replace;
+ byte "self"; int 0; int 4; box_extract;
+ byte 0x44443132; ==`, ep)
+
+ // All bow down to the God of code coverage!
+ ledger.DelBoxes(888, "self")
+ logic.TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace`, ep,
+ "no such box")
+ logic.TestApp(t, `byte "junk"; int 1; byte 0x3031; box_replace`, ep,
+ "invalid Box reference")
+}
+
+func TestBoxAcrossTxns(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ledger := logic.NewLedger(nil)
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+ // After creation in first txn, second one can read it (though it's empty)
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "self"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, nil, 8, ledger)
+ // after creation, modification, the third can read it
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "self"; int 2; byte "hi"; box_replace; int 1`,
+ `byte "self"; int 1; int 4; box_extract; byte 0x00686900; ==`, // "\0hi\0"
+ }, nil, 8, ledger)
+}
+
+// TestDirtyTracking gives confidence that the number of dirty bytes to be
+// written is tracked properly, despite repeated creates/deletes of the same
+// thing, touches in different txns, etc.
+func TestDirtyTracking(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, txn, ledger := logic.MakeSampleEnv()
+
+ ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+ logic.TestApp(t, `byte "self"; int 200; box_create`, ep)
+ logic.TestApp(t, `byte "other"; int 201; box_create`, ep, "write budget")
+ // deleting "self" doesn't give extra write budget to create big "other"
+ logic.TestApp(t, `byte "self"; box_del; !; byte "other"; int 201; box_create`, ep,
+ "write budget")
+
+ // though it cancels out a creation that happened here
+ logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ byte "self"; box_del; assert
+ byte "self"; int 200; box_create;
+ `, ep)
+
+ ledger.DelBoxes(888, "self", "other")
+ // same, but create a different box than deleted
+ logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ byte "self"; box_del; assert
+ byte "other"; int 200; box_create;
+ `, ep)
+
+ // no funny business by trying to del twice! this case is also interesting
+ // because the read budget is spent on "other", which is 200, while the
+ // write budget is spent on "self"
+ logic.TestApp(t, `byte "other"; box_len; assert`, ep) // reminder, "other" exists!
+ logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ byte "self"; box_del; assert
+ byte "self"; box_del; !; assert
+ byte "self"; int 201; box_create;
+ `, ep, "write budget")
+ logic.TestApp(t, `byte "self"; box_len; !; assert; !`, ep) // "self" was not made
+ logic.TestApp(t, `byte "self"; int 200; box_create`, ep) // make it
+ // Now that both exist with size 200, naming both in Boxes causes failure
+ logic.TestApp(t, `int 1`, ep, "read budget")
+
+}
+
+func TestBoxUnavailableWithClearState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ tests := map[string]string{
+ "box_create": `byte "self"; int 64; box_create`,
+ "box_del": `byte "self"; box_del`,
+ "box_extract": `byte "self"; int 7; int 0; box_extract`,
+ "box_get": `byte "self"; box_get`,
+ "box_len": `byte "self"; box_len`,
+ "box_put": `byte "put"; byte "self"; box_put`,
+ "box_replace": `byte "self"; int 0; byte "new"; box_replace`,
+ }
+
+ for name, program := range tests {
+ t.Run(name, func(t *testing.T) {
+ ep, _, l := logic.MakeSampleEnv()
+ l.NewApp(basics.Address{}, 888, basics.AppParams{})
+ ep.TxnGroup[0].Txn.OnCompletion = transactions.ClearStateOC
+ logic.TestApp(t, program, ep, "boxes may not be accessed from ClearState program")
+ })
+ }
+}
+
+func TestBoxAvailability(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ledger := logic.NewLedger(nil)
+ ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+ // B is not available (recall that "self" is set up by MakeSampleEnv, in TestApps)
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, nil, 8, ledger, logic.NewExpect(1, "invalid Box reference B"))
+
+ // B is available if indexed by 0 in tx[1].Boxes
+ group := logic.MakeSampleTxnGroup(logic.MakeSampleTxn(), txntest.Txn{
+ Type: "appl",
+ ApplicationID: 10000,
+ Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("B")}},
+ }.SignedTxn())
+ group[0].Txn.Type = protocol.ApplicationCallTx
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, group, 8, ledger, logic.NewExpect(1, "no such box"))
+
+ // B is available if listed by appId in tx[1].Boxes
+ group = logic.MakeSampleTxnGroup(logic.MakeSampleTxn(), txntest.Txn{
+ Type: "appl",
+ ApplicationID: 10000,
+ ForeignApps: []basics.AppIndex{10000},
+ Boxes: []transactions.BoxRef{{Index: 1, Name: []byte("B")}},
+ }.SignedTxn())
+ group[0].Txn.Type = protocol.ApplicationCallTx
+ logic.TestApps(t, []string{
+ `byte "self"; int 64; box_create`,
+ `byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
+ }, group, 8, ledger, logic.NewExpect(1, "no such box"))
+}
+
+// TestBoxReadBudget verifies that the box read budget scales with the number
+// of box refs on the transaction (100 bytes per ref here), and that an
+// over-budget set of referenced boxes fails the program even when the
+// program never touches the boxes.
+func TestBoxReadBudget(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	appID := basics.AppIndex(888)
+	appAddr := appID.Address()
+
+	ep, txn, ledger := logic.MakeSampleEnv()
+	ledger.NewApp(basics.Address{}, appID, basics.AppParams{})
+
+	// Sample txn has two box refs, so read budget is 2*100
+
+	ledger.NewBox(appID, "self", make([]byte, 100), appAddr)
+	ledger.NewBox(appID, "other", make([]byte, 100), appAddr)
+	ledger.NewBox(appID, "third", make([]byte, 100), appAddr)
+
+	// Right at budget
+	logic.TestApp(t, `byte "self"; box_len; assert; byte "other"; box_len; assert; ==`, ep)
+
+	// With three box refs, read budget is now 3*100
+	txn.Boxes = append(txn.Boxes, transactions.BoxRef{Name: []byte("third")})
+	logic.TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep)
+
+	// Increase "third" box size to 101
+	ledger.DelBox(appID, "third", appAddr)
+	ledger.NewBox(appID, "third", make([]byte, 101), appAddr)
+
+	// Budget exceeded
+	logic.TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep, "box read budget (300) exceeded")
+	// Still exceeded if we don't touch the boxes
+	logic.TestApp(t, `int 1`, ep, "box read budget (300) exceeded")
+
+	// Still exceeded with one box ref
+	// (drop the two sample refs, leaving only the 101-byte "third" against a 100-byte budget)
+	txn.Boxes = txn.Boxes[2:]
+	logic.TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep, "box read budget (100) exceeded")
+
+	// But not with two
+	// (an empty BoxRef still contributes its 100 bytes to the budget)
+	txn.Boxes = append(txn.Boxes, transactions.BoxRef{})
+	logic.TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep)
+}
+
+// TestBoxWriteBudget exercises the box write budget (two box refs -> 2*100
+// bytes) across create/delete/replace combinations, including cases where
+// deletes within the same program free budget, and cases where read budget
+// failures fire before other errors are noticed.
+func TestBoxWriteBudget(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	ep, _, ledger := logic.MakeSampleEnv()
+	ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+	// Sample tx[0] has two box refs, so write budget is 2*100
+
+	// Test simple use of one box, less than, equal, or over budget
+	logic.TestApp(t, `byte "self"; int 4; box_create`, ep)
+	logic.TestApp(t, `byte "self"; box_del; assert
+                byte "self"; int 199; box_create`, ep)
+	logic.TestApp(t, `byte "self"; box_del; assert
+                byte "self"; int 200; box_create`, ep)
+	logic.TestApp(t, `byte "self"; box_del; assert
+                byte "self"; int 201; box_create`, ep, "write budget (200) exceeded")
+
+	// Test interplay of two different boxes being created
+	logic.TestApp(t, `byte "self"; int 4; box_create; assert
+                byte "other"; int 4; box_create`, ep)
+
+	// deleting both boxes first frees enough budget for 4 + 196 = 200
+	logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
+                byte "self"; int 4; box_create; assert;
+                byte "other"; int 196; box_create`, ep)
+
+	// but 6 + 196 = 202 is over budget
+	logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
+                byte "self"; int 6; box_create; assert
+                byte "other"; int 196; box_create`, ep,
+		"write budget (200) exceeded")
+	ledger.DelBoxes(888, "other")
+
+	logic.TestApp(t, `byte "self"; box_del; assert
+                byte "self"; int 6; box_create; assert
+                byte "other"; int 196; box_create; assert // fails to create
+                byte "self"; box_del;`, ep, "write budget (200) exceeded")
+
+	logic.TestApp(t, `byte "other"; int 196; box_create`, ep)
+	logic.TestApp(t, `byte "self"; box_del`, ep, "read budget")  // 6 + 196 > 200
+	logic.TestApp(t, `byte "junk"; box_del`, ep, "read budget")  // fails before invalid "junk" is noticed
+	ledger.DelBoxes(888, "self", "other")
+	logic.TestApp(t, `byte "junk"; box_del`, ep, "invalid Box reference")
+
+	// Create two boxes, that sum to over budget, then test trying to use them together
+	logic.TestApp(t, `byte "self"; int 101; box_create`, ep)
+	logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+                byte "other"; int 101; box_create`, ep, "write budget (200) exceeded")
+
+	logic.TestApp(t, `byte "other"; int 101; box_create`, ep)
+	logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+                byte "other"; int 1; byte 0x3333; box_replace;
+                int 1`, ep, "read budget (200) exceeded")
+	ledger.DelBoxes(888, "other")
+
+	// shrink "other" to 10 bytes, so 101 + 10 fits both budgets
+	logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+                byte "other"; int 10; box_create`, ep)
+	// They're now small enough to read and write
+	logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+                byte "other"; int 1; byte 0x3333; box_replace;
+                int 1`, ep)
+	// writing twice is no problem (even though it's the big one)
+	logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+                byte "self"; int 50; byte 0x3333; box_replace;
+                byte "other"; int 1; byte 0x3333; box_replace;
+                int 1`, ep)
+
+	logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del`, ep) // cleanup
+
+}
+
+// TestWriteBudgetPut ensures we get write budget right for box_put
+func TestWriteBudgetPut(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	ep, _, ledger := logic.MakeSampleEnv()
+	ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+	// Sample tx[0] has two box refs, so write budget is 2*100
+
+	// Test simple use of one box
+	logic.TestApp(t, `byte "self"; int 200; box_create`, ep) // equal to budget
+	logic.TestApp(t, `byte "self"; box_del`, ep)             // reset for the next case
+	logic.TestApp(t, `byte "self"; int 201; box_create`, ep, // 1 over budget
+		"write budget")
+
+	// More complicated versions that use 1 or more 150 byte boxes, so one is ok, two is over
+	logic.TestApp(t, `byte "self"; int 150; box_create`, ep)
+	logic.TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
+	logic.TestApp(t, `byte "self"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep)
+	// puts to same name, doesn't go over budget (although we don't optimize
+	// away puts with the same content, this test uses different contents just
+	// to be sure).
+	logic.TestApp(t, `byte "self"; int 150; bzero; box_put;
+                byte "self"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep)
+	// puts to different names do
+	logic.TestApp(t, `byte "self"; int 150; bzero; box_put;
+                byte "other"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep,
+		"write budget")
+
+	// testing a regression: ensure box_put does not double debit when creating
+	// ("self" was deleted above only logically; this put re-creates it and
+	// must charge 150 once, not 300)
+	logic.TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
+}
+
+// TestBoxRepeatedCreate ensures that app is not charged write budget for
+// creates that don't do anything.
+func TestBoxRepeatedCreate(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	ep, _, ledger := logic.MakeSampleEnv()
+	ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+	// Sample tx[0] has two box refs, so write budget is 2*100
+	logic.TestApp(t, `byte "self"; int 201; box_create`, ep,
+		"write budget")
+	logic.TestApp(t, `byte "self"; int 200; box_create`, ep)
+	logic.TestApp(t, `byte "self"; int 200; box_create; !; assert // does not actually create
+                byte "other"; int 200; box_create; assert // does create, and budget should be enough
+                int 1`, ep)
+
+	ledger.DelBoxes(888, "self", "other")
+	// create/delete/create of the same 200-byte box in one program must only
+	// be charged once, or this would exceed the 200-byte budget
+	logic.TestApp(t, `byte "other"; int 200; box_create; assert
+                byte "other"; box_del; assert
+                byte "other"; int 200; box_create`, ep)
+
+}
+
+// TestIOBudgetGrow verifies that appending box refs to the transaction
+// mid-test grows both the read and write budgets accordingly.
+func TestIOBudgetGrow(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	ep, txn, ledger := logic.MakeSampleEnv()
+	ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+	ledger.CreateBox(888, "self", 101)
+	ledger.CreateBox(888, "other", 101)
+
+	// 101 + 101 = 202 > the 2-ref budget of 200
+	logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+                byte "other"; int 1; byte 0x3333; box_replace;
+                int 1`, ep, "read budget (200) exceeded")
+
+	txn.Boxes = append(txn.Boxes, transactions.BoxRef{})
+	// Since we added an empty BoxRef, we can read > 200.
+	logic.TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
+                byte "other"; int 1; int 7; box_extract; pop;
+                int 1`, ep)
+	// Add write, for that matter
+	logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+                byte "other"; int 1; byte 0x3333; box_replace;
+                int 1`, ep)
+
+	txn.Boxes = append(txn.Boxes, transactions.BoxRef{Name: []byte("another")})
+
+	// Here we read 202, and write a very different 350 (since we now have 4 brs)
+	logic.TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
+                byte "other"; int 1; int 7; box_extract; pop;
+                byte "another"; int 350; box_create`, ep)
+}
+
+// TestConveniences exercises the convenience box opcodes (box_get, box_len,
+// box_put): behavior on missing boxes, creation via box_put, size-mismatch
+// failure, and the size cap on box_get's returned value.
+func TestConveniences(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	ep, _, ledger := logic.MakeSampleEnv()
+	ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+	// box_get of a new name reports !exists, and returns 0 length bytes.
+	logic.TestApp(t, `byte "self"; box_get; !; assert; len; !`, ep)
+
+	// box_len of a new name reports !exists, and returns 0 as the length
+	logic.TestApp(t, `byte "self"; box_len; !; assert; !`, ep)
+
+	// box_put creates the box with contents provided
+	logic.TestApp(t, `byte "self"; byte 0x3132; box_put;
+                byte "self"; box_len; assert; int 2; ==; assert
+                byte "self"; box_get; assert; byte 0x3132; ==`, ep)
+
+	// box_put fails if box exists and is wrong size (self exists from last test)
+	logic.TestApp(t, `byte "self"; byte 0x313233; box_put; int 1`, ep,
+		"box_put wrong size")
+	ledger.DelBoxes(888, "self")
+
+	// put and get can interact with created boxes
+	logic.TestApp(t, `byte "self"; int 3; box_create`, ep)
+	logic.TestApp(t, `byte "self"; box_get; assert; byte 0x000000; ==`, ep)
+	logic.TestApp(t, `byte "self"; byte 0xAABBCC; box_put; int 1`, ep)
+	logic.TestApp(t, `byte "self"; int 1; byte 0xDDEE; box_replace; int 1`, ep)
+	logic.TestApp(t, `byte "self"; box_get; assert; byte 0xAADDEE; ==`, ep)
+	ledger.DelBoxes(888, "self")
+
+	// box_get panics if the box is too big (for TEAL, or for proto)
+	ep.Proto.MaxBoxSize = 5000
+	ep.Proto.BytesPerBoxReference = 5000 // avoid write budget error
+	logic.TestApp(t, `byte "self"; int 4098; box_create; assert; // bigger than maxStringSize
+                byte "self"; box_get; assert; len`, ep,
+		"box_get produced a too big")
+}
+
+// TestEarlyPanics ensures that all of the box opcodes die early if they are
+// given an empty or too long name.
+func TestEarlyPanics(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	// one program template per box opcode; the box name is injected via Sprintf
+	tests := map[string]string{
+		"box_create":  `byte "%s"; int 10; box_create`,
+		"box_del":     `byte "%s"; box_del`,
+		"box_extract": `byte "%s"; int 1; int 2; box_extract`,
+		"box_get":     `byte "%s"; box_get`,
+		"box_len":     `byte "%s"; box_len`,
+		"box_put":     `byte "%s"; byte "hello"; box_put`,
+		"box_replace": `byte "%s"; int 0; byte "new"; box_replace`,
+	}
+
+	ep, _, l := logic.MakeSampleEnv()
+	l.NewApp(basics.Address{}, 888, basics.AppParams{})
+
+	// empty name must be rejected by every opcode
+	for name, program := range tests {
+		t.Run(name+"/zero", func(t *testing.T) {
+			logic.TestApp(t, fmt.Sprintf(program, ""), ep, "zero length")
+		})
+	}
+
+	// 65-byte name: expected to exceed the box name length limit
+	big := strings.Repeat("x", 65)
+	for name, program := range tests {
+		t.Run(name+"/long", func(t *testing.T) {
+			logic.TestApp(t, fmt.Sprintf(program, big), ep, "name too long")
+		})
+	}
+
+}
+
+// TestBoxTotals checks that AcctTotalBoxes and AcctTotalBoxBytes on the app
+// account reflect box creation (4-byte name + 31-byte contents = 35 bytes).
+func TestBoxTotals(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	ep, txn, ledger := logic.MakeSampleEnv()
+
+	ledger.NewApp(txn.Sender, 888, basics.AppParams{})
+	// The SENDER certainly has no boxes (but does exist)
+	logic.TestApp(t, `int 0; acct_params_get AcctTotalBoxes; pop; !`, ep)
+	// Nor does the app account, to start
+	logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
+                acct_params_get AcctTotalBoxes; pop; !; `, ep)
+	// Create a 31 byte box with a 4 byte name
+	logic.TestApp(t, `byte "self"; int 31; box_create`, ep)
+	logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
+                acct_params_get AcctTotalBoxes; pop; int 1; ==`, ep)
+	logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
+                acct_params_get AcctTotalBoxBytes; pop; int 35; ==`, ep)
+}
+
+// TestMakeBoxKey round-trips MakeBoxKey/SplitBoxKey over well-formed keys
+// (app id + name -> "bx:"-prefixed key) and checks SplitBoxKey's error
+// messages for keys that are too short or lack the "bx:" prefix.
+func TestMakeBoxKey(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	type testCase struct {
+		description string
+		name        string
+		app         basics.AppIndex
+		key         string
+		err         string
+	}
+
+	// pp pretty-prints a test case for failure messages
+	pp := func(tc testCase) string {
+		return fmt.Sprintf("<<<%s>>> (name, app) = (%#v, %d) --should--> key = %#v (err = [%s])", tc.description, tc.name, tc.app, tc.key, tc.err)
+	}
+
+	var testCases = []testCase{
+		// COPACETIC:
+		{"zero appid", "stranger", 0, "bx:\x00\x00\x00\x00\x00\x00\x00\x00stranger", ""},
+		{"typical", "348-8uj", 131231, "bx:\x00\x00\x00\x00\x00\x02\x00\x9f348-8uj", ""},
+		{"empty box name", "", 42, "bx:\x00\x00\x00\x00\x00\x00\x00*", ""},
+		{"random byteslice", "{\xbb\x04\a\xd1\xe2\xc6I\x81{", 13475904583033571713, "bx:\xbb\x04\a\xd1\xe2\xc6I\x81{\xbb\x04\a\xd1\xe2\xc6I\x81{", ""},
+
+		// ERRORS:
+		{"too short", "", 0, "stranger", "SplitBoxKey() cannot extract AppIndex as key (stranger) too short (length=8)"},
+		{"wrong prefix", "", 0, "strangersINTHEdark", "SplitBoxKey() illegal app box prefix in key (strangersINTHEdark). Expected prefix 'bx:'"},
+	}
+
+	for _, tc := range testCases {
+		app, name, err := logic.SplitBoxKey(tc.key)
+
+		if tc.err == "" {
+			// success path: Split must invert Make exactly
+			key := logic.MakeBoxKey(tc.app, tc.name)
+			require.Equal(t, tc.app, app, pp(tc))
+			require.Equal(t, tc.name, name, pp(tc))
+			require.Equal(t, tc.key, key, pp(tc))
+		} else {
+			require.EqualError(t, err, tc.err, pp(tc))
+		}
+	}
+}
diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go
index f33e8ae5c..2775e74e2 100644
--- a/data/transactions/logic/debugger_test.go
+++ b/data/transactions/logic/debugger_test.go
@@ -113,7 +113,7 @@ func TestDebuggerHook(t *testing.T) {
partitiontest.PartitionTest(t)
testDbg := testDbgHook{}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Debugger = &testDbg
testLogic(t, testProgram, AssemblerMaxVersion, ep)
@@ -223,7 +223,7 @@ func TestCallStackUpdate(t *testing.T) {
}
testDbg := testDbgHook{}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Debugger = &testDbg
testLogic(t, testCallStackProgram, AssemblerMaxVersion, ep)
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index a12149bcf..243c22ec2 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -75,6 +75,7 @@ var opDocByName = map[string]string{
"intc_2": "constant 2 from intcblock",
"intc_3": "constant 3 from intcblock",
"pushint": "immediate UINT",
+ "pushints": "push sequence of immediate uints to stack in the order they appear (first uint being deepest)",
"bytecblock": "prepare block of byte-array constants for use by bytec",
"bytec": "Ith constant from bytecblock",
"bytec_0": "constant 0 from bytecblock",
@@ -82,6 +83,7 @@ var opDocByName = map[string]string{
"bytec_2": "constant 2 from bytecblock",
"bytec_3": "constant 3 from bytecblock",
"pushbytes": "immediate BYTES",
+ "pushbytess": "push sequences of immediate byte arrays to stack (first byte array being deepest)",
"bzero": "zero filled byte-array of length A",
"arg": "Nth LogicSig argument",
@@ -128,30 +130,29 @@ var opDocByName = map[string]string{
"dup2": "duplicate A and B",
"dupn": "duplicate A, N times",
"dig": "Nth value from the top of the stack. dig 0 is equivalent to dup",
- "bury": "Replace the Nth value from the top of the stack. bury 0 fails.",
+ "bury": "replace the Nth value from the top of the stack with A. bury 0 fails.",
"cover": "remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N.",
"uncover": "remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N.",
"swap": "swaps A and B on stack",
"select": "selects one of two values based on top-of-stack: B if C != 0, else A",
- "concat": "join A and B",
- "substring": "A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails",
- "substring3": "A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails",
- "getbit": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
- "setbit": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
- "getbyte": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
- "setbyte": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
- "extract": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
- "extract3": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
- "extract_uint16": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
- "extract_uint32": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
- "extract_uint64": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
- "replace2": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)",
- "replace3": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)",
- "base64_decode": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
-
- "balance": "get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
- "min_balance": "get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.",
+ "concat": "join A and B",
+ "substring": "A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails",
+ "substring3": "A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails",
+ "getbit": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "setbit": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "getbyte": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
+ "setbyte": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
+ "extract": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
+ "extract3": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
+ "extract_uint16": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
+ "extract_uint32": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
+ "extract_uint64": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
+ "replace2": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)",
+ "replace3": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)",
+ "base64_decode": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
+ "balance": "balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`",
+ "min_balance": "minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.",
"app_opted_in": "1 if account A is opted in to application B, else 0",
"app_local_get": "local state of the key B in the current application in account A",
"app_local_get_ex": "X is the local state of application B, key C in account A. Y is 1 if key existed, else 0",
@@ -167,6 +168,7 @@ var opDocByName = map[string]string{
"acct_params_get": "X is field F from account A. Y is 1 if A owns positive algos, else 0",
"assert": "immediately fail unless A is a non-zero number",
"callsub": "branch unconditionally to TARGET, saving the next instruction on the call stack",
+ "proto": "Prepare top call frame for a retsub that will assume A args and R return values.",
"retsub": "pop the top instruction from the call stack and branch to it",
"b+": "A plus B. A and B are interpreted as big-endian unsigned integers",
@@ -197,11 +199,19 @@ var opDocByName = map[string]string{
"block": "field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)",
"switch": "branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.",
+ "match": "given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.",
- "proto": "Prepare top call frame for a retsub that will assume A args and R return values.",
"frame_dig": "Nth (signed) value from the frame pointer.",
- "frame_bury": "Replace the Nth (signed) value from the frame pointer in the stack",
- "popn": "Remove N values from the top of the stack",
+ "frame_bury": "replace the Nth (signed) value from the frame pointer in the stack with A",
+ "popn": "remove N values from the top of the stack",
+
+ "box_create": "create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1",
+ "box_extract": "read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "box_replace": "write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "box_del": "delete box named A if it exists. Return 1 if A existed, 0 otherwise",
+ "box_len": "X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.",
+ "box_get": "X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.",
+ "box_put": "replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist",
}
// OpDoc returns a description of the op
@@ -210,12 +220,14 @@ func OpDoc(opName string) string {
}
var opcodeImmediateNotes = map[string]string{
- "intcblock": "{varuint length} [{varuint value}, ...]",
+ "intcblock": "{varuint count} [{varuint value}, ...]",
"intc": "{uint8 int constant index}",
"pushint": "{varuint int}",
- "bytecblock": "{varuint length} [({varuint value length} bytes), ...]",
+ "pushints": "{varuint count} [{varuint value}, ...]",
+ "bytecblock": "{varuint count} [({varuint value length} bytes), ...]",
"bytec": "{uint8 byte constant index}",
"pushbytes": "{varuint length} {bytes}",
+ "pushbytess": "{varuint count} [({varuint value length} bytes), ...]",
"arg": "{uint8 arg index N}",
"global": "{uint8 global field index}",
@@ -273,6 +285,7 @@ var opcodeImmediateNotes = map[string]string{
"block": "{uint8 block field}",
"switch": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+ "match": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
"proto": "{uint8 arguments} {uint8 return values}",
"frame_dig": "{int8 frame slot}",
@@ -300,6 +313,7 @@ var opDocExtras = map[string]string{
"bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
"b": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
"callsub": "The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.",
+ "proto": "Fails unless the last instruction executed was a `callsub`.",
"retsub": "If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.",
"intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
"bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
@@ -318,7 +332,9 @@ var opDocExtras = map[string]string{
"btoi": "`btoi` fails if the input is longer than 8 bytes.",
"concat": "`concat` fails if the result would be greater than 4096 bytes.",
"pushbytes": "pushbytes args are not added to the bytecblock during assembly processes",
+ "pushbytess": "pushbytess args are not added to the bytecblock during assembly processes",
"pushint": "pushint args are not added to the intcblock during assembly processes",
+ "pushints": "pushints args are not added to the intcblock during assembly processes",
"getbit": "see explanation of bit ordering in setbit",
"setbit": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
@@ -339,9 +355,15 @@ var opDocExtras = map[string]string{
"itxn_next": "`itxn_next` initializes the transaction exactly as `itxn_begin` does",
 "itxn_field": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made to extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)",
"itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
+
"base64_decode": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
"json_ref": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.",
- "proto": "Fails unless the last instruction executed was a `callsub`.",
+
+ "match": "`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.",
+
+ "box_create": "Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.",
+ "box_get": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
+ "box_put": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
}
// OpDocExtra returns extra documentation text about an op
@@ -357,9 +379,10 @@ var OpGroups = map[string][]string{
"Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"},
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
- "Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
- "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch"},
+ "Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "pushints", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "pushbytess", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
+ "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch", "match"},
"State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "log", "block"},
+ "Box Access": {"box_create", "box_extract", "box_replace", "box_del", "box_len", "box_get", "box_put"},
"Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna", "itxnas", "gitxn", "gitxna", "gitxnas"},
}
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index e95293106..9b5f2a950 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -33,7 +33,9 @@ func TestOpDocs(t *testing.T) {
opsSeen[op.Name] = false
}
for name := range opDocByName {
- assert.Contains(t, opsSeen, name, "opDocByName contains strange opcode %#v", name)
+ if _, ok := opsSeen[name]; !ok { // avoid assert.Contains: printing opsSeen is waste
+ assert.Fail(t, "opDocByName contains strange opcode", "%#v", name)
+ }
opsSeen[name] = true
}
for op, seen := range opsSeen {
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index 1e78d6960..34db841c1 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -113,7 +113,7 @@ func (sv stackValue) address() (addr basics.Address, err error) {
func (sv stackValue) uint() (uint64, error) {
if sv.Bytes != nil {
- return 0, errors.New("not a uint64")
+ return 0, fmt.Errorf("%#v is not a uint64", sv.Bytes)
}
return sv.Uint, nil
}
@@ -217,7 +217,7 @@ type LedgerForLogic interface {
AccountData(addr basics.Address) (ledgercore.AccountData, error)
Authorizer(addr basics.Address) (basics.Address, error)
Round() basics.Round
- LatestTimestamp() int64
+ PrevTimestamp() int64
BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error)
@@ -233,15 +233,38 @@ type LedgerForLogic interface {
SetGlobal(appIdx basics.AppIndex, key string, value basics.TealValue) error
DelGlobal(appIdx basics.AppIndex, key string) error
+ NewBox(appIdx basics.AppIndex, key string, value []byte, appAddr basics.Address) error
+ GetBox(appIdx basics.AppIndex, key string) ([]byte, bool, error)
+ SetBox(appIdx basics.AppIndex, key string, value []byte) error
+ DelBox(appIdx basics.AppIndex, key string, appAddr basics.Address) (bool, error)
+
Perform(gi int, ep *EvalParams) error
Counter() uint64
}
-// resources contains a list of apps and assets. It's used to track the apps and
-// assets created by a txgroup, for "free" access.
+// resources contains a catalog of available resources. It's used to track the
+// apps, assets, and boxes that are available to a transaction, outside the
+// direct foreign array mechanism.
type resources struct {
asas []basics.AssetIndex
apps []basics.AppIndex
+
+ // boxes are all of the top-level box refs from the txgroup. Most are added
+ // during NewEvalParams(). refs using 0 on an appl create are resolved and
+ // added when the appl executes. The boolean value indicates the "dirtiness"
+ // of the box - has it been modified in this txngroup? If yes, the size of
+ // the box counts against the group writeBudget. So delete is NOT a dirtying
+ // operation.
+ boxes map[boxRef]bool
+
+ // dirtyBytes maintains a running count of the number of dirty bytes in `boxes`
+ dirtyBytes uint64
+}
+
+// boxRef is the "hydrated" form of a BoxRef - it has the actual app id, not an index
+type boxRef struct {
+ app basics.AppIndex
+ name string
}
// EvalParams contains data that comes into condition evaluation.
@@ -281,9 +304,19 @@ type EvalParams struct {
// Total allowable inner txns in a group transaction (nil before inner pooling enabled)
pooledAllowedInners *int
- // created contains resources that may be used for "created" - they need not be in
- // a foreign array. They remain empty until createdResourcesVersion.
- created *resources
+ // available contains resources that may be used even though they are not
+ // necessarily directly in the txn's "static arrays". Apps and ASAs go in if
+ // the app or asa was created earlier in the txgroup (empty until
+ // createdResourcesVersion). Boxes go in when the ep is created, to share
+ // availability across all txns in the group.
+ available *resources
+
+ // ioBudget is the number of bytes that the box ref'd boxes can sum to, and
+ // the number of bytes that created or written boxes may sum to.
+ ioBudget uint64
+
+ // readBudgetChecked allows us to only check the read budget once
+ readBudgetChecked bool
// Caching these here means the hashes can be shared across the TxnGroup
// (and inners, because the cache is shared with the inner EvalParams)
@@ -310,9 +343,30 @@ func copyWithClearAD(txgroup []transactions.SignedTxnWithAD) []transactions.Sign
// NewEvalParams creates an EvalParams to use while evaluating a top-level txgroup
func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.ConsensusParams, specials *transactions.SpecialAddresses) *EvalParams {
apps := 0
+ var allBoxes map[boxRef]bool
for _, tx := range txgroup {
if tx.Txn.Type == protocol.ApplicationCallTx {
apps++
+ if allBoxes == nil && len(tx.Txn.Boxes) > 0 {
+ allBoxes = make(map[boxRef]bool)
+ }
+ for _, br := range tx.Txn.Boxes {
+ var app basics.AppIndex
+ if br.Index == 0 {
+ // "current app": Ignore if this is a create, else use ApplicationID
+ if tx.Txn.ApplicationID == 0 {
+ // When the create actually happens, and we learn the appID, we'll add it.
+ continue
+ }
+ app = tx.Txn.ApplicationID
+ } else {
+ // Bounds check will already have been done by
+ // WellFormed. For testing purposes, it's better to panic
+ // now than after returning a nil.
+ app = tx.Txn.ForeignApps[br.Index-1] // shift for the 0=this convention
+ }
+ allBoxes[boxRef{app, string(br.Name)}] = false
+ }
}
}
@@ -351,15 +405,14 @@ func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.Consens
FeeCredit: &credit,
PooledApplicationBudget: pooledApplicationBudget,
pooledAllowedInners: pooledAllowedInners,
- created: &resources{},
+ available: &resources{boxes: allBoxes},
appAddrCache: make(map[basics.AppIndex]basics.Address),
}
}
// feeCredit returns the extra fee supplied in this top-level txgroup compared
// to required minfee. It can make assumptions about overflow because the group
-// is known OK according to TxnGroupBatchVerify. (In essence the group is
-// "WellFormed")
+// is known OK according to txnGroupBatchPrep. (The group is "WellFormed")
func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 {
minFeeCount := uint64(0)
feesPaid := uint64(0)
@@ -369,10 +422,9 @@ func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 {
}
feesPaid = basics.AddSaturate(feesPaid, stxn.Txn.Fee.Raw)
}
- // Overflow is impossible, because TxnGroupBatchVerify checked.
+ // Overflow is impossible, because txnGroupBatchPrep checked.
feeNeeded := minFee * minFeeCount
-
- return feesPaid - feeNeeded
+ return basics.SubSaturate(feesPaid, feeNeeded)
}
// NewInnerEvalParams creates an EvalParams to be used while evaluating an inner group txgroup
@@ -400,16 +452,21 @@ func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext)
Trace: caller.Trace,
TxnGroup: txg,
pastScratch: make([]*scratchSpace, len(txg)),
+ logger: caller.logger,
+ SigLedger: caller.SigLedger,
+ Ledger: caller.Ledger,
+ Debugger: nil, // See #4438, where this becomes caller.Debugger
MinAvmVersion: &minAvmVersion,
FeeCredit: caller.FeeCredit,
Specials: caller.Specials,
PooledApplicationBudget: caller.PooledApplicationBudget,
pooledAllowedInners: caller.pooledAllowedInners,
- SigLedger: caller.SigLedger,
- Ledger: caller.Ledger,
- created: caller.created,
+ available: caller.available,
+ ioBudget: caller.ioBudget,
+ readBudgetChecked: true, // don't check for inners
appAddrCache: caller.appAddrCache,
- caller: caller,
+ // read comment in EvalParams declaration about txid caches
+ caller: caller,
}
return ep
}
@@ -458,17 +515,17 @@ func (ep *EvalParams) log() logging.Logger {
// package. For example, after a acfg transaction is processed, the AD created
// by the acfg is added to the EvalParams this way.
func (ep *EvalParams) RecordAD(gi int, ad transactions.ApplyData) {
- if ep.created == nil {
+ if ep.available == nil {
// This is a simplified ep. It won't be used for app evaluation, and
// shares the TxnGroup memory with the caller. Don't touch anything!
return
}
ep.TxnGroup[gi].ApplyData = ad
if aid := ad.ConfigAsset; aid != 0 {
- ep.created.asas = append(ep.created.asas, aid)
+ ep.available.asas = append(ep.available.asas, aid)
}
if aid := ad.ApplicationID; aid != 0 {
- ep.created.apps = append(ep.created.apps, aid)
+ ep.available.apps = append(ep.available.apps, aid)
}
}
@@ -604,10 +661,6 @@ func (st StackType) Typed() bool {
return false
}
-func (sts StackTypes) plus(other StackTypes) StackTypes {
- return append(sts, other...)
-}
-
// PanicError wraps a recover() catching a panic()
type PanicError struct {
PanicValue interface{}
@@ -659,10 +712,54 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
}
}
+ // If this is a creation, make any "0 index" box refs available now that we
+ // have an appID.
+ if cx.txn.Txn.ApplicationID == 0 {
+ for _, br := range cx.txn.Txn.Boxes {
+ if br.Index == 0 {
+ cx.EvalParams.available.boxes[boxRef{cx.appID, string(br.Name)}] = false
+ }
+ }
+ }
+
+ // Check the I/O budget for reading if this is the first top-level app call
+ if cx.caller == nil && !cx.readBudgetChecked {
+ boxRefCount := uint64(0) // Intentionally counts duplicates
+ for _, tx := range cx.TxnGroup {
+ boxRefCount += uint64(len(tx.Txn.Boxes))
+ }
+ cx.ioBudget = boxRefCount * cx.Proto.BytesPerBoxReference
+
+ used := uint64(0)
+ for br := range cx.available.boxes {
+ if len(br.name) == 0 {
+ // 0 length names are not allowed for actual created boxes, but
+ // may have been used to add I/O budget.
+ continue
+ }
+ box, ok, err := cx.Ledger.GetBox(br.app, br.name)
+ if err != nil {
+ return false, nil, err
+ }
+ if !ok {
+ continue
+ }
+ size := uint64(len(box))
+ cx.available.boxes[br] = false
+
+ used = basics.AddSaturate(used, size)
+ if used > cx.ioBudget {
+ return false, nil, fmt.Errorf("box read budget (%d) exceeded", cx.ioBudget)
+ }
+ }
+ cx.readBudgetChecked = true
+ }
+
if cx.Trace != nil && cx.caller != nil {
fmt.Fprintf(cx.Trace, "--- enter %d %s %v\n", aid, cx.txn.Txn.OnCompletion, cx.txn.Txn.ApplicationArgs)
}
pass, err := eval(program, &cx)
+
if cx.Trace != nil && cx.caller != nil {
fmt.Fprintf(cx.Trace, "--- exit %d accept=%t\n", aid, pass)
}
@@ -709,33 +806,15 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
errstr += cx.Trace.String()
}
err = PanicError{x, errstr}
- cx.EvalParams.log().Errorf("recovered panic in Eval: %w", err)
- }
- }()
-
- defer func() {
- // Ensure we update the debugger before exiting
- if cx.Debugger != nil {
- errDbg := cx.Debugger.Complete(cx.refreshDebugState(err))
- if err == nil {
- err = errDbg
- }
+ cx.EvalParams.log().Errorf("recovered panic in Eval: %v", err)
}
}()
- if (cx.EvalParams.Proto == nil) || (cx.EvalParams.Proto.LogicSigVersion == 0) {
- err = errLogicSigNotSupported
- return
- }
- if cx.txn.Lsig.Args != nil && len(cx.txn.Lsig.Args) > transactions.EvalMaxArgs {
- err = errTooManyArgs
- return
- }
+ // Avoid returning for any reason until after cx.debugState is set up. That
+ // requires cx to be minimally set up, too.
- version, vlen, err := versionCheck(program, cx.EvalParams)
- if err != nil {
- return false, err
- }
+ version, vlen, verr := versionCheck(program, cx.EvalParams)
+ // defer verr check until after cx and debugState is setup
cx.version = version
cx.pc = vlen
@@ -751,6 +830,23 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
if derr := cx.Debugger.Register(cx.refreshDebugState(err)); derr != nil {
return false, derr
}
+ defer func() {
+ // Ensure we update the debugger before exiting
+ errDbg := cx.Debugger.Complete(cx.refreshDebugState(err))
+ if err == nil {
+ err = errDbg
+ }
+ }()
+ }
+
+ if (cx.EvalParams.Proto == nil) || (cx.EvalParams.Proto.LogicSigVersion == 0) {
+ return false, errLogicSigNotSupported
+ }
+ if cx.txn.Lsig.Args != nil && len(cx.txn.Lsig.Args) > transactions.EvalMaxArgs {
+ return false, errTooManyArgs
+ }
+ if verr != nil {
+ return false, verr
}
for (err == nil) && (cx.pc < len(cx.program)) {
@@ -1126,6 +1222,15 @@ func (cx *EvalContext) checkStep() (int, error) {
return opcost, nil
}
+func (cx *EvalContext) ensureStackCap(targetCap int) {
+ if cap(cx.stack) < targetCap {
+ // Let's grow all at once, plus a little slack.
+ newStack := make([]stackValue, len(cx.stack), targetCap+4)
+ copy(newStack, cx.stack)
+ cx.stack = newStack
+ }
+}
+
func opErr(cx *EvalContext) error {
return errors.New("err opcode executed")
}
@@ -1855,7 +1960,7 @@ func opBytesZero(cx *EvalContext) error {
func opIntConstBlock(cx *EvalContext) error {
var err error
- cx.intc, cx.nextpc, err = parseIntcblock(cx.program, cx.pc+1)
+ cx.intc, cx.nextpc, err = parseIntImmArgs(cx.program, cx.pc+1)
return err
}
@@ -1895,9 +2000,24 @@ func opPushInt(cx *EvalContext) error {
return nil
}
+func opPushInts(cx *EvalContext) error {
+ intc, nextpc, err := parseIntImmArgs(cx.program, cx.pc+1)
+ if err != nil {
+ return err
+ }
+ finalLen := len(cx.stack) + len(intc)
+ cx.ensureStackCap(finalLen)
+ for _, cint := range intc {
+ sv := stackValue{Uint: cint}
+ cx.stack = append(cx.stack, sv)
+ }
+ cx.nextpc = nextpc
+ return nil
+}
+
func opByteConstBlock(cx *EvalContext) error {
var err error
- cx.bytec, cx.nextpc, err = parseBytecBlock(cx.program, cx.pc+1)
+ cx.bytec, cx.nextpc, err = parseByteImmArgs(cx.program, cx.pc+1)
return err
}
@@ -1942,6 +2062,21 @@ func opPushBytes(cx *EvalContext) error {
return nil
}
+func opPushBytess(cx *EvalContext) error {
+ cbytess, nextpc, err := parseByteImmArgs(cx.program, cx.pc+1)
+ if err != nil {
+ return err
+ }
+ finalLen := len(cx.stack) + len(cbytess)
+ cx.ensureStackCap(finalLen)
+ for _, cbytes := range cbytess {
+ sv := stackValue{Bytes: cbytes}
+ cx.stack = append(cx.stack, sv)
+ }
+ cx.nextpc = nextpc
+ return nil
+}
+
func opArgN(cx *EvalContext, n uint64) error {
if n >= uint64(len(cx.txn.Lsig.Args)) {
return fmt.Errorf("cannot load arg[%d] of %d", n, len(cx.txn.Lsig.Args))
@@ -2119,6 +2254,44 @@ func opSwitch(cx *EvalContext) error {
return nil
}
+func opMatch(cx *EvalContext) error {
+ n := int(cx.program[cx.pc+1])
+ // stack contains the n sized match list and the single match value
+ if n+1 > len(cx.stack) {
+ return fmt.Errorf("match expects %d stack args while stack only contains %d", n+1, len(cx.stack))
+ }
+
+ last := len(cx.stack) - 1
+ matchVal := cx.stack[last]
+ cx.stack = cx.stack[:last]
+
+ argBase := len(cx.stack) - n
+ matchList := cx.stack[argBase:]
+ cx.stack = cx.stack[:argBase]
+
+ matchedIdx := n
+ for i, stackArg := range matchList {
+ if stackArg.argType() != matchVal.argType() {
+ continue
+ }
+
+ if matchVal.argType() == StackBytes && bytes.Equal(matchVal.Bytes, stackArg.Bytes) {
+ matchedIdx = i
+ break
+ } else if matchVal.argType() == StackUint64 && matchVal.Uint == stackArg.Uint {
+ matchedIdx = i
+ break
+ }
+ }
+
+ target, err := switchTarget(cx, uint64(matchedIdx))
+ if err != nil {
+ return err
+ }
+ cx.nextpc = target
+ return nil
+}
+
const protoByte = 0x8a
func opCallSub(cx *EvalContext) error {
@@ -3011,7 +3184,7 @@ func (cx *EvalContext) getRound() uint64 {
}
func (cx *EvalContext) getLatestTimestamp() (uint64, error) {
- ts := cx.Ledger.LatestTimestamp()
+ ts := cx.Ledger.PrevTimestamp()
if ts < 0 {
return 0, fmt.Errorf("latest timestamp %d < 0", ts)
}
@@ -3626,24 +3799,30 @@ func opSetByte(cx *EvalContext) error {
return nil
}
-func opExtractImpl(x []byte, start, length int) ([]byte, error) {
+func extractCarefully(x []byte, start, length uint64) ([]byte, error) {
+ if start > uint64(len(x)) {
+ return nil, fmt.Errorf("extraction start %d is beyond length: %d", start, len(x))
+ }
end := start + length
- if start > len(x) || end > len(x) {
- return nil, errors.New("extract range beyond length of string")
+ if end < start {
+ return nil, fmt.Errorf("extraction end exceeds uint64")
+ }
+ if end > uint64(len(x)) {
+ return nil, fmt.Errorf("extraction end %d is beyond length: %d", end, len(x))
}
return x[start:end], nil
}
func opExtract(cx *EvalContext) error {
last := len(cx.stack) - 1
- startIdx := cx.program[cx.pc+1]
- lengthIdx := cx.program[cx.pc+2]
+ start := uint64(cx.program[cx.pc+1])
+ length := uint64(cx.program[cx.pc+2])
// Shortcut: if length is 0, take bytes from start index to the end
- length := int(lengthIdx)
if length == 0 {
- length = len(cx.stack[last].Bytes) - int(startIdx)
+ // If length has wrapped, it's because start > len(), so extractCarefully will report
+ length = uint64(len(cx.stack[last].Bytes) - int(start))
}
- bytes, err := opExtractImpl(cx.stack[last].Bytes, int(startIdx), length)
+ bytes, err := extractCarefully(cx.stack[last].Bytes, start, length)
cx.stack[last].Bytes = bytes
return err
}
@@ -3651,18 +3830,18 @@ func opExtract(cx *EvalContext) error {
func opExtract3(cx *EvalContext) error {
last := len(cx.stack) - 1 // length
prev := last - 1 // start
- byteArrayIdx := prev - 1 // bytes
- startIdx := cx.stack[prev].Uint
- lengthIdx := cx.stack[last].Uint
- if startIdx > math.MaxInt32 || lengthIdx > math.MaxInt32 {
- return errors.New("extract range beyond length of string")
- }
- bytes, err := opExtractImpl(cx.stack[byteArrayIdx].Bytes, int(startIdx), int(lengthIdx))
- cx.stack[byteArrayIdx].Bytes = bytes
+ pprev := prev - 1 // bytes
+
+ start := cx.stack[prev].Uint
+ length := cx.stack[last].Uint
+ bytes, err := extractCarefully(cx.stack[pprev].Bytes, start, length)
+ cx.stack[pprev].Bytes = bytes
cx.stack = cx.stack[:prev]
return err
}
+// replaceCarefully is used to make a NEW byteslice copy of original, with
+// replacement written over the bytes starting at start.
func replaceCarefully(original []byte, replacement []byte, start uint64) ([]byte, error) {
if start > uint64(len(original)) {
return nil, fmt.Errorf("replacement start %d beyond length: %d", start, len(original))
@@ -3731,11 +3910,11 @@ func convertBytesToInt(x []byte) uint64 {
return out
}
-func opExtractNBytes(cx *EvalContext, n int) error {
+func opExtractNBytes(cx *EvalContext, n uint64) error {
last := len(cx.stack) - 1 // start
prev := last - 1 // bytes
- startIdx := cx.stack[last].Uint
- bytes, err := opExtractImpl(cx.stack[prev].Bytes, int(startIdx), n) // extract n bytes
+ start := cx.stack[last].Uint
+ bytes, err := extractCarefully(cx.stack[prev].Bytes, start, n) // extract n bytes
if err != nil {
return err
}
@@ -3784,7 +3963,7 @@ func (cx *EvalContext) accountReference(account stackValue) (basics.Address, uin
invalidIndex := uint64(len(cx.txn.Txn.Accounts) + 1)
// Allow an address for an app that was created in group
if err != nil && cx.version >= createdResourcesVersion {
- for _, appID := range cx.created.apps {
+ for _, appID := range cx.available.apps {
createdAddress := cx.getApplicationAddress(appID)
if addr == createdAddress {
return addr, invalidIndex, nil
@@ -3914,13 +4093,8 @@ func opAppLocalGetEx(cx *EvalContext) error {
return err
}
- var isOk stackValue
- if ok {
- isOk.Uint = 1
- }
-
cx.stack[pprev] = result
- cx.stack[prev] = isOk
+ cx.stack[prev] = boolToSV(ok)
cx.stack = cx.stack[:last]
return nil
}
@@ -3989,13 +4163,8 @@ func opAppGlobalGetEx(cx *EvalContext) error {
return err
}
- var isOk stackValue
- if ok {
- isOk.Uint = 1
- }
-
cx.stack[prev] = result
- cx.stack[last] = isOk
+ cx.stack[last] = boolToSV(ok)
return nil
}
@@ -4007,6 +4176,13 @@ func opAppLocalPut(cx *EvalContext) error {
sv := cx.stack[last]
key := string(cx.stack[prev].Bytes)
+ // Enforce key lengths. Now, this is the same as enforced by ledger, but if
+ // it were ever to change in proto, we would need to isolate changes to different
+ // program versions. (so a v6 app could not see a bigger key, for example)
+ if len(key) > cx.Proto.MaxAppKeyLen {
+ return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cx.Proto.MaxAppKeyLen)
+ }
+
addr, accountIdx, err := cx.mutableAccountReference(cx.stack[pprev])
if err != nil {
return err
@@ -4026,6 +4202,17 @@ func opAppLocalPut(cx *EvalContext) error {
}
cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = tv.ToValueDelta()
}
+
+ // Enforce maximum value length (also enforced by ledger)
+ if tv.Type == basics.TealBytesType {
+ if len(tv.Bytes) > cx.Proto.MaxAppBytesValueLen {
+ return fmt.Errorf("value too long for key 0x%x: length was %d", key, len(tv.Bytes))
+ }
+ if sum := len(key) + len(tv.Bytes); sum > cx.Proto.MaxAppSumKeyValueLens {
+ return fmt.Errorf("key/value total too long for key 0x%x: sum was %d", key, sum)
+ }
+ }
+
err = cx.Ledger.SetLocal(addr, cx.appID, key, tv, accountIdx)
if err != nil {
return err
@@ -4042,6 +4229,14 @@ func opAppGlobalPut(cx *EvalContext) error {
sv := cx.stack[last]
key := string(cx.stack[prev].Bytes)
+ // Enforce maximum key length. Currently this is the same as enforced by
+ // ledger. If it were ever to change in proto, we would need to isolate
+ // changes to different program versions. (so a v6 app could not see a
+ // bigger key, for example)
+ if len(key) > cx.Proto.MaxAppKeyLen {
+ return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cx.Proto.MaxAppKeyLen)
+ }
+
// if writing the same value, don't record in EvalDelta, matching ledger
// behavior with previous BuildEvalDelta mechanism
etv, ok, err := cx.Ledger.GetGlobal(cx.appID, key)
@@ -4053,6 +4248,16 @@ func opAppGlobalPut(cx *EvalContext) error {
cx.txn.EvalDelta.GlobalDelta[key] = tv.ToValueDelta()
}
+ // Enforce maximum value length (also enforced by ledger)
+ if tv.Type == basics.TealBytesType {
+ if len(tv.Bytes) > cx.Proto.MaxAppBytesValueLen {
+ return fmt.Errorf("value too long for key 0x%x: length was %d", key, len(tv.Bytes))
+ }
+ if sum := len(key) + len(tv.Bytes); sum > cx.Proto.MaxAppSumKeyValueLens {
+ return fmt.Errorf("key/value total too long for key 0x%x: sum was %d", key, sum)
+ }
+ }
+
err = cx.Ledger.SetGlobal(cx.appID, key, tv)
if err != nil {
return err
@@ -4138,7 +4343,7 @@ func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, e
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, appID := range cx.created.apps {
+ for _, appID := range cx.available.apps {
if appID == basics.AppIndex(ref) {
return appID, nil
}
@@ -4177,7 +4382,7 @@ func asaReference(cx *EvalContext, ref uint64, foreign bool) (basics.AssetIndex,
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, assetID := range cx.created.asas {
+ for _, assetID := range cx.available.asas {
if assetID == basics.AssetIndex(ref) {
return assetID, nil
}
@@ -4338,6 +4543,26 @@ func opAcctParamsGet(cx *EvalContext) error {
value.Uint = account.MinBalance(cx.Proto).Raw
case AcctAuthAddr:
value.Bytes = account.AuthAddr[:]
+
+ case AcctTotalNumUint:
+ value.Uint = uint64(account.TotalAppSchema.NumUint)
+ case AcctTotalNumByteSlice:
+ value.Uint = uint64(account.TotalAppSchema.NumByteSlice)
+ case AcctTotalExtraAppPages:
+ value.Uint = uint64(account.TotalExtraAppPages)
+
+ case AcctTotalAppsCreated:
+ value.Uint = account.TotalAppParams
+ case AcctTotalAppsOptedIn:
+ value.Uint = account.TotalAppLocalStates
+ case AcctTotalAssetsCreated:
+ value.Uint = account.TotalAssetParams
+ case AcctTotalAssets:
+ value.Uint = account.TotalAssets
+ case AcctTotalBoxes:
+ value.Uint = account.TotalBoxes
+ case AcctTotalBoxBytes:
+ value.Uint = account.TotalBoxBytes
}
cx.stack[last] = value
cx.stack = append(cx.stack, boolToSV(account.MicroAlgos.Raw > 0))
@@ -4464,7 +4689,7 @@ func (cx *EvalContext) availableAsset(sv stackValue) (basics.AssetIndex, error)
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, assetID := range cx.created.asas {
+ for _, assetID := range cx.available.asas {
if assetID == aid {
return aid, nil
}
@@ -4492,7 +4717,7 @@ func (cx *EvalContext) availableApp(sv stackValue) (basics.AppIndex, error) {
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, appID := range cx.created.apps {
+ for _, appID := range cx.available.apps {
if appID == aid {
return aid, nil
}
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index 35d8f28e3..ea7bd6885 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -117,6 +117,10 @@ func TestFieldTypes(t *testing.T) {
TestApp(t, NoTrack("itxn_begin; byte 0x01; itxn_field XferAsset;"), ep, "not a uint64")
TestApp(t, NoTrack("itxn_begin; byte 0x01; itxn_field AssetAmount;"), ep, "not a uint64")
+ // get coverage on uintMaxed()
+ TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field ExtraProgramPages;"), ep, "not a uint64")
+ // get coverage on bool()
+ TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field Nonparticipation;"), ep, "not a uint64")
}
func appAddr(id int) basics.Address {
@@ -253,7 +257,7 @@ func TestRekeyPay(t *testing.T) {
TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay+"; int 1", ep)
// Note that the Sender would fail min balance check if we did it here.
// It seems proper to wait until end of txn though.
- // See explanation in logicLedger's Perform()
+ // See explanation in cowRoundState's Perform()
}
func TestRekeyBack(t *testing.T) {
@@ -496,7 +500,7 @@ func TestNumInnerPooled(t *testing.T) {
tx := txntest.Txn{
Type: protocol.ApplicationCallTx,
}.SignedTxn()
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
ledger.NewApp(tx.Txn.Receiver, 888, basics.AppParams{})
ledger.NewAccount(appAddr(888), 1000000)
short := pay + ";int 1"
@@ -770,6 +774,8 @@ func TestFieldSetting(t *testing.T) {
"not an address")
TestApp(t, "itxn_begin; int 6; bzero; itxn_field ConfigAssetUnitName; int 1", ep)
+ TestApp(t, NoTrack("itxn_begin; int 6; itxn_field ConfigAssetUnitName; int 1"), ep,
+ "not a byte array")
TestApp(t, "itxn_begin; int 7; bzero; itxn_field ConfigAssetUnitName; int 1", ep,
"value is too long")
@@ -782,6 +788,8 @@ func TestInnerGroup(t *testing.T) {
partitiontest.PartitionTest(t)
ep, tx, ledger := MakeSampleEnv()
+ ep.FeeCredit = nil // default sample env starts at 401
+
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
// Need both fees and both payments
ledger.NewAccount(appAddr(888), 999+2*MakeTestProto().MinTxnFee)
@@ -802,6 +810,8 @@ func TestInnerFeePooling(t *testing.T) {
partitiontest.PartitionTest(t)
ep, tx, ledger := MakeSampleEnv()
+ ep.FeeCredit = nil // default sample env starts at 401
+
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
ledger.NewAccount(appAddr(888), 50_000)
pay := `
@@ -1757,6 +1767,13 @@ int 1
ep, parentTx, ledger := MakeSampleEnv()
ep.Proto.UnifyInnerTxIDs = unified
+ // Whenever MakeSampleEnv() is changed to create a different
+ // transaction, we must reverse those changes here, so that the
+ // historic test is correct.
+ parentTx.Type = protocol.PaymentTx
+ parentTx.Boxes = nil
+ ep.FeeCredit = nil // else inner's fee will change
+
parentTx.ApplicationID = parentAppID
parentTx.ForeignApps = []basics.AppIndex{
childAppID,
@@ -2073,11 +2090,18 @@ int 1
for _, unified := range []bool{true, false} {
t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) {
- t.Parallel()
+ // t.Parallel() NO! unified variable is actually shared
ep, parentTx, ledger := MakeSampleEnv()
ep.Proto.UnifyInnerTxIDs = unified
+ // Whenever MakeSampleEnv() is changed to create a different
+ // transaction, we must reverse those changes here, so that the
+ // historic test is correct.
+ parentTx.Type = protocol.PaymentTx
+ parentTx.Boxes = nil
+ ep.FeeCredit = nil // else inner's fee will change
+
parentTx.ApplicationID = parentAppID
parentTx.ForeignApps = []basics.AppIndex{
childAppID,
@@ -2548,7 +2572,7 @@ func TestNumInnerDeep(t *testing.T) {
ForeignApps: []basics.AppIndex{basics.AppIndex(222)},
}.SignedTxnWithAD()
require.Equal(t, 888, int(tx.Txn.ApplicationID))
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
pay3 := TestProg(t, pay+pay+pay+"int 1;", AssemblerMaxVersion).Program
ledger.NewApp(tx.Txn.Receiver, 222, basics.AppParams{
@@ -2929,6 +2953,7 @@ done:
func TestInfiniteRecursion(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
ep, tx, ledger := MakeSampleEnv()
source := `
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index 773330fab..e3dfff2f7 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -41,8 +41,8 @@ import (
func TestKeccak256(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
/*
pip install sha3
import sha3
@@ -58,8 +58,8 @@ byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567
func TestSHA3_256(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
/*
pip install hashlib
import hashlib
@@ -74,8 +74,8 @@ byte 0xd757297405c5c89f7ceca368ee76c2f1893ee24f654e60032e65fb53b01aae10
func TestSHA512_256(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
/*
pip cryptography
from cryptography.hazmat.backends import default_backend
@@ -176,8 +176,8 @@ pop // output`, "int 1"},
func TestEd25519verify(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
var s crypto.Seed
crypto.RandBytes(s[:])
c := crypto.GenerateSignatureSecrets(s)
@@ -200,26 +200,26 @@ ed25519verify`, pkStr), v)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{data[:], sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
// short sig will fail
txn.Lsig.Args[1] = sig[1:]
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "invalid signature")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "invalid signature")
// flip a bit and it should not pass
msg1 := "52fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
data1, err := hex.DecodeString(msg1)
require.NoError(t, err)
txn.Lsig.Args = [][]byte{data1, sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "REJECT")
})
}
}
func TestEd25519VerifyBare(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
var s crypto.Seed
crypto.RandBytes(s[:])
c := crypto.GenerateSignatureSecrets(s)
@@ -240,18 +240,18 @@ ed25519verify_bare`, pkStr), v)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{data[:], sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
// short sig will fail
txn.Lsig.Args[1] = sig[1:]
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "invalid signature")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "invalid signature")
// flip a bit and it should not pass
msg1 := "52fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
data1, err := hex.DecodeString(msg1)
require.NoError(t, err)
txn.Lsig.Args = [][]byte{data1, sig[:]}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn), "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn), "REJECT")
})
}
}
@@ -446,7 +446,7 @@ ecdsa_verify Secp256k1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
ops := testProg(t, source, 5)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- pass, err := EvalSignature(0, defaultEvalParamsWithVersion(&txn, 5))
+ pass, err := EvalSignature(0, defaultEvalParamsWithVersion(5, txn))
require.NoError(t, err)
require.True(t, pass)
}
@@ -552,7 +552,7 @@ ecdsa_verify Secp256r1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
ops := testProg(t, source, fidoVersion)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- pass, err := EvalSignature(0, defaultEvalParamsWithVersion(&txn, fidoVersion))
+ pass, err := EvalSignature(0, defaultEvalParamsWithVersion(fidoVersion, txn))
require.NoError(t, err)
require.True(t, pass)
}
@@ -560,6 +560,7 @@ ecdsa_verify Secp256r1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
// test compatibility with ethereum signatures
func TestEcdsaEthAddress(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
/*
pip install eth-keys pycryptodome
@@ -589,6 +590,7 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
func TestEcdsaCostVariation(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// Doesn't matter if the actual verify returns true or false. Just confirm the cost depends on curve.
source := `
@@ -689,7 +691,7 @@ ed25519verify`, pkStr), AssemblerMaxVersion)
var txn transactions.SignedTxn
txn.Lsig.Logic = programs[i]
txn.Lsig.Args = [][]byte{data[i][:], signatures[i][:]}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
pass, err := EvalSignature(0, ep)
if !pass {
b.Log(hex.EncodeToString(programs[i]))
@@ -774,7 +776,7 @@ func benchmarkEcdsa(b *testing.B, source string, curve EcdsaCurve) {
var txn transactions.SignedTxn
txn.Lsig.Logic = data[i].programs
txn.Lsig.Args = [][]byte{data[i].msg[:], data[i].r, data[i].s, data[i].x, data[i].y, data[i].pk, {uint8(data[i].v)}}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
pass, err := EvalSignature(0, ep)
if !pass {
b.Log(hex.EncodeToString(data[i].programs))
@@ -897,7 +899,7 @@ func benchmarkBn256(b *testing.B, source string) {
var txn transactions.SignedTxn
txn.Lsig.Logic = data[i].programs
txn.Lsig.Args = [][]byte{data[i].a, data[i].k, data[i].g1, data[i].g2}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
pass, err := EvalSignature(0, ep)
if !pass {
b.Log(hex.EncodeToString(data[i].programs))
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index f5c87abb3..0d0676c16 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -49,17 +49,23 @@ func makeSampleEnv() (*EvalParams, *transactions.Transaction, *Ledger) {
}
func makeSampleEnvWithVersion(version uint64) (*EvalParams, *transactions.Transaction, *Ledger) {
- ep := defaultEvalParamsWithVersion(nil, version)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(makeSampleTxnGroup(makeSampleTxn()))
- ledger := MakeLedger(map[basics.Address]uint64{})
+ // We'd usually like an app in the group, so that the ep created is
+ // "complete". But to keep as many old tests working as possible, if
+ // version < appsEnabledVersion, don't put an appl txn in it.
+ firstTxn := makeSampleTxn()
+ if version >= appsEnabledVersion {
+ firstTxn.Txn.Type = protocol.ApplicationCallTx
+ }
+ ep := defaultEvalParamsWithVersion(version, makeSampleTxnGroup(firstTxn)...)
+ ledger := NewLedger(nil)
ep.SigLedger = ledger
ep.Ledger = ledger
return ep, &ep.TxnGroup[0].Txn, ledger
}
func makeOldAndNewEnv(version uint64) (*EvalParams, *EvalParams, *Ledger) {
- new, _, sharedLedger := makeSampleEnv()
- old, _, _ := makeSampleEnvWithVersion(version)
+ new, _, sharedLedger := makeSampleEnvWithVersion(version)
+ old, _, _ := makeSampleEnvWithVersion(version - 1)
old.Ledger = sharedLedger
return old, new, sharedLedger
}
@@ -238,8 +244,8 @@ log
// check err opcode work in both modes
source := "err"
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "err opcode executed")
- testApp(t, source, defaultEvalParams(nil), "err opcode executed")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(), "err opcode executed")
+ testApp(t, source, defaultEvalParams(), "err opcode executed")
// check that ed25519verify and arg is not allowed in stateful mode between v2-v4
disallowedV4 := []string{
@@ -252,7 +258,7 @@ log
}
for _, source := range disallowedV4 {
ops := testProg(t, source, 4)
- testAppBytes(t, ops.Program, defaultEvalParams(nil),
+ testAppBytes(t, ops.Program, defaultEvalParams(),
"not allowed in current mode", "not allowed in current mode")
}
@@ -266,7 +272,7 @@ log
}
for _, source := range disallowed {
ops := testProg(t, source, AssemblerMaxVersion)
- testAppBytes(t, ops.Program, defaultEvalParams(nil),
+ testAppBytes(t, ops.Program, defaultEvalParams(),
"not allowed in current mode", "not allowed in current mode")
}
@@ -289,7 +295,7 @@ log
}
for _, source := range statefulOpcodeCalls {
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil),
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(),
"not allowed in current mode", "not allowed in current mode")
}
@@ -328,7 +334,7 @@ func TestBalance(t *testing.T) {
testApp(t, text, ep)
}
-func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn, version uint64, ledger LedgerForLogic,
+func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn, version uint64, ledger *Ledger,
expected ...Expect) {
t.Helper()
codes := make([][]byte, len(programs))
@@ -348,8 +354,9 @@ func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn,
}
ep := NewEvalParams(transactions.WrapSignedTxnsWithAD(txgroup), makeTestProtoV(version), &transactions.SpecialAddresses{})
if ledger == nil {
- ledger = MakeLedger(nil)
+ ledger = NewLedger(nil)
}
+ ledger.Reset()
ep.Ledger = ledger
testAppsBytes(t, codes, ep, expected...)
}
@@ -359,11 +366,15 @@ func testAppsBytes(t *testing.T, programs [][]byte, ep *EvalParams, expected ...
require.Equal(t, len(programs), len(ep.TxnGroup))
for i := range ep.TxnGroup {
if programs[i] != nil {
+ appID := ep.TxnGroup[i].Txn.ApplicationID
+ if appID == 0 {
+ appID = basics.AppIndex(888)
+ }
if len(expected) > 0 && expected[0].l == i {
- testAppFull(t, programs[i], i, basics.AppIndex(888), ep, expected[0].s)
+ testAppFull(t, programs[i], i, appID, ep, expected[0].s)
break // Stop after first failure
} else {
- testAppFull(t, programs[i], i, basics.AppIndex(888), ep)
+ testAppFull(t, programs[i], i, appID, ep)
}
}
}
@@ -379,7 +390,7 @@ func testAppBytes(t *testing.T, program []byte, ep *EvalParams, problems ...stri
t.Helper()
ep.reset()
aid := ep.TxnGroup[0].Txn.ApplicationID
- if aid == basics.AppIndex(0) {
+ if aid == 0 {
aid = basics.AppIndex(888)
}
return testAppFull(t, program, 0, aid, ep, problems...)
@@ -423,7 +434,7 @@ func testAppFull(t *testing.T, program []byte, gi int, aid basics.AppIndex, ep *
// the best way to be concise about all sorts of tests.
if ep.Ledger == nil {
- ep.Ledger = MakeLedger(nil)
+ ep.Ledger = NewLedger(nil)
}
pass, err := EvalApp(program, gi, aid, ep)
@@ -488,24 +499,13 @@ func TestMinBalance(t *testing.T) {
func TestAppCheckOptedIn(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- txn := makeSampleTxn()
- txgroup := makeSampleTxnGroup(txn)
- now := defaultEvalParams(&txn)
- now.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
- pre := defaultEvalParamsWithVersion(&txn, directRefEnabledVersion-1)
- pre.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
+ pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Receiver: 1,
- txn.Txn.Sender: 1,
- },
- )
- now.Ledger = ledger
- pre.Ledger = ledger
+ txn := pre.TxnGroup[0]
+ ledger.NewAccount(txn.Txn.Receiver, 1)
+ ledger.NewAccount(txn.Txn.Sender, 1)
testApp(t, "int 2; int 100; app_opted_in; int 1; ==", now, "invalid Account reference")
// Receiver is not opted in
@@ -561,7 +561,7 @@ exit:
int 1
==`
- pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion - 1)
+ pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion)
ledger.NewAccount(now.TxnGroup[0].Txn.Receiver, 1)
testApp(t, text, now, "invalid Account reference")
@@ -694,7 +694,6 @@ int 0
func TestAppReadGlobalState(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
text := `int 0
@@ -720,7 +719,7 @@ byte 0x414c474f
==
&&
`
- pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion - 1)
+ pre, now, ledger := makeOldAndNewEnv(directRefEnabledVersion)
ledger.NewAccount(now.TxnGroup[0].Txn.Sender, 1)
now.TxnGroup[0].Txn.ApplicationID = 100
@@ -770,13 +769,13 @@ int 4141
now.TxnGroup[0].Txn.ApplicationID = 0
now.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{100}
- testAppFull(t, testProg(t, text, LogicVersion).Program, 0, 100, now)
+ testAppFull(t, testProg(t, text, directRefEnabledVersion).Program, 0, 100, now)
// Direct reference to the current app also works
now.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{}
- testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "int 100", -1), LogicVersion).Program,
+ testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "int 100", -1), directRefEnabledVersion).Program,
0, 100, now)
- testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "global CurrentApplicationID", -1), LogicVersion).Program,
+ testAppFull(t, testProg(t, strings.Replace(text, "int 1 // ForeignApps index", "global CurrentApplicationID", -1), directRefEnabledVersion).Program,
0, 100, now)
}
@@ -918,11 +917,11 @@ func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64)
}
}
- txn := makeSampleTxn()
- pre := defaultEvalParamsWithVersion(&txn, directRefEnabledVersion-1)
+ txn := makeSampleAppl(888)
+ pre := defaultEvalParamsWithVersion(directRefEnabledVersion-1, txn)
require.GreaterOrEqual(t, version, uint64(directRefEnabledVersion))
- now := defaultEvalParamsWithVersion(&txn, version)
- ledger := MakeLedger(
+ now := defaultEvalParamsWithVersion(version, txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1132,6 +1131,59 @@ func TestAcctParams(t *testing.T) {
source = "int 0; acct_params_get AcctAuthAddr; assert; global ZeroAddress; =="
testApp(t, source, ep)
+
+ // No apps or schema at first, then 1 created and the global schema noted
+ source = "int 0; acct_params_get AcctTotalAppsCreated; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumUint; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalExtraAppPages; assert; !"
+ testApp(t, source, ep)
+ ledger.NewApp(tx.Sender, 2000, basics.AppParams{
+ StateSchemas: basics.StateSchemas{
+ LocalStateSchema: basics.StateSchema{
+ NumUint: 6,
+ NumByteSlice: 7,
+ },
+ GlobalStateSchema: basics.StateSchema{
+ NumUint: 8,
+ NumByteSlice: 9,
+ },
+ },
+ ExtraProgramPages: 2,
+ })
+ source = "int 0; acct_params_get AcctTotalAppsCreated; assert; int 1; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumUint; assert; int 8; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; int 9; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalExtraAppPages; assert; int 2; =="
+ testApp(t, source, ep)
+
+ // Not opted in at first, then opted into 1, schema added
+ source = "int 0; acct_params_get AcctTotalAppsOptedIn; assert; !"
+ testApp(t, source, ep)
+ ledger.NewLocals(tx.Sender, 2000)
+ source = "int 0; acct_params_get AcctTotalAppsOptedIn; assert; int 1; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumUint; assert; int 8; int 6; +; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; int 9; int 7; +; =="
+ testApp(t, source, ep)
+
+ // No ASAs at first, then 1 created AND in total
+ source = "int 0; acct_params_get AcctTotalAssetsCreated; assert; !"
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalAssets; assert; !"
+ testApp(t, source, ep)
+ ledger.NewAsset(tx.Sender, 3000, basics.AssetParams{})
+ source = "int 0; acct_params_get AcctTotalAssetsCreated; assert; int 1; =="
+ testApp(t, source, ep)
+ source = "int 0; acct_params_get AcctTotalAssets; assert; int 1; =="
+ testApp(t, source, ep)
}
func TestGlobalNonDelete(t *testing.T) {
@@ -1230,13 +1282,14 @@ intc_1
ops := testProg(t, source, AssemblerMaxVersion)
- txn := makeSampleTxn()
+ var txn transactions.SignedTxn
+ txn.Txn.Type = protocol.ApplicationCallTx
txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
err := CheckContract(ops.Program, ep)
require.NoError(t, err)
- ledger := MakeLedger(
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1284,13 +1337,11 @@ intc_1
func TestAppLocalStateReadWrite(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
+ txn := makeSampleAppl(100)
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1511,6 +1562,36 @@ int 1
require.Equal(t, uint64(0x79), vd.Uint)
}
+func TestAppLocalGlobalErrorCases(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, tx, ledger := makeSampleEnv()
+ ledger.NewApp(tx.Sender, 888, basics.AppParams{})
+
+ testApp(t, fmt.Sprintf(`byte "%v"; int 1; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
+
+ testApp(t, fmt.Sprintf(`byte "%v"; int 1; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
+
+ ledger.NewLocals(tx.Sender, 888)
+ testApp(t, fmt.Sprintf(`int 0; byte "%v"; int 1; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
+
+ testApp(t, fmt.Sprintf(`int 0; byte "%v"; int 1; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
+
+ testApp(t, fmt.Sprintf(`byte "foo"; byte "%v"; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
+
+ testApp(t, fmt.Sprintf(`byte "foo"; byte "%v"; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
+
+ testApp(t, fmt.Sprintf(`int 0; byte "foo"; byte "%v"; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
+
+ testApp(t, fmt.Sprintf(`int 0; byte "foo"; byte "%v"; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
+
+ ep.Proto.MaxAppSumKeyValueLens = 2 // Override to generate error.
+ testApp(t, `byte "foo"; byte "foo"; app_global_put; int 1`, ep, "key/value total too long for key")
+
+ testApp(t, `int 0; byte "foo"; byte "foo"; app_local_put; int 1`, ep, "key/value total too long for key")
+}
+
func TestAppGlobalReadWriteDeleteErrors(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1635,11 +1716,10 @@ int 0x77
==
&&
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
+ txn := makeSampleAppl(100)
txn.Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID}
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -1774,24 +1854,19 @@ ok2:
byte "myval"
==
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- txn.Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID, 101}
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
- ep.Ledger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
+
+ ep, txn, ledger := makeSampleEnv()
+ txn.ApplicationID = 100
+ txn.ForeignApps = []basics.AppIndex{txn.ApplicationID, 101}
+ ledger.NewAccount(txn.Sender, 1)
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
delta := testApp(t, source, ep, "no such app")
require.Empty(t, delta.GlobalDelta)
require.Empty(t, delta.LocalDeltas)
- ledger.NewApp(txn.Txn.Receiver, 101, basics.AppParams{})
- ledger.NewApp(txn.Txn.Receiver, 100, basics.AppParams{}) // this keeps current app id = 100
+ ledger.NewApp(txn.Receiver, 101, basics.AppParams{})
+ ledger.NewApp(txn.Receiver, 100, basics.AppParams{}) // this keeps current app id = 100
algoValue := basics.TealValue{Type: basics.TealBytesType, Bytes: "myval"}
ledger.NewGlobal(101, "mykey", algoValue)
@@ -1820,14 +1895,10 @@ app_global_get
int 7
==
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
+ txn := makeSampleAppl(100)
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(nil)
+ ledger.NewAccount(txn.Txn.Sender, 1)
ep.Ledger = ledger
ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
@@ -1837,7 +1908,6 @@ int 7
func TestAppGlobalDelete(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
// check write/delete/read
@@ -1866,16 +1936,10 @@ err
ok:
int 1
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
- ep.Ledger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
+ ep, txn, ledger := makeSampleEnv()
+ ledger.NewAccount(txn.Sender, 1)
+ txn.ApplicationID = 100
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
delta := testApp(t, source, ep)
require.Len(t, delta.GlobalDelta, 2)
@@ -1896,7 +1960,7 @@ byte 0x414c474f
app_global_get_ex
== // two zeros
`
- ep.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID}
+ txn.ForeignApps = []basics.AppIndex{txn.ApplicationID}
delta = testApp(t, source, ep)
require.Len(t, delta.GlobalDelta, 1)
vd := delta.GlobalDelta["ALGO"]
@@ -1997,7 +2061,6 @@ int 1
func TestAppLocalDelete(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
// check write/delete/read
@@ -2032,10 +2095,9 @@ err
ok:
int 1
`
- txn := makeSampleTxn()
- txn.Txn.ApplicationID = 100
- ep := defaultEvalParams(&txn)
- ledger := MakeLedger(
+ txn := makeSampleAppl(100)
+ ep := defaultEvalParams(txn)
+ ledger := NewLedger(
map[basics.Address]uint64{
txn.Txn.Sender: 1,
},
@@ -2195,6 +2257,7 @@ int 1
func TestEnumFieldErrors(t *testing.T) {
partitiontest.PartitionTest(t)
+ // t.Parallel() NO! manipulates globalFieldSpecs
source := `txn Amount`
origSpec := txnFieldSpecs[Amount]
@@ -2205,8 +2268,8 @@ func TestEnumFieldErrors(t *testing.T) {
txnFieldSpecs[Amount] = origSpec
}()
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "Amount expected field type is []byte but got uint64")
- testApp(t, source, defaultEvalParams(nil), "Amount expected field type is []byte but got uint64")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(), "Amount expected field type is []byte but got uint64")
+ testApp(t, source, defaultEvalParams(), "Amount expected field type is []byte but got uint64")
source = `global MinTxnFee`
@@ -2218,8 +2281,8 @@ func TestEnumFieldErrors(t *testing.T) {
globalFieldSpecs[MinTxnFee] = origMinTxnFs
}()
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "MinTxnFee expected field type is []byte but got uint64")
- testApp(t, source, defaultEvalParams(nil), "MinTxnFee expected field type is []byte but got uint64")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(), "MinTxnFee expected field type is []byte but got uint64")
+ testApp(t, source, defaultEvalParams(), "MinTxnFee expected field type is []byte but got uint64")
ep, tx, ledger := makeSampleEnv()
ledger.NewAccount(tx.Sender, 1)
@@ -2269,13 +2332,13 @@ assert
func TestReturnTypes(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// Ensure all opcodes return values they are supposed to according to the OpSpecs table
- t.Parallel()
typeToArg := map[StackType]string{
StackUint64: "int 1\n",
StackAny: "int 1\n",
- StackBytes: "byte 0x33343536\n",
+ StackBytes: "byte 0x33343536\n", // Which is the string "3456"
}
ep, tx, ledger := makeSampleEnv()
@@ -2283,6 +2346,9 @@ func TestReturnTypes(t *testing.T) {
tx.ApplicationID = 1
tx.ForeignApps = []basics.AppIndex{tx.ApplicationID}
tx.ForeignAssets = []basics.AssetIndex{basics.AssetIndex(1), basics.AssetIndex(1)}
+ tx.Boxes = []transactions.BoxRef{{
+ Name: []byte("3456"),
+ }}
ep.TxnGroup[0].Lsig.Args = [][]byte{
[]byte("aoeu"),
[]byte("aoeu"),
@@ -2318,7 +2384,7 @@ func TestReturnTypes(t *testing.T) {
ledger.NewAccount(appAddr(1), 1000000)
// We try to form a snippet that will test every opcode, by sandwiching it
- // between arguments that correspond to the opcodes input types, and then
+ // between arguments that correspond to the opcode's input types, and then
// check to see if the proper output types end up on the stack. But many
// opcodes require more specific inputs than a constant string or the number
// 1 for ints. Defaults are also supplied for immediate arguments. For
@@ -2377,6 +2443,9 @@ func TestReturnTypes(t *testing.T) {
"proto": "callsub p; p: proto 0 3",
"bury": ": int 1; int 2; int 3; bury 2; pop; pop;",
+
+ "box_create": "int 9; +; box_create", // make the size match the 10 in CreateBox
+ "box_put": "byte 0x010203040506; concat; box_put", // make the 4 byte arg into a 10
}
/* Make sure the specialCmd tests the opcode in question */
@@ -2399,12 +2468,12 @@ func TestReturnTypes(t *testing.T) {
"vrf_verify": true,
+ "frame_dig": true, // would need a "proto" subroutine
+ "frame_bury": true, // would need a "proto" subroutine
+
"bn256_add": true,
"bn256_scalar_mul": true,
"bn256_pairing": true,
-
- "frame_dig": true, // would need a "proto" subroutine
- "frame_bury": true, // would need a "proto" subroutine
}
byName := OpsByName[LogicVersion]
@@ -2462,6 +2531,9 @@ func TestReturnTypes(t *testing.T) {
ep.reset() // for Trace and budget isolation
ep.pastScratch[0] = &scratchSpace{} // for gload
+ // these allow the box_* opcodes to work
+ ledger.CreateBox(1, "3456", 10)
+ ep.ioBudget = 50
cx := EvalContext{
EvalParams: ep,
@@ -2554,8 +2626,8 @@ func TestBlockSeed(t *testing.T) {
// makeSampleEnv creates txns with fv, lv that don't actually fit the round
// in l. Nothing in most tests cares. But the rule for `block` is related
// to lv and fv, so we set the fv,lv more realistically.
- txn.FirstValid = l.round() - 10
- txn.LastValid = l.round() + 10
+ txn.FirstValid = l.Round() - 10
+ txn.LastValid = l.Round() + 10
// Keep in mind that proto.MaxTxnLife is 1500 in the test proto
@@ -2622,7 +2694,7 @@ func TestPooledAppCallsVerifyOp(t *testing.T) {
pop
int 1`
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
call := transactions.SignedTxn{Txn: transactions.Transaction{Type: protocol.ApplicationCallTx}}
// Simulate test with 2 grouped txn
testApps(t, []string{source, ""}, []transactions.SignedTxn{call, call}, LogicVersion, ledger,
@@ -2658,8 +2730,8 @@ func TestAppInfo(t *testing.T) {
func TestBudget(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- ep := defaultEvalParams(nil)
source := `
global OpcodeBudget
int 699
@@ -2669,11 +2741,12 @@ global OpcodeBudget
int 695
==
`
- testApp(t, source, ep)
+ testApp(t, source, defaultEvalParams())
}
func TestSelfMutate(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
ep, _, ledger := makeSampleEnv()
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 130167bf0..9fa175373 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -47,12 +47,15 @@ func makeTestProto() *config.ConsensusParams {
func makeTestProtoV(version uint64) *config.ConsensusParams {
return &config.ConsensusParams{
- LogicSigVersion: version,
- LogicSigMaxCost: 20000,
- Application: version >= appsEnabledVersion,
- MaxAppProgramCost: 700,
- MaxAppKeyLen: 64,
- MaxAppBytesValueLen: 64,
+ LogicSigVersion: version,
+ LogicSigMaxCost: 20000,
+ Application: version >= appsEnabledVersion,
+ MaxAppProgramCost: 700,
+
+ MaxAppKeyLen: 64,
+ MaxAppBytesValueLen: 64,
+ MaxAppSumKeyValueLens: 128,
+
// These must be identical to keep an old backward compat test working
MinTxnFee: 1001,
MinBalance: 1001,
@@ -104,15 +107,18 @@ func makeTestProtoV(version uint64) *config.ConsensusParams {
SupportBecomeNonParticipatingTransactions: true,
UnifyInnerTxIDs: true,
+
+ MaxBoxSize: 1000,
+ BytesPerBoxReference: 100,
}
}
-func defaultEvalParams(txn *transactions.SignedTxn) *EvalParams {
- return defaultEvalParamsWithVersion(txn, LogicVersion)
+func defaultEvalParams(txns ...transactions.SignedTxn) *EvalParams {
+ return defaultEvalParamsWithVersion(LogicVersion, txns...)
}
-func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams {
- ep := defaultEvalParamsWithVersion(txn, LogicVersion)
+func benchmarkEvalParams(txn transactions.SignedTxn) *EvalParams {
+ ep := defaultEvalParams(txn)
ep.Trace = nil // Tracing would slow down benchmarks
clone := *ep.Proto
bigBudget := 1000 * 1000 * 1000 // Allow long run times
@@ -123,23 +129,28 @@ func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams {
return ep
}
-func defaultEvalParamsWithVersion(txn *transactions.SignedTxn, version uint64) *EvalParams {
- var zero uint64
- ep := &EvalParams{
- Proto: makeTestProtoV(version),
- TxnGroup: make([]transactions.SignedTxnWithAD, 1),
- Specials: &transactions.SpecialAddresses{},
- Trace: &strings.Builder{},
- FeeCredit: &zero,
- SigLedger: MakeLedger(nil),
+func defaultEvalParamsWithVersion(version uint64, txns ...transactions.SignedTxn) *EvalParams {
+ empty := false
+ if len(txns) == 0 {
+ empty = true
+ txns = []transactions.SignedTxn{{Txn: transactions.Transaction{Type: protocol.ApplicationCallTx}}}
}
- if txn != nil {
- ep.TxnGroup[0].SignedTxn = *txn
+ ep := NewEvalParams(transactions.WrapSignedTxnsWithAD(txns), makeTestProtoV(version), &transactions.SpecialAddresses{})
+ ep.Trace = &strings.Builder{}
+ ep.SigLedger = NewLedger(nil)
+ if empty {
+ // We made an app type in order to get a full ep, but that sets MinTealVersion=2
+ ep.TxnGroup[0].Txn.Type = "" // set it back
+ ep.MinAvmVersion = nil // will recalculate in eval()
}
- ep.reset()
return ep
}
+// `supportsAppEval` is a test helper method for disambiguating whether `EvalParams` is suitable for logicsig vs app evaluations.
+func (ep *EvalParams) supportsAppEval() bool {
+ return ep.available != nil
+}
+
// reset puts an ep back into its original state. This is in *_test.go because
// no real code should ever need this. EvalParams should be created to evaluate
// a group, and then thrown away.
@@ -156,9 +167,23 @@ func (ep *EvalParams) reset() {
for i := range ep.TxnGroup {
ep.TxnGroup[i].ApplyData = transactions.ApplyData{}
}
- ep.created = &resources{}
+ if ep.available != nil {
+ ep.available.apps = nil
+ ep.available.asas = nil
+ // reinitialize boxes because evaluation can add box refs for app creates.
+ available := NewEvalParams(ep.TxnGroup, ep.Proto, ep.Specials).available
+ if available != nil {
+ ep.available.boxes = available.boxes
+ }
+ ep.available.dirtyBytes = 0
+ }
+ ep.readBudgetChecked = false
ep.appAddrCache = make(map[basics.AppIndex]basics.Address)
- ep.Trace = &strings.Builder{}
+ if ep.Trace != nil {
+ ep.Trace = &strings.Builder{}
+ }
+ ep.txidCache = nil
+ ep.innerTxidCache = nil
}
func TestTooManyArgs(t *testing.T) {
@@ -172,7 +197,7 @@ func TestTooManyArgs(t *testing.T) {
txn.Lsig.Logic = ops.Program
args := [transactions.EvalMaxArgs + 1][]byte{}
txn.Lsig.Args = args[:]
- pass, err := EvalSignature(0, defaultEvalParams(&txn))
+ pass, err := EvalSignature(0, defaultEvalParams(txn))
require.Error(t, err)
require.False(t, pass)
})
@@ -183,7 +208,7 @@ func TestEmptyProgram(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testLogicBytes(t, nil, defaultEvalParams(nil), "invalid", "invalid program (empty)")
+ testLogicBytes(t, nil, defaultEvalParams(), "invalid", "invalid program (empty)")
}
// TestMinAvmVersionParamEval tests eval/check reading the MinAvmVersion from the param
@@ -191,7 +216,7 @@ func TestMinAvmVersionParamEvalCheckSignature(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- params := defaultEvalParams(nil)
+ params := defaultEvalParams()
version2 := uint64(rekeyingEnabledVersion)
params.MinAvmVersion = &version2
program := make([]byte, binary.MaxVarintLen64)
@@ -314,7 +339,7 @@ func TestWrongProtoVersion(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, "int 1", v)
- ep := defaultEvalParamsWithVersion(nil, 0)
+ ep := defaultEvalParamsWithVersion(0)
testAppBytes(t, ops.Program, ep, "LogicSig not supported", "LogicSig not supported")
})
}
@@ -361,7 +386,7 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E=
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
err := CheckSignature(0, ep)
require.NoError(t, err)
pass, err := EvalSignature(0, ep)
@@ -422,7 +447,7 @@ func TestTLHC(t *testing.T) {
txn.Lsig.Args = [][]byte{secret}
txn.Txn.FirstValid = 999999
block := bookkeeping.Block{}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
err := CheckSignature(0, ep)
if err != nil {
t.Log(hex.EncodeToString(ops.Program))
@@ -439,7 +464,7 @@ func TestTLHC(t *testing.T) {
txn.Txn.Receiver = a2
txn.Txn.CloseRemainderTo = a2
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if !pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -451,7 +476,7 @@ func TestTLHC(t *testing.T) {
txn.Txn.Receiver = a2
txn.Txn.CloseRemainderTo = a2
txn.Txn.FirstValid = 1
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -463,7 +488,7 @@ func TestTLHC(t *testing.T) {
txn.Txn.Receiver = a1
txn.Txn.CloseRemainderTo = a1
txn.Txn.FirstValid = 999999
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if !pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -475,7 +500,7 @@ func TestTLHC(t *testing.T) {
// wrong answer
txn.Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849a")}
block.BlockHeader.Round = 1
- ep = defaultEvalParams(&txn)
+ ep = defaultEvalParams(txn)
pass, err = EvalSignature(0, ep)
if pass {
t.Log(hex.EncodeToString(ops.Program))
@@ -489,22 +514,22 @@ func TestTLHC(t *testing.T) {
func TestU64Math(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "int 0x1234567812345678; int 0x100000000; /; int 0x12345678; ==", 1)
}
func TestItob(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "byte 0x1234567812345678; int 0x1234567812345678; itob; ==", 1)
}
func TestBtoi(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "int 0x1234567812345678; byte 0x1234567812345678; btoi; ==", 1)
testAccepts(t, "int 0x34567812345678; byte 0x34567812345678; btoi; ==", 1)
testAccepts(t, "int 0x567812345678; byte 0x567812345678; btoi; ==", 1)
@@ -913,7 +938,7 @@ func TestTxnBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x31, 0x7f}
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid txn field")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid txn field")
// TODO: Check should know the type stack was wrong
// test txn does not accept ApplicationArgs and Accounts
@@ -926,7 +951,7 @@ func TestTxnBadField(t *testing.T) {
ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, txnaOpcode, ops.Program[1])
ops.Program[1] = txnOpcode
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "invalid txn field")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), fmt.Sprintf("invalid txn field %s", field))
}
}
@@ -935,7 +960,7 @@ func TestGtxnBadIndex(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x33, 0x1, 0x01}
- testLogicBytes(t, program, defaultEvalParams(nil), "txn index 1")
+ testLogicBytes(t, program, defaultEvalParams(), "txn index 1")
}
func TestGtxnBadField(t *testing.T) {
@@ -944,7 +969,7 @@ func TestGtxnBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x33, 0x0, 127}
// TODO: Check should know the type stack was wrong
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid txn field TxnField(127)")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid txn field TxnField(127)")
// test gtxn does not accept ApplicationArgs and Accounts
txnOpcode := OpsByName[LogicVersion]["txn"].Opcode
@@ -956,7 +981,7 @@ func TestGtxnBadField(t *testing.T) {
ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, txnaOpcode, ops.Program[1])
ops.Program[1] = txnOpcode
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "invalid txn field")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), fmt.Sprintf("invalid txn field %s", field))
}
}
@@ -965,7 +990,7 @@ func TestGlobalBadField(t *testing.T) {
t.Parallel()
program := []byte{0x01, 0x32, 127}
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid global field")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid global field")
}
func TestArg(t *testing.T) {
@@ -988,7 +1013,7 @@ func TestArg(t *testing.T) {
[]byte("aoeu4"),
}
ops := testProg(t, source, v)
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
})
}
}
@@ -1080,6 +1105,10 @@ const globalV8TestProgram = globalV7TestProgram + `
// No new globals in v8
`
+const globalV9TestProgram = globalV8TestProgram + `
+// No new globals in v9
+`
+
func TestGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1099,12 +1128,13 @@ func TestGlobal(t *testing.T) {
6: {CallerApplicationAddress, globalV6TestProgram},
7: {CallerApplicationAddress, globalV7TestProgram},
8: {CallerApplicationAddress, globalV8TestProgram},
+ 9: {CallerApplicationAddress, globalV9TestProgram},
}
// tests keys are versions so they must be in a range 1..AssemblerMaxVersion plus zero version
require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1)
require.Len(t, globalFieldSpecs, int(invalidGlobalField))
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
addr, err := basics.UnmarshalChecksumAddress(testAddr)
require.NoError(t, err)
ledger.NewApp(addr, 888, basics.AppParams{})
@@ -1120,10 +1150,14 @@ func TestGlobal(t *testing.T) {
}
}
- txn := transactions.SignedTxn{}
- txn.Txn.Group = crypto.Digest{0x07, 0x06}
+ appcall := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ },
+ }
+ appcall.Txn.Group = crypto.Digest{0x07, 0x06}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(appcall)
ep.Ledger = ledger
testApp(t, tests[v].program, ep)
})
@@ -1168,11 +1202,11 @@ int %s
txn := transactions.SignedTxn{}
txn.Txn.Type = tt
if v < appsEnabledVersion && tt == protocol.ApplicationCallTx {
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn),
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn),
"program version must be", "program version must be")
return
}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
})
}
})
@@ -1266,13 +1300,25 @@ txn VoteKeyDilution
int 1
==
&&
+
txn Type
-byte 0x706179
+byte "pay"
+==
+txn Type
+byte "appl"
==
+||
+
&&
+
txn TypeEnum
int 1
==
+txn TypeEnum
+int 6
+==
+||
+
&&
txn XferAsset
int 10
@@ -1576,6 +1622,14 @@ int 1
`
const testTxnProgramTextV8 = testTxnProgramTextV7 + `
+assert
+// though box refs introduced in v8, they are not exposed to AVM (yet?)
+int 1
+`
+
+const testTxnProgramTextV9 = testTxnProgramTextV8 + `
+assert
+int 1
`
func makeSampleTxn() transactions.SignedTxn {
@@ -1642,11 +1696,19 @@ func makeSampleTxn() transactions.SignedTxn {
txn.Txn.AssetFrozen = true
txn.Txn.ForeignAssets = []basics.AssetIndex{55, 77}
txn.Txn.ForeignApps = []basics.AppIndex{56, 100, 111} // 100 must be 2nd, 111 must be present
+ txn.Txn.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("self")}, {Index: 0, Name: []byte("other")}}
txn.Txn.GlobalStateSchema = basics.StateSchema{NumUint: 3, NumByteSlice: 0}
txn.Txn.LocalStateSchema = basics.StateSchema{NumUint: 1, NumByteSlice: 2}
return txn
}
+func makeSampleAppl(app basics.AppIndex) transactions.SignedTxn {
+ sample := makeSampleTxn()
+ sample.Txn.Type = protocol.ApplicationCallTx
+ sample.Txn.ApplicationID = app
+ return sample
+}
+
// makeSampleTxnGroup creates a sample txn group. If less than two transactions
// are supplied, samples are used.
func makeSampleTxnGroup(txns ...transactions.SignedTxn) []transactions.SignedTxn {
@@ -1681,6 +1743,7 @@ func TestTxn(t *testing.T) {
6: testTxnProgramTextV6,
7: testTxnProgramTextV7,
8: testTxnProgramTextV8,
+ 9: testTxnProgramTextV9,
}
for i, txnField := range TxnFieldNames {
@@ -1706,6 +1769,9 @@ func TestTxn(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, source, v)
txn := makeSampleTxn()
+ if v >= appsEnabledVersion {
+ txn.Txn.Type = protocol.ApplicationCallTx
+ }
txn.Txn.ApprovalProgram = ops.Program
txn.Txn.ClearStateProgram = clearOps.Program
txn.Lsig.Logic = ops.Program
@@ -1730,9 +1796,8 @@ func TestTxn(t *testing.T) {
programHash[:],
clearProgramHash[:],
}
- // Since we test GroupIndex ==3, we need to fake up such a group
- ep := defaultEvalParams(nil)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD([]transactions.SignedTxn{txn, txn, txn, txn})
+ // Since we test GroupIndex ==3, we need a larger group
+ ep := defaultEvalParams(txn, txn, txn, txn)
ep.TxnGroup[2].EvalDelta.Logs = []string{"x", "prefilled"}
if v < txnEffectsVersion {
testLogicFull(t, ops.Program, 3, ep)
@@ -1816,16 +1881,12 @@ func TestGaid(t *testing.T) {
t.Parallel()
check0 := testProg(t, "gaid 0; int 100; ==", 4)
- txn := makeSampleTxn()
- txn.Txn.Type = protocol.ApplicationCallTx
- txgroup := make([]transactions.SignedTxn, 3)
- txgroup[1] = txn
+ appTxn := makeSampleTxn()
+ appTxn.Txn.Type = protocol.ApplicationCallTx
targetTxn := makeSampleTxn()
targetTxn.Txn.Type = protocol.AssetConfigTx
- txgroup[0] = targetTxn
- ep := defaultEvalParams(nil)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
- ep.Ledger = MakeLedger(nil)
+ ep := defaultEvalParams(targetTxn, appTxn, makeSampleTxn())
+ ep.Ledger = NewLedger(nil)
// should fail when no creatable was created
_, err := EvalApp(check0.Program, 1, 888, ep)
@@ -1982,8 +2043,7 @@ gtxn 0 Sender
txn.Txn.SelectionPK[:],
txn.Txn.Note,
}
- ep := defaultEvalParams(&txn)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(makeSampleTxnGroup(txn))
+ ep := defaultEvalParams(makeSampleTxnGroup(txn)...)
testLogic(t, source, v, ep)
if v >= 3 {
gtxnsProg := strings.ReplaceAll(source, "gtxn 0", "int 0; gtxns")
@@ -2072,7 +2132,7 @@ txna ApplicationArgs 0
txn.Txn.Accounts = make([]basics.Address, 1)
txn.Txn.Accounts[0] = txn.Txn.Sender
txn.Txn.ApplicationArgs = [][]byte{txn.Txn.Sender[:]}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
testLogicBytes(t, ops.Program, ep)
// modify txn field
@@ -2106,7 +2166,7 @@ txn Sender
ops2 := testProg(t, source, AssemblerMaxVersion)
var txn2 transactions.SignedTxn
copy(txn2.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- ep2 := defaultEvalParams(&txn2)
+ ep2 := defaultEvalParams(txn2)
testLogicBytes(t, ops2.Program, ep2)
// check gtxna
@@ -2147,7 +2207,7 @@ txn Sender
ops3 := testProg(t, source, AssemblerMaxVersion)
var txn3 transactions.SignedTxn
copy(txn2.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- ep3 := defaultEvalParams(&txn3)
+ ep3 := defaultEvalParams(txn3)
testLogicBytes(t, ops3.Program, ep3)
}
@@ -2166,10 +2226,10 @@ int 0
var txn transactions.SignedTxn
txn.Txn.ApplicationArgs = make([][]byte, 1)
txn.Txn.ApplicationArgs[0] = []byte("")
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
txn.Txn.ApplicationArgs[0] = nil
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn))
source2 := `txna Accounts 1
global ZeroAddress
@@ -2180,10 +2240,10 @@ global ZeroAddress
var txn2 transactions.SignedTxn
txn2.Txn.Accounts = make([]basics.Address, 1)
txn2.Txn.Accounts[0] = basics.Address{}
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn2))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn2))
txn2.Txn.Accounts = make([]basics.Address, 1)
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn2))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn2))
}
func TestTxnBigPrograms(t *testing.T) {
@@ -2209,14 +2269,14 @@ int 1
for i := range txn.Txn.ApprovalProgram {
txn.Txn.ApprovalProgram[i] = byte(i % 7)
}
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(&txn))
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(txn))
- testLogic(t, `txna ApprovalProgramPages 2`, AssemblerMaxVersion, defaultEvalParams(&txn),
+ testLogic(t, `txna ApprovalProgramPages 2`, AssemblerMaxVersion, defaultEvalParams(txn),
"invalid ApprovalProgramPages index")
// ClearStateProgram is not in the txn at all
- testLogic(t, `txn NumClearStateProgramPages; !`, AssemblerMaxVersion, defaultEvalParams(&txn))
- testLogic(t, `txna ClearStateProgramPages 0`, AssemblerMaxVersion, defaultEvalParams(&txn),
+ testLogic(t, `txn NumClearStateProgramPages; !`, AssemblerMaxVersion, defaultEvalParams(txn))
+ testLogic(t, `txna ClearStateProgramPages 0`, AssemblerMaxVersion, defaultEvalParams(txn),
"invalid ClearStateProgramPages index")
}
@@ -2236,7 +2296,7 @@ txnas ApplicationArgs
txn.Txn.Accounts = make([]basics.Address, 1)
txn.Txn.Accounts[0] = txn.Txn.Sender
txn.Txn.ApplicationArgs = [][]byte{txn.Txn.Sender[:]}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
testLogicBytes(t, ops.Program, ep)
// check special case: Account 0 == Sender
@@ -2249,7 +2309,7 @@ txn Sender
ops = testProg(t, source, AssemblerMaxVersion)
var txn2 transactions.SignedTxn
copy(txn2.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn2))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn2))
// check gtxnas
source = `int 1
@@ -2269,7 +2329,7 @@ txn Sender
ops = testProg(t, source, AssemblerMaxVersion)
var txn3 transactions.SignedTxn
copy(txn3.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
- testLogicBytes(t, ops.Program, defaultEvalParams(&txn3))
+ testLogicBytes(t, ops.Program, defaultEvalParams(txn3))
// check gtxnsas
source = `int 0
@@ -2301,8 +2361,8 @@ int 0x310
func TestStringOps(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, `byte 0x123456789abc
substring 1 3
byte 0x3456
@@ -2398,6 +2458,7 @@ len`, 2)
func TestExtractOp(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+
testAccepts(t, "byte 0x123456789abc; extract 1 2; byte 0x3456; ==", 5)
testAccepts(t, "byte 0x123456789abc; extract 0 6; byte 0x123456789abc; ==", 5)
testAccepts(t, "byte 0x123456789abc; extract 3 0; byte 0x789abc; ==", 5)
@@ -2438,41 +2499,41 @@ func TestExtractFlop(t *testing.T) {
err := testPanics(t, `byte 0xf000000000000000
extract 1 8
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction end 9")
err = testPanics(t, `byte 0xf000000000000000
extract 9 0
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 9")
err = testPanics(t, `byte 0xf000000000000000
int 4
int 0xFFFFFFFFFFFFFFFE
extract3
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction end exceeds uint64")
err = testPanics(t, `byte 0xf000000000000000
int 100
int 2
extract3
len`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 100")
err = testPanics(t, `byte 0xf000000000000000
int 55
extract_uint16`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 55")
err = testPanics(t, `byte 0xf000000000000000
int 9
extract_uint32`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction start 9")
err = testPanics(t, `byte 0xf000000000000000
int 1
extract_uint64`, 5)
- require.Contains(t, err.Error(), "extract range beyond length of string")
+ require.Contains(t, err.Error(), "extraction end 9")
}
func TestReplace(t *testing.T) {
@@ -2506,8 +2567,8 @@ func TestReplace(t *testing.T) {
func TestLoadStore(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testAccepts(t, "load 3; int 0; ==;", 1)
testAccepts(t, `int 37
@@ -2576,7 +2637,6 @@ int 5
func TestGload(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
// for simple app-call-only transaction groups
@@ -2588,48 +2648,22 @@ func TestGload(t *testing.T) {
simpleCase := scratchTestCase{
tealSources: []string{
- `
-int 2
-store 0
-int 1`,
- `
-gload 0 0
-int 2
-==
-`,
+ `int 2; store 0; int 1`,
+ `gload 0 0; int 2; ==`,
},
}
multipleTxnCase := scratchTestCase{
tealSources: []string{
- `
-byte "txn 1"
-store 0
-int 1`,
- `
-byte "txn 2"
-store 2
-int 1`,
- `
-gload 0 0
-byte "txn 1"
-==
-gload 1 2
-byte "txn 2"
-==
-&&
-`,
+ `byte "txn 1"; store 0; int 1`,
+ `byte "txn 2"; store 2; int 1`,
+ `gload 0 0; byte "txn 1"; ==; gload 1 2; byte "txn 2"; ==; &&`,
},
}
selfCase := scratchTestCase{
tealSources: []string{
- `
-gload 0 0
-int 2
-store 0
-int 1
-`,
+ `gload 0 0; int 2; store 0; int 1`,
},
errTxn: 0,
errContains: "can't use gload on self, use load instead",
@@ -2637,14 +2671,8 @@ int 1
laterTxnSlotCase := scratchTestCase{
tealSources: []string{
- `
-gload 1 0
-int 2
-==`,
- `
-int 2
-store 0
-int 1`,
+ `gload 1 0; int 2; ==`,
+ `int 2; store 0; int 1`,
},
errTxn: 0,
errContains: "gload can't get future scratch space from txn with index 1",
@@ -2665,9 +2693,9 @@ int 1`,
}
if testCase.errContains != "" {
- testApps(t, sources, txgroup, LogicVersion, MakeLedger(nil), Expect{testCase.errTxn, testCase.errContains})
+ testApps(t, sources, txgroup, LogicVersion, nil, Expect{testCase.errTxn, testCase.errContains})
} else {
- testApps(t, sources, txgroup, LogicVersion, MakeLedger(nil))
+ testApps(t, sources, txgroup, LogicVersion, nil)
}
})
}
@@ -2702,20 +2730,17 @@ int 1`,
failCases := []failureCase{nonAppCall, logicSigCall}
for j, failCase := range failCases {
t.Run(fmt.Sprintf("j=%d", j), func(t *testing.T) {
- program := testProg(t, "gload 0 0", AssemblerMaxVersion).Program
- txgroup := []transactions.SignedTxnWithAD{
- {SignedTxn: failCase.firstTxn},
- {},
+ appcall := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ },
}
- ep := &EvalParams{
- Proto: makeTestProto(),
- TxnGroup: txgroup,
- pastScratch: make([]*scratchSpace, 2),
- SigLedger: MakeLedger(nil),
- }
+ ep := defaultEvalParams(failCase.firstTxn, appcall)
+ ep.SigLedger = NewLedger(nil)
+ program := testProg(t, "gload 0 0", AssemblerMaxVersion).Program
switch failCase.runMode {
case modeApp:
testAppBytes(t, program, ep, failCase.errContains)
@@ -2774,7 +2799,7 @@ int 1
txgroup[j].Txn.Type = protocol.ApplicationCallTx
}
- testApps(t, sources, txgroup, LogicVersion, MakeLedger(nil))
+ testApps(t, sources, txgroup, LogicVersion, nil)
}
const testCompareProgramText = `int 35
@@ -2863,19 +2888,19 @@ func TestSlowLogic(t *testing.T) {
// v1overspend fails (on v1)
ops := testProg(t, v1overspend, 1)
// We should never Eval this after it fails Check(), but nice to see it also fails.
- testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(nil, 1),
+ testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(1),
"static cost", "dynamic cost")
// v2overspend passes Check, even on v2 proto, because the old low cost is "grandfathered"
ops = testProg(t, v2overspend, 1)
- testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(nil, 2))
+ testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(2))
// even the shorter, v2overspend, fails when compiled as v2 code
ops = testProg(t, v2overspend, 2)
- testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(nil, 2),
+ testLogicBytes(t, ops.Program, defaultEvalParamsWithVersion(2),
"static cost", "dynamic cost")
// in v4 cost is still 134, but only matters in Eval, not Check, so both fail there
- ep4 := defaultEvalParamsWithVersion(nil, 4)
+ ep4 := defaultEvalParamsWithVersion(4)
ops = testProg(t, v1overspend, 4)
testLogicBytes(t, ops.Program, ep4, "dynamic cost")
@@ -2900,7 +2925,7 @@ func TestStackUnderflow(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `int 1`, v)
ops.Program = append(ops.Program, 0x08) // +
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "stack underflow")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "stack underflow")
})
}
}
@@ -2913,7 +2938,7 @@ func TestWrongStackTypeRuntime(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `int 1`, v)
ops.Program = append(ops.Program, 0x01, 0x15) // sha256, len
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "sha256 arg 0 wanted")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "sha256 arg 0 wanted")
})
}
}
@@ -2926,7 +2951,7 @@ func TestEqMismatch(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `byte 0x1234; int 1`, v)
ops.Program = append(ops.Program, 0x12) // ==
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "cannot compare")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "cannot compare")
// TODO: Check should know the type stack was wrong
})
}
@@ -2940,7 +2965,7 @@ func TestNeqMismatch(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `byte 0x1234; int 1`, v)
ops.Program = append(ops.Program, 0x13) // !=
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "cannot compare")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "cannot compare")
})
}
}
@@ -2953,7 +2978,7 @@ func TestWrongStackTypeRuntime2(t *testing.T) {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `byte 0x1234; int 1`, v)
ops.Program = append(ops.Program, 0x08) // +
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "+ arg 0 wanted")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "+ arg 0 wanted")
})
}
}
@@ -2971,7 +2996,7 @@ func TestIllegalOp(t *testing.T) {
break
}
}
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "illegal opcode", "illegal opcode")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "illegal opcode", "illegal opcode")
})
}
}
@@ -2989,7 +3014,7 @@ int 1
`, v)
// cut two last bytes - intc_1 and last byte of bnz
ops.Program = ops.Program[:len(ops.Program)-2]
- testLogicBytes(t, ops.Program, defaultEvalParams(nil),
+ testLogicBytes(t, ops.Program, defaultEvalParams(),
"bnz program ends short", "bnz program ends short")
})
}
@@ -3004,7 +3029,7 @@ intc 0
intc 0
bnz done
done:`, 2)
- testLogicBytes(t, ops.Program, defaultEvalParams(nil))
+ testLogicBytes(t, ops.Program, defaultEvalParams())
}
func TestShortBytecblock(t *testing.T) {
@@ -3019,8 +3044,8 @@ func TestShortBytecblock(t *testing.T) {
for i := 2; i < len(fullops.Program); i++ {
program := fullops.Program[:i]
t.Run(hex.EncodeToString(program), func(t *testing.T) {
- testLogicBytes(t, program, defaultEvalParams(nil),
- "bytecblock", "bytecblock")
+ testLogicBytes(t, program, defaultEvalParams(),
+ "bytes list", "bytes list")
})
}
})
@@ -3041,7 +3066,7 @@ func TestShortBytecblock2(t *testing.T) {
t.Run(src, func(t *testing.T) {
program, err := hex.DecodeString(src)
require.NoError(t, err)
- testLogicBytes(t, program, defaultEvalParams(nil), "bytecblock", "bytecblock")
+ testLogicBytes(t, program, defaultEvalParams(), "const bytes list", "const bytes list")
})
}
}
@@ -3078,7 +3103,7 @@ func TestPanic(t *testing.T) {
break
}
}
- params := defaultEvalParams(nil)
+ params := defaultEvalParams()
params.logger = log
params.TxnGroup[0].Lsig.Logic = ops.Program
err := CheckSignature(0, params)
@@ -3092,7 +3117,7 @@ func TestPanic(t *testing.T) {
}
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- params = defaultEvalParams(&txn)
+ params = defaultEvalParams(txn)
params.logger = log
pass, err := EvalSignature(0, params)
if pass {
@@ -3118,7 +3143,7 @@ func TestProgramTooNew(t *testing.T) {
t.Parallel()
var program [12]byte
vlen := binary.PutUvarint(program[:], evalMaxVersion+1)
- testLogicBytes(t, program[:vlen], defaultEvalParams(nil),
+ testLogicBytes(t, program[:vlen], defaultEvalParams(),
"greater than max supported", "greater than max supported")
}
@@ -3128,7 +3153,7 @@ func TestInvalidVersion(t *testing.T) {
t.Parallel()
program, err := hex.DecodeString("ffffffffffffffffffffffff")
require.NoError(t, err)
- testLogicBytes(t, program, defaultEvalParams(nil), "invalid version", "invalid version")
+ testLogicBytes(t, program, defaultEvalParams(), "invalid version", "invalid version")
}
func TestProgramProtoForbidden(t *testing.T) {
@@ -3137,7 +3162,7 @@ func TestProgramProtoForbidden(t *testing.T) {
t.Parallel()
var program [12]byte
vlen := binary.PutUvarint(program[:], evalMaxVersion)
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Proto = &config.ConsensusParams{
LogicSigVersion: evalMaxVersion - 1,
}
@@ -3162,16 +3187,16 @@ int 1`, v)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 3 // clobber the branch offset to be in the middle of the bytecblock
// Since Eval() doesn't know the jump is bad, we reject "by luck"
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "aligned", "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "aligned", "REJECT")
// back branches are checked differently, so test misaligned back branch
ops.Program[6] = 0xff // Clobber the two bytes of offset with 0xff 0xff = -1
ops.Program[7] = 0xff // That jumps into the offset itself (pc + 3 -1)
if v < backBranchEnabledVersion {
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "negative branch", "negative branch")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "negative branch", "negative branch")
} else {
// Again, if we were ever to Eval(), we would not know it's wrong. But we reject here "by luck"
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "back branch target", "REJECT")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "back branch target", "REJECT")
}
})
}
@@ -3194,7 +3219,7 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 200 // clobber the branch offset to be beyond the end of the program
- testLogicBytes(t, ops.Program, defaultEvalParams(nil),
+ testLogicBytes(t, ops.Program, defaultEvalParams(),
"outside of program", "outside of program")
})
}
@@ -3218,7 +3243,7 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[6] = 0x70 // clobber hi byte of branch offset
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "outside", "outside")
+ testLogicBytes(t, ops.Program, defaultEvalParams(), "outside", "outside")
})
}
branches := []string{
@@ -3238,7 +3263,7 @@ intc_1
require.NoError(t, err)
ops.Program[7] = 0xf0 // clobber the branch offset - highly negative
ops.Program[8] = 0xff // clobber the branch offset
- testLogicBytes(t, ops.Program, defaultEvalParams(nil),
+ testLogicBytes(t, ops.Program, defaultEvalParams(),
"outside of program", "outside of program")
})
}
@@ -3526,10 +3551,10 @@ func evalLoop(b *testing.B, runs int, program []byte) {
for i := 0; i < runs; i++ {
var txn transactions.SignedTxn
txn.Lsig.Logic = program
- pass, err := EvalSignature(0, benchmarkEvalParams(&txn))
+ pass, err := EvalSignature(0, benchmarkEvalParams(txn))
if !pass {
// rerun to trace it. tracing messes up timing too much
- ep := benchmarkEvalParams(&txn)
+ ep := benchmarkEvalParams(txn)
ep.Trace = &strings.Builder{}
pass, err = EvalSignature(0, ep)
b.Log(ep.Trace.String())
@@ -3796,7 +3821,7 @@ func BenchmarkCheckx5(b *testing.B) {
for _, program := range programs {
var txn transactions.SignedTxn
txn.Lsig.Logic = program
- err := CheckSignature(0, defaultEvalParams(&txn))
+ err := CheckSignature(0, defaultEvalParams(txn))
if err != nil {
require.NoError(b, err)
}
@@ -3900,16 +3925,16 @@ pop
txn.Lsig.Logic = ops.Program
txn.Txn.ApplicationArgs = [][]byte{[]byte("test")}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
testLogicBytes(t, ops.Program, ep)
- ep = defaultEvalParamsWithVersion(&txn, 1)
+ ep = defaultEvalParamsWithVersion(1, txn)
testLogicBytes(t, ops.Program, ep,
"greater than protocol supported version 1", "greater than protocol supported version 1")
// hack the version and fail on illegal opcode
ops.Program[0] = 0x1
- ep = defaultEvalParamsWithVersion(&txn, 1)
+ ep = defaultEvalParamsWithVersion(1, txn)
testLogicBytes(t, ops.Program, ep, "illegal opcode 0x36", "illegal opcode 0x36") // txna
}
@@ -3994,7 +4019,6 @@ byte 0x // empty byte constant
func TestArgType(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
var sv stackValue
@@ -4009,14 +4033,14 @@ func TestArgType(t *testing.T) {
func TestApplicationsDisallowOldTeal(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
const source = "int 1"
txn := makeSampleTxn()
txn.Txn.Type = protocol.ApplicationCallTx
txn.Txn.RekeyTo = basics.Address{}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
for v := uint64(0); v < appsEnabledVersion; v++ {
ops := testProg(t, source, v)
@@ -4029,8 +4053,8 @@ func TestApplicationsDisallowOldTeal(t *testing.T) {
func TestAnyRekeyToOrApplicationRaisesMinAvmVersion(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
const source = "int 1"
// Construct a group of two payments, no rekeying
@@ -4069,8 +4093,7 @@ func TestAnyRekeyToOrApplicationRaisesMinAvmVersion(t *testing.T) {
for ci, cse := range cases {
t.Run(fmt.Sprintf("ci=%d", ci), func(t *testing.T) {
- ep := defaultEvalParams(nil)
- ep.TxnGroup = transactions.WrapSignedTxnsWithAD(cse.group)
+ ep := defaultEvalParams(cse.group...)
// Computed MinAvmVersion should be == validFromVersion
calc := ComputeMinAvmVersion(ep.TxnGroup)
@@ -4080,14 +4103,18 @@ func TestAnyRekeyToOrApplicationRaisesMinAvmVersion(t *testing.T) {
expected := fmt.Sprintf("program version must be >= %d", cse.validFromVersion)
for v := uint64(0); v < cse.validFromVersion; v++ {
ops := testProg(t, source, v)
- testAppBytes(t, ops.Program, ep, expected, expected)
+ if ep.supportsAppEval() {
+ testAppBytes(t, ops.Program, ep, expected, expected)
+ }
testLogicBytes(t, ops.Program, ep, expected, expected)
}
// Should succeed for all versions >= validFromVersion
for v := cse.validFromVersion; v <= AssemblerMaxVersion; v++ {
ops := testProg(t, source, v)
- testAppBytes(t, ops.Program, ep)
+ if ep.supportsAppEval() {
+ testAppBytes(t, ops.Program, ep)
+ }
testLogicBytes(t, ops.Program, ep)
}
})
@@ -4133,7 +4160,7 @@ func TestAllowedOpcodesV2(t *testing.T) {
"gtxn": true,
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
cnt := 0
for _, spec := range OpSpecs {
@@ -4186,7 +4213,7 @@ func TestAllowedOpcodesV3(t *testing.T) {
"pushbytes": `pushbytes "stringsfail?"`,
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
cnt := 0
for _, spec := range OpSpecs {
@@ -4231,7 +4258,7 @@ func TestRekeyFailsOnOldVersion(t *testing.T) {
ops := testProg(t, "int 1", v)
var txn transactions.SignedTxn
txn.Txn.RekeyTo = basics.Address{1, 2, 3, 4}
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
e := fmt.Sprintf("program version must be >= %d", rekeyingEnabledVersion)
testLogicBytes(t, ops.Program, ep, e, e)
})
@@ -4272,13 +4299,13 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval
t.Helper()
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- ep := defaultEvalParamsWithVersion(&txn, lv)
+ ep := defaultEvalParamsWithVersion(lv, txn)
err := CheckSignature(0, ep)
if err != nil {
t.Log(ep.Trace.String())
}
require.NoError(t, err)
- ep = defaultEvalParamsWithVersion(&txn, lv)
+ ep = defaultEvalParamsWithVersion(lv, txn)
pass, err := EvalSignature(0, ep)
ok := tester(t, pass, err)
if !ok {
@@ -4751,9 +4778,11 @@ func TestBytesMath(t *testing.T) {
testAccepts(t, "byte 0x01; byte 0x01; b/; byte 0x01; ==", 4)
testPanics(t, "byte 0x0200; byte b64(); b/; int 1; return", 4)
testPanics(t, "byte 0x01; byte 0x00; b/; int 1; return", 4)
+ testPanics(t, "int 65; bzero; byte 0x01; b/; int 1; return", 4)
testAccepts(t, "byte 0x10; byte 0x07; b%; byte 0x02; ==; return", 4)
testPanics(t, "byte 0x01; byte 0x00; b%; int 1; return", 4)
+ testPanics(t, "int 65; bzero; byte 0x10; b%", 4)
// Even 128 byte outputs are ok
testAccepts(t, fmt.Sprintf("byte 0x%s; byte 0x%s; b*; len; int 128; ==", effs, effs), 4)
@@ -4778,6 +4807,7 @@ func TestBytesCompare(t *testing.T) {
testAccepts(t, "byte 0x10; byte 0x10; b<; !", 4)
testAccepts(t, "byte 0x10; byte 0x10; b<=", 4)
+ testPanics(t, "byte 0x10; int 65; bzero; b<=", 4)
testAccepts(t, "byte 0x10; int 64; bzero; b>", 4)
testPanics(t, "byte 0x10; int 65; bzero; b>", 4)
@@ -4786,6 +4816,7 @@ func TestBytesCompare(t *testing.T) {
testAccepts(t, "byte 0x11; byte 0x10; b>=", 4)
testAccepts(t, "byte 0x11; byte 0x0011; b>=", 4)
+ testPanics(t, "byte 0x10; int 65; bzero; b>=", 4)
testAccepts(t, "byte 0x11; byte 0x11; b==", 4)
testAccepts(t, "byte 0x0011; byte 0x11; b==", 4)
@@ -4796,6 +4827,7 @@ func TestBytesCompare(t *testing.T) {
testAccepts(t, "byte 0x11; byte 0x00; b!=", 4)
testAccepts(t, "byte 0x0011; byte 0x1100; b!=", 4)
testPanics(t, notrack("byte 0x11; int 17; b!="), 4)
+ testPanics(t, "byte 0x10; int 65; bzero; b!=", 4)
}
func TestBytesBits(t *testing.T) {
@@ -4842,9 +4874,9 @@ func TestLog(t *testing.T) {
t.Parallel()
var txn transactions.SignedTxn
txn.Txn.Type = protocol.ApplicationCallTx
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
ledger.NewApp(txn.Txn.Receiver, 0, basics.AppParams{})
- ep := defaultEvalParams(&txn)
+ ep := defaultEvalParams(txn)
ep.Proto = makeTestProtoV(LogicVersion)
ep.Ledger = ledger
testCases := []struct {
@@ -4964,6 +4996,7 @@ func TestPcDetails(t *testing.T) {
t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) {
ops := testProg(t, test.source, LogicVersion)
ep, _, _ := makeSampleEnv()
+ ep.Trace = &strings.Builder{}
pass, cx, err := EvalContract(ops.Program, 0, 888, ep)
require.Error(t, err)
@@ -5196,16 +5229,12 @@ func TestProtocolParseDuplicateErrMsg(t *testing.T) {
func TestOpJSONRef(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- proto := makeTestProtoV(LogicVersion)
- txn := transactions.SignedTxn{
- Txn: transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- },
- }
- ledger := MakeLedger(nil)
+
+ var txn transactions.SignedTxn
+ txn.Txn.Type = protocol.ApplicationCallTx
+ ledger := NewLedger(nil)
ledger.NewApp(txn.Txn.Receiver, 0, basics.AppParams{})
- ep := defaultEvalParams(&txn)
- ep.Proto = proto
+ ep := defaultEvalParams(txn)
ep.Ledger = ledger
testCases := []struct {
source string
@@ -5691,3 +5720,203 @@ int 88
switch done1 done2; done1: ; done2: ;
`, 8)
}
+
+func TestMatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // take the 0th label with int cases
+ testAccepts(t, `
+int 99
+int 100
+int 99
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 0th label with bytes cases
+ testAccepts(t, `
+byte "0"
+byte "1"
+byte "0"
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 1th label with int cases
+ testRejects(t, `
+int 99
+int 100
+int 100
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 1th label with bytes cases
+ testRejects(t, `
+byte "0"
+byte "1"
+byte "1"
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // same, but jumping to end of program
+ testAccepts(t, `
+int 1; int 99; int 100; int 100
+match zero one
+zero: err
+one:
+`, 8)
+
+ // no match
+ testAccepts(t, `
+int 99
+int 100
+int 101
+match zero one
+int 1; return // falls through to here
+zero: int 0; return
+one: int 0; return
+`, 8)
+
+ // jump forward and backward
+ testAccepts(t, `
+int 99
+start:
+int 1
++
+int 100
+int 101
+dig 2
+match start end
+err
+end:
+int 101
+==
+assert
+int 1
+`, 8)
+
+ // 0 labels are allowed, but weird!
+ testAccepts(t, `
+int 0
+match
+int 1
+`, 8)
+
+ testPanics(t, notrack("match; int 1"), 8)
+
+ // make the match the final instruction
+ testAccepts(t, `
+int 1
+int 100
+int 99
+int 100
+match done1 done2; done1: ; done2: ;
+`, 8)
+
+ // make the switch the final instruction, and don't match
+ testAccepts(t, `
+int 1
+int 1
+int 2
+int 88
+match done1 done2; done1: ; done2: ;
+`, 8)
+
+ // allow mixed types for match cases
+ testAccepts(t, `
+int 1
+int 100
+byte "101"
+byte "101"
+match done1 done2; done1: ; done2: ;
+`, 8)
+
+ testAccepts(t, `
+byte "0"
+int 1
+byte "0"
+match zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ testAccepts(t, `
+byte "0"
+int 1
+int 1
+match zero one
+err
+one: int 1; return
+zero: int 0;
+`, 8)
+
+ testAccepts(t, `
+byte "0"
+byte "1"
+int 1
+match zero one
+int 1; return
+zero: int 0;
+one: int 0;
+`, 8)
+}
+
+func TestPushConsts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testAccepts(t, `
+pushints 1 2
+int 2
+==
+assert
+int 1
+==
+assert
+int 1
+`, 8)
+
+ testAccepts(t, `
+pushbytess "1" "2"
+byte "2"
+==
+assert
+byte "1"
+==
+assert
+int 1
+`, 8)
+
+ valsStr := make([]string, 256)
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("%d", i)
+ }
+ source := fmt.Sprintf(`pushints %s`, strings.Join(valsStr, " "))
+ testAccepts(t, source+`
+popn 255
+pop
+int 1
+`, 8)
+
+ for i := range valsStr {
+ valsStr[i] = fmt.Sprintf("\"%d\"", i)
+ }
+ source = fmt.Sprintf(`pushbytess %s`, strings.Join(valsStr, " "))
+ testAccepts(t, source+`
+popn 255
+pop
+int 1
+`, 8)
+}
diff --git a/data/transactions/logic/export_test.go b/data/transactions/logic/export_test.go
index 1a1a21ce2..67346482a 100644
--- a/data/transactions/logic/export_test.go
+++ b/data/transactions/logic/export_test.go
@@ -16,6 +16,8 @@
package logic
+import "github.com/algorand/go-algorand/data/basics"
+
// Export for testing only. See
// https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd for a
// nice explanation. tl;dr: Since some of our testing is in logic_test package,
@@ -30,6 +32,18 @@ func (ep *EvalParams) Reset() {
ep.reset()
}
+// Inefficient (hashing), just a testing convenience
+func (l *Ledger) CreateBox(app basics.AppIndex, name string, size uint64) {
+ l.NewBox(app, name, make([]byte, size), app.Address())
+}
+
+// Inefficient (hashing), just a testing convenience
+func (l *Ledger) DelBoxes(app basics.AppIndex, names ...string) {
+ for _, n := range names {
+ l.DelBox(app, n, app.Address())
+ }
+}
+
var MakeSampleEnv = makeSampleEnv
var MakeSampleEnvWithVersion = makeSampleEnvWithVersion
var MakeSampleTxn = makeSampleTxn
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index cb1685d91..0b62a5239 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -1202,9 +1202,32 @@ const (
AcctBalance AcctParamsField = iota
// AcctMinBalance is algos needed for this accounts apps and assets
AcctMinBalance
- //AcctAuthAddr is the rekeyed address if any, else ZeroAddress
+ // AcctAuthAddr is the rekeyed address if any, else ZeroAddress
AcctAuthAddr
+ // AcctTotalNumUint is the count of all uints from created global apps or opted in locals
+ AcctTotalNumUint
+ // AcctTotalNumByteSlice is the count of all byte slices from created global apps or opted in locals
+ AcctTotalNumByteSlice
+
+ // AcctTotalExtraAppPages is the extra code pages across all apps
+ AcctTotalExtraAppPages
+
+ // AcctTotalAppsCreated is the number of apps created by this account
+ AcctTotalAppsCreated
+ // AcctTotalAppsOptedIn is the number of apps opted in by this account
+ AcctTotalAppsOptedIn
+ // AcctTotalAssetsCreated is the number of ASAs created by this account
+ AcctTotalAssetsCreated
+ // AcctTotalAssets is the number of ASAs opted in by this account (always includes AcctTotalAssetsCreated)
+ AcctTotalAssets
+ // AcctTotalBoxes is the number of boxes created by the app this account is associated with
+ AcctTotalBoxes
+ // AcctTotalBoxBytes is the number of bytes in all boxes of this app account
+ AcctTotalBoxBytes
+
+ // AcctTotalAppSchema - consider how to expose
+
invalidAcctParamsField // compile-time constant for number of fields
)
@@ -1235,8 +1258,18 @@ func (fs acctParamsFieldSpec) Note() string {
var acctParamsFieldSpecs = [...]acctParamsFieldSpec{
{AcctBalance, StackUint64, 6, "Account balance in microalgos"},
- {AcctMinBalance, StackUint64, 6, "Minimum required blance for account, in microalgos"},
+ {AcctMinBalance, StackUint64, 6, "Minimum required balance for account, in microalgos"},
{AcctAuthAddr, StackBytes, 6, "Address the account is rekeyed to."},
+
+ {AcctTotalNumUint, StackUint64, 8, "The total number of uint64 values allocated by this account in Global and Local States."},
+ {AcctTotalNumByteSlice, StackUint64, 8, "The total number of byte array values allocated by this account in Global and Local States."},
+ {AcctTotalExtraAppPages, StackUint64, 8, "The number of extra app code pages used by this account."},
+ {AcctTotalAppsCreated, StackUint64, 8, "The number of existing apps created by this account."},
+ {AcctTotalAppsOptedIn, StackUint64, 8, "The number of apps this account is opted into."},
+ {AcctTotalAssetsCreated, StackUint64, 8, "The number of existing ASAs created by this account."},
+ {AcctTotalAssets, StackUint64, 8, "The numbers of ASAs held by this account (including ASAs this account created)."},
+ {AcctTotalBoxes, StackUint64, boxVersion, "The number of existing boxes created by this account's app."},
+ {AcctTotalBoxBytes, StackUint64, boxVersion, "The total number of bytes used by this account's app's box keys and values."},
}
func acctParamsFieldSpecByField(f AcctParamsField) (acctParamsFieldSpec, bool) {
diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go
index 6c90a7a67..44531c2bd 100644
--- a/data/transactions/logic/fields_string.go
+++ b/data/transactions/logic/fields_string.go
@@ -183,12 +183,21 @@ func _() {
_ = x[AcctBalance-0]
_ = x[AcctMinBalance-1]
_ = x[AcctAuthAddr-2]
- _ = x[invalidAcctParamsField-3]
+ _ = x[AcctTotalNumUint-3]
+ _ = x[AcctTotalNumByteSlice-4]
+ _ = x[AcctTotalExtraAppPages-5]
+ _ = x[AcctTotalAppsCreated-6]
+ _ = x[AcctTotalAppsOptedIn-7]
+ _ = x[AcctTotalAssetsCreated-8]
+ _ = x[AcctTotalAssets-9]
+ _ = x[AcctTotalBoxes-10]
+ _ = x[AcctTotalBoxBytes-11]
+ _ = x[invalidAcctParamsField-12]
}
-const _AcctParamsField_name = "AcctBalanceAcctMinBalanceAcctAuthAddrinvalidAcctParamsField"
+const _AcctParamsField_name = "AcctBalanceAcctMinBalanceAcctAuthAddrAcctTotalNumUintAcctTotalNumByteSliceAcctTotalExtraAppPagesAcctTotalAppsCreatedAcctTotalAppsOptedInAcctTotalAssetsCreatedAcctTotalAssetsAcctTotalBoxesAcctTotalBoxBytesinvalidAcctParamsField"
-var _AcctParamsField_index = [...]uint8{0, 11, 25, 37, 59}
+var _AcctParamsField_index = [...]uint8{0, 11, 25, 37, 53, 74, 96, 116, 136, 158, 173, 187, 204, 226}
func (i AcctParamsField) String() string {
if i < 0 || i >= AcctParamsField(len(_AcctParamsField_index)-1) {
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index 2b5008f5c..5ae042294 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -41,7 +41,7 @@ func TestGlobalFieldsVersions(t *testing.T) {
}
require.Greater(t, len(fields), 1)
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
for _, field := range fields {
text := fmt.Sprintf("global %s", field.field.String())
// check assembler fails if version before introduction
@@ -59,7 +59,7 @@ func TestGlobalFieldsVersions(t *testing.T) {
if preLogicVersion < appsEnabledVersion {
require.False(t, proto.Application)
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Proto = proto
ep.Ledger = ledger
@@ -101,7 +101,7 @@ func TestTxnFieldVersions(t *testing.T) {
}
txnaVersion := uint64(appsEnabledVersion)
- ledger := MakeLedger(nil)
+ ledger := NewLedger(nil)
txn := makeSampleTxn()
// We'll reject too early if we have a nonzero RekeyTo, because that
// field must be zero for every txn in the group if this is an old
@@ -137,7 +137,7 @@ func TestTxnFieldVersions(t *testing.T) {
if preLogicVersion < appsEnabledVersion {
require.False(t, proto.Application)
}
- ep := defaultEvalParams(nil)
+ ep := defaultEvalParams()
ep.Proto = proto
ep.Ledger = ledger
ep.TxnGroup = transactions.WrapSignedTxnsWithAD(txgroup)
@@ -190,7 +190,7 @@ func TestTxnEffectsAvailable(t *testing.T) {
ep.TxnGroup[1].Lsig.Logic = ops.Program
_, err := EvalSignature(1, ep)
require.Error(t, err)
- ep.Ledger = MakeLedger(nil)
+ ep.Ledger = NewLedger(nil)
_, err = EvalApp(ops.Program, 1, 888, ep)
if v < txnEffectsVersion {
require.Error(t, err, source)
@@ -219,10 +219,18 @@ func TestAssetParamsFieldsVersions(t *testing.T) {
for _, field := range fields {
// Need to use intc so we can "backversion" the
// program and not have it fail because of pushint.
- text := fmt.Sprintf("intcblock 0 1; intc_0; asset_params_get %s; pop; pop; intc_1", field.field.String())
+ text := fmt.Sprintf("intcblock 0 1; intc_0; asset_params_get %s; bnz ok; err; ok: ", field.field.String())
+ switch field.ftype {
+ case StackUint64: // ensure the return type is uint64 by adding
+ text += " intc_1; +"
+ case StackBytes: // ensure the return type is bytes by using len
+ text += " len" // also happens to ensure that we get non empty - the params fields are fixed width
+ }
// check assembler fails if version before introduction
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- ep, _, _ := makeSampleEnv()
+ ep, txn, ledger := makeSampleEnv()
+ // Create app 55, since txn.ForeignApps[0] == 55
+ ledger.NewAsset(txn.Sender, 55, basics.AssetParams{})
ep.Proto.LogicSigVersion = v
if field.version > v {
testProg(t, text, v, Expect{1, "...was introduced in..."})
@@ -242,7 +250,7 @@ func TestFieldVersions(t *testing.T) {
// This test is weird, it confirms that we don't need to
// bother with a "good" test for AssetHolding and AppParams
// fields. It will fail if we add a field that has a
- // different teal debut version, and then we'll need a test
+ // different debut version, and then we'll need a test
// like TestAssetParamsFieldsVersions that checks the field is
// unavailable before its debut.
@@ -257,3 +265,43 @@ func TestFieldVersions(t *testing.T) {
require.Equal(t, uint64(5), fs.version)
}
}
+
+func TestAcctParamsFieldsVersions(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var fields []acctParamsFieldSpec
+ for _, fs := range acctParamsFieldSpecs {
+ if fs.version > 6 {
+ fields = append(fields, fs)
+ }
+ }
+ require.Greater(t, len(fields), 0)
+
+ for _, field := range fields {
+ // Need to use intc so we can "backversion" the program and not have it
+ // fail because of pushint.
+ // Use of '+' confirms the type, which is uint64 for all fields
+ text := fmt.Sprintf("intcblock 0 1; intc_0; acct_params_get %s; assert; intc_1; +", field.field.String())
+ // check assembler fails if version before introduction
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ ep, txn, ledger := makeSampleEnv()
+ ledger.NewAccount(txn.Sender, 200_000)
+ ep.Proto.LogicSigVersion = v
+ if field.version > v {
+ testProg(t, text, v, Expect{1, "...was introduced in..."})
+ ops := testProg(t, text, field.version) // assemble in the future
+ ops.Program[0] = byte(v) // but set version back to before intro
+ if v < 6 {
+ testAppBytes(t, ops.Program, ep, "illegal opcode", "illegal opcode")
+ } else {
+ testAppBytes(t, ops.Program, ep, "invalid acct_params_get field")
+ }
+ } else {
+ testProg(t, text, v)
+ testApp(t, text, ep)
+ }
+ }
+
+ }
+}
diff --git a/data/transactions/logic/frames.go b/data/transactions/logic/frames.go
index e145ac8fc..1acc0c3c2 100644
--- a/data/transactions/logic/frames.go
+++ b/data/transactions/logic/frames.go
@@ -114,12 +114,7 @@ func opDupN(cx *EvalContext) error {
n := int(cx.program[cx.pc+1])
finalLen := len(cx.stack) + n
- if cap(cx.stack) < finalLen {
- // Let's grow all at once, plus a little slack.
- newStack := make([]stackValue, len(cx.stack), finalLen+4)
- copy(newStack, cx.stack)
- cx.stack = newStack
- }
+ cx.ensureStackCap(finalLen)
for i := 0; i < n; i++ {
// There will be enough room that this will not allocate
cx.stack = append(cx.stack, cx.stack[last])
diff --git a/data/transactions/logic/frames_test.go b/data/transactions/logic/frames_test.go
index f1c1780c3..b02714a88 100644
--- a/data/transactions/logic/frames_test.go
+++ b/data/transactions/logic/frames_test.go
@@ -178,7 +178,7 @@ main:
+ // This consumes the top arg. We could complain in assembly if checked stack height against pgm.fp
dup; dup // But the dup;dup restores it, so it _evals_ fine.
retsub
-`, AssemblerMaxVersion)
+`, fpVersion)
}
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index f4742f1b7..f4054ff14 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1,6 +1,6 @@
{
"EvalMaxVersion": 8,
- "LogicSigVersion": 7,
+ "LogicSigVersion": 8,
"Ops": [
{
"Opcode": 0,
@@ -370,7 +370,7 @@
"Size": 0,
"Doc": "prepare block of uint64 constants for use by intc",
"DocExtra": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
- "ImmediateNote": "{varuint length} [{varuint value}, ...]",
+ "ImmediateNote": "{varuint count} [{varuint value}, ...]",
"Groups": [
"Loading Values"
]
@@ -432,7 +432,7 @@
"Size": 0,
"Doc": "prepare block of byte-array constants for use by bytec",
"DocExtra": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
- "ImmediateNote": "{varuint length} [({varuint value length} bytes), ...]",
+ "ImmediateNote": "{varuint count} [({varuint value length} bytes), ...]",
"Groups": [
"Loading Values"
]
@@ -1033,7 +1033,7 @@
"Name": "bury",
"Args": ".",
"Size": 2,
- "Doc": "Replace the Nth value from the top of the stack. bury 0 fails.",
+ "Doc": "replace the Nth value from the top of the stack with A. bury 0 fails.",
"ImmediateNote": "{uint8 depth}",
"Groups": [
"Flow Control"
@@ -1043,7 +1043,7 @@
"Opcode": 70,
"Name": "popn",
"Size": 2,
- "Doc": "Remove N values from the top of the stack",
+ "Doc": "remove N values from the top of the stack",
"ImmediateNote": "{uint8 stack depth}",
"Groups": [
"Flow Control"
@@ -1342,7 +1342,7 @@
"Args": ".",
"Returns": "U",
"Size": 1,
- "Doc": "get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
+ "Doc": "balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
"Groups": [
"State Access"
@@ -1531,9 +1531,18 @@
"ArgEnum": [
"AcctBalance",
"AcctMinBalance",
- "AcctAuthAddr"
+ "AcctAuthAddr",
+ "AcctTotalNumUint",
+ "AcctTotalNumByteSlice",
+ "AcctTotalExtraAppPages",
+ "AcctTotalAppsCreated",
+ "AcctTotalAppsOptedIn",
+ "AcctTotalAssetsCreated",
+ "AcctTotalAssets",
+ "AcctTotalBoxes",
+ "AcctTotalBoxBytes"
],
- "ArgEnumTypes": "UUB",
+ "ArgEnumTypes": "UUBUUUUUUUUU",
"Doc": "X is field F from account A. Y is 1 if A owns positive algos, else 0",
"ImmediateNote": "{uint8 account params field index}",
"Groups": [
@@ -1546,7 +1555,7 @@
"Args": ".",
"Returns": "U",
"Size": 1,
- "Doc": "get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.",
+ "Doc": "minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
"Groups": [
"State Access"
@@ -1577,6 +1586,28 @@
]
},
{
+ "Opcode": 130,
+ "Name": "pushbytess",
+ "Size": 0,
+ "Doc": "push sequences of immediate byte arrays to stack (first byte array being deepest)",
+ "DocExtra": "pushbytess args are not added to the bytecblock during assembly processes",
+ "ImmediateNote": "{varuint count} [({varuint value length} bytes), ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 131,
+ "Name": "pushints",
+ "Size": 0,
+ "Doc": "push sequence of immediate uints to stack in the order they appear (first uint being deepest)",
+ "DocExtra": "pushints args are not added to the intcblock during assembly processes",
+ "ImmediateNote": "{varuint count} [{varuint value}, ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
"Opcode": 132,
"Name": "ed25519verify_bare",
"Args": "BBB",
@@ -1635,7 +1666,7 @@
"Name": "frame_bury",
"Args": ".",
"Size": 2,
- "Doc": "Replace the Nth (signed) value from the frame pointer in the stack",
+ "Doc": "replace the Nth (signed) value from the frame pointer in the stack with A",
"ImmediateNote": "{int8 frame slot}",
"Groups": [
"Flow Control"
@@ -1653,6 +1684,17 @@
]
},
{
+ "Opcode": 142,
+ "Name": "match",
+ "Size": 0,
+ "Doc": "given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.",
+ "DocExtra": "`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.",
+ "ImmediateNote": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
"Opcode": 144,
"Name": "shl",
"Args": "UU",
@@ -2243,6 +2285,84 @@
]
},
{
+ "Opcode": 185,
+ "Name": "box_create",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1",
+ "DocExtra": "Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 186,
+ "Name": "box_extract",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 187,
+ "Name": "box_replace",
+ "Args": "BUB",
+ "Size": 1,
+ "Doc": "write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 188,
+ "Name": "box_del",
+ "Args": "B",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "delete box named A if it exists. Return 1 if A existed, 0 otherwise",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 189,
+ "Name": "box_len",
+ "Args": "B",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 190,
+ "Name": "box_get",
+ "Args": "B",
+ "Returns": "BU",
+ "Size": 1,
+ "Doc": "X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.",
+ "DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
+ "Opcode": 191,
+ "Name": "box_put",
+ "Args": "BB",
+ "Size": 1,
+ "Doc": "replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist",
+ "DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
+ "Groups": [
+ "Box Access"
+ ]
+ },
+ {
"Opcode": 192,
"Name": "txnas",
"Args": "U",
diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go
index 9f427b0e9..570967305 100644
--- a/data/transactions/logic/ledger_test.go
+++ b/data/transactions/logic/ledger_test.go
@@ -16,6 +16,20 @@
package logic
+/* This Ledger implements LedgerForLogic for unit tests in the logic package. It
+ does *not* carry the protocol around, so it does *not* enforce the various
+ limits imposed there. This helps ensure that the logic package itself
+ enforces those limits, rather than rely on the ledger package. (Which should
+ also do so, to be defensive.)
+
+ This Ledger is not clever enough to have a good mechanism for making changes
+ and rolling them back if the program that makes them fails. It just has a
+ Reset() method that throws away all changes made by programs. Generally,
+ it's probably best to call Reset() after any error test, though you can keep
+ testing if you take into account that changes made before the failure will
+ take effect.
+*/
+
import (
"errors"
"fmt"
@@ -39,15 +53,14 @@ type balanceRecord struct {
mods map[basics.AppIndex]map[string]basics.ValueDelta
}
-func makeBalanceRecord(addr basics.Address, balance uint64) balanceRecord {
- br := balanceRecord{
+func newBalanceRecord(addr basics.Address, balance uint64) balanceRecord {
+ return balanceRecord{
addr: addr,
balance: balance,
locals: make(map[basics.AppIndex]basics.TealKeyValue),
holdings: make(map[basics.AssetIndex]basics.AssetHolding),
mods: make(map[basics.AppIndex]map[string]basics.ValueDelta),
}
- return br
}
// In our test ledger, we don't store the creatables with their
@@ -55,6 +68,9 @@ func makeBalanceRecord(addr basics.Address, balance uint64) balanceRecord {
type appParams struct {
basics.AppParams
Creator basics.Address
+
+ boxes map[string][]byte // will never contain a nil slice
+ boxMods map[string][]byte // nil slice indicates a deletion
}
type asaParams struct {
@@ -71,8 +87,8 @@ type Ledger struct {
rnd basics.Round
}
-// MakeLedger constructs a Ledger with the given balances.
-func MakeLedger(balances map[basics.Address]uint64) *Ledger {
+// NewLedger constructs a Ledger with the given balances.
+func NewLedger(balances map[basics.Address]uint64) *Ledger {
l := new(Ledger)
l.balances = make(map[basics.Address]balanceRecord)
for addr, balance := range balances {
@@ -91,11 +107,15 @@ func (l *Ledger) Reset() {
br.mods = make(map[basics.AppIndex]map[string]basics.ValueDelta)
l.balances[addr] = br
}
+ for id, app := range l.applications {
+ app.boxMods = nil
+ l.applications[id] = app
+ }
}
// NewAccount adds a new account with a given balance to the Ledger.
func (l *Ledger) NewAccount(addr basics.Address, balance uint64) {
- l.balances[addr] = makeBalanceRecord(addr, balance)
+ l.balances[addr] = newBalanceRecord(addr, balance)
}
// NewApp add a new AVM app to the Ledger. In most uses, it only sets up the id
@@ -120,7 +140,7 @@ func (l *Ledger) NewAsset(creator basics.Address, assetID basics.AssetIndex, par
}
br, ok := l.balances[creator]
if !ok {
- br = makeBalanceRecord(creator, 0)
+ br = newBalanceRecord(creator, 0)
}
br.holdings[assetID] = basics.AssetHolding{Amount: params.Total, Frozen: params.DefaultFrozen}
l.balances[creator] = br
@@ -147,7 +167,7 @@ func (l *Ledger) Counter() uint64 {
func (l *Ledger) NewHolding(addr basics.Address, assetID uint64, amount uint64, frozen bool) {
br, ok := l.balances[addr]
if !ok {
- br = makeBalanceRecord(addr, 0)
+ br = newBalanceRecord(addr, 0)
}
br.holdings[basics.AssetIndex(assetID)] = basics.AssetHolding{Amount: amount, Frozen: frozen}
l.balances[addr] = br
@@ -156,7 +176,7 @@ func (l *Ledger) NewHolding(addr basics.Address, assetID uint64, amount uint64,
// NewLocals essentially "opts in" an address to an app id.
func (l *Ledger) NewLocals(addr basics.Address, appID uint64) {
if _, ok := l.balances[addr]; !ok {
- l.balances[addr] = makeBalanceRecord(addr, 0)
+ l.balances[addr] = newBalanceRecord(addr, 0)
}
l.balances[addr].locals[basics.AppIndex(appID)] = basics.TealKeyValue{}
}
@@ -189,14 +209,9 @@ func (l *Ledger) Rekey(addr basics.Address, auth basics.Address) {
}
}
-// Round gives the current Round of the test ledger, which is random but consistent
-func (l *Ledger) Round() basics.Round {
- return l.round()
-}
-
// LatestTimestamp gives a uint64, chosen randomly. It should
// probably increase monotonically, but no tests care yet.
-func (l *Ledger) LatestTimestamp() int64 {
+func (l *Ledger) PrevTimestamp() int64 {
return int64(rand.Uint32() + 1)
}
@@ -234,12 +249,40 @@ func (l *Ledger) AccountData(addr basics.Address) (ledgercore.AccountData, error
schemaTotal := basics.StateSchema{}
pagesTotal := uint32(0)
+ boxesTotal := 0
+ boxBytesTotal := 0
+
apps := make(map[basics.AppIndex]basics.AppParams)
for a, p := range l.applications {
if p.Creator == addr {
apps[a] = p.AppParams
schemaTotal = schemaTotal.AddSchema(p.GlobalStateSchema)
- pagesTotal = p.ExtraProgramPages
+ pagesTotal += p.ExtraProgramPages
+ }
+ if a.Address() == addr {
+ // We found the app that corresponds to this app account. Get box info from there.
+ boxesTotal = len(p.boxes)
+ for k, v := range p.boxes {
+ boxBytesTotal += len(k) + len(v)
+ }
+ for k, v := range p.boxMods {
+ base, ok := p.boxes[k]
+ if ok {
+ if v == nil {
+ // deleted, so remove from totals
+ boxesTotal--
+ boxBytesTotal -= len(k) + len(base)
+ continue
+ }
+ if len(v) != len(base) {
+ panic(fmt.Sprintf("mismatch %v %v", v, base))
+ }
+ continue
+ }
+ // fresh box in mods, count it
+ boxesTotal++
+ boxBytesTotal += len(k) + len(v)
+ }
}
}
@@ -259,6 +302,9 @@ func (l *Ledger) AccountData(addr basics.Address) (ledgercore.AccountData, error
TotalAppLocalStates: uint64(len(locals)),
TotalAssetParams: uint64(len(assets)),
TotalAssets: uint64(len(br.holdings)),
+
+ TotalBoxes: uint64(boxesTotal),
+ TotalBoxBytes: uint64(boxBytesTotal),
},
}, nil
}
@@ -352,6 +398,90 @@ func (l *Ledger) DelGlobal(appIdx basics.AppIndex, key string) error {
return nil
}
+// NewBox makes a new box, through the boxMods mechanism. It can be Reset()
+func (l *Ledger) NewBox(appIdx basics.AppIndex, key string, value []byte, appAddr basics.Address) error {
+ if appIdx.Address() != appAddr {
+ panic(fmt.Sprintf("%d %v %v", appIdx, appIdx.Address(), appAddr))
+ }
+ params, ok := l.applications[appIdx]
+ if !ok {
+ return fmt.Errorf("no such app %d", appIdx)
+ }
+ if params.boxMods == nil {
+ params.boxMods = make(map[string][]byte)
+ }
+ if current, ok := params.boxMods[key]; ok {
+ if current != nil {
+ return fmt.Errorf("attempt to recreate %s", key)
+ }
+ } else if _, ok := params.boxes[key]; ok {
+ return fmt.Errorf("attempt to recreate %s", key)
+ }
+ params.boxMods[key] = value
+ l.applications[appIdx] = params
+ return nil
+}
+
+func (l *Ledger) GetBox(appIdx basics.AppIndex, key string) ([]byte, bool, error) {
+ params, ok := l.applications[appIdx]
+ if !ok {
+ return nil, false, nil
+ }
+ if params.boxMods != nil {
+ if ps, ok := params.boxMods[key]; ok {
+ if ps == nil { // deletion in mod
+ return nil, false, nil
+ }
+ return ps, true, nil
+ }
+ }
+ if params.boxes == nil {
+ return nil, false, nil
+ }
+ box, ok := params.boxes[key]
+ return box, ok, nil
+}
+
+// SetBox set a box value through the boxMods mechanism. It can be Reset()
+func (l *Ledger) SetBox(appIdx basics.AppIndex, key string, value []byte) error {
+ current, ok, err := l.GetBox(appIdx, key)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return fmt.Errorf("no such box %d", appIdx)
+ }
+ params := l.applications[appIdx] // assured, based on above
+ if params.boxMods == nil {
+ params.boxMods = make(map[string][]byte)
+ }
+ if len(current) != len(value) {
+ return fmt.Errorf("wrong box size %#v %d != %d", key, len(current), len(value))
+ }
+ params.boxMods[key] = value
+ return nil
+}
+
+// DelBox deletes a value through boxMods mechanism
+func (l *Ledger) DelBox(appIdx basics.AppIndex, key string, appAddr basics.Address) (bool, error) {
+ if appIdx.Address() != appAddr {
+ panic(fmt.Sprintf("%d %v %v", appIdx, appIdx.Address(), appAddr))
+ }
+ _, ok, err := l.GetBox(appIdx, key)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ params := l.applications[appIdx] // assured, based on above
+ if params.boxMods == nil {
+ params.boxMods = make(map[string][]byte)
+ }
+ params.boxMods[key] = nil
+ return true, nil
+}
+
// GetLocal returns the current value bound to a local key, taking
// into account mods caused by earlier executions.
func (l *Ledger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) {
@@ -480,11 +610,11 @@ func (l *Ledger) AppParams(appID basics.AppIndex) (basics.AppParams, basics.Addr
func (l *Ledger) move(from basics.Address, to basics.Address, amount uint64) error {
fbr, ok := l.balances[from]
if !ok {
- fbr = makeBalanceRecord(from, 0)
+ fbr = newBalanceRecord(from, 0)
}
tbr, ok := l.balances[to]
if !ok {
- tbr = makeBalanceRecord(to, 0)
+ tbr = newBalanceRecord(to, 0)
}
if fbr.balance < amount {
return fmt.Errorf("insufficient balance")
@@ -546,7 +676,7 @@ func (l *Ledger) axfer(from basics.Address, xfer transactions.AssetTransferTxnFi
fbr, ok := l.balances[from]
if !ok {
- fbr = makeBalanceRecord(from, 0)
+ fbr = newBalanceRecord(from, 0)
}
fholding, ok := fbr.holdings[aid]
if !ok {
@@ -567,7 +697,7 @@ func (l *Ledger) axfer(from basics.Address, xfer transactions.AssetTransferTxnFi
}
tbr, ok := l.balances[to]
if !ok {
- tbr = makeBalanceRecord(to, 0)
+ tbr = newBalanceRecord(to, 0)
}
tholding, ok := tbr.holdings[aid]
if !ok && amount > 0 {
@@ -595,7 +725,7 @@ func (l *Ledger) axfer(from basics.Address, xfer transactions.AssetTransferTxnFi
if !close.IsZero() && fholding.Amount > 0 {
cbr, ok := l.balances[close]
if !ok {
- cbr = makeBalanceRecord(close, 0)
+ cbr = newBalanceRecord(close, 0)
}
cholding, ok := cbr.holdings[aid]
if !ok {
@@ -768,11 +898,6 @@ func (l *Ledger) Perform(gi int, ep *EvalParams) error {
}
}
-// Get() through allocated() implement cowForLogicLedger, so we should
-// be able to make logicLedger with this inside. That let's us to
-// write tests and then poke around and see how the balance table
-// inside is affected.
-
// Get returns the AccountData of an address. This test ledger does
 // not handle rewards, so the pending rewards flag is ignored.
func (l *Ledger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
@@ -822,7 +947,7 @@ func (l *Ledger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool,
return nil
}
-func (l *Ledger) round() basics.Round {
+func (l *Ledger) Round() basics.Round {
if l.rnd == basics.Round(0) {
// Something big enough to shake out bugs from width
l.rnd = basics.Round(uint64(math.MaxUint32) + 5)
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 1efab93be..38dde2e08 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -24,7 +24,7 @@ import (
)
// LogicVersion defines default assembler and max eval versions
-const LogicVersion = 8
+const LogicVersion = 9
// rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality
// was enabled. This is important to remember so that old TEAL accounts cannot
@@ -71,6 +71,9 @@ const fpVersion = 8 // changes for frame pointers and simpler function d
// their version, and fixup TestAssemble() in assembler_test.go.
const pairingVersion = 9 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
+// Unlimited Global Storage opcodes
+const boxVersion = 8 // box_*
+
type linearCost struct {
baseCost int
chunkCost int
@@ -448,13 +451,13 @@ var OpSpecs = []OpSpec{
{0x1e, "addw", opAddw, proto("ii:ii"), 2, detDefault()},
{0x1f, "divmodw", opDivModw, proto("iiii:iiii"), 4, costly(20)},
- {0x20, "intcblock", opIntConstBlock, proto(":"), 1, constants(asmIntCBlock, checkIntConstBlock, "uint ...", immInts)},
+ {0x20, "intcblock", opIntConstBlock, proto(":"), 1, constants(asmIntCBlock, checkIntImmArgs, "uint ...", immInts)},
{0x21, "intc", opIntConstLoad, proto(":i"), 1, immediates("i").assembler(asmIntC)},
{0x22, "intc_0", opIntConst0, proto(":i"), 1, detDefault()},
{0x23, "intc_1", opIntConst1, proto(":i"), 1, detDefault()},
{0x24, "intc_2", opIntConst2, proto(":i"), 1, detDefault()},
{0x25, "intc_3", opIntConst3, proto(":i"), 1, detDefault()},
- {0x26, "bytecblock", opByteConstBlock, proto(":"), 1, constants(asmByteCBlock, checkByteConstBlock, "bytes ...", immBytess)},
+ {0x26, "bytecblock", opByteConstBlock, proto(":"), 1, constants(asmByteCBlock, checkByteImmArgs, "bytes ...", immBytess)},
{0x27, "bytec", opByteConstLoad, proto(":b"), 1, immediates("i").assembler(asmByteC)},
{0x28, "bytec_0", opByteConst0, proto(":b"), 1, detDefault()},
{0x29, "bytec_1", opByteConst1, proto(":b"), 1, detDefault()},
@@ -552,6 +555,8 @@ var OpSpecs = []OpSpec{
// Immediate bytes and ints. Smaller code size for single use of constant.
{0x80, "pushbytes", opPushBytes, proto(":b"), 3, constants(asmPushBytes, opPushBytes, "bytes", immBytes)},
{0x81, "pushint", opPushInt, proto(":i"), 3, constants(asmPushInt, opPushInt, "uint", immInt)},
+ {0x82, "pushbytess", opPushBytess, proto(":", "", "[N items]"), 8, constants(asmPushBytess, checkByteImmArgs, "bytes ...", immBytess).typed(typePushBytess).trust()},
+ {0x83, "pushints", opPushInts, proto(":", "", "[N items]"), 8, constants(asmPushInts, checkIntImmArgs, "uint ...", immInts).typed(typePushInts).trust()},
{0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:i"), 7, costly(1900)},
@@ -563,7 +568,7 @@ var OpSpecs = []OpSpec{
{0x8b, "frame_dig", opFrameDig, proto(":a"), fpVersion, immKinded(immInt8, "i").typed(typeFrameDig)},
{0x8c, "frame_bury", opFrameBury, proto("a:"), fpVersion, immKinded(immInt8, "i").typed(typeFrameBury)},
{0x8d, "switch", opSwitch, proto("i:"), 8, detSwitch()},
- // 0x8e will likely be a switch on pairs of values/targets, called `match`
+ {0x8e, "match", opMatch, proto(":", "[A1, A2, ..., AN], B", ""), 8, detSwitch().trust()},
// More math
{0x90, "shl", opShiftLeft, proto("ii:i"), 4, detDefault()},
@@ -612,6 +617,15 @@ var OpSpecs = []OpSpec{
{0xb7, "gitxn", opGitxn, proto(":a"), 6, immediates("t", "f").field("f", &TxnFields).only(modeApp).assembler(asmGitxn)},
{0xb8, "gitxna", opGitxna, proto(":a"), 6, immediates("t", "f", "i").field("f", &TxnArrayFields).only(modeApp)},
+ // Unlimited Global Storage - Boxes
+ {0xb9, "box_create", opBoxCreate, proto("bi:i"), boxVersion, only(modeApp)},
+ {0xba, "box_extract", opBoxExtract, proto("bii:b"), boxVersion, only(modeApp)},
+ {0xbb, "box_replace", opBoxReplace, proto("bib:"), boxVersion, only(modeApp)},
+ {0xbc, "box_del", opBoxDel, proto("b:i"), boxVersion, only(modeApp)},
+ {0xbd, "box_len", opBoxLen, proto("b:ii"), boxVersion, only(modeApp)},
+ {0xbe, "box_get", opBoxGet, proto("b:bi"), boxVersion, only(modeApp)},
+ {0xbf, "box_put", opBoxPut, proto("bb:"), boxVersion, only(modeApp)},
+
// Dynamic indexing
{0xc0, "txnas", opTxnas, proto("i:a"), 5, field("f", &TxnArrayFields)},
{0xc1, "gtxnas", opGtxnas, proto("i:a"), 5, immediates("t", "f").field("f", &TxnArrayFields)},
diff --git a/data/transactions/logic/pairing.go b/data/transactions/logic/pairing.go
index cb43efeb5..25dfea40a 100644
--- a/data/transactions/logic/pairing.go
+++ b/data/transactions/logic/pairing.go
@@ -110,7 +110,6 @@ func opBn256Pairing(cx *EvalContext) error {
return errors.New("pairing failed")
}
cx.stack = cx.stack[:last]
- cx.stack[prev].Uint = boolToUint(ok)
- cx.stack[prev].Bytes = nil
+ cx.stack[prev] = boolToSV(ok)
return nil
}
diff --git a/data/transactions/logic/parsing.go b/data/transactions/logic/parsing.go
new file mode 100644
index 000000000..7a7429221
--- /dev/null
+++ b/data/transactions/logic/parsing.go
@@ -0,0 +1,105 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/algorand/avm-abi/abi"
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+// AppCallBytes represents an encoding and a value of an app call argument.
+type AppCallBytes struct {
+ Encoding string `codec:"encoding"`
+ Value string `codec:"value"`
+}
+
+// NewAppCallBytes parses an argument of the form "encoding:value" to AppCallBytes.
+func NewAppCallBytes(arg string) (AppCallBytes, error) {
+ parts := strings.SplitN(arg, ":", 2)
+ if len(parts) != 2 {
+ return AppCallBytes{}, fmt.Errorf("all arguments and box names should be of the form 'encoding:value'")
+ }
+ return AppCallBytes{
+ Encoding: parts[0],
+ Value: parts[1],
+ }, nil
+}
+
+// Raw converts an AppCallBytes arg to a byte array.
+func (arg AppCallBytes) Raw() (rawValue []byte, parseErr error) {
+ switch arg.Encoding {
+ case "str", "string":
+ rawValue = []byte(arg.Value)
+ case "int", "integer":
+ num, err := strconv.ParseUint(arg.Value, 10, 64)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not parse uint64 from string (%s): %v", arg.Value, err)
+ return
+ }
+ ibytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(ibytes, num)
+ rawValue = ibytes
+ case "addr", "address":
+ addr, err := basics.UnmarshalChecksumAddress(arg.Value)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not unmarshal checksummed address from string (%s): %v", arg.Value, err)
+ return
+ }
+ rawValue = addr[:]
+ case "b32", "base32", "byte base32":
+ data, err := base32.StdEncoding.DecodeString(arg.Value)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode base32-encoded string (%s): %v", arg.Value, err)
+ return
+ }
+ rawValue = data
+ case "b64", "base64", "byte base64":
+ data, err := base64.StdEncoding.DecodeString(arg.Value)
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode base64-encoded string (%s): %v", arg.Value, err)
+ return
+ }
+ rawValue = data
+ case "abi":
+ typeAndValue := strings.SplitN(arg.Value, ":", 2)
+ if len(typeAndValue) != 2 {
+ parseErr = fmt.Errorf("Could not decode abi string (%s): should split abi-type and abi-value with colon", arg.Value)
+ return
+ }
+ abiType, err := abi.TypeOf(typeAndValue[0])
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi type string (%s): %v", typeAndValue[0], err)
+ return
+ }
+ value, err := abiType.UnmarshalFromJSON([]byte(typeAndValue[1]))
+ if err != nil {
+ parseErr = fmt.Errorf("Could not decode abi value string (%s):%v ", typeAndValue[1], err)
+ return
+ }
+ return abiType.Encode(value)
+ default:
+ parseErr = fmt.Errorf("Unknown encoding: %s", arg.Encoding)
+ }
+ return
+}
diff --git a/data/transactions/logic/parsing_test.go b/data/transactions/logic/parsing_test.go
new file mode 100644
index 000000000..5bc3113b8
--- /dev/null
+++ b/data/transactions/logic/parsing_test.go
@@ -0,0 +1,139 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "testing"
+
+ "github.com/algorand/avm-abi/abi"
+ "github.com/algorand/go-algorand/data/basics"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewAppCallBytes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ t.Run("errors", func(t *testing.T) {
+ _, err := NewAppCallBytes("hello")
+ require.Error(t, err)
+
+ for _, v := range []string{":x", "int:-1"} {
+ acb, err := NewAppCallBytes(v)
+ _, err = acb.Raw()
+ require.Error(t, err)
+ }
+ })
+
+ for _, v := range []string{"hello", "1:2"} {
+ for _, e := range []string{"str", "string"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, v))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, v, string(r))
+ })
+ }
+
+ for _, e := range []string{"b32", "base32", "byte base32"} {
+ ve := base32.StdEncoding.EncodeToString([]byte(v))
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, ve), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, ve))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, ve, base32.StdEncoding.EncodeToString(r))
+ })
+ }
+
+ for _, e := range []string{"b64", "base64", "byte base64"} {
+ ve := base64.StdEncoding.EncodeToString([]byte(v))
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, ve), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, ve))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, ve, base64.StdEncoding.EncodeToString(r))
+ })
+ }
+ }
+
+ for _, v := range []uint64{1, 0, math.MaxUint64} {
+ for _, e := range []string{"int", "integer"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, v))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.Equal(t, v, binary.BigEndian.Uint64(r))
+ })
+ }
+ }
+
+ for _, v := range []string{"737777777777777777777777777777777777777777777777777UFEJ2CI"} {
+ for _, e := range []string{"addr", "address"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf("%v:%v", e, v))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ addr, err := basics.UnmarshalChecksumAddress(v)
+ require.NoError(t, err)
+ expectedBytes := []byte{}
+ expectedBytes = addr[:]
+ require.Equal(t, expectedBytes, r)
+ })
+ }
+ }
+
+ type abiCase struct {
+ abiType, rawValue string
+ }
+ for _, v := range []abiCase{
+ {
+ `(uint64,string,bool[])`,
+ `[399,"should pass",[true,false,false,true]]`,
+ }} {
+ for _, e := range []string{"abi"} {
+ t.Run(fmt.Sprintf("encoding=%v,value=%v", e, v), func(t *testing.T) {
+ acb, err := NewAppCallBytes(fmt.Sprintf(
+ "%v:%v:%v", e, v.abiType, v.rawValue))
+ require.NoError(t, err)
+ r, err := acb.Raw()
+ require.NoError(t, err)
+ require.NotEmpty(t, r)
+
+ // Confirm round-trip works.
+ abiType, err := abi.TypeOf(v.abiType)
+ require.NoError(t, err)
+ d, err := abiType.Decode(r)
+ require.NoError(t, err)
+ vv, err := abiType.Encode(d)
+ require.NoError(t, err)
+ require.Equal(t, r, vv)
+ })
+ }
+ }
+}
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index 7a299a962..59dc14368 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -64,15 +64,15 @@
},
{
"name": "keyword.control.teal",
- "match": "^(assert|b|bnz|bury|bz|callsub|cover|dig|dup|dup2|dupn|err|frame_bury|frame_dig|pop|popn|proto|retsub|return|select|swap|switch|uncover)\\b"
+ "match": "^(assert|b|bnz|bury|bz|callsub|cover|dig|dup|dup2|dupn|err|frame_bury|frame_dig|match|pop|popn|proto|retsub|return|select|swap|switch|uncover)\\b"
},
{
"name": "keyword.other.teal",
- "match": "^(int|byte|addr|arg|arg_0|arg_1|arg_2|arg_3|args|bytec|bytec_0|bytec_1|bytec_2|bytec_3|bytecblock|bzero|gaid|gaids|gload|gloads|gloadss|global|gtxn|gtxna|gtxnas|gtxns|gtxnsa|gtxnsas|intc|intc_0|intc_1|intc_2|intc_3|intcblock|load|loads|pushbytes|pushint|store|stores|txn|txna|txnas)\\b"
+ "match": "^(int|byte|addr|arg|arg_0|arg_1|arg_2|arg_3|args|bytec|bytec_0|bytec_1|bytec_2|bytec_3|bytecblock|bzero|gaid|gaids|gload|gloads|gloadss|global|gtxn|gtxna|gtxnas|gtxns|gtxnsa|gtxnsas|intc|intc_0|intc_1|intc_2|intc_3|intcblock|load|loads|pushbytes|pushbytess|pushint|pushints|store|stores|txn|txna|txnas)\\b"
},
{
"name": "keyword.other.unit.teal",
- "match": "^(acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|block|log|min_balance)\\b"
+ "match": "^(box_create|box_del|box_extract|box_get|box_len|box_put|box_replace|acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|block|log|min_balance)\\b"
},
{
"name": "keyword.operator.teal",
@@ -112,7 +112,7 @@
},
{
"name": "variable.parameter.teal",
- "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|VrfAlgorand|BlkSeed|BlkTimestamp)\\b"
+ "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|AcctTotalNumUint|AcctTotalNumByteSlice|AcctTotalExtraAppPages|AcctTotalAppsCreated|AcctTotalAppsOptedIn|AcctTotalAssetsCreated|AcctTotalAssets|AcctTotalBoxes|AcctTotalBoxBytes|VrfAlgorand|BlkSeed|BlkTimestamp)\\b"
}
]
},
diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go
index 779a74d59..bcb87f64e 100644
--- a/data/transactions/msgp_gen.go
+++ b/data/transactions/msgp_gen.go
@@ -53,6 +53,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// BoxRef
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// EvalDelta
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -178,56 +186,60 @@ import (
func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0005Len := uint32(11)
- var zb0005Mask uint16 /* 12 bits */
+ zb0006Len := uint32(12)
+ var zb0006Mask uint16 /* 13 bits */
if len((*z).ApplicationArgs) == 0 {
- zb0005Len--
- zb0005Mask |= 0x2
+ zb0006Len--
+ zb0006Mask |= 0x2
}
if (*z).OnCompletion == 0 {
- zb0005Len--
- zb0005Mask |= 0x4
+ zb0006Len--
+ zb0006Mask |= 0x4
}
if len((*z).ApprovalProgram) == 0 {
- zb0005Len--
- zb0005Mask |= 0x8
+ zb0006Len--
+ zb0006Mask |= 0x8
}
if len((*z).ForeignAssets) == 0 {
- zb0005Len--
- zb0005Mask |= 0x10
+ zb0006Len--
+ zb0006Mask |= 0x10
}
if len((*z).Accounts) == 0 {
- zb0005Len--
- zb0005Mask |= 0x20
+ zb0006Len--
+ zb0006Mask |= 0x20
+ }
+ if len((*z).Boxes) == 0 {
+ zb0006Len--
+ zb0006Mask |= 0x40
}
if (*z).ExtraProgramPages == 0 {
- zb0005Len--
- zb0005Mask |= 0x40
+ zb0006Len--
+ zb0006Mask |= 0x80
}
if len((*z).ForeignApps) == 0 {
- zb0005Len--
- zb0005Mask |= 0x80
+ zb0006Len--
+ zb0006Mask |= 0x100
}
if (*z).GlobalStateSchema.MsgIsZero() {
- zb0005Len--
- zb0005Mask |= 0x100
+ zb0006Len--
+ zb0006Mask |= 0x200
}
if (*z).ApplicationID.MsgIsZero() {
- zb0005Len--
- zb0005Mask |= 0x200
+ zb0006Len--
+ zb0006Mask |= 0x400
}
if (*z).LocalStateSchema.MsgIsZero() {
- zb0005Len--
- zb0005Mask |= 0x400
+ zb0006Len--
+ zb0006Mask |= 0x800
}
if len((*z).ClearStateProgram) == 0 {
- zb0005Len--
- zb0005Mask |= 0x800
+ zb0006Len--
+ zb0006Mask |= 0x1000
}
- // variable map header, size zb0005Len
- o = append(o, 0x80|uint8(zb0005Len))
- if zb0005Len != 0 {
- if (zb0005Mask & 0x2) == 0 { // if not empty
+ // variable map header, size zb0006Len
+ o = append(o, 0x80|uint8(zb0006Len))
+ if zb0006Len != 0 {
+ if (zb0006Mask & 0x2) == 0 { // if not empty
// string "apaa"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
if (*z).ApplicationArgs == nil {
@@ -239,17 +251,17 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendBytes(o, (*z).ApplicationArgs[zb0001])
}
}
- if (zb0005Mask & 0x4) == 0 { // if not empty
+ if (zb0006Mask & 0x4) == 0 { // if not empty
// string "apan"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
o = msgp.AppendUint64(o, uint64((*z).OnCompletion))
}
- if (zb0005Mask & 0x8) == 0 { // if not empty
+ if (zb0006Mask & 0x8) == 0 { // if not empty
// string "apap"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
o = msgp.AppendBytes(o, (*z).ApprovalProgram)
}
- if (zb0005Mask & 0x10) == 0 { // if not empty
+ if (zb0006Mask & 0x10) == 0 { // if not empty
// string "apas"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
if (*z).ForeignAssets == nil {
@@ -257,11 +269,11 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendArrayHeader(o, uint32(len((*z).ForeignAssets)))
}
- for zb0004 := range (*z).ForeignAssets {
- o = (*z).ForeignAssets[zb0004].MarshalMsg(o)
+ for zb0005 := range (*z).ForeignAssets {
+ o = (*z).ForeignAssets[zb0005].MarshalMsg(o)
}
}
- if (zb0005Mask & 0x20) == 0 { // if not empty
+ if (zb0006Mask & 0x20) == 0 { // if not empty
// string "apat"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
if (*z).Accounts == nil {
@@ -273,12 +285,46 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = (*z).Accounts[zb0002].MarshalMsg(o)
}
}
- if (zb0005Mask & 0x40) == 0 { // if not empty
+ if (zb0006Mask & 0x40) == 0 { // if not empty
+ // string "apbx"
+ o = append(o, 0xa4, 0x61, 0x70, 0x62, 0x78)
+ if (*z).Boxes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Boxes)))
+ }
+ for zb0004 := range (*z).Boxes {
+ // omitempty: check for empty values
+ zb0007Len := uint32(2)
+ var zb0007Mask uint8 /* 3 bits */
+ if (*z).Boxes[zb0004].Index == 0 {
+ zb0007Len--
+ zb0007Mask |= 0x2
+ }
+ if len((*z).Boxes[zb0004].Name) == 0 {
+ zb0007Len--
+ zb0007Mask |= 0x4
+ }
+ // variable map header, size zb0007Len
+ o = append(o, 0x80|uint8(zb0007Len))
+ if (zb0007Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).Boxes[zb0004].Index)
+ }
+ if (zb0007Mask & 0x4) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendBytes(o, (*z).Boxes[zb0004].Name)
+ }
+ }
+ }
+ if (zb0006Mask & 0x80) == 0 { // if not empty
// string "apep"
o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
o = msgp.AppendUint32(o, (*z).ExtraProgramPages)
}
- if (zb0005Mask & 0x80) == 0 { // if not empty
+ if (zb0006Mask & 0x100) == 0 { // if not empty
// string "apfa"
o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
if (*z).ForeignApps == nil {
@@ -290,22 +336,22 @@ func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
o = (*z).ForeignApps[zb0003].MarshalMsg(o)
}
}
- if (zb0005Mask & 0x100) == 0 { // if not empty
+ if (zb0006Mask & 0x200) == 0 { // if not empty
// string "apgs"
o = append(o, 0xa4, 0x61, 0x70, 0x67, 0x73)
o = (*z).GlobalStateSchema.MarshalMsg(o)
}
- if (zb0005Mask & 0x200) == 0 { // if not empty
+ if (zb0006Mask & 0x400) == 0 { // if not empty
// string "apid"
o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
o = (*z).ApplicationID.MarshalMsg(o)
}
- if (zb0005Mask & 0x400) == 0 { // if not empty
+ if (zb0006Mask & 0x800) == 0 { // if not empty
// string "apls"
o = append(o, 0xa4, 0x61, 0x70, 0x6c, 0x73)
o = (*z).LocalStateSchema.MarshalMsg(o)
}
- if (zb0005Mask & 0x800) == 0 { // if not empty
+ if (zb0006Mask & 0x1000) == 0 { // if not empty
// string "apsu"
o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
o = msgp.AppendBytes(o, (*z).ClearStateProgram)
@@ -323,55 +369,55 @@ func (_ *ApplicationCallTxnFields) CanMarshalMsg(z interface{}) bool {
func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
bts, err = (*z).ApplicationID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
return
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
{
- var zb0007 uint64
- zb0007, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0008 uint64
+ zb0008, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
return
}
- (*z).OnCompletion = OnCompletion(zb0007)
+ (*z).OnCompletion = OnCompletion(zb0008)
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0008 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(encodedMaxApplicationArgs))
+ if zb0009 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0009 {
+ if zb0010 {
(*z).ApplicationArgs = nil
- } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0008 {
- (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0008]
+ } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0009 {
+ (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0009]
} else {
- (*z).ApplicationArgs = make([][]byte, zb0008)
+ (*z).ApplicationArgs = make([][]byte, zb0009)
}
for zb0001 := range (*z).ApplicationArgs {
(*z).ApplicationArgs[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationArgs[zb0001])
@@ -381,26 +427,26 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0010 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(encodedMaxAccounts))
+ if zb0011 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0011 {
+ if zb0012 {
(*z).Accounts = nil
- } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0010 {
- (*z).Accounts = ((*z).Accounts)[:zb0010]
+ } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0011 {
+ (*z).Accounts = ((*z).Accounts)[:zb0011]
} else {
- (*z).Accounts = make([]basics.Address, zb0010)
+ (*z).Accounts = make([]basics.Address, zb0011)
}
for zb0002 := range (*z).Accounts {
bts, err = (*z).Accounts[zb0002].UnmarshalMsg(bts)
@@ -410,26 +456,26 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0012 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedMaxForeignApps))
+ if zb0013 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0013 {
+ if zb0014 {
(*z).ForeignApps = nil
- } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0012 {
- (*z).ForeignApps = ((*z).ForeignApps)[:zb0012]
+ } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0013 {
+ (*z).ForeignApps = ((*z).ForeignApps)[:zb0013]
} else {
- (*z).ForeignApps = make([]basics.AppIndex, zb0012)
+ (*z).ForeignApps = make([]basics.AppIndex, zb0013)
}
for zb0003 := range (*z).ForeignApps {
bts, err = (*z).ForeignApps[zb0003].UnmarshalMsg(bts)
@@ -439,61 +485,154 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0014 int
- var zb0015 bool
- zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0015 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0016 {
+ (*z).Boxes = nil
+ } else if (*z).Boxes != nil && cap((*z).Boxes) >= zb0015 {
+ (*z).Boxes = ((*z).Boxes)[:zb0015]
+ } else {
+ (*z).Boxes = make([]BoxRef, zb0015)
+ }
+ for zb0004 := range (*z).Boxes {
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ if zb0017 > 0 {
+ zb0017--
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0017 > 0 {
+ zb0017--
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0017 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0017)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ if zb0018 {
+ (*z).Boxes[zb0004] = BoxRef{}
+ }
+ for zb0017 > 0 {
+ zb0017--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "Index")
+ return
+ }
+ case "n":
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ if zb0006 > 0 {
+ zb0006--
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0014 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedMaxForeignAssets))
+ if zb0019 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0019), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0015 {
+ if zb0020 {
(*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0014 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0014]
+ } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0019 {
+ (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0019]
} else {
- (*z).ForeignAssets = make([]basics.AssetIndex, zb0014)
+ (*z).ForeignAssets = make([]basics.AssetIndex, zb0019)
}
- for zb0004 := range (*z).ForeignAssets {
- bts, err = (*z).ForeignAssets[zb0004].UnmarshalMsg(bts)
+ for zb0005 := range (*z).ForeignAssets {
+ bts, err = (*z).ForeignAssets[zb0005].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0004)
+ err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0005)
return
}
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
bts, err = (*z).LocalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalStateSchema")
return
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
bts, err = (*z).GlobalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GlobalStateSchema")
return
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0016 int
- zb0016, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0021 int
+ zb0021, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
return
}
- if zb0016 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(config.MaxAvailableAppProgramLen))
+ if zb0021 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram)
@@ -502,16 +641,16 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0017 int
- zb0017, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0022 int
+ zb0022, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
return
}
- if zb0017 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(config.MaxAvailableAppProgramLen))
+ if zb0022 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram)
@@ -520,16 +659,16 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
(*z).ExtraProgramPages, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
return
}
}
- if zb0005 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0005)
+ if zb0006 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0006)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -540,11 +679,11 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err)
return
}
- if zb0006 {
+ if zb0007 {
(*z) = ApplicationCallTxnFields{}
}
- for zb0005 > 0 {
- zb0005--
+ for zb0006 > 0 {
+ zb0006--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -559,33 +698,33 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
case "apan":
{
- var zb0018 uint64
- zb0018, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0023 uint64
+ zb0023, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnCompletion")
return
}
- (*z).OnCompletion = OnCompletion(zb0018)
+ (*z).OnCompletion = OnCompletion(zb0023)
}
case "apaa":
- var zb0019 int
- var zb0020 bool
- zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0024 int
+ var zb0025 bool
+ zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0019 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(encodedMaxApplicationArgs))
+ if zb0024 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0020 {
+ if zb0025 {
(*z).ApplicationArgs = nil
- } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0019 {
- (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0019]
+ } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0024 {
+ (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0024]
} else {
- (*z).ApplicationArgs = make([][]byte, zb0019)
+ (*z).ApplicationArgs = make([][]byte, zb0024)
}
for zb0001 := range (*z).ApplicationArgs {
(*z).ApplicationArgs[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationArgs[zb0001])
@@ -595,24 +734,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apat":
- var zb0021 int
- var zb0022 bool
- zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0026 int
+ var zb0027 bool
+ zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0021 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedMaxAccounts))
+ if zb0026 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0026), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0022 {
+ if zb0027 {
(*z).Accounts = nil
- } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0021 {
- (*z).Accounts = ((*z).Accounts)[:zb0021]
+ } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0026 {
+ (*z).Accounts = ((*z).Accounts)[:zb0026]
} else {
- (*z).Accounts = make([]basics.Address, zb0021)
+ (*z).Accounts = make([]basics.Address, zb0026)
}
for zb0002 := range (*z).Accounts {
bts, err = (*z).Accounts[zb0002].UnmarshalMsg(bts)
@@ -622,24 +761,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apfa":
- var zb0023 int
- var zb0024 bool
- zb0023, zb0024, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0028 int
+ var zb0029 bool
+ zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0023 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(encodedMaxForeignApps))
+ if zb0028 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0028), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0024 {
+ if zb0029 {
(*z).ForeignApps = nil
- } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0023 {
- (*z).ForeignApps = ((*z).ForeignApps)[:zb0023]
+ } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0028 {
+ (*z).ForeignApps = ((*z).ForeignApps)[:zb0028]
} else {
- (*z).ForeignApps = make([]basics.AppIndex, zb0023)
+ (*z).ForeignApps = make([]basics.AppIndex, zb0028)
}
for zb0003 := range (*z).ForeignApps {
bts, err = (*z).ForeignApps[zb0003].UnmarshalMsg(bts)
@@ -648,30 +787,121 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
}
+ case "apbx":
+ var zb0030 int
+ var zb0031 bool
+ zb0030, zb0031, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0030 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0030), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0031 {
+ (*z).Boxes = nil
+ } else if (*z).Boxes != nil && cap((*z).Boxes) >= zb0030 {
+ (*z).Boxes = ((*z).Boxes)[:zb0030]
+ } else {
+ (*z).Boxes = make([]BoxRef, zb0030)
+ }
+ for zb0004 := range (*z).Boxes {
+ var zb0032 int
+ var zb0033 bool
+ zb0032, zb0033, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0032, zb0033, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ if zb0032 > 0 {
+ zb0032--
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0032 > 0 {
+ zb0032--
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0032 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0032)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ if zb0033 {
+ (*z).Boxes[zb0004] = BoxRef{}
+ }
+ for zb0032 > 0 {
+ zb0032--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "Index")
+ return
+ }
+ case "n":
+ (*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004)
+ return
+ }
+ }
+ }
+ }
+ }
case "apas":
- var zb0025 int
- var zb0026 bool
- zb0025, zb0026, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0034 int
+ var zb0035 bool
+ zb0034, zb0035, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0025 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(encodedMaxForeignAssets))
+ if zb0034 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0034), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0026 {
+ if zb0035 {
(*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0025 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0025]
+ } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0034 {
+ (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0034]
} else {
- (*z).ForeignAssets = make([]basics.AssetIndex, zb0025)
+ (*z).ForeignAssets = make([]basics.AssetIndex, zb0034)
}
- for zb0004 := range (*z).ForeignAssets {
- bts, err = (*z).ForeignAssets[zb0004].UnmarshalMsg(bts)
+ for zb0005 := range (*z).ForeignAssets {
+ bts, err = (*z).ForeignAssets[zb0005].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0004)
+ err = msgp.WrapError(err, "ForeignAssets", zb0005)
return
}
}
@@ -688,14 +918,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "apap":
- var zb0027 int
- zb0027, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0036 int
+ zb0036, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ApprovalProgram")
return
}
- if zb0027 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0027), uint64(config.MaxAvailableAppProgramLen))
+ if zb0036 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0036), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram)
@@ -704,14 +934,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "apsu":
- var zb0028 int
- zb0028, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0037 int
+ zb0037, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ClearStateProgram")
return
}
- if zb0028 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(config.MaxAvailableAppProgramLen))
+ if zb0037 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0037), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram)
@@ -758,8 +988,12 @@ func (z *ApplicationCallTxnFields) Msgsize() (s int) {
s += (*z).ForeignApps[zb0003].Msgsize()
}
s += 5 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).ForeignAssets {
- s += (*z).ForeignAssets[zb0004].Msgsize()
+ for zb0004 := range (*z).Boxes {
+ s += 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + len((*z).Boxes[zb0004].Name)
+ }
+ s += 5 + msgp.ArrayHeaderSize
+ for zb0005 := range (*z).ForeignAssets {
+ s += (*z).ForeignAssets[zb0005].Msgsize()
}
s += 5 + (*z).LocalStateSchema.Msgsize() + 5 + (*z).GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ClearStateProgram) + 5 + msgp.Uint32Size
return
@@ -767,7 +1001,7 @@ func (z *ApplicationCallTxnFields) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *ApplicationCallTxnFields) MsgIsZero() bool {
- return ((*z).ApplicationID.MsgIsZero()) && ((*z).OnCompletion == 0) && (len((*z).ApplicationArgs) == 0) && (len((*z).Accounts) == 0) && (len((*z).ForeignApps) == 0) && (len((*z).ForeignAssets) == 0) && ((*z).LocalStateSchema.MsgIsZero()) && ((*z).GlobalStateSchema.MsgIsZero()) && (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && ((*z).ExtraProgramPages == 0)
+ return ((*z).ApplicationID.MsgIsZero()) && ((*z).OnCompletion == 0) && (len((*z).ApplicationArgs) == 0) && (len((*z).Accounts) == 0) && (len((*z).ForeignApps) == 0) && (len((*z).Boxes) == 0) && (len((*z).ForeignAssets) == 0) && ((*z).LocalStateSchema.MsgIsZero()) && ((*z).GlobalStateSchema.MsgIsZero()) && (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && ((*z).ExtraProgramPages == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -1517,6 +1751,135 @@ func (z *AssetTransferTxnFields) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *BoxRef) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(2)
+ var zb0001Mask uint8 /* 3 bits */
+ if (*z).Index == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if len((*z).Name) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).Index)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendBytes(o, (*z).Name)
+ }
+ }
+ return
+}
+
+func (_ *BoxRef) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BoxRef)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *BoxRef) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = BoxRef{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Index")
+ return
+ }
+ case "n":
+ (*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *BoxRef) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BoxRef)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *BoxRef) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + len((*z).Name)
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *BoxRef) MsgIsZero() bool {
+ return ((*z).Index == 0) && (len((*z).Name) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
@@ -4200,212 +4563,216 @@ func (z *StateProofTxnFields) MsgIsZero() bool {
func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0006Len := uint32(45)
- var zb0006Mask uint64 /* 54 bits */
+ zb0007Len := uint32(46)
+ var zb0007Mask uint64 /* 55 bits */
if (*z).AssetTransferTxnFields.AssetAmount == 0 {
- zb0006Len--
- zb0006Mask |= 0x200
+ zb0007Len--
+ zb0007Mask |= 0x200
}
if (*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400
+ zb0007Len--
+ zb0007Mask |= 0x400
}
if (*z).AssetFreezeTxnFields.AssetFrozen == false {
- zb0006Len--
- zb0006Mask |= 0x800
+ zb0007Len--
+ zb0007Mask |= 0x800
}
if (*z).PaymentTxnFields.Amount.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x1000
+ zb0007Len--
+ zb0007Mask |= 0x1000
}
if len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0 {
- zb0006Len--
- zb0006Mask |= 0x2000
+ zb0007Len--
+ zb0007Mask |= 0x2000
}
if (*z).ApplicationCallTxnFields.OnCompletion == 0 {
- zb0006Len--
- zb0006Mask |= 0x4000
+ zb0007Len--
+ zb0007Mask |= 0x4000
}
if len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0 {
- zb0006Len--
- zb0006Mask |= 0x8000
+ zb0007Len--
+ zb0007Mask |= 0x8000
}
if (*z).AssetConfigTxnFields.AssetParams.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000
+ zb0007Len--
+ zb0007Mask |= 0x10000
}
if len((*z).ApplicationCallTxnFields.ForeignAssets) == 0 {
- zb0006Len--
- zb0006Mask |= 0x20000
+ zb0007Len--
+ zb0007Mask |= 0x20000
}
if len((*z).ApplicationCallTxnFields.Accounts) == 0 {
- zb0006Len--
- zb0006Mask |= 0x40000
+ zb0007Len--
+ zb0007Mask |= 0x40000
+ }
+ if len((*z).ApplicationCallTxnFields.Boxes) == 0 {
+ zb0007Len--
+ zb0007Mask |= 0x80000
}
if (*z).ApplicationCallTxnFields.ExtraProgramPages == 0 {
- zb0006Len--
- zb0006Mask |= 0x80000
+ zb0007Len--
+ zb0007Mask |= 0x100000
}
if len((*z).ApplicationCallTxnFields.ForeignApps) == 0 {
- zb0006Len--
- zb0006Mask |= 0x100000
+ zb0007Len--
+ zb0007Mask |= 0x200000
}
if (*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x200000
+ zb0007Len--
+ zb0007Mask |= 0x400000
}
if (*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400000
+ zb0007Len--
+ zb0007Mask |= 0x800000
}
if (*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x800000
+ zb0007Len--
+ zb0007Mask |= 0x1000000
}
if len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0 {
- zb0006Len--
- zb0006Mask |= 0x1000000
+ zb0007Len--
+ zb0007Mask |= 0x2000000
}
if (*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x2000000
+ zb0007Len--
+ zb0007Mask |= 0x4000000
}
if (*z).AssetTransferTxnFields.AssetSender.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x4000000
+ zb0007Len--
+ zb0007Mask |= 0x8000000
}
if (*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x8000000
+ zb0007Len--
+ zb0007Mask |= 0x10000000
}
if (*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000000
+ zb0007Len--
+ zb0007Mask |= 0x20000000
}
if (*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x20000000
+ zb0007Len--
+ zb0007Mask |= 0x40000000
}
if (*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x40000000
+ zb0007Len--
+ zb0007Mask |= 0x80000000
}
if (*z).Header.Fee.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x80000000
+ zb0007Len--
+ zb0007Mask |= 0x100000000
}
if (*z).Header.FirstValid.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x100000000
+ zb0007Len--
+ zb0007Mask |= 0x200000000
}
if (*z).Header.GenesisID == "" {
- zb0006Len--
- zb0006Mask |= 0x200000000
+ zb0007Len--
+ zb0007Mask |= 0x400000000
}
if (*z).Header.GenesisHash.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400000000
+ zb0007Len--
+ zb0007Mask |= 0x800000000
}
if (*z).Header.Group.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x800000000
+ zb0007Len--
+ zb0007Mask |= 0x1000000000
}
if (*z).Header.LastValid.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x1000000000
+ zb0007Len--
+ zb0007Mask |= 0x2000000000
}
if (*z).Header.Lease == ([32]byte{}) {
- zb0006Len--
- zb0006Mask |= 0x2000000000
+ zb0007Len--
+ zb0007Mask |= 0x4000000000
}
if (*z).KeyregTxnFields.Nonparticipation == false {
- zb0006Len--
- zb0006Mask |= 0x4000000000
+ zb0007Len--
+ zb0007Mask |= 0x8000000000
}
if len((*z).Header.Note) == 0 {
- zb0006Len--
- zb0006Mask |= 0x8000000000
+ zb0007Len--
+ zb0007Mask |= 0x10000000000
}
if (*z).PaymentTxnFields.Receiver.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000000000
+ zb0007Len--
+ zb0007Mask |= 0x20000000000
}
if (*z).Header.RekeyTo.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x20000000000
+ zb0007Len--
+ zb0007Mask |= 0x40000000000
}
if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x40000000000
+ zb0007Len--
+ zb0007Mask |= 0x80000000000
}
if (*z).Header.Sender.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x80000000000
+ zb0007Len--
+ zb0007Mask |= 0x100000000000
}
if (*z).StateProofTxnFields.StateProof.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x100000000000
+ zb0007Len--
+ zb0007Mask |= 0x200000000000
}
if (*z).StateProofTxnFields.Message.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x200000000000
+ zb0007Len--
+ zb0007Mask |= 0x400000000000
}
if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x400000000000
+ zb0007Len--
+ zb0007Mask |= 0x800000000000
}
if (*z).StateProofTxnFields.StateProofType.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x800000000000
+ zb0007Len--
+ zb0007Mask |= 0x1000000000000
}
if (*z).Type.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x1000000000000
+ zb0007Len--
+ zb0007Mask |= 0x2000000000000
}
if (*z).KeyregTxnFields.VoteFirst.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x2000000000000
+ zb0007Len--
+ zb0007Mask |= 0x4000000000000
}
if (*z).KeyregTxnFields.VoteKeyDilution == 0 {
- zb0006Len--
- zb0006Mask |= 0x4000000000000
+ zb0007Len--
+ zb0007Mask |= 0x8000000000000
}
if (*z).KeyregTxnFields.VotePK.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x8000000000000
+ zb0007Len--
+ zb0007Mask |= 0x10000000000000
}
if (*z).KeyregTxnFields.VoteLast.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x10000000000000
+ zb0007Len--
+ zb0007Mask |= 0x20000000000000
}
if (*z).AssetTransferTxnFields.XferAsset.MsgIsZero() {
- zb0006Len--
- zb0006Mask |= 0x20000000000000
+ zb0007Len--
+ zb0007Mask |= 0x40000000000000
}
- // variable map header, size zb0006Len
- o = msgp.AppendMapHeader(o, zb0006Len)
- if zb0006Len != 0 {
- if (zb0006Mask & 0x200) == 0 { // if not empty
+ // variable map header, size zb0007Len
+ o = msgp.AppendMapHeader(o, zb0007Len)
+ if zb0007Len != 0 {
+ if (zb0007Mask & 0x200) == 0 { // if not empty
// string "aamt"
o = append(o, 0xa4, 0x61, 0x61, 0x6d, 0x74)
o = msgp.AppendUint64(o, (*z).AssetTransferTxnFields.AssetAmount)
}
- if (zb0006Mask & 0x400) == 0 { // if not empty
+ if (zb0007Mask & 0x400) == 0 { // if not empty
// string "aclose"
o = append(o, 0xa6, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65)
o = (*z).AssetTransferTxnFields.AssetCloseTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x800) == 0 { // if not empty
+ if (zb0007Mask & 0x800) == 0 { // if not empty
// string "afrz"
o = append(o, 0xa4, 0x61, 0x66, 0x72, 0x7a)
o = msgp.AppendBool(o, (*z).AssetFreezeTxnFields.AssetFrozen)
}
- if (zb0006Mask & 0x1000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000) == 0 { // if not empty
// string "amt"
o = append(o, 0xa3, 0x61, 0x6d, 0x74)
o = (*z).PaymentTxnFields.Amount.MarshalMsg(o)
}
- if (zb0006Mask & 0x2000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000) == 0 { // if not empty
// string "apaa"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61)
if (*z).ApplicationCallTxnFields.ApplicationArgs == nil {
@@ -4417,22 +4784,22 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
}
}
- if (zb0006Mask & 0x4000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000) == 0 { // if not empty
// string "apan"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e)
o = msgp.AppendUint64(o, uint64((*z).ApplicationCallTxnFields.OnCompletion))
}
- if (zb0006Mask & 0x8000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000) == 0 { // if not empty
// string "apap"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70)
o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ApprovalProgram)
}
- if (zb0006Mask & 0x10000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000) == 0 { // if not empty
// string "apar"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x72)
o = (*z).AssetConfigTxnFields.AssetParams.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000) == 0 { // if not empty
// string "apas"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73)
if (*z).ApplicationCallTxnFields.ForeignAssets == nil {
@@ -4440,11 +4807,11 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendArrayHeader(o, uint32(len((*z).ApplicationCallTxnFields.ForeignAssets)))
}
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- o = (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].MarshalMsg(o)
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ o = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].MarshalMsg(o)
}
}
- if (zb0006Mask & 0x40000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000) == 0 { // if not empty
// string "apat"
o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74)
if (*z).ApplicationCallTxnFields.Accounts == nil {
@@ -4456,12 +4823,46 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = (*z).ApplicationCallTxnFields.Accounts[zb0003].MarshalMsg(o)
}
}
- if (zb0006Mask & 0x80000) == 0 { // if not empty
+ if (zb0007Mask & 0x80000) == 0 { // if not empty
+ // string "apbx"
+ o = append(o, 0xa4, 0x61, 0x70, 0x62, 0x78)
+ if (*z).ApplicationCallTxnFields.Boxes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ApplicationCallTxnFields.Boxes)))
+ }
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ // omitempty: check for empty values
+ zb0008Len := uint32(2)
+ var zb0008Mask uint8 /* 3 bits */
+ if (*z).ApplicationCallTxnFields.Boxes[zb0005].Index == 0 {
+ zb0008Len--
+ zb0008Mask |= 0x2
+ }
+ if len((*z).ApplicationCallTxnFields.Boxes[zb0005].Name) == 0 {
+ zb0008Len--
+ zb0008Mask |= 0x4
+ }
+ // variable map header, size zb0008Len
+ o = append(o, 0x80|uint8(zb0008Len))
+ if (zb0008Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).ApplicationCallTxnFields.Boxes[zb0005].Index)
+ }
+ if (zb0008Mask & 0x4) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ }
+ }
+ }
+ if (zb0007Mask & 0x100000) == 0 { // if not empty
// string "apep"
o = append(o, 0xa4, 0x61, 0x70, 0x65, 0x70)
o = msgp.AppendUint32(o, (*z).ApplicationCallTxnFields.ExtraProgramPages)
}
- if (zb0006Mask & 0x100000) == 0 { // if not empty
+ if (zb0007Mask & 0x200000) == 0 { // if not empty
// string "apfa"
o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61)
if (*z).ApplicationCallTxnFields.ForeignApps == nil {
@@ -4473,167 +4874,167 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].MarshalMsg(o)
}
}
- if (zb0006Mask & 0x200000) == 0 { // if not empty
+ if (zb0007Mask & 0x400000) == 0 { // if not empty
// string "apgs"
o = append(o, 0xa4, 0x61, 0x70, 0x67, 0x73)
o = (*z).ApplicationCallTxnFields.GlobalStateSchema.MarshalMsg(o)
}
- if (zb0006Mask & 0x400000) == 0 { // if not empty
+ if (zb0007Mask & 0x800000) == 0 { // if not empty
// string "apid"
o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64)
o = (*z).ApplicationCallTxnFields.ApplicationID.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000000) == 0 { // if not empty
// string "apls"
o = append(o, 0xa4, 0x61, 0x70, 0x6c, 0x73)
o = (*z).ApplicationCallTxnFields.LocalStateSchema.MarshalMsg(o)
}
- if (zb0006Mask & 0x1000000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000000) == 0 { // if not empty
// string "apsu"
o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75)
o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ClearStateProgram)
}
- if (zb0006Mask & 0x2000000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000000) == 0 { // if not empty
// string "arcv"
o = append(o, 0xa4, 0x61, 0x72, 0x63, 0x76)
o = (*z).AssetTransferTxnFields.AssetReceiver.MarshalMsg(o)
}
- if (zb0006Mask & 0x4000000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000000) == 0 { // if not empty
// string "asnd"
o = append(o, 0xa4, 0x61, 0x73, 0x6e, 0x64)
o = (*z).AssetTransferTxnFields.AssetSender.MarshalMsg(o)
}
- if (zb0006Mask & 0x8000000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000000) == 0 { // if not empty
// string "caid"
o = append(o, 0xa4, 0x63, 0x61, 0x69, 0x64)
o = (*z).AssetConfigTxnFields.ConfigAsset.MarshalMsg(o)
}
- if (zb0006Mask & 0x10000000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000000) == 0 { // if not empty
// string "close"
o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65)
o = (*z).PaymentTxnFields.CloseRemainderTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000000) == 0 { // if not empty
// string "fadd"
o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64)
o = (*z).AssetFreezeTxnFields.FreezeAccount.MarshalMsg(o)
}
- if (zb0006Mask & 0x40000000) == 0 { // if not empty
+ if (zb0007Mask & 0x80000000) == 0 { // if not empty
// string "faid"
o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64)
o = (*z).AssetFreezeTxnFields.FreezeAsset.MarshalMsg(o)
}
- if (zb0006Mask & 0x80000000) == 0 { // if not empty
+ if (zb0007Mask & 0x100000000) == 0 { // if not empty
// string "fee"
o = append(o, 0xa3, 0x66, 0x65, 0x65)
o = (*z).Header.Fee.MarshalMsg(o)
}
- if (zb0006Mask & 0x100000000) == 0 { // if not empty
+ if (zb0007Mask & 0x200000000) == 0 { // if not empty
// string "fv"
o = append(o, 0xa2, 0x66, 0x76)
o = (*z).Header.FirstValid.MarshalMsg(o)
}
- if (zb0006Mask & 0x200000000) == 0 { // if not empty
+ if (zb0007Mask & 0x400000000) == 0 { // if not empty
// string "gen"
o = append(o, 0xa3, 0x67, 0x65, 0x6e)
o = msgp.AppendString(o, (*z).Header.GenesisID)
}
- if (zb0006Mask & 0x400000000) == 0 { // if not empty
+ if (zb0007Mask & 0x800000000) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).Header.GenesisHash.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000000000) == 0 { // if not empty
// string "grp"
o = append(o, 0xa3, 0x67, 0x72, 0x70)
o = (*z).Header.Group.MarshalMsg(o)
}
- if (zb0006Mask & 0x1000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000000000) == 0 { // if not empty
// string "lv"
o = append(o, 0xa2, 0x6c, 0x76)
o = (*z).Header.LastValid.MarshalMsg(o)
}
- if (zb0006Mask & 0x2000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000000000) == 0 { // if not empty
// string "lx"
o = append(o, 0xa2, 0x6c, 0x78)
o = msgp.AppendBytes(o, ((*z).Header.Lease)[:])
}
- if (zb0006Mask & 0x4000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000000000) == 0 { // if not empty
// string "nonpart"
o = append(o, 0xa7, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74)
o = msgp.AppendBool(o, (*z).KeyregTxnFields.Nonparticipation)
}
- if (zb0006Mask & 0x8000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000000000) == 0 { // if not empty
// string "note"
o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65)
o = msgp.AppendBytes(o, (*z).Header.Note)
}
- if (zb0006Mask & 0x10000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000000000) == 0 { // if not empty
// string "rcv"
o = append(o, 0xa3, 0x72, 0x63, 0x76)
o = (*z).PaymentTxnFields.Receiver.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000000000) == 0 { // if not empty
// string "rekey"
o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79)
o = (*z).Header.RekeyTo.MarshalMsg(o)
}
- if (zb0006Mask & 0x40000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x80000000000) == 0 { // if not empty
// string "selkey"
o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.SelectionPK.MarshalMsg(o)
}
- if (zb0006Mask & 0x80000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x100000000000) == 0 { // if not empty
// string "snd"
o = append(o, 0xa3, 0x73, 0x6e, 0x64)
o = (*z).Header.Sender.MarshalMsg(o)
}
- if (zb0006Mask & 0x100000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x200000000000) == 0 { // if not empty
// string "sp"
o = append(o, 0xa2, 0x73, 0x70)
o = (*z).StateProofTxnFields.StateProof.MarshalMsg(o)
}
- if (zb0006Mask & 0x200000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x400000000000) == 0 { // if not empty
// string "spmsg"
o = append(o, 0xa5, 0x73, 0x70, 0x6d, 0x73, 0x67)
o = (*z).StateProofTxnFields.Message.MarshalMsg(o)
}
- if (zb0006Mask & 0x400000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x800000000000) == 0 { // if not empty
// string "sprfkey"
o = append(o, 0xa7, 0x73, 0x70, 0x72, 0x66, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.StateProofPK.MarshalMsg(o)
}
- if (zb0006Mask & 0x800000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x1000000000000) == 0 { // if not empty
// string "sptype"
o = append(o, 0xa6, 0x73, 0x70, 0x74, 0x79, 0x70, 0x65)
o = (*z).StateProofTxnFields.StateProofType.MarshalMsg(o)
}
- if (zb0006Mask & 0x1000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x2000000000000) == 0 { // if not empty
// string "type"
o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
o = (*z).Type.MarshalMsg(o)
}
- if (zb0006Mask & 0x2000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x4000000000000) == 0 { // if not empty
// string "votefst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74)
o = (*z).KeyregTxnFields.VoteFirst.MarshalMsg(o)
}
- if (zb0006Mask & 0x4000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x8000000000000) == 0 { // if not empty
// string "votekd"
o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64)
o = msgp.AppendUint64(o, (*z).KeyregTxnFields.VoteKeyDilution)
}
- if (zb0006Mask & 0x8000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x10000000000000) == 0 { // if not empty
// string "votekey"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79)
o = (*z).KeyregTxnFields.VotePK.MarshalMsg(o)
}
- if (zb0006Mask & 0x10000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x20000000000000) == 0 { // if not empty
// string "votelst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74)
o = (*z).KeyregTxnFields.VoteLast.MarshalMsg(o)
}
- if (zb0006Mask & 0x20000000000000) == 0 { // if not empty
+ if (zb0007Mask & 0x40000000000000) == 0 { // if not empty
// string "xaid"
o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64)
o = (*z).AssetTransferTxnFields.XferAsset.MarshalMsg(o)
@@ -4651,65 +5052,65 @@ func (_ *Transaction) CanMarshalMsg(z interface{}) bool {
func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Type.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Type")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.Sender.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Sender")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.Fee.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Fee")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.FirstValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FirstValid")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.LastValid.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LastValid")
return
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0008 int
- zb0008, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0009 int
+ zb0009, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Note")
return
}
- if zb0008 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxTxnNoteBytes))
+ if zb0009 > config.MaxTxnNoteBytes {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxTxnNoteBytes))
return
}
(*z).Header.Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Header.Note)
@@ -4718,246 +5119,246 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).Header.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.GenesisHash.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisHash")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.Group.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Group")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = msgp.ReadExactBytes(bts, ((*z).Header.Lease)[:])
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Lease")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).Header.RekeyTo.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "RekeyTo")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.VotePK.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VotePK")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.SelectionPK.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SelectionPK")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.StateProofPK.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofPK")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.VoteFirst.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteFirst")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).KeyregTxnFields.VoteLast.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteLast")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).KeyregTxnFields.VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).KeyregTxnFields.Nonparticipation, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Nonparticipation")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).PaymentTxnFields.Receiver.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Receiver")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).PaymentTxnFields.Amount.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Amount")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).PaymentTxnFields.CloseRemainderTo.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "CloseRemainderTo")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetConfigTxnFields.ConfigAsset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ConfigAsset")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetConfigTxnFields.AssetParams.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetParams")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.XferAsset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "XferAsset")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).AssetTransferTxnFields.AssetAmount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetAmount")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.AssetSender.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetSender")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.AssetReceiver.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetReceiver")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetTransferTxnFields.AssetCloseTo.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetCloseTo")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetFreezeTxnFields.FreezeAccount.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FreezeAccount")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).AssetFreezeTxnFields.FreezeAsset.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "FreezeAsset")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).AssetFreezeTxnFields.AssetFrozen, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetFrozen")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).ApplicationCallTxnFields.ApplicationID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationID")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
{
- var zb0009 uint64
- zb0009, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0010 uint64
+ zb0010, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
return
}
- (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0009)
+ (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0010)
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0010 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(encodedMaxApplicationArgs))
+ if zb0011 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0011 {
+ if zb0012 {
(*z).ApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0010 {
- (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0010]
+ } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0011 {
+ (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0011]
} else {
- (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0010)
+ (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0011)
}
for zb0002 := range (*z).ApplicationCallTxnFields.ApplicationArgs {
(*z).ApplicationCallTxnFields.ApplicationArgs[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
@@ -4967,26 +5368,26 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0012 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedMaxAccounts))
+ if zb0013 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0013 {
+ if zb0014 {
(*z).ApplicationCallTxnFields.Accounts = nil
- } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0012 {
- (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0012]
+ } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0013 {
+ (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0013]
} else {
- (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0012)
+ (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0013)
}
for zb0003 := range (*z).ApplicationCallTxnFields.Accounts {
bts, err = (*z).ApplicationCallTxnFields.Accounts[zb0003].UnmarshalMsg(bts)
@@ -4996,26 +5397,26 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0014 int
- var zb0015 bool
- zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0014 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedMaxForeignApps))
+ if zb0015 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0015 {
+ if zb0016 {
(*z).ApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0014 {
- (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0014]
+ } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0015 {
+ (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0015]
} else {
- (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0014)
+ (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0015)
}
for zb0004 := range (*z).ApplicationCallTxnFields.ForeignApps {
bts, err = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].UnmarshalMsg(bts)
@@ -5025,61 +5426,154 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0016 int
- var zb0017 bool
- zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0017 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0017), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "struct-from-array", "Boxes")
+ return
+ }
+ if zb0018 {
+ (*z).ApplicationCallTxnFields.Boxes = nil
+ } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0017 {
+ (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0017]
+ } else {
+ (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0017)
+ }
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ if zb0019 > 0 {
+ zb0019--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ zb0019--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0019)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ if zb0020 {
+ (*z).ApplicationCallTxnFields.Boxes[zb0005] = BoxRef{}
+ }
+ for zb0019 > 0 {
+ zb0019--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "Index")
+ return
+ }
+ case "n":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ if zb0007 > 0 {
+ zb0007--
+ var zb0021 int
+ var zb0022 bool
+ zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0016 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0016), uint64(encodedMaxForeignAssets))
+ if zb0021 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0017 {
+ if zb0022 {
(*z).ApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0016 {
- (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0016]
+ } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0021 {
+ (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0021]
} else {
- (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0016)
+ (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0021)
}
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].UnmarshalMsg(bts)
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0005)
+ err = msgp.WrapError(err, "struct-from-array", "ForeignAssets", zb0006)
return
}
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).ApplicationCallTxnFields.LocalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalStateSchema")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).ApplicationCallTxnFields.GlobalStateSchema.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GlobalStateSchema")
return
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0018 int
- zb0018, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0023 int
+ zb0023, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
return
}
- if zb0018 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0018), uint64(config.MaxAvailableAppProgramLen))
+ if zb0023 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0023), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram)
@@ -5088,16 +5582,16 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
- if zb0006 > 0 {
- zb0006--
- var zb0019 int
- zb0019, err = msgp.ReadBytesBytesHeader(bts)
+ if zb0007 > 0 {
+ zb0007--
+ var zb0024 int
+ zb0024, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
return
}
- if zb0019 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(config.MaxAvailableAppProgramLen))
+ if zb0024 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram)
@@ -5106,40 +5600,40 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
(*z).ApplicationCallTxnFields.ExtraProgramPages, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExtraProgramPages")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).StateProofTxnFields.StateProofType.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofType")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).StateProofTxnFields.StateProof.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProof")
return
}
}
- if zb0006 > 0 {
- zb0006--
+ if zb0007 > 0 {
+ zb0007--
bts, err = (*z).StateProofTxnFields.Message.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Message")
return
}
}
- if zb0006 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0006)
+ if zb0007 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0007)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -5150,11 +5644,11 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0007 {
+ if zb0008 {
(*z) = Transaction{}
}
- for zb0006 > 0 {
- zb0006--
+ for zb0007 > 0 {
+ zb0007--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -5192,14 +5686,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "note":
- var zb0020 int
- zb0020, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0025 int
+ zb0025, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "Note")
return
}
- if zb0020 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxTxnNoteBytes))
+ if zb0025 > config.MaxTxnNoteBytes {
+ err = msgp.ErrOverflow(uint64(zb0025), uint64(config.MaxTxnNoteBytes))
return
}
(*z).Header.Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Header.Note)
@@ -5365,33 +5859,33 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "apan":
{
- var zb0021 uint64
- zb0021, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0026 uint64
+ zb0026, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnCompletion")
return
}
- (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0021)
+ (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0026)
}
case "apaa":
- var zb0022 int
- var zb0023 bool
- zb0022, zb0023, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0027 int
+ var zb0028 bool
+ zb0027, zb0028, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0022 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(encodedMaxApplicationArgs))
+ if zb0027 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0027), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0023 {
+ if zb0028 {
(*z).ApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0022 {
- (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0022]
+ } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0027 {
+ (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0027]
} else {
- (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0022)
+ (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0027)
}
for zb0002 := range (*z).ApplicationCallTxnFields.ApplicationArgs {
(*z).ApplicationCallTxnFields.ApplicationArgs[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
@@ -5401,24 +5895,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apat":
- var zb0024 int
- var zb0025 bool
- zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0029 int
+ var zb0030 bool
+ zb0029, zb0030, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0024 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedMaxAccounts))
+ if zb0029 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0029), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0025 {
+ if zb0030 {
(*z).ApplicationCallTxnFields.Accounts = nil
- } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0024 {
- (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0024]
+ } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0029 {
+ (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0029]
} else {
- (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0024)
+ (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0029)
}
for zb0003 := range (*z).ApplicationCallTxnFields.Accounts {
bts, err = (*z).ApplicationCallTxnFields.Accounts[zb0003].UnmarshalMsg(bts)
@@ -5428,24 +5922,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apfa":
- var zb0026 int
- var zb0027 bool
- zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0031 int
+ var zb0032 bool
+ zb0031, zb0032, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0026 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(encodedMaxForeignApps))
+ if zb0031 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0031), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0027 {
+ if zb0032 {
(*z).ApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0026 {
- (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0026]
+ } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0031 {
+ (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0031]
} else {
- (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0026)
+ (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0031)
}
for zb0004 := range (*z).ApplicationCallTxnFields.ForeignApps {
bts, err = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].UnmarshalMsg(bts)
@@ -5454,30 +5948,121 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
+ case "apbx":
+ var zb0033 int
+ var zb0034 bool
+ zb0033, zb0034, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0033 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0033), uint64(encodedMaxBoxes))
+ err = msgp.WrapError(err, "Boxes")
+ return
+ }
+ if zb0034 {
+ (*z).ApplicationCallTxnFields.Boxes = nil
+ } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0033 {
+ (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0033]
+ } else {
+ (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0033)
+ }
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ var zb0035 int
+ var zb0036 bool
+ zb0035, zb0036, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0035, zb0036, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ if zb0035 > 0 {
+ zb0035--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Index")
+ return
+ }
+ }
+ if zb0035 > 0 {
+ zb0035--
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Name")
+ return
+ }
+ }
+ if zb0035 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0035)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ if zb0036 {
+ (*z).ApplicationCallTxnFields.Boxes[zb0005] = BoxRef{}
+ }
+ for zb0035 > 0 {
+ zb0035--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "Index")
+ return
+ }
+ case "n":
+ (*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "Name")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005)
+ return
+ }
+ }
+ }
+ }
+ }
case "apas":
- var zb0028 int
- var zb0029 bool
- zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0037 int
+ var zb0038 bool
+ zb0037, zb0038, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0028 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(encodedMaxForeignAssets))
+ if zb0037 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0037), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0029 {
+ if zb0038 {
(*z).ApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0028 {
- (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0028]
+ } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0037 {
+ (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0037]
} else {
- (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0028)
+ (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0037)
}
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].UnmarshalMsg(bts)
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "ForeignAssets", zb0005)
+ err = msgp.WrapError(err, "ForeignAssets", zb0006)
return
}
}
@@ -5494,14 +6079,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "apap":
- var zb0030 int
- zb0030, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0039 int
+ zb0039, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ApprovalProgram")
return
}
- if zb0030 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(config.MaxAvailableAppProgramLen))
+ if zb0039 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0039), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram)
@@ -5510,14 +6095,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "apsu":
- var zb0031 int
- zb0031, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0040 int
+ zb0040, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ClearStateProgram")
return
}
- if zb0031 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0031), uint64(config.MaxAvailableAppProgramLen))
+ if zb0040 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0040), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram)
@@ -5582,8 +6167,12 @@ func (z *Transaction) Msgsize() (s int) {
s += (*z).ApplicationCallTxnFields.ForeignApps[zb0004].Msgsize()
}
s += 5 + msgp.ArrayHeaderSize
- for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets {
- s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].Msgsize()
+ for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
+ s += 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
+ }
+ s += 5 + msgp.ArrayHeaderSize
+ for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
+ s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].Msgsize()
}
s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 7 + (*z).StateProofTxnFields.StateProofType.Msgsize() + 3 + (*z).StateProofTxnFields.StateProof.Msgsize() + 6 + (*z).StateProofTxnFields.Message.Msgsize()
return
@@ -5591,7 +6180,7 @@ func (z *Transaction) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *Transaction) MsgIsZero() bool {
- return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && 
(len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero())
+ return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.Boxes) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && 
(len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/transactions/msgp_gen_test.go b/data/transactions/msgp_gen_test.go
index ef4764d45..0ce6b29c3 100644
--- a/data/transactions/msgp_gen_test.go
+++ b/data/transactions/msgp_gen_test.go
@@ -314,6 +314,66 @@ func BenchmarkUnmarshalAssetTransferTxnFields(b *testing.B) {
}
}
+func TestMarshalUnmarshalBoxRef(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := BoxRef{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingBoxRef(t *testing.T) {
+ protocol.RunEncodingTest(t, &BoxRef{})
+}
+
+func BenchmarkMarshalMsgBoxRef(b *testing.B) {
+ v := BoxRef{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgBoxRef(b *testing.B) {
+ v := BoxRef{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalBoxRef(b *testing.B) {
+ v := BoxRef{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalEvalDelta(t *testing.T) {
partitiontest.PartitionTest(t)
v := EvalDelta{}
diff --git a/data/transactions/teal_test.go b/data/transactions/teal_test.go
index 990036895..e5920d0f1 100644
--- a/data/transactions/teal_test.go
+++ b/data/transactions/teal_test.go
@@ -192,7 +192,7 @@ func TestEvalDeltaEqual(t *testing.T) {
// TestUnchangedAllocBounds ensure that the allocbounds on EvalDelta have not
// changed. If they change, EvalDelta.checkAllocBounds must be changed, or at
// least reconsidered, as well. We must give plenty of thought to whether a new
-// allocound, used by new versions, is compatible with old code. If the change
+// allocbound, used by new versions, is compatible with old code. If the change
// can only show up in new protocol versions, it should be ok. But if we change
// a bound, it will go into effect immediately, not with Protocol upgrade. So we
// must be extremely careful that old protocol versions can not emit messages
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 6198a0663..a22abd7ff 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -377,12 +377,8 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
// Ensure requested action is valid
switch tx.OnCompletion {
- case NoOpOC:
- case OptInOC:
- case CloseOutOC:
- case ClearStateOC:
- case UpdateApplicationOC:
- case DeleteApplicationOC:
+ case NoOpOC, OptInOC, CloseOutOC, ClearStateOC, UpdateApplicationOC, DeleteApplicationOC:
+ /* ok */
default:
return fmt.Errorf("invalid application OnCompletion")
}
@@ -448,8 +444,12 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
return fmt.Errorf("tx.ForeignAssets too long, max number of foreign assets is %d", proto.MaxAppTxnForeignAssets)
}
+ if len(tx.Boxes) > proto.MaxAppBoxReferences {
+ return fmt.Errorf("tx.Boxes too long, max number of box references is %d", proto.MaxAppBoxReferences)
+ }
+
// Limit the sum of all types of references that bring in account records
- if len(tx.Accounts)+len(tx.ForeignApps)+len(tx.ForeignAssets) > proto.MaxAppTotalTxnReferences {
+ if len(tx.Accounts)+len(tx.ForeignApps)+len(tx.ForeignAssets)+len(tx.Boxes) > proto.MaxAppTotalTxnReferences {
return fmt.Errorf("tx references exceed MaxAppTotalTxnReferences = %d", proto.MaxAppTotalTxnReferences)
}
@@ -470,6 +470,13 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
return fmt.Errorf("app programs too long. max total len %d bytes", pages*proto.MaxAppTotalProgramLen)
}
+ for i, br := range tx.Boxes {
+ // recall 0 is the current app so indexes are shifted, thus test is for greater than, not gte.
+ if br.Index > uint64(len(tx.ForeignApps)) {
+ return fmt.Errorf("tx.Boxes[%d].Index is %d. Exceeds len(tx.ForeignApps)", i, br.Index)
+ }
+ }
+
if tx.LocalStateSchema.NumEntries() > proto.MaxLocalSchemaEntries {
return fmt.Errorf("tx.LocalStateSchema too large, max number of keys is %d", proto.MaxLocalSchemaEntries)
}
diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go
index bac43c22c..43467d3fd 100644
--- a/data/transactions/transaction_test.go
+++ b/data/transactions/transaction_test.go
@@ -273,6 +273,7 @@ func TestWellFormedErrors(t *testing.T) {
futureProto := config.Consensus[protocol.ConsensusFuture]
protoV27 := config.Consensus[protocol.ConsensusV27]
protoV28 := config.Consensus[protocol.ConsensusV28]
+ protoV32 := config.Consensus[protocol.ConsensusV32]
addr1, err := basics.UnmarshalChecksumAddress("NDQCJNNY5WWWFLP4GFZ7MEF2QJSMZYK6OWIV2AQ7OMAVLEFCGGRHFPKJJA")
require.NoError(t, err)
v5 := []byte{0x05}
@@ -284,7 +285,6 @@ func TestWellFormedErrors(t *testing.T) {
}
usecases := []struct {
tx Transaction
- spec SpecialAddresses
proto config.ConsensusParams
expectedError error
}{
@@ -296,7 +296,6 @@ func TestWellFormedErrors(t *testing.T) {
Fee: basics.MicroAlgos{Raw: 100},
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: makeMinFeeErrorf("transaction had fee %d, which is less than the minimum %d", 100, curProto.MinTxnFee),
},
@@ -308,7 +307,6 @@ func TestWellFormedErrors(t *testing.T) {
Fee: basics.MicroAlgos{Raw: 100},
},
},
- spec: specialAddr,
proto: curProto,
},
{
@@ -321,7 +319,6 @@ func TestWellFormedErrors(t *testing.T) {
FirstValid: 105,
},
},
- spec: specialAddr,
proto: curProto,
expectedError: fmt.Errorf("transaction invalid range (%d--%d)", 105, 100),
},
@@ -339,7 +336,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 1,
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: fmt.Errorf("tx.ExtraProgramPages exceeds MaxExtraAppProgramPages = %d", protoV27.MaxExtraAppProgramPages),
},
@@ -353,7 +349,6 @@ func TestWellFormedErrors(t *testing.T) {
ClearStateProgram: []byte("Xjunk"),
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: fmt.Errorf("approval program too long. max len 1024 bytes"),
},
@@ -367,7 +362,6 @@ func TestWellFormedErrors(t *testing.T) {
ClearStateProgram: []byte("Xjunk"),
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -380,7 +374,6 @@ func TestWellFormedErrors(t *testing.T) {
ClearStateProgram: []byte(strings.Repeat("X", 1025)),
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("app programs too long. max total len 2048 bytes"),
},
@@ -395,7 +388,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 1,
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -410,7 +402,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 1,
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx.ExtraProgramPages is immutable"),
},
@@ -428,7 +419,6 @@ func TestWellFormedErrors(t *testing.T) {
ExtraProgramPages: 4,
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx.ExtraProgramPages exceeds MaxExtraAppProgramPages = %d", futureProto.MaxExtraAppProgramPages),
},
@@ -441,7 +431,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignApps: []basics.AppIndex{10, 11},
},
},
- spec: specialAddr,
proto: protoV27,
},
{
@@ -453,7 +442,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignApps: []basics.AppIndex{10, 11, 12},
},
},
- spec: specialAddr,
proto: protoV27,
expectedError: fmt.Errorf("tx.ForeignApps too long, max number of foreign apps is 2"),
},
@@ -466,7 +454,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignApps: []basics.AppIndex{10, 11, 12, 13, 14, 15, 16, 17},
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -478,7 +465,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignAssets: []basics.AssetIndex{14, 15, 16, 17, 18, 19, 20, 21, 22},
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx.ForeignAssets too long, max number of foreign assets is 8"),
},
@@ -493,7 +479,6 @@ func TestWellFormedErrors(t *testing.T) {
ForeignAssets: []basics.AssetIndex{14, 15, 16, 17},
},
},
- spec: specialAddr,
proto: futureProto,
expectedError: fmt.Errorf("tx references exceed MaxAppTotalTxnReferences = 8"),
},
@@ -509,7 +494,6 @@ func TestWellFormedErrors(t *testing.T) {
OnCompletion: UpdateApplicationOC,
},
},
- spec: specialAddr,
proto: protoV28,
expectedError: fmt.Errorf("app programs too long. max total len %d bytes", curProto.MaxAppProgramLen),
},
@@ -525,7 +509,6 @@ func TestWellFormedErrors(t *testing.T) {
OnCompletion: UpdateApplicationOC,
},
},
- spec: specialAddr,
proto: futureProto,
},
{
@@ -543,13 +526,49 @@ func TestWellFormedErrors(t *testing.T) {
OnCompletion: UpdateApplicationOC,
},
},
- spec: specialAddr,
proto: protoV28,
expectedError: fmt.Errorf("tx.ExtraProgramPages is immutable"),
},
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: []byte("junk")}},
+ },
+ },
+ proto: futureProto,
+ expectedError: fmt.Errorf("tx.Boxes[0].Index is 1. Exceeds len(tx.ForeignApps)"),
+ },
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: []byte("junk")}},
+ ForeignApps: []basics.AppIndex{1},
+ },
+ },
+ proto: futureProto,
+ },
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: []byte("junk")}},
+ ForeignApps: []basics.AppIndex{1},
+ },
+ },
+ proto: protoV32,
+ expectedError: fmt.Errorf("tx.Boxes too long, max number of box references is 0"),
+ },
}
for _, usecase := range usecases {
- err := usecase.tx.WellFormed(usecase.spec, usecase.proto)
+ err := usecase.tx.WellFormed(specialAddr, usecase.proto)
require.Equal(t, usecase.expectedError, err)
}
}
diff --git a/data/txHandler.go b/data/txHandler.go
index cd4c25c8e..8e4a717ed 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -122,6 +122,8 @@ func reencode(stxns []transactions.SignedTxn) []byte {
// backlogWorker is the worker go routine that process the incoming messages from the postVerificationQueue and backlogQueue channels
// and dispatches them further.
func (handler *TxHandler) backlogWorker() {
+ // Note: TestIncomingTxHandle and TestIncomingTxGroupHandle emulate this function.
+ // Changes to the behavior in this function should be reflected in the test.
defer handler.backlogWg.Done()
for {
// prioritize the postVerificationQueue
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 14a5495eb..da8fe6469 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -17,9 +17,12 @@
package data
import (
+ "encoding/binary"
"fmt"
"io"
"math/rand"
+ "strings"
+ "sync"
"testing"
"time"
@@ -38,6 +41,7 @@ import (
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
+ "github.com/algorand/go-algorand/util/metrics"
)
func BenchmarkTxHandlerProcessing(b *testing.B) {
@@ -248,3 +252,319 @@ func BenchmarkTxHandlerDecoderMsgp(b *testing.B) {
require.Equal(b, benchTxnNum, idx)
}
}
+
+func TestIncomingTxHandle(t *testing.T) {
+ incomingTxHandlerProcessing(1, t)
+}
+
+func TestIncomingTxGroupHandle(t *testing.T) {
+ incomingTxHandlerProcessing(proto.MaxTxGroupSize, t)
+}
+
+// incomingTxHandlerProcessing is a comprehensive transaction handling test
+// It handles the signed transactions by passing them to the backlog for verification
+func incomingTxHandlerProcessing(maxGroupSize int, t *testing.T) {
+ const numUsers = 100
+ numberOfTransactionGroups := 1000
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Warn)
+ addresses := make([]basics.Address, numUsers)
+ secrets := make([]*crypto.SignatureSecrets, numUsers)
+
+ // prepare the accounts
+ genesis := make(map[basics.Address]basics.AccountData)
+ for i := 0; i < numUsers; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ genesis[addr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: 10000000000000},
+ }
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.NotParticipating,
+ MicroAlgos: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinBalance},
+ }
+
+ require.Equal(t, len(genesis), numUsers+1)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ ledgerName := fmt.Sprintf("%s-mem-%d", t.Name(), numberOfTransactionGroups)
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg)
+ require.NoError(t, err)
+
+ l := ledger
+ tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base())
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ handler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
+ defer handler.ctxCancel()
+
+ outChan := make(chan *txBacklogMsg, 10)
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+	// Make a test backlog worker, which is similar to backlogWorker, but sends the results
+ // through the outChan instead of passing it to postprocessCheckedTxn
+ go func() {
+ defer wg.Done()
+ defer close(outChan)
+ for {
+ // prioritize the postVerificationQueue
+ select {
+ case wi, ok := <-handler.postVerificationQueue:
+ if !ok {
+ return
+ }
+ outChan <- wi
+ // restart the loop so that we could empty out the post verification queue.
+ continue
+ default:
+ }
+
+ // we have no more post verification items. wait for either backlog queue item or post verification item.
+ select {
+ case wi, ok := <-handler.backlogQueue:
+ if !ok {
+ // shut down to end the test
+ handler.txVerificationPool.Shutdown()
+ close(handler.postVerificationQueue)
+ // wait until all the pending responses are obtained.
+ // this is not in backlogWorker, maybe should be
+ for wi := range handler.postVerificationQueue {
+ outChan <- wi
+ }
+ return
+ }
+ if handler.checkAlreadyCommitted(wi) {
+ // this is not expected during the test
+ continue
+ }
+
+ // enqueue the task to the verification pool.
+ handler.txVerificationPool.EnqueueBacklog(handler.ctx, handler.asyncVerifySignature, wi, nil)
+
+ case wi, ok := <-handler.postVerificationQueue:
+ if !ok {
+ return
+ }
+ outChan <- wi
+
+ case <-handler.ctx.Done():
+ return
+ }
+ }
+ }()
+
+ // Prepare the transactions
+ signedTransactionGroups, badTxnGroups :=
+ makeSignedTxnGroups(numberOfTransactionGroups, numUsers, maxGroupSize, 0.5, addresses, secrets)
+ encodedSignedTransactionGroups := make([]network.IncomingMessage, 0, numberOfTransactionGroups)
+ for _, stxngrp := range signedTransactionGroups {
+ data := make([]byte, 0)
+ for _, stxn := range stxngrp {
+ data = append(data, protocol.Encode(&stxn)...)
+ }
+ encodedSignedTransactionGroups =
+ append(encodedSignedTransactionGroups, network.IncomingMessage{Data: data})
+ }
+
+ // Process the results and make sure they are correct
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ groupCounter := 0
+ txnCounter := 0
+ invalidCounter := 0
+ defer func() {
+ t.Logf("processed %d txn groups (%d txns)\n", groupCounter, txnCounter)
+ }()
+ for wi := range outChan {
+ txnCounter = txnCounter + len(wi.unverifiedTxGroup)
+ groupCounter++
+ u, _ := binary.Uvarint(wi.unverifiedTxGroup[0].Txn.Note)
+ _, inBad := badTxnGroups[u]
+ if wi.verificationErr == nil {
+ require.False(t, inBad, "No error for invalid signature")
+ } else {
+ invalidCounter++
+ require.True(t, inBad, "Error for good signature")
+ }
+ }
+ t.Logf("Txn groups with invalid sigs: %d\n", invalidCounter)
+ }()
+
+ // Send the transactions to the verifier
+ for _, tg := range encodedSignedTransactionGroups {
+ handler.processIncomingTxn(tg)
+ randduration := time.Duration(uint64(((1 + rand.Float32()) * 3)))
+ time.Sleep(randduration * time.Microsecond)
+ }
+ close(handler.backlogQueue)
+ wg.Wait()
+
+ // Report the number of transactions dropped because the backlog was busy
+ var buf strings.Builder
+ metrics.DefaultRegistry().WriteMetrics(&buf, "")
+ str := buf.String()
+ x := strings.Index(str, "\nalgod_transaction_messages_dropped_backlog")
+ str = str[x+44 : x+44+strings.Index(str[x+44:], "\n")]
+ str = strings.TrimSpace(strings.ReplaceAll(str, "}", " "))
+ t.Logf("dropped %s txn gropus\n", str)
+}
+
+// makeSignedTxnGroups prepares N transaction groups of random (maxGroupSize) sizes with random
+// invalid signatures of a given probability (invalidProb)
+func makeSignedTxnGroups(N, numUsers, maxGroupSize int, invalidProb float32, addresses []basics.Address,
+ secrets []*crypto.SignatureSecrets) (ret [][]transactions.SignedTxn,
+ badTxnGroups map[uint64]interface{}) {
+ badTxnGroups = make(map[uint64]interface{})
+
+ protoMaxGrpSize := proto.MaxTxGroupSize
+ ret = make([][]transactions.SignedTxn, 0, N)
+ for u := 0; u < N; u++ {
+ grpSize := rand.Intn(protoMaxGrpSize-1) + 1
+ if grpSize > maxGroupSize {
+ grpSize = maxGroupSize
+ }
+ var txGroup transactions.TxGroup
+ txns := make([]transactions.Transaction, 0, grpSize)
+ for g := 0; g < grpSize; g++ {
+ // generate transactions
+ noteField := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(noteField, uint64(u))
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addresses[(u+g)%numUsers],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: 0,
+ LastValid: basics.Round(proto.MaxTxnLife),
+ GenesisHash: genesisHash,
+ Note: noteField,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addresses[(u+g+1)%numUsers],
+ Amount: basics.MicroAlgos{Raw: mockBalancesMinBalance + (rand.Uint64() % 10000)},
+ },
+ }
+ txGroup.TxGroupHashes = append(txGroup.TxGroupHashes, crypto.Digest(tx.ID()))
+ txns = append(txns, tx)
+ }
+ groupHash := crypto.HashObj(txGroup)
+ signedTxGroup := make([]transactions.SignedTxn, 0, grpSize)
+ for g, txn := range txns {
+ txn.Group = groupHash
+ signedTx := txn.Sign(secrets[(u+g)%numUsers])
+ signedTx.Txn = txn
+ signedTxGroup = append(signedTxGroup, signedTx)
+ }
+ // randomly make bad signatures
+ if rand.Float32() < invalidProb {
+ tinGrp := rand.Intn(grpSize)
+ signedTxGroup[tinGrp].Sig[0] = signedTxGroup[tinGrp].Sig[0] + 1
+ badTxnGroups[uint64(u)] = struct{}{}
+ }
+ ret = append(ret, signedTxGroup)
+ }
+ return
+}
+
+// BenchmarkHandleTxns sends signed transactions to the verifier
+func BenchmarkHandleTxns(b *testing.B) {
+ b.N = b.N * proto.MaxTxGroupSize / 2
+ runHandlerBenchmark(1, b)
+}
+
+// BenchmarkHandleTxnGroups sends signed transaction groups to the verifier
+func BenchmarkHandleTxnGroups(b *testing.B) {
+ runHandlerBenchmark(proto.MaxTxGroupSize, b)
+}
+
+// runHandlerBenchmark has a similar workflow to incomingTxHandlerProcessing,
+// but bypasses the backlog, and sends the transactions directly to the verifier
+func runHandlerBenchmark(maxGroupSize int, b *testing.B) {
+ const numUsers = 100
+ log := logging.TestingLog(b)
+ log.SetLevel(logging.Warn)
+ addresses := make([]basics.Address, numUsers)
+ secrets := make([]*crypto.SignatureSecrets, numUsers)
+
+ // prepare the accounts
+ genesis := make(map[basics.Address]basics.AccountData)
+ for i := 0; i < numUsers; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ genesis[addr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: 10000000000000},
+ }
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.NotParticipating,
+ MicroAlgos: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinBalance},
+ }
+
+ require.Equal(b, len(genesis), numUsers+1)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ ledgerName := fmt.Sprintf("%s-mem-%d", b.Name(), b.N)
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg)
+ require.NoError(b, err)
+
+ l := ledger
+ tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base())
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ handler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
+ defer handler.ctxCancel()
+
+ // Prepare the transactions
+ signedTransactionGroups, badTxnGroups := makeSignedTxnGroups(b.N, numUsers, maxGroupSize, 0.001, addresses, secrets)
+ outChan := handler.postVerificationQueue
+ wg := sync.WaitGroup{}
+
+ var tt time.Time
+ // Process the results and make sure they are correct
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ groupCounter := 0
+ var txnCounter uint64
+ invalidCounter := 0
+ for wi := range outChan {
+ txnCounter = txnCounter + uint64(len(wi.unverifiedTxGroup))
+ groupCounter++
+ u, _ := binary.Uvarint(wi.unverifiedTxGroup[0].Txn.Note)
+ _, inBad := badTxnGroups[u]
+ if wi.verificationErr == nil {
+ require.False(b, inBad, "No error for invalid signature")
+ } else {
+ invalidCounter++
+ require.True(b, inBad, "Error for good signature")
+ }
+ }
+ if txnCounter > 0 {
+ b.Logf("TPS: %d\n", uint64(txnCounter)*1000000000/uint64(time.Since(tt)))
+ b.Logf("Time/txn: %d(microsec)\n", uint64((time.Since(tt)/time.Microsecond))/txnCounter)
+ b.Logf("processed total: [%d groups (%d invalid)] [%d txns]\n", groupCounter, invalidCounter, txnCounter)
+ }
+ }()
+
+ b.ResetTimer()
+ tt = time.Now()
+ for _, stxngrp := range signedTransactionGroups {
+ blm := txBacklogMsg{rawmsg: nil, unverifiedTxGroup: stxngrp}
+ handler.txVerificationPool.EnqueueBacklog(handler.ctx, handler.asyncVerifySignature, &blm, nil)
+ }
+ // shut down to end the test
+ handler.txVerificationPool.Shutdown()
+ close(handler.postVerificationQueue)
+ close(handler.backlogQueue)
+ wg.Wait()
+}
diff --git a/data/txntest/txn.go b/data/txntest/txn.go
index 988753a0d..cab0ded5c 100644
--- a/data/txntest/txn.go
+++ b/data/txntest/txn.go
@@ -95,6 +95,7 @@ type Txn struct {
Accounts []basics.Address
ForeignApps []basics.AppIndex
ForeignAssets []basics.AssetIndex
+ Boxes []transactions.BoxRef
LocalStateSchema basics.StateSchema
GlobalStateSchema basics.StateSchema
ApprovalProgram interface{} // string, nil, or []bytes if already compiled
@@ -106,12 +107,50 @@ type Txn struct {
StateProofMsg stateproofmsg.Message
}
+// internalCopy "finishes" a shallow copy done by a simple Go assignment by
+// copying all of the slice fields
+func (tx *Txn) internalCopy() {
+ tx.Note = append([]byte(nil), tx.Note...)
+ if tx.ApplicationArgs != nil {
+ tx.ApplicationArgs = append([][]byte(nil), tx.ApplicationArgs...)
+ for i := range tx.ApplicationArgs {
+ tx.ApplicationArgs[i] = append([]byte(nil), tx.ApplicationArgs[i]...)
+ }
+ }
+ tx.Accounts = append([]basics.Address(nil), tx.Accounts...)
+ tx.ForeignApps = append([]basics.AppIndex(nil), tx.ForeignApps...)
+ tx.ForeignAssets = append([]basics.AssetIndex(nil), tx.ForeignAssets...)
+ tx.Boxes = append([]transactions.BoxRef(nil), tx.Boxes...)
+ for i := 0; i < len(tx.Boxes); i++ {
+ tx.Boxes[i].Name = append([]byte(nil), tx.Boxes[i].Name...)
+ }
+
+ // Programs may or may not actually be byte slices. The other
+	// possibilities don't require copies.
+ if program, ok := tx.ApprovalProgram.([]byte); ok {
+ tx.ApprovalProgram = append([]byte(nil), program...)
+ }
+ if program, ok := tx.ClearStateProgram.([]byte); ok {
+ tx.ClearStateProgram = append([]byte(nil), program...)
+ }
+}
+
// Noted returns a new Txn with the given note field.
-func (tx *Txn) Noted(note string) *Txn {
- copy := &Txn{}
- *copy = *tx
- copy.Note = []byte(note)
- return copy
+func (tx Txn) Noted(note string) *Txn {
+ tx.internalCopy()
+ tx.Note = []byte(note)
+ return &tx
+}
+
+// Args returns a new Txn with the given strings as app args
+func (tx Txn) Args(strings ...string) *Txn {
+ tx.internalCopy()
+ bytes := make([][]byte, len(strings))
+ for i, s := range strings {
+ bytes[i] = []byte(s)
+ }
+ tx.ApplicationArgs = bytes
+ return &tx
}
// FillDefaults populates some obvious defaults from config params,
@@ -235,6 +274,7 @@ func (tx Txn) Txn() transactions.Transaction {
Accounts: tx.Accounts,
ForeignApps: tx.ForeignApps,
ForeignAssets: tx.ForeignAssets,
+ Boxes: tx.Boxes,
LocalStateSchema: tx.LocalStateSchema,
GlobalStateSchema: tx.GlobalStateSchema,
ApprovalProgram: assemble(tx.ApprovalProgram),
diff --git a/docker/releases/build_releases.sh b/docker/releases/build_releases.sh
index 01d5f5372..1862c7ae9 100755
--- a/docker/releases/build_releases.sh
+++ b/docker/releases/build_releases.sh
@@ -78,7 +78,7 @@ esac
IFS='' read -r -d '' DOCKERFILE <<EOF
FROM ubuntu
-RUN apt-get update && apt-get install -y ca-certificates curl --no-install-recommends && \
+RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y ca-certificates curl --no-install-recommends && \
curl --silent -L https://github.com/algorand/go-algorand-doc/blob/master/downloads/installers/linux_amd64/install_master_linux-amd64.tar.gz?raw=true | tar xzf - && \
./update.sh -c $CHANNEL -n -p ~/node -d ~/node/data -i -g $NETWORK
WORKDIR /root/node
diff --git a/gen/generate.go b/gen/generate.go
index 15eb09103..9274da420 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -149,14 +149,28 @@ func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusPro
return fmt.Errorf("couldn't make output directory '%s': %v", outDir, err.Error())
}
- return generateGenesisFiles(outDir, proto, consensusParams, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, genesisData.DevMode, verboseOut)
+ return generateGenesisFiles(
+ proto, consensusParams, allocation, genesisData, outDir, verboseOut,
+ )
}
-func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, netName string, schemaVersionModifier string,
- allocation []genesisAllocation, firstWalletValid uint64, lastWalletValid uint64, partKeyDilution uint64, feeSink, rewardsPool basics.Address, comment string, devmode bool, verboseOut io.Writer) (err error) {
+func generateGenesisFiles(protoVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, allocation []genesisAllocation, genData GenesisData, outDir string, verboseOut io.Writer) (err error) {
- genesisAddrs := make(map[string]basics.Address)
- records := make(map[string]basics.AccountData)
+ var (
+ netName = genData.NetworkName
+ schemaVersionModifier = genData.VersionModifier
+ firstWalletValid = genData.FirstPartKeyRound
+ lastWalletValid = genData.LastPartKeyRound
+ partKeyDilution = genData.PartKeyDilution
+ feeSink = genData.FeeSink
+ rewardsPool = genData.RewardsPool
+ devmode = genData.DevMode
+ rewardsBalance = genData.RewardsPoolBalance
+ comment = genData.Comment
+
+ genesisAddrs = make(map[string]basics.Address)
+ records = make(map[string]basics.AccountData)
+ )
if partKeyDilution == 0 {
partKeyDilution = protoParams.DefaultKeyDilution
@@ -326,24 +340,27 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
fmt.Fprintln(verboseOut, protoVersion, protoParams.MinBalance)
}
+ if rewardsBalance < protoParams.MinBalance {
+ // Needs to at least have min balance
+ rewardsBalance = protoParams.MinBalance
+ }
+
records["FeeSink"] = basics.AccountData{
Status: basics.NotParticipating,
MicroAlgos: basics.MicroAlgos{Raw: protoParams.MinBalance},
}
+
records["RewardsPool"] = basics.AccountData{
Status: basics.NotParticipating,
- MicroAlgos: basics.MicroAlgos{Raw: defaultIncentivePoolBalanceAtInception},
+ MicroAlgos: basics.MicroAlgos{Raw: rewardsBalance},
}
+ // Add FeeSink and RewardsPool to allocation slice to be handled with other allocations.
sinkAcct := genesisAllocation{
- Name: "FeeSink",
- Stake: protoParams.MinBalance,
- Online: basics.NotParticipating,
+ Name: "FeeSink",
}
poolAcct := genesisAllocation{
- Name: "RewardsPool",
- Stake: defaultIncentivePoolBalanceAtInception,
- Online: basics.NotParticipating,
+ Name: "RewardsPool",
}
alloc2 := make([]genesisAllocation, 0, len(allocation)+2)
diff --git a/gen/generate_test.go b/gen/generate_test.go
index fbffe9467..fe618cfec 100644
--- a/gen/generate_test.go
+++ b/gen/generate_test.go
@@ -17,19 +17,25 @@
package gen
import (
+ "encoding/json"
"fmt"
+ "io"
+ "os"
"path/filepath"
"strings"
"sync"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/db"
-
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/util/db"
)
func TestLoadMultiRootKeyConcurrent(t *testing.T) {
@@ -114,3 +120,116 @@ func TestGenesisRoundoff(t *testing.T) {
require.NoError(t, err)
require.True(t, strings.Contains(verbosity.String(), "roundoff"))
}
+
+// `TestGenesisJsonCreation` defends against regressions to `genesis.json` generation by comparing a known, valid `genesis.json` against a version generated during test invocation.
+//
+// * For each `testCase`, there is a corresponding `genesis.json` in `gen/resources` representing the known, valid output.
+// * When adding test cases, it's assumed folks peer review new artifacts in `gen/resources`.
+// * Since _some_ `genesis.json` values are non-deterministic, the test replaces these values with static values to facilitate equality checks.
+func TestGenesisJsonCreation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type testCase struct {
+ name string
+ gd GenesisData
+ }
+
+ // `base` is a canonical test confirming `devnet.json` generates the intended `genesis.json`.
+ base := func() testCase {
+ jsonBytes, err := os.ReadFile("devnet.json")
+ require.NoError(t, err)
+
+ gd := DefaultGenesis
+ err = json.Unmarshal(jsonBytes, &gd)
+ require.NoError(t, err)
+
+ return testCase{"base", gd}
+ }
+
+ // `balance` extends `base` to confirm overriding the rewards pool balance works.
+ balance := func() testCase {
+ gd := base().gd
+ gd.RewardsPoolBalance = 0 // Expect generated balance == MinBalance
+ return testCase{"balance", gd}
+ }
+
+ // `blotOutRandomValues` replaces random values with static values to support equality checks.
+ blotOutRandomValues := func(as []bookkeeping.GenesisAllocation) {
+ deterministicAddresses := []string{"FeeSink", "RewardsPool"}
+
+ isNondeterministicAddress := func(name string) bool {
+ for _, address := range deterministicAddresses {
+ if name == address {
+ return false
+ }
+ }
+ return true
+ }
+
+ for i := range as {
+ require.Len(t, as[i].State.VoteID, 32)
+ as[i].State.VoteID = crypto.OneTimeSignatureVerifier{}
+ require.Len(t, as[i].State.VoteID, 32)
+ as[i].State.SelectionID = crypto.VRFVerifier{}
+
+ if isNondeterministicAddress(as[i].Comment) {
+ require.Len(t, as[i].Address, 58)
+ as[i].Address = ""
+ }
+ }
+ }
+
+ saveGeneratedGenesisJSON := func(filename, artifactName string) {
+ src, err := os.Open(filename)
+ require.NoError(t, err)
+ defer src.Close()
+
+ dst, err := os.CreateTemp("", "*-"+artifactName)
+ require.NoError(t, err)
+ defer dst.Close()
+
+ _, err = io.Copy(dst, src)
+ require.NoError(t, err)
+
+ t.Log("generated genesis.json = " + dst.Name())
+ }
+
+ // Since `t.TempDir` deletes the generated dir, retain generated `genesis.json` on test failure.
+ saveOnFailure := func(result bool, generatedFilename, artifactName string) {
+ if !result {
+ saveGeneratedGenesisJSON(generatedFilename, artifactName)
+ t.FailNow()
+ }
+ }
+
+ for _, tc := range []testCase{
+ base(),
+ balance(),
+ } {
+ t.Run(fmt.Sprintf("name=%v", tc.name), func(t *testing.T) {
+ gd := tc.gd
+ gd.LastPartKeyRound = 10 // Ensure quick test execution by reducing rounds.
+
+ outDir := t.TempDir()
+ err := GenerateGenesisFiles(gd, config.Consensus, outDir, nil)
+ require.NoError(t, err)
+
+ artifactName := fmt.Sprintf("genesis-%v.json", tc.name)
+ generatedFilename := fmt.Sprintf("%v/genesis.json", outDir)
+ saveOnFailure := func(result bool) {
+ saveOnFailure(result, generatedFilename, artifactName)
+ }
+
+ roundtrip, err := bookkeeping.LoadGenesisFromFile(generatedFilename)
+ require.NoError(t, err)
+
+ expected, err := bookkeeping.LoadGenesisFromFile("resources/" + artifactName)
+ saveOnFailure(assert.NoError(t, err))
+
+ blotOutRandomValues(expected.Allocation)
+ blotOutRandomValues(roundtrip.Allocation)
+ saveOnFailure(assert.Equal(t, expected, roundtrip))
+ })
+ }
+}
diff --git a/gen/resources/genesis-balance.json b/gen/resources/genesis-balance.json
new file mode 100644
index 000000000..53fc497b7
--- /dev/null
+++ b/gen/resources/genesis-balance.json
@@ -0,0 +1,290 @@
+{
+ "alloc": [
+ {
+ "addr": "7777777777777777777777777777777777777777777777777774MSJUVU",
+ "comment": "RewardsPool",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "comment": "FeeSink",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "LL5I5MBVV6LU26ENXVZ733A3IKUUTYMG6ZRCJB7G4GZIPSVFPHCCK2YKME",
+ "comment": "Wallet1",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "Tk6dVeLp2jkpR4GqTkUqmg4b7wwkgYshXpQ6FvWkJbQ=",
+ "vote": "DMXQB3LRyCznSDwFY7QhG+v6vrhaRR5DmcVBkiojGAw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "LMSUTQYZ6PVSFF2UC2Y5BQQ6BPZPEEP4TJDZZAFGWFD7NR6JUIWYOBB7FA",
+ "comment": "Wallet10",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "FGmOjAOiA5WS36RbQqgW0R8+/7hNhr4d01w57E2Rj98=",
+ "vote": "rwRNmKNCR21GR7fGx0JscZFxAqDDntddmGPBrcPM3uM=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "IRLTNUT6N4RJPWHKHTGEFCC7XZPYXWUEQ3KPQDIPDOM4QCHMERLDCKB5BA",
+ "comment": "Wallet11",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "o3HGYpY+Cgon0G8h+LXt+x51iblyDAjU+UVh3i83QE0=",
+ "vote": "OisiREqsWPCCp/DQAAv/zv3t1cZuk9/EpHFTge45n9g=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "BL6N224XD2AO3UFAL3FYT4XV6TT225THQN2WZEVK2SZ2EN65MOWKOUHPCE",
+ "comment": "Wallet12",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "9t88h4TvAKqMIlRoLW9lfEIUZHkkeTeZNkxhf6cwBk4=",
+ "vote": "M2t4hO2Oe2cM0luPVQQjKFJUdUAve//NJLu31+AjSf8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "7QAXF4XTPDNNP7IQXY2GAPU54SWOAEYN3KFNYZNYIT2TGTIATQYVYX34EE",
+ "comment": "Wallet13",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "EDectK5ACzkRBdTaK2jTJ3p2LRWVdIr2yqhw7vgLBAQ=",
+ "vote": "bD/yb+z+7TenZqd0G950WdDibXWcnH+5tLAUx6UWG28=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "EMOKFY3CUL5N63QL3K35AED5V23GLHFOCK247DW352NJQ2L7RX67TXBKLI",
+ "comment": "Wallet14",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "gFAz62RY4LPBD2TYmkKkm3RM0sE7kDH7XsA2nTjEMxU=",
+ "vote": "ZgVdTpahN5PgVcTsmNoLW8clsoHne2nfXnx7+jHbAeI=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "VHPOCATOURTR5Q4CCCSHFMDED2DJNQOSHIK5ZYRTWWXQ2NQHIFVVKC7IGM",
+ "comment": "Wallet15",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "faGqWez33n/hffAoQb1Mhiveb4SraHFUaXeBSTbDvtY=",
+ "vote": "i77hs2kcwF02SElbT7Bz9Pf75IYgF5nhqxhd8nQZ1y8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "ZRTQWKBBNEGRYFDGVLVSI3FJHKIBQ7K527IWKXH75VTFCXMZVZGD4XA75E",
+ "comment": "Wallet16",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "0lQOFHTrORZmsJaFi3VbWtQM3eqIQuYtSiWm4UFI0/I=",
+ "vote": "mNQRvebWl6fHaO2icPtI4jv62UuaZXRFlz6PUvSXLjI=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "MFS6B3P3R7FOKMAU4FLMMMOKQVWTZCFYKEPE6SW66A5OJDWFFRFPMSO6AU",
+ "comment": "Wallet17",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "XKWnu0OXX1IaFYpm+IB00UXN83ap2d3uJqoSygqQyZE=",
+ "vote": "owK+kZlEr0VuFJZJMslXwpWHbnOdvRxQWLB3N4jpRt0=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "OLUNBJRRR2N4RSLIYNKAQNJIW7FK7M23AOPMIHER6RECHMGUINAV2WAXYA",
+ "comment": "Wallet18",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "c0Fku3FF4VxiDPxdTJ30+aoR9UdPkQ/iDV0DiKLjCh0=",
+ "vote": "eNwVkHYORjGE1Qme1D9o842cgFAn4nidryN35CgK78s=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "7VQYT4PAQBABDR3QODW5MUB3RU6MQ5MIWVIUR2HN3CSPA5H64TVQY4NT64",
+ "comment": "Wallet19",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "TlelrGjnKdubOuRRMo2Mum2uUAP6UkG2ANDD+BZKOTo=",
+ "vote": "LWlAr9lRXcolQ9h4fX2DTw3LU8/KI6Eix6tJ+o8fsho=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "BSJ5G7YEOVTEKKDJZZKULAJPBH26SPIV4RJLV5AUSYZ36ELRN3JHZTZX2E",
+ "comment": "Wallet2",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "D3aE+VywLz4HA8NPX7mEB3m33FOer4L2ZBRa0qT/1fI=",
+ "vote": "ITColZ7Roe/p+qQXX8yk6FKibhN6sShNWFdvgDk3kGY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "ETMCXHLEI7KOTMPVIOZAAPMQGQTST3CPZP4YG25HBCFB6XY3IIGH3YYUSE",
+ "comment": "Wallet20",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "4Hg5XgcvEQsPiI7eaNE3hZgKZIvBl93CCKZZ8GZIDps=",
+ "vote": "HOecVUnX8xe86+Rrt237Z+jXFILhBM2I7GvyxCWxpbY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "TQYRDSG5BS5XR6USGROXP5DSUAHJHKUTE5GBLENHBGWKAK2YNLEANQ7ELM",
+ "comment": "Wallet3",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "hOfLlBV0eDcF1lV7cUxMfo7dKBmCBflrgNJ1NxDv1KQ=",
+ "vote": "AgpySx7yp2177QjUueEJ+HN1xpQTW8Uf6sTu6lCHq2w=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "BOKSOEFFJE4RUPFMZHPQRJBEORY3W5BJNR5YUEYGOWKN6IZP4XLMHPLBCQ",
+ "comment": "Wallet4",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "arO4hIghNSwWvmr+hNHItYFniImOiCjGo4IwPDEBENg=",
+ "vote": "QLeClCU8nw2hHPA9YnrhWSlZ0Nc5awyk5WI/RhipGrU=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "VIMND5G6N6BYICGLXPBBKJNOO36K22HHUDV4KCKOS2STO225AKKGWNLOOM",
+ "comment": "Wallet5",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "sTAHT53hHEAMC/NXj+N6sAXvBrJzIU3QeIiD8YdyHN8=",
+ "vote": "qXu3+DUAPs71xZtugIxYOOFh4pxQ1zOD/wDvACmNAOo=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "6GQNPBPISA6YUSLIHCKFGNARGA36PPFEI3G77MVFZRLBB5T7ENOFLUIOM4",
+ "comment": "Wallet6",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "DVqDBjuBNwqDgn/Srl0M0iAhY4F7OaYlPM6Mksqag7s=",
+ "vote": "wwy76l2W220L/T7NxYu0RwkCtgZopwAby7+Ufo3FroE=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "HLBHM4N2YBRERHGBRMSBJ2H374X3UGLPB3EFGQ42P5HJ3AGOZPBZMNLHWI",
+ "comment": "Wallet7",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "HLNbmSEJe1ToTIT6QJ7xymrJlC9DPX00Ebw3MMyKb9Q=",
+ "vote": "D4+2cKNIhuAv0Rum9x5Tw5RT1/SsmuPpuxB8eQ4OrS8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "M5O226M5GLGRHVQ3CWFFZBFUB4GPLK7RPSCGU25VY2Z5STSQI6D7LLXCQE",
+ "comment": "Wallet8",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "HgmAAyQuxjdD5dYW6QORoKL756+8PLfFckbtEtd6Qms=",
+ "vote": "+2NoVf0UHCM6+xf2crgh03vXHW4/rvJbvV1kgU6eb8A=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "66UUTNSIJMQJRM7GPNBDBNWQSLPDRX5MFRZYZDDILCATHB6EWFZVJEGC5M",
+ "comment": "Wallet9",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "74oS0XpjjqpTB+6nKj0bMl4ZMHN5HLvpSCnczSfkX0Q=",
+ "vote": "BWTZ9sJJVzjo20iFWBeCTrc1nxETWHP1IBAzjR+TtC8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "HHDJTAQXM35CDYBVD5YB3ZYXZ3SA2G7EPX377CLYWSA4OAQTQ3JLS7GJJQ",
+ "comment": "bank",
+ "state": {
+ "algo": 100000000000000,
+ "onl": 1,
+ "sel": "UMIH8ldIewQ9K0dKs1iWIwLBNsS2+9GEaZh7/wkvPmI=",
+ "vote": "MSgY48yyvE+XHdeJGj7Muh17rn+JqNt0NMtzOBCM4yU=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "IVRXBBY53IKQW4I5M7CYRCE6W37TYIB4IIR73KBDWTROMVAOGPT5K75E3Y",
+ "comment": "pp1",
+ "state": {
+ "algo": 50000000000000
+ }
+ },
+ {
+ "addr": "UDMWNUYTV4AU6Y76RCNHMTWVZ6QYACYFJP4PAZNBMRWRFZKRNE6V7SBG7Q",
+ "comment": "pp2",
+ "state": {
+ "algo": 50000000000000
+ }
+ }
+ ],
+ "fees": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "id": "v1.0",
+ "proto": "https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622",
+ "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
+}
diff --git a/gen/resources/genesis-base.json b/gen/resources/genesis-base.json
new file mode 100644
index 000000000..16cef5b98
--- /dev/null
+++ b/gen/resources/genesis-base.json
@@ -0,0 +1,290 @@
+{
+ "alloc": [
+ {
+ "addr": "7777777777777777777777777777777777777777777777777774MSJUVU",
+ "comment": "RewardsPool",
+ "state": {
+ "algo": 125000000000000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "comment": "FeeSink",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "5P7SSF6IRMBZKQBH4KJLZMKNK3VB5SHK3ZEUJAUD4NODLLXJLOCJPY6GWA",
+ "comment": "Wallet1",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "R3ZILpY0ZOgcRbb3spZyXBtwt4Q9oeP4JkGmLWvp1wM=",
+ "vote": "fjzsjb8BcM4KhDPQyf7qxisv8mWzb6t+nv9eZnPbq3w=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "23PVZMF53OVWQ5YRJDG75P774AAQDOWMGT2YRRO3ZVCAMV7P6WQDUHTHSY",
+ "comment": "Wallet10",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "M/NhvAm8FfFxgcEynZ2XyiedqA0ZXsYPpQAT9j8UStg=",
+ "vote": "3QrVWdEk/JU5+W3KsOqZQWwqBP+3syw3jWkra5UlRyY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "5GWNQULH2QB7IAMX7XAIYU6VQT7S4JTZZE2MVCO7HNWGFHM6GTGUCFVEIY",
+ "comment": "Wallet11",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "lEHWhsPoBlboeK+m9ASMV+eHe6aMWFr2wDFtW7BhZps=",
+ "vote": "m9hkZTh3JZtipCgRMQlxvjLHEBlPWI5rzi0a62nYmn4=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "MXEYXXM74MXMOJ7VCDDHYUIDELZBLETSB6ZUCR2K4OLCFKYPVBTJ7Q3WXA",
+ "comment": "Wallet12",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "dRgwg+/Q0WhqJu4pzVmFN15czLEwpzGbFHnqsGktRjk=",
+ "vote": "idEmvipGvNuebDonCSLcFJhaxjpft/1MZqxGzM0MOnY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "MDPD6UI4D7CRJO2QP3OA4GJAZXMGE5LY5XV2FQ36RJ5MKKRLNT3NPXFM4Y",
+ "comment": "Wallet13",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "teFS7YWTutxUvxSWXHzuS8wwkX5ueR9CEanNR0IhsHM=",
+ "vote": "FpRYYVxA2I72a0m3+bJ+4fzD+wcsgRbMXzPrX+ihZKo=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "NW3ARRUBZDSTXH5FXI54LZUATS5QHYZGQHONXJFZWUQCU6EYVM57KQCSGU",
+ "comment": "Wallet14",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "3vwiEtd2C0wm89P9vex9nud8nM4rdgCd8OAT7SUWfpA=",
+ "vote": "dXoHsQvvUY6HiRIR2JUaTFR21RROq+tzA3ApYL8lw7I=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "54Q7QENDL6GRX7XOBONV3AY2OHD62IGQPEFNUCKL4IH6XD2HGSJETIR22Y",
+ "comment": "Wallet15",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "b0hfKTC81csTjO/x21E6w4OpDlcVNCZBWky0gwpo2qo=",
+ "vote": "T9UBwWaOpUnq4azO1yyaH8rTHWXzfanIHib0RxI9N+c=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "DU4LJRJKBIIREB7CKKWCOUP6NHUM5HKKVYH6LU2ZE63D67PZRL7VSHNADI",
+ "comment": "Wallet16",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "JIdWGPjyKJZCfC5Y+IYxqC1oDEa2pXePQ3TSBQ3v+K0=",
+ "vote": "nYIVrFrNAf4lWvt1RiLR2Nv/EzaxH5us/7prSQqbFgM=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "Z745PXKLO7EJOBMUYKMZQXZJQD2NOI2ZC5CUUVZYQDL6S5FCMG2RTDR43U",
+ "comment": "Wallet17",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "29+iADCFdA+wltMu0ZgCPzAogbbp5kr5zRRJteB2mdE=",
+ "vote": "vzXs2Ebue388Ya3vVgEZ2JX6IJaZuIn2MIcl1N1T0lk=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "PA2FWPXRLKGKDRTP5JFYWWM3PBE77SINTRZ5BQCCKAK363TMMVFUJOM3OA",
+ "comment": "Wallet18",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "fKYC8BIRXI/4s0E52wDYd0jSiOgvStdA+8Vr7fTh0TA=",
+ "vote": "+jaGxdKU5PpxQxp7XRCYhc0Oss6josumZW7GNhl9Kkg=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "R24KUNIHRTSDYEWVJVJMHJ2IYE3NXBTSE3NQXOHPG5ULJJ4YEP6DXLWIQM",
+ "comment": "Wallet19",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "J1yWsrjAlqi0/HXQ++yK03+iFuPPu9q0HZVWp2V5gjU=",
+ "vote": "db7v7DZeGSeI9t3eXXoO0DxMghAGmidVh3bGBOeqcC8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "WLEX5HDAKMEA35NKY2QUXFXIFIKMOF6PHLMNDD25KLNNTPXRRO7TPAP6SQ",
+ "comment": "Wallet2",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "AYda5HHpJpFnpbQ/oeqvc9eSKPccHldGczvqwsJzWiE=",
+ "vote": "HjXz+GAo7yqeKRsOB+RQNr2V5vjwS/bMynMTr37T8D8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "XSQO4QZQZLFSAM7GOGYIOFH3EKTYC3WVO3CND7V4QQ7KXOQBD2OYRCCARE",
+ "comment": "Wallet20",
+ "state": {
+ "algo": 400000000000000,
+ "onl": 1,
+ "sel": "joC7f9011hnDVa87WsaRSD/+Z8HyJ6tPT8NCQJJKNQ4=",
+ "vote": "BRVqGnSjJZnBM2MdHs3YkApVB9iR6nd6VVTx2kvPQjw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "KE2HQAZHGJDKZUKRBDBQTBQOSVSCDROUC7UFJZ27KTNHBKON6GKFBDD5MY",
+ "comment": "Wallet3",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "lb/5w4Q/6KE3g2+KeRc4GrslWubT/2QkKQX9pJqBwWA=",
+ "vote": "gs5MFqUUn/IDBb0kb4VJiv6gIOUVIcSSmpdaLteAKmw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "7SCSRGS3YLKORJLJI6AGJOBEQYOVEMCPF52F3TK6KZZRZVZQCKCFGOGWIY",
+ "comment": "Wallet4",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "ImC3tocbWRoTdd90owSbskgsvnNpyFW1c79CNJl2mjU=",
+ "vote": "xFrT7tp0Yth3j39XJJKtKxxMaXDiFRbXtzoOjQnavFY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "2FCYNSOOK6HGVK4ZJZ3ROAV6S3Y6KYY5467BY7JQEVTT75RNSIM3D6WK5M",
+ "comment": "Wallet5",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "aEKSz4Hqi6JY82890qxqAUhkiQ2YvDYNrQBjq6UOgwU=",
+ "vote": "Gxyf2lPDSVhLrfUOtS6vS0fvrV9g22XHR/uUl7uzzsw=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "6SUHM22SBOZ4O4E7VQIB2FWHGCFLBCDOZAQ6AWXV3CTEDVB45UZGX2GZ2U",
+ "comment": "Wallet6",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "OXOkW9CbraU2UM1IjDkkpS3zGlBCjm0SHc3mFgZrUdE=",
+ "vote": "ssh7jRwaim+vTO4W9F2tw+j5BoFOqTTTy1DOZfqIXwY=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "H2A6ALQ6U7P2JSABWPRLG3IRRKH3AAUJFZXLI7YHCBZ7K6DOTHWBVGFIZM",
+ "comment": "Wallet7",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "B9rVwMkp9YAbdOz3vOK2K7UyDBsHBGeHUEcErioligA=",
+ "vote": "MyheCHyiz464tL3rdZkztIl/fx7ARYPvEv8xuFozCfM=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "5ZBBKATPM6HM56TOGQOSPFMIOVUZJJAZAAUATNPVCB6SKQYVIMRUOC454Q",
+ "comment": "Wallet8",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "g/995Lmd4y+IhbDCD6CXLyuIMs+GhFbXbmvVSCwG9RU=",
+ "vote": "SJ0Hj+Xau09OB97nfbktnTnA5K4MqRzQCwDR1BN4R+8=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "UPL2Q6OLQHLAZWXBEP7DTN3225ISOJ45DACXEDCNJ2UBCPR7EWUCPMW3KE",
+ "comment": "Wallet9",
+ "state": {
+ "algo": 500000000000000,
+ "onl": 1,
+ "sel": "X8Df4E1SSkCEoWYfDOGMPTSoecc6FaHTzDRNYpOVL7I=",
+ "vote": "ZUkY2tLQ3XmcG6aVWjJLdJvWjWfoPI7EliAq/ZYaIUg=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "N6TENNVOWAILNAZIOT3RFX6C4DPC2HEYOQD5KBBGFDE77FUNZU7NMWBIKA",
+ "comment": "bank",
+ "state": {
+ "algo": 100000000000000,
+ "onl": 1,
+ "sel": "vBIkGS3JImg7DVRGr6L3ZLuzFe9EXSDedvZkHRgAGEQ=",
+ "vote": "NoVimVCKTiIntOa/q9zUcVEN5erCIdD0c5G2eIAzBL4=",
+ "voteKD": 10000,
+ "voteLst": 10
+ }
+ },
+ {
+ "addr": "XHV5PYJARUBZA6EMG6ERYAL4ROXVTLVHMC2W52LDGVOVQNCQOAFNLTG32U",
+ "comment": "pp1",
+ "state": {
+ "algo": 50000000000000
+ }
+ },
+ {
+ "addr": "4EDQJQLPFX5CBWHJJ2GQXKMV6UQC2O553PGZVID6QVB7JZT3XBQ5MCSD2Q",
+ "comment": "pp2",
+ "state": {
+ "algo": 50000000000000
+ }
+ }
+ ],
+ "fees": "A7NMWS3NT3IUDMLVO26ULGXGIIOUQ3ND2TXSER6EBGRZNOBOUIQXHIBGDE",
+ "id": "v1.0",
+ "proto": "https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622",
+ "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
+}
diff --git a/gen/walletData.go b/gen/walletData.go
index 95c508c27..7790e9a6b 100644
--- a/gen/walletData.go
+++ b/gen/walletData.go
@@ -27,8 +27,9 @@ import (
// DefaultGenesis should be used as the default initial state for any GenesisData
// instance (because we have no ctors...)
var DefaultGenesis = GenesisData{
- FirstPartKeyRound: 0,
- LastPartKeyRound: 3000000,
+ FirstPartKeyRound: 0,
+ LastPartKeyRound: 3000000,
+ RewardsPoolBalance: defaultIncentivePoolBalanceAtInception,
}
// WalletData represents a wallet's name, percent stake, and initial online status for a genesis.json file
@@ -40,17 +41,18 @@ type WalletData struct {
// GenesisData represents the genesis data for creating a genesis.json and wallets
type GenesisData struct {
- NetworkName string
- VersionModifier string
- ConsensusProtocol protocol.ConsensusVersion
- FirstPartKeyRound uint64
- LastPartKeyRound uint64
- PartKeyDilution uint64
- Wallets []WalletData
- FeeSink basics.Address
- RewardsPool basics.Address
- DevMode bool
- Comment string
+ NetworkName string
+ VersionModifier string
+ ConsensusProtocol protocol.ConsensusVersion
+ FirstPartKeyRound uint64
+ LastPartKeyRound uint64
+ PartKeyDilution uint64
+ Wallets []WalletData
+ FeeSink basics.Address
+ RewardsPool basics.Address
+ RewardsPoolBalance uint64 // Values < `ConsensusParams.MinBalance` are adjusted to `ConsensusParams.MinBalance`
+ DevMode bool
+ Comment string
}
// LoadGenesisData loads a GenesisData structure from a json file
diff --git a/go.mod b/go.mod
index 96ddeb704..a766d1ab4 100644
--- a/go.mod
+++ b/go.mod
@@ -3,13 +3,14 @@ module github.com/algorand/go-algorand
go 1.17
require (
+ github.com/DataDog/zstd v1.5.2
github.com/algorand/avm-abi v0.1.0
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414
github.com/algorand/go-codec/codec v1.1.8
github.com/algorand/go-deadlock v0.2.2
github.com/algorand/go-sumhash v0.1.0
github.com/algorand/graphtrace v0.1.0
- github.com/algorand/msgp v1.1.52
+ github.com/algorand/msgp v1.1.53
github.com/algorand/oapi-codegen v1.3.7
github.com/algorand/websocket v1.4.5
github.com/aws/aws-sdk-go v1.16.5
@@ -39,8 +40,6 @@ require (
)
require (
- github.com/DataDog/zstd v1.5.2 // indirect
- github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e // indirect
github.com/cpuguy83/go-md2man v1.0.8 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index 548f9d7a0..305e99b7c 100644
--- a/go.sum
+++ b/go.sum
@@ -13,8 +13,8 @@ github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dU
github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc=
github.com/algorand/graphtrace v0.1.0 h1:QemP1iT0W56SExD0NfiU6rsG34/v0Je6bg5UZnptEUM=
github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc=
-github.com/algorand/msgp v1.1.52 h1:Tw2OCCikKy0jaTWEIHwIfvThYHlJf9moviyKw+7PVVM=
-github.com/algorand/msgp v1.1.52/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
+github.com/algorand/msgp v1.1.53 h1:D6HKLyvLE6ltfsf8Apsrc+kqYb/CcOZEAfh1DpkPrNg=
+github.com/algorand/msgp v1.1.53/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
github.com/algorand/oapi-codegen v1.3.7 h1:TdXeGljgrnLXSCGPdeY6g6+i/G0Rr5CkjBgUJY6ht48=
github.com/algorand/oapi-codegen v1.3.7/go.mod h1:UvOtAiP3hc0M2GUKBnZVTjLe3HKGDKh6y9rs3e3JyOg=
github.com/algorand/websocket v1.4.5 h1:Cs6UTaCReAl02evYxmN8k57cNHmBILRcspfSxYg4AJE=
@@ -22,7 +22,6 @@ github.com/algorand/websocket v1.4.5/go.mod h1:79n6FSZY08yQagHzE/YWZqTPBYfY5wc3I
github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc=
github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
-github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
github.com/consensys/gnark-crypto v0.7.0 h1:rwdy8+ssmLYRqKp+ryRRgQJl/rCq2uv+n83cOydm5UE=
github.com/consensys/gnark-crypto v0.7.0/go.mod h1:KPSuJzyxkJA8xZ/+CV47tyqkr9MmpZA3PXivK4VPrVg=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
diff --git a/installer/config.json.example b/installer/config.json.example
index 91b9413d9..84476092e 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 24,
+ "Version": 25,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 7,
@@ -49,6 +49,7 @@
"EnableRequestLogger": false,
"EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
+ "EnableUsageLog": false,
"EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
"FallbackDNSResolverAddress": "",
@@ -63,6 +64,7 @@
"LogArchiveMaxAge": "",
"LogArchiveName": "node.archive.log",
"LogSizeLimit": 1073741824,
+ "MaxAPIBoxPerApplication": 100000,
"MaxAPIResourcesPerAccount": 100000,
"MaxAcctLookback": 4,
"MaxCatchpointDownloadDuration": 7200000000000,
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 264687b20..5c92758c2 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -23,6 +23,7 @@ import (
"encoding/hex"
"errors"
"fmt"
+ "math"
"strings"
"time"
@@ -50,6 +51,8 @@ type accountsDbQueries struct {
lookupStmt *sql.Stmt
lookupResourcesStmt *sql.Stmt
lookupAllResourcesStmt *sql.Stmt
+ lookupKvPairStmt *sql.Stmt
+ lookupKeysByRangeStmt *sql.Stmt
lookupCreatorStmt *sql.Stmt
}
@@ -129,6 +132,12 @@ var createResourcesTable = []string{
PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID`,
}
+var createBoxTable = []string{
+ `CREATE TABLE IF NOT EXISTS kvstore (
+ key blob primary key,
+ value blob)`,
+}
+
var createOnlineAccountsTable = []string{
`CREATE TABLE IF NOT EXISTS onlineaccounts (
address BLOB NOT NULL,
@@ -168,6 +177,7 @@ var accountsResetExprs = []string{
`DROP TABLE IF EXISTS acctrounds`,
`DROP TABLE IF EXISTS accounttotals`,
`DROP TABLE IF EXISTS accountbase`,
+ `DROP TABLE IF EXISTS kvstore`,
`DROP TABLE IF EXISTS assetcreators`,
`DROP TABLE IF EXISTS storedcatchpoints`,
`DROP TABLE IF EXISTS catchpointstate`,
@@ -183,7 +193,7 @@ var accountsResetExprs = []string{
// accountDBVersion is the database version that this binary would know how to support and how to upgrade to.
// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX
// and their descriptions.
-var accountDBVersion = int32(7)
+var accountDBVersion = int32(8)
// persistedAccountData is used for representing a single account stored on the disk. In addition to the
// basics.AccountData, it also stores complete referencing information used to maintain the base accounts
@@ -262,6 +272,15 @@ func (prd *persistedResourcesData) AccountResource() ledgercore.AccountResource
return ret
}
+//msgp:ignore persistedKVData
+type persistedKVData struct {
+ // kv value
+ value []byte
+ // the round number that is associated with the kv value. This field is the corresponding one to the round field
+ // in persistedAccountData, and serves the same purpose.
+ round basics.Round
+}
+
// resourceDelta is used as part of the compactResourcesDeltas to describe a change to a single resource.
type resourceDelta struct {
oldResource persistedResourcesData
@@ -354,6 +373,14 @@ const (
catchpointStateCatchpointLookback = catchpointState("catchpointLookback")
)
+// MaxEncodedBaseAccountDataSize is a rough estimate for the worst-case scenario we're going to have of the base account data serialized.
+// this number is verified by the TestEncodedBaseAccountDataSize function.
+const MaxEncodedBaseAccountDataSize = 350
+
+// MaxEncodedBaseResourceDataSize is a rough estimate for the worst-case scenario we're going to have of the base resource data serialized.
+// this number is verified by the TestEncodedBaseResourceSize function.
+const MaxEncodedBaseResourceDataSize = 20000
+
// normalizedAccountBalance is a staging area for a catchpoint file account information before it's being added to the catchpoint staging tables.
type normalizedAccountBalance struct {
// The public key address to which the account belongs.
@@ -1096,14 +1123,14 @@ func writeCatchpointStagingCreatable(ctx context.Context, tx *sql.Tx, bals []nor
if resData.IsOwning() {
// determine if it's an asset
if resData.IsAsset() {
- _, err := insertCreatorsStmt.ExecContext(ctx, basics.CreatableIndex(aidx), balance.address[:], basics.AssetCreatable)
+ _, err := insertCreatorsStmt.ExecContext(ctx, aidx, balance.address[:], basics.AssetCreatable)
if err != nil {
return err
}
}
// determine if it's an application
if resData.IsApp() {
- _, err := insertCreatorsStmt.ExecContext(ctx, basics.CreatableIndex(aidx), balance.address[:], basics.AppCreatable)
+ _, err := insertCreatorsStmt.ExecContext(ctx, aidx, balance.address[:], basics.AppCreatable)
if err != nil {
return err
}
@@ -1114,6 +1141,36 @@ func writeCatchpointStagingCreatable(ctx context.Context, tx *sql.Tx, bals []nor
return nil
}
+// writeCatchpointStagingKVs inserts all the KVs in the provided array into the
+// catchpoint kvstore staging table catchpointkvstore, and their hashes into the pending hashes table catchpointpendinghashes.
+func writeCatchpointStagingKVs(ctx context.Context, tx *sql.Tx, kvrs []encodedKVRecordV6) error {
+ insertKV, err := tx.PrepareContext(ctx, "INSERT INTO catchpointkvstore(key, value) VALUES(?, ?)")
+ if err != nil {
+ return err
+ }
+ defer insertKV.Close()
+
+ insertHash, err := tx.PrepareContext(ctx, "INSERT INTO catchpointpendinghashes(data) VALUES(?)")
+ if err != nil {
+ return err
+ }
+ defer insertHash.Close()
+
+ for _, kvr := range kvrs {
+ _, err := insertKV.ExecContext(ctx, kvr.Key, kvr.Value)
+ if err != nil {
+ return err
+ }
+
+ hash := kvHashBuilderV6(string(kvr.Key), kvr.Value)
+ _, err = insertHash.ExecContext(ctx, hash)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup bool) (err error) {
s := []string{
"DROP TABLE IF EXISTS catchpointbalances",
@@ -1121,6 +1178,7 @@ func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup
"DROP TABLE IF EXISTS catchpointaccounthashes",
"DROP TABLE IF EXISTS catchpointpendinghashes",
"DROP TABLE IF EXISTS catchpointresources",
+ "DROP TABLE IF EXISTS catchpointkvstore",
"DELETE FROM accounttotals where id='catchpointStaging'",
}
@@ -1141,6 +1199,8 @@ func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup
"CREATE TABLE IF NOT EXISTS catchpointpendinghashes (data blob)",
"CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)",
"CREATE TABLE IF NOT EXISTS catchpointresources (addrid INTEGER NOT NULL, aidx INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID",
+ "CREATE TABLE IF NOT EXISTS catchpointkvstore (key blob primary key, value blob)",
+
createNormalizedOnlineBalanceIndex(idxnameBalances, "catchpointbalances"), // should this be removed ?
createUniqueAddressBalanceIndex(idxnameAddress, "catchpointbalances"),
)
@@ -1164,11 +1224,13 @@ func applyCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, balancesRou
"DROP TABLE IF EXISTS assetcreators",
"DROP TABLE IF EXISTS accounthashes",
"DROP TABLE IF EXISTS resources",
+ "DROP TABLE IF EXISTS kvstore",
"ALTER TABLE catchpointbalances RENAME TO accountbase",
"ALTER TABLE catchpointassetcreators RENAME TO assetcreators",
"ALTER TABLE catchpointaccounthashes RENAME TO accounthashes",
"ALTER TABLE catchpointresources RENAME TO resources",
+ "ALTER TABLE catchpointkvstore RENAME TO kvstore",
}
for _, stmt := range stmts {
@@ -1353,6 +1415,26 @@ func accountsCreateOnlineAccountsTable(ctx context.Context, tx *sql.Tx) error {
return nil
}
+// accountsCreateBoxTable creates the KVStore table for box-storage in the database.
+func accountsCreateBoxTable(ctx context.Context, tx *sql.Tx) error {
+ var exists bool
+ err := tx.QueryRow("SELECT 1 FROM pragma_table_info('kvstore') WHERE name='key'").Scan(&exists)
+ if err == nil {
+ // already exists
+ return nil
+ }
+ if err != sql.ErrNoRows {
+ return err
+ }
+ for _, stmt := range createBoxTable {
+ _, err = tx.ExecContext(ctx, stmt)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func accountsCreateTxTailTable(ctx context.Context, tx *sql.Tx) (err error) {
for _, stmt := range createTxTailTable {
_, err = tx.ExecContext(ctx, stmt)
@@ -1418,6 +1500,8 @@ type baseAccountData struct {
TotalAssets uint64 `codec:"j"`
TotalAppParams uint64 `codec:"k"`
TotalAppLocalStates uint64 `codec:"l"`
+ TotalBoxes uint64 `codec:"m"`
+ TotalBoxBytes uint64 `codec:"n"`
baseVotingData
@@ -1442,6 +1526,8 @@ func (ba *baseAccountData) IsEmpty() bool {
ba.TotalAssets == 0 &&
ba.TotalAppParams == 0 &&
ba.TotalAppLocalStates == 0 &&
+ ba.TotalBoxes == 0 &&
+ ba.TotalBoxBytes == 0 &&
ba.baseVotingData.IsEmpty()
}
@@ -1462,6 +1548,8 @@ func (ba *baseAccountData) SetCoreAccountData(ad *ledgercore.AccountData) {
ba.TotalAssets = ad.TotalAssets
ba.TotalAppParams = ad.TotalAppParams
ba.TotalAppLocalStates = ad.TotalAppLocalStates
+ ba.TotalBoxes = ad.TotalBoxes
+ ba.TotalBoxBytes = ad.TotalBoxBytes
ba.baseVotingData.SetCoreAccountData(ad)
}
@@ -1479,6 +1567,8 @@ func (ba *baseAccountData) SetAccountData(ad *basics.AccountData) {
ba.TotalAssets = uint64(len(ad.Assets))
ba.TotalAppParams = uint64(len(ad.AppParams))
ba.TotalAppLocalStates = uint64(len(ad.AppLocalStates))
+ ba.TotalBoxes = ad.TotalBoxes
+ ba.TotalBoxBytes = ad.TotalBoxBytes
ba.baseVotingData.VoteID = ad.VoteID
ba.baseVotingData.SelectionID = ad.SelectionID
@@ -1511,6 +1601,8 @@ func (ba *baseAccountData) GetLedgerCoreAccountBaseData() ledgercore.AccountBase
TotalAppLocalStates: ba.TotalAppLocalStates,
TotalAssetParams: ba.TotalAssetParams,
TotalAssets: ba.TotalAssets,
+ TotalBoxes: ba.TotalBoxes,
+ TotalBoxBytes: ba.TotalBoxBytes,
}
}
@@ -1537,6 +1629,8 @@ func (ba *baseAccountData) GetAccountData() basics.AccountData {
NumByteSlice: ba.TotalAppSchemaNumByteSlice,
},
TotalExtraAppPages: ba.TotalExtraAppPages,
+ TotalBoxes: ba.TotalBoxes,
+ TotalBoxBytes: ba.TotalBoxBytes,
VoteID: ba.VoteID,
SelectionID: ba.SelectionID,
@@ -2165,13 +2259,24 @@ func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Acc
return fmt.Errorf("latest block header %d cannot be retrieved : %w", dbRound, err)
}
- maxTxnLife := basics.Round(config.Consensus[latestHdr.CurrentProtocol].MaxTxnLife)
- deeperBlockHistory := basics.Round(config.Consensus[latestHdr.CurrentProtocol].DeeperBlockHeaderHistory)
- firstRound := (latestBlockRound + 1).SubSaturate(maxTxnLife + deeperBlockHistory)
+ proto := config.Consensus[latestHdr.CurrentProtocol]
+ maxTxnLife := basics.Round(proto.MaxTxnLife)
+ deeperBlockHistory := basics.Round(proto.DeeperBlockHeaderHistory)
+ // firstRound is either maxTxnLife + deeperBlockHistory back from the latest for regular init
+ // or maxTxnLife + deeperBlockHistory + CatchpointLookback back for catchpoint apply.
+ // Try to check the earliest available and start from there.
+ firstRound := (latestBlockRound + 1).SubSaturate(maxTxnLife + deeperBlockHistory + basics.Round(proto.CatchpointLookback))
// we don't need to have the txtail for round 0.
if firstRound == basics.Round(0) {
firstRound++
}
+ if _, err := blockGet(blockTx, firstRound); err != nil {
+ // looks like this is not a catchpoint apply but a regular migration; start from maxTxnLife + deeperBlockHistory back
+ firstRound = (latestBlockRound + 1).SubSaturate(maxTxnLife + deeperBlockHistory)
+ if firstRound == basics.Round(0) {
+ firstRound++
+ }
+ }
tailRounds := make([][]byte, 0, maxTxnLife)
for rnd := firstRound; rnd <= dbRound; rnd++ {
blk, err := blockGet(blockTx, rnd)
@@ -2526,6 +2631,16 @@ func accountsInitDbQueries(q db.Queryable) (*accountsDbQueries, error) {
return nil, err
}
+ qs.lookupKvPairStmt, err = q.Prepare("SELECT acctrounds.rnd, kvstore.value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ if err != nil {
+ return nil, err
+ }
+
+ qs.lookupKeysByRangeStmt, err = q.Prepare("SELECT acctrounds.rnd, kvstore.key FROM acctrounds LEFT JOIN kvstore ON kvstore.key >= ? AND kvstore.key < ? WHERE id='acctbase'")
+ if err != nil {
+ return nil, err
+ }
+
qs.lookupCreatorStmt, err = q.Prepare("SELECT acctrounds.rnd, assetcreators.creator FROM acctrounds LEFT JOIN assetcreators ON asset = ? AND ctype = ? WHERE id='acctbase'")
if err != nil {
return nil, err
@@ -2588,6 +2703,108 @@ func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxRes
return
}
+// sql.go has the following contradictory comments:
+
+// Reference types such as []byte are only valid until the next call to Scan
+// and should not be retained. Their underlying memory is owned by the driver.
+// If retention is necessary, copy their values before the next call to Scan.
+
+// If a dest argument has type *[]byte, Scan saves in that argument a
+// copy of the corresponding data. The copy is owned by the caller and
+// can be modified and held indefinitely. The copy can be avoided by
+// using an argument of type *RawBytes instead; see the documentation
+// for RawBytes for restrictions on its use.
+
+// After checking the source code, a []byte slice destination is definitely cloned.
+
+func (qs *accountsDbQueries) lookupKeyValue(key string) (pv persistedKVData, err error) {
+ err = db.Retry(func() error {
+ var val []byte
+ // Cast to []byte to avoid interpretation as character string, see note in upsertKvPair
+ err := qs.lookupKvPairStmt.QueryRow([]byte(key)).Scan(&pv.round, &val)
+ if err != nil {
+ // this should never happen; it indicates that we don't have a current round in the acctrounds table.
+ if err == sql.ErrNoRows {
+ // Return the zero value of data
+ err = fmt.Errorf("unable to query value for key %v : %w", key, err)
+ }
+ return err
+ }
+ if val != nil { // We got a non-null value, so it exists
+ pv.value = val
+ return nil
+ }
+ // we don't have that key, just return pv with the database round (pv.value==nil)
+ return nil
+ })
+ return
+}
+
+// keyPrefixIntervalPreprocessing is implemented to generate an interval for DB queries that look up keys by prefix.
+// The DB query was designed this way to trigger the binary search optimization in SQLITE3.
+// The DB comparison for a blob-typed primary key is lexicographic, i.e., byte by byte.
+// In this way, we can introduce an interval such that a primary key should be >= some prefix and < that prefix's increment.
+// A corner case to consider is when the prefix's last byte is 0xFF, or the prefix consists entirely of 0xFF bytes.
+// - The first case can be solved by carrying, e.g., prefix = 0x1EFF -> interval being >= 0x1EFF and < 0x1F
+// - The second case can be solved by disregarding the upper limit, i.e., prefix = 0xFFFF -> interval being >= 0xFFFF
+// Another corner case to consider is an empty prefix, []byte{} or nil.
+// - In both cases, the result is the interval >= "", i.e., it returns []byte{} for prefix, and nil for prefixIncr.
+func keyPrefixIntervalPreprocessing(prefix []byte) ([]byte, []byte) {
+ if prefix == nil {
+ prefix = []byte{}
+ }
+ prefixIncr := make([]byte, len(prefix))
+ copy(prefixIncr, prefix)
+ for i := len(prefix) - 1; i >= 0; i-- {
+ currentByteIncr := int(prefix[i]) + 1
+ if currentByteIncr > 0xFF {
+ prefixIncr = prefixIncr[:len(prefixIncr)-1]
+ continue
+ }
+ prefixIncr[i] = byte(currentByteIncr)
+ return prefix, prefixIncr
+ }
+ return prefix, nil
+}
+
+func (qs *accountsDbQueries) lookupKeysByPrefix(prefix string, maxKeyNum uint64, results map[string]bool, resultCount uint64) (round basics.Round, err error) {
+ start, end := keyPrefixIntervalPreprocessing([]byte(prefix))
+ if end == nil {
+ // Not an expected use case, it's asking for all keys, or all keys
+ // prefixed by some number of 0xFF bytes.
+ return 0, fmt.Errorf("Lookup by strange prefix %#v", prefix)
+ }
+ err = db.Retry(func() error {
+ var rows *sql.Rows
+ rows, err = qs.lookupKeysByRangeStmt.Query(start, end)
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ var v sql.NullString
+
+ for rows.Next() {
+ if resultCount == maxKeyNum {
+ return nil
+ }
+ err = rows.Scan(&round, &v)
+ if err != nil {
+ return err
+ }
+ if v.Valid {
+ if _, ok := results[v.String]; ok {
+ continue
+ }
+ results[v.String] = true
+ resultCount++
+ }
+ }
+ return nil
+ })
+ return
+}
+
func (qs *accountsDbQueries) lookupCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (addr basics.Address, ok bool, dbRound basics.Round, err error) {
err = db.Retry(func() error {
var buf []byte
@@ -2919,6 +3136,8 @@ func (qs *accountsDbQueries) close() {
&qs.lookupStmt,
&qs.lookupResourcesStmt,
&qs.lookupAllResourcesStmt,
+ &qs.lookupKvPairStmt,
+ &qs.lookupKeysByRangeStmt,
&qs.lookupCreatorStmt,
}
for _, preparedQuery := range preparedQueries {
@@ -3131,6 +3350,9 @@ type accountsWriter interface {
deleteResource(addrid int64, aidx basics.CreatableIndex) (rowsAffected int64, err error)
updateResource(addrid int64, aidx basics.CreatableIndex, data resourcesData) (rowsAffected int64, err error)
+ upsertKvPair(key string, value []byte) error
+ deleteKvPair(key string) error
+
insertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error)
deleteCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType) (rowsAffected int64, err error)
@@ -3147,6 +3369,7 @@ type accountsSQLWriter struct {
insertCreatableIdxStmt, deleteCreatableIdxStmt *sql.Stmt
deleteByRowIDStmt, insertStmt, updateStmt *sql.Stmt
deleteResourceStmt, insertResourceStmt, updateResourceStmt *sql.Stmt
+ deleteKvPairStmt, upsertKvPairStmt *sql.Stmt
}
type onlineAccountsSQLWriter struct {
@@ -3154,38 +3377,21 @@ type onlineAccountsSQLWriter struct {
}
func (w *accountsSQLWriter) close() {
- if w.deleteByRowIDStmt != nil {
- w.deleteByRowIDStmt.Close()
- w.deleteByRowIDStmt = nil
+ // Formatted to match the type definition above
+ preparedStmts := []**sql.Stmt{
+ &w.insertCreatableIdxStmt, &w.deleteCreatableIdxStmt,
+ &w.deleteByRowIDStmt, &w.insertStmt, &w.updateStmt,
+ &w.deleteResourceStmt, &w.insertResourceStmt, &w.updateResourceStmt,
+ &w.deleteKvPairStmt, &w.upsertKvPairStmt,
}
- if w.insertStmt != nil {
- w.insertStmt.Close()
- w.insertStmt = nil
- }
- if w.updateStmt != nil {
- w.updateStmt.Close()
- w.updateStmt = nil
- }
- if w.deleteResourceStmt != nil {
- w.deleteResourceStmt.Close()
- w.deleteResourceStmt = nil
- }
- if w.insertResourceStmt != nil {
- w.insertResourceStmt.Close()
- w.insertResourceStmt = nil
- }
- if w.updateResourceStmt != nil {
- w.updateResourceStmt.Close()
- w.updateResourceStmt = nil
- }
- if w.insertCreatableIdxStmt != nil {
- w.insertCreatableIdxStmt.Close()
- w.insertCreatableIdxStmt = nil
- }
- if w.deleteCreatableIdxStmt != nil {
- w.deleteCreatableIdxStmt.Close()
- w.deleteCreatableIdxStmt = nil
+
+ for _, stmt := range preparedStmts {
+ if (*stmt) != nil {
+ (*stmt).Close()
+ *stmt = nil
+ }
}
+
}
func (w *onlineAccountsSQLWriter) close() {
@@ -3195,7 +3401,7 @@ func (w *onlineAccountsSQLWriter) close() {
}
}
-func makeAccountsSQLWriter(tx *sql.Tx, hasAccounts bool, hasResources bool, hasCreatables bool) (w *accountsSQLWriter, err error) {
+func makeAccountsSQLWriter(tx *sql.Tx, hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (w *accountsSQLWriter, err error) {
w = new(accountsSQLWriter)
if hasAccounts {
@@ -3232,6 +3438,18 @@ func makeAccountsSQLWriter(tx *sql.Tx, hasAccounts bool, hasResources bool, hasC
}
}
+ if hasKvPairs {
+ w.upsertKvPairStmt, err = tx.Prepare("INSERT INTO kvstore (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value=excluded.value")
+ if err != nil {
+ return
+ }
+
+ w.deleteKvPairStmt, err = tx.Prepare("DELETE FROM kvstore WHERE key=?")
+ if err != nil {
+ return
+ }
+ }
+
if hasCreatables {
w.insertCreatableIdxStmt, err = tx.Prepare("INSERT INTO assetcreators (asset, creator, ctype) VALUES (?, ?, ?)")
if err != nil {
@@ -3300,6 +3518,31 @@ func (w accountsSQLWriter) updateResource(addrid int64, aidx basics.CreatableInd
return
}
+func (w accountsSQLWriter) upsertKvPair(key string, value []byte) error {
+ // NOTE! If we pass in a `string`, then in the `BoxKey` case the key
+ // might contain a 0-byte, coming from the uint64 appID. The consequence
+ // would be that the DB key write gets cut off after such a 0-byte.
+ // Casting the `string` to `[]byte` avoids such trouble, and the test
+ // `TestBoxNamesByAppIDs` in `acctupdates_test` relies on this.
+ // relies on such modification.
+ result, err := w.upsertKvPairStmt.Exec([]byte(key), value)
+ if err != nil {
+ return err
+ }
+ _, err = result.LastInsertId()
+ return err
+}
+
+func (w accountsSQLWriter) deleteKvPair(key string) error {
+ // Cast to []byte to avoid interpretation as character string, see note in upsertKvPair
+ result, err := w.deleteKvPairStmt.Exec([]byte(key))
+ if err != nil {
+ return err
+ }
+ _, err = result.RowsAffected()
+ return err
+}
+
func (w accountsSQLWriter) insertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error) {
result, err := w.insertCreatableIdxStmt.Exec(cidx, creator, ctype)
if err != nil {
@@ -3348,20 +3591,21 @@ func (w onlineAccountsSQLWriter) insertOnlineAccount(addr basics.Address, normBa
// accountsNewRound is a convenience wrapper for accountsNewRoundImpl
func accountsNewRound(
tx *sql.Tx,
- updates compactAccountDeltas, resources compactResourcesDeltas, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
+ updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedKvValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, err error) {
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedKVs map[string]persistedKVData, err error) {
hasAccounts := updates.len() > 0
hasResources := resources.len() > 0
+ hasKvPairs := len(kvPairs) > 0
hasCreatables := len(creatables) > 0
- writer, err := makeAccountsSQLWriter(tx, hasAccounts, hasResources, hasCreatables)
+ writer, err := makeAccountsSQLWriter(tx, hasAccounts, hasResources, hasKvPairs, hasCreatables)
if err != nil {
return
}
defer writer.close()
- return accountsNewRoundImpl(writer, updates, resources, creatables, proto, lastUpdateRound)
+ return accountsNewRoundImpl(writer, updates, resources, kvPairs, creatables, proto, lastUpdateRound)
}
func onlineAccountsNewRound(
@@ -3385,10 +3629,9 @@ func onlineAccountsNewRound(
// The function returns a persistedAccountData for the modified accounts which can be stored in the base cache.
func accountsNewRoundImpl(
writer accountsWriter,
- updates compactAccountDeltas, resources compactResourcesDeltas, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
+ updates compactAccountDeltas, resources compactResourcesDeltas, kvPairs map[string]modifiedKvValue, creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
proto config.ConsensusParams, lastUpdateRound basics.Round,
-) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, err error) {
-
+) (updatedAccounts []persistedAccountData, updatedResources map[basics.Address][]persistedResourcesData, updatedKVs map[string]persistedKVData, err error) {
updatedAccounts = make([]persistedAccountData, updates.len())
updatedAccountIdx := 0
newAddressesRowIDs := make(map[basics.Address]int64)
@@ -3586,16 +3829,28 @@ func accountsNewRoundImpl(
}
}
- if len(creatables) > 0 {
- for cidx, cdelta := range creatables {
- if cdelta.Created {
- _, err = writer.insertCreatable(cidx, cdelta.Ctype, cdelta.Creator[:])
- } else {
- _, err = writer.deleteCreatable(cidx, cdelta.Ctype)
- }
- if err != nil {
- return
- }
+ updatedKVs = make(map[string]persistedKVData, len(kvPairs))
+ for key, value := range kvPairs {
+ if value.data != nil {
+ err = writer.upsertKvPair(key, value.data)
+ updatedKVs[key] = persistedKVData{value: value.data, round: lastUpdateRound}
+ } else {
+ err = writer.deleteKvPair(key)
+ updatedKVs[key] = persistedKVData{value: nil, round: lastUpdateRound}
+ }
+ if err != nil {
+ return
+ }
+ }
+
+ for cidx, cdelta := range creatables {
+ if cdelta.Created {
+ _, err = writer.insertCreatable(cidx, cdelta.Ctype, cdelta.Creator[:])
+ } else {
+ _, err = writer.deleteCreatable(cidx, cdelta.Ctype)
+ }
+ if err != nil {
+ return
}
}
@@ -3941,6 +4196,7 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e
}
// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database.
+//
//msgp:ignore MerkleCommitter
type MerkleCommitter struct {
tx *sql.Tx
@@ -4111,8 +4367,9 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx,
iterator.Close()
return
}
- // we just finished reading the table.
- iterator.Close()
+ // Do not Close() the iterator here. It is the caller's responsibility to
+ // do so, signalled by the return of an empty chunk. If we Close() here, the
+ // next call to Next() will start all over!
return
}
@@ -4129,6 +4386,7 @@ func (iterator *encodedAccountsBatchIter) Close() {
}
// orderedAccountsIterStep is used by orderedAccountsIter to define the current step
+//
//msgp:ignore orderedAccountsIterStep
type orderedAccountsIterStep int
@@ -4165,18 +4423,16 @@ type orderedAccountsIter struct {
pendingBaseRow pendingBaseRow
pendingResourceRow pendingResourceRow
accountCount int
- resourceCount int
insertStmt *sql.Stmt
}
// makeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
// only a single iterator can be active at a time.
-func makeOrderedAccountsIter(tx *sql.Tx, accountCount int, resourceCount int) *orderedAccountsIter {
+func makeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
return &orderedAccountsIter{
- tx: tx,
- accountCount: accountCount,
- resourceCount: resourceCount,
- step: oaiStepStartup,
+ tx: tx,
+ accountCount: accountCount,
+ step: oaiStepStartup,
}
}
@@ -4574,7 +4830,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd
count, iterator.pendingBaseRow, iterator.pendingResourceRow, err = processAllBaseAccountRecords(
iterator.accountBaseRows, iterator.resourcesRows,
baseCb, resCb,
- iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, iterator.resourceCount,
+ iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, math.MaxInt,
)
if err != nil {
iterator.Close(ctx)
@@ -4770,6 +5026,12 @@ func (prd *persistedResourcesData) before(other *persistedResourcesData) bool {
return prd.round < other.round
}
+// before compares the round numbers of two persistedKVData and determines if the current persistedKVData
+// happened before the other.
+func (prd persistedKVData) before(other *persistedKVData) bool {
+ return prd.round < other.round
+}
+
// before compares the round numbers of two persistedAccountData and determines if the current persistedAccountData
// happened before the other.
func (pac *persistedOnlineAccountData) before(other *persistedOnlineAccountData) bool {
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 5491cb21d..dbaa40bc6 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -24,6 +24,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math"
"math/rand"
"os"
"reflect"
@@ -33,6 +34,8 @@ import (
"testing"
"time"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
@@ -83,6 +86,9 @@ func accountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address
err = performOnlineRoundParamsTailMigration(context.Background(), tx, db.Accessor{}, true, proto)
require.NoError(tb, err)
+ err = accountsCreateBoxTable(context.Background(), tx)
+ require.NoError(tb, err)
+
return newDB
}
@@ -102,7 +108,7 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
pad, err := aq.lookup(addr)
require.NoError(t, err)
d := pad.accountData.GetLedgerCoreAccountData()
- require.Equal(t, d, expected)
+ require.Equal(t, expected, d)
switch d.Status {
case basics.Online:
@@ -309,7 +315,7 @@ func TestAccountDBRound(t *testing.T) {
require.NoError(t, err)
expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams)
- updatedAccts, updatesResources, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, ctbsWithDeletes, proto, basics.Round(i))
+ updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, updatesCnt, resourceUpdatesCnt, nil, ctbsWithDeletes, proto, basics.Round(i))
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
numResUpdates := 0
@@ -317,6 +323,7 @@ func TestAccountDBRound(t *testing.T) {
numResUpdates += len(rs)
}
require.Equal(t, resourceUpdatesCnt.len(), numResUpdates)
+ require.Empty(t, updatedKVs)
updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i))
require.NoError(t, err)
@@ -446,7 +453,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
err = outResourcesDeltas.resourcesLoadOld(tx, knownAddresses)
require.NoError(t, err)
- updatedAccts, updatesResources, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, proto, basics.Round(lastRound))
+ updatedAccts, updatesResources, updatedKVs, err := accountsNewRound(tx, outAccountDeltas, outResourcesDeltas, nil, nil, proto, basics.Round(lastRound))
require.NoError(t, err)
require.Equal(t, 1, len(updatedAccts)) // we store empty even for deleted accounts
require.Equal(t,
@@ -459,6 +466,8 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
persistedResourcesData{addrid: 0, aidx: 100, data: makeResourcesData(0), round: basics.Round(lastRound)},
updatesResources[addr][0],
)
+
+ require.Empty(t, updatedKVs)
})
}
}
@@ -524,10 +533,12 @@ func checkCreatables(t *testing.T,
// randomCreatableSampling sets elements to delete from previous iteration
// It considers 10 elements in an iteration.
// loop 0: returns the first 10 elements
-// loop 1: returns: * the second 10 elements
-// * random sample of elements from the first 10: created changed from true -> false
-// loop 2: returns: * the elements 20->30
-// * random sample of elements from 10->20: created changed from true -> false
+// loop 1: returns:
+// - the second 10 elements
+// - random sample of elements from the first 10: created changed from true -> false
+// loop 2: returns:
+// - the elements 20->30
+// - random sample of elements from 10->20: created changed from true -> false
func randomCreatableSampling(iteration int, crtbsList []basics.CreatableIndex,
creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable,
@@ -717,6 +728,7 @@ func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) {
qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
require.NoError(b, err)
+ defer qs.close()
// read all the balances in the database, shuffled
addrs := make([]basics.Address, len(accounts))
@@ -1015,8 +1027,8 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
last64KSize = chunkSize
last64KAccountCreationTime = time.Duration(0)
}
- var balances catchpointFileBalancesChunkV6
- balances.Balances = make([]encodedBalanceRecordV6, chunkSize)
+ var chunk catchpointFileChunkV6
+ chunk.Balances = make([]encodedBalanceRecordV6, chunkSize)
for i := uint64(0); i < chunkSize; i++ {
var randomAccount encodedBalanceRecordV6
accountData := baseAccountData{RewardsBase: accountsLoaded + i}
@@ -1026,13 +1038,13 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
if ascendingOrder {
binary.LittleEndian.PutUint64(randomAccount.Address[:], accountsLoaded+i)
}
- balances.Balances[i] = randomAccount
+ chunk.Balances[i] = randomAccount
}
balanceLoopDuration := time.Since(balancesLoopStart)
last64KAccountCreationTime += balanceLoopDuration
accountsGenerationDuration += balanceLoopDuration
- normalizedAccountBalances, err := prepareNormalizedBalancesV6(balances.Balances, proto)
+ normalizedAccountBalances, err := prepareNormalizedBalancesV6(chunk.Balances, proto)
require.NoError(b, err)
b.StartTimer()
err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
@@ -1069,6 +1081,285 @@ func BenchmarkWriteCatchpointStagingBalances(b *testing.B) {
}
}
+func TestKeyPrefixIntervalPreprocessing(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testCases := []struct {
+ input []byte
+ outputPrefix []byte
+ outputPrefixIncr []byte
+ }{
+ {input: []byte{0xAB, 0xCD}, outputPrefix: []byte{0xAB, 0xCD}, outputPrefixIncr: []byte{0xAB, 0xCE}},
+ {input: []byte{0xFF}, outputPrefix: []byte{0xFF}, outputPrefixIncr: nil},
+ {input: []byte{0xFE, 0xFF}, outputPrefix: []byte{0xFE, 0xFF}, outputPrefixIncr: []byte{0xFF}},
+ {input: []byte{0xFF, 0xFF}, outputPrefix: []byte{0xFF, 0xFF}, outputPrefixIncr: nil},
+ {input: []byte{0xAB, 0xCD}, outputPrefix: []byte{0xAB, 0xCD}, outputPrefixIncr: []byte{0xAB, 0xCE}},
+ {input: []byte{0x1E, 0xFF, 0xFF}, outputPrefix: []byte{0x1E, 0xFF, 0xFF}, outputPrefixIncr: []byte{0x1F}},
+ {input: []byte{0xFF, 0xFE, 0xFF, 0xFF}, outputPrefix: []byte{0xFF, 0xFE, 0xFF, 0xFF}, outputPrefixIncr: []byte{0xFF, 0xFF}},
+ {input: []byte{0x00, 0xFF}, outputPrefix: []byte{0x00, 0xFF}, outputPrefixIncr: []byte{0x01}},
+ {input: []byte(string("bx:123")), outputPrefix: []byte(string("bx:123")), outputPrefixIncr: []byte(string("bx:124"))},
+ {input: []byte{}, outputPrefix: []byte{}, outputPrefixIncr: nil},
+ {input: nil, outputPrefix: []byte{}, outputPrefixIncr: nil},
+ {input: []byte{0x1E, 0xFF, 0xFF}, outputPrefix: []byte{0x1E, 0xFF, 0xFF}, outputPrefixIncr: []byte{0x1F}},
+ {input: []byte{0xFF, 0xFE, 0xFF, 0xFF}, outputPrefix: []byte{0xFF, 0xFE, 0xFF, 0xFF}, outputPrefixIncr: []byte{0xFF, 0xFF}},
+ {input: []byte{0x00, 0xFF}, outputPrefix: []byte{0x00, 0xFF}, outputPrefixIncr: []byte{0x01}},
+ }
+ for _, tc := range testCases {
+ actualOutputPrefix, actualOutputPrefixIncr := keyPrefixIntervalPreprocessing(tc.input)
+ require.Equal(t, tc.outputPrefix, actualOutputPrefix)
+ require.Equal(t, tc.outputPrefixIncr, actualOutputPrefixIncr)
+ }
+}
+
+func TestLookupKeysByPrefix(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ dbs, fn := dbOpenTest(t, false)
+ setDbLogging(t, dbs)
+ defer cleanupTestDb(dbs, fn, false)
+
+ // return account data, initialize DB tables from accountsInitTest
+ _ = benchmarkInitBalances(t, 1, dbs, protocol.ConsensusCurrentVersion)
+
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
+ require.NoError(t, err)
+ defer qs.close()
+
+ kvPairDBPrepareSet := []struct {
+ key []byte
+ value []byte
+ }{
+ {key: []byte{0xFF, 0x12, 0x34, 0x56, 0x78}, value: []byte("val0")},
+ {key: []byte{0xFF, 0xFF, 0x34, 0x56, 0x78}, value: []byte("val1")},
+ {key: []byte{0xFF, 0xFF, 0xFF, 0x56, 0x78}, value: []byte("val2")},
+ {key: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x78}, value: []byte("val3")},
+ {key: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, value: []byte("val4")},
+ {key: []byte{0xFF, 0xFE, 0xFF}, value: []byte("val5")},
+ {key: []byte{0xFF, 0xFF, 0x00, 0xFF, 0xFF}, value: []byte("val6")},
+ {key: []byte{0xFF, 0xFF}, value: []byte("should not confuse with 0xFF-0xFE")},
+ {key: []byte{0xBA, 0xDD, 0xAD, 0xFF, 0xFF}, value: []byte("baddadffff")},
+ {key: []byte{0xBA, 0xDD, 0xAE, 0x00}, value: []byte("baddae00")},
+ {key: []byte{0xBA, 0xDD, 0xAE}, value: []byte("baddae")},
+ {key: []byte("TACOCAT"), value: []byte("val6")},
+ {key: []byte("TACOBELL"), value: []byte("2bucks50cents?")},
+ {key: []byte("DingHo-SmallPack"), value: []byte("3bucks75cents")},
+ {key: []byte("DingHo-StandardPack"), value: []byte("5bucks25cents")},
+ {key: []byte("BostonKitchen-CheeseSlice"), value: []byte("3bucks50cents")},
+ {key: []byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`), value: []byte("random Bluh")},
+ }
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+
+ // writer is only for kvstore
+ writer, err := makeAccountsSQLWriter(tx, true, true, true, true)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(kvPairDBPrepareSet); i++ {
+ err := writer.upsertKvPair(string(kvPairDBPrepareSet[i].key), kvPairDBPrepareSet[i].value)
+ require.NoError(t, err)
+ }
+
+ err = tx.Commit()
+ require.NoError(t, err)
+ writer.close()
+
+ testCases := []struct {
+ prefix []byte
+ expectedNames [][]byte
+ err string
+ }{
+ {
+ prefix: []byte{0xFF},
+ err: "strange prefix",
+ },
+ {
+ prefix: []byte{0xFF, 0xFE},
+ expectedNames: [][]byte{
+ {0xFF, 0xFE, 0xFF},
+ },
+ },
+ {
+ prefix: []byte{0xFF, 0xFE, 0xFF},
+ expectedNames: [][]byte{
+ {0xFF, 0xFE, 0xFF},
+ },
+ },
+ {
+ prefix: []byte{0xFF, 0xFF},
+ err: "strange prefix",
+ },
+ {
+ prefix: []byte{0xFF, 0xFF, 0xFF},
+ err: "strange prefix",
+ },
+ {
+ prefix: []byte{0xFF, 0xFF, 0xFF, 0xFF},
+ err: "strange prefix",
+ },
+ {
+ prefix: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ err: "strange prefix",
+ },
+ {
+ prefix: []byte{0xBA, 0xDD, 0xAD, 0xFF},
+ expectedNames: [][]byte{
+ {0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+ },
+ },
+ {
+ prefix: []byte{0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+ expectedNames: [][]byte{
+ {0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+ },
+ },
+ {
+ prefix: []byte{0xBA, 0xDD},
+ expectedNames: [][]byte{
+ {0xBA, 0xDD, 0xAE},
+ {0xBA, 0xDD, 0xAE, 0x00},
+ {0xBA, 0xDD, 0xAD, 0xFF, 0xFF},
+ },
+ },
+ {
+ prefix: []byte{0xBA, 0xDD, 0xAE},
+ expectedNames: [][]byte{
+ {0xBA, 0xDD, 0xAE},
+ {0xBA, 0xDD, 0xAE, 0x00},
+ },
+ },
+ {
+ prefix: []byte("TACO"),
+ expectedNames: [][]byte{
+ []byte("TACOCAT"),
+ []byte("TACOBELL"),
+ },
+ },
+ {
+ prefix: []byte("TACOC"),
+ expectedNames: [][]byte{[]byte("TACOCAT")},
+ },
+ {
+ prefix: []byte("DingHo"),
+ expectedNames: [][]byte{
+ []byte("DingHo-SmallPack"),
+ []byte("DingHo-StandardPack"),
+ },
+ },
+ {
+ prefix: []byte("DingHo-S"),
+ expectedNames: [][]byte{
+ []byte("DingHo-SmallPack"),
+ []byte("DingHo-StandardPack"),
+ },
+ },
+ {
+ prefix: []byte("DingHo-Small"),
+ expectedNames: [][]byte{[]byte("DingHo-SmallPack")},
+ },
+ {
+ prefix: []byte("BostonKitchen"),
+ expectedNames: [][]byte{[]byte("BostonKitchen-CheeseSlice")},
+ },
+ {
+ prefix: []byte(`™£´´∂ƒ∂ƒßƒ©`),
+ expectedNames: [][]byte{[]byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`)},
+ },
+ {
+ prefix: []byte{},
+ err: "strange prefix",
+ },
+ }
+
+ for index, testCase := range testCases {
+ t.Run("lookupKVByPrefix-testcase-"+strconv.Itoa(index), func(t *testing.T) {
+ actual := make(map[string]bool)
+ _, err := qs.lookupKeysByPrefix(string(testCase.prefix), uint64(len(kvPairDBPrepareSet)), actual, 0)
+ if err != nil {
+ require.NotEmpty(t, testCase.err, testCase.prefix)
+ require.Contains(t, err.Error(), testCase.err)
+ } else {
+ require.Empty(t, testCase.err)
+ expected := make(map[string]bool)
+ for _, name := range testCase.expectedNames {
+ expected[string(name)] = true
+ }
+ require.Equal(t, actual, expected)
+ }
+ })
+ }
+}
+
+func BenchmarkLookupKeyByPrefix(b *testing.B) {
+	// modeled after BenchmarkWritingRandomBalancesDisk
+
+ dbs, fn := dbOpenTest(b, false)
+ setDbLogging(b, dbs)
+ defer cleanupTestDb(dbs, fn, false)
+
+ // return account data, initialize DB tables from accountsInitTest
+ _ = benchmarkInitBalances(b, 1, dbs, protocol.ConsensusCurrentVersion)
+
+ qs, err := accountsInitDbQueries(dbs.Rdb.Handle)
+ require.NoError(b, err)
+ defer qs.close()
+
+ currentDBSize := 0
+ nextDBSize := 2
+ increment := 2
+
+ nameBuffer := make([]byte, 5)
+ valueBuffer := make([]byte, 5)
+
+ // from 2^1 -> 2^2 -> ... -> 2^22 sized DB
+ for bIndex := 0; bIndex < 22; bIndex++ {
+ // make writer to DB
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+
+ // writer is only for kvstore
+ writer, err := makeAccountsSQLWriter(tx, true, true, true, true)
+ if err != nil {
+ return
+ }
+
+ var prefix string
+		// grow the kvstore by writing randomly generated box key/value pairs
+ for i := 0; i < nextDBSize-currentDBSize; i++ {
+ crypto.RandBytes(nameBuffer)
+ crypto.RandBytes(valueBuffer)
+ appID := basics.AppIndex(crypto.RandUint64())
+ boxKey := logic.MakeBoxKey(appID, string(nameBuffer))
+ err = writer.upsertKvPair(boxKey, valueBuffer)
+ require.NoError(b, err)
+
+ if i == 0 {
+ prefix = logic.MakeBoxKey(appID, "")
+ }
+ }
+ err = tx.Commit()
+ require.NoError(b, err)
+ writer.close()
+
+ // benchmark the query against large DB, see if we have O(log N) speed
+ currentDBSize = nextDBSize
+ nextDBSize *= increment
+
+ b.Run("lookupKVByPrefix-DBsize"+strconv.Itoa(currentDBSize), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ results := make(map[string]bool)
+ _, err := qs.lookupKeysByPrefix(prefix, uint64(currentDBSize), results, 0)
+ require.NoError(b, err)
+ require.True(b, len(results) >= 1)
+ }
+ })
+ }
+}
+
// upsert updates existing or inserts a new entry
func (a *compactResourcesDeltas) upsert(delta resourceDelta) {
if idx, exist := a.cache[accountCreatable{address: delta.address, index: delta.oldResource.aidx}]; exist {
@@ -1282,6 +1573,7 @@ func TestCompactResourceDeltas(t *testing.T) {
func TestResourcesDataApp(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
a := require.New(t)
@@ -1326,161 +1618,176 @@ func TestResourcesDataApp(t *testing.T) {
a.Equal(appParamsEmpty, rd.GetAppParams())
a.Equal(appLocalEmpty, rd.GetAppLocalState())
- // check empty states + non-empty params
- appParams := ledgertesting.RandomAppParams()
- rd = resourcesData{}
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParams, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParams, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- appState := ledgertesting.RandomAppLocalState()
- rd.SetAppLocalState(appState)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParams, rd.GetAppParams())
- a.Equal(appState, rd.GetAppLocalState())
-
- // check ClearAppLocalState
- rd.ClearAppLocalState()
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.False(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParams, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- // check ClearAppParams
- rd.SetAppLocalState(appState)
- rd.ClearAppParams()
- a.True(rd.IsApp())
- a.False(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParamsEmpty, rd.GetAppParams())
- a.Equal(appState, rd.GetAppLocalState())
-
- // check both clear
- rd.ClearAppLocalState()
- a.False(rd.IsApp())
- a.False(rd.IsOwning())
- a.False(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsEmpty())
- a.Equal(appParamsEmpty, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- // check params clear when non-empty params and empty holding
- rd = resourcesData{}
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParams, true)
- rd.ClearAppParams()
- a.True(rd.IsApp())
- a.False(rd.IsOwning())
- a.True(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
- a.Equal(appParamsEmpty, rd.GetAppParams())
- a.Equal(appLocalEmpty, rd.GetAppLocalState())
-
- rd = resourcesData{}
- rd.SetAppLocalState(appLocalEmpty)
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsApp())
- a.False(rd.IsEmpty())
- a.Equal(rd.ResourceFlags, resourceFlagsEmptyApp)
- rd.ClearAppLocalState()
- a.False(rd.IsApp())
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsEmpty())
- a.Equal(rd.ResourceFlags, resourceFlagsNotHolding)
-
- // check migration flow (accountDataResources)
- // 1. both exist and empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParamsEmpty, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 2. both exist and not empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appState)
- rd.SetAppParams(appParams, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 3. both exist: holding not empty, param is empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appState)
- rd.SetAppParams(appParamsEmpty, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 4. both exist: holding empty, param is not empty
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appLocalEmpty)
- rd.SetAppParams(appParams, true)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 5. holding does not exist and params is empty
- rd = makeResourcesData(0)
- rd.SetAppParams(appParamsEmpty, false)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.False(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 6. holding does not exist and params is not empty
- rd = makeResourcesData(0)
- rd.SetAppParams(appParams, false)
- a.True(rd.IsApp())
- a.True(rd.IsOwning())
- a.False(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 7. holding exist and not empty and params does not exist
- rd = makeResourcesData(0)
- rd.SetAppLocalState(appState)
- a.True(rd.IsApp())
- a.False(rd.IsOwning())
- a.True(rd.IsHolding())
- a.False(rd.IsEmptyAppFields())
- a.False(rd.IsEmpty())
-
- // 8. both do not exist
- rd = makeResourcesData(0)
- a.False(rd.IsApp())
- a.False(rd.IsOwning())
- a.False(rd.IsHolding())
- a.True(rd.IsEmptyAppFields())
- a.True(rd.IsEmpty())
+ // Since some steps use randomly generated input, the test is run N times
+ // to cover a larger search space of inputs.
+ for i := 0; i < 1000; i++ {
+ // check empty states + non-empty params
+ appParams := ledgertesting.RandomAppParams()
+ rd = resourcesData{}
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParams, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParams, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ appState := ledgertesting.RandomAppLocalState()
+ rd.SetAppLocalState(appState)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParams, rd.GetAppParams())
+ a.Equal(appState, rd.GetAppLocalState())
+
+ // check ClearAppLocalState
+ rd.ClearAppLocalState()
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParams, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ // check ClearAppParams
+ rd.SetAppLocalState(appState)
+ rd.ClearAppParams()
+ a.True(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.True(rd.IsHolding())
+ if appState.Schema.NumEntries() == 0 {
+ a.True(rd.IsEmptyAppFields())
+ } else {
+ a.False(rd.IsEmptyAppFields())
+ }
+ a.False(rd.IsEmpty())
+ a.Equal(appParamsEmpty, rd.GetAppParams())
+ a.Equal(appState, rd.GetAppLocalState())
+
+ // check both clear
+ rd.ClearAppLocalState()
+ a.False(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsEmpty())
+ a.Equal(appParamsEmpty, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ // check params clear when non-empty params and empty holding
+ rd = resourcesData{}
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParams, true)
+ rd.ClearAppParams()
+ a.True(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+ a.Equal(appParamsEmpty, rd.GetAppParams())
+ a.Equal(appLocalEmpty, rd.GetAppLocalState())
+
+ rd = resourcesData{}
+ rd.SetAppLocalState(appLocalEmpty)
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsApp())
+ a.False(rd.IsEmpty())
+ a.Equal(rd.ResourceFlags, resourceFlagsEmptyApp)
+ rd.ClearAppLocalState()
+ a.False(rd.IsApp())
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsEmpty())
+ a.Equal(rd.ResourceFlags, resourceFlagsNotHolding)
+
+ // check migration flow (accountDataResources)
+ // 1. both exist and empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParamsEmpty, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 2. both exist and not empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appState)
+ rd.SetAppParams(appParams, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 3. both exist: holding not empty, param is empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appState)
+ rd.SetAppParams(appParamsEmpty, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ if appState.Schema.NumEntries() == 0 {
+ a.True(rd.IsEmptyAppFields())
+ } else {
+ a.False(rd.IsEmptyAppFields())
+ }
+ a.False(rd.IsEmpty())
+
+ // 4. both exist: holding empty, param is not empty
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appLocalEmpty)
+ rd.SetAppParams(appParams, true)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.True(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 5. holding does not exist and params is empty
+ rd = makeResourcesData(0)
+ rd.SetAppParams(appParamsEmpty, false)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 6. holding does not exist and params is not empty
+ rd = makeResourcesData(0)
+ rd.SetAppParams(appParams, false)
+ a.True(rd.IsApp())
+ a.True(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.False(rd.IsEmptyAppFields())
+ a.False(rd.IsEmpty())
+
+ // 7. holding exist and not empty and params does not exist
+ rd = makeResourcesData(0)
+ rd.SetAppLocalState(appState)
+ a.True(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.True(rd.IsHolding())
+ if appState.Schema.NumEntries() == 0 {
+ a.True(rd.IsEmptyAppFields())
+ } else {
+ a.False(rd.IsEmptyAppFields())
+ }
+ a.False(rd.IsEmpty())
+ // 8. both do not exist
+ rd = makeResourcesData(0)
+ a.False(rd.IsApp())
+ a.False(rd.IsOwning())
+ a.False(rd.IsHolding())
+ a.True(rd.IsEmptyAppFields())
+ a.True(rd.IsEmpty())
+ }
}
func TestResourcesDataAsset(t *testing.T) {
@@ -2334,7 +2641,7 @@ func TestBaseAccountDataIsEmpty(t *testing.T) {
structureTesting := func(t *testing.T) {
encoding, err := json.Marshal(&empty)
zeros32 := "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
- expectedEncoding := `{"Status":0,"MicroAlgos":{"Raw":0},"RewardsBase":0,"RewardedMicroAlgos":{"Raw":0},"AuthAddr":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ","TotalAppSchemaNumUint":0,"TotalAppSchemaNumByteSlice":0,"TotalExtraAppPages":0,"TotalAssetParams":0,"TotalAssets":0,"TotalAppParams":0,"TotalAppLocalStates":0,"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"UpdateRound":0}`
+ expectedEncoding := `{"Status":0,"MicroAlgos":{"Raw":0},"RewardsBase":0,"RewardedMicroAlgos":{"Raw":0},"AuthAddr":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ","TotalAppSchemaNumUint":0,"TotalAppSchemaNumByteSlice":0,"TotalExtraAppPages":0,"TotalAssetParams":0,"TotalAssets":0,"TotalAppParams":0,"TotalAppLocalStates":0,"TotalBoxes":0,"TotalBoxBytes":0,"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"UpdateRound":0}`
require.NoError(t, err)
require.Equal(t, expectedEncoding, string(encoding))
}
@@ -2551,6 +2858,8 @@ type mockAccountWriter struct {
rowids map[int64]basics.Address
resources map[mockResourcesKey]ledgercore.AccountResource
+ kvStore map[string][]byte
+
lastRowid int64
availRowIds []int64
}
@@ -2741,6 +3050,16 @@ func (m *mockAccountWriter) updateResource(addrid int64, aidx basics.CreatableIn
return 1, nil
}
+func (m *mockAccountWriter) upsertKvPair(key string, value []byte) error {
+ m.kvStore[key] = value
+ return nil
+}
+
+func (m *mockAccountWriter) deleteKvPair(key string) error {
+ delete(m.kvStore, key)
+ return nil
+}
+
func (m *mockAccountWriter) insertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (rowid int64, err error) {
return 0, fmt.Errorf("insertCreatable: not implemented")
}
@@ -2984,12 +3303,13 @@ func TestAccountUnorderedUpdates(t *testing.T) {
t.Run(fmt.Sprintf("acct-perm-%d|res-perm-%d", i, j), func(t *testing.T) {
a := require.New(t)
mock2 := mock.clone()
- updatedAccounts, updatedResources, err := accountsNewRoundImpl(
- &mock2, acctVariant, resVariant, nil, config.ConsensusParams{}, latestRound,
+ updatedAccounts, updatedResources, updatedKVs, err := accountsNewRoundImpl(
+ &mock2, acctVariant, resVariant, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
- a.Equal(3, len(updatedAccounts))
- a.Equal(3, len(updatedResources))
+ a.Len(updatedAccounts, 3)
+ a.Len(updatedResources, 3)
+ a.Empty(updatedKVs)
})
}
}
@@ -3066,12 +3386,13 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
a.Equal(1, len(resDeltas.misses)) // (addr2, aidx) does not exist
a.Equal(2, resDeltas.len()) // (addr1, aidx) found
- updatedAccounts, updatedResources, err := accountsNewRoundImpl(
- &mock, acctDeltas, resDeltas, nil, config.ConsensusParams{}, latestRound,
+ updatedAccounts, updatedResources, updatedKVs, err := accountsNewRoundImpl(
+ &mock, acctDeltas, resDeltas, nil, nil, config.ConsensusParams{}, latestRound,
)
a.NoError(err)
a.Equal(3, len(updatedAccounts))
a.Equal(2, len(updatedResources))
+ a.Equal(0, len(updatedKVs))
// one deletion entry for pre-existing account addr1, and one entry for in-memory account addr2
// in base accounts updates and in resources updates
@@ -3095,6 +3416,155 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
}
}
+func BenchmarkLRUResources(b *testing.B) {
+ var baseResources lruResources
+ baseResources.init(nil, 1000, 850)
+
+ var data persistedResourcesData
+ var has bool
+ addrs := make([]basics.Address, 850)
+ for i := 0; i < 850; i++ {
+ data.data.ApprovalProgram = make([]byte, 8096*4)
+ data.aidx = basics.CreatableIndex(1)
+ addrBytes := ([]byte(fmt.Sprintf("%d", i)))[:32]
+ var addr basics.Address
+ for i, b := range addrBytes {
+ addr[i] = b
+ }
+ addrs[i] = addr
+ baseResources.write(data, addr)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ pos := i % 850
+ data, has = baseResources.read(addrs[pos], basics.CreatableIndex(1))
+ require.True(b, has)
+ }
+}
+
+func initBoxDatabase(b *testing.B, totalBoxes, boxSize int) (db.Pair, func(), error) {
+ batchCount := 100
+ if batchCount > totalBoxes {
+ batchCount = 1
+ }
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ dbs, fn := dbOpenTest(b, false)
+ setDbLogging(b, dbs)
+ cleanup := func() {
+ cleanupTestDb(dbs, fn, false)
+ }
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ _, err = accountsInit(tx, make(map[basics.Address]basics.AccountData), proto)
+ require.NoError(b, err)
+ err = tx.Commit()
+ require.NoError(b, err)
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeOff, false)
+ require.NoError(b, err)
+
+ cnt := 0
+ for batch := 0; batch <= batchCount; batch++ {
+ tx, err = dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ writer, err := makeAccountsSQLWriter(tx, false, false, true, false)
+ require.NoError(b, err)
+ for boxIdx := 0; boxIdx < totalBoxes/batchCount; boxIdx++ {
+ err = writer.upsertKvPair(fmt.Sprintf("%d", cnt), make([]byte, boxSize))
+ require.NoError(b, err)
+ cnt++
+ }
+
+ err = tx.Commit()
+ require.NoError(b, err)
+ writer.close()
+ }
+ err = dbs.Wdb.SetSynchronousMode(context.Background(), db.SynchronousModeFull, true)
+ return dbs, cleanup, err
+}
+
+func BenchmarkBoxDatabaseRead(b *testing.B) {
+ getBoxNamePermutation := func(totalBoxes int) []int {
+ rand.Seed(time.Now().UnixNano())
+ boxNames := make([]int, totalBoxes)
+ for i := 0; i < totalBoxes; i++ {
+ boxNames[i] = i
+ }
+ rand.Shuffle(len(boxNames), func(x, y int) { boxNames[x], boxNames[y] = boxNames[y], boxNames[x] })
+ return boxNames
+ }
+
+ boxCnt := []int{10, 1000, 100000}
+ boxSizes := []int{2, 2048, 4 * 8096}
+ for _, totalBoxes := range boxCnt {
+ for _, boxSize := range boxSizes {
+ b.Run(fmt.Sprintf("totalBoxes=%d/boxSize=%d", totalBoxes, boxSize), func(b *testing.B) {
+ b.StopTimer()
+
+ dbs, cleanup, err := initBoxDatabase(b, totalBoxes, boxSize)
+ require.NoError(b, err)
+
+ boxNames := getBoxNamePermutation(totalBoxes)
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+ var v sql.NullString
+ for i := 0; i < b.N; i++ {
+ var pv persistedKVData
+ boxName := boxNames[i%totalBoxes]
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ }
+
+ cleanup()
+ })
+ }
+ }
+
+ // test caching performance
+ lookbacks := []int{1, 32, 256, 2048}
+ for _, lookback := range lookbacks {
+ for _, boxSize := range boxSizes {
+ totalBoxes := 100000
+
+ b.Run(fmt.Sprintf("lookback=%d/boxSize=%d", lookback, boxSize), func(b *testing.B) {
+ b.StopTimer()
+
+ dbs, cleanup, err := initBoxDatabase(b, totalBoxes, boxSize)
+ require.NoError(b, err)
+
+ boxNames := getBoxNamePermutation(totalBoxes)
+ lookupStmt, err := dbs.Wdb.Handle.Prepare("SELECT rnd, value FROM acctrounds LEFT JOIN kvstore ON key = ? WHERE id='acctbase';")
+ require.NoError(b, err)
+ var v sql.NullString
+ for i := 0; i < b.N+lookback; i++ {
+ var pv persistedKVData
+ boxName := boxNames[i%totalBoxes]
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+
+ // benchmark reading the potentially cached value that was read lookback boxes ago
+ if i >= lookback {
+ boxName = boxNames[(i-lookback)%totalBoxes]
+ b.StartTimer()
+ err = lookupStmt.QueryRow([]byte(fmt.Sprintf("%d", boxName))).Scan(&pv.round, &v)
+ b.StopTimer()
+ require.NoError(b, err)
+ require.True(b, v.Valid)
+ }
+ }
+
+ cleanup()
+ })
+ }
+ }
+}
+
// TestAccountTopOnline ensures accountsOnlineTop return a right subset of accounts
// from the history table.
// Start with two online accounts A, B at round 1
@@ -3103,11 +3573,11 @@ func TestAccountsNewRoundDeletedResourceEntries(t *testing.T) {
//
// addr | rnd | status
// -----|-----|--------
-// A | 1 | 1
-// B | 1 | 1
-// A | 2 | 0
-// B | 3 | 0
-// C | 3 | 1
+// A | 1 | 1
+// B | 1 | 1
+// A | 2 | 0
+// B | 3 | 0
+// C | 3 | 1
//
// Ensure
// - for round 1 A and B returned
@@ -3215,7 +3685,7 @@ func TestAccountOnlineQueries(t *testing.T) {
err = accountsPutTotals(tx, totals, false)
require.NoError(t, err)
- updatedAccts, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, map[basics.CreatableIndex]ledgercore.ModifiedCreatable{}, proto, rnd)
+ updatedAccts, _, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, nil, nil, proto, rnd)
require.NoError(t, err)
require.Equal(t, updatesCnt.len(), len(updatedAccts))
@@ -4120,3 +4590,122 @@ func TestRemoveOfflineStateProofID(t *testing.T) {
}
}
}
+
+func TestEncodedBaseAccountDataSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ vd := baseVotingData{
+ VoteFirstValid: basics.Round(crypto.RandUint64()),
+ VoteLastValid: basics.Round(crypto.RandUint64()),
+ VoteKeyDilution: crypto.RandUint64(),
+ }
+ crypto.RandBytes(vd.VoteID[:])
+ crypto.RandBytes(vd.StateProofID[:])
+ crypto.RandBytes(vd.SelectionID[:])
+
+ baseAD := baseAccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ RewardsBase: crypto.RandUint64(),
+ RewardedMicroAlgos: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ AuthAddr: ledgertesting.RandomAddress(),
+ TotalAppSchemaNumUint: crypto.RandUint64(),
+ TotalAppSchemaNumByteSlice: crypto.RandUint64(),
+ TotalExtraAppPages: uint32(crypto.RandUint63() % uint64(math.MaxUint32)),
+ TotalAssetParams: crypto.RandUint64(),
+ TotalAssets: crypto.RandUint64(),
+ TotalAppParams: crypto.RandUint64(),
+ TotalAppLocalStates: crypto.RandUint64(),
+ baseVotingData: vd,
+ UpdateRound: crypto.RandUint64(),
+ }
+
+ encoded := baseAD.MarshalMsg(nil)
+ require.GreaterOrEqual(t, MaxEncodedBaseAccountDataSize, len(encoded))
+}
+
+func TestEncodedBaseResourceSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+	// resourcesData is suitable for keeping asset params, holding, app params, app local state
+ // but only asset + holding or app + local state can appear there
+ rdAsset := resourcesData{
+ Total: crypto.RandUint64(),
+ Decimals: uint32(crypto.RandUint63() % uint64(math.MaxUint32)),
+ DefaultFrozen: true,
+ // MetadataHash
+ UnitName: makeString(currentConsensusParams.MaxAssetUnitNameBytes),
+ AssetName: makeString(currentConsensusParams.MaxAssetNameBytes),
+ URL: makeString(currentConsensusParams.MaxAssetURLBytes),
+ Manager: ledgertesting.RandomAddress(),
+ Reserve: ledgertesting.RandomAddress(),
+ Freeze: ledgertesting.RandomAddress(),
+ Clawback: ledgertesting.RandomAddress(),
+
+ Amount: crypto.RandUint64(),
+ Frozen: true,
+ }
+ crypto.RandBytes(rdAsset.MetadataHash[:])
+
+ rdApp := resourcesData{
+
+ SchemaNumUint: crypto.RandUint64(),
+ SchemaNumByteSlice: crypto.RandUint64(),
+ // KeyValue
+
+ // ApprovalProgram
+ // ClearStateProgram
+ // GlobalState
+ LocalStateSchemaNumUint: crypto.RandUint64(),
+ LocalStateSchemaNumByteSlice: crypto.RandUint64(),
+ GlobalStateSchemaNumUint: crypto.RandUint64(),
+ GlobalStateSchemaNumByteSlice: crypto.RandUint64(),
+ ExtraProgramPages: uint32(crypto.RandUint63() % uint64(math.MaxUint32)),
+
+ ResourceFlags: 255,
+ UpdateRound: crypto.RandUint64(),
+ }
+
+	// MaxAvailableAppProgramLen is the combined size of approval and clear state since it is bound by proto.MaxAppTotalProgramLen
+ rdApp.ApprovalProgram = make([]byte, config.MaxAvailableAppProgramLen/2)
+ crypto.RandBytes(rdApp.ApprovalProgram)
+ rdApp.ClearStateProgram = make([]byte, config.MaxAvailableAppProgramLen/2)
+ crypto.RandBytes(rdApp.ClearStateProgram)
+
+ maxGlobalState := make(basics.TealKeyValue, currentConsensusParams.MaxGlobalSchemaEntries)
+ for globalKey := uint64(0); globalKey < currentConsensusParams.MaxGlobalSchemaEntries; globalKey++ {
+ prefix := fmt.Sprintf("%d|", globalKey)
+ padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
+ maxKey := prefix + padding
+ maxValue := basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
+ }
+ maxGlobalState[maxKey] = maxValue
+ }
+
+ maxLocalState := make(basics.TealKeyValue, currentConsensusParams.MaxLocalSchemaEntries)
+ for localKey := uint64(0); localKey < currentConsensusParams.MaxLocalSchemaEntries; localKey++ {
+ prefix := fmt.Sprintf("%d|", localKey)
+ padding := makeString(currentConsensusParams.MaxAppKeyLen - len(prefix))
+ maxKey := prefix + padding
+ maxValue := basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: makeString(currentConsensusParams.MaxAppSumKeyValueLens - len(maxKey)),
+ }
+ maxLocalState[maxKey] = maxValue
+ }
+
+ rdApp.GlobalState = maxGlobalState
+ rdApp.KeyValue = maxLocalState
+
+ encodedAsset := rdAsset.MarshalMsg(nil)
+ encodedApp := rdApp.MarshalMsg(nil)
+
+ require.Less(t, len(encodedAsset), len(encodedApp))
+ require.GreaterOrEqual(t, MaxEncodedBaseResourceDataSize, len(encodedApp))
+}
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
index 03af7908d..e9a20046f 100644
--- a/ledger/acctonline.go
+++ b/ledger/acctonline.go
@@ -523,7 +523,7 @@ func (ao *onlineAccounts) onlineTotalsEx(rnd basics.Round) (basics.MicroAlgos, e
var roundOffsetError *RoundOffsetError
if !errors.As(err, &roundOffsetError) {
- ao.log.Errorf("onlineTotalsImpl error: %w", err)
+ ao.log.Errorf("onlineTotalsImpl error: %v", err)
}
totalsOnline, err = ao.accountsq.lookupOnlineTotalsHistory(rnd)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index db2a99126..8f1a572d1 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -23,6 +23,7 @@ import (
"fmt"
"io"
"sort"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -60,12 +61,21 @@ const baseAccountsPendingAccountsWarnThreshold = 85000
// baseResourcesPendingAccountsBufferSize defines the size of the base resources pending accounts buffer size.
// At the beginning of a new round, the entries from this buffer are being flushed into the base resources map.
-const baseResourcesPendingAccountsBufferSize = 100000
+const baseResourcesPendingAccountsBufferSize = 10000
// baseResourcesPendingAccountsWarnThreshold defines the threshold at which the lruResources would generate a warning
// after we've surpassed a given pending account resources size. The warning is being generated when the pending accounts data
// is being flushed into the main base resources cache.
-const baseResourcesPendingAccountsWarnThreshold = 85000
+const baseResourcesPendingAccountsWarnThreshold = 8500
+
+// baseKVPendingBufferSize defines the size of the base KVs pending buffer size.
+// At the beginning of a new round, the entries from this buffer are being flushed into the base KVs map.
+const baseKVPendingBufferSize = 5000
+
+// baseKVPendingWarnThreshold defines the threshold at which the lruKV would generate a warning
+// after we've surpassed a given pending kv size. The warning is being generated when the pending kv data
+// is being flushed into the main base kv cache.
+const baseKVPendingWarnThreshold = 4250
// initializeCachesReadaheadBlocksStream defines how many block we're going to attempt to queue for the
// initializeCaches method before it can process and store the account changes to disk.
@@ -124,6 +134,23 @@ type modifiedResource struct {
ndeltas int
}
+// A modifiedKvValue represents a kv store change since the persistent state
+// stored in the DB (i.e., in the range of rounds covered by the accountUpdates
+// tracker).
+type modifiedKvValue struct {
+ // data stores the most recent value (nil == deleted)
+ data []byte
+
+	// oldData stores the previous value (nil == didn't exist)
+ oldData []byte
+
+ // ndelta keeps track of how many times the key for this value appears in
+ // accountUpdates.deltas. This is used to evict modifiedValue entries when
+ // all changes to a key have been reflected in the kv table, and no
+ // outstanding modifications remain.
+ ndeltas int
+}
+
type accountUpdates struct {
// Connection to the database.
dbs db.Pair
@@ -146,6 +173,13 @@ type accountUpdates struct {
// address&resource that appears in deltas.
resources resourcesUpdates
+ // kvDeltas stores kvPair updates for every round after dbRound.
+ kvDeltas []map[string]ledgercore.KvValueDelta
+
+ // kvStore has the most recent kv pairs for every write/del that appears in
+ // deltas.
+ kvStore map[string]modifiedKvValue
+
// creatableDeltas stores creatable updates for every round after dbRound.
creatableDeltas []map[basics.CreatableIndex]ledgercore.ModifiedCreatable
@@ -184,6 +218,9 @@ type accountUpdates struct {
// baseResources stores the most recently used resources, at exactly dbRound
baseResources lruResources
+ // baseKVs stores the most recently used KV, at exactly dbRound
+ baseKVs lruKV
+
// logAccountUpdatesMetrics is a flag for enable/disable metrics logging
logAccountUpdatesMetrics bool
@@ -288,12 +325,253 @@ func (au *accountUpdates) close() {
}
au.baseAccounts.prune(0)
au.baseResources.prune(0)
+ au.baseKVs.prune(0)
}
func (au *accountUpdates) LookupResource(rnd basics.Round, addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.AccountResource, basics.Round, error) {
return au.lookupResource(rnd, addr, aidx, ctype, true /* take lock */)
}
+func (au *accountUpdates) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return au.lookupKv(rnd, key, true /* take lock */)
+}
+
+func (au *accountUpdates) lookupKv(rnd basics.Round, key string, synchronized bool) ([]byte, error) {
+ needUnlock := false
+ if synchronized {
+ au.accountsMu.RLock()
+ needUnlock = true
+ }
+ defer func() {
+ if needUnlock {
+ au.accountsMu.RUnlock()
+ }
+ }()
+
+ // TODO: This loop and round handling is copied from other routines like
+ // lookupResource. I believe that it is overly cautious, as it always reruns
+ // the lookup if the DB round does not match the expected round. However, as
+ // long as the db round has not advanced too far (greater than `rnd`), I
+ // believe it would be valid to use. In the interest of minimizing changes,
+ // I'm not doing that now.
+
+ for {
+ currentDbRound := au.cachedDBRound
+ currentDeltaLen := len(au.deltas)
+ offset, err := au.roundOffset(rnd)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if we have this key in `kvStore`, as that means the change we
+ // care about is in kvDeltas (and maybe just kvStore itself)
+ mval, indeltas := au.kvStore[key]
+ if indeltas {
+ // Check if this is the most recent round, in which case, we can
+ // use a cache of the most recent kvStore state
+ if offset == uint64(len(au.kvDeltas)) {
+ return mval.data, nil
+ }
+
+ // the key is in the deltas, but we don't know if it appears in the
+ // delta range of [0..offset-1], so we'll need to check. Walk deltas
+ // backwards so later updates take priority.
+ for offset > 0 {
+ offset--
+ mval, ok := au.kvDeltas[offset][key]
+ if ok {
+ return mval.Data, nil
+ }
+ }
+ } else {
+			// we know that the key is not in kvDeltas - so there is no point in scanning it.
+			// we're going to fall back to search in the database, but before doing so, we should
+ // update the rnd so that it would point to the end of the known delta range.
+ // ( that would give us the best validity range )
+ rnd = currentDbRound + basics.Round(currentDeltaLen)
+ }
+
+ // check the baseKV cache
+ if pbd, has := au.baseKVs.read(key); has {
+ // we don't technically need this, since it's already in the baseKV, however, writing this over
+ // would ensure that we promote this field.
+ au.baseKVs.writePending(pbd, key)
+ return pbd.value, nil
+ }
+
+ if synchronized {
+ au.accountsMu.RUnlock()
+ needUnlock = false
+ }
+
+ // No updates of this account in kvDeltas; use on-disk DB. The check in
+ // roundOffset() made sure the round is exactly the one present in the
+ // on-disk DB.
+
+ persistedData, err := au.accountsq.lookupKeyValue(key)
+ if err != nil {
+ return nil, err
+ }
+
+ if persistedData.round == currentDbRound {
+ // if we read actual data return it. This includes deleted values
+ // where persistedData.value == nil to avoid unnecessary db lookups
+ // for deleted KVs.
+ au.baseKVs.writePending(persistedData, key)
+ return persistedData.value, nil
+ }
+
+ // The db round is unexpected...
+ if synchronized {
+ if persistedData.round < currentDbRound {
+ // Somehow the db is LOWER than it should be.
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d is behind in-memory round %d", persistedData.round, currentDbRound)
+ return nil, &StaleDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ }
+ // The db is higher, so a write must have happened. Try again.
+ au.accountsMu.RLock()
+ needUnlock = true
+ // WHY BOTH - seems the goal is just to wait until the au is aware of progress. au.cachedDBRound should be enough?
+ for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
+ au.accountsReadCond.Wait()
+ }
+ } else {
+ // in non-sync mode, we don't wait since we already assume that we're synchronized.
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d mismatching in-memory round %d", persistedData.round, currentDbRound)
+ return nil, &MismatchingDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ }
+
+ }
+}
+
+func (au *accountUpdates) LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error) {
+ return au.lookupKeysByPrefix(round, keyPrefix, maxKeyNum, true /* take lock */)
+}
+
+func (au *accountUpdates) lookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64, synchronized bool) (resultKeys []string, err error) {
+ var results map[string]bool
+ // keep track of the number of result key with value
+ var resultCount uint64
+
+ needUnlock := false
+ if synchronized {
+ au.accountsMu.RLock()
+ needUnlock = true
+ }
+ defer func() {
+ if needUnlock {
+ au.accountsMu.RUnlock()
+ }
+ // preparation of result happens in deferring function
+ // prepare result only when err != nil
+ if err == nil {
+ resultKeys = make([]string, 0, resultCount)
+ for resKey, present := range results {
+ if present {
+ resultKeys = append(resultKeys, resKey)
+ }
+ }
+ }
+ }()
+
+ // TODO: This loop and round handling is copied from other routines like
+ // lookupResource. I believe that it is overly cautious, as it always reruns
+ // the lookup if the DB round does not match the expected round. However, as
+ // long as the db round has not advanced too far (greater than `rnd`), I
+ // believe it would be valid to use. In the interest of minimizing changes,
+ // I'm not doing that now.
+
+ for {
+ currentDBRound := au.cachedDBRound
+ currentDeltaLen := len(au.deltas)
+ offset, rndErr := au.roundOffset(round)
+ if rndErr != nil {
+ return nil, rndErr
+ }
+
+ // reset `results` to be empty each iteration
+ // if db round does not match the round number returned from DB query, start over again
+ // NOTE: `results` is maintained as we walk backwards from the latest round, to DB
+ // IT IS NOT SIMPLY A SET STORING KEY NAMES!
+ // - if the boolean for the key is true: we consider the key is still valid in later round
+ // - otherwise, we consider that the key is deleted in later round, and we will not return it as part of result
+ // Thus: `resultCount` keeps track of how many VALID keys in the `results`
+ // DO NOT TRY `len(results)` TO SEE NUMBER OF VALID KEYS!
+ results = map[string]bool{}
+ resultCount = 0
+
+ for offset > 0 {
+ offset--
+ for keyInRound, mv := range au.kvDeltas[offset] {
+ if !strings.HasPrefix(keyInRound, keyPrefix) {
+ continue
+ }
+ // whether it is set or deleted in later round, if such modification exists in later round
+ // we just ignore the earlier insert
+ if _, ok := results[keyInRound]; ok {
+ continue
+ }
+ if mv.Data == nil {
+ results[keyInRound] = false
+ } else {
+ // set such key to be valid with value
+ results[keyInRound] = true
+ resultCount++
+ // check if the size of `results` reaches `maxKeyNum`
+ // if so just return the list of keys
+ if resultCount == maxKeyNum {
+ return
+ }
+ }
+ }
+ }
+
+ round = currentDBRound + basics.Round(currentDeltaLen)
+
+ // after this line, we should dig into DB I guess
+ // OTHER LOOKUPS USE "base" caches here.
+ if synchronized {
+ au.accountsMu.RUnlock()
+ needUnlock = false
+ }
+
+ // NOTE: the kv cache isn't used here because the data structure doesn't support range
+ // queries. It may be preferable to increase the SQLite cache size if these reads become
+ // too slow.
+
+ // Finishing searching updates of this account in kvDeltas, keep going: use on-disk DB
+ // to find the rest matching keys in DB.
+ dbRound, dbErr := au.accountsq.lookupKeysByPrefix(keyPrefix, maxKeyNum, results, resultCount)
+ if dbErr != nil {
+ return nil, dbErr
+ }
+ if dbRound == currentDBRound {
+ return
+ }
+
+ // The DB round is unexpected... '_>'?
+ if synchronized {
+ if dbRound < currentDBRound {
+ // does not make sense if DB round is earlier than it should be
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d is behind in-memory round %d", dbRound, currentDBRound)
+ err = &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDBRound}
+ return
+ }
+ // The DB round is higher than expected, so a write-into-DB must have happened. Start over again.
+ au.accountsMu.RLock()
+ needUnlock = true
+ // WHY BOTH - seems the goal is just to wait until the au is aware of progress. au.cachedDBRound should be enough?
+ for currentDBRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
+ au.accountsReadCond.Wait()
+ }
+ } else {
+ au.log.Errorf("accountUpdates.lookupKvPair: database round %d mismatching in-memory round %d", dbRound, currentDBRound)
+ err = &MismatchingDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDBRound}
+ return
+ }
+ }
+}
+
// LookupWithoutRewards returns the account data for a given address at a given round.
func (au *accountUpdates) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (data ledgercore.AccountData, validThrough basics.Round, err error) {
data, validThrough, _, _, err = au.lookupWithoutRewards(rnd, addr, true /* take lock*/)
@@ -583,7 +861,7 @@ func (aul *accountUpdatesLedgerEvaluator) CheckDup(config.ConsensusParams, basic
return fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initialization ")
}
-// lookupWithoutRewards returns the account balance for a given address at a given round, without the reward
+// LookupWithoutRewards returns the account balance for a given address at a given round, without the reward
func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
data, validThrough, _, _, err := aul.au.lookupWithoutRewards(rnd, addr, false /*don't sync*/)
if err != nil {
@@ -603,6 +881,10 @@ func (aul *accountUpdatesLedgerEvaluator) LookupAsset(rnd basics.Round, addr bas
return ledgercore.AssetResource{AssetParams: r.AssetParams, AssetHolding: r.AssetHolding}, err
}
+func (aul *accountUpdatesLedgerEvaluator) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return aul.au.lookupKv(rnd, key, false /* don't sync */)
+}
+
// GetCreatorForRound returns the asset/app creator for a given asset/app index at a given round
func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return aul.au.getCreatorForRound(rnd, cidx, ctype, false /* don't sync */)
@@ -665,14 +947,17 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.versions = []protocol.ConsensusVersion{hdr.CurrentProtocol}
au.deltas = nil
+ au.kvDeltas = nil
au.creatableDeltas = nil
au.accounts = make(map[basics.Address]modifiedAccount)
- au.resources = resourcesUpdates(make(map[accountCreatable]modifiedResource))
+ au.resources = make(resourcesUpdates)
+ au.kvStore = make(map[string]modifiedKvValue)
au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
au.deltasAccum = []int{0}
au.baseAccounts.init(au.log, baseAccountsPendingAccountsBufferSize, baseAccountsPendingAccountsWarnThreshold)
au.baseResources.init(au.log, baseResourcesPendingAccountsBufferSize, baseResourcesPendingAccountsWarnThreshold)
+ au.baseKVs.init(au.log, baseKVPendingBufferSize, baseKVPendingWarnThreshold)
return
}
@@ -692,10 +977,12 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.deltas = append(au.deltas, delta.Accts)
au.versions = append(au.versions, blk.CurrentProtocol)
au.creatableDeltas = append(au.creatableDeltas, delta.Creatables)
+ au.kvDeltas = append(au.kvDeltas, delta.KvMods)
au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1])
au.baseAccounts.flushPendingWrites()
au.baseResources.flushPendingWrites()
+ au.baseKVs.flushPendingWrites()
for i := 0; i < delta.Accts.Len(); i++ {
addr, data := delta.Accts.GetByIdx(i)
@@ -727,6 +1014,14 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.resources.set(key, mres)
}
+ for k, v := range delta.KvMods {
+ mvalue := au.kvStore[k]
+ mvalue.ndeltas++
+ mvalue.data = v.Data
+ // leave mvalue.oldData alone
+ au.kvStore[k] = mvalue
+ }
+
for cidx, cdelta := range delta.Creatables {
mcreat := au.creatables[cidx]
mcreat.Creator = cdelta.Creator
@@ -743,6 +1038,8 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
au.baseAccounts.prune(newBaseAccountSize)
newBaseResourcesSize := (len(au.resources) + 1) + baseResourcesPendingAccountsBufferSize
au.baseResources.prune(newBaseResourcesSize)
+ newBaseKVSize := (len(au.kvStore) + 1) + baseKVPendingBufferSize
+ au.baseKVs.prune(newBaseKVSize)
}
// lookupLatest returns the account data for a given address for the latest round.
@@ -1003,9 +1300,8 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
return macct.resource, rnd, nil
}
// the account appears in the deltas, but we don't know if it appears in the
- // delta range of [0..offset], so we'll need to check :
- // Traverse the deltas backwards to ensure that later updates take
- // priority if present.
+ // delta range of [0..offset-1], so we'll need to check. Walk deltas
+ // backwards to ensure that later updates take priority if present.
for offset > 0 {
offset--
r, ok := au.deltas[offset].GetResource(addr, aidx, ctype)
@@ -1105,9 +1401,8 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
return macct.data, rnd, rewardsVersion, rewardsLevel, nil
}
// the account appears in the deltas, but we don't know if it appears in the
- // delta range of [0..offset], so we'll need to check :
- // Traverse the deltas backwards to ensure that later updates take
- // priority if present.
+ // delta range of [0..offset-1], so we'll need to check. Walk deltas
+ // backwards to ensure that later updates take priority if present.
for offset > 0 {
offset--
d, ok := au.deltas[offset].GetData(addr)
@@ -1309,6 +1604,7 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
// being updated multiple times. When that happen, we can safely omit the intermediate updates.
dcc.compactAccountDeltas = makeCompactAccountDeltas(au.deltas[:offset], dcc.oldBase, setUpdateRound, au.baseAccounts)
dcc.compactResourcesDeltas = makeCompactResourceDeltas(au.deltas[:offset], dcc.oldBase, setUpdateRound, au.baseAccounts, au.baseResources)
+ dcc.compactKvDeltas = compactKvDeltas(au.kvDeltas[:offset])
dcc.compactCreatableDeltas = compactCreatableDeltas(au.creatableDeltas[:offset])
au.accountsMu.RUnlock()
@@ -1322,8 +1618,8 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
return nil
}
-// commitRound closure is called within the same transaction for all trackers
-// it receives current offset and dbRound
+// commitRound is called within the same transaction for all trackers; it
+// receives the current offset and dbRound.
func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *deferredCommitContext) (err error) {
offset := dcc.offset
dbRound := dcc.oldBase
@@ -1375,7 +1671,7 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx *sql.Tx, dcc *defe
// the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old values
// so that we can update the base account back.
- dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
+ dcc.updatedPersistedAccounts, dcc.updatedPersistedResources, dcc.updatedPersistedKVs, err = accountsNewRound(tx, dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.compactCreatableDeltas, dcc.genesisProto, dbRound+basics.Round(offset))
if err != nil {
return err
}
@@ -1453,6 +1749,26 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
}
}
+ for key, out := range dcc.compactKvDeltas {
+ cnt := out.ndeltas
+ mval, ok := au.kvStore[key]
+ if !ok {
+ au.log.Panicf("inconsistency: flushed %d changes to key %s, but not in au.kvStore", cnt, key)
+ }
+ if cnt > mval.ndeltas {
+ au.log.Panicf("inconsistency: flushed %d changes to key %s, but au.kvStore had %d", cnt, key, mval.ndeltas)
+ } else if cnt == mval.ndeltas {
+ delete(au.kvStore, key)
+ } else {
+ mval.ndeltas -= cnt
+ au.kvStore[key] = mval
+ }
+ }
+
+ for key, persistedKV := range dcc.updatedPersistedKVs {
+ au.baseKVs.write(persistedKV, key)
+ }
+
for cidx, modCrt := range dcc.compactCreatableDeltas {
cnt := modCrt.Ndeltas
mcreat, ok := au.creatables[cidx]
@@ -1487,6 +1803,7 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
au.deltasAccum = au.deltasAccum[offset:]
au.versions = au.versions[offset:]
au.roundTotals = au.roundTotals[offset:]
+ au.kvDeltas = au.kvDeltas[offset:]
au.creatableDeltas = au.creatableDeltas[offset:]
au.cachedDBRound = newBase
@@ -1518,6 +1835,31 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon
func (au *accountUpdates) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
+// compactKvDeltas takes an array of kv deltas (one array entry per round), and
+// compacts the array into a single map that contains all the
+// changes. Intermediate changes are eliminated. It counts the number of
+// changes per round by specifying it in the ndeltas field of the
+// modifiedKvValue. The modified values in the returned map have the earliest
+// mv.oldData, and the newest mv.data.
+func compactKvDeltas(kvDeltas []map[string]ledgercore.KvValueDelta) map[string]modifiedKvValue {
+ if len(kvDeltas) == 0 {
+ return nil
+ }
+ outKvDeltas := make(map[string]modifiedKvValue)
+ for _, roundKv := range kvDeltas {
+ for key, current := range roundKv {
+ prev, ok := outKvDeltas[key]
+ if !ok { // Record only the first OldData
+ prev.oldData = current.OldData
+ }
+ prev.data = current.Data // Replace with newest Data
+ prev.ndeltas++
+ outKvDeltas[key] = prev
+ }
+ }
+ return outKvDeltas
+}
+
// compactCreatableDeltas takes an array of creatables map deltas ( one array entry per round ), and compact the array into a single
// map that contains all the deltas changes. While doing that, the function eliminate any intermediate changes.
// It counts the number of changes per round by specifying it in the ndeltas field of the modifiedCreatable.
@@ -1560,6 +1902,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
// rowid are flushed.
au.baseAccounts.prune(0)
au.baseResources.prune(0)
+ au.baseKVs.prune(0)
startTime := time.Now()
vacuumExitCh := make(chan struct{}, 1)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index edd44549e..be1fa577b 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -35,6 +35,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
@@ -1216,7 +1217,7 @@ func TestListCreatables(t *testing.T) {
// sync with the database
var updates compactAccountDeltas
var resUpdates compactResourcesDeltas
- _, _, err = accountsNewRound(tx, updates, resUpdates, ctbsWithDeletes, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, ctbsWithDeletes, proto, basics.Round(1))
require.NoError(t, err)
// nothing left in cache
au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
@@ -1232,14 +1233,313 @@ func TestListCreatables(t *testing.T) {
// ******* Results are obtained from the database and from the cache *******
// ******* Deletes are in the database and in the cache *******
// sync with the database. This has deletes synced to the database.
- _, _, err = accountsNewRound(tx, updates, resUpdates, au.creatables, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, au.creatables, proto, basics.Round(1))
require.NoError(t, err)
- // get new creatables in the cache. There will be deletes in the cache from the previous batch.
+	// get new creatables in the cache. There will be deletes in the cache from the previous batch.
au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs,
expectedDbImage, numElementsPerSegement)
listAndCompareComb(t, au, expectedDbImage)
}
+func TestBoxNamesByAppIDs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ initialBlocksCount := 1
+ accts := make(map[basics.Address]basics.AccountData)
+
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion,
+ []map[basics.Address]basics.AccountData{accts},
+ )
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, _ := newAcctUpdates(t, ml, conf)
+ defer au.close()
+
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+ opts := auNewBlockOpts{ledgercore.AccountDeltas{}, protocol.ConsensusCurrentVersion, protoParams, knownCreatables}
+
+ testingBoxNames := []string{
+ ` `,
+ ` `,
+ ` % `,
+ ` ? = % ;`,
+ `; DROP *;`,
+ `OR 1 = 1;`,
+ `" ; SELECT * FROM kvstore; DROP acctrounds; `,
+ `; SELECT key from kvstore WHERE key LIKE %;`,
+ `?&%!=`,
+ "SELECT * FROM kvstore " + string([]byte{0, 0}) + " WHERE key LIKE %; ",
+ `b64:APj/AA==`,
+ `str:123.3/aa\\0`,
+ string([]byte{0, 255, 254, 254}),
+ string([]byte{0, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF}),
+ string([]byte{'%', 'a', 'b', 'c', 0, 0, '%', 'a', '!'}),
+ `
+`,
+ `™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`,
+ `∑´´˙©˚¬∆ßåƒ√¬`,
+ `背负青天而莫之夭阏者,而后乃今将图南。`,
+ `於浩歌狂熱之際中寒﹔於天上看見深淵。`,
+ `於一切眼中看見無所有﹔於無所希望中得救。`,
+ `有一遊魂,化為長蛇,口有毒牙。`,
+ `不以嚙人,自嚙其身,終以殞顛。`,
+ `那些智力超常的人啊`,
+ `认为已经,熟悉了云和闪电的脾气`,
+ `就不再迷惑,就不必了解自己,世界和他人`,
+ `每天只管,被微风吹拂,与猛虎谈情`,
+ `他们从来,不需要楼梯,只有窗口`,
+ `把一切交付于梦境,和优美的浪潮`,
+ `在这颗行星所有的酒馆,青春自由似乎理所应得`,
+ `面向涣散的未来,只唱情歌,看不到坦克`,
+ `在科学和啤酒都不能安抚的夜晚`,
+ `他们丢失了四季,惶惑之行开始`,
+ `这颗行星所有的酒馆,无法听到远方的呼喊`,
+ `野心勃勃的灯火,瞬间吞没黑暗的脸庞`,
+ }
+
+ appIDset := make(map[basics.AppIndex]struct{}, len(testingBoxNames))
+ boxNameToAppID := make(map[string]basics.AppIndex, len(testingBoxNames))
+ var currentRound basics.Round
+
+ // keep adding one box key and one random appID (non-duplicated)
+ for i, boxName := range testingBoxNames {
+ currentRound = basics.Round(i + 1)
+
+ var appID basics.AppIndex
+ for {
+ appID = basics.AppIndex(crypto.RandUint64())
+ _, preExisting := appIDset[appID]
+ if !preExisting {
+ break
+ }
+ }
+
+ appIDset[appID] = struct{}{}
+ boxNameToAppID[boxName] = appID
+
+ boxChange := ledgercore.KvValueDelta{Data: []byte(boxName)}
+ auNewBlock(t, currentRound, au, accts, opts, map[string]ledgercore.KvValueDelta{
+ logic.MakeBoxKey(appID, boxName): boxChange,
+ })
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ if uint64(currentRound) > conf.MaxAcctLookback {
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+ } else {
+ require.Equal(t, basics.Round(0), au.cachedDBRound)
+ }
+
+ // check input, see all present keys are all still there
+ for _, storedBoxName := range testingBoxNames[:i+1] {
+ res, err := au.LookupKeysByPrefix(currentRound, logic.MakeBoxKey(boxNameToAppID[storedBoxName], ""), 10000)
+ require.NoError(t, err)
+ require.Len(t, res, 1)
+ require.Equal(t, logic.MakeBoxKey(boxNameToAppID[storedBoxName], storedBoxName), res[0])
+ }
+ }
+
+ // removing inserted boxes
+ for _, boxName := range testingBoxNames {
+ currentRound++
+
+ // remove inserted box
+ appID := boxNameToAppID[boxName]
+ auNewBlock(t, currentRound, au, accts, opts, map[string]ledgercore.KvValueDelta{
+ logic.MakeBoxKey(appID, boxName): {},
+ })
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure recently removed key is not present, and it is not part of the result
+ res, err := au.LookupKeysByPrefix(currentRound, logic.MakeBoxKey(boxNameToAppID[boxName], ""), 10000)
+ require.NoError(t, err)
+ require.Len(t, res, 0)
+ }
+}
+
+func TestKVCache(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ initialBlocksCount := 1
+ accts := make(map[basics.Address]basics.AccountData)
+
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion,
+ []map[basics.Address]basics.AccountData{accts},
+ )
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au, _ := newAcctUpdates(t, ml, conf)
+ defer au.close()
+
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+ opts := auNewBlockOpts{ledgercore.AccountDeltas{}, protocol.ConsensusCurrentVersion, protoParams, knownCreatables}
+
+ kvCnt := 1000
+ kvsPerBlock := 100
+ curKV := 0
+ var currentRound basics.Round
+ currentDBRound := basics.Round(1)
+
+ kvMap := make(map[string][]byte)
+ for i := 0; i < kvCnt; i++ {
+ kvMap[fmt.Sprintf("%d", i)] = []byte(fmt.Sprintf("value%d", i))
+ }
+
+ // add kvsPerBlock KVs on each iteration. The first kvCnt/kvsPerBlock
+ // iterations produce a block with kvCnt kv manipulations. The last
+ // conf.MaxAcctLookback iterations are meant to verify the contents of the cache
+ // are correct after every kv containing block has been committed.
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+ kvMods := make(map[string]ledgercore.KvValueDelta)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ curKV++
+ val := kvMap[name]
+ kvMods[name] = ledgercore.KvValueDelta{Data: val, OldData: nil}
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ if uint64(currentRound) > conf.MaxAcctLookback {
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+ } else {
+ require.Equal(t, basics.Round(0), au.cachedDBRound)
+ }
+
+ // verify cache doesn't contain the new kvs until committed to DB.
+ for name := range kvMods {
+ _, has := au.baseKVs.read(name)
+ require.False(t, has)
+ }
+
+		// verify committed kvs appear in the kv cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ startKV := (currentDBRound - 1) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ require.Equal(t, kvMap[name], persistedValue.value)
+ }
+ }
+ }
+
+ // updating inserted KVs
+ curKV = 0
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+
+ kvMods := make(map[string]ledgercore.KvValueDelta)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ val := fmt.Sprintf("modified value%d", curKV)
+ kvMods[name] = ledgercore.KvValueDelta{Data: []byte(val)}
+ curKV++
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+
+ // verify cache doesn't contain updated kv values that haven't been committed to db
+ if i < kvCnt/kvsPerBlock {
+ for name := range kvMods {
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ require.Equal(t, kvMap[name], persistedValue.value)
+ }
+ }
+
+		// verify committed updated kv values appear in the kv cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ lookback := basics.Round(kvCnt/kvsPerBlock + int(conf.MaxAcctLookback) + 1)
+ if currentDBRound < lookback {
+ continue
+ }
+
+ startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ expectedValue := fmt.Sprintf("modified value%s", name)
+ require.Equal(t, expectedValue, string(persistedValue.value))
+ }
+ }
+ }
+
+ // deleting KVs
+ curKV = 0
+ for i := 0; i < kvCnt/kvsPerBlock+int(conf.MaxAcctLookback); i++ {
+ currentRound = currentRound + 1
+
+ kvMods := make(map[string]ledgercore.KvValueDelta)
+ if i < kvCnt/kvsPerBlock {
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", curKV)
+ kvMods[name] = ledgercore.KvValueDelta{Data: nil}
+ curKV++
+ }
+ }
+
+ auNewBlock(t, currentRound, au, accts, opts, kvMods)
+ auCommitSync(t, currentRound, au, ml)
+
+ // ensure rounds
+ rnd := au.latest()
+ require.Equal(t, currentRound, rnd)
+ require.Equal(t, basics.Round(uint64(currentRound)-conf.MaxAcctLookback), au.cachedDBRound)
+
+ // verify cache doesn't contain updated kv values that haven't been committed to db
+ if i < kvCnt/kvsPerBlock {
+ for name := range kvMods {
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ value := fmt.Sprintf("modified value%s", name)
+ require.Equal(t, value, string(persistedValue.value))
+ }
+ }
+
+		// verify committed updated kv values appear in the kv cache
+ for ; currentDBRound <= au.cachedDBRound; currentDBRound++ {
+ lookback := basics.Round(2*(kvCnt/kvsPerBlock+int(conf.MaxAcctLookback)) + 1)
+ if currentDBRound < lookback {
+ continue
+ }
+
+ startKV := (currentDBRound - lookback) * basics.Round(kvsPerBlock)
+ for j := 0; j < kvsPerBlock; j++ {
+ name := fmt.Sprintf("%d", uint64(startKV)+uint64(j))
+ persistedValue, has := au.baseKVs.read(name)
+ require.True(t, has)
+ require.True(t, persistedValue.value == nil)
+ }
+ }
+ }
+}
+
func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) {
rows, err := tx.Query("SELECT rowid, address, data FROM accountbase")
if err != nil {
@@ -1319,7 +1619,7 @@ func BenchmarkLargeMerkleTrieRebuild(b *testing.B) {
}
err := ml.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
return
})
require.NoError(b, err)
@@ -1671,9 +1971,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
newAccts := applyPartialDeltas(accts[i-1], updates)
blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- },
+ BlockHeader: bookkeeping.BlockHeader{Round: i},
}
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = protocolVersion
@@ -2453,7 +2751,7 @@ type auNewBlockOpts struct {
knownCreatables map[basics.CreatableIndex]bool
}
-func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[basics.Address]basics.AccountData, data auNewBlockOpts) {
+func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[basics.Address]basics.AccountData, data auNewBlockOpts, kvMods map[string]ledgercore.KvValueDelta) {
rewardLevel := uint64(0)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, rnd-1, prevRound)
@@ -2462,9 +2760,7 @@ func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[bas
newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, data.updates, rewardLevel, data.protoParams, base, prevTotals)
blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(rnd),
- },
+ BlockHeader: bookkeeping.BlockHeader{Round: rnd},
}
blk.RewardsLevel = rewardLevel
blk.CurrentProtocol = data.version
@@ -2472,6 +2768,7 @@ func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[bas
delta.Accts.MergeAccounts(data.updates)
delta.Creatables = creatablesFromUpdates(base, data.updates, data.knownCreatables)
delta.Totals = newTotals
+ delta.KvMods = kvMods
au.newBlock(blk, delta)
}
@@ -2537,7 +2834,7 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
// prepare block
opts := auNewBlockOpts{updates, testProtocolVersion, protoParams, knownCreatables}
- auNewBlock(t, i, au, base, opts)
+ auNewBlock(t, i, au, base, opts, nil)
// commit changes synchroniously
auCommitSync(t, i, au, ml)
@@ -2601,7 +2898,7 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
au.cachedDBRound = oldCachedDBRound
au.accountsMu.Unlock()
opts := auNewBlockOpts{ledgercore.AccountDeltas{}, testProtocolVersion, protoParams, knownCreatables}
- auNewBlock(t, rnd+1, au, accts[rnd], opts)
+ auNewBlock(t, rnd+1, au, accts[rnd], opts, nil)
auCommitSync(t, rnd+1, au, ml)
wg.Wait()
@@ -2685,7 +2982,7 @@ func TestAcctUpdatesLookupResources(t *testing.T) {
// prepare block
opts := auNewBlockOpts{updates, testProtocolVersion, protoParams, knownCreatables}
- auNewBlock(t, i, au, base, opts)
+ auNewBlock(t, i, au, base, opts, nil)
if i <= basics.Round(protoParams.MaxBalLookback+1) {
auCommitSync(t, i, au, ml)
diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go
index dd6213318..e9e9f699c 100644
--- a/ledger/apply/application_test.go
+++ b/ledger/apply/application_test.go
@@ -746,7 +746,7 @@ func TestAppCallOptIn(t *testing.T) {
prevMaxAppsOptedIn := config.Consensus[protocol.ConsensusV24].MaxAppsOptedIn
for _, testProtoVer := range optInCountTest {
cparams, ok := config.Consensus[testProtoVer]
- a.True(ok)
+ a.True(ok, testProtoVer)
if cparams.MaxAppsOptedIn > 0 {
a.LessOrEqual(prevMaxAppsOptedIn, cparams.MaxAppsOptedIn)
}
diff --git a/ledger/apply/payment.go b/ledger/apply/payment.go
index 9c1f0f94e..c86f791e5 100644
--- a/ledger/apply/payment.go
+++ b/ledger/apply/payment.go
@@ -99,6 +99,15 @@ func Payment(payment transactions.PaymentTxnFields, header transactions.Header,
return fmt.Errorf("cannot close: %d outstanding applications opted in. Please opt out or clear them", totalAppLocalStates)
}
+ // Confirm that there is no box-related state in the account
+ if rec.TotalBoxes > 0 {
+ return fmt.Errorf("cannot close: %d outstanding boxes", rec.TotalBoxes)
+ }
+ if rec.TotalBoxBytes > 0 {
+ // This should be impossible because every box byte comes from the existence of a box.
+ return fmt.Errorf("cannot close: %d outstanding box bytes", rec.TotalBoxBytes)
+ }
+
// Can't have created apps remaining either
totalAppParams := rec.TotalAppParams
if totalAppParams > 0 {
diff --git a/ledger/internal/apptxn_test.go b/ledger/apptxn_test.go
index 86a4a7fea..194c6e84a 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/apptxn_test.go
@@ -14,12 +14,12 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal_test
+package ledger
import (
"encoding/hex"
"fmt"
- "strings"
+ "strconv"
"testing"
"github.com/stretchr/testify/require"
@@ -27,43 +27,27 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger"
- "github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
- "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
-// main wraps up some TEAL source in a header and footer so that it is
-// an app that does nothing at create time, but otherwise runs source,
-// then approves, if the source avoids panicing and leaves the stack
-// empty.
-func main(source string) string {
- return strings.Replace(fmt.Sprintf(`txn ApplicationID
- bz end
- %s
- end: int 1`, source), ";", "\n", -1)
-}
-
// TestPayAction ensures a pay in teal affects balances
func TestPayAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // Inner txns start in v30
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
- create := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ ai := dl.fundedApp(addrs[0], 200000, // account min balance, plus fees
+ main(`
itxn_begin
int pay
itxn_field TypeEnum
@@ -72,130 +56,110 @@ func TestPayAction(t *testing.T) {
txn Accounts 1
itxn_field Receiver
itxn_submit
-`),
- }
+ `))
- ai := basics.AppIndex(1)
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: ai.Address(),
- Amount: 200000, // account min balance, plus fees
- }
+ require.Equal(t, ai, basics.AppIndex(1))
- payout1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: ai,
- Accounts: []basics.Address{addrs[1]}, // pay self
- }
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &create, &fund, &payout1)
- vb := endBlock(t, l, eval)
+ payout1 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: ai,
+ Accounts: []basics.Address{addrs[1]}, // pay self
+ }
- // AD contains expected appIndex
- require.Equal(t, ai, vb.Block().Payset[0].ApplyData.ApplicationID)
+ dl.fullBlock(&payout1)
- ad0 := micros(t, l, addrs[0])
- ad1 := micros(t, l, addrs[1])
- app := micros(t, l, ai.Address())
+ ad0 := micros(dl.t, dl.generator, addrs[0])
+ ad1 := micros(dl.t, dl.generator, addrs[1])
+ app := micros(dl.t, dl.generator, ai.Address())
- genAccounts := genesisInitState.Accounts
- // create(1000) and fund(1000 + 200000)
- require.Equal(t, uint64(202000), genAccounts[addrs[0]].MicroAlgos.Raw-ad0)
- // paid 5000, but 1000 fee
- require.Equal(t, uint64(4000), ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
- // app still has 194000 (paid out 5000, and paid fee to do it)
- require.Equal(t, uint64(194000), app)
+ genAccounts := genBalances.Balances
+ // create(1000) and fund(1000 + 200000)
+ require.Equal(t, uint64(202000), genAccounts[addrs[0]].MicroAlgos.Raw-ad0)
+ // paid 5000, but 1000 fee
+ require.Equal(t, uint64(4000), ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
+ // app still has 194000 (paid out 5000, and paid fee to do it)
+ require.Equal(t, uint64(194000), app)
- // Build up Residue in RewardsState so it's ready to pay
- for i := 1; i < 10; i++ {
- eval = nextBlock(t, l)
- endBlock(t, l, eval)
- }
-
- eval = nextBlock(t, l)
- payout2 := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: ai,
- Accounts: []basics.Address{addrs[2]}, // pay other
- }
- txn(t, l, eval, &payout2)
- // confirm that modifiedAccounts can see account in inner txn
- vb = endBlock(t, l, eval)
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
- deltas := vb.Delta()
- require.Contains(t, deltas.Accts.ModifiedAccounts(), addrs[2])
-
- payInBlock := vb.Block().Payset[0]
- rewards := payInBlock.ApplyData.SenderRewards.Raw
- require.Greater(t, rewards, uint64(2000)) // some biggish number
- inners := payInBlock.ApplyData.EvalDelta.InnerTxns
- require.Len(t, inners, 1)
-
- // addr[2] is going to get the same rewards as addr[1], who
- // originally sent the top-level txn. Both had their algo balance
- // touched and has very nearly the same balance.
- require.Equal(t, rewards, inners[0].ReceiverRewards.Raw)
- // app gets none, because it has less than 1A
- require.Equal(t, uint64(0), inners[0].SenderRewards.Raw)
-
- ad1 = micros(t, l, addrs[1])
- ad2 := micros(t, l, addrs[2])
- app = micros(t, l, ai.Address())
-
- // paid 5000, in first payout (only), but paid 1000 fee in each payout txn
- require.Equal(t, rewards+3000, ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
- // app still has 188000 (paid out 10000, and paid 2k fees to do it)
- // no rewards because owns less than an algo
- require.Equal(t, uint64(200000)-10000-2000, app)
-
- // paid 5000 by payout2, never paid any fees, got same rewards
- require.Equal(t, rewards+uint64(5000), ad2-genAccounts[addrs[2]].MicroAlgos.Raw)
-
- // Now fund the app account much more, so we can confirm it gets rewards.
- tenkalgos := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: ai.Address(),
- Amount: 10 * 1000 * 1000000, // account min balance, plus fees
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &tenkalgos)
- endBlock(t, l, eval)
- beforepay := micros(t, l, ai.Address())
+ payout2 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: ai,
+ Accounts: []basics.Address{addrs[2]}, // pay other
+ }
+ vb := dl.fullBlock(&payout2)
+ // confirm that modifiedAccounts can see account in inner txn
+
+ deltas := vb.Delta()
+ require.Contains(t, deltas.Accts.ModifiedAccounts(), addrs[2])
+
+ payInBlock := vb.Block().Payset[0]
+ rewards := payInBlock.ApplyData.SenderRewards.Raw
+ require.Greater(t, rewards, uint64(2000)) // some biggish number
+ inners := payInBlock.ApplyData.EvalDelta.InnerTxns
+ require.Len(t, inners, 1)
+
+ // addr[2] is going to get the same rewards as addr[1], who
+ // originally sent the top-level txn. Both had their algo balance
+ // touched and has very nearly the same balance.
+ require.Equal(t, rewards, inners[0].ReceiverRewards.Raw)
+ // app gets none, because it has less than 1A
+ require.Equal(t, uint64(0), inners[0].SenderRewards.Raw)
+
+ ad1 = micros(dl.t, dl.validator, addrs[1])
+ ad2 := micros(dl.t, dl.validator, addrs[2])
+ app = micros(dl.t, dl.validator, ai.Address())
+
+ // paid 5000, in first payout (only), but paid 1000 fee in each payout txn
+ require.Equal(t, rewards+3000, ad1-genAccounts[addrs[1]].MicroAlgos.Raw)
+ // app still has 188000 (paid out 10000, and paid 2k fees to do it)
+ // no rewards because owns less than an algo
+ require.Equal(t, uint64(200000)-10000-2000, app)
+
+ // paid 5000 by payout2, never paid any fees, got same rewards
+ require.Equal(t, rewards+uint64(5000), ad2-genAccounts[addrs[2]].MicroAlgos.Raw)
+
+ // Now fund the app account much more, so we can confirm it gets rewards.
+ tenkalgos := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: ai.Address(),
+ Amount: 10 * 1000 * 1000000, // account min balance, plus fees
+ }
+ dl.fullBlock(&tenkalgos)
+ beforepay := micros(dl.t, dl.validator, ai.Address())
- // Build up Residue in RewardsState so it's ready to pay again
- for i := 1; i < 10; i++ {
- eval = nextBlock(t, l)
- endBlock(t, l, eval)
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, payout2.Noted("2"))
- vb = endBlock(t, l, eval)
+ // Build up Residue in RewardsState so it's ready to pay again
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+ vb = dl.fullBlock(payout2.Noted("2"))
- afterpay := micros(t, l, ai.Address())
+ afterpay := micros(dl.t, dl.validator, ai.Address())
- payInBlock = vb.Block().Payset[0]
- inners = payInBlock.ApplyData.EvalDelta.InnerTxns
- require.Len(t, inners, 1)
+ payInBlock = vb.Block().Payset[0]
+ inners = payInBlock.ApplyData.EvalDelta.InnerTxns
+ require.Len(t, inners, 1)
- appreward := inners[0].SenderRewards.Raw
- require.Greater(t, appreward, uint64(1000))
+ appreward := inners[0].SenderRewards.Raw
+ require.Greater(t, appreward, uint64(1000))
- require.Equal(t, beforepay+appreward-5000-1000, afterpay)
+ require.Equal(t, beforepay+appreward-5000-1000, afterpay)
+ })
}
// TestAxferAction ensures axfers in teal have the intended effects
func TestAxferAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusFuture)
defer l.Close()
asa := txntest.Txn{
@@ -383,36 +347,10 @@ submit: itxn_submit
require.Equal(t, uint64(20000), back-left)
}
-func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *ledger.Ledger {
- return newTestLedgerWithConsensusVersion(t, balances, protocol.ConsensusFuture)
-}
-
-func newTestLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) *ledger.Ledger {
- var genHash crypto.Digest
- crypto.RandBytes(genHash[:])
- return newTestLedgerFull(t, balances, cv, genHash)
-}
-
-func newTestLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest) *ledger.Ledger {
- genBlock, err := bookkeeping.MakeGenesisBlock(cv, balances, "test", genHash)
- require.NoError(t, err)
- require.False(t, genBlock.FeeSink.IsZero())
- require.False(t, genBlock.RewardsPool.IsZero())
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- l, err := ledger.OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
- Block: genBlock,
- Accounts: balances.Balances,
- GenesisHash: genHash,
- }, cfg)
- require.NoError(t, err)
- return l
-}
-
// TestClawbackAction ensures an app address can act as clawback address.
func TestClawbackAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -498,6 +436,7 @@ func TestClawbackAction(t *testing.T) {
// TestRekeyAction ensures an app can transact for a rekeyed account
func TestRekeyAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -602,6 +541,7 @@ skipclose:
// properly removes the app as an authorizer for the account
func TestRekeyActionCloseAccount(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -677,6 +617,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
// TestDuplicatePayAction shows two pays with same parameters can be done as inner tarnsactions
func TestDuplicatePayAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -752,6 +693,7 @@ func TestDuplicatePayAction(t *testing.T) {
// TestInnerTxCount ensures that inner transactions increment the TxnCounter
func TestInnerTxnCount(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -800,6 +742,7 @@ func TestInnerTxnCount(t *testing.T) {
// TestAcfgAction ensures assets can be created and configured in teal
func TestAcfgAction(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -976,6 +919,7 @@ submit: itxn_submit
// we can know, so it helps exercise txncounter changes.
func TestAsaDuringInit(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1029,6 +973,7 @@ func TestAsaDuringInit(t *testing.T) {
func TestRekey(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1080,6 +1025,7 @@ func TestRekey(t *testing.T) {
func TestNote(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1128,6 +1074,7 @@ func TestNote(t *testing.T) {
func TestKeyreg(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1228,6 +1175,7 @@ nonpart:
func TestInnerAppCall(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1295,6 +1243,7 @@ func TestInnerAppCall(t *testing.T) {
// the changes expected when invoked.
func TestInnerAppManipulate(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1405,8 +1354,8 @@ func TestCreateAndUse(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// At 30 the asset reference is illegal, then from v31 it works.
- testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
createapp := txntest.Txn{
@@ -1475,8 +1424,8 @@ func TestGtxnEffects(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// At 30 `gtxn CreatedAssetId is illegal, then from v31 it works.
- testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
createapp := txntest.Txn{
@@ -1537,8 +1486,8 @@ func TestBasicReentry(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
app0 := txntest.Txn{
@@ -1731,8 +1680,8 @@ func TestMaxInnerTxForSingleAppCall(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// v31 = inner appl
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
program := `
@@ -1891,8 +1840,8 @@ func TestInnerAppVersionCalling(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
// 31 allowed inner appls. v34 lowered proto.MinInnerApplVersion
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
three, err := logic.AssembleStringWithVersion("int 1", 3)
@@ -2085,8 +2034,8 @@ func TestAppDowngrade(t *testing.T) {
// Confirm that in old protocol version, downgrade is legal
// Start at 28 because we want to v4 app to downgrade to v3
- testConsensusRange(t, 28, 30, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 28, 30, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
create := txntest.Txn{
@@ -2116,8 +2065,8 @@ func TestAppDowngrade(t *testing.T) {
dl.fullBlock(&update)
})
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
create := txntest.Txn{
@@ -2361,7 +2310,7 @@ func executeMegaContract(b *testing.B) {
var cv protocol.ConsensusVersion = "temp test"
config.Consensus[cv] = vTest
- l := newTestLedgerWithConsensusVersion(b, genBalances, cv)
+ l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv)
defer l.Close()
defer delete(config.Consensus, cv)
@@ -2458,6 +2407,7 @@ func BenchmarkMaximumCallStackDepth(b *testing.B) {
// TestInnerClearState ensures inner ClearState performs close out properly, even if rejects.
func TestInnerClearState(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2546,6 +2496,7 @@ itxn_submit
// allowed to use more than 700 (MaxAppProgramCost)
func TestInnerClearStateBadCallee(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2647,6 +2598,7 @@ skip:
// be called with less than 700 (MaxAppProgramCost)) OpcodeBudget.
func TestInnerClearStateBadCaller(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2768,6 +2720,7 @@ itxn_submit
// v30, but not in vFuture. (Test should add v31 after it exists.)
func TestClearStateInnerPay(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
tests := []struct {
consensus protocol.ConsensusVersion
@@ -2783,7 +2736,7 @@ func TestClearStateInnerPay(t *testing.T) {
t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedgerWithConsensusVersion(t, genBalances, test.consensus)
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, test.consensus)
defer l.Close()
app0 := txntest.Txn{
@@ -2880,6 +2833,7 @@ itxn_submit
// calls when using inners.
func TestGlobalChangesAcrossApps(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2988,6 +2942,7 @@ check:
// calls when using inners.
func TestLocalChangesAcrossApps(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -3101,8 +3056,8 @@ func TestForeignAppAccountsAccessible(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
appA := txntest.Txn{
@@ -3167,8 +3122,8 @@ func TestForeignAppAccountsImmutable(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
appA := txntest.Txn{
@@ -3221,8 +3176,8 @@ func TestForeignAppAccountsMutable(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 32, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
appA := txntest.Txn{
@@ -3302,9 +3257,8 @@ func TestReloadWithTxns(t *testing.T) {
partitiontest.PartitionTest(t)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 34, 0, func(t *testing.T, ver int) {
- fmt.Printf("testConsensus %d\n", ver)
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
defer dl.Close()
dl.fullBlock() // So that the `block` opcode has a block to inspect
@@ -3320,3 +3274,484 @@ func TestReloadWithTxns(t *testing.T) {
dl.reloadLedgers()
})
}
+
+// TestEvalAppState ensures txns in a group can't violate app state schema
+// limits. It ensures that commitToParent -> applyChild copies child's cow state
+// usage counts into parent and the usage counts are correctly propagated from
+// parent cow to child cow and back. When limits are not violated, the test
+// ensures that the updates are correct.
+func TestEvalAppState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v24 = apps
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ appcall1 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ GlobalStateSchema: basics.StateSchema{NumByteSlice: 1},
+ ApprovalProgram: `#pragma version 2
+ txn ApplicationID
+ bz create
+ byte "caller"
+ txn Sender
+ app_global_put
+ b ok
+create:
+ byte "creator"
+ txn Sender
+ app_global_put
+ok:
+ int 1`,
+ ClearStateProgram: "#pragma version 2\nint 1",
+ }
+
+ appcall2 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApplicationID: 1,
+ }
+
+ dl.beginBlock()
+ dl.txgroup("store bytes count 2 exceeds schema bytes count 1", &appcall1, &appcall2)
+
+ appcall1.GlobalStateSchema = basics.StateSchema{NumByteSlice: 2}
+ dl.txgroup("", &appcall1, &appcall2)
+ vb := dl.endBlock()
+ deltas := vb.Delta()
+
+ params, ok := deltas.Accts.GetAppParams(addrs[0], 1)
+ require.True(t, ok)
+ state := params.Params.GlobalState
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["caller"])
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["creator"])
+ })
+}
+
+func TestGarbageClearState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v24 = apps
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "int 1",
+ ClearStateProgram: []byte{},
+ }
+
+ dl.txn(&createTxn, "invalid program (empty)")
+
+ createTxn.ClearStateProgram = []byte{0xfe} // bad uvarint
+ dl.txn(&createTxn, "invalid version")
+ })
+}
+
+func TestRewardsInAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v15 put rewards into ApplyData
+ ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
+ nonpartTxn := txntest.Txn{Type: protocol.KeyRegistrationTx, Sender: addrs[2], Nonparticipation: true}
+ payNonPart := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[2]}
+
+		if ver < 18 { // Nonpart keyreg happens in v18
+ dl.txn(&nonpartTxn, "tries to mark an account as nonparticipating")
+ } else {
+ dl.fullBlock(&nonpartTxn)
+ }
+
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
+
+ vb := dl.fullBlock(&payTxn, &payNonPart)
+ payInBlock := vb.Block().Payset[0]
+ nonPartInBlock := vb.Block().Payset[1]
+ if ver >= 15 {
+ require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
+ require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+ // Sender is not due for more, and Receiver is nonpart
+ require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
+ if ver < 18 {
+ require.Greater(t, nonPartInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ } else {
+ require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
+ }
+ } else {
+ require.Zero(t, payInBlock.ApplyData.SenderRewards)
+ require.Zero(t, payInBlock.ApplyData.ReceiverRewards)
+ require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
+ require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
+ }
+ })
+}
+
+// TestDeleteNonExistantKeys checks if the EvalDeltas from deleting missing keys are correct
+func TestDeleteNonExistantKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // AVM v2 (apps)
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+ dl := NewDoubleLedger(t, genBalances, cv)
+ defer dl.Close()
+
+ const appID basics.AppIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+byte "missing_global"
+app_global_del
+int 0
+byte "missing_local"
+app_local_del
+`),
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ OnCompletion: transactions.OptInOC,
+ }
+
+ vb := dl.fullBlock(&createTxn, &optInTxn)
+ require.Len(t, vb.Block().Payset[1].EvalDelta.GlobalDelta, 0)
+ // For a while, we encoded an empty localdelta
+ deltas := 1
+ if ver >= 27 {
+ deltas = 0
+ }
+ require.Len(t, vb.Block().Payset[1].EvalDelta.LocalDeltas, deltas)
+ })
+}
+
+// TestDuplicates checks that an identical transaction is rejected whether it
+// reappears in the same block, a later block, or the same group, and that
+// changing the Note makes it acceptable again.
+func TestDuplicates(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, 11, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		pay := txntest.Txn{
+			Type:     "pay",
+			Sender:   addrs[0],
+			Receiver: addrs[1],
+			Amount:   10,
+		}
+		dl.txn(&pay)                                  // accepted the first time
+		dl.txn(&pay, "transaction already in ledger") // dup in the same block
+
+		// Test same transaction in a later block
+		dl.txn(&pay, "transaction already in ledger")
+
+		// Change the note so it can go in again
+		pay.Note = []byte("1")
+		dl.txn(&pay)
+
+		// Change note again, but try the txn twice in same group
+		if dl.generator.GenesisProto().MaxTxGroupSize > 1 {
+			pay.Note = []byte("2")
+			dl.txgroup("transaction already in ledger", &pay, &pay)
+		}
+	})
+}
+
+// TestHeaderAccess tests FirstValidTime and `block` which can access previous
+// block headers. Rounds older than MaxTxnLife (or round 0) must be rejected.
+func TestHeaderAccess(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	// Added in v34
+	ledgertesting.TestConsensusRange(t, 34, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		// FirstValid 0 means FirstValidTime would need round -1's header: illegal.
+		fvt := txntest.Txn{
+			Type:            "appl",
+			Sender:          addrs[0],
+			FirstValid:      0,
+			ApprovalProgram: "txn FirstValidTime",
+		}
+		dl.txn(&fvt, "round 0 is not available")
+
+		// advance current to 2
+		pay := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]}
+		dl.fullBlock(&pay)
+
+		// FirstValid 1 still needs round 0's header: also illegal.
+		fvt.FirstValid = 1
+		dl.txn(&fvt, "round 0 is not available")
+
+		fvt.FirstValid = 2
+		dl.txn(&fvt) // current becomes 3
+
+		// Advance current round far enough to test access MaxTxnLife ago
+		for i := 0; i < int(config.Consensus[cv].MaxTxnLife); i++ {
+			dl.fullBlock()
+		}
+
+		// current (in progress) should be 1003, so Latest is 1002. Confirm.
+		require.EqualValues(t, 1002, dl.generator.Latest())
+		require.EqualValues(t, 1002, dl.validator.Latest())
+
+		fvt.FirstValid = 1003
+		fvt.LastValid = 1010
+		dl.txn(&fvt) // success advances the round
+		// now we're confident current is 1004, so construct a txn that is as
+		// old as possible, and confirm access.
+		fvt.FirstValid = 1004 - basics.Round(config.Consensus[cv].MaxTxnLife)
+		fvt.LastValid = 1004
+		dl.txn(&fvt)
+	})
+
+}
+
+// TestLogsInBlock ensures that logs appear in the block properly, and that
+// logs from a failed ClearState program are NOT recorded.
+func TestLogsInBlock(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	// Run tests from v30 onward
+	ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		createTxn := txntest.Txn{
+			Type:            "appl",
+			Sender:          addrs[0],
+			ApprovalProgram: "byte \"APP\"\n log\n int 1",
+			// Fail the clear state
+			ClearStateProgram: "byte \"CLR\"\n log\n int 0",
+		}
+		vb := dl.fullBlock(&createTxn)
+		createInBlock := vb.Block().Payset[0]
+		appID := createInBlock.ApplyData.ApplicationID
+		require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
+
+		optInTxn := txntest.Txn{
+			Type:          protocol.ApplicationCallTx,
+			Sender:        addrs[1],
+			ApplicationID: appID,
+			OnCompletion:  transactions.OptInOC,
+		}
+		vb = dl.fullBlock(&optInTxn)
+		optInInBlock := vb.Block().Payset[0]
+		require.Equal(t, "APP", optInInBlock.ApplyData.EvalDelta.Logs[0])
+
+		clearTxn := txntest.Txn{
+			Type:          protocol.ApplicationCallTx,
+			Sender:        addrs[1],
+			ApplicationID: appID,
+			OnCompletion:  transactions.ClearStateOC,
+		}
+		vb = dl.fullBlock(&clearTxn)
+		clearInBlock := vb.Block().Payset[0]
+		// Logs do not appear if the ClearState failed
+		require.Len(t, clearInBlock.ApplyData.EvalDelta.Logs, 0)
+	})
+}
+
+// TestUnfundedSenders confirms that accounts that don't even exist
+// can be the Sender in some situations. If some other transaction
+// covers the fee, and the transaction itself does not require an
+// asset or a min balance, it's fine.
+func TestUnfundedSenders(t *testing.T) {
+	/*
+	   In a 0-fee transaction from unfunded sender, we still call balances.Move
+	   to “pay” the fee. Move() does not short-circuit a Move of 0 (for good
+	   reason, it allows compounding rewards). Therefore, in Move, we do
+	   rewards processing on the unfunded account. Before
+	   proto.UnfundedSenders, the rewards processing would set the RewardsBase,
+	   which would require the account be written to DB, and therefore the MBR
+	   check would kick in (and fail). Now it skips the update if the account
+	   has less than RewardsUnit, as the update is meaningless anyway.
+	*/
+
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+	ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		asaIndex := basics.AssetIndex(1)
+
+		// ghost is an address that is never funded
+		ghost := basics.Address{0x01}
+
+		asaCreate := txntest.Txn{
+			Type:   "acfg",
+			Sender: addrs[0],
+			AssetParams: basics.AssetParams{
+				Total:    10,
+				Clawback: ghost,
+				Freeze:   ghost,
+				Manager:  ghost,
+			},
+		}
+
+		appCreate := txntest.Txn{
+			Type:   "appl",
+			Sender: addrs[0],
+		}
+
+		dl.fullBlock(&asaCreate, &appCreate)
+
+		// Advance so that rewardsLevel increases
+		for i := 1; i < 10; i++ {
+			dl.fullBlock()
+		}
+
+		// benefactor pays a double fee, covering the 0-fee txn grouped with it
+		benefactor := txntest.Txn{
+			Type:     "pay",
+			Sender:   addrs[0],
+			Receiver: addrs[0],
+			Fee:      2000,
+		}
+
+		// Each of these is a 0-fee txn from the unfunded ghost that requires
+		// no min balance of its own.
+		ephemeral := []txntest.Txn{
+			{
+				Type:     "pay",
+				Amount:   0,
+				Sender:   ghost,
+				Receiver: ghost,
+				Fee:      0,
+			},
+			{ // Axfer of 0
+				Type:          "axfer",
+				AssetAmount:   0,
+				Sender:        ghost,
+				AssetReceiver: basics.Address{0x02},
+				XferAsset:     basics.AssetIndex(1),
+				Fee:           0,
+			},
+			{ // Clawback
+				Type:          "axfer",
+				AssetAmount:   0,
+				Sender:        ghost,
+				AssetReceiver: addrs[0],
+				AssetSender:   addrs[1],
+				XferAsset:     asaIndex,
+				Fee:           0,
+			},
+			{ // Freeze
+				Type:          "afrz",
+				Sender:        ghost,
+				FreezeAccount: addrs[0], // creator, therefore is opted in
+				FreezeAsset:   asaIndex,
+				AssetFrozen:   true,
+				Fee:           0,
+			},
+			{ // Unfreeze
+				Type:          "afrz",
+				Sender:        ghost,
+				FreezeAccount: addrs[0], // creator, therefore is opted in
+				FreezeAsset:   asaIndex,
+				AssetFrozen:   false,
+				Fee:           0,
+			},
+			{ // App call
+				Type:          "appl",
+				Sender:        ghost,
+				ApplicationID: basics.AppIndex(2),
+				Fee:           0,
+			},
+			{ // App creation (only works because it's also deleted)
+				Type:         "appl",
+				Sender:       ghost,
+				OnCompletion: transactions.DeleteApplicationOC,
+				Fee:          0,
+			},
+		}
+
+		// v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
+		var problem string
+		if ver < 34 {
+			// In the old days, balances.Move would try to increase the rewardsState on the unfunded account
+			problem = "balance 0 below min"
+		}
+		for i, e := range ephemeral {
+			dl.txgroup(problem, benefactor.Noted(strconv.Itoa(i)), &e)
+		}
+	})
+}
+
+// TestAppCallAppDuringInit is similar to TestUnfundedSenders test, but now the
+// unfunded sender is a newly created app. The fee has been paid by the outer
+// transaction, so the app should be able to make an app call as that requires
+// no min balance.
+func TestAppCallAppDuringInit(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	// NOTE(review): sibling tests also call t.Parallel(); confirm whether its
+	// absence here is intentional.
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		approve := txntest.Txn{
+			Type:   "appl",
+			Sender: addrs[0],
+		}
+
+		// construct a simple app
+		vb := dl.fullBlock(&approve)
+
+		// now make a new app that calls it during init
+		approveID := vb.Block().Payset[0].ApplicationID
+
+		// Advance so that rewardsLevel increases
+		for i := 1; i < 10; i++ {
+			dl.fullBlock()
+		}
+
+		callInInit := txntest.Txn{
+			Type:   "appl",
+			Sender: addrs[0],
+			ApprovalProgram: `
+              itxn_begin
+               int appl
+               itxn_field TypeEnum
+               txn Applications 1
+               itxn_field ApplicationID
+              itxn_submit
+              int 1
+            `,
+			ForeignApps: []basics.AppIndex{approveID},
+			Fee:         2000, // Enough to have the inner fee paid for
+		}
+		// v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
+		var problem string
+		if ver < 34 {
+			// In the old days, balances.Move would try to increase the rewardsState on the unfunded account
+			problem = "balance 0 below min"
+		}
+		dl.txn(&callInInit, problem)
+	})
+}
diff --git a/ledger/boxtxn_test.go b/ledger/boxtxn_test.go
new file mode 100644
index 000000000..a66a8a7a6
--- /dev/null
+++ b/ledger/boxtxn_test.go
@@ -0,0 +1,663 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "bytes"
+ "encoding/binary"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// boxAppSource is a TEAL app that dispatches on ApplicationArgs[0]
+// ("create"/"recreate"/"delete"/"set"/"put"/"get"/"check") to exercise every
+// box opcode. arg[1] is the box name; arg[2] (when present) is a size or value.
+var boxAppSource = main(`
+	txn ApplicationArgs 0
+	byte "create" // create box named arg[1]
+	==
+	txn ApplicationArgs 0
+	byte "recreate"
+	==
+	||
+	bz del
+	txn ApplicationArgs 1
+	int 24
+	txn NumAppArgs
+	int 2
+	==
+	bnz default
+	pop // get rid of 24
+	txn ApplicationArgs 2
+	btoi
+	default:
+	txn ApplicationArgs 0
+	byte "recreate"
+	==
+	bz first
+	box_create
+	!
+	assert
+	b end
+	first:
+	box_create
+	assert
+	b end
+	del: // delete box arg[1]
+	txn ApplicationArgs 0; byte "delete"; ==
+	bz set
+	txn ApplicationArgs 1
+	box_del
+	assert
+	b end
+	set: // put arg[2] at start of box arg[1]
+	txn ApplicationArgs 0; byte "set"; ==
+	bz put
+	txn ApplicationArgs 1
+	int 0
+	txn ApplicationArgs 2
+	box_replace
+	b end
+	put: // box_put arg[2] as replacement for box arg[1]
+	txn ApplicationArgs 0; byte "put"; ==
+	bz get
+	txn ApplicationArgs 1
+	txn ApplicationArgs 2
+	box_put
+	b end
+	get: // log box arg[1], after getting it with box_get
+	txn ApplicationArgs 0; byte "get"; ==
+	bz check
+	txn ApplicationArgs 1
+	box_get
+	assert
+	log
+	b end
+	check: // fail unless arg[2] is the prefix of box arg[1]
+	txn ApplicationArgs 0; byte "check"; ==
+	bz bad
+	txn ApplicationArgs 1
+	int 0
+	txn ApplicationArgs 2
+	len
+	box_extract
+	txn ApplicationArgs 2
+	==
+	assert
+	b end
+	bad:
+	err
+`)
+
+// passThruSource calls the app in txn.Applications[1] the same way it was
+// called, forwarding the (2 or 3) ApplicationArgs into the inner call.
+var passThruSource = main(`
+	itxn_begin
+	txn Applications 1; itxn_field ApplicationID
+	txn TypeEnum; itxn_field TypeEnum
+	// copy my app args into itxn app args (too lazy to write a loop), these are
+	// always called with 2 or 3 args.
+	txn ApplicationArgs 0; itxn_field ApplicationArgs
+	txn ApplicationArgs 1; itxn_field ApplicationArgs
+	txn NumAppArgs; int 2; ==; bnz skip
+	txn ApplicationArgs 2; itxn_field ApplicationArgs
+	skip:
+	itxn_submit
+`)
+
+const boxVersion = 36
+
+// boxFee returns the additional min-balance obligation for one box whose
+// name plus value totals nameAndValueSize bytes: a flat per-box charge plus
+// a per-byte charge.
+func boxFee(p config.ConsensusParams, nameAndValueSize uint64) uint64 {
+	return p.BoxFlatMinBalance + nameAndValueSize*p.BoxByteMinBalance
+}
+
+// TestBoxCreate tests MBR changes around allocation, deallocation
+func TestBoxCreate(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		// increment for a size 24 box with 4 letter name (24 + 4 = 28 bytes)
+		proto := config.Consensus[cv]
+		mbr := boxFee(proto, 28)
+
+		// fund with enough MBR for the app account plus three boxes
+		appIndex := dl.fundedApp(addrs[0], proto.MinBalance+3*mbr, boxAppSource)
+
+		call := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: appIndex,
+		}
+
+		// a box is only accessible if it appears in the txn's Boxes refs
+		adam := call.Args("create", "adam")
+		dl.txn(adam, "invalid Box reference adam")
+		adam.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("adam")}}
+		dl.txn(adam)
+		dl.txn(adam.Args("check", "adam", "\x00\x00"))
+		// creating the same box twice in one group fails the second create
+		dl.txgroup("box_create\nassert", adam.Noted("one"), adam.Noted("two"))
+		bobo := call.Args("create", "bobo")
+		dl.txn(bobo, "invalid Box reference bobo")
+		bobo.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("bobo")}}
+		dl.txn(bobo)
+		dl.txgroup("box_create\nassert", bobo.Noted("one"), bobo.Noted("two"))
+
+		// duplicate create also fails within a manually built block
+		dl.beginBlock()
+		chaz := call.Args("create", "chaz")
+		chaz.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("chaz")}}
+		dl.txn(chaz)
+		dl.txn(chaz.Noted("again"), "box_create\nassert")
+		dl.endBlock()
+
+		// new block
+		dl.txn(chaz.Noted("again"), "box_create\nassert")
+		dogg := call.Args("create", "dogg")
+		dogg.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("dogg")}}
+		dl.txn(dogg, "below min") // no MBR left for a fourth box
+		dl.txn(chaz.Args("delete", "chaz"))
+		dl.txn(chaz.Args("delete", "chaz").Noted("again"), "box_del\nassert")
+		dl.txn(dogg) // deleting chaz freed the MBR
+		dl.txn(bobo.Args("delete", "bobo"))
+
+		// empty name is illegal
+		empty := call.Args("create", "")
+		dl.txn(empty, "box names may not be zero")
+		// and, of course, that's true even if there's a box ref with the empty name
+		empty.Boxes = []transactions.BoxRef{{}}
+		dl.txn(empty, "box names may not be zero")
+	})
+}
+
+// TestBoxRecreate tests behavior when box_create is called for a box that already exists
+func TestBoxRecreate(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		// increment for a size 4 box with 4 letter name (4 + 4 = 8 bytes)
+		proto := config.Consensus[cv]
+		mbr := boxFee(proto, 8)
+
+		appIndex := dl.fundedApp(addrs[0], proto.MinBalance+mbr, boxAppSource)
+
+		call := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: appIndex,
+			Boxes:         []transactions.BoxRef{{Index: 0, Name: []byte("adam")}},
+		}
+
+		create := call.Args("create", "adam", "\x04") // box value size is 4 bytes
+		recreate := call.Args("recreate", "adam", "\x04")
+
+		// recreate of a non-existent box fails (box_create returns 0)
+		dl.txn(recreate, "box_create\n!\nassert")
+		dl.txn(create)
+		dl.txn(recreate)
+		dl.txn(call.Args("set", "adam", "\x01\x02\x03\x04"))
+		dl.txn(call.Args("check", "adam", "\x01\x02\x03\x04"))
+		dl.txn(recreate.Noted("again"))
+		// a recreate does not change the value
+		dl.txn(call.Args("check", "adam", "\x01\x02\x03\x04").Noted("after recreate"))
+		// recreating with a smaller size fails
+		dl.txn(call.Args("recreate", "adam", "\x03"), "box size mismatch 4 3")
+		// recreating with a larger size fails
+		dl.txn(call.Args("recreate", "adam", "\x05"), "box size mismatch 4 5")
+		dl.txn(call.Args("check", "adam", "\x01\x02\x03\x04").Noted("after failed recreates"))
+
+		// delete and actually create again
+		dl.txn(call.Args("delete", "adam"))
+		dl.txn(call.Args("create", "adam", "\x03"))
+
+		dl.txn(call.Args("set", "adam", "\x03\x02\x01"))
+		dl.txn(call.Args("check", "adam", "\x03\x02\x01"))
+		dl.txn(recreate.Noted("after delete"), "box size mismatch 3 4")
+		dl.txn(call.Args("recreate", "adam", "\x03"))
+		dl.txn(call.Args("check", "adam", "\x03\x02\x01").Noted("after delete and recreate"))
+	})
+}
+
+// TestBoxCreateAvailability shows how boxes can be created during an app's
+// very first transaction group, both by pre-funding the predicted app address
+// and by funding + invoking the new app from a "trampoline" app's inners.
+func TestBoxCreateAvailability(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		accessInCreate := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: 0, // This is a create
+			Boxes:         []transactions.BoxRef{{Index: 0, Name: []byte("hello")}},
+			ApprovalProgram: `
+	byte "hello"
+	int 10
+	box_create
+`,
+		}
+
+		// We know box_create worked because we finished and checked MBR
+		dl.txn(&accessInCreate, "balance 0 below min")
+
+		// But let's fund it and be sure. This is "psychic". We're going to fund
+		// the app address that we know the app will get. So this is a nice
+		// test, but unrealistic way to actually create a box.
+		psychic := basics.AppIndex(2)
+
+		proto := config.Consensus[cv]
+		dl.txn(&txntest.Txn{
+			Type:     "pay",
+			Sender:   addrs[0],
+			Receiver: psychic.Address(),
+			Amount:   proto.MinBalance + boxFee(proto, 15),
+		})
+		dl.txn(&accessInCreate)
+
+		// Now, a more realistic, though tricky, way to get a box created during
+		// the app's first txgroup in existence is to create it in tx0, and then
+		// in tx1 fund it using an inner tx, then invoke it with an inner
+		// transaction. During that invocation, the app will have access to the
+		// boxes supplied as "0 refs", since they were resolved to the app ID
+		// during creation.
+
+		accessWhenCalled := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: 0, // This is a create
+			Boxes:         []transactions.BoxRef{{Index: 0, Name: []byte("hello")}},
+			// Note that main() wraps the program so it does not run at creation time.
+			ApprovalProgram: main(`
+	byte "hello"
+	int 10
+	box_create
+	assert
+	byte "we did it"
+	log
+`),
+		}
+
+		trampoline := dl.fundedApp(addrs[0], 1_000_000, main(`
+	// Fund the app created in the txn behind me.
+	txn GroupIndex
+	int 1
+	-
+	gtxns CreatedApplicationID
+	dup // copy for use when calling
+	dup // test copy
+	assert
+	app_params_get AppAddress
+	assert
+
+	itxn_begin
+	itxn_field Receiver
+	int 500000
+	itxn_field Amount
+	int pay
+	itxn_field TypeEnum
+	itxn_submit
+
+	// Now invoke it, so it can intialize (and create the "hello" box)
+	itxn_begin
+	itxn_field ApplicationID
+	int appl
+	itxn_field TypeEnum
+	itxn_submit
+`))
+
+		call := txntest.Txn{
+			Sender:        addrs[0],
+			Type:          "appl",
+			ApplicationID: trampoline,
+		}
+
+		dl.beginBlock()
+		dl.txgroup("", &accessWhenCalled, &call)
+		vb := dl.endBlock()
+
+		// Make sure that we actually did it.
+		require.Equal(t, "we did it", vb.Block().Payset[1].ApplyData.EvalDelta.InnerTxns[1].EvalDelta.Logs[0])
+	})
+}
+
+// TestBoxRW tests reading and writing boxes in consecutive transactions
+func TestBoxRW(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		// NOTE(review): this logger is constructed but never read or passed
+		// anywhere visible here — confirm whether it can be removed.
+		var bufNewLogger bytes.Buffer
+		log := logging.NewLogger()
+		log.SetOutput(&bufNewLogger)
+
+		appIndex := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
+		call := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: appIndex,
+			Boxes:         []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
+		}
+
+		dl.txn(call.Args("create", "x", "\x10"))    // 16
+		dl.txn(call.Args("set", "x", "ABCDEFGHIJ")) // 10 long
+		dl.txn(call.Args("check", "x", "ABCDE"))
+		dl.txn(call.Args("check", "x", "ABCDEFGHIJ"))
+		dl.txn(call.Args("check", "x", "ABCDEFGHIJ\x00"))
+
+		dl.txn(call.Args("delete", "x"))
+		dl.txn(call.Args("check", "x", "ABC"), "no such box")
+		dl.txn(call.Args("create", "x", "\x08"))
+		dl.txn(call.Args("check", "x", "\x00")) // it was cleared
+		dl.txn(call.Args("set", "x", "ABCDEFGHIJ"), "replacement end 10")
+		dl.txn(call.Args("check", "x", "\x00")) // still clear
+		dl.txn(call.Args("set", "x", "ABCDEFGH"))
+		dl.txn(call.Args("check", "x", "ABCDEFGH\x00"), "extraction end 9")
+		dl.txn(call.Args("check", "x", "ABCDEFGH"))
+		dl.txn(call.Args("set", "x", "ABCDEFGHI"), "replacement end 9")
+
+		// Advance more than 320 rounds, ensure box is still there
+		for i := 0; i < 330; i++ {
+			dl.fullBlock()
+		}
+		time.Sleep(5 * time.Second) // balancesFlushInterval, so commit happens
+		dl.fullBlock(call.Args("check", "x", "ABCDEFGH"))
+		time.Sleep(100 * time.Millisecond) // give commit time to run, and prune au caches
+		dl.fullBlock(call.Args("check", "x", "ABCDEFGH"))
+
+		// a second box needs its own ref, and WellFormed rejects bad Index
+		dl.txn(call.Args("create", "yy"), "invalid Box reference yy")
+		withBr := call.Args("create", "yy")
+		withBr.Boxes = append(withBr.Boxes, transactions.BoxRef{Index: 1, Name: []byte("yy")})
+		require.Error(dl.t, withBr.Txn().WellFormed(transactions.SpecialAddresses{}, dl.generator.GenesisProto()))
+		withBr.Boxes[1].Index = 0
+		dl.txn(withBr)
+	})
+}
+
+// TestBoxAccountData tests that an account's data changes when boxes are created
+func TestBoxAccountData(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	// encode a uint64 as a big-endian 8-byte app arg
+	uint64ToArgStr := func(i uint64) string {
+		encoded := make([]byte, 8)
+		binary.BigEndian.PutUint64(encoded, i)
+		return string(encoded)
+	}
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		proto := config.Consensus[cv]
+
+		// NOTE(review): this logger is constructed but never read or passed
+		// anywhere visible here — confirm whether it can be removed.
+		var bufNewLogger bytes.Buffer
+		log := logging.NewLogger()
+		log.SetOutput(&bufNewLogger)
+
+		appIndex := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
+		call := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: appIndex,
+			Boxes:         []transactions.BoxRef{{Index: 0, Name: []byte("x")}, {Index: 0, Name: []byte("y")}},
+		}
+
+		// verifyAppSrc asserts args[0..2] equal Accounts[1]'s AcctMinBalance,
+		// AcctTotalBoxes, and AcctTotalBoxBytes respectively.
+		verifyAppSrc := main(`
+txn ApplicationArgs 0
+btoi
+txn Accounts 1
+acct_params_get AcctMinBalance
+assert
+==
+assert
+
+txn ApplicationArgs 1
+btoi
+txn Accounts 1
+acct_params_get AcctTotalBoxes
+assert
+==
+assert
+
+txn ApplicationArgs 2
+btoi
+txn Accounts 1
+acct_params_get AcctTotalBoxBytes
+assert
+==
+assert
+`)
+		verifyAppIndex := dl.fundedApp(addrs[0], 0, verifyAppSrc)
+		verifyAppCall := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: verifyAppIndex,
+			Accounts:      []basics.Address{appIndex.Address()},
+		}
+
+		// The app account has no box data initially
+		dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance), "\x00", "\x00"))
+
+		dl.txn(call.Args("create", "x", "\x10")) // 16
+
+		// It gets updated when a new box is created
+		dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance+proto.BoxFlatMinBalance+17*proto.BoxByteMinBalance), "\x01", "\x11"))
+
+		dl.txn(call.Args("create", "y", "\x05"))
+
+		// And again
+		dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance+2*proto.BoxFlatMinBalance+23*proto.BoxByteMinBalance), "\x02", "\x17"))
+
+		// Advance more than 320 rounds, ensure box is still there
+		for i := 0; i < 330; i++ {
+			dl.fullBlock()
+		}
+		time.Sleep(5 * time.Second) // balancesFlushInterval, so commit happens
+		dl.fullBlock(call.Args("check", "x", string(make([]byte, 16))))
+		time.Sleep(100 * time.Millisecond) // give commit time to run, and prune au caches
+		dl.fullBlock(call.Args("check", "x", string(make([]byte, 16))))
+
+		// Still the same after caches are flushed
+		dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance+2*proto.BoxFlatMinBalance+23*proto.BoxByteMinBalance), "\x02", "\x17"))
+
+		dl.txns(call.Args("delete", "x"), call.Args("delete", "y"))
+
+		// Data gets removed after boxes are deleted
+		dl.txn(verifyAppCall.Args(uint64ToArgStr(proto.MinBalance), "\x00", "\x00"))
+	})
+}
+
+// TestBoxIOBudgets checks that box read/write budgets scale with the number
+// of box refs in the transaction, and that oversized boxes are rejected.
+func TestBoxIOBudgets(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		appIndex := dl.fundedApp(addrs[0], 0, boxAppSource)
+		call := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: appIndex,
+			Boxes:         []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
+		}
+		// each (possibly empty) box ref adds 1024 to the write budget
+		dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+			"write budget (1024) exceeded")
+		call.Boxes = append(call.Boxes, transactions.BoxRef{})
+		dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+			"write budget (2048) exceeded")
+		call.Boxes = append(call.Boxes, transactions.BoxRef{})
+		dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+			"write budget (3072) exceeded")
+		call.Boxes = append(call.Boxes, transactions.BoxRef{})
+		dl.txn(call.Args("create", "x", "\x10\x00"), // now there are 4 box refs
+			"below min") // big box would need more balance
+		dl.txn(call.Args("create", "x", "\x10\x01"), // 4097
+			"write budget (4096) exceeded")
+
+		// Create 4,096 byte box
+		proto := config.Consensus[cv]
+		fundApp := txntest.Txn{
+			Type:     "pay",
+			Sender:   addrs[0],
+			Receiver: appIndex.Address(),
+			Amount:   proto.MinBalance + boxFee(proto, 4096+1), // remember key len!
+		}
+		create := call.Args("create", "x", "\x10\x00")
+
+		// Slight detour - Prove insufficient funding fails creation.
+		fundApp.Amount--
+		dl.txgroup("below min", &fundApp, create)
+		fundApp.Amount++
+
+		// Confirm desired creation happens.
+		dl.txgroup("", &fundApp, create)
+
+		// Now that we've created a 4,096 byte box, test READ budget
+		// It works at the start, because call still has 4 brs.
+		dl.txn(call.Args("check", "x", "\x00"))
+		call.Boxes = call.Boxes[:3]
+		dl.txn(call.Args("check", "x", "\x00"),
+			"box read budget (3072) exceeded")
+
+		// Give a budget over 32768, confirm failure anyway
+		empties := [32]transactions.BoxRef{}
+		// These tests skip WellFormed, so the huge Boxes is ok
+		call.Boxes = append(call.Boxes, empties[:]...)
+		dl.txn(call.Args("create", "x", "\x80\x01"), "box size too large") // 32769
+	})
+}
+
+// TestBoxInners tries various box manipulations through inner transactions
+func TestBoxInners(t *testing.T) {
+	partitiontest.PartitionTest(t)
+	t.Parallel()
+
+	genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+	ledgertesting.TestConsensusRange(t, boxVersion, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion) {
+		dl := NewDoubleLedger(t, genBalances, cv)
+		defer dl.Close()
+
+		// Advance the creatable counter, so we don't have very low app ids that
+		// could be mistaken for indices into ForeignApps.
+		dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+		dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+		dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+		dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
+
+		boxIndex := dl.fundedApp(addrs[0], 2_000_000, boxAppSource)  // there are some big boxes made
+		passIndex := dl.fundedApp(addrs[0], 120_000, passThruSource) // lowish, show it's not paying for boxes
+		call := txntest.Txn{
+			Type:          "appl",
+			Sender:        addrs[0],
+			ApplicationID: passIndex,
+			ForeignApps:   []basics.AppIndex{boxIndex},
+			Boxes:         []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
+		}
+		// The current Boxes gives top-level access to "x", not the inner app
+		dl.txn(call.Args("create", "x", "\x10"), // 16
+			"invalid Box reference x")
+
+		// This isn't right: Index should be index into ForeignApps
+		call.Boxes = []transactions.BoxRef{{Index: uint64(boxIndex), Name: []byte("x")}}
+		require.Error(t, call.Txn().WellFormed(transactions.SpecialAddresses{}, dl.generator.genesisProto))
+
+		call.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("x")}}
+		dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
+			"write budget (1024) exceeded")
+		dl.txn(call.Args("create", "x", "\x04\x00")) // 1024
+		call.Boxes = append(call.Boxes, transactions.BoxRef{Index: 1, Name: []byte("y")})
+		dl.txn(call.Args("create", "y", "\x08\x00")) // 2048
+
+		require.Len(t, call.Boxes, 2)
+		setX := call.Args("set", "x", "A")
+		dl.txn(setX, "read budget") // Boxes has x and y, their combined length is too big
+		setX.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("x")}}
+		dl.txn(setX)
+
+		setY := call.Args("set", "y", "B")
+		dl.txn(setY, "read budget") // Boxes has x and y, their combined length is too big
+		setY.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("y")}}
+		dl.txn(setY, "read budget") // Y is bigger, needs more than 1 br
+		// We recommend "empty" br, but a duplicate is also ok
+		setY.Boxes = append(setY.Boxes, transactions.BoxRef{Index: 1, Name: []byte("y")})
+		dl.txn(setY) // len(y) = 2048, io budget is 2*1024 right now
+
+		// non-existent box also works
+		setY.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("y")}, {Index: 0, Name: []byte("nope")}}
+		dl.txn(setY) // len(y) = 2048, io budget is 2*1024 right now
+
+		// now show can read both boxes based on brs in tx1
+		checkX := call.Args("check", "x", "A")
+		checkX.Boxes = nil
+		checkY := call.Args("check", "y", "B")
+		require.Len(t, checkY.Boxes, 2)
+		// can't see x and y because read budget is only 2*1024
+		dl.txgroup("box read budget", checkX, checkY)
+		checkY.Boxes = append(checkY.Boxes, transactions.BoxRef{})
+		dl.txgroup("", checkX, checkY)
+
+		require.Len(t, setY.Boxes, 2) // recall that setY has ("y", "nope") right now. no "x"
+		dl.txgroup("invalid Box reference x", checkX, setY)
+
+		setY.Boxes = append(setY.Boxes, transactions.BoxRef{Index: 1, Name: []byte("x")})
+		dl.txgroup("", checkX, setY)
+
+		// Cleanup
+		dl.txn(call.Args("del", "x"), "read budget")
+		dl.txn(call.Args("del", "y"), "read budget")
+		// surprising but correct: they work when combined, because both txns
+		// have both box refs, so the read budget goes up.
+		dl.txgroup("", call.Args("delete", "x"), call.Args("delete", "y"))
+
+		// Try some get/put action
+		dl.txn(call.Args("put", "x", "john doe"))
+		vb := dl.fullBlock(call.Args("get", "x"))
+		// we are passing this thru to the underlying box app which logs the get
+		require.Equal(t, "john doe", vb.Block().Payset[0].ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
+		dl.txn(call.Args("check", "x", "john"))
+
+		// bad change because of length
+		dl.txn(call.Args("put", "x", "steve doe"), "box_put wrong size")
+		vb = dl.fullBlock(call.Args("get", "x"))
+		require.Equal(t, "john doe", vb.Block().Payset[0].ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
+
+		// good change
+		dl.txn(call.Args("put", "x", "mark doe"))
+		dl.txn(call.Args("check", "x", "mark d"))
+	})
+}
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index 5cfe0f3c4..5f22b5d84 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -18,6 +18,7 @@ package ledger
import (
"archive/tar"
+ "bytes"
"compress/gzip"
"context"
"database/sql"
@@ -62,9 +63,6 @@ const (
trieRebuildAccountChunkSize = 16384
// trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
trieRebuildCommitFrequency = 65536
- // trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
- // we attempt to commit them to disk while writing a batch of rounds balances to disk.
- trieAccumulatedChangesFlush = 256
// CatchpointDirName represents the directory name in which all the catchpoints files are stored
CatchpointDirName = "catchpoints"
@@ -217,10 +215,10 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
var biggestChunkLen uint64
if ct.enableGeneratingCatchpointFiles {
- // Generate the catchpoint file. This need to be done inline so that it will
- // block any new accounts that from being written. generateCatchpointData()
- // expects that the accounts data would not be modified in the background during
- // it's execution.
+ // Generate the catchpoint file. This is done inline so that it will
+ // block any new accounts from being written. generateCatchpointData()
+ // expects that the accounts data would not be modified in the
+ // background during its execution.
var err error
totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData(
ctx, dbRound, updatingBalancesDuration)
@@ -534,7 +532,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx *sql.Tx, dcc *d
dcc.stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano())
}
- err = ct.accountsUpdateBalances(dcc.compactAccountDeltas, dcc.compactResourcesDeltas)
+ err = ct.accountsUpdateBalances(dcc.compactAccountDeltas, dcc.compactResourcesDeltas, dcc.compactKvDeltas, dcc.oldBase, dcc.newBase)
if err != nil {
return err
}
@@ -647,6 +645,11 @@ func doRepackCatchpoint(ctx context.Context, header CatchpointFileHeader, bigges
}
}
+// repackCatchpoint takes the header (that must be made "late" in order to have
+// the latest blockhash) and the (snappy compressed) catchpoint data from
+// dataPath and regurgitates it to look like catchpoints have always looked - a
+// tar file with the header in the first "file" and the catchpoint data in file
+// chunks, all compressed with gzip instead of snappy.
func repackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestChunkLen uint64, dataPath string, outPath string) error {
// Initialize streams.
fin, err := os.OpenFile(dataPath, os.O_RDONLY, 0666)
@@ -925,7 +928,7 @@ func (ct *catchpointTracker) close() {
}
// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
-func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas, resourcesDeltas compactResourcesDeltas) (err error) {
+func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas, resourcesDeltas compactResourcesDeltas, kvDeltas map[string]modifiedKvValue, oldBase basics.Round, newBase basics.Round) (err error) {
if !ct.catchpointEnabled() {
return nil
}
@@ -1007,19 +1010,67 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
}
}
- if accumulatedChanges >= trieAccumulatedChangesFlush {
- accumulatedChanges = 0
- _, err = ct.balancesTrie.Commit()
- if err != nil {
- return
+ for key, mv := range kvDeltas {
+ if mv.oldData == nil && mv.data == nil { // Came and went within the delta span
+ continue
+ }
+ if mv.oldData != nil {
+ if mv.data != nil && bytes.Equal(mv.oldData, mv.data) {
+ continue // changed back within the delta span
+ }
+ deleteHash := kvHashBuilderV6(key, mv.oldData)
+ deleted, err = ct.balancesTrie.Delete(deleteHash)
+ if err != nil {
+ return fmt.Errorf("failed to delete kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(deleteHash), key, err)
+ }
+ if !deleted {
+ ct.log.Warnf("failed to delete kv hash '%s' from merkle trie for key %v", hex.EncodeToString(deleteHash), key)
+ } else {
+ accumulatedChanges++
+ }
+ }
+
+ if mv.data != nil {
+ addHash := kvHashBuilderV6(key, mv.data)
+ added, err = ct.balancesTrie.Add(addHash)
+ if err != nil {
+ return fmt.Errorf("attempted to add duplicate kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(addHash), key, err)
+ }
+ if !added {
+ ct.log.Warnf("attempted to add duplicate kv hash '%s' from merkle trie for key %v", hex.EncodeToString(addHash), key)
+ } else {
+ accumulatedChanges++
+ }
}
}
// write it all to disk.
+ var cstats merkletrie.CommitStats
if accumulatedChanges > 0 {
- _, err = ct.balancesTrie.Commit()
+ cstats, err = ct.balancesTrie.Commit()
}
+ if ct.log.GetTelemetryEnabled() {
+ root, rootErr := ct.balancesTrie.RootHash()
+ if rootErr != nil {
+ ct.log.Infof("accountsUpdateBalances: error retrieving balances trie root: %v", rootErr)
+ return
+ }
+ ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointRootUpdateEvent, telemetryspec.CatchpointRootUpdateEventDetails{
+ Root: root.String(),
+ OldBase: uint64(oldBase),
+ NewBase: uint64(newBase),
+ NewPageCount: cstats.NewPageCount,
+ NewNodeCount: cstats.NewNodeCount,
+ UpdatedPageCount: cstats.UpdatedPageCount,
+ UpdatedNodeCount: cstats.UpdatedNodeCount,
+ DeletedPageCount: cstats.DeletedPageCount,
+ FanoutReallocatedNodeCount: cstats.FanoutReallocatedNodeCount,
+ PackingReallocatedNodeCount: cstats.PackingReallocatedNodeCount,
+ LoadedPages: cstats.LoadedPages,
+ })
+
+ }
return
}
@@ -1057,7 +1108,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
start := time.Now()
ledgerGeneratecatchpointCount.Inc(nil)
err := ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
- catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
+ catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx, ResourcesPerCatchpointFileChunk)
if err != nil {
return
}
@@ -1114,9 +1165,9 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
return 0, 0, 0, err
}
- catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
+ catchpointGenerationStats.FileSize = uint64(catchpointWriter.writtenBytes)
catchpointGenerationStats.WritingDuration = uint64(time.Since(startTime).Nanoseconds())
- catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
+ catchpointGenerationStats.AccountsCount = catchpointWriter.totalAccounts
ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
ct.log.With("accountsRound", accountsRound).
With("writingDuration", catchpointGenerationStats.WritingDuration).
@@ -1127,7 +1178,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
Infof("Catchpoint data file was generated")
- return catchpointWriter.GetTotalAccounts(), catchpointWriter.GetTotalChunks(), catchpointWriter.GetBiggestChunkLen(), nil
+ return catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil
}
func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx *sql.Tx, accountsRound basics.Round, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error {
@@ -1357,47 +1408,64 @@ func removeSingleCatchpointFileFromDisk(dbDirectory, fileToDelete string) (err e
return nil
}
+func hashBufV6(affinity uint64, kind byte) []byte {
+ hash := make([]byte, 4+crypto.DigestSize)
+ // write out the lowest 32 bits of the affinity value. This should improve
+ // the caching of the trie by allowing recent updates to be in-cache, and
+ // "older" nodes will be left alone.
+ for i, prefix := 3, affinity; i >= 0; i, prefix = i-1, prefix>>8 {
+ // the following takes the prefix & 255 -> hash[i]
+ hash[i] = byte(prefix)
+ }
+ hash[4] = kind
+ return hash
+}
+
+func finishV6(v6hash []byte, prehash []byte) []byte {
+ entryHash := crypto.Hash(prehash)
+ copy(v6hash[5:], entryHash[1:])
+ return v6hash[:]
+
+}
+
// accountHashBuilderV6 calculates the hash key used for the trie by combining the account address and the account data
func accountHashBuilderV6(addr basics.Address, accountData *baseAccountData, encodedAccountData []byte) []byte {
- hash := make([]byte, 4+crypto.DigestSize)
hashIntPrefix := accountData.UpdateRound
if hashIntPrefix == 0 {
hashIntPrefix = accountData.RewardsBase
}
+ hash := hashBufV6(hashIntPrefix, 0) // 0 indicates an account
// write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
// recent updated to be in-cache, and "older" nodes will be left alone.
- for i, prefix := 3, hashIntPrefix; i >= 0; i, prefix = i-1, prefix>>8 {
- // the following takes the prefix & 255 -> hash[i]
- hash[i] = byte(prefix)
- }
- hash[4] = 0 // set the 5th byte to zero to indicate it's a account base record hash
prehash := make([]byte, crypto.DigestSize+len(encodedAccountData))
copy(prehash[:], addr[:])
copy(prehash[crypto.DigestSize:], encodedAccountData[:])
- entryHash := crypto.Hash(prehash)
- copy(hash[5:], entryHash[1:])
- return hash[:]
+
+ return finishV6(hash, prehash)
}
// accountHashBuilderV6 calculates the hash key used for the trie by combining the account address and the account data
func resourcesHashBuilderV6(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, updateRound uint64, encodedResourceData []byte) []byte {
- hash := make([]byte, 4+crypto.DigestSize)
- // write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
- // recent updated to be in-cache, and "older" nodes will be left alone.
- for i, prefix := 3, updateRound; i >= 0; i, prefix = i-1, prefix>>8 {
- // the following takes the prefix & 255 -> hash[i]
- hash[i] = byte(prefix)
- }
- hash[4] = byte(ctype + 1) // set the 5th byte to one or two ( asset / application ) so we could differentiate the hashes.
+ hash := hashBufV6(updateRound, byte(ctype+1)) // one or two ( asset / application ) so we could differentiate the hashes.
prehash := make([]byte, 8+crypto.DigestSize+len(encodedResourceData))
copy(prehash[:], addr[:])
binary.LittleEndian.PutUint64(prehash[crypto.DigestSize:], uint64(cidx))
copy(prehash[crypto.DigestSize+8:], encodedResourceData[:])
- entryHash := crypto.Hash(prehash)
- copy(hash[5:], entryHash[1:])
- return hash[:]
+
+ return finishV6(hash, prehash)
+}
+
+// kvHashBuilderV6 calculates the hash key used for the trie by combining the key and value
+func kvHashBuilderV6(key string, value []byte) []byte {
+ hash := hashBufV6(0, 3) // 3 indicates a kv pair
+
+ prehash := make([]byte, len(key)+len(value))
+ copy(prehash[:], key)
+ copy(prehash[len(key):], value)
+
+ return finishV6(hash, prehash)
}
// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
@@ -1459,7 +1527,7 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
if rootHash.IsZero() {
ct.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
- accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize, DefaultMaxResourcesPerChunk)
+ accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
defer accountBuilderIt.Close(ctx)
startTrieBuildTime := time.Now()
trieHashCount := 0
@@ -1484,7 +1552,7 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
return fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
}
if !added {
- // we need to transalate the "addrid" into actual account address so that
+ // we need to translate the "addrid" into actual account address so that
// we can report the failure.
addr, err := lookupAccountAddressFromAddressID(ctx, tx, acct.addrid)
if err != nil {
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index a12e6fa9a..004f6572e 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -362,7 +362,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
i++
}
- _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, proto, basics.Round(1))
+ _, _, _, err = accountsNewRound(tx, updates, compactResourcesDeltas{}, nil, nil, proto, basics.Round(1))
if err != nil {
return
}
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 6f1e11dfe..81da3beab 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -36,9 +36,15 @@ const (
// note that the last chunk would typically be less than this number.
BalancesPerCatchpointFileChunk = 512
- // DefaultMaxResourcesPerChunk defines the max number of resources that go in a singular chunk
- // 300000 resources * 300B/resource => roughly max 100MB per chunk
- DefaultMaxResourcesPerChunk = 300000
+ // ResourcesPerCatchpointFileChunk defines the max number of resources that go in a singular chunk
+ // 100,000 resources * 20KB/resource => roughly max 2GB per chunk if all of them are max'ed out apps.
+ // In reality most entries are asset holdings, and they are very small.
+ ResourcesPerCatchpointFileChunk = 100_000
+
+ // resourcesPerCatchpointFileChunkBackwardCompatible is the old value for ResourcesPerCatchpointFileChunk.
+ // Size of a single resource entry was underestimated to 300 bytes that holds only for assets and not for apps.
+ // It is safe to remove after April, 2023 since we are only supporting catchpoint that are 6 months old.
+ resourcesPerCatchpointFileChunkBackwardCompatible = 300_000
)
// catchpointWriter is the struct managing the persistence of accounts data into the catchpoint file.
@@ -50,24 +56,24 @@ type catchpointWriter struct {
tx *sql.Tx
filePath string
totalAccounts uint64
- totalChunks uint64
file *os.File
tar *tar.Writer
compressor io.WriteCloser
- balancesChunk catchpointFileBalancesChunkV6
- balancesChunkNum uint64
- numAccountsProcessed uint64
+ chunk catchpointFileChunkV6
+ chunkNum uint64
writtenBytes int64
biggestChunkLen uint64
accountsIterator encodedAccountsBatchIter
maxResourcesPerChunk int
+ accountsDone bool
+ kvRows *sql.Rows
}
type encodedBalanceRecordV5 struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Address basics.Address `codec:"pk,allocbound=crypto.DigestSize"`
- AccountData msgp.Raw `codec:"ad,allocbound=basics.MaxEncodedAccountDataSize"`
+ AccountData msgp.Raw `codec:"ad"`
}
type catchpointFileBalancesChunkV5 struct {
@@ -75,7 +81,7 @@ type catchpointFileBalancesChunkV5 struct {
Balances []encodedBalanceRecordV5 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
}
-// SortUint64 re-export this sort, which is implmented in basics, and being used by the msgp when
+// SortUint64 re-export this sort, which is implemented in basics, and being used by the msgp when
// encoding the resources map below.
type SortUint64 = basics.SortUint64
@@ -83,17 +89,43 @@ type encodedBalanceRecordV6 struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Address basics.Address `codec:"a,allocbound=crypto.DigestSize"`
- AccountData msgp.Raw `codec:"b,allocbound=basics.MaxEncodedAccountDataSize"`
- Resources map[uint64]msgp.Raw `codec:"c,allocbound=basics.MaxEncodedAccountDataSize"`
+ AccountData msgp.Raw `codec:"b"`
+ Resources map[uint64]msgp.Raw `codec:"c,allocbound=resourcesPerCatchpointFileChunkBackwardCompatible"`
// flag indicating whether there are more records for the same account coming up
ExpectingMoreEntries bool `codec:"e"`
}
-type catchpointFileBalancesChunkV6 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
+// Adjust these to be big enough for boxes, but not directly tied to box values.
+const (
+ // For boxes: "bx:<8 bytes><64 byte name>"
+ encodedKVRecordV6MaxKeyLength = 128
+
+ // For boxes: MaxBoxSize
+ encodedKVRecordV6MaxValueLength = 32768
+
+ // MaxEncodedKVDataSize is the max size of serialized KV entry, checked with TestEncodedKVDataSize.
+ // Exact value is 32906 that is 10 bytes more than 32768 + 128
+ MaxEncodedKVDataSize = 33000
+)
+
+type encodedKVRecordV6 struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Key []byte `codec:"k,allocbound=encodedKVRecordV6MaxKeyLength"`
+ Value []byte `codec:"v,allocbound=encodedKVRecordV6MaxValueLength"`
+}
+
+type catchpointFileChunkV6 struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
numAccounts uint64
+ KVs []encodedKVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"`
+}
+
+func (chunk catchpointFileChunkV6) empty() bool {
+ return len(chunk.Balances) == 0 && len(chunk.KVs) == 0
}
func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxResourcesPerChunk int) (*catchpointWriter, error) {
@@ -121,7 +153,6 @@ func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxR
tx: tx,
filePath: filePath,
totalAccounts: totalAccounts,
- totalChunks: (totalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
file: file,
compressor: compressor,
tar: tar,
@@ -138,93 +169,110 @@ func (cw *catchpointWriter) Abort() error {
return os.Remove(cw.filePath)
}
+// WriteStep works for a short period of time (determined by stepCtx) to get
+// some more data (accounts/resources/kvpairs) by using readDatabaseStep, and
+// write that data to the open tar file in cw.tar. The writing is done in
+// asyncWriter, so that it can proceed concurrently with reading the data from
+// the db. asyncWriter only runs long enough to process the data read during a
+// single call to WriteStep, and WriteStep ensures that asyncWriter has finished
+// writing by waiting for it in a defer block, collecting any errors that may
+// have occurred during writing. Therefore, WriteStep looks like a simple
+// synchronous function to its callers.
func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err error) {
// have we timed-out / canceled by that point ?
if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
return
}
- writerRequest := make(chan catchpointFileBalancesChunkV6, 1)
+ writerRequest := make(chan catchpointFileChunkV6, 1)
writerResponse := make(chan error, 2)
- go cw.asyncWriter(writerRequest, writerResponse, cw.balancesChunkNum, cw.numAccountsProcessed)
+ go cw.asyncWriter(writerRequest, writerResponse, cw.chunkNum)
defer func() {
+ // For simplicity, all cleanup is done once, here. The writerRequest is
+ // closed, signaling asyncWriter that it can exit, and then
+ // writerResponse is drained, ensuring any problems from asyncWriter are
+ // noted (and that the writing is done).
close(writerRequest)
- // wait for the writerResponse to close.
+ drain:
for {
select {
case writerError, open := <-writerResponse:
if open {
err = writerError
} else {
- return
+ break drain
}
}
}
+ if !more {
+ // If we're done, close up the tar file and report on size
+ cw.tar.Close()
+ cw.compressor.Close()
+ cw.file.Close()
+ fileInfo, statErr := os.Stat(cw.filePath)
+ if statErr != nil {
+ err = statErr
+ }
+ cw.writtenBytes = fileInfo.Size()
+
+ // These don't HAVE to be closed, since the "owning" tx will be committed/rolled back
+ cw.accountsIterator.Close()
+ if cw.kvRows != nil {
+ cw.kvRows.Close()
+ cw.kvRows = nil
+ }
+ }
}()
for {
- // have we timed-out / canceled by that point ?
+ // have we timed-out or been canceled ?
if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
return
}
- if len(cw.balancesChunk.Balances) == 0 {
+ if cw.chunk.empty() {
err = cw.readDatabaseStep(cw.ctx, cw.tx)
if err != nil {
return
}
+ // readDatabaseStep yielded nothing, we're done
+ if cw.chunk.empty() {
+ return false, nil
+ }
}
- // have we timed-out / canceled by that point ?
+ // have we timed-out or been canceled ?
if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
return
}
// check if we had any error on the writer from previous iterations.
+ // this should not be required for correctness, since we'll find the
+ // error in the defer block. But this might notice earlier.
select {
case err := <-writerResponse:
- // we ran into an error. wait for the channel to close before returning with the error.
- <-writerResponse
return false, err
default:
}
- // write to disk.
- if len(cw.balancesChunk.Balances) > 0 {
- cw.numAccountsProcessed += cw.balancesChunk.numAccounts
- cw.balancesChunkNum++
- writerRequest <- cw.balancesChunk
- if cw.numAccountsProcessed == cw.totalAccounts {
- cw.accountsIterator.Close()
- // if we're done, wait for the writer to complete it's writing.
- err, opened := <-writerResponse
- if opened {
- // we ran into an error. wait for the channel to close before returning with the error.
- <-writerResponse
- return false, err
- }
- // channel is closed. we're done writing and no issues detected.
- return false, nil
- }
- cw.balancesChunk.Balances = nil
- }
+ // send the chunk to the asyncWriter channel
+ cw.chunkNum++
+ writerRequest <- cw.chunk
+ // indicate that we need a readDatabaseStep
+ cw.chunk = catchpointFileChunkV6{}
}
}
-func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChunkV6, response chan error, initialBalancesChunkNum uint64, initialNumAccounts uint64) {
+func (cw *catchpointWriter) asyncWriter(chunks chan catchpointFileChunkV6, response chan error, chunkNum uint64) {
defer close(response)
- balancesChunkNum := initialBalancesChunkNum
- numAccountsProcessed := initialNumAccounts
- for bc := range balances {
- balancesChunkNum++
- numAccountsProcessed += bc.numAccounts
- if len(bc.Balances) == 0 {
+ for chk := range chunks {
+ chunkNum++
+ if chk.empty() {
break
}
-
- encodedChunk := protocol.Encode(&bc)
+ encodedChunk := protocol.Encode(&chk)
err := cw.tar.WriteHeader(&tar.Header{
- Name: fmt.Sprintf("balances.%d.%d.msgpack", balancesChunkNum, cw.totalChunks),
+ Name: fmt.Sprintf("balances.%d.msgpack", chunkNum),
Mode: 0600,
Size: int64(len(encodedChunk)),
})
@@ -240,43 +288,53 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
if chunkLen := uint64(len(encodedChunk)); cw.biggestChunkLen < chunkLen {
cw.biggestChunkLen = chunkLen
}
- if numAccountsProcessed == cw.totalAccounts {
- cw.tar.Close()
- cw.compressor.Close()
- cw.file.Close()
- var fileInfo os.FileInfo
- fileInfo, err = os.Stat(cw.filePath)
- if err != nil {
- response <- err
- break
- }
- cw.writtenBytes = fileInfo.Size()
- break
- }
}
}
-func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (err error) {
- cw.balancesChunk.Balances, cw.balancesChunk.numAccounts, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk)
- return
-}
-
-// GetSize returns the number of bytes that have been written to the file.
-func (cw *catchpointWriter) GetSize() int64 {
- return cw.writtenBytes
-}
-
-// GetBalancesCount returns the number of balances written to this catchpoint file.
-func (cw *catchpointWriter) GetTotalAccounts() uint64 {
- return cw.totalAccounts
-}
+// readDatabaseStep places the next chunk of records into cw.chunk. It yields
+// all of the account chunks first, and then the kv chunks. Even if the accounts
+// are evenly divisible by BalancesPerCatchpointFileChunk, it must not return an
+// empty chunk between accounts and kvs.
+func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) error {
+ if !cw.accountsDone {
+ balances, numAccounts, err := cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ if len(balances) > 0 {
+ cw.chunk = catchpointFileChunkV6{Balances: balances, numAccounts: numAccounts}
+ return nil
+ }
+ // It might seem reasonable, but do not close accountsIterator here,
+ // else it will start over on the next iteration
+ // cw.accountsIterator.Close()
+ cw.accountsDone = true
+ }
-func (cw *catchpointWriter) GetTotalChunks() uint64 {
- return cw.totalChunks
-}
+ // Create the *Rows iterator JIT
+ if cw.kvRows == nil {
+ rows, err := tx.QueryContext(ctx, "SELECT key, value FROM kvstore")
+ if err != nil {
+ return err
+ }
+ cw.kvRows = rows
+ }
-func (cw *catchpointWriter) GetBiggestChunkLen() uint64 {
- return cw.biggestChunkLen
+ kvrs := make([]encodedKVRecordV6, 0, BalancesPerCatchpointFileChunk)
+ for cw.kvRows.Next() {
+ var k []byte
+ var v []byte
+ err := cw.kvRows.Scan(&k, &v)
+ if err != nil {
+ return err
+ }
+ kvrs = append(kvrs, encodedKVRecordV6{Key: k, Value: v})
+ if len(kvrs) == BalancesPerCatchpointFileChunk {
+ break
+ }
+ }
+ cw.chunk = catchpointFileChunkV6{KVs: kvrs}
+ return nil
}
// hasContextDeadlineExceeded examine the given context and see if it was canceled or timed-out.
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index 8c8493372..30f1b0a14 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -24,9 +24,11 @@ import (
"database/sql"
"fmt"
"io"
+ "math"
"os"
"path/filepath"
"runtime"
+ "strconv"
"testing"
"github.com/stretchr/testify/require"
@@ -34,11 +36,17 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
)
func makeString(len int) string {
@@ -157,6 +165,7 @@ func makeTestEncodedBalanceRecordV5(t *testing.T) encodedBalanceRecordV5 {
func TestEncodedBalanceRecordEncoding(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
er := makeTestEncodedBalanceRecordV5(t)
encodedBr := er.MarshalMsg(nil)
@@ -170,9 +179,10 @@ func TestEncodedBalanceRecordEncoding(t *testing.T) {
func TestCatchpointFileBalancesChunkEncoding(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// The next operations are heavy on the memory.
- // Garbage collection helps prevent trashing
+ // Garbage collection helps prevent thrashing
runtime.GC()
fbc := catchpointFileBalancesChunkV5{}
@@ -193,6 +203,7 @@ func TestCatchpointFileBalancesChunkEncoding(t *testing.T) {
func TestBasicCatchpointWriter(t *testing.T) {
partitiontest.PartitionTest(t)
+ // t.Parallel() NO! config.Consensus is modified
// create new protocol version, which has lower lookback
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestBasicCatchpointWriter")
@@ -219,7 +230,7 @@ func TestBasicCatchpointWriter(t *testing.T) {
readDb := ml.trackerDB().Rdb
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), fileName, tx, DefaultMaxResourcesPerChunk)
+ writer, err := makeCatchpointWriter(context.Background(), fileName, tx, ResourcesPerCatchpointFileChunk)
if err != nil {
return err
}
@@ -262,51 +273,29 @@ func TestBasicCatchpointWriter(t *testing.T) {
}
}
- require.Equal(t, "balances.1.1.msgpack", header.Name)
+ require.Equal(t, "balances.1.msgpack", header.Name)
- var balances catchpointFileBalancesChunkV6
- err = protocol.Decode(balancesBlockBytes, &balances)
+ var chunk catchpointFileChunkV6
+ err = protocol.Decode(balancesBlockBytes, &chunk)
require.NoError(t, err)
- require.Equal(t, uint64(len(accts)), uint64(len(balances.Balances)))
+ require.Equal(t, uint64(len(accts)), uint64(len(chunk.Balances)))
_, err = tarReader.Next()
require.Equal(t, io.EOF, err)
}
-func TestFullCatchpointWriter(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // create new protocol version, which has lower lookback
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
- protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.CatchpointLookback = 32
- config.Consensus[testProtocolVersion] = protoParams
- temporaryDirectory := t.TempDir()
- defer func() {
- delete(config.Consensus, testProtocolVersion)
- }()
-
- accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
- ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
- defer ml.Close()
-
- conf := config.GetDefaultLocal()
- conf.CatchpointInterval = 1
- conf.Archival = true
- au, _ := newAcctUpdates(t, ml, conf)
- err := au.loadFromDisk(ml, 0)
- require.NoError(t, err)
- au.close()
- catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
- catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
- readDb := ml.trackerDB().Rdb
+func testWriteCatchpoint(t *testing.T, rdb db.Accessor, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader {
var totalAccounts uint64
var totalChunks uint64
var biggestChunkLen uint64
var accountsRnd basics.Round
var totals ledgercore.AccountTotals
- err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
+ if maxResourcesPerChunk <= 0 {
+ maxResourcesPerChunk = ResourcesPerCatchpointFileChunk
+ }
+
+ err := rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ writer, err := makeCatchpointWriter(context.Background(), datapath, tx, maxResourcesPerChunk)
if err != nil {
return err
}
@@ -317,9 +306,9 @@ func TestFullCatchpointWriter(t *testing.T) {
break
}
}
- totalAccounts = writer.GetTotalAccounts()
- totalChunks = writer.GetTotalChunks()
- biggestChunkLen = writer.GetBiggestChunkLen()
+ totalAccounts = writer.totalAccounts
+ totalChunks = writer.chunkNum
+ biggestChunkLen = writer.biggestChunkLen
accountsRnd, err = accountsRound(tx)
if err != nil {
return
@@ -343,73 +332,13 @@ func TestFullCatchpointWriter(t *testing.T) {
}
err = repackCatchpoint(
context.Background(), catchpointFileHeader, biggestChunkLen,
- catchpointDataFilePath, catchpointFilePath)
+ datapath, filepath)
require.NoError(t, err)
- // create a ledger.
- var initState ledgercore.InitState
- initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
- l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
- require.NoError(t, err)
+ l := testNewLedgerFromCatchpoint(t, filepath)
defer l.Close()
- accessor := MakeCatchpointCatchupAccessor(l, l.log)
-
- err = accessor.ResetStagingBalances(context.Background(), true)
- require.NoError(t, err)
-
- // load the file from disk.
- fileContent, err := os.ReadFile(catchpointFilePath)
- require.NoError(t, err)
- gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
- require.NoError(t, err)
- tarReader := tar.NewReader(gzipReader)
- var catchupProgress CatchpointCatchupAccessorProgress
- defer gzipReader.Close()
- for {
- header, err := tarReader.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- require.NoError(t, err)
- break
- }
- balancesBlockBytes := make([]byte, header.Size)
- readComplete := int64(0)
-
- for readComplete < header.Size {
- bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
- readComplete += int64(bytesRead)
- if err != nil {
- if err == io.EOF {
- if readComplete == header.Size {
- break
- }
- require.NoError(t, err)
- }
- break
- }
- }
- err = accessor.ProgressStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
- require.NoError(t, err)
- }
-
- err = accessor.BuildMerkleTrie(context.Background(), nil)
- require.NoError(t, err)
-
- err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- err := applyCatchpointStagingBalances(ctx, tx, 0, 0)
- return err
- })
- require.NoError(t, err)
- // verify that the account data aligns with what we originally stored :
- for addr, acct := range accts {
- acctData, validThrough, _, err := l.LookupLatest(addr)
- require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr)
- require.Equal(t, acct, acctData)
- require.Equal(t, basics.Round(0), validThrough)
- }
+ return catchpointFileHeader
}
func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
@@ -468,14 +397,14 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
}
// repeat this until read all accts
for totalAccountsWritten < expectedTotalAccounts {
- cw.balancesChunk.Balances = nil
+ cw.chunk.Balances = nil
err := cw.readDatabaseStep(cw.ctx, cw.tx)
if err != nil {
return err
}
- totalAccountsWritten += cw.balancesChunk.numAccounts
+ totalAccountsWritten += cw.chunk.numAccounts
numResources := 0
- for _, balance := range cw.balancesChunk.Balances {
+ for _, balance := range cw.chunk.Balances {
numResources += len(balance.Resources)
}
if numResources > maxResourcesPerChunk {
@@ -557,14 +486,14 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) {
}
// repeat this until read all accts
for totalAccountsWritten < expectedTotalAccounts {
- cw.balancesChunk.Balances = nil
+ cw.chunk.Balances = nil
err := cw.readDatabaseStep(cw.ctx, cw.tx)
if err != nil {
return err
}
- totalAccountsWritten += cw.balancesChunk.numAccounts
+ totalAccountsWritten += cw.chunk.numAccounts
numResources := 0
- for _, balance := range cw.balancesChunk.Balances {
+ for _, balance := range cw.chunk.Balances {
numResources += len(balance.Resources)
}
if numResources > maxResourcesPerChunk {
@@ -609,66 +538,96 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
au.close()
catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
- readDb := ml.trackerDB().Rdb
- var totalAccounts uint64
- var totalChunks uint64
- var biggestChunkLen uint64
- var accountsRnd basics.Round
- var totals ledgercore.AccountTotals
- err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, 5)
- if err != nil {
- return err
- }
- for {
- more, err := writer.WriteStep(context.Background())
+ const maxResourcesPerChunk = 5
+ testWriteCatchpoint(t, ml.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, maxResourcesPerChunk)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+
+ // verify that the account data aligns with what we originally stored :
+ for addr, acct := range accts {
+ acctData, validThrough, _, err := l.LookupLatest(addr)
+ require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr)
+ require.Equal(t, acct, acctData)
+ require.Equal(t, basics.Round(0), validThrough)
+ }
+
+ err = l.reloadLedger()
+ require.NoError(t, err)
+
+ // now manually construct the MT and ensure the reading makeOrderedAccountsIter works as expected:
+ // no errors on read, hashes match
+ ctx := context.Background()
+ tx, err := l.trackerDBs.Wdb.Handle.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ // save the existing hash
+ committer, err := MakeMerkleCommitter(tx, false)
+ require.NoError(t, err)
+ trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig)
+ require.NoError(t, err)
+
+ h1, err := trie.RootHash()
+ require.NoError(t, err)
+ require.NotEmpty(t, h1)
+
+ // reset hashes
+ err = resetAccountHashes(ctx, tx)
+ require.NoError(t, err)
+
+ // rebuild the MT
+ committer, err = MakeMerkleCommitter(tx, false)
+ require.NoError(t, err)
+ trie, err = merkletrie.MakeTrie(committer, TrieMemoryConfig)
+ require.NoError(t, err)
+
+ h, err := trie.RootHash()
+ require.NoError(t, err)
+ require.Zero(t, h)
+
+ iter := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ defer iter.Close(ctx)
+ for {
+ accts, _, err := iter.Next(ctx)
+ if err == sql.ErrNoRows {
+ // the account builder would return sql.ErrNoRows when no more data is available.
+ err = nil
+ break
+ } else if err != nil {
require.NoError(t, err)
- if !more {
- break
- }
}
- totalAccounts = writer.GetTotalAccounts()
- totalChunks = writer.GetTotalChunks()
- biggestChunkLen = writer.GetBiggestChunkLen()
- accountsRnd, err = accountsRound(tx)
- if err != nil {
- return
+
+ if len(accts) > 0 {
+ for _, acct := range accts {
+ added, err := trie.Add(acct.digest)
+ require.NoError(t, err)
+ require.True(t, added)
+ }
}
- totals, err = accountsTotals(ctx, tx, false)
- return
- })
- require.NoError(t, err)
- blocksRound := accountsRnd + 1
- blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
- catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
- catchpointFileHeader := CatchpointFileHeader{
- Version: CatchpointFileVersionV6,
- BalancesRound: accountsRnd,
- BlocksRound: blocksRound,
- Totals: totals,
- TotalAccounts: totalAccounts,
- TotalChunks: totalChunks,
- Catchpoint: catchpointLabel,
- BlockHeaderDigest: blockHeaderDigest,
}
- err = repackCatchpoint(
- context.Background(), catchpointFileHeader, biggestChunkLen,
- catchpointDataFilePath, catchpointFilePath)
require.NoError(t, err)
+ h2, err := trie.RootHash()
+ require.NoError(t, err)
+ require.NotEmpty(t, h2)
+
+ require.Equal(t, h1, h2)
+}
+func testNewLedgerFromCatchpoint(t *testing.T, filepath string) *Ledger {
// create a ledger.
var initState ledgercore.InitState
initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
- l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
+ conf := config.GetDefaultLocal()
+ l, err := OpenLedger(logging.TestingLog(t), t.Name()+"FromCatchpoint", true, initState, conf)
require.NoError(t, err)
- defer l.Close()
accessor := MakeCatchpointCatchupAccessor(l, l.log)
err = accessor.ResetStagingBalances(context.Background(), true)
require.NoError(t, err)
// load the file from disk.
- fileContent, err := os.ReadFile(catchpointFilePath)
+ fileContent, err := os.ReadFile(filepath)
require.NoError(t, err)
gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
require.NoError(t, err)
@@ -700,7 +659,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
break
}
}
- err = accessor.ProgressStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
+ err = accessor.ProcessStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
require.NoError(t, err)
}
@@ -712,7 +671,41 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
return err
})
require.NoError(t, err)
+ return l
+}
+
+func TestFullCatchpointWriter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // t.Parallel() NO! config.Consensus is modified
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
+ testWriteCatchpoint(t, ml.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
// verify that the account data aligns with what we originally stored :
for addr, acct := range accts {
acctData, validThrough, _, err := l.LookupLatest(addr)
@@ -721,3 +714,172 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
require.Equal(t, basics.Round(0), validThrough)
}
}
+
+func TestExactAccountChunk(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture)
+ defer dl.Close()
+
+ pay := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Amount: 1_000_000,
+ }
+ // There are 12 accounts in the NewTestGenesis, so we create more so that we
+ // have exactly one chunk's worth, to make sure that works without an empty
+ // chunk between accounts and kvstore.
+ for i := 0; i < (BalancesPerCatchpointFileChunk - 12); i++ {
+ newacctpay := pay
+ newacctpay.Receiver = ledgertesting.RandomAddress()
+ dl.fullBlock(&newacctpay)
+ }
+
+ // At least 32 more blocks so that we catchpoint after the accounts exist
+ for i := 0; i < 40; i++ {
+ selfpay := pay
+ selfpay.Receiver = addrs[0]
+ selfpay.Note = ledgertesting.RandomNote()
+ dl.fullBlock(&selfpay)
+ }
+
+ tempDir := t.TempDir()
+
+ catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data")
+ catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz")
+
+ cph := testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, cph.TotalChunks, 1)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+}
+
+func TestCatchpointAfterTxns(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture)
+ defer dl.Close()
+
+ boxApp := dl.fundedApp(addrs[1], 1_000_000, boxAppSource)
+ callBox := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: boxApp,
+ }
+
+ makeBox := callBox.Args("create", "xxx")
+ makeBox.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("xxx")}}
+ dl.txn(makeBox)
+
+ pay := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[1],
+ Amount: 100000,
+ }
+ // There are 12 accounts in the NewTestGenesis, plus 1 app account, so we
+ // create more so that we have exactly one chunk's worth, to make sure that
+ // works without an empty chunk between accounts and kvstore.
+ for i := 0; i < (BalancesPerCatchpointFileChunk - 13); i++ {
+ newacctpay := pay
+ newacctpay.Receiver = ledgertesting.RandomAddress()
+ dl.fullBlock(&newacctpay)
+ }
+ for i := 0; i < 40; i++ {
+ dl.fullBlock(pay.Noted(strconv.Itoa(i)))
+ }
+
+ tempDir := t.TempDir()
+
+ catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data")
+ catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz")
+
+ cph := testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, 2, cph.TotalChunks)
+
+ l := testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+ values, err := l.LookupKeysByPrefix(l.Latest(), "bx:", 10)
+ require.NoError(t, err)
+ require.Len(t, values, 1)
+
+ // Add one more account
+ newacctpay := pay
+ last := ledgertesting.RandomAddress()
+ newacctpay.Receiver = last
+ dl.fullBlock(&newacctpay)
+
+ // Write and read back in, and ensure even the last effect exists.
+ cph = testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, cph.TotalChunks, 2) // Still only 2 chunks, as last was in a recent block
+
+ // Drive home the point that `last` is _not_ included in the catchpoint by inspecting balance read from catchpoint.
+ {
+ l = testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+ _, _, algos, err := l.LookupLatest(last)
+ require.NoError(t, err)
+ require.Equal(t, basics.MicroAlgos{}, algos)
+ }
+
+ for i := 0; i < 40; i++ { // Advance so catchpoint sees the txns
+ dl.fullBlock(pay.Noted(strconv.Itoa(i)))
+ }
+
+ cph = testWriteCatchpoint(t, dl.validator.trackerDB().Rdb, catchpointDataFilePath, catchpointFilePath, 0)
+ require.EqualValues(t, cph.TotalChunks, 3)
+
+ l = testNewLedgerFromCatchpoint(t, catchpointFilePath)
+ defer l.Close()
+ values, err = l.LookupKeysByPrefix(l.Latest(), "bx:", 10)
+ require.NoError(t, err)
+ require.Len(t, values, 1)
+
+ // Confirm `last` balance is now available in the catchpoint.
+ {
+ // Since fast catchup consists of multiple steps and the test only performs catchpoint reads, the resulting ledger is incomplete.
+ // That's why the assertion ignores rewards and does _not_ use `LookupLatest`.
+ ad, _, err := l.LookupWithoutRewards(0, last)
+ require.NoError(t, err)
+ require.Equal(t, basics.MicroAlgos{Raw: 100_000}, ad.MicroAlgos)
+ }
+}
+
+func TestEncodedKVRecordV6Allocbounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for version, params := range config.Consensus {
+ require.GreaterOrEqualf(t, uint64(encodedKVRecordV6MaxValueLength), params.MaxBoxSize, "Allocbound constant no longer valid as of consensus version %s", version)
+ longestPossibleBoxName := string(make([]byte, params.MaxAppKeyLen))
+ longestPossibleKey := logic.MakeBoxKey(basics.AppIndex(math.MaxUint64), longestPossibleBoxName)
+ require.GreaterOrEqualf(t, encodedKVRecordV6MaxValueLength, len(longestPossibleKey), "Allocbound constant no longer valid as of consensus version %s", version)
+ }
+}
+
+func TestEncodedKVDataSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ require.GreaterOrEqual(t, encodedKVRecordV6MaxKeyLength, currentConsensusParams.MaxAppKeyLen)
+ require.GreaterOrEqual(t, uint64(encodedKVRecordV6MaxValueLength), currentConsensusParams.MaxBoxSize)
+
+ kvEntry := encodedKVRecordV6{
+ Key: make([]byte, encodedKVRecordV6MaxKeyLength),
+ Value: make([]byte, encodedKVRecordV6MaxValueLength),
+ }
+
+ crypto.RandBytes(kvEntry.Key[:])
+ crypto.RandBytes(kvEntry.Value[:])
+
+ encoded := kvEntry.MarshalMsg(nil)
+ require.GreaterOrEqual(t, MaxEncodedKVDataSize, len(encoded))
+
+}
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 2a12377d3..fa1b6803b 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -55,8 +55,8 @@ type CatchpointCatchupAccessor interface {
// ResetStagingBalances resets the current staging balances, preparing for a new set of balances to be added
ResetStagingBalances(ctx context.Context, newCatchup bool) (err error)
- // ProgressStagingBalances deserialize the given bytes as a temporary staging balances
- ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error)
+ // ProcessStagingBalances deserialize the given bytes as a temporary staging balances
+ ProcessStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error)
// BuildMerkleTrie inserts the account hashes into the merkle trie
BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64)) (err error)
@@ -95,6 +95,7 @@ type stagingWriter interface {
writeBalances(context.Context, []normalizedAccountBalance) error
writeCreatables(context.Context, []normalizedAccountBalance) error
writeHashes(context.Context, []normalizedAccountBalance) error
+ writeKVs(context.Context, []encodedKVRecordV6) error
isShared() bool
}
@@ -108,6 +109,12 @@ func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []normal
})
}
+func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encodedKVRecordV6) error {
+ return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ return writeCatchpointStagingKVs(ctx, tx, kvrs)
+ })
+}
+
func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []normalizedAccountBalance) error {
return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
return writeCatchpointStagingCreatable(ctx, tx, balances)
@@ -263,7 +270,7 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context
return
}
-// CatchpointCatchupAccessorProgress is used by the caller of ProgressStagingBalances to obtain progress information
+// CatchpointCatchupAccessorProgress is used by the caller of ProcessStagingBalances to obtain progress information
type CatchpointCatchupAccessorProgress struct {
TotalAccounts uint64
ProcessedAccounts uint64
@@ -280,10 +287,11 @@ type CatchpointCatchupAccessorProgress struct {
BalancesWriteDuration time.Duration
CreatablesWriteDuration time.Duration
HashesWriteDuration time.Duration
+ KVWriteDuration time.Duration
}
-// ProgressStagingBalances deserialize the given bytes as a temporary staging balances
-func (c *catchpointCatchupAccessorImpl) ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
+// ProcessStagingBalances deserialize the given bytes as a temporary staging balances
+func (c *catchpointCatchupAccessorImpl) ProcessStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
if sectionName == "content.msgpack" {
return c.processStagingContent(ctx, bytes, progress)
}
@@ -291,7 +299,7 @@ func (c *catchpointCatchupAccessorImpl) ProgressStagingBalances(ctx context.Cont
return c.processStagingBalances(ctx, bytes, progress)
}
// we want to allow undefined sections to support backward compatibility.
- c.log.Warnf("CatchpointCatchupAccessorImpl::ProgressStagingBalances encountered unexpected section name '%s' of length %d, which would be ignored", sectionName, len(bytes))
+ c.log.Warnf("CatchpointCatchupAccessorImpl::ProcessStagingBalances encountered unexpected section name '%s' of length %d, which would be ignored", sectionName, len(bytes))
return nil
}
@@ -354,6 +362,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
var normalizedAccountBalances []normalizedAccountBalance
var expectingMoreEntries []bool
+ var chunkKVs []encodedKVRecordV6
switch progress.Version {
default:
@@ -375,21 +384,22 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
expectingMoreEntries = make([]bool, len(balances.Balances))
case CatchpointFileVersionV6:
- var balances catchpointFileBalancesChunkV6
- err = protocol.Decode(bytes, &balances)
+ var chunk catchpointFileChunkV6
+ err = protocol.Decode(bytes, &chunk)
if err != nil {
return err
}
- if len(balances.Balances) == 0 {
- return fmt.Errorf("processStagingBalances received a chunk with no accounts")
+ if len(chunk.Balances) == 0 && len(chunk.KVs) == 0 {
+ return fmt.Errorf("processStagingBalances received a chunk with no accounts or KVs")
}
- normalizedAccountBalances, err = prepareNormalizedBalancesV6(balances.Balances, c.ledger.GenesisProto())
- expectingMoreEntries = make([]bool, len(balances.Balances))
- for i, balance := range balances.Balances {
+ normalizedAccountBalances, err = prepareNormalizedBalancesV6(chunk.Balances, c.ledger.GenesisProto())
+ expectingMoreEntries = make([]bool, len(chunk.Balances))
+ for i, balance := range chunk.Balances {
expectingMoreEntries[i] = balance.ExpectingMoreEntries
}
+ chunkKVs = chunk.KVs
}
if err != nil {
@@ -468,9 +478,11 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
var errBalances error
var errCreatables error
var errHashes error
+ var errKVs error
var durBalances time.Duration
var durCreatables time.Duration
var durHashes time.Duration
+ var durKVs time.Duration
// start the balances writer
wg.Add(1)
@@ -520,6 +532,21 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
durHashes = time.Since(start)
}()
+ // on a in-memory database, wait for the writer to finish before starting the new writer
+ if c.stagingWriter.isShared() {
+ wg.Wait()
+ }
+
+ // start the kv store writer
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ start := time.Now()
+ errKVs = c.stagingWriter.writeKVs(ctx, chunkKVs)
+ durKVs = time.Since(start)
+ }()
+
wg.Wait()
if errBalances != nil {
@@ -531,10 +558,14 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
if errHashes != nil {
return errHashes
}
+ if errKVs != nil {
+ return errKVs
+ }
progress.BalancesWriteDuration += durBalances
progress.CreatablesWriteDuration += durCreatables
progress.HashesWriteDuration += durHashes
+ progress.KVWriteDuration += durKVs
ledgerProcessstagingbalancesMicros.AddMicrosecondsSince(start, nil)
progress.ProcessedBytes += uint64(len(bytes))
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 1394726bd..adc1dcb5f 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -55,8 +55,8 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
if accounts >= accountsCount-64*1024 && last64KIndex == -1 {
last64KIndex = len(encodedAccountChunks)
}
- var balances catchpointFileBalancesChunkV6
- balances.Balances = make([]encodedBalanceRecordV6, chunkSize)
+ var chunk catchpointFileChunkV6
+ chunk.Balances = make([]encodedBalanceRecordV6, chunkSize)
for i := uint64(0); i < chunkSize; i++ {
var randomAccount encodedBalanceRecordV6
accountData := baseAccountData{}
@@ -67,9 +67,9 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
crypto.RandBytes(randomAccount.Address[:])
}
binary.LittleEndian.PutUint64(randomAccount.Address[:], accounts+i)
- balances.Balances[i] = randomAccount
+ chunk.Balances[i] = randomAccount
}
- encodedAccountChunks = append(encodedAccountChunks, protocol.Encode(&balances))
+ encodedAccountChunks = append(encodedAccountChunks, protocol.Encode(&chunk))
accounts += chunkSize
}
return
@@ -110,7 +110,7 @@ func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
}
encodedFileHeader := protocol.Encode(&fileHeader)
var progress CatchpointCatchupAccessorProgress
- err = catchpointAccessor.ProgressStagingBalances(context.Background(), "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), "content.msgpack", encodedFileHeader, &progress)
require.NoError(b, err)
// pre-create all encoded chunks.
@@ -126,7 +126,7 @@ func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
last64KStart = time.Now()
}
- err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
require.NoError(b, err)
last64KIndex--
}
@@ -240,16 +240,16 @@ func TestBuildMerkleTrie(t *testing.T) {
progressCallCount = 0
err = catchpointAccessor.ResetStagingBalances(ctx, true)
require.NoError(t, err, "ResetStagingBalances")
- // TODO: catchpointAccessor.ProgressStagingBalances() like in ledgerFetcher.downloadLedger(cs.ctx, peer, round) like catchup/catchpointService.go which is the real usage of BuildMerkleTrie()
+ // TODO: catchpointAccessor.ProcessStagingBalances() like in ledgerFetcher.downloadLedger(cs.ctx, peer, round) like catchup/catchpointService.go which is the real usage of BuildMerkleTrie()
var blob []byte = nil // TODO: content!
var progress CatchpointCatchupAccessorProgress
- err = catchpointAccessor.ProgressStagingBalances(ctx, "ignoredContent", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "ignoredContent", blob, &progress)
require.NoError(t, err)
// this shouldn't work yet
- err = catchpointAccessor.ProgressStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
require.Error(t, err)
// this needs content
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", blob, &progress)
require.Error(t, err)
// content.msgpack from this:
@@ -265,14 +265,14 @@ func TestBuildMerkleTrie(t *testing.T) {
BlockHeaderDigest: crypto.Digest{},
}
encodedFileHeader := protocol.Encode(&fileHeader)
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
require.NoError(t, err)
// shouldn't work a second time
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
require.Error(t, err)
// This should still fail, but slightly different coverage path
- err = catchpointAccessor.ProgressStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
require.Error(t, err)
// create some catchpoint data
@@ -280,7 +280,7 @@ func TestBuildMerkleTrie(t *testing.T) {
for _, encodedAccounts := range encodedAccountChunks {
- err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
require.NoError(t, err)
}
@@ -406,10 +406,10 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
BlockHeaderDigest: crypto.Digest{},
}
encodedFileHeader := protocol.Encode(&fileHeader)
- err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
require.NoError(t, err)
- var balances catchpointFileBalancesChunkV6
+ var balances catchpointFileChunkV6
balances.Balances = make([]encodedBalanceRecordV6, 1)
var randomAccount encodedBalanceRecordV6
accountData := baseAccountData{}
@@ -422,7 +422,7 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
encodedAccounts := protocol.Encode(&balances)
// expect error since there is a resource count mismatch
- err = catchpointAccessor.ProgressStagingBalances(ctx, "balances.XX.msgpack", encodedAccounts, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.XX.msgpack", encodedAccounts, &progress)
require.Error(t, err)
}
@@ -439,6 +439,10 @@ func (w *testStagingWriter) writeCreatables(ctx context.Context, balances []norm
return nil
}
+func (w *testStagingWriter) writeKVs(ctx context.Context, kvrs []encodedKVRecordV6) error {
+ return nil
+}
+
func (w *testStagingWriter) writeHashes(ctx context.Context, balances []normalizedAccountBalance) error {
for _, bal := range balances {
for _, hash := range bal.accountHashes {
@@ -528,7 +532,7 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) {
}
// make chunks
- chunks := []catchpointFileBalancesChunkV6{
+ chunks := []catchpointFileChunkV6{
{
Balances: []encodedBalanceRecordV6{
encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctA, nil, false),
diff --git a/ledger/internal/double_test.go b/ledger/double_test.go
index b33f3ca03..40e39b88c 100644
--- a/ledger/internal/double_test.go
+++ b/ledger/double_test.go
@@ -14,15 +14,15 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal_test
+package ledger
import (
"testing"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/internal"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
@@ -43,8 +43,8 @@ import (
type DoubleLedger struct {
t *testing.T
- generator *ledger.Ledger
- validator *ledger.Ledger
+ generator *Ledger
+ validator *Ledger
eval *internal.BlockEvaluator
}
@@ -56,8 +56,8 @@ func (dl DoubleLedger) Close() {
// NewDoubleLedger creates a new DoubleLedger with the supplied balances and consensus version.
func NewDoubleLedger(t *testing.T, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) DoubleLedger {
- g := newTestLedgerWithConsensusVersion(t, balances, cv)
- v := newTestLedgerFull(t, balances, cv, g.GenesisHash())
+ g := newSimpleLedgerWithConsensusVersion(t, balances, cv)
+ v := newSimpleLedgerFull(t, balances, cv, g.GenesisHash())
return DoubleLedger{t, g, v, nil}
}
@@ -124,17 +124,39 @@ func (dl *DoubleLedger) fullBlock(txs ...*txntest.Txn) *ledgercore.ValidatedBloc
func (dl *DoubleLedger) endBlock() *ledgercore.ValidatedBlock {
vb := endBlock(dl.t, dl.generator, dl.eval)
- checkBlock(dl.t, dl.validator, vb)
+ if dl.validator != nil { // Allows setting to nil while debugging, to simplify
+ checkBlock(dl.t, dl.validator, vb)
+ }
dl.eval = nil // Ensure it's not used again
return vb
}
+func (dl *DoubleLedger) fundedApp(sender basics.Address, amount uint64, source string) basics.AppIndex {
+ createapp := txntest.Txn{
+ Type: "appl",
+ Sender: sender,
+ ApprovalProgram: source,
+ }
+ vb := dl.fullBlock(&createapp)
+ appIndex := vb.Block().Payset[0].ApplyData.ApplicationID
+
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: sender,
+ Receiver: appIndex.Address(),
+ Amount: amount,
+ }
+
+ dl.txn(&fund)
+ return appIndex
+}
+
func (dl *DoubleLedger) reloadLedgers() {
require.NoError(dl.t, dl.generator.ReloadLedger())
require.NoError(dl.t, dl.validator.ReloadLedger())
}
-func checkBlock(t *testing.T, checkLedger *ledger.Ledger, vb *ledgercore.ValidatedBlock) {
+func checkBlock(t *testing.T, checkLedger *Ledger, vb *ledgercore.ValidatedBlock) {
bl := vb.Block()
msg := bl.MarshalMsg(nil)
var reconstituted bookkeeping.Block
@@ -160,9 +182,9 @@ func checkBlock(t *testing.T, checkLedger *ledger.Ledger, vb *ledgercore.Validat
err := check.TransactionGroup(group)
require.NoError(t, err, "%+v", reconstituted.Payset)
}
- check.SetGenerate(true)
+ check.SetGenerateForTesting(true)
cb := endBlock(t, checkLedger, check)
- check.SetGenerate(false)
+ check.SetGenerateForTesting(false)
require.Equal(t, vb.Block(), cb.Block())
// vb.Delta() need not actually be Equal, in the sense of require.Equal
@@ -179,7 +201,7 @@ func checkBlock(t *testing.T, checkLedger *ledger.Ledger, vb *ledgercore.Validat
// require.Equal(t, vb.Delta().Accts, cb.Delta().Accts)
}
-func nextCheckBlock(t testing.TB, ledger *ledger.Ledger, rs bookkeeping.RewardsState) *internal.BlockEvaluator {
+func nextCheckBlock(t testing.TB, ledger *Ledger, rs bookkeeping.RewardsState) *internal.BlockEvaluator {
rnd := ledger.Latest()
hdr, err := ledger.BlockHdr(rnd)
require.NoError(t, err)
diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go
new file mode 100644
index 000000000..802e8de9d
--- /dev/null
+++ b/ledger/eval_simple_test.go
@@ -0,0 +1,545 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/execpool"
+)
+
+func TestBlockEvaluator(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ genHash := l.GenesisHash()
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[1],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+
+ // Correct signature should work
+ st := txn.Sign(keys[0])
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ // Broken signature should fail
+ stbad := st
+ st.Sig[2] ^= 8
+ txgroup := []transactions.SignedTxn{stbad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ // Repeat should fail
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // out of range should fail
+ btxn := txn
+ btxn.FirstValid++
+ btxn.LastValid += 2
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // bogus group should fail
+ btxn = txn
+ btxn.Group[1] = 1
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ err = eval.Transaction(st, transactions.ApplyData{})
+ require.Error(t, err)
+
+ // mixed fields should fail
+ btxn = txn
+ btxn.XferAsset = 3
+ st = btxn.Sign(keys[0])
+ txgroup = []transactions.SignedTxn{st}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
+ // err = eval.Transaction(st, transactions.ApplyData{})
+ // require.Error(t, err)
+
+ selfTxn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addrs[2],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addrs[2],
+ Amount: basics.MicroAlgos{Raw: 100},
+ },
+ }
+ stxn := selfTxn.Sign(keys[2])
+
+ // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
+ txgroup = []transactions.SignedTxn{stxn}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(t, err)
+
+ t3 := txn
+ t3.Amount.Raw++
+ t4 := selfTxn
+ t4.Amount.Raw++
+
+ // a group without .Group should fail
+ s3 := t3.Sign(keys[0])
+ s4 := t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // Test a group that should work
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
+ t3.Group = crypto.HashObj(group)
+ t4.Group = t3.Group
+ s3 = t3.Sign(keys[0])
+ s4 = t4.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4}
+ err = eval.TestTransactionGroup(txgroup)
+ require.NoError(t, err)
+
+ // disagreement on Group id should fail
+ t4bad := t4
+ t4bad.Group[3] ^= 3
+ s4bad := t4bad.Sign(keys[2])
+ txgroup = []transactions.SignedTxn{s3, s4bad}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+ txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
+ err = eval.TransactionGroup(txgroupad)
+ require.Error(t, err)
+
+ // missing part of the group should fail
+ txgroup = []transactions.SignedTxn{s3}
+ err = eval.TestTransactionGroup(txgroup)
+ require.Error(t, err)
+
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+
+ accts := genesisInitState.Accounts
+ bal0 := accts[addrs[0]]
+ bal1 := accts[addrs[1]]
+ bal2 := accts[addrs[2]]
+
+ l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+
+ bal0new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[0])
+ require.NoError(t, err)
+ bal1new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[1])
+ require.NoError(t, err)
+ bal2new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[2])
+ require.NoError(t, err)
+
+ require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
+ require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
+ require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
+}
+
+func TestRekeying(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // t.Parallel() NO! This test manipulates []protocol.Consensus
+
+ // Pretend rekeying is supported
+ actual := config.Consensus[protocol.ConsensusCurrentVersion]
+ pretend := actual
+ pretend.SupportRekeying = true
+ config.Consensus[protocol.ConsensusCurrentVersion] = pretend
+ defer func() {
+ config.Consensus[protocol.ConsensusCurrentVersion] = actual
+ }()
+
+ // Bring up a ledger
+ genesisInitState, addrs, keys := ledgertesting.Genesis(10)
+
+ l, err := OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
+ require.NoError(t, err)
+ defer l.Close()
+
+ // Make a new block
+ nextRound := l.Latest() + basics.Round(1)
+ genHash := l.GenesisHash()
+
+ // Test plan
+ // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
+ makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: minFee,
+ FirstValid: nextRound,
+ LastValid: nextRound,
+ GenesisHash: genHash,
+ RekeyTo: rekeyto,
+ Note: []byte{uniq},
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: sender,
+ },
+ }
+ sig := signer.Sign(txn)
+ return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
+ }
+
+ tryBlock := func(stxns []transactions.SignedTxn) error {
+ // We'll make a block using the evaluator.
+ // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
+ // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
+ genesisHdr, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(genesisHdr)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ require.NoError(t, err)
+
+ for _, stxn := range stxns {
+ err = eval.Transaction(stxn, transactions.ApplyData{})
+ if err != nil {
+ return err
+ }
+ }
+ validatedBlock, err := eval.GenerateBlock()
+ if err != nil {
+ return err
+ }
+
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ defer backlogPool.Shutdown()
+ _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
+ return err
+ }
+
+ // Preamble transactions, which all of the blocks in this test will start with
+ // [A -> 0][0,A] (normal transaction)
+ // [A -> B][0,A] (rekey)
+ txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
+ txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
+
+ // Test 1: Do only good things
+ // (preamble)
+ // [A -> 0][B,B] (normal transaction using new key)
+ // [A -> A][B,B] (rekey back to A, transaction still signed by B)
+ // [A -> 0][0,A] (normal transaction again)
+ test1txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
+ makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
+ makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
+ }
+ err = tryBlock(test1txns)
+ require.NoError(t, err)
+
+ // Test 2: Use old key after rekeying
+ // (preamble)
+ // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
+ test2txns := []transactions.SignedTxn{
+ txn0, txn1, // (preamble)
+ makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
+ }
+ err = tryBlock(test2txns)
+ require.Error(t, err)
+
+ // TODO: More tests
+}
+
+func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, consensusVersion)
+ defer l.Close()
+
+ eval := nextBlock(t, l)
+
+ appcall1 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ GlobalStateSchema: schema,
+ ApprovalProgram: approvalProgram,
+ }
+
+ appcall2 := txntest.Txn{
+ Sender: addrs[0],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ appcall3 := txntest.Txn{
+ Sender: addrs[1],
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: basics.AppIndex(1),
+ }
+
+ return txgroup(t, l, eval, &appcall1, &appcall2, &appcall3)
+}
+
+// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
+// budgets in a group txn and return an error if the budget is exceeded
+func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := func(n int, m int) string {
+ return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
+ strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
+ }
+
+ params := []protocol.ConsensusVersion{
+ protocol.ConsensusV29,
+ protocol.ConsensusFuture,
+ }
+
+ cases := []struct {
+ prog string
+ isSuccessV29 bool
+ isSuccessVFuture bool
+ expectedErrorV29 string
+ expectedErrorVFuture string
+ }{
+ {source(5, 47), true, true,
+ "",
+ ""},
+ {source(5, 48), false, true,
+ "pc=157 dynamic cost budget exceeded, executing pushint",
+ ""},
+ {source(16, 17), false, true,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256",
+ ""},
+ {source(16, 18), false, false,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256",
+ "pc= 78 dynamic cost budget exceeded, executing pushint"},
+ }
+
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
+ if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorV29)
+ } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
+ }
+ })
+ }
+ }
+}
+
+func TestMinBalanceChanges(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusCurrentVersion)
+ defer l.Close()
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Manager: addrs[1],
+ Reserve: addrs[2],
+ Freeze: addrs[3],
+ Clawback: addrs[4],
+ },
+ }
+
+ const expectedID basics.AssetIndex = 1
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[5],
+ }
+
+ ad0init, _, _, err := l.LookupLatest(addrs[0])
+ require.NoError(t, err)
+ ad5init, _, _, err := l.LookupLatest(addrs[5])
+ require.NoError(t, err)
+
+ eval := nextBlock(t, l)
+ txns(t, l, eval, &createTxn, &optInTxn)
+ endBlock(t, l, eval)
+
+ ad0new, _, _, err := l.LookupLatest(addrs[0])
+ require.NoError(t, err)
+ ad5new, _, _, err := l.LookupLatest(addrs[5])
+ require.NoError(t, err)
+
+ proto := l.GenesisProto()
+ // Check balance and min balance requirement changes
+ require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
+ require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[1], // The manager, not the creator
+ ConfigAsset: expectedID,
+ }
+
+ eval = nextBlock(t, l)
+ txns(t, l, eval, &optOutTxn, &closeTxn)
+ endBlock(t, l, eval)
+
+ ad0final, _, _, err := l.LookupLatest(addrs[0])
+ require.NoError(t, err)
+ ad5final, _, _, err := l.LookupLatest(addrs[5])
+ require.NoError(t, err)
+ // Check we got our balance "back"
+ require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
+ require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
+}
+
+// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator
+// and do not cause any MaximumMinimumBalance problems
+func TestAppInsMinBalance(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusV30)
+ defer l.Close()
+
+ const appid basics.AppIndex = 1
+
+ maxAppsOptedIn := config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
+ require.Greater(t, maxAppsOptedIn, 0)
+ maxAppsCreated := config.Consensus[protocol.ConsensusV30].MaxAppsCreated
+ require.Greater(t, maxAppsCreated, 0)
+ maxLocalSchemaEntries := config.Consensus[protocol.ConsensusV30].MaxLocalSchemaEntries
+ require.Greater(t, maxLocalSchemaEntries, uint64(0))
+
+ txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
+ appsCreated := make(map[basics.Address]int, len(addrs)-1)
+
+ acctIdx := 0
+ for i := 0; i < maxAppsOptedIn; i++ {
+ creator := addrs[acctIdx]
+ createTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: creator,
+ ApprovalProgram: "int 1",
+ LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
+ Note: ledgertesting.RandomNote(),
+ }
+ txnsCreate = append(txnsCreate, &createTxn)
+ count := appsCreated[creator]
+ count++
+ appsCreated[creator] = count
+ if count == maxAppsCreated {
+ acctIdx++
+ }
+
+ optInTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[9],
+ ApplicationID: appid + basics.AppIndex(i),
+ OnCompletion: transactions.OptInOC,
+ }
+ txnsOptIn = append(txnsOptIn, &optInTxn)
+ }
+
+ eval := nextBlock(t, l)
+ txns1 := append(txnsCreate, txnsOptIn...)
+ txns(t, l, eval, txns1...)
+ vb := endBlock(t, l, eval)
+ mods := vb.Delta()
+ appAppResources := mods.Accts.GetAllAppResources()
+ appParamsCount := 0
+ appLocalStatesCount := 0
+ for _, ap := range appAppResources {
+ if ap.Params.Params != nil {
+ appParamsCount++
+ }
+ if ap.State.LocalState != nil {
+ appLocalStatesCount++
+ }
+ }
+ require.Equal(t, appLocalStatesCount, 50)
+ require.Equal(t, appParamsCount, 50)
+}
diff --git a/ledger/evalindexer.go b/ledger/evalindexer.go
index 5f11874c4..daefd3035 100644
--- a/ledger/evalindexer.go
+++ b/ledger/evalindexer.go
@@ -43,6 +43,7 @@ type indexerLedgerForEval interface {
GetAssetCreator(map[basics.AssetIndex]struct{}) (map[basics.AssetIndex]FoundAddress, error)
GetAppCreator(map[basics.AppIndex]struct{}) (map[basics.AppIndex]FoundAddress, error)
LatestTotals() (ledgercore.AccountTotals, error)
+ LookupKv(basics.Round, string) ([]byte, error)
BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, error)
}
@@ -149,6 +150,15 @@ func (l indexerLedgerConnector) lookupResource(round basics.Round, address basic
return accountResourceMap[address][Creatable{aidx, ctype}], nil
}
+// LookupKv delegates to the Ledger and marks the box key as touched for post-processing
+func (l indexerLedgerConnector) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ value, err := l.il.LookupKv(rnd, key)
+ if err != nil {
+ return value, fmt.Errorf("LookupKv() in indexerLedgerConnector internal error: %w", err)
+ }
+ return value, nil
+}
+
// GetCreatorForRound is part of LedgerForEvaluator interface.
func (l indexerLedgerConnector) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
var foundAddress FoundAddress
diff --git a/ledger/evalindexer_test.go b/ledger/evalindexer_test.go
index 23997afa5..77b7dbde7 100644
--- a/ledger/evalindexer_test.go
+++ b/ledger/evalindexer_test.go
@@ -107,6 +107,11 @@ func (il indexerLedgerForEvalImpl) GetAppCreator(map[basics.AppIndex]struct{}) (
return nil, errors.New("GetAppCreator() not implemented")
}
+func (il indexerLedgerForEvalImpl) LookupKv(basics.Round, string) ([]byte, error) {
+ // This function is unused.
+ return nil, errors.New("LookupKv() not implemented")
+}
+
func (il indexerLedgerForEvalImpl) LatestTotals() (totals ledgercore.AccountTotals, err error) {
_, totals, err = il.l.LatestTotals()
return
diff --git a/ledger/internal/appcow.go b/ledger/internal/appcow.go
index 503ddd06b..a04405db0 100644
--- a/ledger/internal/appcow.go
+++ b/ledger/internal/appcow.go
@@ -284,11 +284,6 @@ func (cb *roundCowState) DeallocateApp(addr basics.Address, aidx basics.AppIndex
return nil
}
-// GetKey looks for a key in {addr, aidx, global} storage
-func (cb *roundCowState) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- return cb.getKey(addr, aidx, global, key, accountIdx)
-}
-
// getKey looks for a key in {addr, aidx, global} storage
// This is hierarchical lookup: if the key not in this cow cache, then request parent and all way down to ledger
func (cb *roundCowState) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
@@ -339,8 +334,8 @@ func (cb *roundCowState) getKey(addr basics.Address, aidx basics.AppIndex, globa
return cb.lookupParent.getKey(addr, aidx, global, key, accountIdx)
}
-// SetKey creates a new key-value in {addr, aidx, global} storage
-func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
+// setKey creates a new key-value in {addr, aidx, global} storage
+func (cb *roundCowState) setKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
// Enforce maximum key length
if len(key) > cb.proto.MaxAppKeyLen {
return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cb.proto.MaxAppKeyLen)
@@ -368,7 +363,7 @@ func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, globa
}
// Fetch the old value + presence so we know how to update
- oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key, accountIdx)
+ oldValue, oldOk, err := cb.getKey(addr, aidx, global, key, accountIdx)
if err != nil {
return err
}
@@ -398,8 +393,8 @@ func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, globa
return lsd.checkCounts()
}
-// DelKey removes a key from {addr, aidx, global} storage
-func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
+// delKey removes a key from {addr, aidx, global} storage
+func (cb *roundCowState) delKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
// Check that account has allocated storage
allocated, err := cb.allocated(addr, aidx, global)
if err != nil {
@@ -411,7 +406,7 @@ func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, globa
}
// Fetch the old value + presence so we know how to update counts
- oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key, accountIdx)
+ oldValue, oldOk, err := cb.getKey(addr, aidx, global, key, accountIdx)
if err != nil {
return err
}
@@ -461,7 +456,7 @@ func MakeDebugBalances(l LedgerForCowBase, round basics.Round, proto protocol.Co
func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx basics.AppIndex, program []byte) (pass bool, evalDelta transactions.EvalDelta, err error) {
// Make a child cow to eval our program in
calf := cb.child(1)
- params.Ledger = newLogicLedger(calf)
+ params.Ledger = calf
// Eval the program
pass, cx, err := logic.EvalContract(program, gi, aidx, params)
@@ -487,7 +482,7 @@ func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx bas
// changes from this app and any inner called apps. Instead, we now keep
// the EvalDelta built as we go, in app evaluation. So just use it.
if cb.proto.LogicSigVersion < 6 {
- evalDelta, err = calf.BuildEvalDelta(aidx, &params.TxnGroup[gi].Txn)
+ evalDelta, err = calf.buildEvalDelta(aidx, &params.TxnGroup[gi].Txn)
if err != nil {
return false, transactions.EvalDelta{}, err
}
@@ -502,9 +497,9 @@ func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx bas
return pass, evalDelta, nil
}
-// BuildEvalDelta creates an EvalDelta by converting internal sdeltas
+// buildEvalDelta creates an EvalDelta by converting internal sdeltas
// into the (Global|Local)Delta fields.
-func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
+func (cb *roundCowState) buildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
// sdeltas
foundGlobal := false
for addr, smod := range cb.sdeltas {
diff --git a/ledger/internal/appcow_test.go b/ledger/internal/appcow_test.go
index 76b4f1f8d..10ff4cdbf 100644
--- a/ledger/internal/appcow_test.go
+++ b/ledger/internal/appcow_test.go
@@ -87,7 +87,11 @@ func (ml *emptyLedger) getKey(addr basics.Address, aidx basics.AppIndex, global
return basics.TealValue{}, false, nil
}
-func (ml *emptyLedger) txnCounter() uint64 {
+func (ml *emptyLedger) kvGet(key string) ([]byte, bool, error) {
+ return nil, false, nil
+}
+
+func (ml *emptyLedger) Counter() uint64 {
return 0
}
@@ -287,7 +291,7 @@ func TestCowStorage(t *testing.T) {
actuallyAllocated := st.allocated(aapp)
rkey := allKeys[rand.Intn(len(allKeys))]
rval := allValues[rand.Intn(len(allValues))]
- err := cow.SetKey(addr, sptr.aidx, sptr.global, rkey, rval, 0)
+ err := cow.setKey(addr, sptr.aidx, sptr.global, rkey, rval, 0)
if actuallyAllocated {
require.NoError(t, err)
err = st.set(aapp, rkey, rval)
@@ -302,7 +306,7 @@ func TestCowStorage(t *testing.T) {
if rand.Float32() < 0.25 {
actuallyAllocated := st.allocated(aapp)
rkey := allKeys[rand.Intn(len(allKeys))]
- err := cow.DelKey(addr, sptr.aidx, sptr.global, rkey, 0)
+ err := cow.delKey(addr, sptr.aidx, sptr.global, rkey, 0)
if actuallyAllocated {
require.NoError(t, err)
err = st.del(aapp, rkey)
@@ -344,7 +348,7 @@ func TestCowStorage(t *testing.T) {
tval, tok, err := st.get(aapp, key)
require.NoError(t, err)
- cval, cok, err := cow.GetKey(addr, sptr.aidx, sptr.global, key, 0)
+ cval, cok, err := cow.getKey(addr, sptr.aidx, sptr.global, key, 0)
require.NoError(t, err)
require.Equal(t, tok, cok)
require.Equal(t, tval, cval)
@@ -383,29 +387,29 @@ func TestCowBuildDelta(t *testing.T) {
cow := roundCowState{}
cow.sdeltas = make(map[basics.Address]map[storagePtr]*storageDelta)
txn := transactions.Transaction{}
- ed, err := cow.BuildEvalDelta(aidx, &txn)
+ ed, err := cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Empty(ed)
cow.sdeltas[creator] = make(map[storagePtr]*storageDelta)
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Empty(ed)
// check global delta
cow.sdeltas[creator][storagePtr{aidx, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(1, &txn)
+ ed, err = cow.buildEvalDelta(1, &txn)
a.Error(err)
a.Contains(err.Error(), "found storage delta for different app")
a.Empty(ed)
cow.sdeltas[creator][storagePtr{aidx, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(transactions.EvalDelta{GlobalDelta: basics.StateDelta{}}, ed)
cow.sdeltas[creator][storagePtr{aidx + 1, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.Error(err)
a.Contains(err.Error(), "found storage delta for different app")
a.Empty(ed)
@@ -413,7 +417,7 @@ func TestCowBuildDelta(t *testing.T) {
delete(cow.sdeltas[creator], storagePtr{aidx + 1, true})
cow.sdeltas[sender] = make(map[storagePtr]*storageDelta)
cow.sdeltas[sender][storagePtr{aidx, true}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.Error(err)
a.Contains(err.Error(), "found more than one global delta")
a.Empty(ed)
@@ -422,7 +426,7 @@ func TestCowBuildDelta(t *testing.T) {
delete(cow.sdeltas[sender], storagePtr{aidx, true})
cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.Error(err)
a.Contains(err.Error(), "invalid Account reference ")
a.Empty(ed)
@@ -432,7 +436,7 @@ func TestCowBuildDelta(t *testing.T) {
cow.mods.Hdr = &bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{CurrentProtocol: protocol.ConsensusV25},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -445,7 +449,7 @@ func TestCowBuildDelta(t *testing.T) {
// check v27 behavior for empty deltas
cow.mods.Hdr = nil
cow.proto = config.Consensus[protocol.ConsensusCurrentVersion]
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -468,7 +472,7 @@ func TestCowBuildDelta(t *testing.T) {
},
},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -508,7 +512,7 @@ func TestCowBuildDelta(t *testing.T) {
},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -542,7 +546,7 @@ func TestCowBuildDelta(t *testing.T) {
},
},
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -573,7 +577,7 @@ func TestCowBuildDelta(t *testing.T) {
},
accountIdx: 1,
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -601,7 +605,7 @@ func TestCowBuildDelta(t *testing.T) {
},
accountIdx: 1,
}
- ed, err = cow.BuildEvalDelta(aidx, &txn)
+ ed, err = cow.buildEvalDelta(aidx, &txn)
a.NoError(err)
a.Equal(
transactions.EvalDelta{
@@ -1067,8 +1071,8 @@ func TestCowGetters(t *testing.T) {
ts := int64(11223344)
c.mods.PrevTimestamp = ts
- a.Equal(round, c.round())
- a.Equal(ts, c.prevTimestamp())
+ a.Equal(round, c.Round())
+ a.Equal(ts, c.PrevTimestamp())
}
func TestCowGet(t *testing.T) {
@@ -1108,7 +1112,7 @@ func TestCowGetKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}},
}
- _, ok, err := c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err := c.getKey(addr, aidx, true, "gkey", 0)
a.Error(err)
a.False(ok)
a.Contains(err.Error(), "cannot fetch key")
@@ -1116,10 +1120,10 @@ func TestCowGetKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: allocAction}},
}
- _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err = c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.False(ok)
- _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err = c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.False(ok)
@@ -1132,7 +1136,7 @@ func TestCowGetKey(t *testing.T) {
},
},
}
- _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0)
+ _, ok, err = c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.False(ok)
@@ -1144,7 +1148,7 @@ func TestCowGetKey(t *testing.T) {
},
},
}
- val, ok, err := c.GetKey(addr, aidx, true, "gkey", 0)
+ val, ok, err := c.getKey(addr, aidx, true, "gkey", 0)
a.NoError(err)
a.True(ok)
a.Equal(tv, val)
@@ -1159,14 +1163,14 @@ func TestCowGetKey(t *testing.T) {
},
}
- val, ok, err = c.GetKey(addr, aidx, false, "lkey", 0)
+ val, ok, err = c.getKey(addr, aidx, false, "lkey", 0)
a.NoError(err)
a.True(ok)
a.Equal(tv, val)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.GetKey(ledgertesting.RandomAddress(), aidx, false, "lkey", 0) })
- a.Panics(func() { c.GetKey(addr, aidx+1, false, "lkey", 0) })
+ a.Panics(func() { c.getKey(ledgertesting.RandomAddress(), aidx, false, "lkey", 0) })
+ a.Panics(func() { c.getKey(addr, aidx+1, false, "lkey", 0) })
}
func TestCowSetKey(t *testing.T) {
@@ -1183,14 +1187,14 @@ func TestCowSetKey(t *testing.T) {
key := strings.Repeat("key", 100)
val := "val"
tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err := c.SetKey(addr, aidx, true, key, tv, 0)
+ err := c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "key too long")
key = "key"
val = strings.Repeat("val", 100)
tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "value too long")
@@ -1199,7 +1203,7 @@ func TestCowSetKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}},
}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "cannot set key")
@@ -1215,13 +1219,13 @@ func TestCowSetKey(t *testing.T) {
},
},
}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "exceeds schema bytes")
counts = basics.StateSchema{NumUint: 1}
maxCounts = basics.StateSchema{NumByteSlice: 1}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "exceeds schema integer")
@@ -1236,12 +1240,12 @@ func TestCowSetKey(t *testing.T) {
},
},
}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.NoError(err)
counts = basics.StateSchema{NumUint: 1}
maxCounts = basics.StateSchema{NumByteSlice: 1, NumUint: 1}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.NoError(err)
// check local
@@ -1256,12 +1260,12 @@ func TestCowSetKey(t *testing.T) {
},
},
}
- err = c.SetKey(addr1, aidx, false, key, tv, 0)
+ err = c.setKey(addr1, aidx, false, key, tv, 0)
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.SetKey(ledgertesting.RandomAddress(), aidx, false, key, tv, 0) })
- a.Panics(func() { c.SetKey(addr, aidx+1, false, key, tv, 0) })
+ a.Panics(func() { c.setKey(ledgertesting.RandomAddress(), aidx, false, key, tv, 0) })
+ a.Panics(func() { c.setKey(addr, aidx+1, false, key, tv, 0) })
}
func TestCowSetKeyVFuture(t *testing.T) {
@@ -1280,21 +1284,21 @@ func TestCowSetKeyVFuture(t *testing.T) {
key := strings.Repeat("key", 100)
val := "val"
tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err := c.SetKey(addr, aidx, true, key, tv, 0)
+ err := c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "key too long")
key = "key"
val = strings.Repeat("val", 100)
tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "value too long")
key = strings.Repeat("k", protoF.MaxAppKeyLen)
val = strings.Repeat("v", protoF.MaxAppSumKeyValueLens-len(key)+1)
tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val}
- err = c.SetKey(addr, aidx, true, key, tv, 0)
+ err = c.setKey(addr, aidx, true, key, tv, 0)
a.Error(err)
a.Contains(err.Error(), "key/value total too long")
}
@@ -1362,7 +1366,7 @@ func TestCowDelKey(t *testing.T) {
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}},
}
- err := c.DelKey(addr, aidx, true, key, 0)
+ err := c.delKey(addr, aidx, true, key, 0)
a.Error(err)
a.Contains(err.Error(), "cannot del key")
@@ -1378,7 +1382,7 @@ func TestCowDelKey(t *testing.T) {
},
},
}
- err = c.DelKey(addr, aidx, true, key, 0)
+ err = c.delKey(addr, aidx, true, key, 0)
a.NoError(err)
c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{
@@ -1391,10 +1395,10 @@ func TestCowDelKey(t *testing.T) {
},
},
}
- err = c.DelKey(addr, aidx, false, key, 0)
+ err = c.delKey(addr, aidx, false, key, 0)
a.NoError(err)
// ensure other requests go down to roundCowParent
- a.Panics(func() { c.DelKey(ledgertesting.RandomAddress(), aidx, false, key, 0) })
- a.Panics(func() { c.DelKey(addr, aidx+1, false, key, 0) })
+ a.Panics(func() { c.delKey(ledgertesting.RandomAddress(), aidx, false, key, 0) })
+ a.Panics(func() { c.delKey(addr, aidx+1, false, key, 0) })
}
diff --git a/ledger/internal/applications.go b/ledger/internal/applications.go
index 27d306ac9..5671515e4 100644
--- a/ledger/internal/applications.go
+++ b/ledger/internal/applications.go
@@ -21,55 +21,25 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
-type logicLedger struct {
- cow cowForLogicLedger
-}
-
-type cowForLogicLedger interface {
- Get(addr basics.Address, withPendingRewards bool) (ledgercore.AccountData, error)
- GetAppParams(addr basics.Address, aidx basics.AppIndex) (basics.AppParams, bool, error)
- GetAssetParams(addr basics.Address, aidx basics.AssetIndex) (basics.AssetParams, bool, error)
- GetAssetHolding(addr basics.Address, aidx basics.AssetIndex) (basics.AssetHolding, bool, error)
- GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
- GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error)
- BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (transactions.EvalDelta, error)
-
- SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error
- DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error
-
- round() basics.Round
- prevTimestamp() int64
- allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
- txnCounter() uint64
- incTxnCount()
-
- // The method should use the txtail to ensure MaxTxnLife+1 headers back are available
- blockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error)
-}
+/* This file adds functions to roundCowState that make it more palatable for use
+ outside of the ledger package. The LedgerForLogic interface expects them. */
-func newLogicLedger(cow cowForLogicLedger) *logicLedger {
- return &logicLedger{
- cow: cow,
- }
-}
-
-func (al *logicLedger) AccountData(addr basics.Address) (ledgercore.AccountData, error) {
- record, err := al.cow.Get(addr, true)
+func (cs *roundCowState) AccountData(addr basics.Address) (ledgercore.AccountData, error) {
+ record, err := cs.Get(addr, true)
if err != nil {
return ledgercore.AccountData{}, err
}
return record, nil
}
-func (al *logicLedger) Authorizer(addr basics.Address) (basics.Address, error) {
- record, err := al.cow.Get(addr, false) // pending rewards unneeded
+func (cs *roundCowState) Authorizer(addr basics.Address) (basics.Address, error) {
+ record, err := cs.Get(addr, false) // pending rewards unneeded
if err != nil {
return basics.Address{}, err
}
@@ -79,25 +49,24 @@ func (al *logicLedger) Authorizer(addr basics.Address) (basics.Address, error) {
return addr, nil
}
-func (al *logicLedger) AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error) {
+func (cs *roundCowState) AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error) {
// Fetch the requested balance record
- holding, ok, err := al.cow.GetAssetHolding(addr, assetIdx)
+ holding, ok, err := cs.GetAssetHolding(addr, assetIdx)
if err != nil {
return basics.AssetHolding{}, err
}
// Ensure we have the requested holding
if !ok {
- err = fmt.Errorf("account %s has not opted in to asset %d", addr.String(), assetIdx)
- return basics.AssetHolding{}, err
+ return basics.AssetHolding{}, fmt.Errorf("account %s has not opted in to asset %d", addr, assetIdx)
}
return holding, nil
}
-func (al *logicLedger) AssetParams(assetIdx basics.AssetIndex) (basics.AssetParams, basics.Address, error) {
+func (cs *roundCowState) AssetParams(assetIdx basics.AssetIndex) (basics.AssetParams, basics.Address, error) {
// Find asset creator
- creator, ok, err := al.cow.GetCreator(basics.CreatableIndex(assetIdx), basics.AssetCreatable)
+ creator, ok, err := cs.GetCreator(basics.CreatableIndex(assetIdx), basics.AssetCreatable)
if err != nil {
return basics.AssetParams{}, creator, err
}
@@ -108,23 +77,22 @@ func (al *logicLedger) AssetParams(assetIdx basics.AssetIndex) (basics.AssetPara
}
// Fetch the requested balance record
- params, ok, err := al.cow.GetAssetParams(creator, assetIdx)
+ params, ok, err := cs.GetAssetParams(creator, assetIdx)
if err != nil {
return basics.AssetParams{}, creator, err
}
// Ensure account created the requested asset
if !ok {
- err = fmt.Errorf("account %s has not created asset %d", creator, assetIdx)
- return basics.AssetParams{}, creator, err
+ return basics.AssetParams{}, creator, fmt.Errorf("account %s has not created asset %d", creator, assetIdx)
}
return params, creator, nil
}
-func (al *logicLedger) AppParams(appIdx basics.AppIndex) (basics.AppParams, basics.Address, error) {
+func (cs *roundCowState) AppParams(appIdx basics.AppIndex) (basics.AppParams, basics.Address, error) {
// Find app creator
- creator, ok, err := al.cow.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
+ creator, ok, err := cs.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
if err != nil {
return basics.AppParams{}, creator, err
}
@@ -135,51 +103,42 @@ func (al *logicLedger) AppParams(appIdx basics.AppIndex) (basics.AppParams, basi
}
// Fetch the requested balance record
- params, ok, err := al.cow.GetAppParams(creator, appIdx)
+ params, ok, err := cs.GetAppParams(creator, appIdx)
if err != nil {
return basics.AppParams{}, creator, err
}
// Ensure account created the requested app
if !ok {
- err = fmt.Errorf("account %s has not created app %d", creator, appIdx)
- return basics.AppParams{}, creator, err
+ return basics.AppParams{}, creator, fmt.Errorf("account %s has not created app %d", creator, appIdx)
}
return params, creator, nil
}
-func (al *logicLedger) Round() basics.Round {
- return al.cow.round()
+func (cs *roundCowState) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
+ return cs.allocated(addr, appIdx, false)
}
-func (al *logicLedger) LatestTimestamp() int64 {
- return al.cow.prevTimestamp()
+func (cs *roundCowState) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) {
+ return cs.getKey(addr, appIdx, false, key, accountIdx)
}
-func (al *logicLedger) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
- return al.cow.blockHdrCached(round)
+func (cs *roundCowState) SetLocal(addr basics.Address, appIdx basics.AppIndex, key string, value basics.TealValue, accountIdx uint64) error {
+ return cs.setKey(addr, appIdx, false, key, value, accountIdx)
}
-func (al *logicLedger) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
- return al.cow.allocated(addr, appIdx, false)
+func (cs *roundCowState) BlockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
+ return cs.blockHdrCached(round)
}
-func (al *logicLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- return al.cow.GetKey(addr, appIdx, false, key, accountIdx)
+func (cs *roundCowState) DelLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) error {
+ return cs.delKey(addr, appIdx, false, key, accountIdx)
}
-func (al *logicLedger) SetLocal(addr basics.Address, appIdx basics.AppIndex, key string, value basics.TealValue, accountIdx uint64) error {
- return al.cow.SetKey(addr, appIdx, false, key, value, accountIdx)
-}
-
-func (al *logicLedger) DelLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) error {
- return al.cow.DelKey(addr, appIdx, false, key, accountIdx)
-}
-
-func (al *logicLedger) fetchAppCreator(appIdx basics.AppIndex) (basics.Address, error) {
+func (cs *roundCowState) fetchAppCreator(appIdx basics.AppIndex) (basics.Address, error) {
// Fetch the application creator
- addr, ok, err := al.cow.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
+ addr, ok, err := cs.GetCreator(basics.CreatableIndex(appIdx), basics.AppCreatable)
if err != nil {
return basics.Address{}, err
@@ -190,52 +149,158 @@ func (al *logicLedger) fetchAppCreator(appIdx basics.AppIndex) (basics.Address,
return addr, nil
}
-func (al *logicLedger) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) {
- addr, err := al.fetchAppCreator(appIdx)
+func (cs *roundCowState) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) {
+ creator, err := cs.fetchAppCreator(appIdx)
if err != nil {
return basics.TealValue{}, false, err
}
- return al.cow.GetKey(addr, appIdx, true, key, 0)
+ return cs.getKey(creator, appIdx, true, key, 0)
}
-func (al *logicLedger) SetGlobal(appIdx basics.AppIndex, key string, value basics.TealValue) error {
- creator, err := al.fetchAppCreator(appIdx)
+func (cs *roundCowState) SetGlobal(appIdx basics.AppIndex, key string, value basics.TealValue) error {
+ creator, err := cs.fetchAppCreator(appIdx)
if err != nil {
return err
}
- return al.cow.SetKey(creator, appIdx, true, key, value, 0)
+ return cs.setKey(creator, appIdx, true, key, value, 0)
}
-func (al *logicLedger) DelGlobal(appIdx basics.AppIndex, key string) error {
- creator, err := al.fetchAppCreator(appIdx)
+func (cs *roundCowState) DelGlobal(appIdx basics.AppIndex, key string) error {
+ creator, err := cs.fetchAppCreator(appIdx)
if err != nil {
return err
}
- return al.cow.DelKey(creator, appIdx, true, key, 0)
+ return cs.delKey(creator, appIdx, true, key, 0)
}
-func (al *logicLedger) balances() (apply.Balances, error) {
- balances, ok := al.cow.(apply.Balances)
+func (cs *roundCowState) kvGet(key string) ([]byte, bool, error) {
+ value, ok := cs.mods.KvMods[key]
if !ok {
- return nil, fmt.Errorf("cannot get a Balances object from %v", al)
+ return cs.lookupParent.kvGet(key)
}
- return balances, nil
+ // If value is nil, it's a marker for a local deletion
+ return value.Data, value.Data != nil, nil
}
-func (al *logicLedger) Perform(gi int, ep *logic.EvalParams) error {
- txn := &ep.TxnGroup[gi]
- balances, err := al.balances()
+func (cb *roundCowBase) kvGet(key string) ([]byte, bool, error) {
+ value, ok := cb.kvStore[key]
+ if !ok {
+ v, err := cb.l.LookupKv(cb.rnd, key)
+ if err != nil {
+ return nil, false, err
+ }
+ value = v
+ cb.kvStore[key] = value
+ }
+ // If value is nil, it caches a lookup that returned nothing.
+ return value, value != nil, nil
+}
+
+func (cs *roundCowState) kvPut(key string, value []byte) error {
+ cs.mods.KvMods[key] = ledgercore.KvValueDelta{Data: value}
+ return nil
+}
+
+func (cs *roundCowState) kvDel(key string) error {
+ cs.mods.KvMods[key] = ledgercore.KvValueDelta{Data: nil}
+ return nil
+}
+
+func (cs *roundCowState) NewBox(appIdx basics.AppIndex, key string, value []byte, appAddr basics.Address) error {
+ // Use same limit on key length as for global/local storage
+ if len(key) > cs.proto.MaxAppKeyLen {
+ return fmt.Errorf("name too long: length was %d, maximum is %d", len(key), cs.proto.MaxAppKeyLen)
+ }
+ // This rule is NOT like global/local storage, but seems like it will limit
+ // confusion, since these are standalone entities.
+ if len(key) == 0 {
+ return fmt.Errorf("box names may not be zero length")
+ }
+
+ size := uint64(len(value))
+ if size > cs.proto.MaxBoxSize {
+ return fmt.Errorf("box size too large: %d, maximum is %d", size, cs.proto.MaxBoxSize)
+ }
+
+ fullKey := logic.MakeBoxKey(appIdx, key)
+ _, exists, err := cs.kvGet(fullKey)
if err != nil {
return err
}
+ if exists {
+ return fmt.Errorf("attempt to recreate %s", key)
+ }
+
+ record, err := cs.Get(appAddr, false)
+ if err != nil {
+ return err
+ }
+ record.TotalBoxes = basics.AddSaturate(record.TotalBoxes, 1)
+ record.TotalBoxBytes = basics.AddSaturate(record.TotalBoxBytes, uint64(len(key))+size)
+ err = cs.Put(appAddr, record)
+ if err != nil {
+ return err
+ }
+
+ return cs.kvPut(fullKey, value)
+}
+
+func (cs *roundCowState) GetBox(appIdx basics.AppIndex, key string) ([]byte, bool, error) {
+ fullKey := logic.MakeBoxKey(appIdx, key)
+ return cs.kvGet(fullKey)
+}
+
+func (cs *roundCowState) SetBox(appIdx basics.AppIndex, key string, value []byte) error {
+ fullKey := logic.MakeBoxKey(appIdx, key)
+ old, ok, err := cs.kvGet(fullKey)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return fmt.Errorf("box %s does not exist for %d", key, appIdx)
+ }
+ if len(old) != len(value) {
+ return fmt.Errorf("box %s is wrong size old:%d != new:%d",
+ key, len(old), len(value))
+ }
+ return cs.kvPut(fullKey, value)
+}
+
+func (cs *roundCowState) DelBox(appIdx basics.AppIndex, key string, appAddr basics.Address) (bool, error) {
+ fullKey := logic.MakeBoxKey(appIdx, key)
+
+ value, ok, err := cs.kvGet(fullKey)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+
+ record, err := cs.Get(appAddr, false)
+ if err != nil {
+ return false, err
+ }
+ record.TotalBoxes = basics.SubSaturate(record.TotalBoxes, 1)
+ record.TotalBoxBytes = basics.SubSaturate(record.TotalBoxBytes, uint64(len(key)+len(value)))
+ err = cs.Put(appAddr, record)
+ if err != nil {
+ return false, err
+ }
+
+ return true, cs.kvDel(fullKey)
+}
+
+func (cs *roundCowState) Perform(gi int, ep *logic.EvalParams) error {
+ txn := &ep.TxnGroup[gi]
// move fee to pool
- err = balances.Move(txn.Txn.Sender, ep.Specials.FeeSink, txn.Txn.Fee, &txn.ApplyData.SenderRewards, nil)
+ err := cs.Move(txn.Txn.Sender, ep.Specials.FeeSink, txn.Txn.Fee, &txn.ApplyData.SenderRewards, nil)
if err != nil {
return err
}
- err = apply.Rekey(balances, &txn.Txn)
+ err = apply.Rekey(cs, &txn.Txn)
if err != nil {
return err
}
@@ -249,29 +314,29 @@ func (al *logicLedger) Perform(gi int, ep *logic.EvalParams) error {
// ahead of processing, we'd have to do ours *after* so that we'd
// use the next id. So either way, this would seem backwards at
// first glance.
- al.cow.incTxnCount()
+ cs.incTxnCount()
switch txn.Txn.Type {
case protocol.PaymentTx:
- err = apply.Payment(txn.Txn.PaymentTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData)
+ err = apply.Payment(txn.Txn.PaymentTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData)
case protocol.KeyRegistrationTx:
- err = apply.Keyreg(txn.Txn.KeyregTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData,
- al.Round())
+ err = apply.Keyreg(txn.Txn.KeyregTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData,
+ cs.Round())
case protocol.AssetConfigTx:
- err = apply.AssetConfig(txn.Txn.AssetConfigTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData,
- al.cow.txnCounter())
+ err = apply.AssetConfig(txn.Txn.AssetConfigTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData,
+ cs.Counter())
case protocol.AssetTransferTx:
- err = apply.AssetTransfer(txn.Txn.AssetTransferTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData)
+ err = apply.AssetTransfer(txn.Txn.AssetTransferTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData)
case protocol.AssetFreezeTx:
- err = apply.AssetFreeze(txn.Txn.AssetFreezeTxnFields, txn.Txn.Header, balances, *ep.Specials, &txn.ApplyData)
+ err = apply.AssetFreeze(txn.Txn.AssetFreezeTxnFields, txn.Txn.Header, cs, *ep.Specials, &txn.ApplyData)
case protocol.ApplicationCallTx:
- err = apply.ApplicationCall(txn.Txn.ApplicationCallTxnFields, txn.Txn.Header, balances, &txn.ApplyData,
- gi, ep, al.cow.txnCounter())
+ err = apply.ApplicationCall(txn.Txn.ApplicationCallTxnFields, txn.Txn.Header, cs, &txn.ApplyData,
+ gi, ep, cs.Counter())
default:
err = fmt.Errorf("%s tx in AVM", txn.Txn.Type)
@@ -287,9 +352,4 @@ func (al *logicLedger) Perform(gi int, ep *logic.EvalParams) error {
// modifiedAccounts().
return nil
-
-}
-
-func (al *logicLedger) Counter() uint64 {
- return al.cow.txnCounter()
}
diff --git a/ledger/internal/applications_test.go b/ledger/internal/applications_test.go
deleted file mode 100644
index ea28712c9..000000000
--- a/ledger/internal/applications_test.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package internal
-
-import (
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- ledgertesting "github.com/algorand/go-algorand/ledger/testing"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-type creatableLocator struct {
- cidx basics.CreatableIndex
- ctype basics.CreatableType
-}
-type storeLocator struct {
- addr basics.Address
- aidx basics.AppIndex
- global bool
-}
-type mockCowForLogicLedger struct {
- rnd basics.Round
- ts int64
- cr map[creatableLocator]basics.Address
- brs map[basics.Address]basics.AccountData
- stores map[storeLocator]basics.TealKeyValue
- txc uint64
-}
-
-func (c *mockCowForLogicLedger) Get(addr basics.Address, withPendingRewards bool) (ledgercore.AccountData, error) {
- acct, err := c.getAccount(addr, withPendingRewards)
- return ledgercore.ToAccountData(acct), err
-}
-
-func (c *mockCowForLogicLedger) getAccount(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
- br, ok := c.brs[addr]
- if !ok {
- return basics.AccountData{}, fmt.Errorf("addr %s not in mock cow", addr.String())
- }
- return br, nil
-}
-
-func (c *mockCowForLogicLedger) MinBalance(addr basics.Address, proto *config.ConsensusParams) (res basics.MicroAlgos, err error) {
- br, ok := c.brs[addr]
- if !ok {
- return basics.MicroAlgos{}, fmt.Errorf("addr %s not in mock cow", addr.String())
- }
- return br.MinBalance(proto), nil
-}
-
-func (c *mockCowForLogicLedger) GetAppParams(addr basics.Address, aidx basics.AppIndex) (ret basics.AppParams, ok bool, err error) {
- acct, err := c.getAccount(addr, false)
- if err != nil {
- return
- }
- ret, ok = acct.AppParams[aidx]
- return
-}
-func (c *mockCowForLogicLedger) GetAssetParams(addr basics.Address, aidx basics.AssetIndex) (ret basics.AssetParams, ok bool, err error) {
- acct, err := c.getAccount(addr, false)
- if err != nil {
- return
- }
- ret, ok = acct.AssetParams[aidx]
- return
-}
-func (c *mockCowForLogicLedger) GetAssetHolding(addr basics.Address, aidx basics.AssetIndex) (ret basics.AssetHolding, ok bool, err error) {
- acct, err := c.getAccount(addr, false)
- if err != nil {
- return
- }
- ret, ok = acct.Assets[aidx]
- return
-}
-
-func (c *mockCowForLogicLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
- addr, found := c.cr[creatableLocator{cidx, ctype}]
- return addr, found, nil
-}
-
-func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return basics.TealValue{}, false, fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- tv, found := kv[key]
- return tv, found, nil
-}
-
-func (c *mockCowForLogicLedger) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta transactions.EvalDelta, err error) {
- return transactions.EvalDelta{}, nil
-}
-
-func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- kv[key] = value
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error {
- kv, ok := c.stores[storeLocator{addr, aidx, global}]
- if !ok {
- return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global)
- }
- delete(kv, key)
- c.stores[storeLocator{addr, aidx, global}] = kv
- return nil
-}
-
-func (c *mockCowForLogicLedger) round() basics.Round {
- return c.rnd
-}
-
-func (c *mockCowForLogicLedger) prevTimestamp() int64 {
- return c.ts
-}
-
-func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) {
- _, found := c.stores[storeLocator{addr, aidx, global}]
- return found, nil
-}
-
-func (c *mockCowForLogicLedger) txnCounter() uint64 {
- return c.txc
-}
-
-func (c *mockCowForLogicLedger) incTxnCount() {
- c.txc++
-}
-
-// No unit tests care about this yet, so this is a lame implementation
-func (c *mockCowForLogicLedger) blockHdrCached(round basics.Round) (bookkeeping.BlockHeader, error) {
- return bookkeeping.BlockHeader{Round: round}, nil
-}
-
-func newCowMock(creatables []modsData) *mockCowForLogicLedger {
- var m mockCowForLogicLedger
- m.cr = make(map[creatableLocator]basics.Address, len(creatables))
- for _, e := range creatables {
- m.cr[creatableLocator{e.cidx, e.ctype}] = e.addr
- }
- return &m
-}
-
-func TestLogicLedgerMake(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- c := &mockCowForLogicLedger{}
- l := newLogicLedger(c)
- a.NotNil(l)
- a.Equal(c, l.cow)
-}
-
-func TestLogicLedgerBalances(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- c := newCowMock(nil)
- l := newLogicLedger(c)
- a.NotNil(l)
-
- addr1 := ledgertesting.RandomAddress()
- ble := basics.MicroAlgos{Raw: 100}
- c.brs = map[basics.Address]basics.AccountData{addr1: {MicroAlgos: ble}}
- acct, err := l.AccountData(addr1)
- a.NoError(err)
- a.Equal(ble, acct.MicroAlgos)
-}
-
-func TestLogicLedgerGetters(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{{addr, basics.CreatableIndex(aidx), basics.AppCreatable}})
- l := newLogicLedger(c)
- a.NotNil(l)
-
- round := basics.Round(1234)
- c.rnd = round
- ts := int64(11223344)
- c.ts = ts
-
- addr1 := ledgertesting.RandomAddress()
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {}}
- a.Equal(round, l.Round())
- a.Equal(ts, l.LatestTimestamp())
- a.True(l.OptedIn(addr1, aidx))
- a.False(l.OptedIn(addr, aidx))
-}
-
-func TestLogicLedgerAsset(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- addr1 := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- _, _, err := l.AssetParams(basics.AssetIndex(aidx))
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("asset %d does not exist", aidx))
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}}},
- }
-
- ap, creator, err := l.AssetParams(assetIdx)
- a.NoError(err)
- a.Equal(addr1, creator)
- a.Equal(uint64(1000), ap.Total)
-
- _, err = l.AssetHolding(addr1, assetIdx)
- a.Error(err)
- a.Contains(err.Error(), "has not opted in to asset")
-
- c.brs = map[basics.Address]basics.AccountData{
- addr1: {
- AssetParams: map[basics.AssetIndex]basics.AssetParams{assetIdx: {Total: 1000}},
- Assets: map[basics.AssetIndex]basics.AssetHolding{assetIdx: {Amount: 99}},
- },
- }
-
- ah, err := l.AssetHolding(addr1, assetIdx)
- a.NoError(err)
- a.Equal(uint64(99), ah.Amount)
-}
-
-func TestLogicLedgerGetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- addr1 := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- assetIdx := basics.AssetIndex(2)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- {addr1, basics.CreatableIndex(assetIdx), basics.AssetCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- _, ok, err := l.GetGlobal(basics.AppIndex(assetIdx), "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("app %d does not exist", assetIdx))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx + 1, true}: {"gkey": tv}}
- val, ok, err := l.GetGlobal(aidx, "gkey")
- a.Error(err)
- a.False(ok)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- val, ok, err = l.GetGlobal(aidx, "gkey")
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- val, ok, err = l.GetLocal(addr, aidx, "lkey", 0)
- a.NoError(err)
- a.True(ok)
- a.Equal(tv, val)
-}
-
-func TestLogicLedgerSetKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- err := l.SetGlobal(aidx, "gkey", tv)
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv2 := basics.TealValue{Type: basics.TealUintType, Uint: 2}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.SetGlobal(aidx, "gkey", tv2)
- a.NoError(err)
-
- // check local
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}}
- err = l.SetLocal(addr, aidx, "lkey", tv2, 0)
- a.NoError(err)
-}
-
-func TestLogicLedgerDelKey(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- addr := ledgertesting.RandomAddress()
- aidx := basics.AppIndex(1)
- c := newCowMock([]modsData{
- {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
- })
- l := newLogicLedger(c)
- a.NotNil(l)
-
- err := l.DelGlobal(aidx, "gkey")
- a.Error(err)
- a.Contains(err.Error(), fmt.Sprintf("no store for (%s %d %v) in mock cow", addr, aidx, true))
-
- tv := basics.TealValue{Type: basics.TealUintType, Uint: 1}
- c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, true}: {"gkey": tv}}
- err = l.DelGlobal(aidx, "gkey")
- a.NoError(err)
-
- addr1 := ledgertesting.RandomAddress()
- c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {"lkey": tv}}
- err = l.DelLocal(addr1, aidx, "lkey", 0)
- a.NoError(err)
-}
diff --git a/ledger/internal/cow.go b/ledger/internal/cow.go
index 0baaa9fc6..0d6de6498 100644
--- a/ledger/internal/cow.go
+++ b/ledger/internal/cow.go
@@ -51,7 +51,7 @@ type roundCowParent interface {
lookupAssetHolding(addr basics.Address, aidx basics.AssetIndex, cacheOnly bool) (ledgercore.AssetHoldingDelta, bool, error)
checkDup(basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
- txnCounter() uint64
+ Counter() uint64
getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
GetStateProofNextRound() basics.Round
BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error)
@@ -62,6 +62,8 @@ type roundCowParent interface {
getStorageLimits(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error)
allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error)
+
+ kvGet(key string) ([]byte, bool, error)
}
type roundCowState struct {
@@ -76,7 +78,7 @@ type roundCowState struct {
// storage deltas populated as side effects of AppCall transaction
// 1. Opt-in/Close actions (see Allocate/Deallocate)
- // 2. Stateful TEAL evaluation (see SetKey/DelKey)
+ // 2. Stateful TEAL evaluation (see setKey/delKey)
// must be incorporated into mods.accts before passing deltas forward
sdeltas map[basics.Address]map[storagePtr]*storageDelta
@@ -105,7 +107,7 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, proto conf
 // local delta has account index as it is specified in TEAL either in set/del key or prior get key calls.
// The predicate is that complex in order to cover all the block seen on testnet and mainnet.
compatibilityMode := (hdr.CurrentProtocol == protocol.ConsensusV24) &&
- (hdr.NextProtocol != protocol.ConsensusV26 || (hdr.UpgradePropose == "" && hdr.UpgradeApprove == false && hdr.Round < hdr.UpgradeState.NextProtocolVoteBefore))
+ (hdr.NextProtocol != protocol.ConsensusV26 || (hdr.UpgradePropose == "" && !hdr.UpgradeApprove && hdr.Round < hdr.UpgradeState.NextProtocolVoteBefore))
if compatibilityMode {
cb.compatibilityMode = true
cb.compatibilityGetKeyCache = make(map[basics.Address]map[storagePtr]uint64)
@@ -126,6 +128,17 @@ func (cb *roundCowState) deltas() ledgercore.StateDelta {
}
}
}
+
+ // Populate old values by looking through parent
+ for key, value := range cb.mods.KvMods {
+ old, _, err := cb.lookupParent.kvGet(key) // Because of how boxes are prefetched, value will be cached
+ if err != nil {
+ panic(fmt.Errorf("Error looking up %v : %w", key, err))
+ }
+ value.OldData = old
+ cb.mods.KvMods[key] = value
+ }
+
return cb.mods
}
@@ -133,11 +146,11 @@ func (cb *roundCowState) rewardsLevel() uint64 {
return cb.mods.Hdr.RewardsLevel
}
-func (cb *roundCowState) round() basics.Round {
+func (cb *roundCowState) Round() basics.Round {
return cb.mods.Hdr.Round
}
-func (cb *roundCowState) prevTimestamp() int64 {
+func (cb *roundCowState) PrevTimestamp() int64 {
return cb.mods.PrevTimestamp
}
@@ -213,8 +226,8 @@ func (cb *roundCowState) checkDup(firstValid, lastValid basics.Round, txid trans
return cb.lookupParent.checkDup(firstValid, lastValid, txid, txl)
}
-func (cb *roundCowState) txnCounter() uint64 {
- return cb.lookupParent.txnCounter() + cb.txnCount
+func (cb *roundCowState) Counter() uint64 {
+ return cb.lookupParent.Counter() + cb.txnCount
}
func (cb *roundCowState) GetStateProofNextRound() basics.Round {
@@ -294,6 +307,10 @@ func (cb *roundCowState) commitToParent() {
}
}
cb.commitParent.mods.StateProofNext = cb.mods.StateProofNext
+
+ for key, value := range cb.mods.KvMods {
+ cb.commitParent.mods.KvMods[key] = value
+ }
}
func (cb *roundCowState) modifiedAccounts() []basics.Address {
diff --git a/ledger/internal/cow_test.go b/ledger/internal/cow_test.go
index 32e6a36e4..bd942d63e 100644
--- a/ledger/internal/cow_test.go
+++ b/ledger/internal/cow_test.go
@@ -85,7 +85,11 @@ func (ml *mockLedger) getKey(addr basics.Address, aidx basics.AppIndex, global b
return basics.TealValue{}, false, nil
}
-func (ml *mockLedger) txnCounter() uint64 {
+func (ml *mockLedger) kvGet(key string) ([]byte, bool, error) {
+ return nil, false, nil
+}
+
+func (ml *mockLedger) Counter() uint64 {
return 0
}
diff --git a/ledger/internal/eval.go b/ledger/internal/eval.go
index f2750d8a0..a149c7d01 100644
--- a/ledger/internal/eval.go
+++ b/ledger/internal/eval.go
@@ -45,6 +45,7 @@ type LedgerForCowBase interface {
LookupWithoutRewards(basics.Round, basics.Address) (ledgercore.AccountData, basics.Round, error)
LookupAsset(basics.Round, basics.Address, basics.AssetIndex) (ledgercore.AssetResource, error)
LookupApplication(basics.Round, basics.Address, basics.AppIndex) (ledgercore.AppResource, error)
+ LookupKv(basics.Round, string) ([]byte, error)
GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
}
@@ -132,6 +133,9 @@ type roundCowBase struct {
// Similar cache for asset/app creators.
creators map[creatable]foundAddress
+
+ // Similar cache for kv entries. A nil entry means ledger has no such pair
+ kvStore map[string][]byte
}
func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, stateProofNextRnd basics.Round, proto config.ConsensusParams) *roundCowBase {
@@ -147,6 +151,7 @@ func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, sta
appLocalStates: make(map[ledgercore.AccountApp]cachedAppLocalState),
assets: make(map[ledgercore.AccountAsset]cachedAssetHolding),
creators: make(map[creatable]foundAddress),
+ kvStore: make(map[string][]byte),
}
}
@@ -320,7 +325,7 @@ func (x *roundCowBase) checkDup(firstValid, lastValid basics.Round, txid transac
return x.l.CheckDup(x.proto, x.rnd+1, firstValid, lastValid, txid, txl)
}
-func (x *roundCowBase) txnCounter() uint64 {
+func (x *roundCowBase) Counter() uint64 {
return x.txnCount
}
@@ -832,11 +837,9 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
return fmt.Errorf("group size %d exceeds maximum %d", len(txgroup), eval.proto.MaxTxGroupSize)
}
- cow := eval.state.child(len(txgroup))
-
var group transactions.TxGroup
for gi, txn := range txgroup {
- err := eval.TestTransaction(txn, cow)
+ err := eval.TestTransaction(txn)
if err != nil {
return err
}
@@ -871,7 +874,7 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
// TestTransaction performs basic duplicate detection and well-formedness checks
// on a single transaction, but does not actually add the transaction to the block
// evaluator, or modify the block evaluator state in any other visible way.
-func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn, cow *roundCowState) error {
+func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn) error {
// Transaction valid (not expired)?
err := txn.Txn.Alive(eval.block)
if err != nil {
@@ -885,7 +888,7 @@ func (eval *BlockEvaluator) TestTransaction(txn transactions.SignedTxn, cow *rou
// Transaction already in the ledger?
txid := txn.ID()
- err = cow.checkDup(txn.Txn.First(), txn.Txn.Last(), txid, ledgercore.Txlease{Sender: txn.Txn.Sender, Lease: txn.Txn.Lease})
+ err = eval.state.checkDup(txn.Txn.First(), txn.Txn.Last(), txid, ledgercore.Txlease{Sender: txn.Txn.Sender, Lease: txn.Txn.Lease})
if err != nil {
return err
}
@@ -1062,7 +1065,7 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
}
// Apply the transaction, updating the cow balances
- applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, gi, cow.txnCounter())
+ applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, gi, cow.Counter())
if err != nil {
return fmt.Errorf("transaction %v: %w", txid, err)
}
@@ -1125,7 +1128,7 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, cow *r
err = apply.Payment(tx.PaymentTxnFields, tx.Header, cow, eval.specials, &ad)
case protocol.KeyRegistrationTx:
- err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, cow, eval.specials, &ad, cow.round())
+ err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, cow, eval.specials, &ad, cow.Round())
case protocol.AssetConfigTx:
err = apply.AssetConfig(tx.AssetConfigTxnFields, tx.Header, cow, eval.specials, &ad, ctr)
@@ -1199,7 +1202,7 @@ func (eval *BlockEvaluator) stateProofVotersAndTotal() (root crypto.GenericDiges
// TestingTxnCounter - the method returns the current evaluator transaction counter. The method is used for testing purposes only.
func (eval *BlockEvaluator) TestingTxnCounter() uint64 {
- return eval.state.txnCounter()
+ return eval.state.Counter()
}
// Call "endOfBlock" after all the block's rewards and transactions are processed.
@@ -1212,7 +1215,7 @@ func (eval *BlockEvaluator) endOfBlock() error {
}
if eval.proto.TxnCounter {
- eval.block.TxnCounter = eval.state.txnCounter()
+ eval.block.TxnCounter = eval.state.Counter()
} else {
eval.block.TxnCounter = 0
}
@@ -1255,7 +1258,7 @@ func (eval *BlockEvaluator) endOfBlock() error {
var expectedTxnCount uint64
if eval.proto.TxnCounter {
- expectedTxnCount = eval.state.txnCounter()
+ expectedTxnCount = eval.state.Counter()
}
if eval.block.TxnCounter != expectedTxnCount {
return fmt.Errorf("txn count wrong: %d != %d", eval.block.TxnCounter, expectedTxnCount)
@@ -1443,6 +1446,12 @@ func (eval *BlockEvaluator) GenerateBlock() (*ledgercore.ValidatedBlock, error)
return &vb, nil
}
+// SetGenerateForTesting is exported so that a ledger being used for testing can
+// force a block evaluator to create a block and compare it to another.
+func (eval *BlockEvaluator) SetGenerateForTesting(g bool) {
+ eval.generate = g
+}
+
type evalTxValidator struct {
txcache verify.VerifiedTransactionCache
block bookkeeping.Block
@@ -1549,45 +1558,57 @@ transactionGroupLoop:
if !ok {
break transactionGroupLoop
} else if txgroup.Err != nil {
- return ledgercore.StateDelta{}, txgroup.Err
- }
-
- for _, br := range txgroup.Accounts {
- if _, have := base.accounts[*br.Address]; !have {
- base.accounts[*br.Address] = *br.Data
- }
+ logging.Base().Errorf("eval prefetcher error: %v", txgroup.Err)
}
- for _, lr := range txgroup.Resources {
- if lr.Address == nil {
- // we attempted to look for the creator, and failed.
- base.creators[creatable{cindex: lr.CreatableIndex, ctype: lr.CreatableType}] =
- foundAddress{exists: false}
- continue
- }
- if lr.CreatableType == basics.AssetCreatable {
- if lr.Resource.AssetHolding != nil {
- base.assets[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetHolding{value: *lr.Resource.AssetHolding, exists: true}
- } else {
- base.assets[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetHolding{exists: false}
- }
- if lr.Resource.AssetParams != nil {
- base.assetParams[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetParams{value: *lr.Resource.AssetParams, exists: true}
- base.creators[creatable{cindex: lr.CreatableIndex, ctype: basics.AssetCreatable}] = foundAddress{address: *lr.Address, exists: true}
- } else {
- base.assetParams[ledgercore.AccountAsset{Address: *lr.Address, Asset: basics.AssetIndex(lr.CreatableIndex)}] = cachedAssetParams{exists: false}
+ if txgroup.Err == nil {
+ for _, br := range txgroup.Accounts {
+ if _, have := base.accounts[*br.Address]; !have {
+ base.accounts[*br.Address] = *br.Data
}
- } else {
- if lr.Resource.AppLocalState != nil {
- base.appLocalStates[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppLocalState{value: *lr.Resource.AppLocalState, exists: true}
- } else {
- base.appLocalStates[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppLocalState{exists: false}
+ }
+ for _, lr := range txgroup.Resources {
+ if lr.Address == nil {
+ // we attempted to look for the creator, and failed.
+ creatableKey := creatable{cindex: lr.CreatableIndex, ctype: lr.CreatableType}
+ base.creators[creatableKey] = foundAddress{exists: false}
+ continue
}
- if lr.Resource.AppParams != nil {
- base.appParams[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppParams{value: *lr.Resource.AppParams, exists: true}
- base.creators[creatable{cindex: lr.CreatableIndex, ctype: basics.AppCreatable}] = foundAddress{address: *lr.Address, exists: true}
+ if lr.CreatableType == basics.AssetCreatable {
+ assetKey := ledgercore.AccountAsset{
+ Address: *lr.Address,
+ Asset: basics.AssetIndex(lr.CreatableIndex),
+ }
+
+ if lr.Resource.AssetHolding != nil {
+ base.assets[assetKey] = cachedAssetHolding{value: *lr.Resource.AssetHolding, exists: true}
+ } else {
+ base.assets[assetKey] = cachedAssetHolding{exists: false}
+ }
+ if lr.Resource.AssetParams != nil {
+ creatableKey := creatable{cindex: lr.CreatableIndex, ctype: basics.AssetCreatable}
+ base.assetParams[assetKey] = cachedAssetParams{value: *lr.Resource.AssetParams, exists: true}
+ base.creators[creatableKey] = foundAddress{address: *lr.Address, exists: true}
+ } else {
+ base.assetParams[assetKey] = cachedAssetParams{exists: false}
+ }
} else {
- base.appParams[ledgercore.AccountApp{Address: *lr.Address, App: basics.AppIndex(lr.CreatableIndex)}] = cachedAppParams{exists: false}
+ appKey := ledgercore.AccountApp{
+ Address: *lr.Address,
+ App: basics.AppIndex(lr.CreatableIndex),
+ }
+ if lr.Resource.AppLocalState != nil {
+ base.appLocalStates[appKey] = cachedAppLocalState{value: *lr.Resource.AppLocalState, exists: true}
+ } else {
+ base.appLocalStates[appKey] = cachedAppLocalState{exists: false}
+ }
+ if lr.Resource.AppParams != nil {
+ creatableKey := creatable{cindex: lr.CreatableIndex, ctype: basics.AppCreatable}
+ base.appParams[appKey] = cachedAppParams{value: *lr.Resource.AppParams, exists: true}
+ base.creators[creatableKey] = foundAddress{address: *lr.Address, exists: true}
+ } else {
+ base.appParams[appKey] = cachedAppParams{exists: false}
+ }
}
}
}
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
deleted file mode 100644
index b6eb6b9c1..000000000
--- a/ledger/internal/eval_blackbox_test.go
+++ /dev/null
@@ -1,1256 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package internal_test
-
-import (
- "context"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger"
- "github.com/algorand/go-algorand/ledger/internal"
- "github.com/algorand/go-algorand/ledger/ledgercore"
- ledgertesting "github.com/algorand/go-algorand/ledger/testing"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-algorand/util/execpool"
-)
-
-var minFee basics.MicroAlgos
-
-func init() {
- params := config.Consensus[protocol.ConsensusCurrentVersion]
- minFee = basics.MicroAlgos{Raw: params.MinTxnFee}
-}
-
-func TestBlockEvaluator(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genesisInitState, addrs, keys := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
- require.NoError(t, err)
- newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(t, err)
-
- genHash := l.GenesisHash()
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[1],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
-
- // Correct signature should work
- st := txn.Sign(keys[0])
- err = eval.Transaction(st, transactions.ApplyData{})
- require.NoError(t, err)
-
- // Broken signature should fail
- stbad := st
- st.Sig[2] ^= 8
- txgroup := []transactions.SignedTxn{stbad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- // Repeat should fail
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // out of range should fail
- btxn := txn
- btxn.FirstValid++
- btxn.LastValid += 2
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // bogus group should fail
- btxn = txn
- btxn.Group[1] = 1
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // mixed fields should fail
- btxn = txn
- btxn.XferAsset = 3
- st = btxn.Sign(keys[0])
- txgroup = []transactions.SignedTxn{st}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- // We don't test eval.Transaction() here because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool.
- // err = eval.Transaction(st, transactions.ApplyData{})
- // require.Error(t, err)
-
- selfTxn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: addrs[2],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: addrs[2],
- Amount: basics.MicroAlgos{Raw: 100},
- },
- }
- stxn := selfTxn.Sign(keys[2])
-
- // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths.
- txgroup = []transactions.SignedTxn{stxn}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- err = eval.Transaction(stxn, transactions.ApplyData{})
- require.NoError(t, err)
-
- t3 := txn
- t3.Amount.Raw++
- t4 := selfTxn
- t4.Amount.Raw++
-
- // a group without .Group should fail
- s3 := t3.Sign(keys[0])
- s4 := t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad := transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // Test a group that should work
- var group transactions.TxGroup
- group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)}
- t3.Group = crypto.HashObj(group)
- t4.Group = t3.Group
- s3 = t3.Sign(keys[0])
- s4 = t4.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4}
- err = eval.TestTransactionGroup(txgroup)
- require.NoError(t, err)
-
- // disagreement on Group id should fail
- t4bad := t4
- t4bad.Group[3] ^= 3
- s4bad := t4bad.Sign(keys[2])
- txgroup = []transactions.SignedTxn{s3, s4bad}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
- txgroupad = transactions.WrapSignedTxnsWithAD(txgroup)
- err = eval.TransactionGroup(txgroupad)
- require.Error(t, err)
-
- // missing part of the group should fail
- txgroup = []transactions.SignedTxn{s3}
- err = eval.TestTransactionGroup(txgroup)
- require.Error(t, err)
-
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
-
- accts := genesisInitState.Accounts
- bal0 := accts[addrs[0]]
- bal1 := accts[addrs[1]]
- bal2 := accts[addrs[2]]
-
- l.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
-
- bal0new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[0])
- require.NoError(t, err)
- bal1new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[1])
- require.NoError(t, err)
- bal2new, _, _, err := l.LookupAccount(newBlock.Round(), addrs[2])
- require.NoError(t, err)
-
- require.Equal(t, bal0new.MicroAlgos.Raw, bal0.MicroAlgos.Raw-minFee.Raw-100)
- require.Equal(t, bal1new.MicroAlgos.Raw, bal1.MicroAlgos.Raw+100)
- require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
-}
-
-func TestRekeying(t *testing.T) {
- partitiontest.PartitionTest(t)
- // t.Parallel() NO! This test manipulates []protocol.Consensus
-
- // Pretend rekeying is supported
- actual := config.Consensus[protocol.ConsensusCurrentVersion]
- pretend := actual
- pretend.SupportRekeying = true
- config.Consensus[protocol.ConsensusCurrentVersion] = pretend
- defer func() {
- config.Consensus[protocol.ConsensusCurrentVersion] = actual
- }()
-
- // Bring up a ledger
- genesisInitState, addrs, keys := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- // Make a new block
- nextRound := l.Latest() + basics.Round(1)
- genHash := l.GenesisHash()
-
- // Test plan
- // Syntax: [A -> B][C, D] means transaction from A that rekeys to B with authaddr C and actual sig from D
- makeTxn := func(sender, rekeyto, authaddr basics.Address, signer *crypto.SignatureSecrets, uniq uint8) transactions.SignedTxn {
- txn := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: minFee,
- FirstValid: nextRound,
- LastValid: nextRound,
- GenesisHash: genHash,
- RekeyTo: rekeyto,
- Note: []byte{uniq},
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: sender,
- },
- }
- sig := signer.Sign(txn)
- return transactions.SignedTxn{Txn: txn, Sig: sig, AuthAddr: authaddr}
- }
-
- tryBlock := func(stxns []transactions.SignedTxn) error {
- // We'll make a block using the evaluator.
- // When generating a block, the evaluator doesn't check transaction sigs -- it assumes the transaction pool already did that.
- // So the ValidatedBlock that comes out isn't necessarily actually a valid block. We'll call Validate ourselves.
- genesisHdr, err := l.BlockHdr(basics.Round(0))
- require.NoError(t, err)
- newBlock := bookkeeping.MakeBlock(genesisHdr)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
- require.NoError(t, err)
-
- for _, stxn := range stxns {
- err = eval.Transaction(stxn, transactions.ApplyData{})
- if err != nil {
- return err
- }
- }
- validatedBlock, err := eval.GenerateBlock()
- if err != nil {
- return err
- }
-
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
- _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool)
- return err
- }
-
- // Preamble transactions, which all of the blocks in this test will start with
- // [A -> 0][0,A] (normal transaction)
- // [A -> B][0,A] (rekey)
- txn0 := makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 0) // Normal transaction
- txn1 := makeTxn(addrs[0], addrs[1], basics.Address{}, keys[0], 1) // Rekey transaction
-
- // Test 1: Do only good things
- // (preamble)
- // [A -> 0][B,B] (normal transaction using new key)
- // [A -> A][B,B] (rekey back to A, transaction still signed by B)
- // [A -> 0][0,A] (normal transaction again)
- test1txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], basics.Address{}, addrs[1], keys[1], 2), // [A -> 0][B,B]
- makeTxn(addrs[0], addrs[0], addrs[1], keys[1], 3), // [A -> A][B,B]
- makeTxn(addrs[0], basics.Address{}, basics.Address{}, keys[0], 4), // [A -> 0][0,A]
- }
- err = tryBlock(test1txns)
- require.NoError(t, err)
-
- // Test 2: Use old key after rekeying
- // (preamble)
- // [A -> A][0,A] (rekey back to A, but signed by A instead of B)
- test2txns := []transactions.SignedTxn{
- txn0, txn1, // (preamble)
- makeTxn(addrs[0], addrs[0], basics.Address{}, keys[0], 2), // [A -> A][0,A]
- }
- err = tryBlock(test2txns)
- require.Error(t, err)
-
- // TODO: More tests
-}
-
-// TestEvalAppState ensures txns in a group can't violate app state schema
-// limits the test ensures that commitToParent -> applyChild copies child's cow
-// state usage counts into parent and the usage counts correctly propagated from
-// parent cow to child cow and back. When limits are not violated, the test
-// ensures that the updates are correct.
-func TestEvalAppState(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // v24 = apps
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- appcall1 := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[0],
- GlobalStateSchema: basics.StateSchema{NumByteSlice: 1},
- ApprovalProgram: `#pragma version 2
- txn ApplicationID
- bz create
- byte "caller"
- txn Sender
- app_global_put
- b ok
-create:
- byte "creator"
- txn Sender
- app_global_put
-ok:
- int 1`,
- ClearStateProgram: "#pragma version 2\nint 1",
- }
-
- appcall2 := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[0],
- ApplicationID: 1,
- }
-
- dl.beginBlock()
- dl.txgroup("store bytes count 2 exceeds schema bytes count 1", &appcall1, &appcall2)
-
- appcall1.GlobalStateSchema = basics.StateSchema{NumByteSlice: 2}
- dl.txgroup("", &appcall1, &appcall2)
- vb := dl.endBlock()
- deltas := vb.Delta()
-
- params, ok := deltas.Accts.GetAppParams(addrs[0], 1)
- require.True(t, ok)
- state := params.Params.GlobalState
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["caller"])
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["creator"])
- })
-}
-
-// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
-func nextBlock(t testing.TB, ledger *ledger.Ledger) *internal.BlockEvaluator {
- rnd := ledger.Latest()
- hdr, err := ledger.BlockHdr(rnd)
- require.NoError(t, err)
-
- nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
- nextHdr.TimeStamp = hdr.TimeStamp + 1 // ensure deterministic tests
- eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
- Generate: true,
- Validate: true, // Do the complete checks that a new txn would be subject to
- })
- require.NoError(t, err)
- return eval
-}
-
-func fillDefaults(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
- if txn.GenesisHash.IsZero() && ledger.GenesisProto().SupportGenesisHash {
- txn.GenesisHash = ledger.GenesisHash()
- }
- if txn.FirstValid == 0 {
- txn.FirstValid = eval.Round()
- }
-
- txn.FillDefaults(ledger.GenesisProto())
-}
-
-func txns(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) {
- t.Helper()
- for _, txn1 := range txns {
- txn(t, ledger, eval, txn1)
- }
-}
-
-func txn(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
- t.Helper()
- fillDefaults(t, ledger, eval, txn)
- err := eval.Transaction(txn.SignedTxn(), transactions.ApplyData{})
- if err != nil {
- if len(problem) == 1 && problem[0] != "" {
- require.Contains(t, err.Error(), problem[0])
- } else {
- require.NoError(t, err) // Will obviously fail
- }
- return
- }
- require.True(t, len(problem) == 0 || problem[0] == "")
-}
-
-func txgroup(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
- t.Helper()
- for _, txn := range txns {
- fillDefaults(t, ledger, eval, txn)
- }
- txgroup := txntest.SignedTxns(txns...)
-
- return eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
-}
-
-func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
- genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, consensusVersion)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- eval := nextBlock(t, l)
-
- appcall1 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- GlobalStateSchema: schema,
- ApprovalProgram: approvalProgram,
- }
-
- appcall2 := txntest.Txn{
- Sender: addrs[0],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- appcall3 := txntest.Txn{
- Sender: addrs[1],
- Type: protocol.ApplicationCallTx,
- ApplicationID: basics.AppIndex(1),
- }
-
- return txgroup(t, l, eval, &appcall1, &appcall2, &appcall3)
-}
-
-// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
-// budgets in a group txn and return an error if the budget is exceeded
-func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- source := func(n int, m int) string {
- return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
- strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
- }
-
- params := []protocol.ConsensusVersion{
- protocol.ConsensusV29,
- protocol.ConsensusFuture,
- }
-
- cases := []struct {
- prog string
- isSuccessV29 bool
- isSuccessVFuture bool
- expectedErrorV29 string
- expectedErrorVFuture string
- }{
- {source(5, 47), true, true,
- "",
- ""},
- {source(5, 48), false, true,
- "pc=157 dynamic cost budget exceeded, executing pushint",
- ""},
- {source(16, 17), false, true,
- "pc= 12 dynamic cost budget exceeded, executing keccak256",
- ""},
- {source(16, 18), false, false,
- "pc= 12 dynamic cost budget exceeded, executing keccak256",
- "pc= 78 dynamic cost budget exceeded, executing pushint"},
- }
-
- for i, param := range params {
- for j, testCase := range cases {
- t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
- err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
- if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorV29)
- } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
- }
- })
- }
- }
-}
-
-// endBlock completes the block being created, returns the ValidatedBlock for inspection
-func endBlock(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator) *ledgercore.ValidatedBlock {
- validatedBlock, err := eval.GenerateBlock()
- require.NoError(t, err)
- err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
- require.NoError(t, err)
- // `rndBQ` gives the latest known block round added to the ledger
- // we should wait until `rndBQ` block to be committed to blockQueue,
- // in case there is a data race, noted in
- // https://github.com/algorand/go-algorand/issues/4349
- // where writing to `callTxnGroup` after `dl.fullBlock` caused data race,
- // because the underlying async goroutine `go bq.syncer()` is reading `callTxnGroup`.
- // A solution here would be wait until all new added blocks are committed,
- // then we return the result and continue the execution.
- rndBQ := ledger.Latest()
- ledger.WaitForCommit(rndBQ)
- return validatedBlock
-}
-
-// lookup gets the current accountdata for an address
-func lookup(t testing.TB, ledger *ledger.Ledger, addr basics.Address) basics.AccountData {
- ad, _, _, err := ledger.LookupLatest(addr)
- require.NoError(t, err)
- return ad
-}
-
-// micros gets the current microAlgo balance for an address
-func micros(t testing.TB, ledger *ledger.Ledger, addr basics.Address) uint64 {
- return lookup(t, ledger, addr).MicroAlgos.Raw
-}
-
-// holding gets the current balance and optin status for some asa for an address
-func holding(t testing.TB, ledger *ledger.Ledger, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
- if holding, ok := lookup(t, ledger, addr).Assets[asset]; ok {
- return holding.Amount, true
- }
- return 0, false
-}
-
-// asaParams gets the asset params for a given asa index
-func asaParams(t testing.TB, ledger *ledger.Ledger, asset basics.AssetIndex) (basics.AssetParams, error) {
- creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
- if err != nil {
- return basics.AssetParams{}, err
- }
- if !ok {
- return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
- }
- if params, ok := lookup(t, ledger, creator).AssetParams[asset]; ok {
- return params, nil
- }
- return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
-}
-
-func TestGarbageClearState(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // v24 = apps
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "int 1",
- ClearStateProgram: []byte{},
- }
-
- dl.txn(&createTxn, "invalid program (empty)")
-
- createTxn.ClearStateProgram = []byte{0xfe} // bad uvarint
- dl.txn(&createTxn, "invalid version")
- })
-}
-
-func TestRewardsInAD(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // v15 put rewards into ApplyData
- testConsensusRange(t, 11, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
- nonpartTxn := txntest.Txn{Type: protocol.KeyRegistrationTx, Sender: addrs[2], Nonparticipation: true}
- payNonPart := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[2]}
-
- if ver < 18 { // Nonpart reyreg happens in v18
- dl.txn(&nonpartTxn, "tries to mark an account as nonparticipating")
- } else {
- dl.fullBlock(&nonpartTxn)
- }
-
- // Build up Residue in RewardsState so it's ready to pay
- for i := 1; i < 10; i++ {
- dl.fullBlock()
- }
-
- vb := dl.fullBlock(&payTxn, &payNonPart)
- payInBlock := vb.Block().Payset[0]
- nonPartInBlock := vb.Block().Payset[1]
- if ver >= 15 {
- require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
- require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
- require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
- // Sender is not due for more, and Receiver is nonpart
- require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
- if ver < 18 {
- require.Greater(t, nonPartInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
- } else {
- require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
- }
- } else {
- require.Zero(t, payInBlock.ApplyData.SenderRewards)
- require.Zero(t, payInBlock.ApplyData.ReceiverRewards)
- require.Zero(t, nonPartInBlock.ApplyData.SenderRewards)
- require.Zero(t, nonPartInBlock.ApplyData.ReceiverRewards)
- }
- })
-}
-
-func TestMinBalanceChanges(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- createTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 3,
- Manager: addrs[1],
- Reserve: addrs[2],
- Freeze: addrs[3],
- Clawback: addrs[4],
- },
- }
-
- const expectedID basics.AssetIndex = 1
- optInTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[5],
- }
-
- ad0init, _, _, err := l.LookupLatest(addrs[0])
- require.NoError(t, err)
- ad5init, _, _, err := l.LookupLatest(addrs[5])
- require.NoError(t, err)
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &createTxn, &optInTxn)
- endBlock(t, l, eval)
-
- ad0new, _, _, err := l.LookupLatest(addrs[0])
- require.NoError(t, err)
- ad5new, _, _, err := l.LookupLatest(addrs[5])
- require.NoError(t, err)
-
- proto := l.GenesisProto()
- // Check balance and min balance requirement changes
- require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
- require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
- require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
-
- optOutTxn := txntest.Txn{
- Type: "axfer",
- Sender: addrs[5],
- XferAsset: expectedID,
- AssetReceiver: addrs[0],
- AssetCloseTo: addrs[0],
- }
-
- closeTxn := txntest.Txn{
- Type: "acfg",
- Sender: addrs[1], // The manager, not the creator
- ConfigAsset: expectedID,
- }
-
- eval = nextBlock(t, l)
- txns(t, l, eval, &optOutTxn, &closeTxn)
- endBlock(t, l, eval)
-
- ad0final, _, _, err := l.LookupLatest(addrs[0])
- require.NoError(t, err)
- ad5final, _, _, err := l.LookupLatest(addrs[5])
- require.NoError(t, err)
- // Check we got our balance "back"
- require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
- require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
-}
-
-// TestDeleteNonExistantKeys checks if the EvalDeltas from deleting missing keys are correct
-func TestDeleteNonExistantKeys(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // AVM v2 (apps)
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- const appid basics.AppIndex = 1
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
-byte "missing_global"
-app_global_del
-int 0
-byte "missing_local"
-app_local_del
-`),
- }
-
- optInTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.OptInOC,
- }
-
- vb := dl.fullBlock(&createTxn, &optInTxn)
- require.Len(t, vb.Block().Payset[1].EvalDelta.GlobalDelta, 0)
- // For a while, we encoded an empty localdelta
- deltas := 1
- if ver >= 27 {
- deltas = 0
- }
- require.Len(t, vb.Block().Payset[1].EvalDelta.LocalDeltas, deltas)
- })
-}
-
-// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator
-// and do not cause any MaximumMinimumBalance problems
-func TestAppInsMinBalance(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
- genesisInitState.Block.CurrentProtocol = protocol.ConsensusV30
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- const appid basics.AppIndex = 1
-
- maxAppsOptedIn := config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
- require.Greater(t, maxAppsOptedIn, 0)
- maxAppsCreated := config.Consensus[protocol.ConsensusV30].MaxAppsCreated
- require.Greater(t, maxAppsCreated, 0)
- maxLocalSchemaEntries := config.Consensus[protocol.ConsensusV30].MaxLocalSchemaEntries
- require.Greater(t, maxLocalSchemaEntries, uint64(0))
-
- txnsCreate := make([]*txntest.Txn, 0, maxAppsOptedIn)
- txnsOptIn := make([]*txntest.Txn, 0, maxAppsOptedIn)
- appsCreated := make(map[basics.Address]int, len(addrs)-1)
-
- acctIdx := 0
- for i := 0; i < maxAppsOptedIn; i++ {
- creator := addrs[acctIdx]
- createTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: creator,
- ApprovalProgram: "int 1",
- LocalStateSchema: basics.StateSchema{NumByteSlice: maxLocalSchemaEntries},
- Note: ledgertesting.RandomNote(),
- }
- txnsCreate = append(txnsCreate, &createTxn)
- count := appsCreated[creator]
- count++
- appsCreated[creator] = count
- if count == maxAppsCreated {
- acctIdx++
- }
-
- optInTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[9],
- ApplicationID: appid + basics.AppIndex(i),
- OnCompletion: transactions.OptInOC,
- }
- txnsOptIn = append(txnsOptIn, &optInTxn)
- }
-
- eval := nextBlock(t, l)
- txns1 := append(txnsCreate, txnsOptIn...)
- txns(t, l, eval, txns1...)
- vb := endBlock(t, l, eval)
- mods := vb.Delta()
- appAppResources := mods.Accts.GetAllAppResources()
- appParamsCount := 0
- appLocalStatesCount := 0
- for _, ap := range appAppResources {
- if ap.Params.Params != nil {
- appParamsCount++
- }
- if ap.State.LocalState != nil {
- appLocalStatesCount++
- }
- }
- require.Equal(t, appLocalStatesCount, 50)
- require.Equal(t, appParamsCount, 50)
-}
-
-func TestDuplicates(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 11, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- pay := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[1],
- Amount: 10,
- }
- dl.txn(&pay)
- dl.txn(&pay, "transaction already in ledger")
-
- // Test same transaction in a later block
- dl.txn(&pay, "transaction already in ledger")
-
- // Change the note so it can go in again
- pay.Note = []byte("1")
- dl.txn(&pay)
-
- // Change note again, but try the txn twice in same group
- if dl.generator.GenesisProto().MaxTxGroupSize > 1 {
- pay.Note = []byte("2")
- dl.txgroup("transaction already in ledger", &pay, &pay)
- }
- })
-}
-
-var consensusByNumber = []protocol.ConsensusVersion{
- "", "", "", "", "", "", "",
- protocol.ConsensusV7,
- protocol.ConsensusV8,
- protocol.ConsensusV9,
- protocol.ConsensusV10,
- protocol.ConsensusV11, // first with viable payset commit type
- protocol.ConsensusV12,
- protocol.ConsensusV13,
- protocol.ConsensusV14,
- protocol.ConsensusV15, // rewards in AD
- protocol.ConsensusV16,
- protocol.ConsensusV17,
- protocol.ConsensusV18,
- protocol.ConsensusV19,
- protocol.ConsensusV20,
- protocol.ConsensusV21,
- protocol.ConsensusV22,
- protocol.ConsensusV23,
- protocol.ConsensusV24, // AVM v2 (apps)
- protocol.ConsensusV25,
- protocol.ConsensusV26,
- protocol.ConsensusV27,
- protocol.ConsensusV28,
- protocol.ConsensusV29,
- protocol.ConsensusV30, // AVM v5 (inner txs)
- protocol.ConsensusV31, // AVM v6 (inner txs with appls)
- protocol.ConsensusV32, // unlimited assets and apps
- protocol.ConsensusV33, // 320 rounds
- protocol.ConsensusV34, // AVM v7, stateproofs
- protocol.ConsensusV35, // stateproofs stake fix
- protocol.ConsensusFuture,
-}
-
-// TestReleasedVersion ensures that the necessary tidying is done when a new
-// protocol release happens. The new version must be added to
-// consensusByNumber, and a new LogicSigVersion must be added to vFuture.
-func TestReleasedVersion(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- // This confirms that the proto before future has no ApprovedUpgrades. Once
- // it does, that new version should be added to consensusByNumber.
- require.Len(t, config.Consensus[consensusByNumber[len(consensusByNumber)-2]].ApprovedUpgrades, 0)
- // And no funny business with vFuture
- require.Equal(t, protocol.ConsensusFuture, consensusByNumber[len(consensusByNumber)-1])
-
- // Ensure that vFuture gets a new LogicSigVersion when we promote the
- // existing one. That allows TestExperimental in the logic package to
- // prevent unintended releases of experimental opcodes.
- relV := config.Consensus[consensusByNumber[len(consensusByNumber)-2]].LogicSigVersion
- futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
- require.Equal(t, relV+1, futureV)
-}
-
-// testConsensusRange allows for running tests against a range of consensus
-// versions. Generally `start` will be the version that introduced the feature,
-// and `stop` will be 0 to indicate it should work right on up through vFuture.
-// `stop` will be an actual version number if we're confirming that something
-// STOPS working as of a particular version. When writing the test for a new
-// feature that is currently in vFuture, use the expected version number as
-// `start`. That will correspond to vFuture until a new consensus version is
-// created and inserted in consensusByNumber. At that point, your feature is
-// probably active in that version. (If it's being held in vFuture, just
-// increment your `start`.)
-func testConsensusRange(t *testing.T, start, stop int, test func(t *testing.T, ver int)) {
- if stop == 0 { // Treat 0 as "future"
- stop = len(consensusByNumber) - 1
- }
- for i := start; i <= stop; i++ {
- var version string
- if i == len(consensusByNumber)-1 {
- version = "vFuture"
- } else {
- version = fmt.Sprintf("v%d", i)
- }
- t.Run(fmt.Sprintf("cv=%s", version), func(t *testing.T) { test(t, i) })
- }
-}
-
-func benchConsensusRange(b *testing.B, start, stop int, bench func(t *testing.B, ver int)) {
- if stop == 0 { // Treat 0 as "future"
- stop = len(consensusByNumber) - 1
- }
- for i := start; i <= stop; i++ {
- var version string
- if i == len(consensusByNumber)-1 {
- version = "vFuture"
- } else {
- version = fmt.Sprintf("v%d", i)
- }
- b.Run(fmt.Sprintf("cv=%s", version), func(b *testing.B) { bench(b, i) })
- }
-}
-
-// TestHeaderAccess tests FirstValidTime and `block` which can access previous
-// block headers.
-func TestHeaderAccess(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // Added in v34
- testConsensusRange(t, 34, 0, func(t *testing.T, ver int) {
- cv := consensusByNumber[ver]
- dl := NewDoubleLedger(t, genBalances, cv)
- defer dl.Close()
-
- fvt := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- FirstValid: 0,
- ApprovalProgram: "txn FirstValidTime",
- }
- dl.txn(&fvt, "round 0 is not available")
-
- // advance current to 2
- pay := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]}
- dl.fullBlock(&pay)
-
- fvt.FirstValid = 1
- dl.txn(&fvt, "round 0 is not available")
-
- fvt.FirstValid = 2
- dl.txn(&fvt) // current becomes 3
-
- // Advance current round far enough to test access MaxTxnLife ago
- for i := 0; i < int(config.Consensus[cv].MaxTxnLife); i++ {
- dl.fullBlock()
- }
-
- // current should be 1003. Confirm.
- require.EqualValues(t, 1002, dl.generator.Latest())
- require.EqualValues(t, 1002, dl.validator.Latest())
-
- fvt.FirstValid = 1003
- fvt.LastValid = 1010
- dl.txn(&fvt) // success advances the round
- // now we're confident current is 1004, so construct a txn that is as
- // old as possible, and confirm access.
- fvt.FirstValid = 1004 - basics.Round(config.Consensus[cv].MaxTxnLife)
- fvt.LastValid = 1004
- dl.txn(&fvt)
- })
-
-}
-
-// TestLogsInBlock ensures that logs appear in the block properly
-func TestLogsInBlock(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // Run tests from v30 onward
- testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "byte \"APP\"\n log\n int 1",
- // Fail the clear state
- ClearStateProgram: "byte \"CLR\"\n log\n int 0",
- }
- vb := dl.fullBlock(&createTxn)
- createInBlock := vb.Block().Payset[0]
- appID := createInBlock.ApplyData.ApplicationID
- require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
-
- optInTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[1],
- ApplicationID: appID,
- OnCompletion: transactions.OptInOC,
- }
- vb = dl.fullBlock(&optInTxn)
- optInInBlock := vb.Block().Payset[0]
- require.Equal(t, "APP", optInInBlock.ApplyData.EvalDelta.Logs[0])
-
- clearTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[1],
- ApplicationID: appID,
- OnCompletion: transactions.ClearStateOC,
- }
- vb = dl.fullBlock(&clearTxn)
- clearInBlock := vb.Block().Payset[0]
- // Logs do not appear if the ClearState failed
- require.Len(t, clearInBlock.ApplyData.EvalDelta.Logs, 0)
- })
-}
-
-// TestUnfundedSenders confirms that accounts that don't even exist
-// can be the Sender in some situations. If some other transaction
-// covers the fee, and the transaction itself does not require an
-// asset or a min balance, it's fine.
-func TestUnfundedSenders(t *testing.T) {
- /*
- In a 0-fee transaction from unfunded sender, we still call balances.Move
- to “pay” the fee. Move() does not short-circuit a Move of 0 (for good
- reason, it allows compounding rewards). Therefore, in Move, we do
- rewards processing on the unfunded account. Before
- proto.UnfundedSenders, the rewards procesing would set the RewardsBase,
- which would require the account be written to DB, and therefore the MBR
- check would kick in (and fail). Now it skips the update if the account
- has less than RewardsUnit, as the update is meaningless anyway.
- */
-
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
-
- testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- asaIndex := basics.AssetIndex(1)
-
- ghost := basics.Address{0x01}
-
- asaCreate := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 10,
- Clawback: ghost,
- Freeze: ghost,
- Manager: ghost,
- },
- }
-
- appCreate := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- }
-
- dl.fullBlock(&asaCreate, &appCreate)
-
- // Advance so that rewardsLevel increases
- for i := 1; i < 10; i++ {
- dl.fullBlock()
- }
-
- fmt.Printf("addrs[0] = %+v\n", addrs[0])
- fmt.Printf("addrs[1] = %+v\n", addrs[1])
-
- benefactor := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[0],
- Fee: 2000,
- }
-
- ephemeral := []txntest.Txn{
- {
- Type: "pay",
- Amount: 0,
- Sender: ghost,
- Receiver: ghost,
- Fee: 0,
- },
- { // Axfer of 0
- Type: "axfer",
- AssetAmount: 0,
- Sender: ghost,
- AssetReceiver: basics.Address{0x02},
- XferAsset: basics.AssetIndex(1),
- Fee: 0,
- },
- { // Clawback
- Type: "axfer",
- AssetAmount: 0,
- Sender: ghost,
- AssetReceiver: addrs[0],
- AssetSender: addrs[1],
- XferAsset: asaIndex,
- Fee: 0,
- },
- { // Freeze
- Type: "afrz",
- Sender: ghost,
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: true,
- Fee: 0,
- },
- { // Unfreeze
- Type: "afrz",
- Sender: ghost,
- FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
- AssetFrozen: false,
- Fee: 0,
- },
- { // App call
- Type: "appl",
- Sender: ghost,
- ApplicationID: basics.AppIndex(2),
- Fee: 0,
- },
- { // App creation (only works because it's also deleted)
- Type: "appl",
- Sender: ghost,
- OnCompletion: transactions.DeleteApplicationOC,
- Fee: 0,
- },
- }
-
- // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
- var problem string
- if ver < 34 {
- // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
- problem = "balance 0 below min"
- }
- for i, e := range ephemeral {
- dl.txgroup(problem, benefactor.Noted(strconv.Itoa(i)), &e)
- }
- })
-}
-
-// TestAppCallAppDuringInit is similar to TestUnfundedSenders test, but now the
-// unfunded sender is a newly created app. The fee has been paid by the outer
-// transaction, so the app should be able to make an app call as that requires
-// no min balance.
-func TestAppCallAppDuringInit(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
- dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
- defer dl.Close()
-
- approve := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- }
-
- // construct a simple app
- vb := dl.fullBlock(&approve)
-
- // now make a new app that calls it during init
- approveID := vb.Block().Payset[0].ApplicationID
-
- // Advance so that rewardsLevel increases
- for i := 1; i < 10; i++ {
- dl.fullBlock()
- }
-
- callInInit := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: `
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
- int 1
- `,
- ForeignApps: []basics.AppIndex{approveID},
- Fee: 2000, // Enough to have the inner fee paid for
- }
- // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
- var problem string
- if ver < 34 {
- // In the old days, balances.Move would try to increase the rewardsState on the unfunded account
- problem = "balance 0 below min"
- }
- dl.txn(&callInInit, problem)
- })
-}
diff --git a/ledger/internal/eval_test.go b/ledger/internal/eval_test.go
index 8f07612be..da73bc60a 100644
--- a/ledger/internal/eval_test.go
+++ b/ledger/internal/eval_test.go
@@ -577,6 +577,10 @@ func (ledger *evalTestLedger) LookupAsset(rnd basics.Round, addr basics.Address,
return res, nil
}
+func (ledger *evalTestLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ panic("unimplemented")
+}
+
// GenesisHash returns the genesis hash for this ledger.
func (ledger *evalTestLedger) GenesisHash() crypto.Digest {
return ledger.genesisHash
@@ -768,6 +772,10 @@ func (l *testCowBaseLedger) LookupAsset(rnd basics.Round, addr basics.Address, a
return ledgercore.AssetResource{}, errors.New("not implemented")
}
+func (l *testCowBaseLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ return nil, errors.New("not implemented")
+}
+
func (l *testCowBaseLedger) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
res := l.creators[0]
l.creators = l.creators[1:]
diff --git a/ledger/internal/prefetcher/prefetcher.go b/ledger/internal/prefetcher/prefetcher.go
index 82e0d830c..b7223806f 100644
--- a/ledger/internal/prefetcher/prefetcher.go
+++ b/ledger/internal/prefetcher/prefetcher.go
@@ -76,7 +76,7 @@ type LoadedTransactionGroup struct {
type accountPrefetcher struct {
ledger Ledger
rnd basics.Round
- groups [][]transactions.SignedTxnWithAD
+ txnGroups [][]transactions.SignedTxnWithAD
feeSinkAddr basics.Address
consensusParams config.ConsensusParams
outChan chan LoadedTransactionGroup
@@ -84,14 +84,14 @@ type accountPrefetcher struct {
// PrefetchAccounts loads the account data for the provided transaction group list. It also loads the feeSink account and add it to the first returned transaction group.
// The order of the transaction groups returned by the channel is identical to the one in the input array.
-func PrefetchAccounts(ctx context.Context, l Ledger, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) <-chan LoadedTransactionGroup {
+func PrefetchAccounts(ctx context.Context, l Ledger, rnd basics.Round, txnGroups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) <-chan LoadedTransactionGroup {
prefetcher := &accountPrefetcher{
ledger: l,
rnd: rnd,
- groups: groups,
+ txnGroups: txnGroups,
feeSinkAddr: feeSinkAddr,
consensusParams: consensusParams,
- outChan: make(chan LoadedTransactionGroup, len(groups)),
+ outChan: make(chan LoadedTransactionGroup, len(txnGroups)),
}
go prefetcher.prefetch(ctx)
@@ -117,6 +117,9 @@ type groupTask struct {
resources []LoadedResourcesEntry
// resourcesCount is the number of resources that nees to be loaded per transaction group
resourcesCount int
+
+ // error while processing this group task
+ err *GroupTaskError
}
// preloaderTask manage the loading of a single element, whether it's a resource or an account address.
@@ -128,9 +131,9 @@ type preloaderTask struct {
// resource type
creatableType basics.CreatableType
// a list of transaction group tasks that depends on this address or resource
- groups []*groupTask
+ groupTasks []*groupTask
// a list of indices into the groupTask.balances or groupTask.resources where the address would be stored
- groupIndices []int
+ groupTasksIndices []int
}
// preloaderTaskQueue is a dynamic linked list of enqueued entries, optimized for non-syncronized insertion and
@@ -198,18 +201,18 @@ func loadAccountsAddAccountTask(addr *basics.Address, wt *groupTask, accountTask
}
if task, have := accountTasks[*addr]; !have {
task := &preloaderTask{
- address: addr,
- groups: make([]*groupTask, 1, 4),
- groupIndices: make([]int, 1, 4),
+ address: addr,
+ groupTasks: make([]*groupTask, 1, 4),
+ groupTasksIndices: make([]int, 1, 4),
}
- task.groups[0] = wt
- task.groupIndices[0] = wt.balancesCount
+ task.groupTasks[0] = wt
+ task.groupTasksIndices[0] = wt.balancesCount
accountTasks[*addr] = task
queue.enqueue(task)
} else {
- task.groups = append(task.groups, wt)
- task.groupIndices = append(task.groupIndices, wt.balancesCount)
+ task.groupTasks = append(task.groupTasks, wt)
+ task.groupTasksIndices = append(task.groupTasksIndices, wt.balancesCount)
}
wt.balancesCount++
}
@@ -226,20 +229,20 @@ func loadAccountsAddResourceTask(addr *basics.Address, cidx basics.CreatableInde
}
if task, have := resourceTasks[key]; !have {
task := &preloaderTask{
- address: addr,
- groups: make([]*groupTask, 1, 4),
- groupIndices: make([]int, 1, 4),
- creatableIndex: cidx,
- creatableType: ctype,
+ address: addr,
+ groupTasks: make([]*groupTask, 1, 4),
+ groupTasksIndices: make([]int, 1, 4),
+ creatableIndex: cidx,
+ creatableType: ctype,
}
- task.groups[0] = wt
- task.groupIndices[0] = wt.resourcesCount
+ task.groupTasks[0] = wt
+ task.groupTasksIndices[0] = wt.resourcesCount
resourceTasks[key] = task
queue.enqueue(task)
} else {
- task.groups = append(task.groups, wt)
- task.groupIndices = append(task.groupIndices, wt.resourcesCount)
+ task.groupTasks = append(task.groupTasks, wt)
+ task.groupTasksIndices = append(task.groupTasksIndices, wt.resourcesCount)
}
wt.resourcesCount++
}
@@ -250,6 +253,7 @@ func loadAccountsAddResourceTask(addr *basics.Address, cidx basics.CreatableInde
func (p *accountPrefetcher) prefetch(ctx context.Context) {
defer close(p.outChan)
accountTasks := make(map[basics.Address]*preloaderTask)
+ resourceTasks := make(map[accountCreatableKey]*preloaderTask)
var maxTxnGroupEntries int
if p.consensusParams.Application {
@@ -260,21 +264,21 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
maxTxnGroupEntries = p.consensusParams.MaxTxGroupSize * 8
}
- tasksQueue := allocPreloaderQueue(len(p.groups), maxTxnGroupEntries)
+ tasksQueue := allocPreloaderQueue(len(p.txnGroups), maxTxnGroupEntries)
// totalBalances counts the total number of balances over all the transaction groups
totalBalances := 0
totalResources := 0
- groupsReady := make([]groupTask, len(p.groups))
+ groupsReady := make([]groupTask, len(p.txnGroups))
// Add fee sink to the first group
- if len(p.groups) > 0 {
+ if len(p.txnGroups) > 0 {
// the feeSinkAddr is known to be non-empty
feeSinkPreloader := &preloaderTask{
- address: &p.feeSinkAddr,
- groups: []*groupTask{&groupsReady[0]},
- groupIndices: []int{0},
+ address: &p.feeSinkAddr,
+ groupTasks: []*groupTask{&groupsReady[0]},
+ groupTasksIndices: []int{0},
}
groupsReady[0].balancesCount = 1
accountTasks[p.feeSinkAddr] = feeSinkPreloader
@@ -283,21 +287,64 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
// iterate over the transaction groups and add all their account addresses to the list
queue := &tasksQueue
- for i := range p.groups {
+ for i := range p.txnGroups {
task := &groupsReady[i]
- for j := range p.groups[i] {
- stxn := &p.groups[i][j]
+ for j := range p.txnGroups[i] {
+ stxn := &p.txnGroups[i][j]
switch stxn.Txn.Type {
case protocol.PaymentTx:
loadAccountsAddAccountTask(&stxn.Txn.Receiver, task, accountTasks, queue)
loadAccountsAddAccountTask(&stxn.Txn.CloseRemainderTo, task, accountTasks, queue)
case protocol.AssetConfigTx:
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.ConfigAsset), basics.AssetCreatable, task, resourceTasks, queue)
case protocol.AssetTransferTx:
+ if !stxn.Txn.AssetSender.IsZero() {
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ loadAccountsAddResourceTask(&stxn.Txn.AssetSender, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ } else {
+ if stxn.Txn.AssetAmount == 0 && (stxn.Txn.AssetReceiver == stxn.Txn.Sender) { // opt in
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
+ if stxn.Txn.AssetAmount != 0 { // zero transfer is noop
+ loadAccountsAddResourceTask(&stxn.Txn.Sender, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
+ }
+ if !stxn.Txn.AssetReceiver.IsZero() {
+ if stxn.Txn.AssetAmount != 0 || (stxn.Txn.AssetReceiver == stxn.Txn.Sender) {
+ // if not zero transfer or opt in then prefetch
+ loadAccountsAddResourceTask(&stxn.Txn.AssetReceiver, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
+ }
+ if !stxn.Txn.AssetCloseTo.IsZero() {
+ loadAccountsAddResourceTask(&stxn.Txn.AssetCloseTo, basics.CreatableIndex(stxn.Txn.XferAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ }
case protocol.AssetFreezeTx:
+ if !stxn.Txn.FreezeAccount.IsZero() {
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.FreezeAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ loadAccountsAddResourceTask(&stxn.Txn.FreezeAccount, basics.CreatableIndex(stxn.Txn.FreezeAsset), basics.AssetCreatable, task, resourceTasks, queue)
+ loadAccountsAddAccountTask(&stxn.Txn.FreezeAccount, task, accountTasks, queue)
+ }
case protocol.ApplicationCallTx:
+ if stxn.Txn.ApplicationID != 0 {
+ // load the global - so that we'll have the program
+ loadAccountsAddResourceTask(nil, basics.CreatableIndex(stxn.Txn.ApplicationID), basics.AppCreatable, task, resourceTasks, queue)
+ // load the local - so that we'll have the local state
+ // TODO: this is something we need to decide if we want to enable, since not
+ // every application call would use local storage.
+ if (stxn.Txn.ApplicationCallTxnFields.OnCompletion == transactions.OptInOC) ||
+ (stxn.Txn.ApplicationCallTxnFields.OnCompletion == transactions.CloseOutOC) ||
+ (stxn.Txn.ApplicationCallTxnFields.OnCompletion == transactions.ClearStateOC) {
+ loadAccountsAddResourceTask(&stxn.Txn.Sender, basics.CreatableIndex(stxn.Txn.ApplicationID), basics.AppCreatable, task, resourceTasks, queue)
+ }
+ }
+
+ // do not preload Txn.ForeignApps, Txn.ForeignAssets, Txn.Accounts
+ // since they might be non-used arbitrary values
+
case protocol.StateProofTx:
case protocol.KeyRegistrationTx:
}
+
// If you add new addresses here, also add them in getTxnAddresses().
if !stxn.Txn.Sender.IsZero() {
loadAccountsAddAccountTask(&stxn.Txn.Sender, task, accountTasks, queue)
@@ -356,24 +403,20 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
// iterate on the transaction groups tasks. This array retains the original order.
completed := make(map[int64]bool)
- for i := int64(0); i < int64(len(p.groups)); {
+ for i := int64(0); i < int64(len(p.txnGroups)); {
wait:
incompleteCount := atomic.LoadInt64(&groupsReady[i].incompleteCount)
if incompleteCount > 0 || (incompleteCount != dependencyFreeGroup && !completed[i]) {
select {
case done := <-groupDoneCh:
if done.err != nil {
- // if there is an error, report the error to the output channel.
- p.outChan <- LoadedTransactionGroup{
- Err: &GroupTaskError{
- err: done.err,
- GroupIdx: done.groupIdx,
- Address: done.task.address,
- CreatableIndex: done.task.creatableIndex,
- CreatableType: done.task.creatableType,
- },
+ groupsReady[done.groupIdx].err = &GroupTaskError{
+ err: done.err,
+ GroupIdx: done.groupIdx,
+ Address: done.task.address,
+ CreatableIndex: done.task.creatableIndex,
+ CreatableType: done.task.creatableType,
}
- return
}
if done.groupIdx > i {
// mark future txn as ready.
@@ -388,7 +431,7 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
}
}
next := i
- for ; next < int64(len(p.groups)); next++ {
+ for ; next < int64(len(p.txnGroups)); next++ {
if !completed[next] {
if next > i {
i = next
@@ -399,10 +442,11 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
delete(completed, next)
- // if we had no error, write the result to the output channel.
+ // write the result to the output channel.
// this write will not block since we preallocated enough space on the channel.
p.outChan <- LoadedTransactionGroup{
- TxnGroup: p.groups[next],
+ Err: groupsReady[next].err,
+ TxnGroup: p.txnGroups[next],
Accounts: groupsReady[next].balances,
Resources: groupsReady[next].resources,
}
@@ -460,15 +504,19 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
// if there was an error..
if err != nil {
// there was an error loading that entry.
- break
+ for _, wt := range task.groupTasks {
+ // notify the channel of the error.
+ wt.markCompletionAcctError(err, task, groupDoneCh)
+ }
+ continue
}
br := LoadedAccountDataEntry{
Address: task.address,
Data: &acctData,
}
// update all the group tasks with the new acquired balance.
- for i, wt := range task.groups {
- wt.markCompletionAcct(task.groupIndices[i], br, groupDoneCh)
+ for i, wt := range task.groupTasks {
+ wt.markCompletionAcct(task.groupTasksIndices[i], br, groupDoneCh)
}
continue
}
@@ -479,7 +527,11 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
creator, ok, err = p.ledger.GetCreatorForRound(p.rnd, task.creatableIndex, task.creatableType)
if err != nil {
// there was an error loading that entry.
- break
+ for _, wt := range task.groupTasks {
+ // notify the channel of the error.
+ wt.markCompletionAcctError(err, task, groupDoneCh)
+ }
+ continue
}
if !ok {
re := LoadedResourcesEntry{
@@ -487,8 +539,8 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
CreatableType: task.creatableType,
}
// update all the group tasks with the new acquired balance.
- for i, wt := range task.groups {
- wt.markCompletionResource(task.groupIndices[i], re, groupDoneCh)
+ for i, wt := range task.groupTasks {
+ wt.markCompletionResource(task.groupTasksIndices[i], re, groupDoneCh)
}
continue
}
@@ -508,7 +560,11 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
}
if err != nil {
// there was an error loading that entry.
- break
+ for _, wt := range task.groupTasks {
+ // notify the channel of the error.
+ wt.markCompletionAcctError(err, task, groupDoneCh)
+ }
+ continue
}
re := LoadedResourcesEntry{
Resource: &resource,
@@ -517,14 +573,8 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
CreatableType: task.creatableType,
}
// update all the group tasks with the new acquired balance.
- for i, wt := range task.groups {
- wt.markCompletionResource(task.groupIndices[i], re, groupDoneCh)
+ for i, wt := range task.groupTasks {
+ wt.markCompletionResource(task.groupTasksIndices[i], re, groupDoneCh)
}
}
- // if we got here, it means that there was an error.
- // in every case we get here, the task is gurenteed to be a non-nil.
- for _, wt := range task.groups {
- // notify the channel of the error.
- wt.markCompletionAcctError(err, task, groupDoneCh)
- }
}
diff --git a/ledger/internal/prefetcher/prefetcher_alignment_test.go b/ledger/internal/prefetcher/prefetcher_alignment_test.go
index 05a672d32..5d78ce286 100644
--- a/ledger/internal/prefetcher/prefetcher_alignment_test.go
+++ b/ledger/internal/prefetcher/prefetcher_alignment_test.go
@@ -145,6 +145,9 @@ func (l *prefetcherAlignmentTestLedger) LookupAsset(rnd basics.Round, addr basic
return l.assets[addr][aidx], nil
}
+func (l *prefetcherAlignmentTestLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ panic("not implemented")
+}
func (l *prefetcherAlignmentTestLedger) GetCreatorForRound(_ basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
l.mu.Lock()
if l.requestedCreators == nil {
@@ -395,7 +398,6 @@ func TestEvaluatorPrefetcherAlignmentCreateAsset(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentReconfigAsset(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
addr := makeAddress(1)
@@ -448,7 +450,6 @@ func TestEvaluatorPrefetcherAlignmentReconfigAsset(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentAssetOptIn(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -503,8 +504,7 @@ func TestEvaluatorPrefetcherAlignmentAssetOptIn(t *testing.T) {
require.Equal(t, requested, prefetched)
}
-func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
- t.Skip("disabled")
+func TestEvaluatorPrefetcherAlignmentAssetOptInCloseTo(t *testing.T) {
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -570,8 +570,97 @@ func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
require.Equal(t, requested, prefetched)
}
+func TestEvaluatorPrefetcherAlignmentAssetTransfer(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ assetID := basics.AssetIndex(5)
+ l := &prefetcherAlignmentTestLedger{
+ balances: map[basics.Address]ledgercore.AccountData{
+ rewardsPool(): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1234567890},
+ },
+ },
+ makeAddress(1): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1000001},
+ TotalAssets: 1,
+ TotalAssetParams: 1,
+ },
+ },
+ makeAddress(2): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1000002},
+ },
+ },
+ makeAddress(3): {
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1000003},
+ },
+ },
+ },
+ assets: map[basics.Address]map[basics.AssetIndex]ledgercore.AssetResource{
+ makeAddress(1): {
+ assetID: {
+ AssetParams: &basics.AssetParams{},
+ AssetHolding: &basics.AssetHolding{},
+ },
+ },
+ makeAddress(2): {
+ assetID: {
+ AssetHolding: &basics.AssetHolding{Amount: 5},
+ },
+ },
+ makeAddress(3): {
+ assetID: {
+ AssetHolding: &basics.AssetHolding{},
+ },
+ },
+ },
+ creators: map[basics.CreatableIndex]basics.Address{
+ basics.CreatableIndex(assetID): makeAddress(1),
+ },
+ }
+
+ txn := transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(2),
+ GenesisHash: genesisHash(),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: assetID,
+ AssetReceiver: makeAddress(3),
+ AssetAmount: 1,
+ },
+ }
+
+ requested, prefetched := run(t, l, txn)
+
+ prefetched.Accounts[rewardsPool()] = struct{}{}
+ require.Equal(t, requested, prefetched)
+
+ // zero transfer of any asset
+ txn = transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ GenesisHash: genesisHash(),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: assetID + 12345,
+ AssetReceiver: makeAddress(2),
+ AssetAmount: 0,
+ },
+ }
+
+ requested, prefetched = run(t, l, txn)
+
+ prefetched.Accounts[rewardsPool()] = struct{}{}
+ require.Equal(t, requested, prefetched)
+}
+
func TestEvaluatorPrefetcherAlignmentAssetClawback(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
assetID := basics.AssetIndex(5)
@@ -811,7 +900,6 @@ func TestEvaluatorPrefetcherAlignmentCreateApplication(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentDeleteApplication(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
addr := makeAddress(1)
@@ -866,7 +954,6 @@ func TestEvaluatorPrefetcherAlignmentDeleteApplication(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationOptIn(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -925,7 +1012,6 @@ func TestEvaluatorPrefetcherAlignmentApplicationOptIn(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationCloseOut(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -990,7 +1076,6 @@ func TestEvaluatorPrefetcherAlignmentApplicationCloseOut(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationClearState(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1055,7 +1140,6 @@ func TestEvaluatorPrefetcherAlignmentApplicationClearState(t *testing.T) {
}
func TestEvaluatorPrefetcherAlignmentApplicationCallAccountsDeclaration(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1116,15 +1200,14 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallAccountsDeclaration(t *testi
requested, prefetched := run(t, l, txn)
prefetched.Accounts[rewardsPool()] = struct{}{}
- // Loading accounts depends on the smart contract program. Ignore the addresses
- // not requested.
- requested.Accounts[makeAddress(5)] = struct{}{}
- requested.Accounts[makeAddress(3)] = struct{}{}
+ // Foreign accounts are not loaded, ensure they are not prefetched
+ require.NotContains(t, prefetched.Accounts, makeAddress(5))
+ require.NotContains(t, prefetched.Accounts, makeAddress(3))
+
require.Equal(t, requested, prefetched)
}
func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAppsDeclaration(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1185,15 +1268,13 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAppsDeclaration(t *te
requested, prefetched := run(t, l, txn)
prefetched.Accounts[rewardsPool()] = struct{}{}
- // Loading foreign apps depends on the smart contract program. Ignore the apps
- // not requested.
- requested.Creators[creatable{cindex: 6, ctype: basics.AppCreatable}] = struct{}{}
- requested.Creators[creatable{cindex: 8, ctype: basics.AppCreatable}] = struct{}{}
+ // Foreign apps are not loaded, ensure they are not prefetched
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 6, ctype: basics.AppCreatable})
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 8, ctype: basics.AppCreatable})
require.Equal(t, requested, prefetched)
}
func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAssetsDeclaration(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
appID := basics.AppIndex(5)
@@ -1254,10 +1335,9 @@ func TestEvaluatorPrefetcherAlignmentApplicationCallForeignAssetsDeclaration(t *
requested, prefetched := run(t, l, txn)
prefetched.Accounts[rewardsPool()] = struct{}{}
- // Loading foreign assets depends on the smart contract program. Ignore the assets
- // not requested.
- requested.Creators[creatable{cindex: 6, ctype: basics.AssetCreatable}] = struct{}{}
- requested.Creators[creatable{cindex: 8, ctype: basics.AssetCreatable}] = struct{}{}
+ // Foreign assets are not loaded, ensure they are not prefetched
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 6, ctype: basics.AssetCreatable})
+ require.NotContains(t, prefetched.Creators, creatable{cindex: 8, ctype: basics.AssetCreatable})
require.Equal(t, requested, prefetched)
}
diff --git a/ledger/internal/prefetcher/prefetcher_test.go b/ledger/internal/prefetcher/prefetcher_test.go
index 40fe6949b..555cc8f6d 100644
--- a/ledger/internal/prefetcher/prefetcher_test.go
+++ b/ledger/internal/prefetcher/prefetcher_test.go
@@ -259,7 +259,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset config transaction for a non-existing asset",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetConfigTx,
@@ -296,7 +295,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset config transaction for an existing asset",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetConfigTx,
@@ -333,7 +331,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "asset transfer transaction",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetTransferTx,
@@ -342,6 +339,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
AssetTransferTxnFields: transactions.AssetTransferTxnFields{
XferAsset: 1001,
+ AssetAmount: 1,
AssetSender: makeAddress(2),
AssetReceiver: makeAddress(3),
AssetCloseTo: makeAddress(4),
@@ -384,8 +382,52 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
},
{
+ name: "asset transfer transaction zero amount",
+ signedTxn: transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: 1001,
+ AssetSender: makeAddress(2),
+ AssetReceiver: makeAddress(3),
+ AssetCloseTo: makeAddress(4),
+ },
+ },
+ },
+ accounts: []prefetcher.LoadedAccountDataEntry{
+ {
+ Address: &feeSinkAddr,
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ },
+ },
+ {
+ Address: makeAddressPtr(1),
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 100000000}},
+ },
+ },
+ },
+ resources: []prefetcher.LoadedResourcesEntry{
+ {
+ Address: makeAddressPtr(2),
+ CreatableIndex: 1001,
+ CreatableType: basics.AssetCreatable,
+ Resource: &ledgercore.AccountResource{},
+ },
+ {
+ Address: makeAddressPtr(4),
+ CreatableIndex: 1001,
+ CreatableType: basics.AssetCreatable,
+ Resource: &ledgercore.AccountResource{},
+ },
+ },
+ },
+ {
name: "asset freeze transaction",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.AssetFreezeTx,
@@ -435,7 +477,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
},
{
name: "application transaction",
- skip: true,
signedTxn: transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.ApplicationCallTx,
@@ -471,20 +512,23 @@ func TestEvaluatorPrefetcher(t *testing.T) {
AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 100000000}},
},
},
- {
- Address: makeAddressPtr(4),
- Data: &ledgercore.AccountData{
- AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ /*
+ {
+ Address: makeAddressPtr(4),
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ },
},
- },
- {
- Address: makeAddressPtr(5),
- Data: &ledgercore.AccountData{
- AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ {
+ Address: makeAddressPtr(5),
+ Data: &ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 0}},
+ },
},
- },
+ */
},
resources: []prefetcher.LoadedResourcesEntry{
+ /* - if we'll decide that we want to prefetch the foreign apps/assets, then this should be enabled
{
Address: makeAddressPtr(2),
CreatableIndex: 1001,
@@ -503,7 +547,8 @@ func TestEvaluatorPrefetcher(t *testing.T) {
CreatableType: basics.AppCreatable,
Resource: nil,
},
- /* - if we'll decide that we want to perfetch the account local state, then this should be enabled.
+ */
+ /* - if we'll decide that we want to prefetch the account local state, then this should be enabled.
{
address: acctAddrPtr(1),
creatableIndex: 10,
@@ -545,7 +590,6 @@ func TestEvaluatorPrefetcher(t *testing.T) {
// Test for error from LookupAsset
func TestAssetLookupError(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
rnd := basics.Round(5)
@@ -568,10 +612,12 @@ func TestAssetLookupError(t *testing.T) {
}
errorReceived := false
- groups := make([][]transactions.SignedTxnWithAD, 5)
- for i := 0; i < 5; i++ {
- groups[i] = make([]transactions.SignedTxnWithAD, 2)
- for j := 0; j < 2; j++ {
+ const numGroups = 5
+ const txnPerGroup = 2
+ groups := make([][]transactions.SignedTxnWithAD, numGroups)
+ for i := 0; i < numGroups; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, txnPerGroup)
+ for j := 0; j < txnPerGroup; j++ {
groups[i][j].SignedTxn = assetTransferTxn
if i == 2 {
// force error in asset lookup in the second txn group only
@@ -579,8 +625,12 @@ func TestAssetLookupError(t *testing.T) {
}
}
}
+
preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+
+ receivedNumGroups := 0
for loadedTxnGroup := range preloadedTxnGroupsCh {
+ receivedNumGroups++
if loadedTxnGroup.Err != nil {
errorReceived = true
require.Equal(t, int64(2), loadedTxnGroup.Err.GroupIdx)
@@ -589,13 +639,14 @@ func TestAssetLookupError(t *testing.T) {
require.Equal(t, errorTriggerAssetIndex, int(loadedTxnGroup.Err.CreatableIndex))
require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
}
+ require.Equal(t, txnPerGroup, len(loadedTxnGroup.TxnGroup))
}
require.True(t, errorReceived)
+ require.Equal(t, numGroups, receivedNumGroups)
}
// Test for error from GetCreatorForRound
func TestGetCreatorForRoundError(t *testing.T) {
- t.Skip("disabled")
partitiontest.PartitionTest(t)
rnd := basics.Round(5)
@@ -610,23 +661,33 @@ func TestGetCreatorForRoundError(t *testing.T) {
Sender: makeAddress(1),
},
AssetConfigTxnFields: transactions.AssetConfigTxnFields{
- ConfigAsset: errorTriggerCreatableIndex,
+ ConfigAsset: 101,
},
},
}
+ createAssetFailedTxn := createAssetTxn
+ createAssetFailedTxn.Txn.ConfigAsset = errorTriggerCreatableIndex
errorReceived := false
- groups := make([][]transactions.SignedTxnWithAD, 5)
- for i := 0; i < 5; i++ {
- groups[i] = make([]transactions.SignedTxnWithAD, 10)
- for j := 0; j < 10; j++ {
+ const numGroups = 5
+ const txnPerGroup = 10
+ groups := make([][]transactions.SignedTxnWithAD, numGroups)
+ for i := 0; i < numGroups; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, txnPerGroup)
+ for j := 0; j < txnPerGroup; j++ {
groups[i][j].SignedTxn = createAssetTxn
+ // fail only the first txn in the first group
+ if i == 0 && j == 0 {
+ groups[i][j].SignedTxn = createAssetFailedTxn
+ }
}
}
preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+ receivedNumGroups := 0
for loadedTxnGroup := range preloadedTxnGroupsCh {
+ receivedNumGroups++
if loadedTxnGroup.Err != nil {
errorReceived = true
require.True(t, errors.Is(loadedTxnGroup.Err, getCreatorError{}))
@@ -634,8 +695,10 @@ func TestGetCreatorForRoundError(t *testing.T) {
require.Equal(t, errorTriggerCreatableIndex, int(loadedTxnGroup.Err.CreatableIndex))
require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
}
+ require.Equal(t, txnPerGroup, len(loadedTxnGroup.TxnGroup))
}
require.True(t, errorReceived)
+ require.Equal(t, numGroups, receivedNumGroups)
}
// Test for error from LookupWithoutRewards
@@ -658,29 +721,41 @@ func TestLookupWithoutRewards(t *testing.T) {
},
},
}
+ createAssetFailedTxn := createAssetTxn
+ createAssetFailedTxn.Txn.Sender = makeAddress(10)
errorReceived := false
- groups := make([][]transactions.SignedTxnWithAD, 5)
- for i := 0; i < 5; i++ {
- groups[i] = make([]transactions.SignedTxnWithAD, 10)
- for j := 0; j < 10; j++ {
+ const numGroups = 5
+ const txnPerGroup = 10
+ groups := make([][]transactions.SignedTxnWithAD, numGroups)
+ for i := 0; i < numGroups; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, txnPerGroup)
+ for j := 0; j < txnPerGroup; j++ {
groups[i][j].SignedTxn = createAssetTxn
+ // fail only last txn in the first group
+ if i == 0 && j == txnPerGroup-1 {
+ groups[i][j].SignedTxn = createAssetFailedTxn
+ }
}
}
- ledger.errorTriggerAddress[createAssetTxn.Txn.Sender] = true
+ ledger.errorTriggerAddress[createAssetFailedTxn.Txn.Sender] = true
preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+ receivedNumGroups := 0
for loadedTxnGroup := range preloadedTxnGroupsCh {
+ receivedNumGroups++
if loadedTxnGroup.Err != nil {
errorReceived = true
require.True(t, errors.Is(loadedTxnGroup.Err, lookupError{}))
- require.Equal(t, makeAddress(1), *loadedTxnGroup.Err.Address)
+ require.Equal(t, makeAddress(10), *loadedTxnGroup.Err.Address)
require.Equal(t, 0, int(loadedTxnGroup.Err.CreatableIndex))
require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
}
+ require.Equal(t, txnPerGroup, len(loadedTxnGroup.TxnGroup))
}
require.True(t, errorReceived)
+ require.Equal(t, numGroups, receivedNumGroups)
}
func TestEvaluatorPrefetcherQueueExpansion(t *testing.T) {
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 85b31ade3..99fe75966 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -532,6 +532,23 @@ func (l *Ledger) lookupResource(rnd basics.Round, addr basics.Address, aidx basi
return res, nil
}
+// LookupKv loads a KV pair from the accounts update
+func (l *Ledger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+
+ return l.accts.LookupKv(rnd, key)
+}
+
+// LookupKeysByPrefix searches keys with specific prefix, up to `maxKeyNum`
+// if `maxKeyNum` == 0, then it loads all keys with such prefix
+func (l *Ledger) LookupKeysByPrefix(round basics.Round, keyPrefix string, maxKeyNum uint64) ([]string, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+
+ return l.accts.LookupKeysByPrefix(round, keyPrefix, maxKeyNum)
+}
+
// LookupAgreement returns account data used by agreement.
func (l *Ledger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) {
l.trackerMu.RLock()
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 95a0a84e2..c051dddeb 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -2382,9 +2382,10 @@ int %d // 10001000
func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
partitiontest.PartitionTest(t)
+ prevAccountDBVersion := accountDBVersion
accountDBVersion = 6
defer func() {
- accountDBVersion = 7
+ accountDBVersion = prevAccountDBVersion
}()
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
testProtocolVersion := protocol.ConsensusVersion("test-protocol-migrate-shrink-deltas")
@@ -2420,6 +2421,11 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
if err := accountsCreateCatchpointFirstStageInfoTable(ctx, tx); err != nil {
return err
}
+ // this line creates the kvstore table, even though it is not required by the accountDBVersion 6 -> 7 migration;
+ // without it, this test would fail on later versions that do require the kvstore table
+ if err := accountsCreateBoxTable(ctx, tx); err != nil {
+ return err
+ }
return nil
})
require.NoError(t, err)
diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go
index eb09706ff..bafd9f32a 100644
--- a/ledger/ledgercore/accountdata.go
+++ b/ledger/ledgercore/accountdata.go
@@ -40,12 +40,14 @@ type AccountBaseData struct {
RewardedMicroAlgos basics.MicroAlgos
AuthAddr basics.Address
- TotalAppSchema basics.StateSchema
- TotalExtraAppPages uint32
- TotalAppParams uint64
- TotalAppLocalStates uint64
- TotalAssetParams uint64
- TotalAssets uint64
+ TotalAppSchema basics.StateSchema // Totals across created globals, and opted in locals.
+ TotalExtraAppPages uint32 // Total number of extra pages across all created apps
+ TotalAppParams uint64 // Total number of apps this account has created
+ TotalAppLocalStates uint64 // Total number of apps this account is opted into.
+ TotalAssetParams uint64 // Total number of assets created by this account
+ TotalAssets uint64 // Total of asset creations and optins (i.e. number of holdings)
+ TotalBoxes uint64 // Total number of boxes associated to this account
+ TotalBoxBytes uint64 // Total bytes for this account's boxes. keys _and_ values count
}
// VotingData holds participation information
@@ -82,6 +84,8 @@ func ToAccountData(acct basics.AccountData) AccountData {
TotalAssets: uint64(len(acct.Assets)),
TotalAppParams: uint64(len(acct.AppParams)),
TotalAppLocalStates: uint64(len(acct.AppLocalStates)),
+ TotalBoxes: acct.TotalBoxes,
+ TotalBoxBytes: acct.TotalBoxBytes,
},
VotingData: VotingData{
VoteID: acct.VoteID,
@@ -112,6 +116,8 @@ func AssignAccountData(a *basics.AccountData, acct AccountData) {
a.AuthAddr = acct.AuthAddr
a.TotalAppSchema = acct.TotalAppSchema
a.TotalExtraAppPages = acct.TotalExtraAppPages
+ a.TotalBoxes = acct.TotalBoxes
+ a.TotalBoxBytes = acct.TotalBoxBytes
}
// WithUpdatedRewards calls basics account data WithUpdatedRewards
@@ -138,6 +144,7 @@ func (u AccountData) MinBalance(proto *config.ConsensusParams) (res basics.Micro
u.TotalAppSchema,
uint64(u.TotalAppParams), uint64(u.TotalAppLocalStates),
uint64(u.TotalExtraAppPages),
+ u.TotalBoxes, u.TotalBoxBytes,
)
}
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 57bbbb607..4319cc302 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -73,14 +73,25 @@ type IncludedTransactions struct {
Intra uint64 // the index of the transaction in the block
}
+// A KvValueDelta shows how the Data associated with a key in the kvstore has
+// changed. However, OldData is elided during evaluation, and only filled in at
// the conclusion of a block during the call to roundCowState.deltas()
+type KvValueDelta struct {
+ // Data stores the most recent value (nil == deleted)
+ Data []byte
+
+ // OldData stores the previous value (nil == didn't exist)
+ OldData []byte
+}
+
// StateDelta describes the delta between a given round to the previous round
type StateDelta struct {
- // modified accounts
- // Accts AccountDeltas
-
// modified new accounts
Accts AccountDeltas
+ // modified kv pairs (nil == delete)
+ KvMods map[string]KvValueDelta
+
// new Txids for the txtail and TxnCounter, mapped to txn.LastValid
Txids map[transactions.Txid]IncludedTransactions
@@ -107,8 +118,8 @@ type StateDelta struct {
Totals AccountTotals
}
-// NewBalanceRecord is similar to basics.BalanceRecord but with decoupled base and voting data
-type NewBalanceRecord struct {
+// BalanceRecord is similar to basics.BalanceRecord but with decoupled base and voting data
+type BalanceRecord struct {
Addr basics.Address
AccountData
}
@@ -160,18 +171,18 @@ type AssetResourceRecord struct {
// The map would point the address/address+creatable id onto the index of the
// element within the slice.
type AccountDeltas struct {
- // Actual data. If an account is deleted, `accts` contains the NewBalanceRecord
+ // Actual data. If an account is deleted, `Accts` contains the BalanceRecord
// with an empty `AccountData` and a populated `Addr`.
- accts []NewBalanceRecord
+ Accts []BalanceRecord
// cache for addr to deltas index resolution
acctsCache map[basics.Address]int
- // AppResources deltas. If app params or local state is deleted, there is a nil value in appResources.Params or appResources.State and Deleted flag set
- appResources []AppResourceRecord
+ // AppResources deltas. If app params or local state is deleted, there is a nil value in AppResources.Params or AppResources.State and Deleted flag set
+ AppResources []AppResourceRecord
// caches for {addr, app id} to app params delta resolution
appResourcesCache map[AccountApp]int
- assetResources []AssetResourceRecord
+ AssetResources []AssetResourceRecord
assetResourcesCache map[AccountAsset]int
}
@@ -181,6 +192,7 @@ type AccountDeltas struct {
func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, stateProofNext basics.Round) StateDelta {
return StateDelta{
Accts: MakeAccountDeltas(hint),
+ KvMods: make(map[string]KvValueDelta),
Txids: make(map[transactions.Txid]IncludedTransactions, hint),
Txleases: make(map[Txlease]basics.Round),
// asset or application creation are considered as rare events so do not pre-allocate space for them
@@ -195,7 +207,7 @@ func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int,
// MakeAccountDeltas creates account delta
func MakeAccountDeltas(hint int) AccountDeltas {
return AccountDeltas{
- accts: make([]NewBalanceRecord, 0, hint*2),
+ Accts: make([]BalanceRecord, 0, hint*2),
acctsCache: make(map[basics.Address]int, hint*2),
appResourcesCache: make(map[AccountApp]int),
@@ -209,13 +221,13 @@ func (ad AccountDeltas) GetData(addr basics.Address) (AccountData, bool) {
if !ok {
return AccountData{}, false
}
- return ad.accts[idx].AccountData, true
+ return ad.Accts[idx].AccountData, true
}
// GetAppParams returns app params delta value
func (ad AccountDeltas) GetAppParams(addr basics.Address, aidx basics.AppIndex) (AppParamsDelta, bool) {
if idx, ok := ad.appResourcesCache[AccountApp{addr, aidx}]; ok {
- result := ad.appResources[idx].Params
+ result := ad.AppResources[idx].Params
return result, result.Deleted || result.Params != nil
}
return AppParamsDelta{}, false
@@ -224,7 +236,7 @@ func (ad AccountDeltas) GetAppParams(addr basics.Address, aidx basics.AppIndex)
// GetAssetParams returns asset params delta value
func (ad AccountDeltas) GetAssetParams(addr basics.Address, aidx basics.AssetIndex) (AssetParamsDelta, bool) {
if idx, ok := ad.assetResourcesCache[AccountAsset{addr, aidx}]; ok {
- result := ad.assetResources[idx].Params
+ result := ad.AssetResources[idx].Params
return result, result.Deleted || result.Params != nil
}
return AssetParamsDelta{}, false
@@ -233,7 +245,7 @@ func (ad AccountDeltas) GetAssetParams(addr basics.Address, aidx basics.AssetInd
// GetAppLocalState returns app local state delta value
func (ad AccountDeltas) GetAppLocalState(addr basics.Address, aidx basics.AppIndex) (AppLocalStateDelta, bool) {
if idx, ok := ad.appResourcesCache[AccountApp{addr, aidx}]; ok {
- result := ad.appResources[idx].State
+ result := ad.AppResources[idx].State
return result, result.Deleted || result.LocalState != nil
}
return AppLocalStateDelta{}, false
@@ -242,7 +254,7 @@ func (ad AccountDeltas) GetAppLocalState(addr basics.Address, aidx basics.AppInd
// GetAssetHolding returns asset holding delta value
func (ad AccountDeltas) GetAssetHolding(addr basics.Address, aidx basics.AssetIndex) (AssetHoldingDelta, bool) {
if idx, ok := ad.assetResourcesCache[AccountAsset{addr, aidx}]; ok {
- result := ad.assetResources[idx].Holding
+ result := ad.AssetResources[idx].Holding
return result, result.Deleted || result.Holding != nil
}
return AssetHoldingDelta{}, false
@@ -250,32 +262,32 @@ func (ad AccountDeltas) GetAssetHolding(addr basics.Address, aidx basics.AssetIn
// ModifiedAccounts returns list of addresses of modified accounts
func (ad AccountDeltas) ModifiedAccounts() []basics.Address {
- result := make([]basics.Address, len(ad.accts))
- for i := 0; i < len(ad.accts); i++ {
- result[i] = ad.accts[i].Addr
+ result := make([]basics.Address, len(ad.Accts))
+ for i := 0; i < len(ad.Accts); i++ {
+ result[i] = ad.Accts[i].Addr
}
// consistency check: ensure all addresses for deleted params/holdings/states are also in base accounts
// it is nice to check created params/holdings/states but we lack of such info here
for aapp, idx := range ad.appResourcesCache {
- if ad.appResources[idx].Params.Deleted {
+ if ad.AppResources[idx].Params.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account app param delta: addr %s not in base account", aapp.Address))
}
}
- if ad.appResources[idx].State.Deleted {
+ if ad.AppResources[idx].State.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account app state delta: addr %s not in base account", aapp.Address))
}
}
}
for aapp, idx := range ad.assetResourcesCache {
- if ad.assetResources[idx].Params.Deleted {
+ if ad.AssetResources[idx].Params.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account asset param delta: addr %s not in base account", aapp.Address))
}
}
- if ad.assetResources[idx].Holding.Deleted {
+ if ad.AssetResources[idx].Holding.Deleted {
if _, ok := ad.acctsCache[aapp.Address]; !ok {
panic(fmt.Sprintf("account asset holding delta: addr %s not in base account", aapp.Address))
}
@@ -287,20 +299,20 @@ func (ad AccountDeltas) ModifiedAccounts() []basics.Address {
// MergeAccounts applies other accounts into this StateDelta accounts
func (ad *AccountDeltas) MergeAccounts(other AccountDeltas) {
- for new := range other.accts {
- addr := other.accts[new].Addr
- acct := other.accts[new].AccountData
+ for new := range other.Accts {
+ addr := other.Accts[new].Addr
+ acct := other.Accts[new].AccountData
ad.Upsert(addr, acct)
}
for aapp, idx := range other.appResourcesCache {
- params := other.appResources[idx].Params
- state := other.appResources[idx].State
+ params := other.AppResources[idx].Params
+ state := other.AppResources[idx].State
ad.UpsertAppResource(aapp.Address, aapp.App, params, state)
}
for aapp, idx := range other.assetResourcesCache {
- params := other.assetResources[idx].Params
- holding := other.assetResources[idx].Holding
+ params := other.AssetResources[idx].Params
+ holding := other.AssetResources[idx].Holding
ad.UpsertAssetResource(aapp.Address, aapp.Asset, params, holding)
}
}
@@ -312,16 +324,16 @@ func (ad AccountDeltas) GetResource(addr basics.Address, aidx basics.CreatableIn
aa := AccountAsset{addr, basics.AssetIndex(aidx)}
idx, ok := ad.assetResourcesCache[aa]
if ok {
- ret.AssetParams = ad.assetResources[idx].Params.Params
- ret.AssetHolding = ad.assetResources[idx].Holding.Holding
+ ret.AssetParams = ad.AssetResources[idx].Params.Params
+ ret.AssetHolding = ad.AssetResources[idx].Holding.Holding
}
return ret, ok
case basics.AppCreatable:
aa := AccountApp{addr, basics.AppIndex(aidx)}
idx, ok := ad.appResourcesCache[aa]
if ok {
- ret.AppParams = ad.appResources[idx].Params.Params
- ret.AppLocalState = ad.appResources[idx].State.LocalState
+ ret.AppParams = ad.AppResources[idx].Params.Params
+ ret.AppLocalState = ad.AppResources[idx].State.LocalState
}
return ret, ok
}
@@ -330,24 +342,24 @@ func (ad AccountDeltas) GetResource(addr basics.Address, aidx basics.CreatableIn
// Len returns number of stored accounts
func (ad *AccountDeltas) Len() int {
- return len(ad.accts)
+ return len(ad.Accts)
}
// GetByIdx returns address and AccountData
// It does NOT check boundaries.
func (ad *AccountDeltas) GetByIdx(i int) (basics.Address, AccountData) {
- return ad.accts[i].Addr, ad.accts[i].AccountData
+ return ad.Accts[i].Addr, ad.Accts[i].AccountData
}
// Upsert adds ledgercore.AccountData into deltas
func (ad *AccountDeltas) Upsert(addr basics.Address, data AccountData) {
if idx, exist := ad.acctsCache[addr]; exist { // nil map lookup is OK
- ad.accts[idx] = NewBalanceRecord{Addr: addr, AccountData: data}
+ ad.Accts[idx] = BalanceRecord{Addr: addr, AccountData: data}
return
}
- last := len(ad.accts)
- ad.accts = append(ad.accts, NewBalanceRecord{Addr: addr, AccountData: data})
+ last := len(ad.Accts)
+ ad.Accts = append(ad.Accts, BalanceRecord{Addr: addr, AccountData: data})
if ad.acctsCache == nil {
ad.acctsCache = make(map[basics.Address]int)
@@ -360,12 +372,12 @@ func (ad *AccountDeltas) UpsertAppResource(addr basics.Address, aidx basics.AppI
key := AccountApp{addr, aidx}
value := AppResourceRecord{aidx, addr, params, state}
if idx, exist := ad.appResourcesCache[key]; exist {
- ad.appResources[idx] = value
+ ad.AppResources[idx] = value
return
}
- last := len(ad.appResources)
- ad.appResources = append(ad.appResources, value)
+ last := len(ad.AppResources)
+ ad.AppResources = append(ad.AppResources, value)
if ad.appResourcesCache == nil {
ad.appResourcesCache = make(map[AccountApp]int)
@@ -378,12 +390,12 @@ func (ad *AccountDeltas) UpsertAssetResource(addr basics.Address, aidx basics.As
key := AccountAsset{addr, aidx}
value := AssetResourceRecord{aidx, addr, params, holding}
if idx, exist := ad.assetResourcesCache[key]; exist {
- ad.assetResources[idx] = value
+ ad.AssetResources[idx] = value
return
}
- last := len(ad.assetResources)
- ad.assetResources = append(ad.assetResources, value)
+ last := len(ad.AssetResources)
+ ad.AssetResources = append(ad.AssetResources, value)
if ad.assetResourcesCache == nil {
ad.assetResourcesCache = make(map[AccountAsset]int)
@@ -395,11 +407,11 @@ func (ad *AccountDeltas) UpsertAssetResource(addr basics.Address, aidx basics.As
// For each data structure, reallocate if it would save us at least 50MB aggregate
// If provided maxBalLookback or maxTxnLife are zero, dependent optimizations will not occur.
func (sd *StateDelta) OptimizeAllocatedMemory(maxBalLookback uint64) {
- // accts takes up 232 bytes per entry, and is saved for 320 rounds
- if uint64(cap(sd.Accts.accts)-len(sd.Accts.accts))*accountArrayEntrySize*maxBalLookback > stateDeltaTargetOptimizationThreshold {
- accts := make([]NewBalanceRecord, len(sd.Accts.accts))
- copy(accts, sd.Accts.accts)
- sd.Accts.accts = accts
+ // Accts takes up 232 bytes per entry, and is saved for 320 rounds
+ if uint64(cap(sd.Accts.Accts)-len(sd.Accts.Accts))*accountArrayEntrySize*maxBalLookback > stateDeltaTargetOptimizationThreshold {
+ accts := make([]BalanceRecord, len(sd.Accts.Accts))
+ copy(accts, sd.Accts.Accts)
+ sd.Accts.Accts = accts
}
// acctsCache takes up 64 bytes per entry, and is saved for 320 rounds
@@ -423,14 +435,14 @@ func (ad AccountDeltas) GetBasicsAccountData(addr basics.Address) (basics.Accoun
}
result := basics.AccountData{}
- acct := ad.accts[idx].AccountData
+ acct := ad.Accts[idx].AccountData
AssignAccountData(&result, acct)
if len(ad.appResourcesCache) > 0 {
result.AppParams = make(map[basics.AppIndex]basics.AppParams)
result.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState)
for aapp, idx := range ad.appResourcesCache {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if aapp.Address == addr {
if !rec.Params.Deleted && rec.Params.Params != nil {
result.AppParams[aapp.App] = *rec.Params.Params
@@ -452,7 +464,7 @@ func (ad AccountDeltas) GetBasicsAccountData(addr basics.Address) (basics.Accoun
result.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
result.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
for aapp, idx := range ad.assetResourcesCache {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if aapp.Address == addr {
if !rec.Params.Deleted && rec.Params.Params != nil {
result.AssetParams[aapp.Asset] = *rec.Params.Params
@@ -475,9 +487,9 @@ func (ad AccountDeltas) GetBasicsAccountData(addr basics.Address) (basics.Accoun
// ToModifiedCreatables is only used in tests, to create a map of ModifiedCreatable.
func (ad AccountDeltas) ToModifiedCreatables(seen map[basics.CreatableIndex]struct{}) map[basics.CreatableIndex]ModifiedCreatable {
- result := make(map[basics.CreatableIndex]ModifiedCreatable, len(ad.appResources)+len(ad.assetResources))
+ result := make(map[basics.CreatableIndex]ModifiedCreatable, len(ad.AppResources)+len(ad.AssetResources))
for aapp, idx := range ad.appResourcesCache {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if rec.Params.Deleted {
result[basics.CreatableIndex(rec.Aidx)] = ModifiedCreatable{
Ctype: basics.AppCreatable,
@@ -496,7 +508,7 @@ func (ad AccountDeltas) ToModifiedCreatables(seen map[basics.CreatableIndex]stru
}
for aapp, idx := range ad.assetResourcesCache {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if rec.Params.Deleted {
result[basics.CreatableIndex(rec.Aidx)] = ModifiedCreatable{
Ctype: basics.AssetCreatable,
@@ -540,7 +552,7 @@ func AccumulateDeltas(base map[basics.Address]basics.AccountData, deltas Account
if ad.AppLocalStates == nil {
ad.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState, acct.TotalAppLocalStates)
}
- rec := deltas.appResources[idx]
+ rec := deltas.AppResources[idx]
if rec.Params.Deleted {
delete(ad.AppParams, aapp.App)
} else if rec.Params.Params != nil {
@@ -566,7 +578,7 @@ func AccumulateDeltas(base map[basics.Address]basics.AccountData, deltas Account
if ad.Assets == nil {
ad.Assets = make(map[basics.AssetIndex]basics.AssetHolding, acct.TotalAssets)
}
- rec := deltas.assetResources[idx]
+ rec := deltas.AssetResources[idx]
if rec.Params.Deleted {
delete(ad.AssetParams, aapp.Asset)
} else if rec.Params.Params != nil {
@@ -617,7 +629,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.appResourcesCache {
if aapp.Address == addr {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if rec.Params.Deleted {
delete(result.AppParams, aapp.App)
} else if rec.Params.Params != nil {
@@ -637,7 +649,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.appResourcesCache {
if aapp.Address == addr {
- rec := ad.appResources[idx]
+ rec := ad.AppResources[idx]
if rec.State.Deleted {
delete(result.AppLocalStates, aapp.App)
} else if rec.State.LocalState != nil {
@@ -657,7 +669,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.assetResourcesCache {
if aapp.Address == addr {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if rec.Params.Deleted {
delete(result.AssetParams, aapp.Asset)
} else if rec.Params.Params != nil {
@@ -677,7 +689,7 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
}
for aapp, idx := range ad.assetResourcesCache {
if aapp.Address == addr {
- rec := ad.assetResources[idx]
+ rec := ad.AssetResources[idx]
if rec.Holding.Deleted {
delete(result.Assets, aapp.Asset)
} else if rec.Holding.Holding != nil {
@@ -695,10 +707,10 @@ func (ad AccountDeltas) ApplyToBasicsAccountData(addr basics.Address, prev basic
// GetAllAppResources returns all AppResourceRecords
func (ad *AccountDeltas) GetAllAppResources() []AppResourceRecord {
- return ad.appResources
+ return ad.AppResources
}
// GetAllAssetResources returns all AssetResourceRecords
func (ad *AccountDeltas) GetAllAssetResources() []AssetResourceRecord {
- return ad.assetResources
+ return ad.AssetResources
}
diff --git a/ledger/lrukv.go b/ledger/lrukv.go
new file mode 100644
index 000000000..45f4f5027
--- /dev/null
+++ b/ledger/lrukv.go
@@ -0,0 +1,132 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "github.com/algorand/go-algorand/logging"
+)
+
+//msgp:ignore cachedKVData
+type cachedKVData struct {
+ persistedKVData
+
+ // kv key
+ key string
+}
+
+// lruKV provides a storage class for the most recently used kv data.
+// It doesn't have any synchronization primitive on its own and must be
+// synchronized by the caller.
+type lruKV struct {
+ // kvList contain the list of persistedKVData, where the front ones are the most "fresh"
+ // and the ones on the back are the oldest.
+ kvList *persistedKVDataList
+
+ // kvs provides fast access to the various elements in the list by using the key
+ kvs map[string]*persistedKVDataListNode
+
+ // pendingKVs are used as a way to avoid taking a write-lock. When the caller needs to "materialize" these,
+ // it would call flushPendingWrites and these would be merged into the kvs/kvList
+ pendingKVs chan cachedKVData
+
+ // log interface; used for logging the threshold event.
+ log logging.Logger
+
+	// pendingWritesWarnThreshold is the threshold beyond which we would write a warning for exceeding the number of pendingKVs entries
+ pendingWritesWarnThreshold int
+}
+
+// init initializes the lruKV for use.
+// thread locking semantics : write lock
+func (m *lruKV) init(log logging.Logger, pendingWrites int, pendingWritesWarnThreshold int) {
+ m.kvList = newPersistedKVList().allocateFreeNodes(pendingWrites)
+ m.kvs = make(map[string]*persistedKVDataListNode, pendingWrites)
+ m.pendingKVs = make(chan cachedKVData, pendingWrites)
+ m.log = log
+ m.pendingWritesWarnThreshold = pendingWritesWarnThreshold
+}
+
+// read returns the persistedKVData object that the lruKV has for the given key.
+// thread locking semantics : read lock
+func (m *lruKV) read(key string) (data persistedKVData, has bool) {
+ if el := m.kvs[key]; el != nil {
+ return el.Value.persistedKVData, true
+ }
+ return persistedKVData{}, false
+}
+
+// flushPendingWrites flushes the pending writes to the main lruKV cache.
+// thread locking semantics : write lock
+func (m *lruKV) flushPendingWrites() {
+ pendingEntriesCount := len(m.pendingKVs)
+ if pendingEntriesCount >= m.pendingWritesWarnThreshold {
+ m.log.Warnf("lruKV: number of entries in pendingKVs(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold)
+ }
+ for ; pendingEntriesCount > 0; pendingEntriesCount-- {
+ select {
+ case pendingKVData := <-m.pendingKVs:
+ m.write(pendingKVData.persistedKVData, pendingKVData.key)
+ default:
+ return
+ }
+ }
+}
+
+// writePending writes a single persistedKVData entry to the pendingKVs buffer.
+// the function doesn't block, and in case of a buffer overflow the entry would not be added.
+// thread locking semantics : no lock is required.
+func (m *lruKV) writePending(kv persistedKVData, key string) {
+ select {
+ case m.pendingKVs <- cachedKVData{persistedKVData: kv, key: key}:
+ default:
+ }
+}
+
+// write writes a single persistedKVData to the lruKV cache.
+// when writing the entry, the round number would be used to determine if it's a newer
+// version of what's already on the cache or not. In all cases, the entry is going
+// to be promoted to the front of the list.
+// thread locking semantics : write lock
+func (m *lruKV) write(kvData persistedKVData, key string) {
+ if el := m.kvs[key]; el != nil {
+		// already exists; is it a newer version?
+ if el.Value.before(&kvData) {
+ // we update with a newer version.
+ el.Value = &cachedKVData{persistedKVData: kvData, key: key}
+ }
+ m.kvList.moveToFront(el)
+ } else {
+ // new entry.
+ m.kvs[key] = m.kvList.pushFront(&cachedKVData{persistedKVData: kvData, key: key})
+ }
+}
+
+// prune adjusts the current size of the lruKV cache, by dropping the least
+// recently used entries.
+// thread locking semantics : write lock
+func (m *lruKV) prune(newSize int) (removed int) {
+ for {
+ if len(m.kvs) <= newSize {
+ break
+ }
+ back := m.kvList.back()
+ delete(m.kvs, back.Value.key)
+ m.kvList.remove(back)
+ removed++
+ }
+ return
+}
diff --git a/ledger/lrukv_test.go b/ledger/lrukv_test.go
new file mode 100644
index 000000000..d26616731
--- /dev/null
+++ b/ledger/lrukv_test.go
@@ -0,0 +1,240 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestLRUBasicKV(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ baseKV.init(logging.TestingLog(t), 10, 5)
+
+ kvNum := 50
+ // write 50 KVs
+ for i := 0; i < kvNum; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.write(kv, fmt.Sprintf("key%d", i))
+ }
+
+ // verify that all these KVs are truly there.
+ for i := 0; i < kvNum; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), string(kv.value))
+ }
+
+ // verify expected missing entries
+ for i := kvNum; i < kvNum*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+
+ baseKV.prune(kvNum / 2)
+
+ // verify expected (missing/existing) entries
+ for i := 0; i < kvNum*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+
+ if i >= kvNum/2 && i < kvNum {
+ // expected to have it.
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), string(kv.value))
+ } else {
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+ }
+}
+
+func TestLRUKVPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ kvNum := 250
+ baseKV.init(logging.TestingLog(t), kvNum*2, kvNum)
+
+ for i := 0; i < kvNum; i++ {
+ go func(i int) {
+ time.Sleep(time.Duration((crypto.RandUint64() % 50)) * time.Millisecond)
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }(i)
+ }
+ testStarted := time.Now()
+ for {
+ baseKV.flushPendingWrites()
+
+ // check if all kvs were loaded into "main" cache.
+ allKVsLoaded := true
+ for i := 0; i < kvNum; i++ {
+ _, has := baseKV.read(fmt.Sprintf("key%d", i))
+ if !has {
+ allKVsLoaded = false
+ break
+ }
+ }
+ if allKVsLoaded {
+ break
+ }
+ if time.Since(testStarted).Seconds() > 20 {
+ require.Fail(t, "failed after waiting for 20 second")
+ }
+ // not yet, keep looping.
+ }
+}
+
+type lruKVTestLogger struct {
+ logging.Logger
+ WarnfCallback func(string, ...interface{})
+ warnMsgCount int
+}
+
+func (cl *lruKVTestLogger) Warnf(s string, args ...interface{}) {
+ cl.warnMsgCount++
+}
+
+func TestLRUKVPendingWritesWarning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruKVTestLogger{Logger: logging.TestingLog(t)}
+ baseKV.init(log, pendingWritesBuffer, pendingWritesThreshold)
+ for j := 0; j < 50; j++ {
+ for i := 0; i < j; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }
+ baseKV.flushPendingWrites()
+ if j >= pendingWritesThreshold {
+ // expect a warning in the log
+ require.Equal(t, 1+j-pendingWritesThreshold, log.warnMsgCount)
+ }
+ }
+}
+
+func TestLRUKVOmittedPendingWrites(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var baseKV lruKV
+ pendingWritesBuffer := 50
+ pendingWritesThreshold := 40
+ log := &lruKVTestLogger{Logger: logging.TestingLog(t)}
+ baseKV.init(log, pendingWritesBuffer, pendingWritesThreshold)
+
+ for i := 0; i < pendingWritesBuffer*2; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+ kv := persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i),
+ }
+ baseKV.writePending(kv, fmt.Sprintf("key%d", i))
+ }
+
+ baseKV.flushPendingWrites()
+
+ // verify that all these kvs are truly there.
+ for i := 0; i < pendingWritesBuffer; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.True(t, has)
+ require.Equal(t, basics.Round(i), kv.round)
+ require.Equal(t, fmt.Sprintf("kv %d value", i), string(kv.value))
+ }
+
+ // verify expected missing entries
+ for i := pendingWritesBuffer; i < pendingWritesBuffer*2; i++ {
+ kv, has := baseKV.read(fmt.Sprintf("key%d", i))
+ require.False(t, has)
+ require.Equal(t, persistedKVData{}, kv)
+ }
+}
+
+func BenchmarkLRUKVWrite(b *testing.B) {
+ numTestKV := 5000
+ // there are 2500 kvs that overlap
+ fillerKVs := generatePersistedKVData(0, 97500)
+ kvs := generatePersistedKVData(97500-numTestKV/2, 97500+numTestKV/2)
+
+ benchLruWriteKVs(b, fillerKVs, kvs)
+}
+
+func benchLruWriteKVs(b *testing.B, fillerKVs []cachedKVData, kvs []cachedKVData) {
+ b.ResetTimer()
+ b.StopTimer()
+ var baseKV lruKV
+ // setting up the baseKV with a predefined cache size
+ baseKV.init(logging.TestingLog(b), baseKVPendingBufferSize, baseKVPendingWarnThreshold)
+ for i := 0; i < b.N; i++ {
+ baseKV = fillLRUKV(baseKV, fillerKVs)
+
+ b.StartTimer()
+ fillLRUKV(baseKV, kvs)
+ b.StopTimer()
+ baseKV.prune(0)
+ }
+}
+
+func fillLRUKV(baseKV lruKV, fillerKVs []cachedKVData) lruKV {
+ for _, entry := range fillerKVs {
+ baseKV.write(entry.persistedKVData, entry.key)
+ }
+ return baseKV
+}
+
+func generatePersistedKVData(startRound, endRound int) []cachedKVData {
+ kvs := make([]cachedKVData, endRound-startRound)
+ for i := startRound; i < endRound; i++ {
+ kvValue := fmt.Sprintf("kv %d value", i)
+
+ kvs[i-startRound] = cachedKVData{
+ persistedKVData: persistedKVData{
+ value: []byte(kvValue),
+ round: basics.Round(i + startRound),
+ },
+ key: fmt.Sprintf("key%d", i),
+ }
+ }
+ return kvs
+}
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index d76a3a0db..3e0a185ab 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -61,13 +61,13 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// catchpointFileBalancesChunkV6
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
+// catchpointFileChunkV6
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
//
// catchpointFirstStageInfo
// |-----> (*) MarshalMsg
@@ -101,6 +101,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// encodedKVRecordV6
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// resourceFlags
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -451,8 +459,8 @@ func (z *CatchpointFileHeader) MsgIsZero() bool {
func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(19)
- var zb0001Mask uint32 /* 21 bits */
+ zb0001Len := uint32(21)
+ var zb0001Mask uint32 /* 23 bits */
if (*z).baseVotingData.VoteID.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x1
@@ -525,10 +533,18 @@ func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
zb0001Len--
zb0001Mask |= 0x80000
}
- if (*z).UpdateRound == 0 {
+ if (*z).TotalBoxes == 0 {
zb0001Len--
zb0001Mask |= 0x100000
}
+ if (*z).TotalBoxBytes == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x200000
+ }
+ if (*z).UpdateRound == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x400000
+ }
// variable map header, size zb0001Len
o = msgp.AppendMapHeader(o, zb0001Len)
if zb0001Len != 0 {
@@ -623,6 +639,16 @@ func (z *baseAccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).TotalAppLocalStates)
}
if (zb0001Mask & 0x100000) == 0 { // if not empty
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = msgp.AppendUint64(o, (*z).TotalBoxes)
+ }
+ if (zb0001Mask & 0x200000) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ o = msgp.AppendUint64(o, (*z).TotalBoxBytes)
+ }
+ if (zb0001Mask & 0x400000) == 0 { // if not empty
// string "z"
o = append(o, 0xa1, 0x7a)
o = msgp.AppendUint64(o, (*z).UpdateRound)
@@ -747,6 +773,22 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxes")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "TotalBoxBytes")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "VoteID")
@@ -896,6 +938,18 @@ func (z *baseAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalAppLocalStates")
return
}
+ case "m":
+ (*z).TotalBoxes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxes")
+ return
+ }
+ case "n":
+ (*z).TotalBoxBytes, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TotalBoxBytes")
+ return
+ }
case "A":
bts, err = (*z).baseVotingData.VoteID.UnmarshalMsg(bts)
if err != nil {
@@ -958,13 +1012,13 @@ func (_ *baseAccountData) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *baseAccountData) Msgsize() (s int) {
- s = 3 + 2 + (*z).Status.Msgsize() + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).RewardedMicroAlgos.Msgsize() + 2 + (*z).AuthAddr.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.VoteID.Msgsize() + 2 + (*z).baseVotingData.SelectionID.Msgsize() + 2 + (*z).baseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).baseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.StateProofID.Msgsize() + 2 + msgp.Uint64Size
+ s = 3 + 2 + (*z).Status.Msgsize() + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).RewardedMicroAlgos.Msgsize() + 2 + (*z).AuthAddr.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.VoteID.Msgsize() + 2 + (*z).baseVotingData.SelectionID.Msgsize() + 2 + (*z).baseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).baseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).baseVotingData.StateProofID.Msgsize() + 2 + msgp.Uint64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *baseAccountData) MsgIsZero() bool {
- return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero()) && ((*z).TotalAppSchemaNumUint == 0) && ((*z).TotalAppSchemaNumByteSlice == 0) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalAssetParams == 0) && ((*z).TotalAssets == 0) && ((*z).TotalAppParams == 0) && ((*z).TotalAppLocalStates == 0) && ((*z).baseVotingData.VoteID.MsgIsZero()) && ((*z).baseVotingData.SelectionID.MsgIsZero()) && ((*z).baseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).baseVotingData.VoteLastValid.MsgIsZero()) && ((*z).baseVotingData.VoteKeyDilution == 0) && ((*z).baseVotingData.StateProofID.MsgIsZero()) && ((*z).UpdateRound == 0)
+ return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero()) && ((*z).TotalAppSchemaNumUint == 0) && ((*z).TotalAppSchemaNumByteSlice == 0) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalAssetParams == 0) && ((*z).TotalAssets == 0) && ((*z).TotalAppParams == 0) && ((*z).TotalAppLocalStates == 0) && ((*z).TotalBoxes == 0) && ((*z).TotalBoxBytes == 0) && ((*z).baseVotingData.VoteID.MsgIsZero()) && ((*z).baseVotingData.SelectionID.MsgIsZero()) && ((*z).baseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).baseVotingData.VoteLastValid.MsgIsZero()) && ((*z).baseVotingData.VoteKeyDilution == 0) && ((*z).baseVotingData.StateProofID.MsgIsZero()) && ((*z).UpdateRound == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -1764,19 +1818,23 @@ func (z *catchpointFileBalancesChunkV5) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z *catchpointFileBalancesChunkV6) MarshalMsg(b []byte) (o []byte) {
+func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0002Len := uint32(1)
- var zb0002Mask uint8 /* 3 bits */
+ zb0003Len := uint32(2)
+ var zb0003Mask uint8 /* 4 bits */
if len((*z).Balances) == 0 {
- zb0002Len--
- zb0002Mask |= 0x2
+ zb0003Len--
+ zb0003Mask |= 0x2
}
- // variable map header, size zb0002Len
- o = append(o, 0x80|uint8(zb0002Len))
- if zb0002Len != 0 {
- if (zb0002Mask & 0x2) == 0 { // if not empty
+ if len((*z).KVs) == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x4
+ }
+ // variable map header, size zb0003Len
+ o = append(o, 0x80|uint8(zb0003Len))
+ if zb0003Len != 0 {
+ if (zb0003Mask & 0x2) == 0 { // if not empty
// string "bl"
o = append(o, 0xa2, 0x62, 0x6c)
if (*z).Balances == nil {
@@ -1788,48 +1846,82 @@ func (z *catchpointFileBalancesChunkV6) MarshalMsg(b []byte) (o []byte) {
o = (*z).Balances[zb0001].MarshalMsg(o)
}
}
+ if (zb0003Mask & 0x4) == 0 { // if not empty
+ // string "kv"
+ o = append(o, 0xa2, 0x6b, 0x76)
+ if (*z).KVs == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).KVs)))
+ }
+ for zb0002 := range (*z).KVs {
+ // omitempty: check for empty values
+ zb0004Len := uint32(2)
+ var zb0004Mask uint8 /* 3 bits */
+ if len((*z).KVs[zb0002].Key) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x2
+ }
+ if len((*z).KVs[zb0002].Value) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x4
+ }
+ // variable map header, size zb0004Len
+ o = append(o, 0x80|uint8(zb0004Len))
+ if (zb0004Mask & 0x2) == 0 { // if not empty
+ // string "k"
+ o = append(o, 0xa1, 0x6b)
+ o = msgp.AppendBytes(o, (*z).KVs[zb0002].Key)
+ }
+ if (zb0004Mask & 0x4) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = msgp.AppendBytes(o, (*z).KVs[zb0002].Value)
+ }
+ }
+ }
}
return
}
-func (_ *catchpointFileBalancesChunkV6) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*catchpointFileBalancesChunkV6)
+func (_ *catchpointFileChunkV6) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointFileChunkV6)
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *catchpointFileChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0002 > 0 {
- zb0002--
- var zb0004 int
- var zb0005 bool
- zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Balances")
return
}
- if zb0004 > BalancesPerCatchpointFileChunk {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(BalancesPerCatchpointFileChunk))
+ if zb0005 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(BalancesPerCatchpointFileChunk))
err = msgp.WrapError(err, "struct-from-array", "Balances")
return
}
- if zb0005 {
+ if zb0006 {
(*z).Balances = nil
- } else if (*z).Balances != nil && cap((*z).Balances) >= zb0004 {
- (*z).Balances = ((*z).Balances)[:zb0004]
+ } else if (*z).Balances != nil && cap((*z).Balances) >= zb0005 {
+ (*z).Balances = ((*z).Balances)[:zb0005]
} else {
- (*z).Balances = make([]encodedBalanceRecordV6, zb0004)
+ (*z).Balances = make([]encodedBalanceRecordV6, zb0005)
}
for zb0001 := range (*z).Balances {
bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
@@ -1839,8 +1931,141 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
}
}
}
- if zb0002 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0002)
+ if zb0003 > 0 {
+ zb0003--
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs")
+ return
+ }
+ if zb0007 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(BalancesPerCatchpointFileChunk))
+ err = msgp.WrapError(err, "struct-from-array", "KVs")
+ return
+ }
+ if zb0008 {
+ (*z).KVs = nil
+ } else if (*z).KVs != nil && cap((*z).KVs) >= zb0007 {
+ (*z).KVs = ((*z).KVs)[:zb0007]
+ } else {
+ (*z).KVs = make([]encodedKVRecordV6, zb0007)
+ }
+ for zb0002 := range (*z).KVs {
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ if zb0009 > 0 {
+ zb0009--
+ var zb0011 int
+ zb0011, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ if zb0011 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ zb0009--
+ var zb0012 int
+ zb0012, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ if zb0012 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ }
+ if zb0009 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0009)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ if zb0010 {
+ (*z).KVs[zb0002] = encodedKVRecordV6{}
+ }
+ for zb0009 > 0 {
+ zb0009--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ switch string(field) {
+ case "k":
+ var zb0013 int
+ zb0013, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Key")
+ return
+ }
+ if zb0013 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Key")
+ return
+ }
+ case "v":
+ var zb0014 int
+ zb0014, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Value")
+ return
+ }
+ if zb0014 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Value")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -1851,11 +2076,11 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
err = msgp.WrapError(err)
return
}
- if zb0003 {
- (*z) = catchpointFileBalancesChunkV6{}
+ if zb0004 {
+ (*z) = catchpointFileChunkV6{}
}
- for zb0002 > 0 {
- zb0002--
+ for zb0003 > 0 {
+ zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -1863,24 +2088,24 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
}
switch string(field) {
case "bl":
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Balances")
return
}
- if zb0006 > BalancesPerCatchpointFileChunk {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(BalancesPerCatchpointFileChunk))
+ if zb0015 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(BalancesPerCatchpointFileChunk))
err = msgp.WrapError(err, "Balances")
return
}
- if zb0007 {
+ if zb0016 {
(*z).Balances = nil
- } else if (*z).Balances != nil && cap((*z).Balances) >= zb0006 {
- (*z).Balances = ((*z).Balances)[:zb0006]
+ } else if (*z).Balances != nil && cap((*z).Balances) >= zb0015 {
+ (*z).Balances = ((*z).Balances)[:zb0015]
} else {
- (*z).Balances = make([]encodedBalanceRecordV6, zb0006)
+ (*z).Balances = make([]encodedBalanceRecordV6, zb0015)
}
for zb0001 := range (*z).Balances {
bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
@@ -1889,6 +2114,137 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
return
}
}
+ case "kv":
+ var zb0017 int
+ var zb0018 bool
+ zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs")
+ return
+ }
+ if zb0017 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0017), uint64(BalancesPerCatchpointFileChunk))
+ err = msgp.WrapError(err, "KVs")
+ return
+ }
+ if zb0018 {
+ (*z).KVs = nil
+ } else if (*z).KVs != nil && cap((*z).KVs) >= zb0017 {
+ (*z).KVs = ((*z).KVs)[:zb0017]
+ } else {
+ (*z).KVs = make([]encodedKVRecordV6, zb0017)
+ }
+ for zb0002 := range (*z).KVs {
+ var zb0019 int
+ var zb0020 bool
+ zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ if zb0019 > 0 {
+ zb0019--
+ var zb0021 int
+ zb0021, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ if zb0021 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Key")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ zb0019--
+ var zb0022 int
+ zb0022, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ if zb0022 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Value")
+ return
+ }
+ }
+ if zb0019 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0019)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ if zb0020 {
+ (*z).KVs[zb0002] = encodedKVRecordV6{}
+ }
+ for zb0019 > 0 {
+ zb0019--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ switch string(field) {
+ case "k":
+ var zb0023 int
+ zb0023, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Key")
+ return
+ }
+ if zb0023 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0023), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Key")
+ return
+ }
+ case "v":
+ var zb0024 int
+ zb0024, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Value")
+ return
+ }
+ if zb0024 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002, "Value")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
+ }
+ }
+ }
+ }
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -1902,23 +2258,27 @@ func (z *catchpointFileBalancesChunkV6) UnmarshalMsg(bts []byte) (o []byte, err
return
}
-func (_ *catchpointFileBalancesChunkV6) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*catchpointFileBalancesChunkV6)
+func (_ *catchpointFileChunkV6) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointFileChunkV6)
return ok
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *catchpointFileBalancesChunkV6) Msgsize() (s int) {
+func (z *catchpointFileChunkV6) Msgsize() (s int) {
s = 1 + 3 + msgp.ArrayHeaderSize
for zb0001 := range (*z).Balances {
s += (*z).Balances[zb0001].Msgsize()
}
+ s += 3 + msgp.ArrayHeaderSize
+ for zb0002 := range (*z).KVs {
+ s += 1 + 2 + msgp.BytesPrefixSize + len((*z).KVs[zb0002].Key) + 2 + msgp.BytesPrefixSize + len((*z).KVs[zb0002].Value)
+ }
return
}
// MsgIsZero returns whether this is a zero value
-func (z *catchpointFileBalancesChunkV6) MsgIsZero() bool {
- return (len((*z).Balances) == 0)
+func (z *catchpointFileChunkV6) MsgIsZero() bool {
+ return (len((*z).Balances) == 0) && (len((*z).KVs) == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -2401,8 +2761,8 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
err = msgp.WrapError(err, "struct-from-array", "Resources")
return
}
- if zb0005 > basics.MaxEncodedAccountDataSize {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(basics.MaxEncodedAccountDataSize))
+ if zb0005 > resourcesPerCatchpointFileChunkBackwardCompatible {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
err = msgp.WrapError(err, "struct-from-array", "Resources")
return
}
@@ -2479,8 +2839,8 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
err = msgp.WrapError(err, "Resources")
return
}
- if zb0007 > basics.MaxEncodedAccountDataSize {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(basics.MaxEncodedAccountDataSize))
+ if zb0007 > resourcesPerCatchpointFileChunkBackwardCompatible {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
err = msgp.WrapError(err, "Resources")
return
}
@@ -2549,6 +2909,175 @@ func (z *encodedBalanceRecordV6) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *encodedKVRecordV6) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(2)
+ var zb0001Mask uint8 /* 3 bits */
+ if len((*z).Key) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if len((*z).Value) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "k"
+ o = append(o, 0xa1, 0x6b)
+ o = msgp.AppendBytes(o, (*z).Key)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = msgp.AppendBytes(o, (*z).Value)
+ }
+ }
+ return
+}
+
+func (_ *encodedKVRecordV6) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*encodedKVRecordV6)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *encodedKVRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ zb0003, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Key")
+ return
+ }
+ if zb0003 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0003), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Key")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Value")
+ return
+ }
+ if zb0004 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Value")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = encodedKVRecordV6{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "k":
+ var zb0005 int
+ zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Key")
+ return
+ }
+ if zb0005 > encodedKVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(encodedKVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
+ if err != nil {
+ err = msgp.WrapError(err, "Key")
+ return
+ }
+ case "v":
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Value")
+ return
+ }
+ if zb0006 > encodedKVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(encodedKVRecordV6MaxValueLength))
+ return
+ }
+ (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
+ if err != nil {
+ err = msgp.WrapError(err, "Value")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *encodedKVRecordV6) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*encodedKVRecordV6)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *encodedKVRecordV6) Msgsize() (s int) {
+ s = 1 + 2 + msgp.BytesPrefixSize + len((*z).Key) + 2 + msgp.BytesPrefixSize + len((*z).Value)
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *encodedKVRecordV6) MsgIsZero() bool {
+ return (len((*z).Key) == 0) && (len((*z).Value) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z resourceFlags) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint8(o, uint8(z))
diff --git a/ledger/msgp_gen_test.go b/ledger/msgp_gen_test.go
index 165ecec8d..248102398 100644
--- a/ledger/msgp_gen_test.go
+++ b/ledger/msgp_gen_test.go
@@ -314,9 +314,9 @@ func BenchmarkUnmarshalcatchpointFileBalancesChunkV5(b *testing.B) {
}
}
-func TestMarshalUnmarshalcatchpointFileBalancesChunkV6(t *testing.T) {
+func TestMarshalUnmarshalcatchpointFileChunkV6(t *testing.T) {
partitiontest.PartitionTest(t)
- v := catchpointFileBalancesChunkV6{}
+ v := catchpointFileChunkV6{}
bts := v.MarshalMsg(nil)
left, err := v.UnmarshalMsg(bts)
if err != nil {
@@ -335,12 +335,12 @@ func TestMarshalUnmarshalcatchpointFileBalancesChunkV6(t *testing.T) {
}
}
-func TestRandomizedEncodingcatchpointFileBalancesChunkV6(t *testing.T) {
- protocol.RunEncodingTest(t, &catchpointFileBalancesChunkV6{})
+func TestRandomizedEncodingcatchpointFileChunkV6(t *testing.T) {
+ protocol.RunEncodingTest(t, &catchpointFileChunkV6{})
}
-func BenchmarkMarshalMsgcatchpointFileBalancesChunkV6(b *testing.B) {
- v := catchpointFileBalancesChunkV6{}
+func BenchmarkMarshalMsgcatchpointFileChunkV6(b *testing.B) {
+ v := catchpointFileChunkV6{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -348,8 +348,8 @@ func BenchmarkMarshalMsgcatchpointFileBalancesChunkV6(b *testing.B) {
}
}
-func BenchmarkAppendMsgcatchpointFileBalancesChunkV6(b *testing.B) {
- v := catchpointFileBalancesChunkV6{}
+func BenchmarkAppendMsgcatchpointFileChunkV6(b *testing.B) {
+ v := catchpointFileChunkV6{}
bts := make([]byte, 0, v.Msgsize())
bts = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -360,8 +360,8 @@ func BenchmarkAppendMsgcatchpointFileBalancesChunkV6(b *testing.B) {
}
}
-func BenchmarkUnmarshalcatchpointFileBalancesChunkV6(b *testing.B) {
- v := catchpointFileBalancesChunkV6{}
+func BenchmarkUnmarshalcatchpointFileChunkV6(b *testing.B) {
+ v := catchpointFileChunkV6{}
bts := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
@@ -554,6 +554,66 @@ func BenchmarkUnmarshalencodedBalanceRecordV6(b *testing.B) {
}
}
+func TestMarshalUnmarshalencodedKVRecordV6(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := encodedKVRecordV6{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingencodedKVRecordV6(t *testing.T) {
+ protocol.RunEncodingTest(t, &encodedKVRecordV6{})
+}
+
+func BenchmarkMarshalMsgencodedKVRecordV6(b *testing.B) {
+ v := encodedKVRecordV6{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgencodedKVRecordV6(b *testing.B) {
+ v := encodedKVRecordV6{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalencodedKVRecordV6(b *testing.B) {
+ v := encodedKVRecordV6{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalresourcesData(t *testing.T) {
partitiontest.PartitionTest(t)
v := resourcesData{}
diff --git a/ledger/persistedkvs.go b/ledger/persistedkvs.go
new file mode 100644
index 000000000..34f3c36ec
--- /dev/null
+++ b/ledger/persistedkvs.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+// persistedKVDataList represents a doubly linked list.
+// must initiate with newPersistedKVList.
+type persistedKVDataList struct {
+ root persistedKVDataListNode // sentinel list element, only &root, root.prev, and root.next are used
+ freeList *persistedKVDataListNode // preallocated nodes location
+}
+
+type persistedKVDataListNode struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *persistedKVDataListNode
+
+ Value *cachedKVData
+}
+
+func newPersistedKVList() *persistedKVDataList {
+ l := new(persistedKVDataList)
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ // used as a helper but does not store value
+ l.freeList = new(persistedKVDataListNode)
+
+ return l
+}
+
+func (l *persistedKVDataList) insertNodeToFreeList(otherNode *persistedKVDataListNode) {
+ otherNode.next = l.freeList.next
+ otherNode.prev = nil
+ otherNode.Value = nil
+
+ l.freeList.next = otherNode
+}
+
+func (l *persistedKVDataList) getNewNode() *persistedKVDataListNode {
+ if l.freeList.next == nil {
+ return new(persistedKVDataListNode)
+ }
+ newNode := l.freeList.next
+ l.freeList.next = newNode.next
+
+ return newNode
+}
+
+func (l *persistedKVDataList) allocateFreeNodes(numAllocs int) *persistedKVDataList {
+ if l.freeList == nil {
+ return l
+ }
+ for i := 0; i < numAllocs; i++ {
+ l.insertNodeToFreeList(new(persistedKVDataListNode))
+ }
+
+ return l
+}
+
+// back returns the last element of list l or nil if the list is empty.
+func (l *persistedKVDataList) back() *persistedKVDataListNode {
+ isEmpty := func(list *persistedKVDataList) bool {
+ // assumes we are inserting correctly to the list - using pushFront.
+ return list.root.next == &list.root
+ }
+ if isEmpty(l) {
+ return nil
+ }
+ return l.root.prev
+}
+
+// remove removes e from l if e is an element of list l.
+// The removed node is returned to the free list for reuse.
+// The element must not be nil.
+func (l *persistedKVDataList) remove(e *persistedKVDataListNode) {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+
+ l.insertNodeToFreeList(e)
+}
+
+// pushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *persistedKVDataList) pushFront(v *cachedKVData) *persistedKVDataListNode {
+ newNode := l.getNewNode()
+ newNode.Value = v
+ return l.insertValue(newNode, &l.root)
+}
+
+// insertValue inserts newNode after at and returns newNode.
+func (l *persistedKVDataList) insertValue(newNode *persistedKVDataListNode, at *persistedKVDataListNode) *persistedKVDataListNode {
+ n := at.next
+ at.next = newNode
+ newNode.prev = at
+ newNode.next = n
+ n.prev = newNode
+
+ return newNode
+}
+
+// moveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *persistedKVDataList) moveToFront(e *persistedKVDataListNode) {
+ if l.root.next == e {
+ return
+ }
+ l.move(e, &l.root)
+}
+
+// move moves e to next to at and returns e.
+func (l *persistedKVDataList) move(e, at *persistedKVDataListNode) *persistedKVDataListNode {
+ if e == at {
+ return e
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ n := at.next
+ at.next = e
+ e.prev = at
+ e.next = n
+ n.prev = e
+
+ return e
+}
diff --git a/ledger/persistedkvs_test.go b/ledger/persistedkvs_test.go
new file mode 100644
index 000000000..eb5ed9dff
--- /dev/null
+++ b/ledger/persistedkvs_test.go
@@ -0,0 +1,175 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func (l *persistedKVDataList) getRoot() dataListNode {
+ return &l.root
+}
+
+func (l *persistedKVDataListNode) getNext() dataListNode {
+	// avoid returning a typed nil wrapped in an interface, so that "i := x.getNext(); i != nil" behaves as expected.
+ if l.next == nil {
+ return nil
+ }
+ return l.next
+}
+
+func (l *persistedKVDataListNode) getPrev() dataListNode {
+ if l.prev == nil {
+ return nil
+ }
+ return l.prev
+}
+
+// checkListPointersBD verifies that the linked list contents match the given array of nodes.
+func checkListPointersBD(t *testing.T, l *persistedKVDataList, es []*persistedKVDataListNode) {
+ es2 := make([]dataListNode, len(es))
+ for i, el := range es {
+ es2[i] = el
+ }
+
+ checkListPointers(t, l, es2)
+}
+
+func TestRemoveFromListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ e2 := l.pushFront(&cachedKVData{key: "key2"})
+ e3 := l.pushFront(&cachedKVData{key: "key3"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e2, e1})
+
+ l.remove(e2)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e1})
+ l.remove(e3)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1})
+}
+
+func TestAddingNewNodeWithAllocatedFreeListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList().allocateFreeNodes(10)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ if countListSize(l.freeList) != 10 {
+ t.Errorf("free list did not allocate nodes")
+ return
+ }
+ // test elements
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1})
+
+ if countListSize(l.freeList) != 9 {
+ t.Errorf("free list did not provide a node on new list entry")
+ return
+ }
+}
+
+func TestMultielementListPositioningBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ // test elements
+ e2 := l.pushFront(&cachedKVData{key: "key1"})
+ e1 := l.pushFront(&cachedKVData{key: "key2"})
+ e3 := l.pushFront(&cachedKVData{key: "key3"})
+ e4 := l.pushFront(&cachedKVData{key: "key4"})
+ e5 := l.pushFront(&cachedKVData{key: "key5"})
+
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e5, e4, e3, e1, e2})
+
+ l.move(e4, e1)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e5, e3, e1, e4, e2})
+
+ l.remove(e5)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e1, e4, e2})
+
+ l.move(e1, e4) // swap in middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e4, e1, e2})
+
+ l.moveToFront(e4)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e4, e3, e1, e2})
+
+ l.remove(e2)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e4, e3, e1})
+
+ l.moveToFront(e3) // move from middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e3, e4, e1})
+
+ l.moveToFront(e1) // move from end
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e3, e4})
+
+ l.moveToFront(e1) // no movement
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e3, e4})
+
+ e2 = l.pushFront(&cachedKVData{key: "key2"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1, e3, e4})
+
+ l.remove(e3) // removing from middle
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1, e4})
+
+ l.remove(e4) // removing from end
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1})
+
+ l.move(e2, e1) // swapping between two elements
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e1, e2})
+
+ l.remove(e1) // removing front
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2})
+
+ l.move(e2, l.back()) // swapping element with itself.
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2})
+
+ l.remove(e2) // remove last one
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+}
+
+func TestSingleElementListPositioningBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+ e := l.pushFront(&cachedKVData{key: "key1"})
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e})
+ l.moveToFront(e)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e})
+ l.remove(e)
+ checkListPointersBD(t, l, []*persistedKVDataListNode{})
+}
+
+func TestRemovedNodeShouldBeMovedToFreeListBD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ l := newPersistedKVList()
+ e1 := l.pushFront(&cachedKVData{key: "key1"})
+ e2 := l.pushFront(&cachedKVData{key: "key2"})
+
+ checkListPointersBD(t, l, []*persistedKVDataListNode{e2, e1})
+
+ e := l.back()
+ l.remove(e)
+
+ for i := l.freeList.next; i != nil; i = i.next {
+ if i == e {
+			// stopping the test with good results:
+ return
+ }
+ }
+ t.Error("expected the removed node to appear at the freelist")
+}
diff --git a/ledger/persistedresources_list.go b/ledger/persistedresources_list.go
index 57b0cdc44..baa7ac351 100644
--- a/ledger/persistedresources_list.go
+++ b/ledger/persistedresources_list.go
@@ -17,7 +17,7 @@
package ledger
// persistedResourcesDataList represents a doubly linked list.
-// must initiate with newPersistedAccountList.
+// must initiate with newPersistedResourcesList.
type persistedResourcesDataList struct {
root persistedResourcesDataListNode // sentinel list element, only &root, root.prev, and root.next are used
freeList *persistedResourcesDataListNode // preallocated nodes location
diff --git a/ledger/simple_test.go b/ledger/simple_test.go
new file mode 100644
index 000000000..22781c70f
--- /dev/null
+++ b/ledger/simple_test.go
@@ -0,0 +1,187 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+func newSimpleLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger {
+ return newSimpleLedgerWithConsensusVersion(t, balances, protocol.ConsensusFuture)
+}
+
+func newSimpleLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) *Ledger {
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ return newSimpleLedgerFull(t, balances, cv, genHash)
+}
+
+func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest) *Ledger {
+ genBlock, err := bookkeeping.MakeGenesisBlock(cv, balances, "test", genHash)
+ require.NoError(t, err)
+ require.False(t, genBlock.FeeSink.IsZero())
+ require.False(t, genBlock.RewardsPool.IsZero())
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ Block: genBlock,
+ Accounts: balances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ return l
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func nextBlock(t testing.TB, ledger *Ledger) *internal.BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ nextHdr.TimeStamp = hdr.TimeStamp + 1 // ensure deterministic tests
+ eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
+ Generate: true,
+ Validate: true, // Do the complete checks that a new txn would be subject to
+ })
+ require.NoError(t, err)
+ return eval
+}
+
+func fillDefaults(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
+ if txn.GenesisHash.IsZero() && ledger.GenesisProto().SupportGenesisHash {
+ txn.GenesisHash = ledger.GenesisHash()
+ }
+ if txn.FirstValid == 0 {
+ txn.FirstValid = eval.Round()
+ }
+
+ txn.FillDefaults(ledger.GenesisProto())
+}
+
+func txns(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) {
+ t.Helper()
+ for _, txn1 := range txns {
+ txn(t, ledger, eval, txn1)
+ }
+}
+
+func txn(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
+ t.Helper()
+ fillDefaults(t, ledger, eval, txn)
+ err := eval.Transaction(txn.SignedTxn(), transactions.ApplyData{})
+ if err != nil {
+ if len(problem) == 1 && problem[0] != "" {
+ require.Contains(t, err.Error(), problem[0])
+ } else {
+ require.NoError(t, err) // Will obviously fail
+ }
+ return
+ }
+ require.True(t, len(problem) == 0 || problem[0] == "")
+}
+
+func txgroup(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
+ t.Helper()
+ for _, txn := range txns {
+ fillDefaults(t, ledger, eval, txn)
+ }
+ txgroup := txntest.SignedTxns(txns...)
+
+ return eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
+}
+
+// endBlock completes the block being created, returns the ValidatedBlock for inspection
+func endBlock(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator) *ledgercore.ValidatedBlock {
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ // `rndBQ` gives the latest known block round added to the ledger
+ // we should wait until `rndBQ` block to be committed to blockQueue,
+ // in case there is a data race, noted in
+ // https://github.com/algorand/go-algorand/issues/4349
+ // where writing to `callTxnGroup` after `dl.fullBlock` caused data race,
+ // because the underlying async goroutine `go bq.syncer()` is reading `callTxnGroup`.
+ // A solution here would be wait until all new added blocks are committed,
+ // then we return the result and continue the execution.
+ rndBQ := ledger.Latest()
+ ledger.WaitForCommit(rndBQ)
+ return validatedBlock
+}
+
+// main wraps up some TEAL source in a header and footer so that it is
+// an app that does nothing at create time, but otherwise runs source,
+// then approves, if the source avoids panicking and leaves the stack
+// empty.
+func main(source string) string {
+ return strings.Replace(fmt.Sprintf(`txn ApplicationID
+ bz end
+ %s
+end: int 1`, source), ";", "\n", -1)
+}
+
+// lookup gets the current accountdata for an address
+func lookup(t testing.TB, ledger *Ledger, addr basics.Address) basics.AccountData {
+ ad, _, _, err := ledger.LookupLatest(addr)
+ require.NoError(t, err)
+ return ad
+}
+
+// micros gets the current microAlgo balance for an address
+func micros(t testing.TB, ledger *Ledger, addr basics.Address) uint64 {
+ return lookup(t, ledger, addr).MicroAlgos.Raw
+}
+
+// holding gets the current balance and optin status for some asa for an address
+func holding(t testing.TB, ledger *Ledger, addr basics.Address, asset basics.AssetIndex) (uint64, bool) {
+ if holding, ok := lookup(t, ledger, addr).Assets[asset]; ok {
+ return holding.Amount, true
+ }
+ return 0, false
+}
+
+// asaParams gets the asset params for a given asa index
+func asaParams(t testing.TB, ledger *Ledger, asset basics.AssetIndex) (basics.AssetParams, error) {
+ creator, ok, err := ledger.GetCreator(basics.CreatableIndex(asset), basics.AssetCreatable)
+ if err != nil {
+ return basics.AssetParams{}, err
+ }
+ if !ok {
+ return basics.AssetParams{}, fmt.Errorf("no asset (%d)", asset)
+ }
+ if params, ok := lookup(t, ledger, creator).AssetParams[asset]; ok {
+ return params, nil
+ }
+ return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
+}
diff --git a/ledger/testing/consensusRange.go b/ledger/testing/consensusRange.go
new file mode 100644
index 000000000..877e03fae
--- /dev/null
+++ b/ledger/testing/consensusRange.go
@@ -0,0 +1,106 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+var consensusByNumber = []protocol.ConsensusVersion{
+ "", "", "", "", "", "", "",
+ protocol.ConsensusV7,
+ protocol.ConsensusV8,
+ protocol.ConsensusV9,
+ protocol.ConsensusV10,
+ protocol.ConsensusV11, // first with viable payset commit type
+ protocol.ConsensusV12,
+ protocol.ConsensusV13,
+ protocol.ConsensusV14,
+ protocol.ConsensusV15, // rewards in AD
+ protocol.ConsensusV16,
+ protocol.ConsensusV17,
+ protocol.ConsensusV18,
+ protocol.ConsensusV19,
+ protocol.ConsensusV20,
+ protocol.ConsensusV21,
+ protocol.ConsensusV22,
+ protocol.ConsensusV23,
+ protocol.ConsensusV24, // AVM v2 (apps)
+ protocol.ConsensusV25,
+ protocol.ConsensusV26,
+ protocol.ConsensusV27,
+ protocol.ConsensusV28,
+ protocol.ConsensusV29,
+ protocol.ConsensusV30, // AVM v5 (inner txs)
+ protocol.ConsensusV31, // AVM v6 (inner txs with appls)
+ protocol.ConsensusV32, // unlimited assets and apps
+ protocol.ConsensusV33, // 320 rounds
+ protocol.ConsensusV34, // AVM v7, stateproofs
+ protocol.ConsensusV35, // minor, double upgrade with v34
+ protocol.ConsensusV36, // box storage
+ protocol.ConsensusFuture,
+}
+
+// TestConsensusRange allows for running tests against a range of consensus
+// versions. Generally `start` will be the version that introduced the feature,
+// and `stop` will be 0 to indicate it should work right on up through vFuture.
+// `stop` will be an actual version number if we're confirming that something
+// STOPS working as of a particular version. When writing the test for a new
+// feature that is currently in vFuture, use the expected version number as
+// `start`. That will correspond to vFuture until a new consensus version is
+// created and inserted in consensusByNumber. At that point, your feature is
+// probably active in that version. (If it's being held in vFuture, just
+// increment your `start`.)
+func TestConsensusRange(t *testing.T, start, stop int, test func(t *testing.T, ver int, cv protocol.ConsensusVersion)) {
+ if stop == 0 { // Treat 0 as "future"
+ stop = len(consensusByNumber) - 1
+ }
+ require.LessOrEqual(t, start, stop)
+ for i := start; i <= stop; i++ {
+ var version string
+ if i == len(consensusByNumber)-1 {
+ version = "vFuture"
+ } else {
+ version = fmt.Sprintf("v%d", i)
+ }
+ t.Run(fmt.Sprintf("cv=%s", version), func(t *testing.T) {
+ test(t, i, consensusByNumber[i])
+ })
+ }
+}
+
+// BenchConsensusRange is for getting benchmarks across consensus versions.
+func BenchConsensusRange(b *testing.B, start, stop int, bench func(t *testing.B, ver int, cv protocol.ConsensusVersion)) {
+ if stop == 0 { // Treat 0 as "future"
+ stop = len(consensusByNumber) - 1
+ }
+ for i := start; i <= stop; i++ {
+ var version string
+ if i == len(consensusByNumber)-1 {
+ version = "vFuture"
+ } else {
+ version = fmt.Sprintf("v%d", i)
+ }
+ b.Run(fmt.Sprintf("cv=%s", version), func(b *testing.B) {
+ bench(b, i, consensusByNumber[i])
+ })
+ }
+}
diff --git a/ledger/testing/consensusRange_test.go b/ledger/testing/consensusRange_test.go
new file mode 100644
index 000000000..df51ec720
--- /dev/null
+++ b/ledger/testing/consensusRange_test.go
@@ -0,0 +1,58 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// TestReleasedVersion ensures that the necessary tidying is done when a new
+// protocol release happens. The new version must be added to
+// consensusByNumber, and a new LogicSigVersion must be added to vFuture.
+func TestReleasedVersion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // This confirms that the proto before future has no ApprovedUpgrades. Once
+ // it does, that new version should be added to consensusByNumber.
+ require.Len(t, config.Consensus[consensusByNumber[len(consensusByNumber)-2]].ApprovedUpgrades, 0)
+ // And no funny business with vFuture
+ require.Equal(t, protocol.ConsensusFuture, consensusByNumber[len(consensusByNumber)-1])
+
+ // Ensure that vFuture gets a new LogicSigVersion when we promote the
+ // existing one. That allows TestExperimental in the logic package to
+ // prevent unintended releases of experimental opcodes.
+ relV := config.Consensus[consensusByNumber[len(consensusByNumber)-2]].LogicSigVersion
+ futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
+ require.Less(t, int(relV), int(futureV))
+
+ // Require that all are present
+ for _, cv := range consensusByNumber {
+ if cv == "" {
+ continue
+ }
+ params, ok := config.Consensus[cv]
+ require.True(t, ok, string(cv))
+ require.NotZero(t, params) // just making sure an empty one didn't get put in
+ }
+
+}
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 947ddc1b4..c3c559911 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -339,6 +339,11 @@ func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.Creatabl
data.TotalExtraAppPages = uint32(crypto.RandUint64() % 50)
}
+ if (crypto.RandUint64() % 3) == 1 {
+ data.TotalBoxes = crypto.RandUint64() % 100
+ data.TotalBoxBytes = crypto.RandUint64() % 10000
+ }
+
return data
}
diff --git a/ledger/tracker.go b/ledger/tracker.go
index ae5077719..1945018ef 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -227,7 +227,7 @@ type deferredCommitRange struct {
catchpointSecondStage bool
}
-// deferredCommitContext is used in order to syncornize the persistence of a given deferredCommitRange.
+// deferredCommitContext is used in order to synchronize the persistence of a given deferredCommitRange.
// prepareCommit, commitRound and postCommit are all using it to exchange data.
type deferredCommitContext struct {
deferredCommitRange
@@ -243,10 +243,12 @@ type deferredCommitContext struct {
compactAccountDeltas compactAccountDeltas
compactResourcesDeltas compactResourcesDeltas
+ compactKvDeltas map[string]modifiedKvValue
compactCreatableDeltas map[basics.CreatableIndex]ledgercore.ModifiedCreatable
updatedPersistedAccounts []persistedAccountData
updatedPersistedResources map[basics.Address][]persistedResourcesData
+ updatedPersistedKVs map[string]persistedKVData
compactOnlineAccountDeltas compactOnlineAccountDeltas
updatedPersistedOnlineAccounts []persistedOnlineAccountData
@@ -439,7 +441,7 @@ func (tr *trackerRegistry) commitSyncer(deferredCommits chan *deferredCommitCont
}
err := tr.commitRound(commit)
if err != nil {
- tr.log.Warnf("Could not commit round: %w", err)
+ tr.log.Warnf("Could not commit round: %v", err)
}
case <-tr.ctx.Done():
// drain the pending commits queue:
diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go
index 3e8773225..15882d920 100644
--- a/ledger/trackerdb.go
+++ b/ledger/trackerdb.go
@@ -183,6 +183,12 @@ func runMigrations(ctx context.Context, tx *sql.Tx, params trackerDBParams, log
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 6 : %v", err)
return
}
+ case 7:
+ err = tu.upgradeDatabaseSchema7(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 7 : %v", err)
+ return
+ }
default:
return trackerDBInitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion)
}
@@ -503,6 +509,16 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema6(ctx context.Context
return tu.setVersion(ctx, tx, 7)
}
+// upgradeDatabaseSchema7 upgrades the database schema from version 7 to version 8.
+// adding the kvstore table for box feature support.
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema7(ctx context.Context, tx *sql.Tx) (err error) {
+ err = accountsCreateBoxTable(ctx, tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema7 unable to create kvstore through createTables : %v", err)
+ }
+ return tu.setVersion(ctx, tx, 8)
+}
+
// isDirEmpty returns if a given directory is empty or not.
func isDirEmpty(path string) (bool, error) {
dir, err := os.Open(path)
diff --git a/ledger/internal/txnbench_test.go b/ledger/txnbench_test.go
index 9c92c896c..788ecbe1f 100644
--- a/ledger/internal/txnbench_test.go
+++ b/ledger/txnbench_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal_test
+package ledger
import (
"errors"
@@ -27,14 +27,15 @@ import (
"github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
"github.com/stretchr/testify/require"
)
// BenchmarkTxnTypes compares the execution time of various txn types
func BenchmarkTxnTypes(b *testing.B) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- benchConsensusRange(b, 30, 0, func(b *testing.B, ver int) {
- l := newTestLedgerWithConsensusVersion(b, genBalances, consensusByNumber[ver])
+ ledgertesting.BenchConsensusRange(b, 30, 0, func(b *testing.B, ver int, cv protocol.ConsensusVersion) {
+ l := newSimpleLedgerWithConsensusVersion(b, genBalances, cv)
defer l.Close()
createasa := txntest.Txn{
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index a7cdde4ea..68ca808db 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -775,6 +775,25 @@ func (c *Client) ApplicationInformation(index uint64) (resp generatedV2.Applicat
return
}
+// ApplicationBoxes takes an app's index and returns the names of boxes under it
+func (c *Client) ApplicationBoxes(appID uint64, maxBoxNum uint64) (resp generatedV2.BoxesResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ resp, err = algod.ApplicationBoxes(appID, maxBoxNum)
+ }
+ return
+}
+
+// GetApplicationBoxByName takes an app's index and box name and returns its value.
+// The box name should be of the form `encoding:value`. See logic.AppCallBytes for more information.
+func (c *Client) GetApplicationBoxByName(index uint64, name string) (resp generatedV2.BoxResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ resp, err = algod.GetApplicationBoxByName(index, name)
+ }
+ return
+}
+
// TransactionInformation takes an address and associated txid and return its information
func (c *Client) TransactionInformation(addr, txid string) (resp v1.Transaction, err error) {
algod, err := c.ensureAlgodClient()
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index fb788f024..e4c4573d7 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -503,50 +503,50 @@ func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid, fe
}
// MakeUnsignedAppCreateTx makes a transaction for creating an application
-func (c *Client) MakeUnsignedAppCreateTx(onComplete transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, extrapages uint32) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(0, appArgs, accounts, foreignApps, foreignAssets, onComplete, approvalProg, clearProg, globalSchema, localSchema, extrapages)
+func (c *Client) MakeUnsignedAppCreateTx(onComplete transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, extrapages uint32) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(0, appArgs, accounts, foreignApps, foreignAssets, boxes, onComplete, approvalProg, clearProg, globalSchema, localSchema, extrapages)
}
// MakeUnsignedAppUpdateTx makes a transaction for updating an application's programs
-func (c *Client) MakeUnsignedAppUpdateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, approvalProg []byte, clearProg []byte) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.UpdateApplicationOC, approvalProg, clearProg, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppUpdateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, approvalProg []byte, clearProg []byte) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.UpdateApplicationOC, approvalProg, clearProg, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppDeleteTx makes a transaction for deleting an application
-func (c *Client) MakeUnsignedAppDeleteTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.DeleteApplicationOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppDeleteTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.DeleteApplicationOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppOptInTx makes a transaction for opting in to (allocating
// some account-specific state for) an application
-func (c *Client) MakeUnsignedAppOptInTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.OptInOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppOptInTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.OptInOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppCloseOutTx makes a transaction for closing out of
// (deallocating all account-specific state for) an application
-func (c *Client) MakeUnsignedAppCloseOutTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.CloseOutOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppCloseOutTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.CloseOutOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppClearStateTx makes a transaction for clearing out all
// account-specific state for an application. It may not be rejected by the
// application's logic.
-func (c *Client) MakeUnsignedAppClearStateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.ClearStateOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppClearStateTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.ClearStateOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedAppNoOpTx makes a transaction for interacting with an existing
// application, potentially updating any account-specific local state and
// global state associated with it.
-func (c *Client) MakeUnsignedAppNoOpTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64) (tx transactions.Transaction, err error) {
- return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, transactions.NoOpOC, nil, nil, emptySchema, emptySchema, 0)
+func (c *Client) MakeUnsignedAppNoOpTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef) (tx transactions.Transaction, err error) {
+ return c.MakeUnsignedApplicationCallTx(appIdx, appArgs, accounts, foreignApps, foreignAssets, boxes, transactions.NoOpOC, nil, nil, emptySchema, emptySchema, 0)
}
// MakeUnsignedApplicationCallTx is a helper for the above ApplicationCall
// transaction constructors. A fully custom ApplicationCall transaction may
// be constructed using this method.
-func (c *Client) MakeUnsignedApplicationCallTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, onCompletion transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, extrapages uint32) (tx transactions.Transaction, err error) {
+func (c *Client) MakeUnsignedApplicationCallTx(appIdx uint64, appArgs [][]byte, accounts []string, foreignApps []uint64, foreignAssets []uint64, boxes []transactions.BoxRef, onCompletion transactions.OnCompletion, approvalProg []byte, clearProg []byte, globalSchema basics.StateSchema, localSchema basics.StateSchema, extrapages uint32) (tx transactions.Transaction, err error) {
tx.Type = protocol.ApplicationCallTx
tx.ApplicationID = basics.AppIndex(appIdx)
tx.OnCompletion = onCompletion
@@ -559,6 +559,7 @@ func (c *Client) MakeUnsignedApplicationCallTx(appIdx uint64, appArgs [][]byte,
tx.ForeignApps = parseTxnForeignApps(foreignApps)
tx.ForeignAssets = parseTxnForeignAssets(foreignAssets)
+ tx.Boxes = boxes
tx.ApprovalProgram = approvalProg
tx.ClearStateProgram = clearProg
tx.LocalStateSchema = localSchema
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index 15a046164..91c7bddf9 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -301,7 +301,7 @@ type PeerConnectionDetails struct {
// MessageDelay is the avarage relative message delay. Not being used for incoming connection.
MessageDelay int64 `json:",omitempty"`
// DuplicateFilterCount is the number of times this peer has sent us a message hash to filter that it had already sent before.
- DuplicateFilterCount int64
+ DuplicateFilterCount uint64
}
// CatchpointGenerationEvent event
@@ -327,6 +327,25 @@ type CatchpointGenerationEventDetails struct {
CatchpointLabel string
}
+// CatchpointRootUpdateEvent event
+const CatchpointRootUpdateEvent Event = "CatchpointRoot"
+
+// CatchpointRootUpdateEventDetails is generated when the catchpoint merkle trie root is updated, when
+// account updates for rounds are flushed to disk.
+type CatchpointRootUpdateEventDetails struct {
+ Root string
+ OldBase uint64
+ NewBase uint64
+ NewPageCount int `json:"npc"`
+ NewNodeCount int `json:"nnc"`
+ UpdatedPageCount int `json:"upc"`
+ UpdatedNodeCount int `json:"unc"`
+ DeletedPageCount int `json:"dpc"`
+ FanoutReallocatedNodeCount int `json:"frnc"`
+ PackingReallocatedNodeCount int `json:"prnc"`
+ LoadedPages int `json:"lp"`
+}
+
// BalancesAccountVacuumEvent event
const BalancesAccountVacuumEvent Event = "VacuumBalances"
diff --git a/logging/usage.go b/logging/usage.go
index 6646dfbae..da668a72a 100644
--- a/logging/usage.go
+++ b/logging/usage.go
@@ -18,12 +18,16 @@ package logging
import (
"context"
+ "runtime"
"sync"
"time"
"github.com/algorand/go-algorand/util"
+ "github.com/algorand/go-algorand/util/metrics"
)
+var ramUsageGauge = metrics.MakeGauge(metrics.MetricName{Name: "algod_ram_usage", Description: "number of bytes runtime.ReadMemStats().HeapInuse"})
+
// UsageLogThread utility logging method
func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *sync.WaitGroup) {
if wg != nil {
@@ -34,6 +38,7 @@ func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *s
var prevUtime, prevStime int64
var Utime, Stime int64
var prevTime time.Time
+ var mst runtime.MemStats
ticker := time.NewTicker(period)
hasPrev := false
@@ -48,13 +53,16 @@ func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *s
now = time.Now()
Utime, Stime, _ = util.GetCurrentProcessTimes()
+ runtime.ReadMemStats(&mst)
+ ramUsageGauge.Set(float64(mst.HeapInuse))
+
if hasPrev {
userNanos := Utime - prevUtime
sysNanos := Stime - prevStime
wallNanos := now.Sub(prevTime).Nanoseconds()
userf := float64(userNanos) / float64(wallNanos)
sysf := float64(sysNanos) / float64(wallNanos)
- log.Infof("usage nanos wall=%d user=%d sys=%d pu=%0.4f%% ps=%0.4f%%", wallNanos, userNanos, sysNanos, userf*100.0, sysf*100.0)
+ log.Infof("usage nanos wall=%d user=%d sys=%d pu=%0.4f%% ps=%0.4f%% inuse=%d", wallNanos, userNanos, sysNanos, userf*100.0, sysf*100.0, mst.HeapInuse)
} else {
hasPrev = true
}
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 48884f177..d5f8b111e 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -1058,7 +1058,7 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo
response.WriteHeader(http.StatusPreconditionFailed)
n, err := response.Write([]byte("mismatching genesis ID"))
if err != nil {
- wn.log.Warnf("ws failed to write mismatching genesis ID response '%s' : n = %d err = %v", n, err)
+ wn.log.Warnf("ws failed to write mismatching genesis ID response '%s' : n = %d err = %v", otherGenesisID, n, err)
}
return http.StatusPreconditionFailed
}
diff --git a/network/wsPeer.go b/network/wsPeer.go
index b16345576..e1ac6ffb3 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -180,6 +180,11 @@ type wsPeer struct {
// Nonce used to uniquely identify requests
requestNonce uint64
+ // duplicateFilterCount counts how many times the remote peer has sent us a message hash
+ // to filter that it had already sent before.
+ // this needs to be 64-bit aligned for use with atomic.AddUint64 on 32-bit platforms.
+ duplicateFilterCount uint64
+
wsPeerCore
// conn will be *websocket.Conn (except in testing)
@@ -203,9 +208,6 @@ type wsPeer struct {
incomingMsgFilter *messageFilter
outgoingMsgFilter *messageFilter
- // duplicateFilterCount counts how many times the remote peer has sent us a message hash
- // to filter that it had already sent before.
- duplicateFilterCount int64
processed chan struct{}
@@ -515,7 +517,7 @@ func (wp *wsPeer) readLoop() {
case channel <- &Response{Topics: topics}:
// do nothing. writing was successful.
default:
- wp.net.log.Warnf("wsPeer readLoop: channel blocked. Could not pass the response to the requester", wp.conn.RemoteAddr().String())
+ wp.net.log.Warn("wsPeer readLoop: channel blocked. Could not pass the response to the requester", wp.conn.RemoteAddr().String())
}
continue
case protocol.MsgDigestSkipTag:
@@ -614,7 +616,7 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) {
// large message concurrently from several peers, and then sent the filter message to us after
// each large message finished transferring.
duplicateNetworkFilterReceivedTotal.Inc(nil)
- atomic.AddInt64(&wp.duplicateFilterCount, 1)
+ atomic.AddUint64(&wp.duplicateFilterCount, 1)
}
}
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index d61c182d3..2798a5256 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -103,6 +103,7 @@ func TestAtomicVariablesAlignment(t *testing.T) {
require.True(t, (unsafe.Offsetof(p.requestNonce)%8) == 0)
require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0)
require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0)
+ require.True(t, (unsafe.Offsetof(p.duplicateFilterCount)%8) == 0)
}
func TestTagCounterFiltering(t *testing.T) {
diff --git a/node/node.go b/node/node.go
index a21aad188..11ce27fda 100644
--- a/node/node.go
+++ b/node/node.go
@@ -385,8 +385,9 @@ func (node *AlgorandFullNode) startMonitoringRoutines() {
// Delete old participation keys
go node.oldKeyDeletionThread(node.ctx.Done())
- // TODO re-enable with configuration flag post V1
- //go logging.UsageLogThread(node.ctx, node.log, 100*time.Millisecond, nil)
+ if node.config.EnableUsageLog {
+ go logging.UsageLogThread(node.ctx, node.log, 100*time.Millisecond, nil)
+ }
}
// waitMonitoringRoutines waits for all the monitoring routines to exit. Note that
@@ -941,7 +942,7 @@ func (node *AlgorandFullNode) loadParticipationKeys() error {
renamedFileName := filepath.Join(fullname, ".old")
err = os.Rename(fullname, renamedFileName)
if err != nil {
- node.log.Warn("loadParticipationKeys: failed to rename unsupported participation key file '%s' to '%s': %v", fullname, renamedFileName, err)
+ node.log.Warnf("loadParticipationKeys: failed to rename unsupported participation key file '%s' to '%s': %v", fullname, renamedFileName, err)
}
} else {
return fmt.Errorf("AlgorandFullNode.loadParticipationKeys: cannot load account at %v: %v", info.Name(), err)
@@ -1070,7 +1071,7 @@ func (node *AlgorandFullNode) oldKeyDeletionThread(done <-chan struct{}) {
// Persist participation registry updates to last-used round and voting key changes.
err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration)
if err != nil {
- node.log.Warnf("error while flushing the registry: %w", err)
+ node.log.Warnf("error while flushing the registry: %v", err)
}
}
}
diff --git a/protocol/codec_tester.go b/protocol/codec_tester.go
index f40270039..8d784a069 100644
--- a/protocol/codec_tester.go
+++ b/protocol/codec_tester.go
@@ -241,7 +241,14 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
*remainingChanges--
case reflect.String:
var buf []byte
- len := rand.Int() % 64
+ var len int
+ if strings.HasSuffix(v.Type().PkgPath(), "go-algorand/agreement") && v.Type().Name() == "serializableError" {
+ // Don't generate empty strings for serializableError since nil values of *string type
+ // will serialize differently by msgp and go-codec
+ len = rand.Int()%63 + 1
+ } else {
+ len = rand.Int() % 64
+ }
for i := 0; i < len; i++ {
buf = append(buf, byte(rand.Uint32()))
}
@@ -270,6 +277,10 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
// unexported
continue
}
+ if st.Name() == "messageEvent" && f.Name == "Tail" {
+ // Don't try and set the Tail field since it's recursive
+ continue
+ }
if rawMsgpType == f.Type {
return errSkipRawMsgpTesting
}
diff --git a/protocol/consensus.go b/protocol/consensus.go
index cd03519fb..51654ed3f 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -192,6 +192,11 @@ const ConsensusV35 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/433d8e9a7274b6fca703d91213e05c7e6a589e69",
)
+// ConsensusV36 adds box storage
+const ConsensusV36 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/44fa607d6051730f5264526bf3c108d51f0eadb6",
+)
+
// ConsensusFuture is a protocol that should not appear in any production
// network, but is used to test features before they are released.
const ConsensusFuture = ConsensusVersion(
@@ -218,7 +223,7 @@ const ConsensusVAlpha4 = ConsensusVersion("alpha4")
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV35
+const ConsensusCurrentVersion = ConsensusV36
// Error is used to indicate that an unsupported protocol has been detected.
type Error ConsensusVersion
diff --git a/rpcs/txService.go b/rpcs/txService.go
index e621d6541..654a8e69f 100644
--- a/rpcs/txService.go
+++ b/rpcs/txService.go
@@ -143,7 +143,7 @@ func (txs *TxService) ServeHTTP(response http.ResponseWriter, request *http.Requ
response.WriteHeader(http.StatusOK)
_, err = response.Write(txblob)
if err != nil {
- txs.log.Warnf("http block write failed ", err)
+ txs.log.Warn("http block write failed", err)
}
}
diff --git a/scripts/buildtools/versions b/scripts/buildtools/versions
index 04960db22..076f8fe3a 100644
--- a/scripts/buildtools/versions
+++ b/scripts/buildtools/versions
@@ -1,6 +1,6 @@
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
golang.org/x/tools v0.1.5
-github.com/algorand/msgp v1.1.52
+github.com/algorand/msgp v1.1.53
github.com/algorand/oapi-codegen v1.3.7
github.com/go-swagger/go-swagger v0.25.0
gotest.tools/gotestsum v1.6.4
diff --git a/scripts/dump_genesis.sh b/scripts/dump_genesis.sh
index 3ee876554..386924588 100755
--- a/scripts/dump_genesis.sh
+++ b/scripts/dump_genesis.sh
@@ -76,6 +76,9 @@ for LEDGER in $LEDGERS; do
unfinishedcatchpoints)
SORT=round
;;
+ kvstore)
+ SORT=key
+ ;;
*)
echo "Unknown table $T" >&2
exit 1
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 610a0e9b5..83228124a 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -479,7 +479,67 @@ func genBigNoOpAndBigHashes(numOps uint32, numHashes uint32, hashSize string) []
return ops.Program
}
-func genAppProgram(numOps uint32, numHashes uint32, hashSize string, numGlobalKeys uint32, numLocalKeys uint32) ([]byte, string) {
+func genAppProgram(numOps uint32, numHashes uint32, hashSize string, numGlobalKeys, numLocalKeys, numBoxUpdate, numBoxRead uint32) ([]byte, string) {
+ if numBoxUpdate != 0 || numBoxRead != 0 {
+ prologue := `#pragma version 8
+ txn ApplicationID
+ bz done
+ `
+ createBoxes := `
+ byte "%d"
+ int 1024
+ box_create
+ pop
+ `
+ updateBoxes := `
+ byte "%d"
+ int 0
+ byte "1"
+ box_replace
+ `
+ getBoxes := `
+ byte "%d"
+ box_get
+ assert
+ pop
+ `
+ done := `
+ done:
+ int 1
+ return
+ `
+
+ progParts := []string{prologue}
+
+ // note: only one of numBoxUpdate or numBoxRead should be nonzero
+ if numBoxUpdate != 0 {
+ for i := uint32(0); i < numBoxUpdate; i++ {
+ progParts = append(progParts, fmt.Sprintf(createBoxes, i))
+ }
+
+ for i := uint32(0); i < numBoxUpdate; i++ {
+ progParts = append(progParts, fmt.Sprintf(updateBoxes, i))
+ }
+ } else {
+ for i := uint32(0); i < numBoxRead; i++ {
+ progParts = append(progParts, fmt.Sprintf(createBoxes, i))
+ }
+
+ for i := uint32(0); i < numBoxRead; i++ {
+ progParts = append(progParts, fmt.Sprintf(getBoxes, i))
+ }
+ }
+ progParts = append(progParts, done)
+
+ // assemble
+ progAsm := strings.Join(progParts, "\n")
+ ops, err := logic.AssembleString(progAsm)
+ if err != nil {
+ panic(err)
+ }
+ return ops.Program, progAsm
+ }
+
prologueSize := uint32(2 + 3 + 2 + 1 + 1 + 3)
prologue := `#pragma version 2
txn ApplicationID
@@ -794,19 +854,28 @@ func (pps *WorkerState) prepareApps(client *libgoal.Client) (err error) {
//txgroup = txgroup[:0]
//senders = senders[:0]
}
+
+ for appid := range pps.cinfo.AppParams {
+ // use source account to fund all apps
+ err = pps.appFundFromSourceAccount(appid, client)
+ if err != nil {
+ return
+ }
+ }
+
return
}
func (pps *WorkerState) newApp(addr string, client *libgoal.Client) (tx transactions.Transaction, err error) {
// generate app program with roughly some number of operations
- prog, asm := genAppProgram(pps.cfg.AppProgOps, pps.cfg.AppProgHashes, pps.cfg.AppProgHashSize, pps.cfg.AppGlobKeys, pps.cfg.AppLocalKeys)
+ prog, asm := genAppProgram(pps.cfg.AppProgOps, pps.cfg.AppProgHashes, pps.cfg.AppProgHashSize, pps.cfg.AppGlobKeys, pps.cfg.AppLocalKeys, pps.cfg.NumBoxUpdate, pps.cfg.NumBoxRead)
if !pps.cfg.Quiet {
fmt.Printf("generated program: \n%s\n", asm)
}
globSchema := basics.StateSchema{NumByteSlice: proto.MaxGlobalSchemaEntries}
locSchema := basics.StateSchema{NumByteSlice: proto.MaxLocalSchemaEntries}
- tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, 0)
+ tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, nil, 0)
if err != nil {
fmt.Printf("Cannot create app txn\n")
panic(err)
@@ -827,7 +896,7 @@ func (pps *WorkerState) newApp(addr string, client *libgoal.Client) (tx transact
}
func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Client) (tx transactions.Transaction, err error) {
- tx, err = client.MakeUnsignedAppOptInTx(appID, nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(appID, nil, nil, nil, nil, nil)
if err != nil {
fmt.Printf("Cannot create app txn\n")
panic(err)
@@ -844,6 +913,35 @@ func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Clie
return
}
+func (pps *WorkerState) appFundFromSourceAccount(appID uint64, client *libgoal.Client) (err error) {
+ // currently, apps only need to be funded if boxes are used
+ if pps.getNumBoxes() > 0 {
+ var srcFunds uint64
+ srcFunds, err = client.GetBalance(pps.cfg.SrcAccount)
+ if err != nil {
+ return err
+ }
+
+ appAddr := basics.AppIndex(appID).Address()
+ mbr := proto.MinBalance +
+ proto.BoxFlatMinBalance*uint64(pps.getNumBoxes()) +
+ proto.BoxByteMinBalance*(proto.MaxBoxSize+uint64(proto.MaxAppKeyLen))*uint64(pps.getNumBoxes())
+
+ pps.schedule(1)
+ var txn transactions.Transaction
+ txn, err = pps.sendPaymentFromSourceAccount(client, appAddr.String(), 0, mbr, pps.accounts[pps.cfg.SrcAccount])
+ if err != nil {
+ return err
+ }
+
+ srcFunds -= mbr
+ srcFunds -= txn.Fee.Raw
+ pps.accounts[pps.cfg.SrcAccount].setBalance(srcFunds)
+ }
+
+ return nil
+}
+
func takeTopAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32, srcAccount string) (accounts map[string]*pingPongAccount) {
allAddrs := make([]string, len(allAccounts))
var i int
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index 8e406d255..f595b05f4 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -60,7 +60,11 @@ type PpConfig struct {
// NumApp is the total number of apps to create
NumApp uint32
// NumAppOptIn is the number of apps each account opts in to
- NumAppOptIn uint32
+ NumAppOptIn uint32
+ // NumBoxUpdate is the number of boxes used per app, where box values are updated each call
+ NumBoxUpdate uint32
+ // NumBoxRead is the number of boxes used per app, where box values are only read each call
+ NumBoxRead uint32
AppProgOps uint32
AppProgHashes uint32
AppProgHashSize string
@@ -106,6 +110,8 @@ var DefaultConfig = PpConfig{
NumAsset: 0,
MinAccountAsset: 10000000,
NumApp: 0,
+ NumBoxUpdate: 0,
+ NumBoxRead: 0,
AppProgOps: 0,
AppProgHashes: 0,
AppProgHashSize: "sha256",
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index e0ee6812a..77eb59580 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -150,6 +150,17 @@ type WorkerState struct {
client *libgoal.Client
}
+// returns the number of boxes per app
+func (pps *WorkerState) getNumBoxes() uint32 {
+ // only one of NumBoxUpdate and NumBoxRead should be nonzero. There isn't
+ // currently support for mixed box workloads so these numbers should not be
+ // added together.
+ if pps.cfg.NumBoxUpdate > 0 {
+ return pps.cfg.NumBoxUpdate
+ }
+ return pps.cfg.NumBoxRead
+}
+
// PrepareAccounts to set up accounts and asset accounts required for Ping Pong run
func (pps *WorkerState) PrepareAccounts(ac *libgoal.Client) (err error) {
pps.client = ac
@@ -1099,6 +1110,13 @@ func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *lib
err = fmt.Errorf("no known apps")
return
}
+
+ // construct box ref array
+ var boxRefs []transactions.BoxRef
+ for i := uint32(0); i < pps.getNumBoxes(); i++ {
+ boxRefs = append(boxRefs, transactions.BoxRef{Index: 0, Name: []byte{fmt.Sprintf("%d", i)[0]}})
+ }
+
appOptIns := pps.cinfo.OptIns[aidx]
sender = from
if len(appOptIns) > 0 {
@@ -1128,7 +1146,7 @@ func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *lib
}
accounts = accounts[1:]
}
- txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil)
+ txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil, boxRefs)
if err != nil {
return
}
diff --git a/stateproof/builder.go b/stateproof/builder.go
index fd800ebaf..3d14c1f74 100644
--- a/stateproof/builder.go
+++ b/stateproof/builder.go
@@ -98,7 +98,7 @@ func (spw *Worker) initBuilders() {
return
})
if err != nil {
- spw.log.Warnf("initBuilders: getPendingSigs: %w", err)
+ spw.log.Warnf("initBuilders: getPendingSigs: %v", err)
return
}
@@ -128,7 +128,7 @@ func (spw *Worker) addSigsToBuilder(sigs []pendingSig, rnd basics.Round) {
isPresent, err := builderForRound.Present(pos)
if err != nil {
- spw.log.Warnf("addSigsToBuilder: failed to invoke builderForRound.Present on pos %d - %w ", pos, err)
+ spw.log.Warnf("addSigsToBuilder: failed to invoke builderForRound.Present on pos %d - %v", pos, err)
continue
}
if isPresent {
@@ -141,7 +141,7 @@ func (spw *Worker) addSigsToBuilder(sigs []pendingSig, rnd basics.Round) {
continue
}
if err := builderForRound.Add(pos, sig.sig); err != nil {
- spw.log.Warnf("addSigsToBuilder: error while adding sig. inner error: %w", err)
+ spw.log.Warnf("addSigsToBuilder: error while adding sig. inner error: %v", err)
continue
}
}
@@ -407,7 +407,7 @@ func (spw *Worker) tryBroadcast() {
sp, err := b.Build()
if err != nil {
- spw.log.Warnf("spw.tryBroadcast: building state proof for %d failed: %w", rnd, err)
+ spw.log.Warnf("spw.tryBroadcast: building state proof for %d failed: %v", rnd, err)
continue
}
diff --git a/test/commandandcontrol/cc_agent/main.go b/test/commandandcontrol/cc_agent/main.go
index a64ca804e..136c38f0d 100644
--- a/test/commandandcontrol/cc_agent/main.go
+++ b/test/commandandcontrol/cc_agent/main.go
@@ -120,7 +120,7 @@ func main() {
serverWs, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
if err != nil {
- log.Errorf("dial:", err)
+ log.Error("dial:", err)
}
serverWs.Unsafe = true
defer func() {
@@ -168,7 +168,7 @@ func main() {
case t := <-ticker.C:
err := serverWs.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf("heartbeat from agent %s with time %s", component.GetHostAgent().Host.Name, t.String())))
if err != nil {
- log.Errorf("write:", err)
+ log.Error("write:", err)
return
}
case <-interrupt:
@@ -177,7 +177,7 @@ func main() {
// waiting (with timeout) for the server to close the connection.
err := serverWs.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
- log.Errorf("write close:", err)
+ log.Error("write close:", err)
return
}
select {
diff --git a/test/commandandcontrol/cc_client/main.go b/test/commandandcontrol/cc_client/main.go
index 817afb850..7125ecd31 100644
--- a/test/commandandcontrol/cc_client/main.go
+++ b/test/commandandcontrol/cc_client/main.go
@@ -129,7 +129,7 @@ func main() {
func closeServiceConnection(serverWs *websocket.Conn) {
err := serverWs.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
- log.Errorf("write close:", err)
+ log.Error("write close:", err)
return
}
}
diff --git a/test/commandandcontrol/cc_service/main.go b/test/commandandcontrol/cc_service/main.go
index a95130320..26e592e66 100644
--- a/test/commandandcontrol/cc_service/main.go
+++ b/test/commandandcontrol/cc_service/main.go
@@ -56,7 +56,7 @@ func main() {
func handleClientConnections(w http.ResponseWriter, r *http.Request) {
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
- log.Errorf("upgrade:", err)
+ log.Error("upgrade:", err)
return
}
ws.Unsafe = true
@@ -72,7 +72,7 @@ func handleAgentConnections(w http.ResponseWriter, r *http.Request) {
// Upgrade initial GET request to a websocket
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
- log.Errorf("problem initializing agent web socket", err)
+ log.Error("problem initializing agent web socket", err)
return
}
ws.Unsafe = true
diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go
index 679d2728a..ca12e1f55 100644
--- a/test/e2e-go/features/accountPerf/sixMillion_test.go
+++ b/test/e2e-go/features/accountPerf/sixMillion_test.go
@@ -1155,7 +1155,7 @@ int 1
// create the app
appTx, err = client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
require.NoError(t, err)
note := make([]byte, 8)
@@ -1182,7 +1182,7 @@ func makeOptInAppTransaction(
tLife uint64,
genesisHash crypto.Digest) (appTx transactions.Transaction) {
- appTx, err := client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ appTx, err := client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
require.NoError(t, err)
appTx.Header = transactions.Header{
@@ -1288,7 +1288,7 @@ func callAppTransaction(
tLife uint64,
genesisHash crypto.Digest) (appTx transactions.Transaction) {
- appTx, err := client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil)
+ appTx, err := client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil)
require.NoError(t, err)
appTx.Header = transactions.Header{
diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go
index 52b39c9ee..feb3b244f 100644
--- a/test/e2e-go/features/transactions/accountv2_test.go
+++ b/test/e2e-go/features/transactions/accountv2_test.go
@@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -159,7 +160,7 @@ int 1
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
a.NoError(err)
@@ -214,7 +215,7 @@ int 1
checkEvalDelta(t, &client, txnRound, txnRound+1, 1, 1)
// call the app
- tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
@@ -293,7 +294,321 @@ int 1
a.Equal(creator, app.Params.Creator)
// call the app
- tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
+ a.NoError(err)
+ signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+ txid, err = client.BroadcastTransaction(signedTxn)
+ a.NoError(err)
+ for {
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ _, err = client.WaitForRound(round + 1)
+ a.NoError(err)
+ // Ensure the txn committed
+ resp, err = client.GetPendingTransactions(2)
+ a.NoError(err)
+ if resp.TotalTxns == 1 {
+ a.Equal(resp.TruncatedTxns.Transactions[0].TxID, txid)
+ continue
+ }
+ a.Equal(uint64(0), resp.TotalTxns)
+ break
+ }
+
+ ad, err = client.AccountData(creator)
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
+ params, ok = ad.AppParams[appIdx]
+ a.True(ok)
+ value, ok = params.GlobalState["counter"]
+ a.True(ok)
+ a.Equal(uint64(3), value.Uint)
+
+ txInfo, err = fixture.LibGoalClient.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(txInfo.ConfirmedRound)
+ a.NotZero(*txInfo.ConfirmedRound)
+ txnRound = *txInfo.ConfirmedRound
+
+	// 3 global state updates in total, 2 local state updates
+	checkEvalDelta(t, &client, txnRound, txnRound+1, 3, 2)
+}
+
+// Add an offending asset index that exceeds the max int64 value
+func TestAccountInformationWithBadAssetIdx(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ t.Parallel()
+ accountInformationCheckWithOffendingFields(t, []basics.AssetIndex{12181853637140359511}, nil, nil)
+}
+
+// Add missing asset index
+func TestAccountInformationWithMissingAssetIdx(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ accountInformationCheckWithOffendingFields(t, []basics.AssetIndex{121818}, nil, nil)
+}
+
+// Add an offending app index that exceeds the max int64 value
+func TestAccountInformationWithBadAppIdx(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ accountInformationCheckWithOffendingFields(t, nil, []basics.AppIndex{12181853637140359511}, nil)
+}
+
+// Add missing app index
+func TestAccountInformationWithMissingApp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ accountInformationCheckWithOffendingFields(t, nil, []basics.AppIndex{121818}, nil)
+}
+
+// Add missing account address
+func TestAccountInformationWithMissingAddress(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ randAddr := basics.Address{}
+ crypto.RandBytes(randAddr[:])
+ accountInformationCheckWithOffendingFields(t, nil, nil, []basics.Address{randAddr})
+}
+
+func accountInformationCheckWithOffendingFields(t *testing.T,
+ foreignAssets []basics.AssetIndex,
+ foreignApps []basics.AppIndex,
+ accounts []basics.Address) {
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ proto, ok := config.Consensus[protocol.ConsensusFuture]
+ a.True(ok)
+ proto.AgreementFilterTimeoutPeriod0 = 400 * time.Millisecond
+ proto.AgreementFilterTimeout = 400 * time.Millisecond
+ fixture.SetConsensus(config.ConsensusProtocols{protocol.ConsensusFuture: proto})
+
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV26.json"))
+ defer fixture.Shutdown()
+
+ client := fixture.LibGoalClient
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+
+ creator := accountList[0].Address
+ wh, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ user, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ fee := uint64(1000)
+
+ var txn transactions.Transaction
+
+ // Fund the manager, so it can issue transactions later on
+ txn, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
+ a.NoError(err)
+
+ round, err := client.CurrentRound()
+ a.NoError(err)
+ fixture.WaitForConfirmedTxn(round+4, creator, txn.ID().String())
+
+ // There should be no apps to start with
+ ad, err := client.AccountData(creator)
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
+
+ ad, err = client.AccountData(user)
+ a.NoError(err)
+ a.Zero(len(ad.AppParams))
+ a.Equal(basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
+
+ counter := `#pragma version 2
+// a simple global and local calls counter app
+byte b64 Y291bnRlcg== // counter
+dup
+app_global_get
+int 1
++
+app_global_put // update the counter
+int 0
+int 0
+app_opted_in
+bnz opted_in
+err
+opted_in:
+int 0 // account idx for app_local_put
+byte b64 Y291bnRlcg== // counter
+int 0
+byte b64 Y291bnRlcg==
+app_local_get
+int 1 // increment
++
+app_local_put
+int 1
+`
+ approvalOps, err := logic.AssembleString(counter)
+ a.NoError(err)
+ clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
+ a.NoError(err)
+ schema := basics.StateSchema{
+ NumUint: 1,
+ }
+
+ // create the app
+ tx, err := client.MakeUnsignedAppCreateTx(
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
+ a.NoError(err)
+ wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+ txid, err := client.BroadcastTransaction(signedTxn)
+ a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ // ensure transaction is accepted into a block within 5 rounds.
+ confirmed := fixture.WaitForAllTxnsToConfirm(round+5, map[string]string{txid: signedTxn.Txn.Sender.String()})
+ a.True(confirmed)
+
+ // check creator's balance record for the app entry and the state changes
+ ad, err = client.AccountData(creator)
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
+ var appIdx basics.AppIndex
+ var params basics.AppParams
+ for i, p := range ad.AppParams {
+ appIdx = i
+ params = p
+ break
+ }
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
+ value, ok := params.GlobalState["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ a.Equal(1, len(ad.AppLocalStates))
+ state, ok := ad.AppLocalStates[appIdx]
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
+ value, ok = state.KeyValue["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ txInfo, err := fixture.LibGoalClient.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(txInfo.ConfirmedRound)
+ a.NotZero(*txInfo.ConfirmedRound)
+ txnRound := *txInfo.ConfirmedRound
+
+	// 1 global state update in total, 1 local state update
+	checkEvalDelta(t, &client, txnRound, txnRound+1, 1, 1)
+
+ // call the app
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
+ a.NoError(err)
+ if foreignAssets != nil {
+ tx.ForeignAssets = foreignAssets
+ }
+ if foreignApps != nil {
+ tx.ForeignApps = foreignApps
+ }
+ if accounts != nil {
+ tx.Accounts = accounts
+ }
+ tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
+ a.NoError(err)
+ wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+ txid, err = client.BroadcastTransaction(signedTxn)
+ a.NoError(err)
+ round, err = client.CurrentRound()
+ a.NoError(err)
+ _, err = client.WaitForRound(round + 3)
+ a.NoError(err)
+
+ // Ensure the txn committed
+ resp, err := client.GetPendingTransactions(2)
+ a.NoError(err)
+ a.Equal(uint64(0), resp.TotalTxns)
+ txinfo, err := client.TransactionInformation(signedTxn.Txn.Sender.String(), txid)
+ a.NoError(err)
+ a.True(txinfo.ConfirmedRound != 0)
+
+ // check creator's balance record for the app entry and the state changes
+ ad, err = client.AccountData(creator)
+ a.NoError(err)
+ a.Equal(1, len(ad.AppParams))
+ params, ok = ad.AppParams[appIdx]
+ a.True(ok)
+ a.Equal(approvalOps.Program, params.ApprovalProgram)
+ a.Equal(clearstateOps.Program, params.ClearStateProgram)
+ a.Equal(schema, params.LocalStateSchema)
+ a.Equal(schema, params.GlobalStateSchema)
+ a.Equal(1, len(params.GlobalState))
+ value, ok = params.GlobalState["counter"]
+ a.True(ok)
+ a.Equal(uint64(2), value.Uint)
+
+ a.Equal(1, len(ad.AppLocalStates))
+ state, ok = ad.AppLocalStates[appIdx]
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
+ value, ok = state.KeyValue["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ a.Equal(uint64(2), ad.TotalAppSchema.NumUint)
+
+ // check user's balance record for the app entry and the state changes
+ ad, err = client.AccountData(user)
+ a.NoError(err)
+ a.Equal(0, len(ad.AppParams))
+
+ a.Equal(1, len(ad.AppLocalStates))
+ state, ok = ad.AppLocalStates[appIdx]
+ a.True(ok)
+ a.Equal(schema, state.Schema)
+ a.Equal(1, len(state.KeyValue))
+ value, ok = state.KeyValue["counter"]
+ a.True(ok)
+ a.Equal(uint64(1), value.Uint)
+
+ txInfo, err = fixture.LibGoalClient.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(txInfo.ConfirmedRound)
+ a.NotZero(*txInfo.ConfirmedRound)
+ txnRound = *txInfo.ConfirmedRound
+
+	// 2 global state updates in total, 1 local state update
+	checkEvalDelta(t, &client, txnRound, txnRound+1, 2, 1)
+
+ a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
+
+ app, err := client.ApplicationInformation(uint64(appIdx))
+ a.NoError(err)
+ a.Equal(uint64(appIdx), app.Id)
+ a.Equal(creator, app.Params.Creator)
+
+ // call the app
+ tx, err = client.MakeUnsignedAppNoOpTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
diff --git a/test/e2e-go/features/transactions/app_pages_test.go b/test/e2e-go/features/transactions/app_pages_test.go
index 5ab2e4ad1..31ea8168c 100644
--- a/test/e2e-go/features/transactions/app_pages_test.go
+++ b/test/e2e-go/features/transactions/app_pages_test.go
@@ -89,7 +89,7 @@ return
// create app 1 with 1 extra page
app1ExtraPages := uint32(1)
- tx, err := client.MakeUnsignedAppCreateTx(transactions.NoOpOC, smallProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, app1ExtraPages)
+ tx, err := client.MakeUnsignedAppCreateTx(transactions.NoOpOC, smallProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, nil, app1ExtraPages)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -110,7 +110,7 @@ return
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages))
// update app 1 and ensure the extra page still works
- tx, err = client.MakeUnsignedAppUpdateTx(app1ID, nil, nil, nil, nil, bigProgram, smallProgram)
+ tx, err = client.MakeUnsignedAppUpdateTx(app1ID, nil, nil, nil, nil, nil, bigProgram, smallProgram)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -130,7 +130,7 @@ return
// create app 2 with 2 extra pages
app2ExtraPages := uint32(2)
- tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, bigProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, app2ExtraPages)
+ tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, bigProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, nil, app2ExtraPages)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -151,7 +151,7 @@ return
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages+app2ExtraPages))
// delete app 1
- tx, err = client.MakeUnsignedAppDeleteTx(app1ID, nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppDeleteTx(app1ID, nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
@@ -170,7 +170,7 @@ return
a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app2ExtraPages))
// delete app 2
- tx, err = client.MakeUnsignedAppDeleteTx(app2ID, nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppDeleteTx(app2ID, nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
a.NoError(err)
diff --git a/test/e2e-go/features/transactions/application_test.go b/test/e2e-go/features/transactions/application_test.go
index a5786cc4f..380264b29 100644
--- a/test/e2e-go/features/transactions/application_test.go
+++ b/test/e2e-go/features/transactions/application_test.go
@@ -97,7 +97,7 @@ log
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
a.NoError(err)
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 1c9c94216..6cdd0d577 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -18,18 +18,22 @@ package restapi
import (
"context"
+ "encoding/binary"
"encoding/hex"
"errors"
"flag"
-
+ "fmt"
+ "github.com/algorand/go-algorand/daemon/algod/api/client"
"math"
"math/rand"
"os"
"path/filepath"
+ "sort"
"strings"
"testing"
"time"
- "unicode"
+
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/stretchr/testify/require"
@@ -99,16 +103,6 @@ func mutateStringAtIndex(in string, i int) (out string) {
return out
}
-// checks whether a string is all letters-or-spaces
-func isLetterOrSpace(s string) bool {
- for _, r := range s {
- if !unicode.IsLetter(r) && !unicode.IsSpace(r) {
- return false
- }
- }
- return true
-}
-
func getMaxBalAddr(t *testing.T, testClient libgoal.Client, addresses []string) (someBal uint64, someAddress string) {
a := require.New(fixtures.SynchronizedTest(t))
someBal = 0
@@ -542,6 +536,7 @@ func TestAccountParticipationInfo(t *testing.T) {
}
a.NoError(err)
addr, err := basics.UnmarshalChecksumAddress(someAddress)
+ a.NoError(err)
params, err := testClient.SuggestedParams()
a.NoError(err)
@@ -620,7 +615,7 @@ func TestClientCanGetGoRoutines(t *testing.T) {
goRoutines, err := testClient.GetGoRoutines(ctx)
a.NoError(err)
a.NotEmpty(goRoutines)
- a.True(strings.Index(goRoutines, "goroutine profile:") >= 0)
+ a.True(strings.Contains(goRoutines, "goroutine profile:"))
}
func TestSendingTooMuchFails(t *testing.T) {
@@ -985,15 +980,17 @@ int 1
return
`
ops, err := logic.AssembleString(prog)
+ a.NoError(err)
approv := ops.Program
ops, err = logic.AssembleString("#pragma version 5 \nint 1")
clst := ops.Program
+ a.NoError(err)
gl := basics.StateSchema{}
lc := basics.StateSchema{}
// create app
- appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(0, nil, nil, nil, nil, transactions.NoOpOC, approv, clst, gl, lc, 0)
+ appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(0, nil, nil, nil, nil, nil, transactions.NoOpOC, approv, clst, gl, lc, 0)
a.NoError(err)
appCreateTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appCreateTxn)
a.NoError(err)
@@ -1017,7 +1014,7 @@ return
a.NoError(err)
// call app, which will issue an ASA create inner txn
- appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(uint64(createdAppID), nil, nil, nil, nil)
+ appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(uint64(createdAppID), nil, nil, nil, nil, nil)
a.NoError(err)
appCallTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appCallTxn)
a.NoError(err)
@@ -1148,6 +1145,7 @@ func TestStateProofParticipationKeysAPI(t *testing.T) {
a.NoError(err)
partkey, err := account.RestoreParticipation(partdb)
+ a.NoError(err)
pRoot, err := testClient.GetParticipationKeys()
a.NoError(err)
@@ -1222,3 +1220,390 @@ func TestNilStateProofInParticipationInfo(t *testing.T) {
a.NoError(err)
a.Nil(account.Participation.StateProofKey)
}
+
+func TestBoxNamesByAppID(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ testClient.WaitForRound(1)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, someAddress := getMaxBalAddr(t, testClient, addresses)
+ if someAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+
+ prog := `#pragma version 8
+ txn ApplicationID
+ bz end // create the app
+ txn NumAppArgs
+ bz end // approve when no app args
+ txn ApplicationArgs 0 // [arg[0]] // fails if no args && app already exists
+ byte "create" // [arg[0], "create"] // create box named arg[1]
+ == // [arg[0]=?="create"]
+ bz del // "create" ? continue : goto del
+ int 5 // [5]
+ txn ApplicationArgs 1 // [5, arg[1]]
+ swap
+ box_create // [] // boxes: arg[1] -> [5]byte
+ assert
+ b end
+del: // delete box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "delete" // [arg[0], "delete"]
+ == // [arg[0]=?="delete"]
+ bz set // "delete" ? continue : goto set
+ txn ApplicationArgs 1 // [arg[1]]
+ box_del // del boxes[arg[1]]
+ assert
+ b end
+set: // put arg[2] at start of box arg[1] ... so actually a _partial_ "set"
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "set" // [arg[0], "set"]
+ == // [arg[0]=?="set"]
+ bz bad // "set" ? continue : goto bad
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ box_replace // [] // boxes: arg[1] -> replace(boxes[arg[1]], 0, arg[2])
+ b end
+bad:
+ err
+end:
+ int 1
+`
+ ops, err := logic.AssembleString(prog)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 8\nint 1")
+ clearState := ops.Program
+
+ gl := basics.StateSchema{}
+ lc := basics.StateSchema{}
+
+ // create app
+ appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(
+ 0, nil, nil, nil,
+ nil, nil, transactions.NoOpOC,
+ approval, clearState, gl, lc, 0,
+ )
+ a.NoError(err)
+ appCreateTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appCreateTxn)
+ a.NoError(err)
+ appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, appCreateTxID, 30*time.Second)
+ a.NoError(err)
+
+ // get app ID
+ submittedAppCreateTxn, err := testClient.PendingTransactionInformationV2(appCreateTxID)
+ a.NoError(err)
+ a.NotNil(submittedAppCreateTxn.ApplicationIndex)
+ createdAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
+ a.Greater(uint64(createdAppID), uint64(0))
+
+ // fund app account
+ appFundTxn, err := testClient.SendPaymentFromWallet(
+ wh, nil, someAddress, createdAppID.Address().String(),
+ 0, 10_000_000, nil, "", 0, 0,
+ )
+ a.NoError(err)
+ appFundTxID := appFundTxn.ID()
+ _, err = waitForTransaction(t, testClient, someAddress, appFundTxID.String(), 30*time.Second)
+ a.NoError(err)
+
+ createdBoxName := map[string]bool{}
+ var createdBoxCount uint64 = 0
+
+ // define operate box helper
+ operateBoxAndSendTxn := func(operation string, boxNames []string, boxValues []string, errPrefix ...string) {
+ txns := make([]transactions.Transaction, len(boxNames))
+ txIDs := make(map[string]string, len(boxNames))
+
+ for i := 0; i < len(boxNames); i++ {
+ appArgs := [][]byte{
+ []byte(operation),
+ []byte(boxNames[i]),
+ []byte(boxValues[i]),
+ }
+ boxRef := transactions.BoxRef{
+ Name: []byte(boxNames[i]),
+ Index: 0,
+ }
+
+ txns[i], err = testClient.MakeUnsignedAppNoOpTx(
+ uint64(createdAppID), appArgs,
+ nil, nil, nil,
+ []transactions.BoxRef{boxRef},
+ )
+ a.NoError(err)
+ txns[i], err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, txns[i])
+ a.NoError(err)
+ txIDs[txns[i].ID().String()] = someAddress
+ }
+
+ var gid crypto.Digest
+ gid, err = testClient.GroupID(txns)
+ a.NoError(err)
+
+ stxns := make([]transactions.SignedTxn, len(boxNames))
+ for i := 0; i < len(boxNames); i++ {
+ txns[i].Group = gid
+ wh, err = testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ stxns[i], err = testClient.SignTransactionWithWallet(wh, nil, txns[i])
+ a.NoError(err)
+ }
+
+ err = testClient.BroadcastTransactionGroup(stxns)
+ if len(errPrefix) == 0 {
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, txns[0].ID().String(), 30*time.Second)
+ a.NoError(err)
+ } else {
+ a.ErrorContains(err, errPrefix[0])
+ }
+ }
+
+ // `assertErrorResponse` confirms the _Result limit exceeded_ error response provides expected fields and values.
+ assertErrorResponse := func(err error, expectedCount, requestedMax uint64) {
+ a.Error(err)
+ e := err.(client.HTTPError)
+ a.Equal(400, e.StatusCode)
+
+ var er *generated.ErrorResponse
+ err = protocol.DecodeJSON([]byte(e.ErrorString), &er)
+ a.NoError(err)
+ a.Equal("Result limit exceeded", er.Message)
+ a.Equal(uint64(100000), ((*er.Data)["max-api-box-per-application"]).(uint64))
+ a.Equal(requestedMax, ((*er.Data)["max"]).(uint64))
+ a.Equal(expectedCount, ((*er.Data)["total-boxes"]).(uint64))
+
+ a.Len(*er.Data, 3, fmt.Sprintf("error response (%v) contains unverified fields. Extend test for new fields.", *er.Data))
+ }
+
+ // `assertBoxCount` sanity checks that the REST API respects `expectedCount` through different queries against app ID = `createdAppID`.
+ assertBoxCount := func(expectedCount uint64) {
+ // Query without client-side limit.
+ resp, err := testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+ a.Len(resp.Boxes, int(expectedCount))
+
+ // Query with requested max < expected count.
+ _, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount-1)
+ assertErrorResponse(err, expectedCount, expectedCount-1)
+
+ // Query with requested max == expected count.
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount)
+ a.NoError(err)
+ a.Len(resp.Boxes, int(expectedCount))
+
+ // Query with requested max > expected count.
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), expectedCount+1)
+ a.NoError(err)
+ a.Len(resp.Boxes, int(expectedCount))
+ }
+
+ // helper function, take operation and a slice of box names
+ // then submit transaction group containing all operations on box names
+ // Then we check these boxes are appropriately created/deleted
+ operateAndMatchRes := func(operation string, boxNames []string) {
+ boxValues := make([]string, len(boxNames))
+ if operation == "create" {
+ for i, box := range boxNames {
+ keyValid, ok := createdBoxName[box]
+ a.False(ok && keyValid)
+ boxValues[i] = ""
+ }
+ } else if operation == "delete" {
+ for i, box := range boxNames {
+ keyValid, ok := createdBoxName[box]
+ a.True(keyValid == ok)
+ boxValues[i] = ""
+ }
+ } else {
+ a.Failf("Unknown operation %s", operation)
+ }
+
+ operateBoxAndSendTxn(operation, boxNames, boxValues)
+
+ if operation == "create" {
+ for _, box := range boxNames {
+ createdBoxName[box] = true
+ }
+ createdBoxCount += uint64(len(boxNames))
+ } else if operation == "delete" {
+ for _, box := range boxNames {
+ createdBoxName[box] = false
+ }
+ createdBoxCount -= uint64(len(boxNames))
+ }
+
+ var resp generated.BoxesResponse
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+
+ expectedCreatedBoxes := make([]string, 0, createdBoxCount)
+ for name, isCreate := range createdBoxName {
+ if isCreate {
+ expectedCreatedBoxes = append(expectedCreatedBoxes, name)
+ }
+ }
+ sort.Strings(expectedCreatedBoxes)
+
+ actualBoxes := make([]string, len(resp.Boxes))
+ for i, box := range resp.Boxes {
+ actualBoxes[i] = string(box.Name)
+ }
+ sort.Strings(actualBoxes)
+
+ a.Equal(expectedCreatedBoxes, actualBoxes)
+ }
+
+ testingBoxNames := []string{
+ ` `,
+ ` `,
+ ` ? = % ;`,
+ `; DROP *;`,
+ `OR 1 = 1;`,
+ `" ; SELECT * FROM kvstore; DROP acctrounds; `,
+ `背负青天而莫之夭阏者,而后乃今将图南。`,
+ `於浩歌狂熱之際中寒﹔於天上看見深淵。`,
+ `於一切眼中看見無所有﹔於無所希望中得救。`,
+ `有一遊魂,化為長蛇,口有毒牙。`,
+ `不以嚙人,自嚙其身,終以殞顛。`,
+ `那些智力超常的人啊`,
+ `认为已经,熟悉了云和闪电的脾气`,
+ `就不再迷惑,就不必了解自己,世界和他人`,
+ `每天只管,被微风吹拂,与猛虎谈情`,
+ `他们从来,不需要楼梯,只有窗口`,
+ `把一切交付于梦境,和优美的浪潮`,
+ `在这颗行星所有的酒馆,青春自由似乎理所应得`,
+ `面向涣散的未来,只唱情歌,看不到坦克`,
+ `在科学和啤酒都不能安抚的夜晚`,
+ `他们丢失了四季,惶惑之行开始`,
+ `这颗行星所有的酒馆,无法听到远方的呼喊`,
+ `野心勃勃的灯火,瞬间吞没黑暗的脸庞`,
+ `b64:APj/AA==`,
+ `str:123.3/aa\\0`,
+ string([]byte{0, 255, 254, 254}),
+ string([]byte{0, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF}),
+ `; SELECT key from kvstore WHERE key LIKE %;`,
+ `?&%!=`,
+ "SELECT * FROM kvstore " + string([]byte{0, 0}) + " WHERE key LIKE %; ",
+ string([]byte{'%', 'a', 'b', 'c', 0, 0, '%', 'a', '!'}),
+ `
+`,
+ `™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`,
+ `∑´´˙©˚¬∆ßåƒ√¬`,
+ }
+
+ // Happy Vanilla paths:
+ resp, err := testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+ a.Empty(resp.Boxes)
+
+ // Some Un-Happy / Non-Vanilla paths:
+
+ // Even though the next app _does not exist_ as asserted by the error below,
+ // querying it for boxes _DOES NOT ERROR_. There is no easy way to tell
+ // the difference between non-existing boxes for an app that once existed
+ // vs. an app that NEVER existed.
+ nonexistantAppIndex := uint64(1337)
+ _, err = testClient.ApplicationInformation(nonexistantAppIndex)
+ a.ErrorContains(err, "application does not exist")
+ resp, err = testClient.ApplicationBoxes(nonexistantAppIndex, 0)
+ a.NoError(err)
+ a.Len(resp.Boxes, 0)
+
+ operateBoxAndSendTxn("create", []string{``}, []string{``}, "box names may not be zero length")
+
+ for i := 0; i < len(testingBoxNames); i += 16 {
+ var strSliceTest []string
+ // grouping box names to operate, and create such boxes
+ if i+16 >= len(testingBoxNames) {
+ strSliceTest = testingBoxNames[i:]
+ } else {
+ strSliceTest = testingBoxNames[i : i+16]
+ }
+ operateAndMatchRes("create", strSliceTest)
+ }
+
+ assertBoxCount(uint64(len(testingBoxNames)))
+
+ for i := 0; i < len(testingBoxNames); i += 16 {
+ var strSliceTest []string
+ // grouping box names to operate, and delete such boxes
+ if i+16 >= len(testingBoxNames) {
+ strSliceTest = testingBoxNames[i:]
+ } else {
+ strSliceTest = testingBoxNames[i : i+16]
+ }
+ operateAndMatchRes("delete", strSliceTest)
+ }
+
+ resp, err = testClient.ApplicationBoxes(uint64(createdAppID), 0)
+ a.NoError(err)
+ a.Empty(resp.Boxes)
+
+ // Get Box value from box name
+ encodeInt := func(n uint64) []byte {
+ ibytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(ibytes, n)
+ return ibytes
+ }
+
+ boxTests := []struct {
+ name []byte
+ encodedName string
+ value []byte
+ }{
+ {[]byte("foo"), "str:foo", []byte("bar12")},
+ {encodeInt(12321), "int:12321", []byte{0, 1, 254, 3, 2}},
+ {[]byte{0, 248, 255, 32}, "b64:APj/IA==", []byte("lux56")},
+ }
+ for _, boxTest := range boxTests {
+ // Box values are 5 bytes, as defined by the test TEAL program.
+ operateBoxAndSendTxn("create", []string{string(boxTest.name)}, []string{""})
+ operateBoxAndSendTxn("set", []string{string(boxTest.name)}, []string{string(boxTest.value)})
+
+ boxResponse, err := testClient.GetApplicationBoxByName(uint64(createdAppID), boxTest.encodedName)
+ a.NoError(err)
+ a.Equal(boxTest.name, boxResponse.Name)
+ a.Equal(boxTest.value, boxResponse.Value)
+ }
+
+ const numberOfBoxesRemaining = uint64(3)
+ assertBoxCount(numberOfBoxesRemaining)
+
+ // Non-vanilla. Wasteful but correct. Can delete an app without first cleaning up its boxes.
+ appAccountData, err := testClient.AccountData(createdAppID.Address().String())
+ a.NoError(err)
+ a.Equal(numberOfBoxesRemaining, appAccountData.TotalBoxes)
+ a.Equal(uint64(30), appAccountData.TotalBoxBytes)
+
+ // delete the app
+ appDeleteTxn, err := testClient.MakeUnsignedAppDeleteTx(uint64(createdAppID), nil, nil, nil, nil, nil)
+ a.NoError(err)
+ appDeleteTxn, err = testClient.FillUnsignedTxTemplate(someAddress, 0, 0, 0, appDeleteTxn)
+ a.NoError(err)
+ appDeleteTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appDeleteTxn)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, appDeleteTxID, 30*time.Second)
+ a.NoError(err)
+
+ _, err = testClient.ApplicationInformation(uint64(createdAppID))
+ a.ErrorContains(err, "application does not exist")
+
+ assertBoxCount(numberOfBoxesRemaining)
+}
diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go
index 3667cc3ce..9d289f4b3 100644
--- a/test/e2e-go/upgrades/application_support_test.go
+++ b/test/e2e-go/upgrades/application_support_test.go
@@ -149,7 +149,7 @@ int 1
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
a.NoError(err)
@@ -236,7 +236,7 @@ int 1
a.Equal(uint64(1), value.Uint)
// call the app
- tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
@@ -395,7 +395,7 @@ int 1
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
- transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, nil, 0)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(creator, round, round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds, fee, tx)
a.NoError(err)
@@ -491,7 +491,7 @@ int 1
a.Equal(uint64(1), value.Uint)
// call the app
- tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
+ tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil, nil)
a.NoError(err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
a.NoError(err)
diff --git a/test/heapwatch/block_history.py b/test/heapwatch/block_history.py
index 29182e760..ac5c631c4 100644
--- a/test/heapwatch/block_history.py
+++ b/test/heapwatch/block_history.py
@@ -48,6 +48,19 @@ def addr_token_from_algod(algorand_data):
def loads(blob):
return msgpack.loads(base64.b64decode(blob), strict_map_key=False)
+def bstr(x):
+ if isinstance(x, bytes):
+ try:
+ return x.decode()
+ except:
+ pass
+ return x
+
+def obnice(ob):
+ if isinstance(ob, dict):
+ return {bstr(k):obnice(v) for k,v in ob.items()}
+ return ob
+
def dumps(blob):
return base64.b64encode(msgpack.dumps(blob))
@@ -180,8 +193,10 @@ class Fetcher:
if b is None:
print("got None nextblock. exiting")
return
- b = msgpack.loads(b, strict_map_key=False)
+ b = msgpack.loads(b, strict_map_key=False, raw=True)
+ b = obnice(b)
nowround = b['block'].get('rnd', 0)
+ logger.debug('r%d', nowround)
if (lastround is not None) and (nowround != lastround + 1):
logger.info('round jump %d to %d', lastround, nowround)
self._block_handler(b)
@@ -226,7 +241,7 @@ def main():
logging.basicConfig(level=logging.INFO)
algorand_data = args.algod or os.getenv('ALGORAND_DATA')
- if not algorand_data and not (args.token and args.addr):
+ if not algorand_data and not ((args.token or args.headers) and args.addr):
sys.stderr.write('must specify algod data dir by $ALGORAND_DATA or -d/--algod; OR --a/--addr and -t/--token\n')
sys.exit(1)
diff --git a/test/heapwatch/block_history_plot.py b/test/heapwatch/block_history_plot.py
index 174c1dca1..73de45601 100644
--- a/test/heapwatch/block_history_plot.py
+++ b/test/heapwatch/block_history_plot.py
@@ -119,8 +119,12 @@ def process(path, args):
ax1.set_title('round time (seconds)')
ax1.hist(list(filter(lambda x: x < 9,dtv[start:end])),bins=20)
- ax2.set_title('TPS')
- ax2.hist(tpsv[start:end],bins=20)
+ if args.rtime:
+ ax2.set_title('round time')
+ ax2.plot(dtv)
+ else:
+ ax2.set_title('TPS')
+ ax2.hist(tpsv[start:end],bins=20)
ax3.set_title('txn/block')
ax3.hist(txnv[start:end],bins=20)
@@ -152,6 +156,7 @@ def main():
ap.add_argument('files', nargs='+')
ap.add_argument('--all', default=False, action='store_true')
ap.add_argument('--tps1', default=False, action='store_true')
+ ap.add_argument('--rtime', default=False, action='store_true')
ap.add_argument('--start', default=0, type=int, help='start round')
args = ap.parse_args()
diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py
index 70324c3c7..3ec493988 100644
--- a/test/heapwatch/metrics_delta.py
+++ b/test/heapwatch/metrics_delta.py
@@ -191,7 +191,7 @@ class summary:
def blockinfo(self, curtime):
return self.biByTime.get(curtime)
- def byMsg(self):
+ def byMsg(self, html=False):
txPSums = {}
rxPSums = {}
secondsSum = 0
@@ -209,10 +209,14 @@ class summary:
dictMax(rxMax, ns.rxPLists)
dictMin(txMin, ns.txPLists)
dictMin(rxMin, ns.rxPLists)
- lines = [
- '{} nodes: {}'.format(len(nicks), nicks),
- '\ttx B/s\trx B/s',
- ]
+ nodesummary = '{} nodes: {}'.format(len(nicks), nicks)
+ lines = []
+ if html:
+ lines.append('<div>{}</div>'.format(nodesummary))
+ lines.append('<table><tr><th></th><th>tx B/s</th><th>rx B/s</th></tr>')
+ else:
+ lines.append(nodesummary)
+ lines.append('\ttx B/s\trx B/s')
for msg, txB in txPSums.items():
if msg not in rxPSums:
rxPSums[msg] = 0
@@ -220,7 +224,12 @@ class summary:
txBps = txPSums.get(msg,0)/secondsSum
if (txBps < 0.5) and (rxBps < 0.5):
continue
- lines.append('{}\t{:.0f}\t{:.0f}'.format(msg, txBps, rxBps))
+ if html:
+ lines.append('<tr><td>{}</td><td>{:.0f}</td><td>{:.0f}</td></tr>'.format(msg, txBps, rxBps))
+ else:
+ lines.append('{}\t{:.0f}\t{:.0f}'.format(msg, txBps, rxBps))
+ if html:
+ lines.append('</table>')
return '\n'.join(lines)
def txPool(self):
@@ -242,6 +251,12 @@ class summary:
)
def __str__(self):
+ return self.str(html=False)
+
+ def html(self):
+ return self.str(html=True)
+
+ def str(self, html=False):
if not self.sumsCount:
tps, txbps, rxbps = math.nan, math.nan, math.nan
blockTimes = math.nan
@@ -256,9 +271,17 @@ class summary:
labelspace = self.label + " "
if self.verifyMillis:
verifyMillis = labelspace + 'verify ms ({:.0f}/{:.0f}/{:.0f})\n'.format(min(self.verifyMillis), meanOrZero(self.verifyMillis), max(self.verifyMillis))
+ if html:
+ verifyMillis = '<div>' + verifyMillis + '</div>'
else:
verifyMillis = ''
- return '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s'.format(labelspace=labelspace, byMsg=self.byMsg(), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), bt=blockTimes, verifyMillis=verifyMillis)
+ if html:
+ fmt = '{byMsg}\n{verifyMillis}<div>{labelspace}{txPool}</div>\n<div>{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s</div>'
+ if self.label:
+ fmt = '<div class="lh">' + self.label + '</div>' + fmt
+ else:
+ fmt = '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s'
+ return fmt.format(labelspace=labelspace, byMsg=self.byMsg(html), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), bt=blockTimes, verifyMillis=verifyMillis)
def plot_pool(self, outpath):
from matplotlib import pyplot as plt
@@ -330,6 +353,7 @@ def main():
ap.add_argument('--mintps', default=None, type=float, help="records below min TPS don't add into summary")
ap.add_argument('--deltas', default=None, help='path to write csv deltas')
ap.add_argument('--report', default=None, help='path to write csv report')
+ ap.add_argument('--html-summary', default=None, help='path to write html summary')
ap.add_argument('--nick-re', action='append', default=[], help='regexp to filter node names, may be repeated')
ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated')
ap.add_argument('--pool-plot-root', help='write to foo.svg and .png')
@@ -396,6 +420,9 @@ def main():
if args.pool_plot_root:
grsum.plot_pool(args.pool_plot_root)
+ htmlout = None
+ if args.html_summary:
+ htmlout = open(args.html_summary, 'wt')
# maybe subprocess for stats across named groups
if args.nick_re:
# use each --nick-re=foo as a group
@@ -404,6 +431,8 @@ def main():
process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
+ if htmlout:
+ htmlout.write(rsum.html())
return 0
if args.nick_lre:
for lnre in args.nick_lre:
@@ -412,10 +441,14 @@ def main():
process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
+ if htmlout:
+ htmlout.write(rsum.html())
return 0
# no filters, print global result
print(grsum)
+ if htmlout:
+ htmlout.write(grsum.html())
return 0
def perProtocol(prefix, lists, sums, deltas, dt):
@@ -515,6 +548,8 @@ class nodestats:
self.biByTime[curtime] = bi
if bi is None:
bi = bisource.get(curtime)
+ if bi is None:
+ logger.warning('%s no blockinfo', path)
self.txPool.append(cur.get('algod_tx_pool_count{}'))
#logger.debug('%s: %r', path, cur)
verifyGood = cur.get('algod_agreement_proposal_verify_good{}')
diff --git a/test/scripts/e2e_subs/box-search.sh b/test/scripts/e2e_subs/box-search.sh
new file mode 100755
index 000000000..fb0d37f17
--- /dev/null
+++ b/test/scripts/e2e_subs/box-search.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+TEAL=test/scripts/e2e_subs/tealprogs
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+# Version 8 clear program
+printf '#pragma version 8\nint 1' > "${TEMPDIR}/clear.teal"
+
+APPID=$(${gcmd} app create --creator "$ACCOUNT" --approval-prog=${TEAL}/boxes.teal --clear-prog "$TEMPDIR/clear.teal" --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+
+# Fund the app account 10 algos
+APP_ACCOUNT=$(${gcmd} app info --app-id "$APPID" | grep "Application account" | awk '{print $3}')
+${gcmd} clerk send --to "$APP_ACCOUNT" --from "$ACCOUNT" --amount 10000000
+
+# Confirm that we are informed if no application boxes exist
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID" 2>&1 || true)
+EXPECTED="No boxes found for appid $APPID"
+
+[ "$BOX_LIST" = "$EXPECTED" ]
+
+# Confirm that we are informed if a specific application box does not exist
+BOX_INFO=$(${gcmd} app box info --app-id "$APPID" --name "str:not_found" 2>&1 || true)
+EXPECTED="No box found for appid $APPID with name str:not_found"
+
+[ "$BOX_INFO" = "$EXPECTED" ]
+
+# Create several boxes
+BOX_NAMES=("str:box1" "str:with spaces" "b64:YmFzZTY0" "b64:AQIDBA==") # b64:YmFzZTY0 == str:base64, b64:AQIDBA== is not unicode
+BOX_VALUE="box value"
+B64_BOX_VALUE="b64:Ym94IHZhbHVlAAAAAAAAAAAAAAAAAAAA"
+
+for BOX_NAME in "${BOX_NAMES[@]}"
+do
+ # Create the box
+ ${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --box "$BOX_NAME" --app-arg "str:create" --app-arg "$BOX_NAME"
+
+ # Set box value
+ ${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --box "$BOX_NAME" --app-arg "str:set" --app-arg "$BOX_NAME" --app-arg "str:$BOX_VALUE"
+done
+
+# Confirm that we can get the values of each individual box
+for BOX_NAME in "${BOX_NAMES[@]}"
+do
+ ${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME"
+ NAME=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Name | tr -s ' ' | cut -d" " -f2-)
+ [ "$NAME" = "$BOX_NAME" ]
+
+ VALUE=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Value | tr -s ' ' | cut -d" " -f2-)
+ [ "$VALUE" = "$B64_BOX_VALUE" ]
+done
+
+# Confirm that the account data representation knows about all the boxes
+APP_ACCOUNT_JSON_DUMP=$(${gcmd} account dump --address "$APP_ACCOUNT")
+ACTUAL_APP_ACCOUNT_NUM_BOXES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbx')
+EXPECTED_APP_ACCOUNT_NUM_BOXES=4
+ACTUAL_APP_ACCOUNT_BOX_BYTES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbxb')
+EXPECTED_APP_ACCOUNT_BOX_BYTES=121
+[ "$ACTUAL_APP_ACCOUNT_NUM_BOXES" -eq "$EXPECTED_APP_ACCOUNT_NUM_BOXES" ]
+[ "$ACTUAL_APP_ACCOUNT_BOX_BYTES" -eq "$EXPECTED_APP_ACCOUNT_BOX_BYTES" ]
+
+# Confirm that we can get a list of boxes belonging to a particular application
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID")
+EXPECTED="str:box1
+str:with spaces
+str:base64
+b64:AQIDBA=="
+
+# shellcheck disable=SC2059
+[ "$(printf "$BOX_LIST" | sort)" = "$(printf "$EXPECTED" | sort)" ]
+
+# Confirm that we can limit the number of boxes returned
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID" --max 4)
+[ "$(echo "$BOX_LIST" | wc -l)" -eq 4 ] # only one line
+# shellcheck disable=SC2143
+[ "$(grep -w "$BOX_LIST" <<< "$EXPECTED")" ] # actual box is in the expected list
+
+# Create and set a box in an atomic txn group:
+
+BOX_NAME="str:great box"
+echo "Create $BOX_NAME"
+${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --box "$BOX_NAME" --app-arg "str:create" --app-arg "$BOX_NAME" -o box_create.txn
+
+echo "Set $BOX_NAME using $BOX_VALUE"
+${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --app-arg "str:set" --app-arg "$BOX_NAME" --app-arg "str:$BOX_VALUE" -o box_set.txn
+
+# Group them, sign and broadcast:
+cat box_create.txn box_set.txn > box_create_n_set.txn
+${gcmd} clerk group -i box_create_n_set.txn -o box_group.txn
+${gcmd} clerk sign -i box_group.txn -o box_group.stx
+${gcmd} clerk rawsend -f box_group.stx
+
+echo "Confirm that NAME $BOX_NAME as expected"
+${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME"
+NAME=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Name | tr -s ' ' | cut -d" " -f2-)
+[ "$NAME" = "$BOX_NAME" ]
+
+echo "Confirm that VALUE $BOX_VALUE i.e. ($B64_BOX_VALUE) as expected"
+VALUE=$(${gcmd} app box info --app-id "$APPID" --name "$BOX_NAME" | grep Value | tr -s ' ' | cut -d" " -f2-)
+[ "$VALUE" = "$B64_BOX_VALUE" ]
+
+# Confirm that we can still get the list of boxes
+BOX_LIST=$(${gcmd} app box list --app-id "$APPID")
+EXPECTED="str:box1
+str:with spaces
+str:base64
+b64:AQIDBA==
+str:great box"
+
+# shellcheck disable=SC2059
+[ "$(printf "$BOX_LIST" | sort)" = "$(printf "$EXPECTED" | sort)" ]
+
+# Confirm that the account data representation still knows about all the boxes
+APP_ACCOUNT_JSON_DUMP=$(${gcmd} account dump --address "$APP_ACCOUNT")
+ACTUAL_APP_ACCOUNT_NUM_BOXES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbx')
+EXPECTED_APP_ACCOUNT_NUM_BOXES=5
+ACTUAL_APP_ACCOUNT_BOX_BYTES=$(printf "$APP_ACCOUNT_JSON_DUMP" | jq '.tbxb')
+EXPECTED_APP_ACCOUNT_BOX_BYTES=154
+[ "$ACTUAL_APP_ACCOUNT_NUM_BOXES" -eq "$EXPECTED_APP_ACCOUNT_NUM_BOXES" ]
+[ "$ACTUAL_APP_ACCOUNT_BOX_BYTES" -eq "$EXPECTED_APP_ACCOUNT_BOX_BYTES" ]
+
+date "+${scriptname} OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/tealprogs/boxes.teal b/test/scripts/e2e_subs/tealprogs/boxes.teal
new file mode 100644
index 000000000..a8885a7bb
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/boxes.teal
@@ -0,0 +1,60 @@
+// Copied directly from cmd/goal/examples/boxes.teal
+
+#pragma version 8
+ txn ApplicationID
+ bz end
+ txn ApplicationArgs 0 // [arg[0]] // fails if no args && app already exists
+ byte "create" // [arg[0], "create"] // create box named arg[1]
+ == // [arg[0]=?="create"]
+ bz del // "create" ? continue : goto del
+ int 24 // [24]
+ txn NumAppArgs // [24, NumAppArgs]
+ int 2 // [24, NumAppArgs, 2]
+ == // [24, NumAppArgs=?=2]
+ bnz default // WARNING: Assumes that when "create" provided, NumAppArgs >= 3
+ pop // get rid of 24 // NumAppArgs != 2
+ txn ApplicationArgs 2 // [arg[2]] // ERROR when NumAppArgs == 1
+ btoi // [btoi(arg[2])]
+default: // [24] // NumAppArgs >= 3
+ txn ApplicationArgs 1 // [24, arg[1]]
+ swap
+ box_create // [] // boxes: arg[1] -> [24]byte
+ assert
+ b end
+del: // delete box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "delete" // [arg[0], "delete"]
+ == // [arg[0]=?="delete"]
+ bz set // "delete" ? continue : goto set
+ txn ApplicationArgs 1 // [arg[1]]
+ box_del // del boxes[arg[1]]
+ assert
+ b end
+set: // put arg[2] at start of box arg[1] ... so actually a _partial_ "set"
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "set" // [arg[0], "set"]
+ == // [arg[0]=?="set"]
+ bz test // "set" ? continue : goto test
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ box_replace // [] // boxes: arg[1] -> replace(boxes[arg[1]], 0, arg[2])
+ b end
+test: // fail unless arg[2] is the prefix of box arg[1]
+ txn ApplicationArgs 0 // [arg[0]]
+ byte "check" // [arg[0], "check"]
+ == // [arg[0]=?="check"]
+ bz bad // "check" ? continue : goto bad
+ txn ApplicationArgs 1 // [arg[1]]
+ int 0 // [arg[1], 0]
+ txn ApplicationArgs 2 // [arg[1], 0, arg[2]]
+ len // [arg[1], 0, len(arg[2])]
+ box_extract // [ boxes[arg[1]][0:len(arg[2])] ]
+ txn ApplicationArgs 2 // [ boxes[arg[1]][0:len(arg[2])], arg[2] ]
+ == // [ boxes[arg[1]][0:len(arg[2])]=?=arg[2] ]
+ assert // boxes[arg[1]].startswith(arg[2]) ? pop : ERROR
+ b end
+bad: // arg[0] ∉ {"create", "delete", "set", "check"}
+ err
+end:
+ int 1
diff --git a/test/testdata/configs/config-v24.json b/test/testdata/configs/config-v24.json
index 54bd0f9f2..0f8ea8350 100644
--- a/test/testdata/configs/config-v24.json
+++ b/test/testdata/configs/config-v24.json
@@ -49,6 +49,7 @@
"EnableRequestLogger": false,
"EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
+ "EnableUsageLog": false,
"EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
"FallbackDNSResolverAddress": "",
@@ -63,8 +64,8 @@
"LogArchiveMaxAge": "",
"LogArchiveName": "node.archive.log",
"LogSizeLimit": 1073741824,
- "MaxAcctLookback": 4,
"MaxAPIResourcesPerAccount": 100000,
+ "MaxAcctLookback": 4,
"MaxCatchpointDownloadDuration": 7200000000000,
"MaxConnectionsPerIP": 30,
"MinCatchpointFileDownloadBytesPerSecond": 20480,
diff --git a/test/testdata/configs/config-v25.json b/test/testdata/configs/config-v25.json
new file mode 100644
index 000000000..8647d9358
--- /dev/null
+++ b/test/testdata/configs/config-v25.json
@@ -0,0 +1,108 @@
+{
+ "Version": 25,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 7,
+ "AgreementIncomingProposalsQueueLength": 25,
+ "AgreementIncomingVotesQueueLength": 10000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 0,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableUsageLog": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxAcctLookback": 4,
+ "MaxAPIBoxPerApplication": 100000,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}
diff --git a/test/testdata/deployednettemplates/generate-recipe/generate_network.py b/test/testdata/deployednettemplates/generate-recipe/generate_network.py
index aeeef4384..0a92aed20 100755
--- a/test/testdata/deployednettemplates/generate-recipe/generate_network.py
+++ b/test/testdata/deployednettemplates/generate-recipe/generate_network.py
@@ -31,13 +31,13 @@ def build_netgoal_params(template_dict):
instances = template_dict['instances']
relay_count = 0
- participating_node_count = 0
- non_participating_node_count = 0
+ participating_instance_count = 0
+ non_participating_instance_count = 0
for group in template_dict['groups']:
relay_count += getInstanceCount(instances['relays'], group['percent']['relays'])
- participating_node_count += getInstanceCount(instances['participatingNodes'], group['percent']['participatingNodes'])
- non_participating_node_count += getInstanceCount(instances['nonParticipatingNodes'], group['percent']['nonParticipatingNodes'])
+ participating_instance_count += getInstanceCount(instances['participatingNodes'], group['percent']['participatingNodes'])
+ non_participating_instance_count += getInstanceCount(instances['nonParticipatingNodes'], group['percent']['nonParticipatingNodes'])
relay_config = instances['relays']['config']
participating_node_config = instances['participatingNodes']['config']
@@ -45,13 +45,15 @@ def build_netgoal_params(template_dict):
wallets_count = template_dict['network']['wallets']
nodes_count = template_dict['network']['nodes']
+ npn_count = template_dict['network']['npn']
return [
'-w', str(wallets_count),
'-R', str(relay_count),
- '-N', str(participating_node_count),
- '-H', str(non_participating_node_count),
+ '-N', str(participating_instance_count),
+ '-X', str(non_participating_instance_count),
'-n', str(nodes_count),
+ '-x', str(npn_count),
'--relay-template', relay_config,
'--node-template', participating_node_config,
'--non-participating-node-template', non_participating_node_config
@@ -72,7 +74,7 @@ def build_genesis(template_path, netgoal_params, template_dict):
]
args.extend(netgoal_params)
netgoal(args, template_path)
- if template_dict['network']['ConsensusProtocol']:
+ if 'ConsensusProtocol' in template_dict['network']:
updateProtocol(f"{template_path}/generated/genesis.json", template_dict['network']['ConsensusProtocol'])
def updateProtocol(genesis_path, consensus_protocol):
diff --git a/test/testdata/deployednettemplates/recipes/README.md b/test/testdata/deployednettemplates/recipes/README.md
new file mode 100644
index 000000000..214c47aab
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/README.md
@@ -0,0 +1,19 @@
+# Recipes
+
+Most of the recipes' net.json and genesis.json use one of the following methods to call `netgoal generate`:
+1. `Makefile`
+2. `python3 {GO_ALGORAND_PATH}/test/testdata/deployednettemplates/generate-recipe/generate_network.py -f {PATH_TO}/network-tpl.json`
+
+Details for `netgoal generate` can be found by running:
+```
+netgoal generate -h
+```
+
+Source code for netgoal can be found in `{GO_ALGORAND_PATH}/cmd/netgoal/generate.go`
+[Documentation](../../../../cmd/netgoal/README.md)
+
+Make sure your PATH and GOPATH environment variables include the directory containing the netgoal binary.
+
+## Custom Recipe
+Leverages the generate_network.py script and has unique instructions found in the README:
+https://github.com/algorand/go-algorand/tree/master/test/testdata/deployednettemplates/recipes/custom
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
index 2a7d45039..4792348ed 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 20 -R 1 -N 20 -n 20 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 1 -N 20 -n 20 --npn-algod-nodes 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/Makefile b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
index 4cb3c207d..7b89472ad 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/Makefile
+++ b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 20 -R 5 -N 20 -n 20 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 5 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile b/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
index fd573230b..e04c879bb 100644
--- a/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
+++ b/test/testdata/deployednettemplates/recipes/betanet-model-2/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 20 -R 5 -N 20 -n 20 -H 20 -X 20 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 5 -N 20 -n 20 --npn-algod-nodes 20 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
index 626f3ff85..b7fb60dc9 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 100 -R 8 -N 20 -n 100 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
FILEPARAMS=--rounds 5000 --ntxns 1000 --naccounts 3000000 --nassets 20000 --napps 20000 --wallet-name "wallet1" --bal 100000 --bal 1000000
all: net.json genesis.json boostrappedFile.json
diff --git a/test/testdata/deployednettemplates/recipes/custom/README.md b/test/testdata/deployednettemplates/recipes/custom/README.md
index 78c0d3330..a74784c25 100644
--- a/test/testdata/deployednettemplates/recipes/custom/README.md
+++ b/test/testdata/deployednettemplates/recipes/custom/README.md
@@ -22,7 +22,7 @@ Build and create the recipe.
## "Quick" Start - Manual recipe generation (not using Jenkins)
Generate the recipe with the `network-tpl.json` file
-- (See the first section above for small networks.)
+- (See the first section above for small networks. See Troubleshooting for netgoal path set up)
1. Make sure you're in the same directory as this README and `cp network_templates/network-tpl.json network-tpl.json`
2. Generate the recipe with a python script:
```
@@ -67,7 +67,7 @@ Most parameters that can be modified by config.json can be found in `go-algorand
## Troubleshooting
### Can't find netgoal
-- Make sure you have netgoal installed
+- Make sure you have netgoal installed (you can either download it or run through the go-algorand build process)
- Make sure you export GOBIN and GOPATH in your environment and add it to your path.
On a mac, update by editing `~/.zshrc`, add
```
@@ -81,4 +81,4 @@ export PATH=$PATH:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/Users/ec2-user/L
- Make sure the machine type exists. It uses the regions in the groups and the type to come up with the host template name in `test/testdata/deployednettemplates/hosttemplates/hosttemplates.json`. If it doesn't exist, you will have to add it to that file.
### couldn't initialize the node: unsupported protocol
-- check your consensus.json. It may be missing the keys in the future protocol if you are doing this manually. Compare the consensus.json with `goal protocols > generated_consensus.json` \ No newline at end of file
+- check your consensus.json. It may be missing the keys in the future protocol if you are doing this manually. Compare the consensus.json with `goal protocols > generated_consensus.json`
diff --git a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json
index 1f6a8b2fb..53c07e5fb 100644
--- a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json
+++ b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/c5dmachines.json
@@ -2,6 +2,7 @@
"network": {
"wallets": 6,
"nodes": 3,
+ "npn": 5,
"ConsensusProtocol": "future"
},
"instances": {
diff --git a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json
index 5bc36419d..76dd8e77f 100644
--- a/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json
+++ b/test/testdata/deployednettemplates/recipes/custom/example/network_templates/network-tpl.json
@@ -2,6 +2,7 @@
"network": {
"wallets": 6,
"nodes": 3,
+ "npn": 5,
"ConsensusProtocol": "future"
},
"instances": {
diff --git a/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json b/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json
index 5bc36419d..6e8c20c5c 100644
--- a/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json
+++ b/test/testdata/deployednettemplates/recipes/custom/network_templates/network-tpl.json
@@ -1,24 +1,25 @@
{
"network": {
- "wallets": 6,
- "nodes": 3,
+ "wallets": 3,
+ "nodes": 1,
+ "npn": 1,
"ConsensusProtocol": "future"
},
"instances": {
"relays": {
"config": "./configs/relay.json",
- "type": "m5d.4xl",
+ "type": "m5d.2xl",
"count": 1
},
"participatingNodes": {
"config": "./configs/node.json",
- "type": "m5d.4xl",
- "count": 3
+ "type": "m5d.2xl",
+ "count": 1
},
"nonParticipatingNodes": {
"config": "./configs/nonPartNode.json",
- "type": "m5d.4xl",
- "count": 5
+ "type": "m5d.2xl",
+ "count": 1
}
},
"groups": [
diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json b/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json
index 38d6fa96e..ae8d81048 100644
--- a/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json
+++ b/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json
@@ -1,7 +1,8 @@
{
"network": {
"wallets": 100,
- "nodes": 50
+ "nodes": 50,
+ "npn": 10
},
"instances": {
"relays": {
diff --git a/test/testdata/deployednettemplates/recipes/mmnet/Makefile b/test/testdata/deployednettemplates/recipes/mmnet/Makefile
index 21d38bbbd..5d8811ea7 100644
--- a/test/testdata/deployednettemplates/recipes/mmnet/Makefile
+++ b/test/testdata/deployednettemplates/recipes/mmnet/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 130 -R 136 -n 130 -H 16 --node-template configs/node.json --relay-template configs/relay.json --non-participating-node-template configs/nonPartNode.json
+PARAMS=-w 130 -R 136 -n 130 --npn-algod-nodes 16 --node-template configs/node.json --relay-template configs/relay.json --non-participating-node-template configs/nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/network-partition/Makefile b/test/testdata/deployednettemplates/recipes/network-partition/Makefile
index 24226bc5b..96c6b9a2f 100644
--- a/test/testdata/deployednettemplates/recipes/network-partition/Makefile
+++ b/test/testdata/deployednettemplates/recipes/network-partition/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 100 -R 8 -N 20 -n 100 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario1/Makefile b/test/testdata/deployednettemplates/recipes/scenario1/Makefile
index 24226bc5b..96c6b9a2f 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario1/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 100 -R 8 -N 20 -n 100 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/Makefile b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
index dc973560b..32fe1c2bb 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile
@@ -1,5 +1,5 @@
# scenario1s is scenario1 but smaller, (100 nodes, 100 wallets) -> (20 nodes, 20 wallets), each algod gets single tenancy on a smaller ec2 instance
-PARAMS=-w 20 -R 8 -N 20 -n 20 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario2/Makefile b/test/testdata/deployednettemplates/recipes/scenario2/Makefile
index c6f8415b9..b5539235e 100644
--- a/test/testdata/deployednettemplates/recipes/scenario2/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario2/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 400 -R 20 -N 40 -n 200 -H 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 400 -R 20 -N 40 -n 200 --npn-algod-nodes 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario3/Makefile b/test/testdata/deployednettemplates/recipes/scenario3/Makefile
index d53f1fb6d..092d2f12f 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario3/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 10000 -R 20 -N 100 -n 1000 -H 15 --node-template node.json --relay-template relay.json
+PARAMS=-w 10000 -R 20 -N 100 -n 1000 --npn-algod-nodes 15 --node-template node.json --relay-template relay.json
all: net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/Makefile b/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
index f49294616..fe1ea4f9e 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/Makefile
@@ -1,5 +1,5 @@
# scenario3s is scenario3 but smaller. (10000 wallets -> 500) (1000 algod participating nodes -> 100) It still keeps a global datacenter distribution.
-PARAMS=-w 500 -R 20 -N 100 -n 100 -H 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 500 -R 20 -N 100 -n 100 --npn-algod-nodes 15 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
SOURCES=node.json ${GOPATH}/bin/netgoal Makefile relay.json nonPartNode.json
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/Makefile b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
index bd62e7b67..e86564e10 100644
--- a/test/testdata/deployednettemplates/recipes/txnsync/Makefile
+++ b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 10 -R 4 -N 10 -n 10 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 10 -R 4 -N 10 -n 10 --npn-algod-nodes 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: net.json genesis.json
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index 551a32f34..b4e734c43 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -25,6 +25,7 @@ import (
"database/sql"
"errors"
"fmt"
+ "os"
"reflect"
"runtime"
"strings"
@@ -209,7 +210,7 @@ func (db *Accessor) IsSharedCacheConnection() bool {
// Atomic executes a piece of code with respect to the database atomically.
// For transactions where readOnly is false, sync determines whether or not to wait for the result.
// The return error of fn should be a native sqlite3.Error type or an error wrapping it.
-// DO NOT return a custom error - the internal logic of Atmoic expects an sqlite error and uses that value.
+// DO NOT return a custom error - the internal logic of Atomic expects an sqlite error and uses that value.
func (db *Accessor) Atomic(fn idemFn, extras ...interface{}) (err error) {
return db.atomic(fn, extras...)
}
@@ -228,6 +229,12 @@ func (db *Accessor) atomic(fn idemFn, extras ...interface{}) (err error) {
if !ok {
err = fmt.Errorf("%v", r)
}
+
+ buf := make([]byte, 16*1024)
+ stlen := runtime.Stack(buf, false)
+ errstr := string(buf[:stlen])
+ fmt.Fprintf(os.Stderr, "recovered panic in atomic: %s", errstr)
+
}
}()
@@ -309,7 +316,7 @@ func (db *Accessor) atomic(fn idemFn, extras ...interface{}) (err error) {
return
}
-// ResetTransactionWarnDeadline allow the atomic function to extend it's warn deadline by setting a new deadline.
+// ResetTransactionWarnDeadline allow the atomic function to extend its warn deadline by setting a new deadline.
// The Accessor can be copied and therefore isn't suitable for multi-threading directly,
// however, the transaction context and transaction object can be used to uniquely associate the request
// with a particular deadline.