summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2022-05-09 09:42:56 -0400
committerGitHub <noreply@github.com>2022-05-09 09:42:56 -0400
commit5e6bc6fcfa117c5e37a1e2d1bf1667e86f1941c9 (patch)
treea447a47d71f0fc4387a8f665395f510e48141e41
parentaa2fb0ee33f348270fe390c033e77ee139f8af31 (diff)
parent0d1e44872758ab94fefe73ce10325f9358b431f5 (diff)
Merge pull request #3960 from Algo-devops-service/relstable3.6.2v3.6.2-stable
-rw-r--r--.circleci/config.yml48
-rw-r--r--.github/workflows/build.yml2
-rw-r--r--.github/workflows/reviewdog.yml2
-rw-r--r--Makefile1
-rw-r--r--agreement/actions.go4
-rw-r--r--agreement/cryptoVerifier.go5
-rw-r--r--agreement/pseudonode.go9
-rw-r--r--buildnumber.dat2
-rw-r--r--cmd/algocfg/getCommand.go10
-rw-r--r--cmd/algocfg/getCommand_test.go (renamed from util/metrics/stringGauge_test.go)67
-rw-r--r--cmd/algod/main.go26
-rw-r--r--cmd/algokey/keyreg.go10
-rw-r--r--cmd/algokey/part.go2
-rw-r--r--cmd/goal/account.go4
-rw-r--r--cmd/goal/application.go6
-rw-r--r--cmd/goal/asset.go82
-rw-r--r--cmd/goal/bundledGenesisInject.go2
-rw-r--r--cmd/goal/clerk.go61
-rw-r--r--cmd/goal/commands.go24
-rw-r--r--cmd/goal/logging.go6
-rw-r--r--cmd/goal/messages.go2
-rw-r--r--cmd/goal/network.go9
-rw-r--r--cmd/opdoc/opdoc.go242
-rw-r--r--cmd/opdoc/tmLanguage.go39
-rw-r--r--cmd/tealdbg/cdtSession.go67
-rw-r--r--cmd/tealdbg/cdtSession_test.go2
-rw-r--r--cmd/tealdbg/cdtState.go68
-rw-r--r--cmd/tealdbg/cdtStateObjects.go67
-rw-r--r--cmd/tealdbg/cdtdbg_test.go6
-rw-r--r--cmd/tealdbg/debugger.go163
-rw-r--r--cmd/tealdbg/debugger_test.go198
-rw-r--r--cmd/tealdbg/local.go39
-rw-r--r--cmd/tealdbg/local_test.go430
-rw-r--r--cmd/tealdbg/util.go27
-rw-r--r--cmd/tealdbg/webdbg.go19
-rw-r--r--compactcert/abstractions.go2
-rw-r--r--compactcert/builder.go2
-rw-r--r--compactcert/worker_test.go2
-rw-r--r--config/consensus.go13
-rw-r--r--config/localTemplate.go2
-rw-r--r--config/version.go2
-rw-r--r--crypto/secp256k1/secp256_test.go2
-rw-r--r--daemon/algod/api/algod.oas2.json73
-rw-r--r--daemon/algod/api/algod.oas3.yml98
-rw-r--r--daemon/algod/api/client/restClient.go2
-rw-r--r--daemon/algod/api/server/v2/dryrun.go2
-rw-r--r--daemon/algod/api/server/v2/dryrun_test.go54
-rw-r--r--daemon/algod/api/server/v2/generated/private/routes.go293
-rw-r--r--daemon/algod/api/server/v2/generated/private/types.go7
-rw-r--r--daemon/algod/api/server/v2/generated/routes.go418
-rw-r--r--daemon/algod/api/server/v2/generated/types.go7
-rw-r--r--daemon/algod/api/server/v2/handlers.go36
-rw-r--r--daemon/algod/api/server/v2/test/handlers_test.go62
-rw-r--r--daemon/algod/deadlockLogger.go3
-rw-r--r--daemon/algod/server.go9
-rw-r--r--daemon/kmd/wallet/driver/ledger.go1
-rw-r--r--daemon/kmd/wallet/driver/ledger_hid.go23
-rw-r--r--data/abi/abi_encode.go71
-rw-r--r--data/abi/abi_encode_test.go81
-rw-r--r--data/abi/abi_json_test.go9
-rw-r--r--data/abi/abi_type.go13
-rw-r--r--data/bookkeeping/block.go2
-rw-r--r--data/ledger_test.go43
-rw-r--r--data/transactions/logic/.gitignore2
-rw-r--r--data/transactions/logic/README.md21
-rw-r--r--data/transactions/logic/README_in.md3
-rw-r--r--data/transactions/logic/TEAL_opcodes.md96
-rw-r--r--data/transactions/logic/assembler.go1435
-rw-r--r--data/transactions/logic/assembler_test.go307
-rw-r--r--data/transactions/logic/backwardCompat_test.go20
-rw-r--r--data/transactions/logic/debugger.go57
-rw-r--r--data/transactions/logic/debugger_test.go59
-rw-r--r--data/transactions/logic/doc.go204
-rw-r--r--data/transactions/logic/doc_test.go15
-rw-r--r--data/transactions/logic/eval.go1923
-rw-r--r--data/transactions/logic/evalAppTxn_test.go67
-rw-r--r--data/transactions/logic/evalCrypto_test.go96
-rw-r--r--data/transactions/logic/evalStateful_test.go229
-rw-r--r--data/transactions/logic/eval_test.go274
-rw-r--r--data/transactions/logic/export_test.go3
-rw-r--r--data/transactions/logic/fields.go992
-rw-r--r--data/transactions/logic/fields_string.go9
-rw-r--r--data/transactions/logic/fields_test.go10
-rw-r--r--data/transactions/logic/langspec.json2222
-rw-r--r--data/transactions/logic/ledger_test.go2
-rw-r--r--data/transactions/logic/opcodes.go677
-rw-r--r--data/transactions/logic/opcodes_test.go21
-rw-r--r--data/transactions/logic/sourcemap.go95
-rw-r--r--data/transactions/logic/sourcemap_test.go64
-rw-r--r--data/transactions/logic/teal.tmLanguage.json136
-rwxr-xr-xdata/transactions/logic/tlhc.py4
-rw-r--r--data/transactions/transaction.go36
-rw-r--r--data/transactions/verify/txn.go2
-rw-r--r--data/transactions/verify/verifiedTxnCache.go2
-rw-r--r--docker/build/cicd.ubuntu.Dockerfile6
-rw-r--r--gen/generate.go4
-rw-r--r--go.mod26
-rw-r--r--go.sum79
-rw-r--r--ledger/acctupdates.go21
-rw-r--r--ledger/acctupdates_test.go274
-rw-r--r--ledger/applications_test.go9
-rw-r--r--ledger/apply/application.go2
-rw-r--r--ledger/apply/asset.go2
-rw-r--r--ledger/catchpointtracker_test.go3
-rw-r--r--ledger/internal/apptxn_test.go1101
-rw-r--r--ledger/internal/double_test.go178
-rw-r--r--ledger/internal/eval_blackbox_test.go532
-rw-r--r--ledger/internal/export_test.go28
-rw-r--r--ledger/internal/prefetcher/error.go43
-rw-r--r--ledger/internal/prefetcher/prefetcher.go21
-rw-r--r--ledger/internal/prefetcher/prefetcher_alignment_test.go2
-rw-r--r--ledger/internal/prefetcher/prefetcher_test.go203
-rw-r--r--ledger/internal/txnbench_test.go248
-rw-r--r--ledger/ledger.go6
-rw-r--r--ledger/ledger_test.go46
-rw-r--r--ledger/ledgercore/accountdata.go5
-rw-r--r--ledger/ledgercore/accountresource.go11
-rw-r--r--ledger/tracker.go9
-rw-r--r--ledger/tracker_test.go173
-rw-r--r--libgoal/libgoal.go4
-rw-r--r--libgoal/participation.go14
-rw-r--r--logging/telemetry.go12
-rw-r--r--logging/telemetry_test.go39
-rw-r--r--logging/telemetryspec/event.go11
-rw-r--r--netdeploy/network.go8
-rw-r--r--netdeploy/networkTemplate.go16
-rw-r--r--network/dialer.go3
-rw-r--r--network/rateLimitingTransport.go13
-rw-r--r--network/wsNetwork.go3
-rw-r--r--node/node.go21
-rw-r--r--scripts/buildtools/go.mod6
-rw-r--r--scripts/buildtools/go.sum8
-rwxr-xr-xscripts/buildtools/install_buildtools.sh6
-rwxr-xr-xscripts/create_and_deploy_recipe.sh1
-rwxr-xr-xscripts/get_golang_version.sh6
-rwxr-xr-xscripts/install_linux_deps.sh2
-rwxr-xr-xscripts/travis/before_build.sh1
-rw-r--r--shared/pingpong/accounts.go3
-rw-r--r--shared/pingpong/pingpong.go5
-rw-r--r--test/README.md8
-rw-r--r--test/e2e-go/cli/goal/account_test.go9
-rw-r--r--test/e2e-go/cli/goal/clerk_test.go3
-rw-r--r--test/e2e-go/cli/goal/node_cleanup_test.go3
-rw-r--r--test/e2e-go/features/participation/participationRewards_test.go4
-rw-r--r--test/e2e-go/features/transactions/sendReceive_test.go1
-rw-r--r--test/framework/fixtures/libgoalFixture.go2
-rw-r--r--test/heapwatch/heapWatch.py4
-rw-r--r--test/muleCI/mule.yaml29
-rwxr-xr-xtest/scripts/e2e.sh25
-rwxr-xr-xtest/scripts/e2e_go_tests.sh1
-rwxr-xr-xtest/scripts/e2e_subs/app-assets.sh8
-rwxr-xr-xtest/scripts/e2e_subs/asset-misc.sh6
-rwxr-xr-xtest/scripts/e2e_subs/assets-app-b.sh51
-rwxr-xr-xtest/scripts/e2e_subs/assets-app.sh95
-rwxr-xr-xtest/scripts/e2e_subs/create_destroy_optin_optout.sh8
-rwxr-xr-xtest/scripts/e2e_subs/dex.sh2
-rwxr-xr-xtest/scripts/e2e_subs/dynamic-fee-teal-test.sh1
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-real-assets-round.sh2
-rwxr-xr-xtest/scripts/e2e_subs/e2e-teal.sh12
-rwxr-xr-xtest/scripts/e2e_subs/limit-swap-test.sh4
-rwxr-xr-xtest/scripts/e2e_subs/sectok-app.sh106
-rwxr-xr-xtest/scripts/e2e_subs/teal-app-params.sh2
-rw-r--r--test/scripts/e2e_subs/tealprogs/approve-all5.teal2
-rw-r--r--test/scripts/e2e_subs/tealprogs/quine.map1
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/Makefile12
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py30
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/genesis.json69
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/net.json346
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/node.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json5
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/recipe.json7
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/relay.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/topology.json64
-rw-r--r--util/condvar/timedwait.go6
-rw-r--r--util/io.go11
-rw-r--r--util/io_test.go (renamed from cmd/tealdbg/util_test.go)25
-rw-r--r--util/metrics/counter.go9
-rw-r--r--util/metrics/gauge.go8
-rw-r--r--util/metrics/metrics_test.go21
-rw-r--r--util/metrics/registry.go2
-rw-r--r--util/metrics/registryCommon.go11
-rw-r--r--util/metrics/registry_test.go27
-rw-r--r--util/metrics/stringGauge.go64
-rw-r--r--util/metrics/tagcounter.go4
-rw-r--r--util/metrics/tagcounter_test.go2
-rw-r--r--util/sleep.go (renamed from util/metrics/stringGaugeCommon.go)19
-rw-r--r--util/sleep_linux.go39
-rw-r--r--util/sleep_linux_32.go35
-rw-r--r--util/sleep_linux_64.go34
190 files changed, 10961 insertions, 5682 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 327b49719..34828ef5e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -138,7 +138,9 @@ workflows:
- << matrix.platform >>_build
filters:
<<: *filters-nightly
- context: slack-secrets
+ context:
+ - slack-secrets
+ - aws-secrets
- tests_verification_job:
name: << matrix.platform >>_<< matrix.job_type >>_verification
@@ -196,7 +198,7 @@ commands:
shell: bash.exe
command: |
choco install -y msys2 pacman make wget --force
- choco install -y golang --version=1.14.7 --force
+ choco install -y golang --version=1.16.15 --force
choco install -y python3 --version=3.7.3 --force
export msys2='cmd //C RefreshEnv.cmd '
export msys2+='& set MSYS=winsymlinks:nativestrict '
@@ -335,9 +337,6 @@ commands:
key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
paths:
- tmp/go-cache
- - upload_to_buildpulse:
- platform: << parameters.platform >>
- path: << parameters.result_path >>/<< parameters.result_subdir>>
upload_coverage:
description: Collect coverage reports and upload them
@@ -348,40 +347,6 @@ commands:
command: |
scripts/travis/upload_coverage.sh || true
- upload_to_buildpulse:
- description: Collect build reports and upload them
- parameters:
- platform:
- type: string
- path:
- type: string
- steps:
- - run:
- name: Send test results to BuildPulse
- when: always
- command: |
- set -e
- if ! ls << parameters.path >>/*/*.xml &> /dev/null; then exit 0; fi
- sed -i"" -e 's/classname="/classname="<< parameters.platform >>-/' << parameters.path >>/*/*.xml
- case "<< parameters.platform >>" in
- arm64)
- URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-arm64
- SUM=53f94c29ad162c2b9ebb1f4a2f967f5262c0459ee4a0c34332977d8c89aafc18
- ;;
- amd64)
- URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-amd64
- SUM=4655e54d756580c0de0112cab488e6e08d0af75e9fc8caea2d63f9e13be8beb5
- ;;
- mac_amd64)
- URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-darwin-amd64
- SUM=2f9e20a6f683c80f35d04e36bc57ecfe605bb48fee5a1b8d8f7c45094028eea3
- ;;
- esac
- curl -fsSL --retry 3 --retry-connrefused $URL > ./buildpulse-test-reporter
- echo "$SUM *buildpulse-test-reporter" | shasum -a 256 -c --status
- chmod +x ./buildpulse-test-reporter
- ./buildpulse-test-reporter submit << parameters.path >> --account-id 23182699 --repository-id 191266671 || true
-
generic_integration:
description: Run integration tests from build workspace, for re-use by diferent architectures
parameters:
@@ -432,6 +397,7 @@ commands:
export PARTITION_ID=${CIRCLE_NODE_INDEX}
export PARALLEL_FLAG="-p 1"
test/scripts/run_integration_tests.sh
+
- store_artifacts:
path: << parameters.result_path >>
destination: test-results
@@ -441,9 +407,6 @@ commands:
root: << parameters.result_path >>
paths:
- << parameters.result_subdir >>
- - upload_to_buildpulse:
- platform: << parameters.platform >>
- path: << parameters.result_path >>/<< parameters.result_subdir>>
tests_verification_command:
description: Check if all tests were run at least once and only once across all parallel runs
@@ -651,6 +614,7 @@ jobs:
working_directory: << pipeline.parameters.build_dir >>/project
environment:
E2E_TEST_FILTER: "SCRIPTS"
+ E2E_PLATFORM: << parameters.platform >>
steps:
- prepare_build_dir
- prepare_go
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 4bcee861e..4cc8eaba4 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -20,7 +20,7 @@ jobs:
- name: Install golang
uses: actions/setup-go@v2
with:
- go-version: '1.14.7'
+ go-version: '1.16.15'
- name: Build Test
run: |
export ALGORAND_DEADLOCK=enable
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 30b76064b..134685fc6 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -44,7 +44,7 @@ jobs:
- name: Install specific golang
uses: actions/setup-go@v2
with:
- go-version: '1.16.6'
+ go-version: '1.16.15'
- name: Create folders for golangci-lint
run: mkdir -p cicdtmp/golangci-lint
- name: Check if custom golangci-lint is already built
diff --git a/Makefile b/Makefile
index f64ab7013..90bcc7021 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,6 @@ else
export GOPATH := $(shell go env GOPATH)
GOPATH1 := $(firstword $(subst :, ,$(GOPATH)))
endif
-export GO111MODULE := on
export GOPROXY := direct
SRCPATH := $(shell pwd)
ARCH := $(shell ./scripts/archtype.sh)
diff --git a/agreement/actions.go b/agreement/actions.go
index be216da4e..42cfbcebb 100644
--- a/agreement/actions.go
+++ b/agreement/actions.go
@@ -237,6 +237,8 @@ func (a ensureAction) do(ctx context.Context, s *Service) {
Round: uint64(a.Certificate.Round),
ValidatedAt: a.Payload.validatedAt,
PreValidated: true,
+ PropBufLen: uint64(len(s.demux.rawProposals)),
+ VoteBufLen: uint64(len(s.demux.rawVotes)),
})
s.Ledger.EnsureValidatedBlock(a.Payload.ve, a.Certificate)
} else {
@@ -249,6 +251,8 @@ func (a ensureAction) do(ctx context.Context, s *Service) {
Round: uint64(a.Certificate.Round),
ValidatedAt: a.Payload.validatedAt,
PreValidated: false,
+ PropBufLen: uint64(len(s.demux.rawProposals)),
+ VoteBufLen: uint64(len(s.demux.rawVotes)),
})
s.Ledger.EnsureBlock(block, a.Certificate)
}
diff --git a/agreement/cryptoVerifier.go b/agreement/cryptoVerifier.go
index ff8a6d6aa..cf6c466e5 100644
--- a/agreement/cryptoVerifier.go
+++ b/agreement/cryptoVerifier.go
@@ -22,8 +22,12 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/metrics"
)
+var voteVerifierOutFullCounter = metrics.MakeCounter(
+ metrics.MetricName{Name: "algod_agreement_vote_verifier_responses_dropped", Description: "Number of voteVerifier responses dropped due to full channel"})
+
// TODO put these in config
const (
voteParallelism = 16
@@ -210,6 +214,7 @@ func (c *poolCryptoVerifier) voteFillWorker(toBundleWait chan<- bundleFuture) {
select {
case c.votes.out <- asyncVerifyVoteResponse{index: votereq.TaskIndex, err: err, cancelled: true}:
default:
+ voteVerifierOutFullCounter.Inc(nil)
c.log.Infof("poolCryptoVerifier.voteFillWorker unable to write failed enqueue response to output channel")
}
}
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index f52854d6a..bdaa2f359 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/logging/logspec"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/metrics"
)
// TODO put these in config
@@ -43,6 +44,9 @@ var errPseudonodeVerifierClosedChannel = errors.New("crypto verifier closed the
var errPseudonodeNoVotes = errors.New("no valid participation keys to generate votes for given round")
var errPseudonodeNoProposals = errors.New("no valid participation keys to generate proposals for given round")
+var pseudonodeBacklogFullByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_dropped_{TAG}", "Number of pseudonode tasks dropped per type")
+var pseudonodeResultTimeoutsByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_timeouts_{TAG}", "Number of pseudonode task result timeouts per type")
+
// A pseudonode creates proposals and votes with a KeyManager which holds participation keys.
//
// It constructs these messages as if they arrived from an external source and were verified.
@@ -176,6 +180,7 @@ func (n asyncPseudonode) MakeProposals(ctx context.Context, r round, p period) (
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
+ pseudonodeBacklogFullByType.Add("proposal", 1)
return nil, fmt.Errorf("unable to make proposal for (%d, %d): %w", r, p, errPseudonodeBacklogFull)
}
}
@@ -193,6 +198,7 @@ func (n asyncPseudonode) MakeVotes(ctx context.Context, r round, p period, s ste
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
+ pseudonodeBacklogFullByType.Add("vote", 1)
return nil, fmt.Errorf("unable to make vote for (%d, %d, %d): %w", r, p, s, errPseudonodeBacklogFull)
}
}
@@ -474,6 +480,7 @@ verifiedVotesLoop:
return
case <-outputTimeout:
// we've been waiting for too long for this vote to be written to the output.
+ pseudonodeResultTimeoutsByType.Add("vote", 1)
t.node.log.Warnf("pseudonode.makeVotes: unable to write vote to output channel for round %d, period %d", t.round, t.period)
outputTimeout = nil
}
@@ -577,6 +584,7 @@ verifiedVotesLoop:
return
case <-outputTimeout:
// we've been waiting for too long for this vote to be written to the output.
+ pseudonodeResultTimeoutsByType.Add("pvote", 1)
t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal vote to output channel for round %d, period %d", t.round, t.period)
outputTimeout = nil
}
@@ -597,6 +605,7 @@ verifiedPayloadsLoop:
return
case <-outputTimeout:
// we've been waiting for too long for this vote to be written to the output.
+ pseudonodeResultTimeoutsByType.Add("ppayload", 1)
t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal payload to output channel for round %d, period %d", t.round, t.period)
outputTimeout = nil
}
diff --git a/buildnumber.dat b/buildnumber.dat
index d00491fd7..0cfbf0888 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-1
+2
diff --git a/cmd/algocfg/getCommand.go b/cmd/algocfg/getCommand.go
index 86af9cc10..c22ff597c 100644
--- a/cmd/algocfg/getCommand.go
+++ b/cmd/algocfg/getCommand.go
@@ -51,14 +51,14 @@ var getCmd = &cobra.Command{
return
}
- val, err := getObjectProperty(cfg, getParameterArg)
+ val, err := serializeObjectProperty(cfg, getParameterArg)
if err != nil {
reportWarnf("Error retrieving property '%s' - %s", getParameterArg, err)
anyError = true
return
}
- fmt.Printf("%s", val)
+ fmt.Print(val)
})
if anyError {
os.Exit(1)
@@ -66,14 +66,14 @@ var getCmd = &cobra.Command{
},
}
-func getObjectProperty(object interface{}, property string) (ret interface{}, err error) {
+func serializeObjectProperty(object interface{}, property string) (ret string, err error) {
v := reflect.ValueOf(object)
val := reflect.Indirect(v)
f := val.FieldByName(property)
if !f.IsValid() {
- return object, fmt.Errorf("unknown property named '%s'", property)
+ return "", fmt.Errorf("unknown property named '%s'", property)
}
- return f.Interface(), nil
+ return fmt.Sprintf("%v", f.Interface()), nil
}
diff --git a/util/metrics/stringGauge_test.go b/cmd/algocfg/getCommand_test.go
index 97296ba5c..0547c1ccc 100644
--- a/util/metrics/stringGauge_test.go
+++ b/cmd/algocfg/getCommand_test.go
@@ -14,42 +14,51 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package metrics
+package main
import (
- "strings"
+ "fmt"
"testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
)
-func hasKey(data map[string]string, key string) bool {
- _, ok := data[key]
- return ok
-}
-
-func TestMetricStringGauge(t *testing.T) {
+func TestPrint(t *testing.T) {
partitiontest.PartitionTest(t)
- stringGauge := MakeStringGauge()
- stringGauge.Set("number-key", "1")
- stringGauge.Set("string-key", "value")
-
- results := make(map[string]string)
- DefaultRegistry().AddMetrics(results)
-
- // values are populated
- require.Equal(t, 2, len(results))
- require.True(t, hasKey(results, "number-key"))
- require.Equal(t, "1", results["number-key"])
- require.True(t, hasKey(results, "string-key"))
- require.Equal(t, "value", results["string-key"])
-
- // not included in string builder
- buf := strings.Builder{}
- DefaultRegistry().WriteMetrics(&buf, "not used")
- require.Equal(t, "", buf.String())
-
- stringGauge.Deregister(nil)
+ testcases := []struct {
+ Input interface{}
+ expected string
+ }{
+ {
+ Input: "string",
+ expected: "string",
+ },
+ {
+ Input: uint64(1234),
+ expected: "1234",
+ },
+ {
+ Input: int64(-1234),
+ expected: "-1234",
+ },
+ {
+ Input: true,
+ expected: "true",
+ },
+ {
+ Input: time.Second,
+ expected: "1s",
+ },
+ }
+ for i, tc := range testcases {
+ t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
+ ret, err := serializeObjectProperty(tc, "Input")
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expected, ret)
+ })
+ }
}
diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index b29d55c2f..b0a45bc6a 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -50,10 +50,11 @@ var versionCheck = flag.Bool("v", false, "Display and write current build versio
var branchCheck = flag.Bool("b", false, "Display the git branch behind the build")
var channelCheck = flag.Bool("c", false, "Display and release channel behind the build")
var initAndExit = flag.Bool("x", false, "Initialize the ledger and exit")
+var logToStdout = flag.Bool("o", false, "Write to stdout instead of node.log by overriding config.LogSizeLimit to 0")
var peerOverride = flag.String("p", "", "Override phonebook with peer ip:port (or semicolon separated list: ip:port;ip:port;ip:port...)")
var listenIP = flag.String("l", "", "Override config.EndpointAddress (REST listening address) with ip:port")
var sessionGUID = flag.String("s", "", "Telemetry Session GUID to use")
-var telemetryOverride = flag.String("t", "", `Override telemetry setting if supported (Use "true", "false", "0" or "1"`)
+var telemetryOverride = flag.String("t", "", `Override telemetry setting if supported (Use "true", "false", "0" or "1")`)
var seed = flag.String("seed", "", "input to math/rand.Seed()")
func main() {
@@ -84,12 +85,12 @@ func run() int {
}
version := config.GetCurrentVersion()
- heartbeatGauge := metrics.MakeStringGauge()
- heartbeatGauge.Set("version", version.String())
- heartbeatGauge.Set("version-num", strconv.FormatUint(version.AsUInt64(), 10))
- heartbeatGauge.Set("channel", version.Channel)
- heartbeatGauge.Set("branch", version.Branch)
- heartbeatGauge.Set("commit-hash", version.GetCommitHash())
+ var baseHeartbeatEvent telemetryspec.HeartbeatEventDetails
+ baseHeartbeatEvent.Info.Version = version.String()
+ baseHeartbeatEvent.Info.VersionNum = strconv.FormatUint(version.AsUInt64(), 10)
+ baseHeartbeatEvent.Info.Channel = version.Channel
+ baseHeartbeatEvent.Info.Branch = version.Branch
+ baseHeartbeatEvent.Info.CommitHash = version.GetCommitHash()
if *branchCheck {
fmt.Println(config.Branch)
@@ -292,6 +293,10 @@ func run() int {
}
}
+ if logToStdout != nil && *logToStdout {
+ cfg.LogSizeLimit = 0
+ }
+
err = s.Initialize(cfg, phonebookAddresses, string(genesisText))
if err != nil {
fmt.Fprintln(os.Stderr, err)
@@ -339,12 +344,11 @@ func run() int {
defer ticker.Stop()
sendHeartbeat := func() {
- values := make(map[string]string)
+ values := make(map[string]float64)
metrics.DefaultRegistry().AddMetrics(values)
- heartbeatDetails := telemetryspec.HeartbeatEventDetails{
- Metrics: values,
- }
+ heartbeatDetails := baseHeartbeatEvent
+ heartbeatDetails.Metrics = values
log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.HeartbeatEvent, heartbeatDetails)
}
diff --git a/cmd/algokey/keyreg.go b/cmd/algokey/keyreg.go
index 445a07827..24ddafdd1 100644
--- a/cmd/algokey/keyreg.go
+++ b/cmd/algokey/keyreg.go
@@ -144,11 +144,11 @@ func run(params keyregCmdParams) error {
return errors.New("must provide --keyfile when registering participation keys")
}
if params.addr != "" {
- return errors.New("do not provide --address when registering participation keys")
+ return errors.New("do not provide --account when registering participation keys")
}
} else {
if params.addr == "" {
- return errors.New("must provide --address when bringing an account offline")
+ return errors.New("must provide --account when bringing an account offline")
}
if params.partkeyFile != "" {
return errors.New("do not provide --keyfile when bringing an account offline")
@@ -160,7 +160,7 @@ func run(params keyregCmdParams) error {
var err error
accountAddress, err = basics.UnmarshalChecksumAddress(params.addr)
if err != nil {
- return fmt.Errorf("unable to parse --address: %w", err)
+ return fmt.Errorf("unable to parse --account: %w", err)
}
}
@@ -172,8 +172,8 @@ func run(params keyregCmdParams) error {
params.txFile = fmt.Sprintf("%s.tx", params.partkeyFile)
}
- if util.FileExists(params.txFile) || params.txFile == stdoutFilenameValue {
- return fmt.Errorf("outputFile '%s' already exists", params.partkeyFile)
+ if params.txFile != stdoutFilenameValue && util.FileExists(params.txFile) {
+ return fmt.Errorf("outputFile '%s' already exists", params.txFile)
}
// Lookup information from partkey file
diff --git a/cmd/algokey/part.go b/cmd/algokey/part.go
index 57a4ddedc..41383cedb 100644
--- a/cmd/algokey/part.go
+++ b/cmd/algokey/part.go
@@ -180,7 +180,7 @@ func init() {
partGenerateCmd.Flags().StringVar(&partKeyfile, "keyfile", "", "Participation key filename")
partGenerateCmd.Flags().Uint64Var(&partFirstRound, "first", 0, "First round for participation key")
partGenerateCmd.Flags().Uint64Var(&partLastRound, "last", 0, "Last round for participation key")
- partGenerateCmd.Flags().Uint64Var(&partKeyDilution, "dilution", 0, "Key dilution (default to sqrt of validity window)")
+ partGenerateCmd.Flags().Uint64Var(&partKeyDilution, "dilution", 0, "Key dilution for two-level participation keys (defaults to sqrt of validity window)")
partGenerateCmd.Flags().StringVar(&partParent, "parent", "", "Address of parent account")
partGenerateCmd.MarkFlagRequired("first")
partGenerateCmd.MarkFlagRequired("last")
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index 202e7ca6a..f6e609a5a 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -161,7 +161,7 @@ func init() {
addParticipationKeyCmd.Flags().Uint64VarP(&roundLastValid, "roundLastValid", "", 0, "The last round for which the generated partkey will be valid")
addParticipationKeyCmd.MarkFlagRequired("roundLastValid")
addParticipationKeyCmd.Flags().StringVarP(&partKeyOutDir, "outdir", "o", "", "Save participation key file to specified output directory to (for offline creation)")
- addParticipationKeyCmd.Flags().Uint64VarP(&keyDilution, "keyDilution", "", 0, "Key dilution for two-level participation keys")
+ addParticipationKeyCmd.Flags().Uint64VarP(&keyDilution, "keyDilution", "", 0, "Key dilution for two-level participation keys (defaults to sqrt of validity window)")
// installParticipationKey flags
installParticipationKeyCmd.Flags().StringVar(&partKeyFile, "partkey", "", "Participation key file to install")
@@ -396,7 +396,7 @@ var newMultisigCmd = &cobra.Command{
}
}
if duplicatesDetected {
- reportWarnln(warnMultisigDuplicatesDetected)
+ reportWarnRawln(warnMultisigDuplicatesDetected)
}
// Generate a new address in the default wallet
addr, err := client.CreateMultisigAccount(wh, threshold, args)
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 9769e8cb5..907a7b291 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -408,13 +408,13 @@ func mustParseProgArgs() (approval []byte, clear []byte) {
}
if approvalProgFile != "" {
- approval = assembleFile(approvalProgFile)
+ approval = assembleFile(approvalProgFile, false)
} else {
approval = mustReadFile(approvalProgRawFile)
}
if clearProgFile != "" {
- clear = assembleFile(clearProgFile)
+ clear = assembleFile(clearProgFile, false)
} else {
clear = mustReadFile(clearProgRawFile)
}
@@ -1238,7 +1238,7 @@ var methodAppCmd = &cobra.Command{
}
var retType *abi.Type
- if retTypeStr != "void" {
+ if retTypeStr != abi.VoidReturnType {
theRetType, err := abi.TypeOf(retTypeStr)
if err != nil {
reportErrorf("cannot cast %s to abi type: %v", retTypeStr, err)
diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go
index e98ba5e95..9befa127d 100644
--- a/cmd/goal/asset.go
+++ b/cmd/goal/asset.go
@@ -58,6 +58,7 @@ func init() {
assetCmd.AddCommand(sendAssetCmd)
assetCmd.AddCommand(infoAssetCmd)
assetCmd.AddCommand(freezeAssetCmd)
+ assetCmd.AddCommand(optinAssetCmd)
assetCmd.PersistentFlags().StringVarP(&walletName, "wallet", "w", "", "Set the wallet to be used for the selected operation")
@@ -116,12 +117,18 @@ func init() {
freezeAssetCmd.MarkFlagRequired("account")
freezeAssetCmd.MarkFlagRequired("freeze")
+ optinAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "Unit name of the asset being accepted")
+ optinAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "ID of the asset being accepted")
+ optinAssetCmd.Flags().StringVarP(&account, "account", "a", "", "Account address to opt in to using the asset (if not specified, uses default account)")
+ optinAssetCmd.Flags().StringVar(&assetCreator, "creator", "", "Account address for asset creator")
+
// Add common transaction flags to all txn-generating asset commands
addTxnFlags(createAssetCmd)
addTxnFlags(destroyAssetCmd)
addTxnFlags(configAssetCmd)
addTxnFlags(sendAssetCmd)
addTxnFlags(freezeAssetCmd)
+ addTxnFlags(optinAssetCmd)
infoAssetCmd.Flags().Uint64Var(&assetID, "assetid", 0, "ID of the asset to look up")
infoAssetCmd.Flags().StringVar(&assetUnitName, "asset", "", "DEPRECATED! Unit name of the asset to look up")
@@ -661,6 +668,81 @@ func assetDecimalsFmt(amount uint64, decimals uint32) string {
return fmt.Sprintf("%d.%0*d", amount/pow, decimals, amount%pow)
}
+var optinAssetCmd = &cobra.Command{
+ Use: "optin",
+ Short: "Optin to assets",
+ Long: "Opt in to receive a new asset. An account will begin accepting an asset by issuing a zero-amount asset transfer to itself.",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, _ []string) {
+ checkTxValidityPeriodCmdFlags(cmd)
+
+ dataDir := ensureSingleDataDir()
+ client := ensureFullClient(dataDir)
+ accountList := makeAccountsList(dataDir)
+ // Opt in txns are always 0
+ const xferAmount uint64 = 0
+
+ creatorResolved := accountList.getAddressByName(assetCreator)
+
+ lookupAssetID(cmd, creatorResolved, client)
+
+ // Check if from was specified, else use default
+ if account == "" {
+ account = accountList.getDefaultAccount()
+ }
+ tx, err := client.MakeUnsignedAssetSendTx(assetID, xferAmount, account, "", "")
+ if err != nil {
+ reportErrorf("Cannot construct transaction: %s", err)
+ }
+
+ tx.Note = parseNoteField(cmd)
+ tx.Lease = parseLease(cmd)
+
+ firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
+ if err != nil {
+ reportErrorf("Cannot determine last valid round: %s", err)
+ }
+
+ tx, err = client.FillUnsignedTxTemplate(account, firstValid, lastValid, fee, tx)
+ if err != nil {
+ reportErrorf("Cannot construct transaction: %s", err)
+ }
+
+ explicitFee := cmd.Flags().Changed("fee")
+ if explicitFee {
+ tx.Fee = basics.MicroAlgos{Raw: fee}
+ }
+
+ if outFilename == "" {
+ wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
+ signedTxn, err := client.SignTransactionWithWallet(wh, pw, tx)
+ if err != nil {
+ reportErrorf(errorSigningTX, err)
+ }
+
+ txid, err := client.BroadcastTransaction(signedTxn)
+ if err != nil {
+ reportErrorf(errorBroadcastingTX, err)
+ }
+
+ // Report tx details to user
+ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
+
+ if !noWaitAfterSend {
+ _, err = waitForCommit(client, txid, lastValid)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ }
+ } else {
+ err = writeTxnToFile(client, sign, dataDir, walletName, tx, outFilename)
+ if err != nil {
+ reportErrorf(err.Error())
+ }
+ }
+ },
+}
+
var infoAssetCmd = &cobra.Command{
Use: "info",
Short: "Look up current parameters for an asset",
diff --git a/cmd/goal/bundledGenesisInject.go b/cmd/goal/bundledGenesisInject.go
index 86bd49189..aef8101b4 100644
--- a/cmd/goal/bundledGenesisInject.go
+++ b/cmd/goal/bundledGenesisInject.go
@@ -1,4 +1,4 @@
-// Code generated by bundle_genesis_json.sh, along with langspec.json. DO NOT EDIT.
+// Code generated by bundle_genesis_json.sh. DO NOT EDIT.
package main
var genesisMainnet []byte
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 5359c372f..11275c875 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -18,6 +18,7 @@ package main
import (
"encoding/base64"
+ "encoding/json"
"fmt"
"io"
"os"
@@ -49,6 +50,7 @@ var (
rejectsFilename string
closeToAddress string
noProgramOutput bool
+ writeSourceMap bool
signProgram bool
programSource string
argB64Strings []string
@@ -123,6 +125,7 @@ func init() {
compileCmd.Flags().BoolVarP(&disassemble, "disassemble", "D", false, "disassemble a compiled program")
compileCmd.Flags().BoolVarP(&noProgramOutput, "no-out", "n", false, "don't write contract program binary")
+ compileCmd.Flags().BoolVarP(&writeSourceMap, "map", "m", false, "write out source map")
compileCmd.Flags().BoolVarP(&signProgram, "sign", "s", false, "sign program, output is a binary signed LogicSig record")
compileCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename to write program bytes or signed LogicSig to")
compileCmd.Flags().StringVarP(&account, "account", "a", "", "Account address to sign the program (If not specified, uses default account)")
@@ -331,7 +334,7 @@ var sendCmd = &cobra.Command{
if logicSigFile != "" {
reportErrorln("should use at most one of --from-program/-F or --from-program-bytes/-P --logic-sig/-L")
}
- program = assembleFile(programSource)
+ program = assembleFile(programSource, false)
} else if logicSigFile != "" {
lsigFromArgs(&lsig)
}
@@ -724,7 +727,7 @@ var signCmd = &cobra.Command{
}
var lsig transactions.LogicSig
-
+ var authAddr basics.Address
var client libgoal.Client
var wh []byte
var pw []byte
@@ -733,7 +736,7 @@ var signCmd = &cobra.Command{
if logicSigFile != "" {
reportErrorln("goal clerk sign should have at most one of --program/-p or --logic-sig/-L")
}
- lsig.Logic = assembleFile(programSource)
+ lsig.Logic = assembleFile(programSource, false)
lsig.Args = getProgramArgs()
} else if logicSigFile != "" {
lsigFromArgs(&lsig)
@@ -743,6 +746,11 @@ var signCmd = &cobra.Command{
dataDir := ensureSingleDataDir()
client = ensureKmdClient(dataDir)
wh, pw = ensureWalletHandleMaybePassword(dataDir, walletName, true)
+ } else if signerAddress != "" {
+ authAddr, err = basics.UnmarshalChecksumAddress(signerAddress)
+ if err != nil {
+ reportErrorf("Signer invalid (%s): %v", signerAddress, err)
+ }
}
var outData []byte
@@ -790,6 +798,9 @@ var signCmd = &cobra.Command{
for _, txn := range txnGroups[group] {
if lsig.Logic != nil {
txn.Lsig = lsig
+ if signerAddress != "" {
+ txn.AuthAddr = authAddr
+ }
}
txnGroup = append(txnGroup, *txn)
}
@@ -927,7 +938,7 @@ func mustReadFile(fname string) []byte {
return contents
}
-func assembleFile(fname string) (program []byte) {
+func assembleFileImpl(fname string, printWarnings bool) *logic.OpStream {
text, err := readFile(fname)
if err != nil {
reportErrorf("%s: %s", fname, err)
@@ -948,9 +959,30 @@ func assembleFile(fname string) (program []byte) {
}
}
+ if printWarnings && len(ops.Warnings) != 0 {
+ for _, warning := range ops.Warnings {
+ reportWarnRawln(warning.Error())
+ }
+ plural := "s"
+ if len(ops.Warnings) == 1 {
+ plural = ""
+ }
+ reportWarnRawf("%d warning%s", len(ops.Warnings), plural)
+ }
+
+ return ops
+}
+
+func assembleFile(fname string, printWarnings bool) (program []byte) {
+ ops := assembleFileImpl(fname, printWarnings)
return ops.Program
}
+func assembleFileWithMap(fname string, printWarnings bool) ([]byte, logic.SourceMap) {
+ ops := assembleFileImpl(fname, printWarnings)
+ return ops.Program, logic.GetSourceMap([]string{fname}, ops.OffsetToLine)
+}
+
func disassembleFile(fname, outname string) {
program, err := readFile(fname)
if err != nil {
@@ -997,8 +1029,6 @@ var compileCmd = &cobra.Command{
disassembleFile(fname, outFilename)
continue
}
- program := assembleFile(fname)
- outblob := program
outname := outFilename
if outname == "" {
if fname == stdinFileNameValue {
@@ -1007,6 +1037,9 @@ var compileCmd = &cobra.Command{
outname = fmt.Sprintf("%s.tok", fname)
}
}
+ shouldPrintAdditionalInfo := outname != stdoutFilenameValue
+ program, sourceMap := assembleFileWithMap(fname, true)
+ outblob := program
if signProgram {
dataDir := ensureSingleDataDir()
accountList := makeAccountsList(dataDir)
@@ -1036,7 +1069,21 @@ var compileCmd = &cobra.Command{
reportErrorf("%s: %s", outname, err)
}
}
- if !signProgram && outname != stdoutFilenameValue {
+ if writeSourceMap {
+ if outname == stdoutFilenameValue {
+ reportErrorf("%s: %s", outname, "cannot print map to stdout")
+ }
+ mapname := outname + ".map"
+ pcblob, err := json.Marshal(sourceMap)
+ if err != nil {
+ reportErrorf("%s: %s", mapname, err)
+ }
+ err = writeFile(mapname, pcblob, 0666)
+ if err != nil {
+ reportErrorf("%s: %s", mapname, err)
+ }
+ }
+ if !signProgram && shouldPrintAdditionalInfo {
pd := logic.HashProgram(program)
addr := basics.Address(pd)
fmt.Printf("%s: %s\n", fname, addr.String())
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index f2eee7760..4f93b6fb4 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -517,19 +517,33 @@ func reportInfof(format string, args ...interface{}) {
reportInfoln(fmt.Sprintf(format, args...))
}
-func reportWarnln(args ...interface{}) {
- fmt.Print("Warning: ")
-
+// reportWarnRawln prints a warning message to stderr. Only use this function if that warning
+// message already indicates that it's a warning. Otherwise, use reportWarnln
+func reportWarnRawln(args ...interface{}) {
for _, line := range strings.Split(fmt.Sprint(args...), "\n") {
printable, line := unicodePrintable(line)
if !printable {
- fmt.Println(infoNonPrintableCharacters)
+ fmt.Fprintln(os.Stderr, infoNonPrintableCharacters)
}
- fmt.Println(line)
+ fmt.Fprintln(os.Stderr, line)
}
}
+// reportWarnRawf prints a warning message to stderr. Only use this function if that warning message
+// already indicates that it's a warning. Otherwise, use reportWarnf
+func reportWarnRawf(format string, args ...interface{}) {
+ reportWarnRawln(fmt.Sprintf(format, args...))
+}
+
+// reportWarnln prints a warning message to stderr. The message will be prefixed with "Warning: ".
+// If you don't want this prefix, use reportWarnRawln
+func reportWarnln(args ...interface{}) {
+ reportWarnRawf("Warning: %s", fmt.Sprint(args...))
+}
+
+// reportWarnf prints a warning message to stderr. The message will be prefixed with "Warning: ". If
+// you don't want this prefix, use reportWarnRawf
func reportWarnf(format string, args ...interface{}) {
reportWarnln(fmt.Sprintf(format, args...))
}
diff --git a/cmd/goal/logging.go b/cmd/goal/logging.go
index 333c3ee8f..ace99abbf 100644
--- a/cmd/goal/logging.go
+++ b/cmd/goal/logging.go
@@ -50,7 +50,7 @@ var loggingCmd = &cobra.Command{
Long: `Enable/disable and configure Algorand remote logging.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- fmt.Fprintf(os.Stderr, "Warning: `goal logging` deprecated, use `diagcfg telemetry status`\n")
+ reportWarnln("`goal logging` deprecated, use `diagcfg telemetry status`")
dataDir := ensureSingleDataDir()
cfg, err := logging.EnsureTelemetryConfig(&dataDir, "")
@@ -72,7 +72,7 @@ var enableCmd = &cobra.Command{
Long: `This will turn on remote logging. The "friendly name" for the node, used by logging, will be determined by -n nodename.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- fmt.Fprintf(os.Stderr, "Warning: `goal logging enable` deprecated, use `diagcfg telemetry enable`\n")
+ reportWarnln("`goal logging enable` deprecated, use `diagcfg telemetry enable`")
dataDir := ensureSingleDataDir()
cfg, err := logging.EnsureTelemetryConfig(&dataDir, "")
if err != nil {
@@ -93,7 +93,7 @@ var disableCmd = &cobra.Command{
Short: "Disable Algorand remote logging",
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
- fmt.Fprintf(os.Stderr, "Warning: `goal logging disable` deprecated, use `diagcfg telemetry disable`\n")
+ reportWarnf("`goal logging disable` deprecated, use `diagcfg telemetry disable`")
dataDir := ensureSingleDataDir()
cfg, err := logging.EnsureTelemetryConfig(&dataDir, "")
diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go
index e4b5348ab..6a772e137 100644
--- a/cmd/goal/messages.go
+++ b/cmd/goal/messages.go
@@ -131,7 +131,7 @@ const (
loggingNotEnabled = "Remote logging is current disabled"
loggingEnabled = "Remote logging is enabled. Node = %s, Guid = %s"
- infoNetworkAlreadyExists = "Network Root Directory '%s' already exists"
+ infoNetworkAlreadyExists = "Network Root Directory '%s' already exists and is not empty"
errorCreateNetwork = "Error creating private network: %s"
infoNetworkCreated = "Network %s created under %s"
errorLoadingNetwork = "Error loading deployed network: %s"
diff --git a/cmd/goal/network.go b/cmd/goal/network.go
index b7620b997..bea7d5ff4 100644
--- a/cmd/goal/network.go
+++ b/cmd/goal/network.go
@@ -34,6 +34,7 @@ var networkTemplateFile string
var startNode string
var noImportKeys bool
var noClean bool
+var devModeOverride bool
func init() {
networkCmd.AddCommand(networkCreateCmd)
@@ -46,6 +47,7 @@ func init() {
networkCreateCmd.MarkFlagRequired("template")
networkCreateCmd.Flags().BoolVarP(&noImportKeys, "noimportkeys", "K", false, "Do not import root keys when creating the network (by default will import)")
networkCreateCmd.Flags().BoolVar(&noClean, "noclean", false, "Prevents auto-cleanup on error - for diagnosing problems")
+ networkCreateCmd.Flags().BoolVar(&devModeOverride, "devMode", false, "Forces the configuration to enable DevMode, returns an error if the template is not compatible with DevMode.")
networkStartCmd.Flags().StringVarP(&startNode, "node", "n", "", "Specify the name of a specific node to start")
@@ -83,9 +85,8 @@ var networkCreateCmd = &cobra.Command{
if err != nil {
panic(err)
}
- // Make sure target directory doesn't already exist
- exists := util.FileExists(networkRootDir)
- if exists {
+ // Make sure target directory does not exist or is empty
+ if util.FileExists(networkRootDir) && !util.IsEmpty(networkRootDir) {
reportErrorf(infoNetworkAlreadyExists, networkRootDir)
}
@@ -101,7 +102,7 @@ var networkCreateCmd = &cobra.Command{
consensus, _ = config.PreloadConfigurableConsensusProtocols(dataDir)
}
- network, err := netdeploy.CreateNetworkFromTemplate(networkName, networkRootDir, networkTemplateFile, binDir, !noImportKeys, nil, consensus)
+ network, err := netdeploy.CreateNetworkFromTemplate(networkName, networkRootDir, networkTemplateFile, binDir, !noImportKeys, nil, consensus, devModeOverride)
if err != nil {
if noClean {
reportInfof(" ** failed ** - Preserving network rootdir '%s'", networkRootDir)
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 9b6fff979..46d3365cb 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -33,7 +33,6 @@ func opGroupMarkdownTable(names []string, out io.Writer) {
| - | -- |
`)
opSpecs := logic.OpsByName[logic.LogicVersion]
- // TODO: sort by logic.OpSpecs[].Opcode
for _, opname := range names {
spec, ok := opSpecs[opname]
if !ok {
@@ -49,15 +48,6 @@ func markdownTableEscape(x string) string {
return strings.ReplaceAll(x, "|", "\\|")
}
-func typeEnumTableMarkdown(out io.Writer) {
- fmt.Fprintf(out, "| Index | \"Type\" string | Description |\n")
- fmt.Fprintf(out, "| --- | --- | --- |\n")
- for i, name := range logic.TxnTypeNames {
- fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescriptions[name])
- }
- out.Write([]byte("\n"))
-}
-
func integerConstantsTableMarkdown(out io.Writer) {
fmt.Fprintf(out, "#### OnComplete\n\n")
fmt.Fprintf(out, "%s\n\n", logic.OnCompletionPreamble)
@@ -77,20 +67,22 @@ func integerConstantsTableMarkdown(out io.Writer) {
out.Write([]byte("\n"))
}
-type speccer interface {
- SpecByName(name string) logic.FieldSpec
-}
-
-func fieldSpecsMarkdown(out io.Writer, names []string, specs speccer) {
+func fieldGroupMarkdown(out io.Writer, group *logic.FieldGroup) {
showTypes := false
showVers := false
- spec0 := specs.SpecByName(names[0])
- opVer := spec0.OpVersion()
- for _, name := range names {
- if specs.SpecByName(name).Type() != logic.StackNone {
+ opVer := uint64(0)
+ for _, name := range group.Names {
+ spec, ok := group.SpecByName(name)
+ // reminder: group.Names can be "sparse" See: logic.TxnaFields
+ if !ok {
+ continue
+ }
+ if spec.Type().Typed() {
showTypes = true
}
- if specs.SpecByName(name).Version() != opVer {
+ if opVer == uint64(0) {
+ opVer = spec.Version()
+ } else if opVer != spec.Version() {
showVers = true
}
}
@@ -107,8 +99,11 @@ func fieldSpecsMarkdown(out io.Writer, names []string, specs speccer) {
headers += " Notes |\n"
widths += " --------- |\n"
fmt.Fprint(out, headers, widths)
- for i, name := range names {
- spec := specs.SpecByName(name)
+ for i, name := range group.Names {
+ spec, ok := group.SpecByName(name)
+ if !ok {
+ continue
+ }
str := fmt.Sprintf("| %d | %s", i, markdownTableEscape(name))
if showTypes {
str = fmt.Sprintf("%s | %s", str, markdownTableEscape(spec.Type().String()))
@@ -125,44 +120,9 @@ func fieldSpecsMarkdown(out io.Writer, names []string, specs speccer) {
fmt.Fprint(out, "\n")
}
-func transactionFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`txn` Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)):\n\n")
- fieldSpecsMarkdown(out, logic.TxnFieldNames, logic.TxnFieldSpecByName)
-}
-
-func globalFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`global` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.GlobalFieldNames, logic.GlobalFieldSpecByName)
-}
-
-func assetHoldingFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`asset_holding_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AssetHoldingFieldNames, logic.AssetHoldingFieldSpecByName)
-}
-
-func assetParamsFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`asset_params_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AssetParamsFieldNames, logic.AssetParamsFieldSpecByName)
-}
-
-func appParamsFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`app_params_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AppParamsFieldNames, logic.AppParamsFieldSpecByName)
-}
-
-func acctParamsFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`acct_params_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AcctParamsFieldNames, logic.AcctParamsFieldSpecByName)
-}
-
-func ecDsaCurvesMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`ECDSA` Curves:\n\n")
- fieldSpecsMarkdown(out, logic.EcdsaCurveNames, logic.EcdsaCurveSpecByName)
-}
-
func immediateMarkdown(op *logic.OpSpec) string {
markdown := ""
- for _, imm := range op.Details.Immediates {
+ for _, imm := range op.OpDetails.Immediates {
markdown = markdown + " " + imm.Name
}
return markdown
@@ -170,35 +130,43 @@ func immediateMarkdown(op *logic.OpSpec) string {
func stackMarkdown(op *logic.OpSpec) string {
out := "- Stack: "
- special := logic.OpStackEffects(op.Name)
- if special != "" {
- return out + special + "\n"
- }
out += "..."
- for i, v := range op.Args {
- out += fmt.Sprintf(", %c", rune(int('A')+i))
- if v.Typed() {
- out += fmt.Sprintf(": %s", v)
+ if op.Arg.Effects != "" {
+ out += ", " + op.Arg.Effects
+ } else {
+ for i, v := range op.Arg.Types {
+ out += fmt.Sprintf(", %c", rune(int('A')+i))
+ if v.Typed() {
+ out += fmt.Sprintf(": %s", v)
+ }
}
}
- out += " &rarr; ..."
- for i, rt := range op.Returns {
- out += ", "
- if len(op.Returns) > 1 {
- start := int('X')
- if len(op.Returns) > 3 {
- start = int('Z') + 1 - len(op.Returns)
+ if op.AlwaysExits() {
+ return out + " &rarr; _exits_\n"
+ }
+
+ out += " &rarr; ..."
+ if op.Return.Effects != "" {
+ out += ", " + op.Return.Effects
+ } else {
+ for i, rt := range op.Return.Types {
+ out += ", "
+ if len(op.Return.Types) > 1 {
+ start := int('X')
+ if len(op.Return.Types) > 3 {
+ start = int('Z') + 1 - len(op.Return.Types)
+ }
+ out += fmt.Sprintf("%c: ", rune(start+i))
}
- out += fmt.Sprintf("%c: ", rune(start+i))
+ out += rt.String()
}
- out += rt.String()
}
return out + "\n"
}
-func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
+func opToMarkdown(out io.Writer, op *logic.OpSpec, groupDocWritten map[string]bool) (err error) {
ws := ""
opextra := logic.OpImmediateNote(op.Name)
if opextra != "" {
@@ -214,45 +182,34 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
fmt.Fprintf(out, "- **Cost**:\n")
for _, cost := range costs {
if cost.From == cost.To {
- fmt.Fprintf(out, " - %d (v%d)\n", cost.Cost, cost.To)
+ fmt.Fprintf(out, " - %s (v%d)\n", cost.Cost, cost.To)
} else {
if cost.To < logic.LogicVersion {
- fmt.Fprintf(out, " - %d (v%d - v%d)\n", cost.Cost, cost.From, cost.To)
+ fmt.Fprintf(out, " - %s (v%d - v%d)\n", cost.Cost, cost.From, cost.To)
} else {
- fmt.Fprintf(out, " - %d (since v%d)\n", cost.Cost, cost.From)
+ fmt.Fprintf(out, " - %s (since v%d)\n", cost.Cost, cost.From)
}
}
}
} else {
cost := costs[0].Cost
- if cost != 1 {
- fmt.Fprintf(out, "- **Cost**: %d\n", cost)
+ if cost != "1" {
+ fmt.Fprintf(out, "- **Cost**: %s\n", cost)
}
}
if op.Version > 1 {
fmt.Fprintf(out, "- Availability: v%d\n", op.Version)
}
if !op.Modes.Any() {
- fmt.Fprintf(out, "- Mode: %s\n", op.Modes.String())
+ fmt.Fprintf(out, "- Mode: %s\n", op.Modes)
}
- switch op.Name {
- case "global":
- globalFieldsMarkdown(out)
- case "txn":
- transactionFieldsMarkdown(out)
- fmt.Fprintf(out, "\nTypeEnum mapping:\n\n")
- typeEnumTableMarkdown(out)
- case "asset_holding_get":
- assetHoldingFieldsMarkdown(out)
- case "asset_params_get":
- assetParamsFieldsMarkdown(out)
- case "app_params_get":
- appParamsFieldsMarkdown(out)
- case "acct_params_get":
- acctParamsFieldsMarkdown(out)
- default:
- if strings.HasPrefix(op.Name, "ecdsa") {
- ecDsaCurvesMarkdown(out)
+
+ for i := range op.OpDetails.Immediates {
+ group := op.OpDetails.Immediates[i].Group
+ if group != nil && group.Doc != "" && !groupDocWritten[group.Name] {
+ fmt.Fprintf(out, "\n`%s` %s:\n\n", group.Name, group.Doc)
+ fieldGroupMarkdown(out, group)
+ groupDocWritten[group.Name] = true
}
}
ode := logic.OpDocExtra(op.Name)
@@ -265,8 +222,9 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
func opsToMarkdown(out io.Writer) (err error) {
out.Write([]byte("# Opcodes\n\nOps have a 'cost' of 1 unless otherwise specified.\n\n"))
opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ written := make(map[string]bool)
for _, spec := range opSpecs {
- err = opToMarkdown(out, &spec)
+ err = opToMarkdown(out, &spec, written)
if err != nil {
return
}
@@ -280,7 +238,6 @@ type OpRecord struct {
Name string
Args string `json:",omitempty"`
Returns string `json:",omitempty"`
- Cost int
Size int
ArgEnum []string `json:",omitempty"`
@@ -321,29 +278,39 @@ func typeString(types []logic.StackType) string {
return string(out)
}
-func fieldsAndTypes(names []string, specs speccer) ([]string, string) {
- types := make([]logic.StackType, len(names))
- for i, name := range names {
- types[i] = specs.SpecByName(name).Type()
+func fieldsAndTypes(group logic.FieldGroup) ([]string, string) {
+ // reminder: group.Names can be "sparse" See: logic.TxnaFields
+ fields := make([]string, 0, len(group.Names))
+ types := make([]logic.StackType, 0, len(group.Names))
+ for _, name := range group.Names {
+ if spec, ok := group.SpecByName(name); ok {
+ fields = append(fields, name)
+ types = append(types, spec.Type())
+ }
}
- return names, typeString(types)
+ return fields, typeString(types)
}
-func argEnums(name string) (names []string, types string) {
+func argEnums(name string) ([]string, string) {
switch name {
- case "txn", "gtxn", "gtxns", "itxn", "gitxn", "itxn_field":
- return fieldsAndTypes(logic.TxnFieldNames, logic.TxnFieldSpecByName)
+ case "txn", "gtxn", "gtxns", "itxn", "gitxn":
+ return fieldsAndTypes(logic.TxnFields)
+ case "itxn_field":
+ // itxn_field does not *return* a type depending on its immediate. It *takes* it.
+ // but until a consumer cares, ArgEnumTypes will be overloaded for that meaning.
+ return fieldsAndTypes(logic.ItxnSettableFields)
case "global":
- return
+ return fieldsAndTypes(logic.GlobalFields)
case "txna", "gtxna", "gtxnsa", "txnas", "gtxnas", "gtxnsas", "itxna", "gitxna":
- // Map is the whole txn field spec map. That's fine, we only lookup the given names.
- return fieldsAndTypes(logic.TxnaFieldNames(), logic.TxnFieldSpecByName)
+ return fieldsAndTypes(logic.TxnArrayFields)
case "asset_holding_get":
- return fieldsAndTypes(logic.AssetHoldingFieldNames, logic.AssetHoldingFieldSpecByName)
+ return fieldsAndTypes(logic.AssetHoldingFields)
case "asset_params_get":
- return fieldsAndTypes(logic.AssetParamsFieldNames, logic.AssetParamsFieldSpecByName)
+ return fieldsAndTypes(logic.AssetParamsFields)
case "app_params_get":
- return fieldsAndTypes(logic.AppParamsFieldNames, logic.AppParamsFieldSpecByName)
+ return fieldsAndTypes(logic.AppParamsFields)
+ case "acct_params_get":
+ return fieldsAndTypes(logic.AcctParamsFields)
default:
return nil, ""
}
@@ -355,10 +322,9 @@ func buildLanguageSpec(opGroups map[string][]string) *LanguageSpec {
for i, spec := range opSpecs {
records[i].Opcode = spec.Opcode
records[i].Name = spec.Name
- records[i].Args = typeString(spec.Args)
- records[i].Returns = typeString(spec.Returns)
- records[i].Cost = spec.Details.Cost
- records[i].Size = spec.Details.Size
+ records[i].Args = typeString(spec.Arg.Types)
+ records[i].Returns = typeString(spec.Return.Types)
+ records[i].Size = spec.OpDetails.Size
records[i].ArgEnum, records[i].ArgEnumTypes = argEnums(spec.Name)
records[i].Doc = logic.OpDoc(spec.Name)
records[i].DocExtra = logic.OpDocExtra(spec.Name)
@@ -400,32 +366,22 @@ func main() {
integerConstantsTableMarkdown(constants)
constants.Close()
- txnfields := create("txn_fields.md")
- fieldSpecsMarkdown(txnfields, logic.TxnFieldNames, logic.TxnFieldSpecByName)
- txnfields.Close()
-
- globalfields := create("global_fields.md")
- fieldSpecsMarkdown(globalfields, logic.GlobalFieldNames, logic.GlobalFieldSpecByName)
- globalfields.Close()
-
- assetholding := create("asset_holding_fields.md")
- fieldSpecsMarkdown(assetholding, logic.AssetHoldingFieldNames, logic.AssetHoldingFieldSpecByName)
- assetholding.Close()
-
- assetparams := create("asset_params_fields.md")
- fieldSpecsMarkdown(assetparams, logic.AssetParamsFieldNames, logic.AssetParamsFieldSpecByName)
- assetparams.Close()
-
- appparams := create("app_params_fields.md")
- fieldSpecsMarkdown(appparams, logic.AppParamsFieldNames, logic.AppParamsFieldSpecByName)
- appparams.Close()
-
- acctparams, _ := os.Create("acct_params_fields.md")
- fieldSpecsMarkdown(acctparams, logic.AcctParamsFieldNames, logic.AcctParamsFieldSpecByName)
- acctparams.Close()
+ written := make(map[string]bool)
+ opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ for _, spec := range opSpecs {
+ for _, imm := range spec.OpDetails.Immediates {
+ if imm.Group != nil && !written[imm.Group.Name] {
+ out := create(strings.ToLower(imm.Group.Name) + "_fields.md")
+ fieldGroupMarkdown(out, imm.Group)
+ out.Close()
+ written[imm.Group.Name] = true
+ }
+ }
+ }
langspecjs := create("langspec.json")
enc := json.NewEncoder(langspecjs)
+ enc.SetIndent("", " ")
enc.Encode(buildLanguageSpec(opGroups))
langspecjs.Close()
diff --git a/cmd/opdoc/tmLanguage.go b/cmd/opdoc/tmLanguage.go
index 204068a62..9866ae504 100644
--- a/cmd/opdoc/tmLanguage.go
+++ b/cmd/opdoc/tmLanguage.go
@@ -18,6 +18,7 @@ package main
import (
"fmt"
+ "sort"
"strings"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -122,15 +123,31 @@ func buildSyntaxHighlight() *tmLanguage {
},
}
var allNamedFields []string
- allNamedFields = append(allNamedFields, logic.TxnFieldNames...)
- allNamedFields = append(allNamedFields, logic.GlobalFieldNames...)
- allNamedFields = append(allNamedFields, logic.AssetHoldingFieldNames...)
- allNamedFields = append(allNamedFields, logic.AssetParamsFieldNames...)
- allNamedFields = append(allNamedFields, logic.OnCompletionNames...)
+ allNamedFields = append(allNamedFields, logic.TxnTypeNames[:]...)
+ allNamedFields = append(allNamedFields, logic.OnCompletionNames[:]...)
+ accumulated := make(map[string]bool)
+ opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ for _, spec := range opSpecs {
+ for _, imm := range spec.OpDetails.Immediates {
+ if imm.Group != nil && !accumulated[imm.Group.Name] {
+ allNamedFields = append(allNamedFields, imm.Group.Names...)
+ accumulated[imm.Group.Name] = true
+ }
+ }
+ }
+
+ var seen = make(map[string]bool, len(allNamedFields))
+ var dedupe = make([]string, 0, len(allNamedFields))
+ for _, name := range allNamedFields {
+ if name != "" && !seen[name] {
+ dedupe = append(dedupe, name)
+ }
+ seen[name] = true
+ }
literals.Patterns = append(literals.Patterns, pattern{
Name: "variable.parameter.teal",
- Match: fmt.Sprintf("\\b(%s)\\b", strings.Join(allNamedFields, "|")),
+ Match: fmt.Sprintf("\\b(%s)\\b", strings.Join(dedupe, "|")),
})
tm.Repository["literals"] = literals
@@ -153,7 +170,15 @@ func buildSyntaxHighlight() *tmLanguage {
},
}
var allArithmetics []string
- for grp, names := range logic.OpGroups {
+
+ var keys []string
+ for key := range logic.OpGroups {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ for _, grp := range keys {
+ names := logic.OpGroups[grp]
+ sort.Strings(names)
switch grp {
case "Flow Control":
keywords.Patterns = append(keywords.Patterns, pattern{
diff --git a/cmd/tealdbg/cdtSession.go b/cmd/tealdbg/cdtSession.go
index e880296a3..3ae5675b3 100644
--- a/cmd/tealdbg/cdtSession.go
+++ b/cmd/tealdbg/cdtSession.go
@@ -168,8 +168,8 @@ func (s *cdtSession) websocketHandler(w http.ResponseWriter, r *http.Request) {
// set pc and line to 0 to workaround Register ack
state.Update(cdtStateUpdate{
dbgState.Stack, dbgState.Scratch,
- 0, 0, "",
- dbgState.OpcodeBudget, s.debugger.GetStates(nil),
+ 0, 0, "", dbgState.OpcodeBudget, dbgState.CallStack,
+ s.debugger.GetStates(nil),
})
hash := sha256.Sum256([]byte(state.disassembly)) // some random hash
@@ -247,7 +247,7 @@ func (s *cdtSession) websocketHandler(w http.ResponseWriter, r *http.Request) {
state.Update(cdtStateUpdate{
dbgState.Stack, dbgState.Scratch,
dbgState.PC, dbgState.Line, dbgState.Error,
- dbgState.OpcodeBudget, appState,
+ dbgState.OpcodeBudget, dbgState.CallStack, appState,
})
dbgStateMu.Unlock()
@@ -473,14 +473,26 @@ func (s *cdtSession) handleCdtRequest(req *cdt.ChromeRequest, state *cdtState) (
response = cdt.ChromeResponse{ID: req.ID, Result: empty}
case "Debugger.stepOut":
state.lastAction.Store("step")
- state.pauseOnCompeted.SetTo(true)
- s.debugger.Resume()
+ if len(state.callStack) == 0 {
+ // If we are not in a subroutine, pause at the end so user can
+ // inspect the final state of the program.
+ state.pauseOnCompleted.SetTo(true)
+ }
+ s.debugger.StepOut()
+ if state.completed.IsSet() {
+ evDestroyed := s.makeContextDestroyedEvent()
+ events = append(events, &evDestroyed)
+ }
+ response = cdt.ChromeResponse{ID: req.ID, Result: empty}
+ case "Debugger.stepOver":
+ state.lastAction.Store("step")
+ s.debugger.StepOver()
if state.completed.IsSet() {
evDestroyed := s.makeContextDestroyedEvent()
events = append(events, &evDestroyed)
}
response = cdt.ChromeResponse{ID: req.ID, Result: empty}
- case "Debugger.stepOver", "Debugger.stepInto":
+ case "Debugger.stepInto":
state.lastAction.Store("step")
s.debugger.Step()
if state.completed.IsSet() {
@@ -497,7 +509,7 @@ func (s *cdtSession) handleCdtRequest(req *cdt.ChromeRequest, state *cdtState) (
func (s *cdtSession) computeEvent(state *cdtState) (event interface{}) {
if state.completed.IsSet() {
- if state.pauseOnCompeted.IsSet() {
+ if state.pauseOnCompleted.IsSet() {
event = s.makeDebuggerPausedEvent(state)
return
}
@@ -571,22 +583,43 @@ func (s *cdtSession) makeDebuggerPausedEvent(state *cdtState) cdt.DebuggerPaused
},
}
sc := []cdt.DebuggerScope{scopeLocal, scopeGlobal}
- cf := cdt.DebuggerCallFrame{
- CallFrameID: "mainframe",
- FunctionName: "",
- Location: &cdt.DebuggerLocation{
- ScriptID: s.scriptID,
- LineNumber: state.line.Load(),
- ColumnNumber: 0,
+
+ cfs := []cdt.DebuggerCallFrame{
+ {
+ CallFrameID: "mainframe",
+ FunctionName: "main",
+ Location: &cdt.DebuggerLocation{
+ ScriptID: s.scriptID,
+ LineNumber: state.line.Load(),
+ ColumnNumber: 0,
+ },
+ URL: s.scriptURL,
+ ScopeChain: sc,
},
- URL: s.scriptURL,
- ScopeChain: sc,
+ }
+ for i := range state.callStack {
+ cf := cdt.DebuggerCallFrame{
+ CallFrameID: "mainframe",
+ FunctionName: state.callStack[i].LabelName,
+ Location: &cdt.DebuggerLocation{
+ ScriptID: s.scriptID,
+ LineNumber: state.line.Load(),
+ ColumnNumber: 0,
+ },
+ URL: s.scriptURL,
+ ScopeChain: sc,
+ }
+ // Set the previous call frame line number
+ cfs[0].Location.LineNumber = state.callStack[i].FrameLine
+ // We have to prepend the newest frame for it to appear first
+ // in the debugger...
+ cfs = append([]cdt.DebuggerCallFrame{cf}, cfs...)
}
evPaused := cdt.DebuggerPausedEvent{
Method: "Debugger.paused",
Params: cdt.DebuggerPausedParams{
- CallFrames: []cdt.DebuggerCallFrame{cf},
+ CallFrames: cfs,
Reason: "other",
HitBreakpoints: make([]string, 0),
},
diff --git a/cmd/tealdbg/cdtSession_test.go b/cmd/tealdbg/cdtSession_test.go
index e670572a8..1668c1a03 100644
--- a/cmd/tealdbg/cdtSession_test.go
+++ b/cmd/tealdbg/cdtSession_test.go
@@ -436,7 +436,7 @@ func TestCdtSessionStateToEvent(t *testing.T) {
// if completed and pause on competed then pause
state.completed.SetTo(true)
- state.pauseOnCompeted.SetTo(true)
+ state.pauseOnCompleted.SetTo(true)
e = s.computeEvent(&state)
_, ok = (e).(cdt.DebuggerPausedEvent)
require.True(t, ok)
diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go
index 457def831..21273982f 100644
--- a/cmd/tealdbg/cdtState.go
+++ b/cmd/tealdbg/cdtState.go
@@ -42,19 +42,20 @@ type cdtState struct {
globals []basics.TealValue
// mutable program state
- mu deadlock.Mutex
- stack []basics.TealValue
- scratch []basics.TealValue
- pc atomicInt
- line atomicInt
- err atomicString
+ mu deadlock.Mutex
+ stack []basics.TealValue
+ scratch []basics.TealValue
+ pc atomicInt
+ line atomicInt
+ err atomicString
+ callStack []logic.CallFrame
AppState
// debugger states
- lastAction atomicString
- pauseOnError atomicBool
- pauseOnCompeted atomicBool
- completed atomicBool
+ lastAction atomicString
+ pauseOnError atomicBool
+ pauseOnCompleted atomicBool
+ completed atomicBool
}
type cdtStateUpdate struct {
@@ -64,6 +65,7 @@ type cdtStateUpdate struct {
line int
err string
opcodeBudget int
+ callStack []logic.CallFrame
AppState
}
@@ -110,37 +112,7 @@ func (s *cdtState) Update(state cdtStateUpdate) {
s.AppState = state.AppState
// We need to dynamically override opcodeBudget with the proper value each step.
s.globals[logic.OpcodeBudget].Uint = uint64(state.opcodeBudget)
-}
-
-const localScopeObjID = "localScopeObjId"
-const globalScopeObjID = "globalScopeObjID"
-const globalsObjID = "globalsObjID"
-const txnObjID = "txnObjID"
-const gtxnObjID = "gtxnObjID"
-const stackObjID = "stackObjID"
-const scratchObjID = "scratchObjID"
-const tealErrorID = "tealErrorID"
-const appGlobalObjID = "appGlobalObjID"
-const appLocalsObjID = "appLocalsObjID"
-const txnArrayFieldObjID = "txnArrayField"
-const logsObjID = "logsObjID"
-const innerTxnsObjID = "innerTxnsObjID"
-
-type objectDescFn func(s *cdtState, preview bool) []cdt.RuntimePropertyDescriptor
-
-var objectDescMap = map[string]objectDescFn{
- globalScopeObjID: makeGlobalScope,
- localScopeObjID: makeLocalScope,
- globalsObjID: makeGlobals,
- txnObjID: makeTxn,
- gtxnObjID: makeTxnGroup,
- stackObjID: makeStack,
- scratchObjID: makeScratch,
- tealErrorID: makeTealError,
- appGlobalObjID: makeAppGlobalState,
- appLocalsObjID: makeAppLocalsState,
- logsObjID: makeLogsState,
- innerTxnsObjID: makeInnerTxnsState,
+ s.callStack = state.callStack
}
func (s *cdtState) getObjectDescriptor(objID string, preview bool) (desc []cdt.RuntimePropertyDescriptor, err error) {
@@ -591,8 +563,6 @@ func makeGlobalsPreview(globals []basics.TealValue) cdt.RuntimeObjectPreview {
return p
}
-var gtxnObjIDPrefix = fmt.Sprintf("%s_gid_", gtxnObjID)
-
func encodeGroupTxnID(groupIndex int) string {
return gtxnObjIDPrefix + strconv.Itoa(groupIndex)
}
@@ -606,10 +576,6 @@ func decodeGroupTxnID(objID string) (int, bool) {
return 0, false
}
-var logObjIDPrefix = fmt.Sprintf("%s_id", logsObjID)
-var innerTxnObjIDPrefix = fmt.Sprintf("%s_id", innerTxnsObjID)
-var innerNestedTxnObjIDPrefix = fmt.Sprintf("%s_nested", innerTxnsObjID)
-
func encodeNestedObjID(groupIndexes []int, prefix string) string {
encodedElements := []string{prefix}
for _, i := range groupIndexes {
@@ -695,8 +661,6 @@ func decodeArraySlice(objID string) (string, int, int, bool) {
return "", 0, 0, false
}
-var appGlobalObjIDPrefix = fmt.Sprintf("%s_", appGlobalObjID)
-
func encodeAppGlobalAppID(key string) string {
return appGlobalObjIDPrefix + key
}
@@ -710,8 +674,6 @@ func decodeAppGlobalAppID(objID string) (uint64, bool) {
return 0, false
}
-var appLocalsObjIDPrefix = fmt.Sprintf("%s_", appLocalsObjID)
-
func encodeAppLocalsAddr(addr string) string {
return appLocalsObjIDPrefix + addr
}
@@ -723,8 +685,6 @@ func decodeAppLocalsAddr(objID string) (string, bool) {
return "", false
}
-var appLocalAppIDPrefix = fmt.Sprintf("%s__", appLocalsObjID)
-
func encodeAppLocalsAppID(addr string, appID string) string {
return fmt.Sprintf("%s%s_%s", appLocalAppIDPrefix, addr, appID)
}
@@ -740,8 +700,6 @@ func decodeAppLocalsAppID(objID string) (string, uint64, bool) {
return "", 0, false
}
-var txnArrayFieldPrefix = fmt.Sprintf("%s__", txnArrayFieldObjID)
-
func encodeTxnArrayField(groupIndex int, field int) string {
return fmt.Sprintf("%s%d_%d", txnArrayFieldPrefix, groupIndex, field)
}
diff --git a/cmd/tealdbg/cdtStateObjects.go b/cmd/tealdbg/cdtStateObjects.go
new file mode 100644
index 000000000..b6daf39f1
--- /dev/null
+++ b/cmd/tealdbg/cdtStateObjects.go
@@ -0,0 +1,67 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "github.com/algorand/go-algorand/cmd/tealdbg/cdt"
+)
+
+// Object IDs
+const (
+ localScopeObjID = "localScopeObjId"
+ globalScopeObjID = "globalScopeObjID"
+ globalsObjID = "globalsObjID"
+ txnObjID = "txnObjID"
+ gtxnObjID = "gtxnObjID"
+ stackObjID = "stackObjID"
+ scratchObjID = "scratchObjID"
+ tealErrorID = "tealErrorID"
+ appGlobalObjID = "appGlobalObjID"
+ appLocalsObjID = "appLocalsObjID"
+ txnArrayFieldObjID = "txnArrayField"
+ logsObjID = "logsObjID"
+ innerTxnsObjID = "innerTxnsObjID"
+)
+
+// Object Prefix IDs
+const (
+ gtxnObjIDPrefix = gtxnObjID + "_gid_"
+ logObjIDPrefix = logsObjID + "_id"
+ innerTxnObjIDPrefix = innerTxnsObjID + "_id"
+ innerNestedTxnObjIDPrefix = innerTxnsObjID + "_nested"
+ appGlobalObjIDPrefix = appGlobalObjID + "_"
+ appLocalsObjIDPrefix = appLocalsObjID + "_"
+ appLocalAppIDPrefix = appLocalsObjID + "__"
+ txnArrayFieldPrefix = txnArrayFieldObjID + "__"
+)
+
+type objectDescFn func(s *cdtState, preview bool) []cdt.RuntimePropertyDescriptor
+
+var objectDescMap = map[string]objectDescFn{
+ globalScopeObjID: makeGlobalScope,
+ localScopeObjID: makeLocalScope,
+ globalsObjID: makeGlobals,
+ txnObjID: makeTxn,
+ gtxnObjID: makeTxnGroup,
+ stackObjID: makeStack,
+ scratchObjID: makeScratch,
+ tealErrorID: makeTealError,
+ appGlobalObjID: makeAppGlobalState,
+ appLocalsObjID: makeAppLocalsState,
+ logsObjID: makeLogsState,
+ innerTxnsObjID: makeInnerTxnsState,
+}
diff --git a/cmd/tealdbg/cdtdbg_test.go b/cmd/tealdbg/cdtdbg_test.go
index 3e6acf8b1..41164a3af 100644
--- a/cmd/tealdbg/cdtdbg_test.go
+++ b/cmd/tealdbg/cdtdbg_test.go
@@ -106,6 +106,12 @@ type MockDebugControl struct {
func (c *MockDebugControl) Step() {
}
+func (c *MockDebugControl) StepOver() {
+}
+
+func (c *MockDebugControl) StepOut() {
+}
+
func (c *MockDebugControl) Resume() {
}
diff --git a/cmd/tealdbg/debugger.go b/cmd/tealdbg/debugger.go
index 4d7421f93..b9b899560 100644
--- a/cmd/tealdbg/debugger.go
+++ b/cmd/tealdbg/debugger.go
@@ -53,6 +53,8 @@ type DebugAdapter interface {
// Control interface for execution control
type Control interface {
Step()
+ StepOver()
+ StepOut()
Resume()
SetBreakpoint(line int) error
RemoveBreakpoint(line int) error
@@ -89,19 +91,54 @@ type programMeta struct {
states AppState
}
-// breakpointLine is a source line number with a couple special values:
-// -1 do not break
-// 0 break at next instruction
-// N break at line N
-type breakpointLine int
+// debugConfig contains information about control execution and breakpoints.
+type debugConfig struct {
+ NoBreak bool `json:"nobreak"`
+ StepBreak bool `json:"stepbreak"`
+ StepOutOver bool `json:"stepover"`
-const (
- noBreak breakpointLine = -1
- stepBreak breakpointLine = 0
-)
+ ActiveBreak map[int]struct{} `json:"activebreak"`
+ CallDepth int `json:"calldepth"`
+}
-type debugConfig struct {
- BreakAtLine breakpointLine `json:"breakatline"`
+func makeDebugConfig() debugConfig {
+ dc := debugConfig{}
+ dc.ActiveBreak = make(map[int]struct{})
+ return dc
+}
+
+func (dc *debugConfig) setNoBreak() {
+ dc.NoBreak = true
+}
+
+func (dc *debugConfig) setStepBreak() {
+ dc.StepBreak = true
+}
+
+func (dc *debugConfig) setStepOutOver(callDepth int) {
+ dc.StepOutOver = true
+ dc.CallDepth = callDepth
+}
+
+// setActiveBreak does not validate the line number, so it should only be
+// called from the session's setBreakpoint(), which performs that check.
+func (dc *debugConfig) setActiveBreak(line int) {
+ dc.ActiveBreak[line] = struct{}{}
+}
+
+// isBreak checks if Update() should break at this line and callDepth.
+func (dc *debugConfig) isBreak(line int, callDepth int) bool {
+ if dc.StepBreak {
+ return true
+ }
+
+ _, ok := dc.ActiveBreak[line]
+ if !dc.StepOutOver || dc.CallDepth == callDepth {
+ // If we are in stepOver or stepOut, then make sure we check
+ // callstack depth before breaking at this line.
+ return ok
+ }
+ return false
}
type session struct {
@@ -111,7 +148,8 @@ type session struct {
acknowledged chan bool
// debugConfig holds information about this debugging session,
- // currently just when we want to break
+ // such as the breakpoints, initial call stack depth, and whether we want
+ // to step over/out/in.
debugConfig debugConfig
// notifications from eval
@@ -130,6 +168,8 @@ type session struct {
breakpoints []breakpoint
line atomicInt
+ callStack []logic.CallFrame
+
states AppState
}
@@ -146,9 +186,8 @@ func makeSession(disassembly string, line int) (s *session) {
s = new(session)
// Allocate a default debugConfig (don't break)
- s.debugConfig = debugConfig{
- BreakAtLine: noBreak,
- }
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setNoBreak()
// Allocate an acknowledgement and notifications channels
s.acknowledged = make(chan bool)
@@ -158,6 +197,7 @@ func makeSession(disassembly string, line int) (s *session) {
s.lines = strings.Split(disassembly, "\n")
s.breakpoints = make([]breakpoint, len(s.lines))
s.line.Store(line)
+ s.callStack = []logic.CallFrame{}
return
}
@@ -181,25 +221,69 @@ func (s *session) Step() {
func() {
s.mu.Lock()
defer s.mu.Unlock()
- s.debugConfig = debugConfig{BreakAtLine: stepBreak}
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setStepBreak()
}()
s.resume()
}
-func (s *session) Resume() {
- currentLine := s.line.Load()
+func (s *session) StepOver() {
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ // Get the first TEAL opcode in the line
+ currentOp := strings.Fields(s.lines[s.line.Load()])[0]
+ s.debugConfig = makeDebugConfig()
+
+ // Step over a function call (callsub op).
+ if currentOp == "callsub" && s.line.Load() < len(s.breakpoints) {
+ // Mark that we are in StepOver mode and save our current call depth
+ // so we can skip breakpoints that are not at the matching call depth.
+ s.debugConfig.setStepOutOver(len(s.callStack))
+ err := s.setBreakpoint(s.line.Load() + 1)
+ if err != nil {
+ s.debugConfig.setStepBreak()
+ }
+ } else {
+ s.debugConfig.setStepBreak()
+ }
+ }()
+ s.resume()
+}
+
+func (s *session) StepOut() {
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.debugConfig = makeDebugConfig()
+ if len(s.callStack) == 0 {
+ s.debugConfig.setNoBreak()
+ } else {
+ callFrame := s.callStack[len(s.callStack)-1]
+ s.debugConfig.setStepOutOver(len(s.callStack) - 1)
+ err := s.setBreakpoint(callFrame.FrameLine + 1)
+ if err != nil {
+ s.debugConfig.setStepBreak()
+ }
+ }
+ }()
+ s.resume()
+}
+
+func (s *session) Resume() {
func() {
s.mu.Lock()
defer s.mu.Unlock()
- s.debugConfig = debugConfig{BreakAtLine: noBreak} // reset possible break after Step
- // find any active breakpoints and set next break
- if currentLine < len(s.breakpoints) {
- for line, state := range s.breakpoints[currentLine+1:] {
- if state.set && state.active {
- s.setBreakpoint(line + currentLine + 1)
- break
+ s.debugConfig = makeDebugConfig()
+ // find all active breakpoints and register each one as a break target
+ for line, state := range s.breakpoints {
+ if state.set && state.active {
+ err := s.setBreakpoint(line)
+ if err != nil {
+ s.debugConfig.setStepBreak()
}
}
}
@@ -209,21 +293,30 @@ func (s *session) Resume() {
}
// setBreakpoint must be called with lock taken
+// Used for setting a breakpoint in step execution and adding bp to the session.
func (s *session) setBreakpoint(line int) error {
if line >= len(s.breakpoints) {
return fmt.Errorf("invalid bp line %d", line)
}
s.breakpoints[line] = breakpoint{set: true, active: true}
- s.debugConfig = debugConfig{BreakAtLine: breakpointLine(line)}
+ s.debugConfig.setActiveBreak(line)
return nil
}
func (s *session) SetBreakpoint(line int) error {
s.mu.Lock()
defer s.mu.Unlock()
+ // Reset all existing flags and breakpoints and set a new bp.
+ s.debugConfig = makeDebugConfig()
return s.setBreakpoint(line)
}
+func (s *session) setCallStack(callStack []logic.CallFrame) {
+ s.mu.Lock()
+ s.callStack = callStack
+ s.mu.Unlock()
+}
+
func (s *session) RemoveBreakpoint(line int) error {
s.mu.Lock()
defer s.mu.Unlock()
@@ -232,7 +325,8 @@ func (s *session) RemoveBreakpoint(line int) error {
return fmt.Errorf("invalid bp line %d", line)
}
if s.breakpoints[line].NonEmpty() {
- s.debugConfig = debugConfig{BreakAtLine: noBreak}
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setNoBreak()
s.breakpoints[line] = breakpoint{}
}
return nil
@@ -248,7 +342,8 @@ func (s *session) SetBreakpointsActive(active bool) {
}
}
if !active {
- s.debugConfig = debugConfig{BreakAtLine: noBreak}
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setNoBreak()
}
}
@@ -273,14 +368,14 @@ func (s *session) GetSourceMap() ([]byte, error) {
prevSourceLine := 0
// the very first entry is needed by CDT
- lines[0] = MakeSourceMapLine(targetCol, sourceIdx, 0, sourceCol)
+ lines[0] = logic.MakeSourceMapLine(targetCol, sourceIdx, 0, sourceCol)
for targetLine := 1; targetLine < len(s.lines); targetLine++ {
if pc, ok := s.pcOffset[targetLine]; ok && pc != 0 {
sourceLine, ok = s.offsetToLine[pc]
if !ok {
lines[targetLine] = ""
} else {
- lines[targetLine] = MakeSourceMapLine(targetCol, sourceIdx, sourceLine-prevSourceLine, sourceCol)
+ lines[targetLine] = logic.MakeSourceMapLine(targetCol, sourceIdx, sourceLine-prevSourceLine, sourceCol)
prevSourceLine = sourceLine
}
} else {
@@ -289,7 +384,7 @@ func (s *session) GetSourceMap() ([]byte, error) {
if targetLine == len(s.lines)-1 {
delta = 1
}
- lines[targetLine] = MakeSourceMapLine(targetCol, sourceIdx, delta, sourceCol)
+ lines[targetLine] = logic.MakeSourceMapLine(targetCol, sourceIdx, delta, sourceCol)
}
}
@@ -478,8 +573,10 @@ func (d *Debugger) Update(state *logic.DebugState) error {
// copy state to prevent a data race between this goroutine and upcoming updates to the state
go func(localState logic.DebugState) {
// Check if we are triggered and acknowledge asynchronously
- if cfg.BreakAtLine != noBreak {
- if cfg.BreakAtLine == stepBreak || breakpointLine(localState.Line) == cfg.BreakAtLine {
+ if !cfg.NoBreak {
+ if cfg.isBreak(localState.Line, len(localState.CallStack)) {
+ // Copy callstack information. NOTE(review): this reads state.CallStack rather
+ // than localState.CallStack — confirm it does not reintroduce the data race
+ // that copying the state above was meant to avoid.
+ s.setCallStack(state.CallStack)
// Breakpoint hit! Inform the user
s.notifications <- Notification{"updated", localState}
} else {
diff --git a/cmd/tealdbg/debugger_test.go b/cmd/tealdbg/debugger_test.go
index b37f7fb35..4a390d461 100644
--- a/cmd/tealdbg/debugger_test.go
+++ b/cmd/tealdbg/debugger_test.go
@@ -53,7 +53,7 @@ func (d *testDbgAdapter) WaitForCompletion() {
<-d.done
}
-func (d *testDbgAdapter) SessionStarted(sid string, debugger Control, ch chan Notification) {
+func (d *testDbgAdapter) SessionStarted(_ string, debugger Control, ch chan Notification) {
d.debugger = debugger
d.notifications = ch
@@ -62,7 +62,7 @@ func (d *testDbgAdapter) SessionStarted(sid string, debugger Control, ch chan No
d.started = true
}
-func (d *testDbgAdapter) SessionEnded(sid string) {
+func (d *testDbgAdapter) SessionEnded(_ string) {
d.ended = true
}
@@ -84,7 +84,8 @@ func (d *testDbgAdapter) eventLoop() {
require.NotNil(d.t, n.DebugState.Scratch)
require.NotEmpty(d.t, n.DebugState.Disassembly)
require.NotEmpty(d.t, n.DebugState.ExecID)
- d.debugger.SetBreakpoint(n.DebugState.Line + 1)
+ err := d.debugger.SetBreakpoint(n.DebugState.Line + 1)
+ require.NoError(d.t, err)
}
d.debugger.Resume()
}
@@ -121,9 +122,8 @@ int 1
require.Equal(t, 3, da.eventCount) // register, update, complete
}
-func TestSession(t *testing.T) {
- partitiontest.PartitionTest(t)
- source := fmt.Sprintf("#pragma version %d\nint 1\ndup\n+\n", logic.LogicVersion)
+func createSessionFromSource(t *testing.T, program string) *session {
+ source := fmt.Sprintf(program, logic.LogicVersion)
ops, err := logic.AssembleStringWithVersion(source, logic.LogicVersion)
require.NoError(t, err)
disassembly, err := logic.Disassemble(ops.Program)
@@ -141,7 +141,14 @@ func TestSession(t *testing.T) {
s.programName = "test"
s.offsetToLine = ops.OffsetToLine
s.pcOffset = pcOffset
- err = s.SetBreakpoint(2)
+
+ return s
+}
+
+func TestSession(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ s := createSessionFromSource(t, "#pragma version %d\nint 1\ndup\n+\n")
+ err := s.SetBreakpoint(2)
require.NoError(t, err)
ackCount := 0
@@ -155,9 +162,9 @@ func TestSession(t *testing.T) {
s.Resume()
<-done
-
- require.Equal(t, breakpointLine(2), s.debugConfig.BreakAtLine)
+ require.Equal(t, map[int]struct{}{2: {}}, s.debugConfig.ActiveBreak)
require.Equal(t, breakpoint{true, true}, s.breakpoints[2])
+ require.Equal(t, true, s.debugConfig.isBreak(2, len(s.callStack)))
require.Equal(t, 1, ackCount)
s.SetBreakpointsActive(false)
@@ -166,7 +173,8 @@ func TestSession(t *testing.T) {
s.SetBreakpointsActive(true)
require.Equal(t, breakpoint{true, true}, s.breakpoints[2])
- s.RemoveBreakpoint(2)
+ err = s.RemoveBreakpoint(2)
+ require.NoError(t, err)
require.Equal(t, breakpoint{false, false}, s.breakpoints[2])
go ackFunc()
@@ -174,7 +182,7 @@ func TestSession(t *testing.T) {
s.Step()
<-done
- require.Equal(t, stepBreak, s.debugConfig.BreakAtLine)
+ require.Equal(t, true, s.debugConfig.StepBreak)
require.Equal(t, 2, ackCount)
data, err := s.GetSourceMap()
@@ -185,3 +193,171 @@ func TestSession(t *testing.T) {
require.NotEmpty(t, name)
require.Greater(t, len(data), 0)
}
+
+// Tests control functions for stepping over subroutines and checks
+// that call stack is inspected correctly.
+func TestCallStackControl(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ newTestCase := func() (*session, chan struct{}, func(), *int) {
+ s := createSessionFromSource(t, "#pragma version %d\nlab1:\nint 1\ncallsub lab1\ndup\n+\n")
+
+ ackCount := 0
+ done := make(chan struct{})
+ ackFunc := func() {
+ ackCount++
+ <-s.acknowledged
+ done <- struct{}{}
+
+ }
+
+ return s, done, ackFunc, &ackCount
+ }
+
+ cases := map[string]func(*testing.T){
+ "Check that step over on callsub line returns correct callstack depth": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+ s.setCallStack([]logic.CallFrame{{FrameLine: 2, LabelName: "lab1"}})
+ initialStackDepth := len(s.callStack)
+ s.line.Store(3)
+
+ go ackFunc()
+ s.StepOver()
+ <-done
+
+ require.Equal(t, map[int]struct{}{4: {}}, s.debugConfig.ActiveBreak)
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[4])
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, true, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, initialStackDepth, len(s.callStack))
+ },
+ "Breakpoint should not trigger at the wrong call stack height": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.setCallStack([]logic.CallFrame{{FrameLine: 2, LabelName: "lab1"}})
+ s.line.Store(3)
+
+ go ackFunc()
+ s.StepOver()
+ <-done
+
+ s.setCallStack([]logic.CallFrame{
+ {FrameLine: 2, LabelName: "lab1"},
+ {FrameLine: 2, LabelName: "lab1"},
+ })
+ require.Equal(t, false, s.debugConfig.isBreak(4, len(s.callStack)))
+
+ s.setCallStack([]logic.CallFrame{
+ {FrameLine: 2, LabelName: "lab1"},
+ })
+ require.Equal(t, true, s.debugConfig.isBreak(4, len(s.callStack)))
+ require.Equal(t, 1, *ackCount)
+ },
+ "Check step over on a non callsub line breaks at next line": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.line.Store(4)
+
+ go ackFunc()
+ s.StepOver()
+ <-done
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, true, s.debugConfig.StepBreak)
+ require.Equal(t, false, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 0, len(s.callStack))
+ },
+ "Check that step out when call stack depth is 1 sets breakpoint to the line after frame": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.setCallStack([]logic.CallFrame{{FrameLine: 2, LabelName: "lab1"}})
+ s.line.Store(4)
+
+ go ackFunc()
+ s.StepOut()
+ <-done
+
+ require.Equal(t, map[int]struct{}{3: {}}, s.debugConfig.ActiveBreak)
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[3])
+ require.Equal(t, true, s.debugConfig.isBreak(3, len(s.callStack)-1))
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, true, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 1, len(s.callStack))
+ },
+ "Check that step out when call stack depth is 0 sets NoBreak to true": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.setCallStack(nil)
+ s.line.Store(3)
+
+ go ackFunc()
+ s.StepOut()
+ <-done
+
+ require.Equal(t, true, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, false, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 0, len(s.callStack))
+ },
+ "Check that resume keeps track of every breakpoint": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.line.Store(3)
+ err := s.RemoveBreakpoint(3)
+ require.NoError(t, err)
+ require.Equal(t, breakpoint{false, false}, s.breakpoints[2])
+ err = s.SetBreakpoint(2)
+ require.NoError(t, err)
+ err = s.SetBreakpoint(4)
+ require.NoError(t, err)
+
+ go ackFunc()
+ s.Resume()
+ <-done
+
+ require.Equal(t, map[int]struct{}{2: {}, 4: {}}, s.debugConfig.ActiveBreak)
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[2])
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[4])
+ require.Equal(t, true, s.debugConfig.isBreak(2, len(s.callStack)))
+ require.Equal(t, false, s.debugConfig.isBreak(3, len(s.callStack)))
+ require.Equal(t, true, s.debugConfig.isBreak(4, len(s.callStack)))
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, false, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 0, len(s.callStack))
+ },
+ }
+
+ for name, f := range cases {
+ t.Run(name, f)
+ }
+}
+
+func TestSourceMaps(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ s := createSessionFromSource(t, "#pragma version %d\nint 1\n")
+
+ // Source and source map checks
+ data, err := s.GetSourceMap()
+ require.NoError(t, err)
+ require.Greater(t, len(data), 0)
+
+ name, data := s.GetSource()
+ require.NotEmpty(t, name)
+ require.Greater(t, len(data), 0)
+}
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 6bf088e2f..8e92f66fd 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -530,17 +530,29 @@ func (r *LocalRunner) RunAll() error {
return fmt.Errorf("no program to debug")
}
+ configureDebugger := func(ep *logic.EvalParams) {
+ // Workaround for Go's nil/empty interfaces nil check after nil assignment, i.e.
+ // r.debugger = nil
+ // ep.Debugger = r.debugger
+ // if ep.Debugger != nil // FALSE
+ if r.debugger != nil {
+ ep.Debugger = r.debugger
+ }
+ }
+
txngroup := transactions.WrapSignedTxnsWithAD(r.txnGroup)
failed := 0
start := time.Now()
ep := logic.NewEvalParams(txngroup, &r.proto, &transactions.SpecialAddresses{})
- ep.Debugger = r.debugger
+ configureDebugger(ep)
var last error
for i := range r.runs {
run := &r.runs[i]
- r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
+ if r.debugger != nil {
+ r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
+ }
run.result.pass, run.result.err = run.eval(int(run.groupIndex), ep)
if run.result.err != nil {
@@ -554,26 +566,3 @@ func (r *LocalRunner) RunAll() error {
}
return nil
}
-
-// Run starts the first program in list
-func (r *LocalRunner) Run() (bool, error) {
- if len(r.runs) < 1 {
- return false, fmt.Errorf("no program to debug")
- }
-
- txngroup := transactions.WrapSignedTxnsWithAD(r.txnGroup)
-
- ep := logic.NewEvalParams(txngroup, &r.proto, &transactions.SpecialAddresses{})
-
- run := r.runs[0]
- // Workaround for Go's nil/empty interfaces nil check after nil assignment, i.e.
- // r.debugger = nil
- // ep.Debugger = r.debugger
- // if ep.Debugger != nil // FALSE
- if r.debugger != nil {
- r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
- ep.Debugger = r.debugger
- }
-
- return run.eval(int(run.groupIndex), ep)
-}
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index 99cfa1562..f39c9da8a 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -17,6 +17,7 @@
package main
import (
+ "encoding/base64"
"encoding/json"
"fmt"
"net/http"
@@ -34,6 +35,7 @@ import (
"github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -55,6 +57,55 @@ var txnSample string = `{
}
`
+type runAllResult struct {
+ invocationError error
+ results []evalResult
+}
+
+func runAllResultFromInvocation(lr LocalRunner) runAllResult {
+ err := lr.RunAll()
+ results := make([]evalResult, len(lr.runs))
+ for i := range results {
+ results[i] = lr.runs[i].result
+ }
+
+ return runAllResult{
+ invocationError: err,
+ results: results,
+ }
+}
+
+func (r runAllResult) allErrors() []error {
+ es := make([]error, len(r.results)+1)
+ es[0] = r.invocationError
+ for i := range r.results {
+ es[i+1] = r.results[i].err
+ }
+ return es
+}
+
+func allPassing(runCount int) runAllResult {
+ results := make([]evalResult, runCount)
+ for i := range results {
+ results[i].pass = true
+ }
+ return runAllResult{
+ invocationError: nil,
+ results: results,
+ }
+}
+
+func allErrors(es []error) assert.Comparison {
+ return func() bool {
+ for _, e := range es {
+ if e == nil {
+ return false
+ }
+ }
+ return true
+ }
+}
+
func TestTxnJSONInput(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -473,9 +524,7 @@ int 100
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- a.NoError(err)
- a.True(pass)
+ a.Equal(allPassing(len(local.runs)), runAllResultFromInvocation(*local))
// check relaxed - opted in for both
source = `#pragma version 2
@@ -496,9 +545,8 @@ int 1
err = local.Setup(&ds)
a.NoError(err)
- pass, err = local.Run()
- a.NoError(err)
- a.True(pass)
+ a.Equal(allPassing(len(local.runs)), runAllResultFromInvocation(*local))
+
ds.Painless = false
// check ForeignApp
@@ -516,9 +564,8 @@ byte 0x676c6f62616c // global
err = local.Setup(&ds)
a.NoError(err)
- pass, err = local.Run()
- a.Error(err)
- a.False(pass)
+ r := runAllResultFromInvocation(*local)
+ a.Condition(allErrors(r.allErrors()))
}
func TestDebugFromPrograms(t *testing.T) {
@@ -1136,9 +1183,8 @@ int 1`
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- a.NoError(err)
- a.True(pass)
+ r := runAllResultFromInvocation(*local)
+ a.Equal(allPassing(len(local.runs)), r)
}
func TestDebugFeePooling(t *testing.T) {
@@ -1195,12 +1241,20 @@ int 1`
// two testcase: success with enough fees and fail otherwise
var tests = []struct {
- pass bool
- fee uint64
+ fee uint64
+ expected func(LocalRunner, runAllResult)
}{
- {true, 2000},
- {false, 1500},
+ {2000, func(l LocalRunner, r runAllResult) {
+ a.Equal(allPassing(len(l.runs)), r)
+ }},
+ {1500, func(_ LocalRunner, r runAllResult) {
+ a.Condition(allErrors(r.allErrors()))
+ for _, result := range r.results {
+ a.False(result.pass)
+ }
+ }},
}
+
for _, test := range tests {
t.Run(fmt.Sprintf("fee=%d", test.fee), func(t *testing.T) {
@@ -1223,14 +1277,8 @@ int 1`
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- if test.pass {
- a.NoError(err)
- a.True(pass)
- } else {
- a.Error(err)
- a.False(pass)
- }
+ r := runAllResultFromInvocation(*local)
+ test.expected(*local, r)
})
}
}
@@ -1315,11 +1363,22 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
balanceBlob := protocol.EncodeMsgp(&br)
var tests = []struct {
- pass bool
additionalApps int
+ expected func(LocalRunner, runAllResult)
}{
- {false, 2},
- {true, 3},
+ {2, func(_ LocalRunner, r runAllResult) {
+ a.ErrorContains(r.results[0].err, "dynamic cost budget exceeded")
+
+ a.Equal(
+ allPassing(len(r.results)-1),
+ runAllResult{
+ invocationError: r.invocationError,
+ results: r.results[1:],
+ })
+ }},
+ {3, func(l LocalRunner, r runAllResult) {
+ a.Equal(allPassing(len(l.runs)), r)
+ }},
}
for _, test := range tests {
t.Run(fmt.Sprintf("txn-count=%d", test.additionalApps+1), func(t *testing.T) {
@@ -1347,15 +1406,7 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- if test.pass {
- a.NoError(err)
- a.True(pass)
- } else {
- a.Error(err)
- a.Contains(err.Error(), "dynamic cost budget exceeded")
- a.False(pass)
- }
+ test.expected(*local, runAllResultFromInvocation(*local))
})
}
}
@@ -1455,7 +1506,310 @@ func TestGroupTxnIdx(t *testing.T) {
err := local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
+ r := runAllResultFromInvocation(*local)
+ a.Equal(allPassing(len(local.runs)), r)
+}
+
+func TestRunAllGloads(t *testing.T) {
+
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ sourceA := `#pragma version 6
+
+ txn ApplicationID
+ bz handle_createapp
+
+ int 99
+ store 1
+
+ itxn_begin
+ int acfg
+ itxn_field TypeEnum
+ int 1000000
+ itxn_field ConfigAssetTotal
+ int 3
+ itxn_field ConfigAssetDecimals
+ byte base64 AA==
+ itxn_field ConfigAssetUnitName
+ byte base64(AAAAAAAAAAA=)
+ itxn_field ConfigAssetName
+ pushbytes 0x0000000000000000
+ itxn_field ConfigAssetURL
+ global CurrentApplicationAddress
+ dup
+ dup2
+ itxn_field ConfigAssetManager
+ itxn_field ConfigAssetReserve
+ itxn_field ConfigAssetFreeze
+ itxn_field ConfigAssetClawback
+ itxn_submit
+
+ handle_createapp:
+ int 1`
+
+ sourceB := `#pragma version 6
+
+ txn ApplicationID
+ bz handle_createapp
+
+ gload 2 1
+ itob
+ log
+
+ handle_createapp:
+ int 1`
+
+ ops, err := logic.AssembleString(sourceA)
+ a.NoError(err)
+ progA := base64.StdEncoding.EncodeToString(ops.Program)
+
+ ops, err = logic.AssembleString(sourceB)
+ a.NoError(err)
+ progB := base64.StdEncoding.EncodeToString(ops.Program)
+
+ // Transaction group with 5 transactions
+ // 1. Payment txn to app A
+ // 2. Payment txn to app B
+ // 3. App call to app A
+ // 4. App call to app B with gload on app A scratch slot
+ ddrBlob := `{
+ "accounts": [
+ {
+ "address": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "amount": 4000001724861773,
+ "amount-without-pending-rewards": 4000001724861773,
+ "min-balance": 100000,
+ "participation": {
+ "selection-participation-key": "S3YIZ2TNGSl1plq93eXsXsRhJRfCyIMKq0sq12++C8Y=",
+ "state-proof-key": "4BqeyojB23ZEj7Ddf9MKtIHBKFFYKhIYEwctoSuL9iXXdQ6R5lWzIJ5Sun5wHJhE9Rk5/wjjTeiCFJPEJVafrA==",
+ "vote-first-valid": 0,
+ "vote-key-dilution": 10000,
+ "vote-last-valid": 3000000,
+ "vote-participation-key": "qmkEl2AbMO/KKK+iOgIhSB3Q/4WXftoucPUvEYFaWbo="
+ },
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 3999997773,
+ "round": 41,
+ "status": "Online",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 0,
+ "total-created-apps": 0,
+ "total-created-assets": 0
+ },
+ {
+ "address": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "amount": 74198032,
+ "amount-without-pending-rewards": 74198032,
+ "assets": [
+ {
+ "amount": 1000000,
+ "asset-id": 45,
+ "is-frozen": false
+ },
+ {
+ "amount": 1000000,
+ "asset-id": 50,
+ "is-frozen": false
+ }
+ ],
+ "created-assets": [
+ {
+ "index": 45,
+ "params": {
+ "clawback": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "creator": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "decimals": 3,
+ "freeze": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "manager": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "name-b64": "AAAAAAAAAAA=",
+ "reserve": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "total": 1000000,
+ "unit-name-b64": "AA==",
+ "url-b64": "AAAAAAAAAAA="
+ }
+ },
+ {
+ "index": 50,
+ "params": {
+ "clawback": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "creator": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "decimals": 3,
+ "freeze": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "manager": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "name-b64": "AAAAAAAAAAA=",
+ "reserve": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "total": 1000000,
+ "unit-name-b64": "AA==",
+ "url-b64": "AAAAAAAAAAA="
+ }
+ }
+ ],
+ "min-balance": 300000,
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 32,
+ "round": 41,
+ "status": "Offline",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 2,
+ "total-created-apps": 0,
+ "total-created-assets": 2
+ },
+ {
+ "address": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "amount": 4000001724861773,
+ "amount-without-pending-rewards": 4000001724861773,
+ "min-balance": 100000,
+ "participation": {
+ "selection-participation-key": "S3YIZ2TNGSl1plq93eXsXsRhJRfCyIMKq0sq12++C8Y=",
+ "state-proof-key": "4BqeyojB23ZEj7Ddf9MKtIHBKFFYKhIYEwctoSuL9iXXdQ6R5lWzIJ5Sun5wHJhE9Rk5/wjjTeiCFJPEJVafrA==",
+ "vote-first-valid": 0,
+ "vote-key-dilution": 10000,
+ "vote-last-valid": 3000000,
+ "vote-participation-key": "qmkEl2AbMO/KKK+iOgIhSB3Q/4WXftoucPUvEYFaWbo="
+ },
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 3999997773,
+ "round": 41,
+ "status": "Online",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 0,
+ "total-created-apps": 0,
+ "total-created-assets": 0
+ },
+ {
+ "address": "KLWQTWPJXUAPVZNANKGGTTFGPPJZDOLGOCBCBRHR53C6J2FDYF2GBABCRU",
+ "amount": 27300019,
+ "amount-without-pending-rewards": 27300019,
+ "min-balance": 100000,
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 19,
+ "round": 41,
+ "status": "Offline",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 0,
+ "total-created-apps": 0,
+ "total-created-assets": 0
+ }
+ ],
+ "apps": [
+ {
+ "id": 39,
+ "params": {
+ "approval-program": "%s",
+ "clear-state-program": "BoEB",
+ "creator": "5Z2LOJJCA52LM6I6FLS3DLRBG7UWDEQ2RS2Y76Z66QPUNLAGGJIDDX7BII",
+ "global-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ },
+ "local-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ }
+ }
+ },
+ {
+ "id": 41,
+ "params": {
+ "approval-program": "%s",
+ "clear-state-program": "BoEB",
+ "creator": "5P7Y556QIE3UCBNWJ7GXPNDCV6CLZF5VDEZ2PTTGNY5PQ2OBA4D6GXZFZA",
+ "global-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ },
+ "local-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ }
+ }
+ }
+ ],
+ "latest-timestamp": 1646848841,
+ "protocol-version": "future",
+ "round": 41,
+ "sources": null,
+ "txns": [
+ {
+ "sig": "EPT8gSZDv20jj+bRwoqeqt7js8pquiYoH+pK4tl+qzujseK6+3QiFJV0qFU6p2xlrLNvsbqHBMmbOGjX9HUmAQ==",
+ "txn": {
+ "amt": 41300000,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "rcv": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "pay"
+ }
+ },
+ {
+ "sig": "Wmphf7cw//QSlNg0WD1VjFRwtVh6KOo/hFxdwD57aW/swuNCUN7L5ew0BS1vWOp2C6eVzZPK145b+H2A2PziBg==",
+ "txn": {
+ "amt": 7700000,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "rcv": "KLWQTWPJXUAPVZNANKGGTTFGPPJZDOLGOCBCBRHR53C6J2FDYF2GBABCRU",
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "pay"
+ }
+ },
+ {
+ "sig": "IyrYrbX6yaQfUcNHmArTWptV3WI9fdUbRT4K7q6KaCoub5L/dRRV6bFcLAcNZKTXNLYR+d4/GYz6XFhfFBp+DQ==",
+ "txn": {
+ "apid": 39,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "appl"
+ }
+ },
+ {
+ "sig": "H1TQRug7WG3tjGae3bXzDiAoXbILByvc9//J+imkFgaAHW5UPzvJGtn7yVpr8tInYVPnnTF+l88TXY/ANUB2CQ==",
+ "txn": {
+ "apid": 41,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "appl"
+ }
+ }
+ ]
+ }`
+
+ // Format string with base64 encoded program bytes string
+ ddrBlob = fmt.Sprintf(ddrBlob, progA, progB)
+
+ ds := DebugParams{
+ Proto: string(protocol.ConsensusCurrentVersion),
+ DdrBlob: []byte(ddrBlob),
+ GroupIndex: 4,
+ RunMode: "application",
+ }
+
+ local := MakeLocalRunner(nil)
+ err = local.Setup(&ds)
+ a.NoError(err)
+
+ err = local.RunAll()
a.NoError(err)
- a.True(pass)
}
diff --git a/cmd/tealdbg/util.go b/cmd/tealdbg/util.go
index 6a7f880ef..d611c7bcb 100644
--- a/cmd/tealdbg/util.go
+++ b/cmd/tealdbg/util.go
@@ -17,7 +17,6 @@
package main
import (
- "bytes"
"strconv"
"sync/atomic"
)
@@ -100,29 +99,3 @@ func IsTextFile(data []byte) bool {
}
return printable
}
-
-const b64table string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-
-// IntToVLQ writes out value to bytes.Buffer
-func IntToVLQ(v int, buf *bytes.Buffer) {
- v <<= 1
- if v < 0 {
- v = -v
- v |= 1
- }
- for v >= 32 {
- buf.WriteByte(b64table[32|(v&31)])
- v >>= 5
- }
- buf.WriteByte(b64table[v])
-}
-
-// MakeSourceMapLine creates source map mapping's line entry
-func MakeSourceMapLine(tcol, sindex, sline, scol int) string {
- buf := bytes.NewBuffer(nil)
- IntToVLQ(tcol, buf)
- IntToVLQ(sindex, buf)
- IntToVLQ(sline, buf)
- IntToVLQ(scol, buf)
- return buf.String()
-}
diff --git a/cmd/tealdbg/webdbg.go b/cmd/tealdbg/webdbg.go
index 3f99eea70..fe6058d9b 100644
--- a/cmd/tealdbg/webdbg.go
+++ b/cmd/tealdbg/webdbg.go
@@ -171,11 +171,20 @@ func (a *WebPageFrontend) configHandler(w http.ResponseWriter, r *http.Request)
}
// Extract PC from config
- line := req.debugConfig.BreakAtLine
- if line == noBreak {
- s.debugger.RemoveBreakpoint(int(line))
- } else {
- s.debugger.SetBreakpoint(int(line))
+ for line := range req.debugConfig.ActiveBreak {
+ if req.debugConfig.NoBreak {
+ err := s.debugger.RemoveBreakpoint(int(line))
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ } else {
+ err := s.debugger.SetBreakpoint(int(line))
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ }
}
w.WriteHeader(http.StatusOK)
diff --git a/compactcert/abstractions.go b/compactcert/abstractions.go
index 4cf4b7a93..9918b0f2e 100644
--- a/compactcert/abstractions.go
+++ b/compactcert/abstractions.go
@@ -32,7 +32,7 @@ import (
// TransactionSender is an interface that captures the node's ability
// to broadcast a new transaction.
type TransactionSender interface {
- BroadcastSignedTxGroup([]transactions.SignedTxn) error
+ BroadcastInternalSignedTxGroup([]transactions.SignedTxn) error
}
// Ledger captures the aspects of the ledger that are used by this package.
diff --git a/compactcert/builder.go b/compactcert/builder.go
index c590abbfe..27302ed49 100644
--- a/compactcert/builder.go
+++ b/compactcert/builder.go
@@ -347,7 +347,7 @@ func (ccw *Worker) tryBuilding() {
stxn.Txn.GenesisHash = ccw.ledger.GenesisHash()
stxn.Txn.CertRound = rnd
stxn.Txn.Cert = *cert
- err = ccw.txnSender.BroadcastSignedTxGroup([]transactions.SignedTxn{stxn})
+ err = ccw.txnSender.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{stxn})
if err != nil {
ccw.log.Warnf("ccw.tryBuilding: broadcasting compact cert txn for %d: %v", rnd, err)
}
diff --git a/compactcert/worker_test.go b/compactcert/worker_test.go
index 5e72b96bf..aaf1a4e2b 100644
--- a/compactcert/worker_test.go
+++ b/compactcert/worker_test.go
@@ -196,7 +196,7 @@ func (s *testWorkerStubs) Broadcast(ctx context.Context, tag protocol.Tag, data
return nil
}
-func (s *testWorkerStubs) BroadcastSignedTxGroup(tx []transactions.SignedTxn) error {
+func (s *testWorkerStubs) BroadcastInternalSignedTxGroup(tx []transactions.SignedTxn) error {
require.Equal(s.t, len(tx), 1)
s.txmsg <- tx[0]
return nil
diff --git a/config/consensus.go b/config/consensus.go
index ecc9c72e6..7dbe2b7ab 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -118,6 +118,14 @@ type ConsensusParams struct {
//
// Rewards are received by whole reward units. Fractions of
// RewardUnits do not receive rewards.
+ //
+ // Ensure both considerations below are taken into account if RewardUnit is planned for change:
+ // 1. RewardUnits should not be changed without touching all accounts to apply their rewards
+ // based on the old RewardUnits and then use the new RewardUnits for all subsequent calculations.
+ // 2. Having a consistent RewardUnit is also important for preserving
+ // a constant amount of total algos in the system:
+ // the block header tracks how many reward units worth of algos are in existence
+ // and have logically received rewards.
RewardUnit uint64
// RewardsRateRefreshInterval is the number of rounds after which the
@@ -293,6 +301,9 @@ type ConsensusParams struct {
// provide greater isolation for clear state programs
IsolateClearState bool
+ // The minimum app version that can be called in an inner transaction
+ MinInnerApplVersion uint64
+
// maximum number of applications a single account can create and store
// AppParams for at once
MaxAppsCreated int
@@ -1071,6 +1082,7 @@ func initConsensusProtocols() {
v31.LogicSigVersion = 6
v31.EnableInnerTransactionPooling = true
v31.IsolateClearState = true
+ v31.MinInnerApplVersion = 6
// stat proof key registration
v31.EnableStateProofKeyregCheck = true
@@ -1125,6 +1137,7 @@ func initConsensusProtocols() {
vFuture.CompactCertSecKQ = 128
vFuture.LogicSigVersion = 7
+ vFuture.MinInnerApplVersion = 4
Consensus[protocol.ConsensusFuture] = vFuture
}
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 1ec4e8004..1e95d5f99 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -115,7 +115,7 @@ type Local struct {
// SRV-based phonebook
DNSBootstrapID string `version[0]:"<network>.algorand.network"`
- // Log file size limit in bytes
+ // Log file size limit in bytes. When set to 0 logs will be written to stdout.
LogSizeLimit uint64 `version[0]:"1073741824"`
// text/template for creating log archive filename.
diff --git a/config/version.go b/config/version.go
index edfb7d5a3..c8dc1b4fe 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 5
+const VersionMinor = 6
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/secp256k1/secp256_test.go b/crypto/secp256k1/secp256_test.go
index 3ee7d2c0b..5da4e593c 100644
--- a/crypto/secp256k1/secp256_test.go
+++ b/crypto/secp256k1/secp256_test.go
@@ -63,6 +63,8 @@ func compactSigCheck(t *testing.T, sig []byte) {
}
func TestSignatureValidity(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
pubkey, seckey := generateKeyPair()
msg := csprngEntropy(32)
sig, err := Sign(msg, seckey)
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 6f7e51647..8202ce1ca 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -1419,6 +1419,64 @@
}
}
},
+ "/v2/teal/disassemble": {
+ "post": {
+ "description": "Given the base64 encoded program bytes, return the TEAL source code in plain text. This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.",
+ "consumes": [
+ "application/x-binary"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Disassemble program bytes into the TEAL source code.",
+ "operationId": "TealDisassemble",
+ "parameters": [
+ {
+ "description": "TEAL program binary to be disassembled",
+ "name": "source",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Successful disassembly",
+ "$ref": "#/responses/DisassembleResponse"
+ },
+ "400": {
+ "description": "Bad Request - Teal Compile Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Developer API not enabled"
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
+ },
"/v2/catchup/{catchpoint}": {
"post": {
"tags": [
@@ -3040,6 +3098,21 @@
}
}
},
+ "DisassembleResponse": {
+ "description": "Teal disassembly Result",
+ "schema": {
+ "type": "object",
+ "required": [
+ "result"
+ ],
+ "properties": {
+ "result": {
+ "description": "disassembled Teal code",
+ "type": "string"
+ }
+ }
+ }
+ },
"DryrunResponse": {
"description": "DryrunResponse contains per-txn debug information from a dryrun.",
"schema": {
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index cc8288818..bb8b76408 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -392,6 +392,25 @@
},
"description": "Teal compile Result"
},
+ "DisassembleResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "result": {
+ "description": "disassembled Teal code",
+ "type": "string"
+ }
+ },
+ "required": [
+ "result"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Teal disassembly Result"
+ },
"DryrunResponse": {
"content": {
"application/json": {
@@ -3695,6 +3714,85 @@
"x-codegen-request-body-name": "source"
}
},
+ "/v2/teal/disassemble": {
+ "post": {
+ "description": "Given the base64 encoded program bytes, return the TEAL source code in plain text. This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.",
+ "operationId": "TealDisassemble",
+ "requestBody": {
+ "content": {
+ "application/x-binary": {
+ "schema": {
+ "format": "byte",
+ "type": "string"
+ }
+ }
+ },
+ "description": "TEAL program binary to be disassembled",
+ "required": true
+ },
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "result": {
+ "description": "disassembled Teal code",
+ "type": "string"
+ }
+ },
+ "required": [
+ "result"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Teal disassembly Result"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request - Teal Compile Error"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {},
+ "description": "Developer API not enabled"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Disassemble program bytes into the TEAL source code.",
+ "x-codegen-request-body-name": "source"
+ }
+ },
"/v2/teal/dryrun": {
"post": {
"description": "Executes TEAL program(s) in context and returns debugging information about the execution. This endpoint is only enabled when a node's configuration file sets EnableDeveloperAPI to true.",
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index 7f8b2d412..596a8d458 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -422,7 +422,7 @@ func (client RestClient) AccountInformation(address string) (response v1.Account
func (client RestClient) AccountInformationV2(address string, includeCreatables bool) (response generatedV2.Account, err error) {
var infoParams accountInformationParams
if includeCreatables {
- infoParams = accountInformationParams{Exclude: "", Format: "json"}
+ infoParams = accountInformationParams{Exclude: "none", Format: "json"}
} else {
infoParams = accountInformationParams{Exclude: "all", Format: "json"}
}
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index e0036516f..38f902ed3 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -511,7 +511,7 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
result.AppCallTrace = &debug.history
result.GlobalDelta = StateDeltaToStateDelta(delta.GlobalDelta)
if len(delta.LocalDeltas) > 0 {
- localDeltas := make([]generated.AccountStateDelta, len(delta.LocalDeltas))
+ localDeltas := make([]generated.AccountStateDelta, 0, len(delta.LocalDeltas))
for k, v := range delta.LocalDeltas {
ldaddr, err2 := stxn.Txn.AddressByIndex(k, stxn.Txn.Sender)
if err2 != nil {
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index 99246ec71..287e47d19 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -563,27 +563,27 @@ func TestDryrunLocal1(t *testing.T) {
if response.Txns[0].LocalDeltas == nil {
t.Fatal("empty local delta")
}
- addrFound := false
+
+ // Should be a single account
+ assert.Len(t, *response.Txns[0].LocalDeltas, 1)
+
+ lds := (*response.Txns[0].LocalDeltas)[0]
+ assert.Equal(t, lds.Address, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
+
valueFound := false
- for _, lds := range *response.Txns[0].LocalDeltas {
- if lds.Address == "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ" {
- addrFound = true
- for _, ld := range lds.Delta {
- if ld.Key == b64("foo") {
- valueFound = true
- assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
- assert.Equal(t, *ld.Value.Bytes, b64("bar"))
+ for _, ld := range lds.Delta {
+ if ld.Key == b64("foo") {
+ valueFound = true
+ assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
+ assert.Equal(t, *ld.Value.Bytes, b64("bar"))
- }
- }
}
}
- if !addrFound {
- t.Error("no local delta for AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
- }
+
if !valueFound {
t.Error("no local delta for value foo")
}
+
if t.Failed() {
logResponse(t, &response)
}
@@ -644,24 +644,22 @@ func TestDryrunLocal1A(t *testing.T) {
if response.Txns[0].LocalDeltas == nil {
t.Fatal("empty local delta")
}
- addrFound := false
+
+ assert.Len(t, *response.Txns[0].LocalDeltas, 1)
+
+ lds := (*response.Txns[0].LocalDeltas)[0]
+ assert.Equal(t, lds.Address, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
+
valueFound := false
- for _, lds := range *response.Txns[0].LocalDeltas {
- if lds.Address == "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ" {
- addrFound = true
- for _, ld := range lds.Delta {
- if ld.Key == b64("foo") {
- valueFound = true
- assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
- assert.Equal(t, *ld.Value.Bytes, b64("bar"))
+ for _, ld := range lds.Delta {
+ if ld.Key == b64("foo") {
+ valueFound = true
+ assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
+ assert.Equal(t, *ld.Value.Bytes, b64("bar"))
- }
- }
}
}
- if !addrFound {
- t.Error("no local delta for AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
- }
+
if !valueFound {
t.Error("no local delta for value foo")
}
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index c1022c618..e37b75489 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -311,152 +311,153 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XPcNrLgv4KafVWOfcMZyR/ZtapS7xQryeriOC5L2Xf3bF+CIXtmsCIBBgClmfj0",
- "v1+hAZAgCc5QH6s81/NPtoZAo9FoNPoLjU+TVBSl4MC1mhx9mpRU0gI0SPyLpqmouE5YZv7KQKWSlZoJ",
- "Pjny34jSkvHVZDph5teS6vVkOuG0gKaN6T+dSPi9YhKyyZGWFUwnKl1DQQ1gvS1N6xrSJlmJxIE4tiBO",
- "TybXOz7QLJOgVB/Ln3m+JYyneZUB0ZJyRVPzSZErptdEr5kirjNhnAgORCyJXrcakyWDPFMzP8nfK5Db",
- "YJZu8OEpXTcoJlLk0MfzlSgWjIPHCmqk6gUhWpAMlthoTTUxIxhcfUMtiAIq0zVZCrkHVYtEiC/wqpgc",
- "vZ8o4BlIXK0U2CX+dykB/oBEU7kCPfk4jU1uqUEmmhWRqZ066ktQVa4VwbY4xxW7BE5Mrxn5qVKaLIBQ",
- "Tt59/4o8e/bspZlIQbWGzDHZ4Kya0cM52e6To0lGNfjPfV6j+UpIyrOkbv/u+1c4/pmb4NhWVCmIb5Zj",
- "84WcngxNwHeMsBDjGla4Di3uNz0im6L5eQFLIWHkmtjG97oo4fh/6qqkVKfrUjCuI+tC8Cuxn6MyLOi+",
- "S4bVCLTal4ZS0gB9f5C8/PjpcHp4cP2X98fJf7o/Xzy7Hjn9VzXcPRSINkwrKYGn22QlgeJuWVPep8c7",
- "xw9qLao8I2t6iYtPCxT1ri8xfa3ovKR5ZfiEpVIc5yuhCHVslMGSVrkmfmBS8dyIKQPNcTthipRSXLIM",
- "sqmRvldrlq5JSpUFge3IFctzw4OVgmyI1+Kz27GZrkOSGLxuRQ+c0H9dYjTz2kMJ2KA0SNJcKEi02HM8",
- "+ROH8oyEB0pzVqmbHVbkfA0EBzcf7GGLtOOGp/N8SzSua0aoIpT4o2lK2JJsRUWucHFydoH93WwM1Qpi",
- "iIaL0zpHzeYdIl+PGBHiLYTIgXIknt93fZLxJVtVEhS5WoNeuzNPgioFV0DE4p+QarPs/+vs5zdESPIT",
- "KEVX8JamFwR4KrLhNXaDxk7wfyphFrxQq5KmF/HjOmcFi6D8E92woioIr4oFSLNe/nzQgkjQleRDCFmI",
- "e/isoJv+oOey4ikubjNsS1EzrMRUmdPtjJwuSUE33xxMHTqK0DwnJfCM8RXRGz6opJmx96OXSFHxbIQO",
- "o82CBaemKiFlSwYZqaHswMQNsw8fxm+GT6NZBeh4IIPo1KPsQYfDJsIzZuuaL6SkKwhYZkZ+cZILv2px",
- "AbwWcGSxxU+lhEsmKlV3GsARh96tXnOhISklLFmEx84cOYz0sG2ceC2cgpMKrinjkBnJi0gLDVYSDeIU",
- "DLjbmOkf0Quq4OvnQwd483Xk6i9Fd9V3rvio1cZGid2SkXPRfHUbNq42tfqPMP7CsRVbJfbn3kKy1bk5",
- "SpYsx2Pmn2b9PBkqhUKgRQh/8Ci24lRXEo4+8CfmL5KQM015RmVmfinsTz9VuWZnbGV+yu1Pr8WKpWds",
- "NUDMGteoNYXdCvuPgRcXx3oTNRpeC3FRleGE0pZVutiS05OhRbYwb8qYx7UpG1oV5xtvady0h97UCzmA",
- "5CDtSmoaXsBWgsGWpkv8Z7NEfqJL+Yf5pyzzGE0NA7uDFp0CzllwXJY5S6mh3jv32Xw1ux+seUCbFnM8",
- "SY8+BbiVUpQgNbNAaVkmuUhpnihNNUL6NwnLydHkL/PGqzK33dU8GPy16XWGnYwiapWbhJblDWC8NQqN",
- "2iEljGTGTygfrLxDVYhxu3qGh5iRvTlcUq5njSHSEgT1zn3vRmrobXUYS++OYTVIcGIbLkBZvdY2fKRI",
- "QHqCZCVIVlQzV7lY1D98dVyWDQXx+3FZWnqgTggM1S3YMKXVY5w+bbZQOM7pyYz8EMJGBVvwfGtOBatj",
- "mENh6Y4rd3zVHiM3hwbiI0VwOYWcmaXxZDDK+31wHBoLa5EbdWcvr5jGf3dtQzYzv4/q/HmwWEjbYeZC",
- "88lRzlou+EtgsnzV4Zw+4zgnzowcd/vejm0MlDjD3IpXdq6nhbuDjjUJryQtLYLuiz1EGUfTyzayuN5R",
- "mo4UdFGcgz0c8Bpideu9tnc/RDFBVujg8G0u0ot72O8LA6e/7RA8WQPNQJKMahrsK7df4oc1dvw79kOJ",
- "ADKi0f+M/6E5MZ8N4xu5aMEaS50h/4rAr54ZA9eqzXYk0wANb0EKa9MSY4veCMtXzeA9GWHJMkZGfGfN",
- "aII9/CTM1Bsn2fFCyNvxS4cROGlcf4QaqMF2mXZWFptWZeLoE3Ef2AYdQE20pa9FhhTqgo/RqkWFM03/",
- "BVRQBup9UKEN6L6pIIqS5XAP+3VN1bo/CWPPPXtKzv5+/OLw6a9PX3xtDJJSipWkBVlsNSjylVOjidLb",
- "HB73Z4b6bJXrOPSvn3uHURvuXgohwjXsMTvqHIxksBQj1j1qsDuRW1ndh1INUgoZMfGRdbRIRZ5cglRM",
- "RLy1b10L4lr4g7bs/m6xJVdUETM2ep8qnoGcxSivNxxRYxoKte+gsKDPN7yhjQNIpaTb3grY+UZm58Yd",
- "syZt4ntnhiIlyERvOMlgUa1aOtlSioJQkmFHFIhvRAZGn67UPUiBBliDjFmIEAW6EJUmlHCRASrflYrL",
- "h4HQDfqM0dWtQ5Gj1/b8WYBR9FJardaaGCtaxJa26ZjQ1C5KgmeFGvB01S5K28oOZ8MCuQSaGQUQOBEL",
- "505yji6cJEUvtPYBZiedIipxC69SihSUMoq7Vcf2oubb2VXWO+iEiCPC9ShECbKk8pbIaqFpvgdRbBND",
- "t1YnnA+uj/W44XctYHfwcBmpNLq75QKju5jdnYOGIRKOpMklSPRF/UvXzw9y2+WryoFIsTuBz1mBJgCn",
- "XChIBc9UFFhOlU72bVvTqKUmmBkEOyW2UxHwgBn6miptPZKMZ6gyWnGD41j71AwxjPDgiWIg/8MfJn3Y",
- "qZGTXFWqPllUVZZCashic+Cw2THWG9jUY4llALs+vrQglYJ9kIeoFMB3xLIzsQSiurbfncu+Pzm0cs05",
- "sI2SsoVEQ4hdiJz5VgF1w2jZACLGvqh7IuMw1eGcOkQ3nSgtytLsP51UvO43RKYz2/pY/9K07TMX1Y1c",
- "zwSY0bXHyWF+ZSlr46RranQ7hEwKemHOJtTUrOu0j7PZjIliPIVkF+ebbXlmWoVbYM8mHVCSXSZGMFpn",
- "c3T4N8p0g0ywZxWGJjygsb+lUrOUlahJ/Ajbezf3uwNELX+SgaYsh4wEH1CAo+yt+xPrC+/CvJ2iNUoJ",
- "7aPf00Ij08mZwgOjjfwFbNEF+NYGWc+D0Ow9aIoRqGZ3U04QUR+6MQdy2AQ2NNX51hxzeg1bcgUSiKoW",
- "BdPaRs3biqQWZRICiBquO0Z0rgMboPQrMMaXcYaggun1l2I6sWrLbvzOO4pLixxOYSqFyEe4WHvEiGIw",
- "ygVLSmFWnbkkDR/J95zUQtIpMeg3qoXnI9UiM86A/B9RkZRyVMAqDfWJICSKWTx+zQjmAKvHdM7WhkKQ",
- "QwFWr8QvT550J/7kiVtzpsgSrnxmk2nYJceTJ2glvRVKtzbXPVi8ZrudRmQ7WvTmoHA6XFemzPZa9w7y",
- "mJV82wHuB8U9pZRjXDP9OwuAzs7cjJl7yCNrqtb7545wRzk0AtCxedt1l0Is78lBFI9so3HigtWmFVlW",
- "3CJVKWeOYPzGOzTEclpnL9isZRvZrgrsjf9fU+dwmkybkLRtYA7k5vPHiErJsk0s8yCDTWxR3B5Dc+qR",
- "sT22CqLhHpTMYhlJPgJ5kbupdWQHKcBsarVmpQHZJEpsNbSSLP/vV/9+9P44+U+a/HGQvPwf84+fnl8/",
- "ftL78en1N9/8v/ZPz66/efzv/xZTrZVmi7hf7++G0GJJnIzf8FNuPfNLIa1BtnV6nlg+PN5aAmRQ6nUs",
- "q7GUoFA22uzEUq+bRQXoOFFKKS6BTwmbwawrY7MVKO9NyoEuMbsOjQoxJtpX7wfLb545AqqHExklyGL8",
- "g7Er5E3czcbqyLf3oL1YQES26emtdWW/imWYEuo2itoqDUXf4WW7/jqg7r/zynJvUwmeMw5JIThso7cg",
- "GIef8GOstz3vBjqj5jHUt2tMtPDvoNUeZ8xi3pW+uNqBgH9bR2zvYfG7cDu+zjAZFn01kJeEkjRn6MkR",
- "XGlZpfoDp2grBuwaiZN4C3jYe/DKN4m7KyLeBAfqA6fK0LC2IKM+8CVEzqzvAbwTQVWrFSjd0ZqXAB+4",
- "a8U4qTjTOFZh1iuxC1aCxGDFzLYs6JYsaY7Ojj9ACrKodFuPxFNPaZbnzvFqhiFi+YFTbWSQ0uQnxs83",
- "CM6nxnme4aCvhLyoqRA/olbAQTGVxOX+D/Yrin83/bU7CvAChf3s5c1Dy32PeyyjzGF+euJsrNMTVKQb",
- "l2sP9wfzwxWMJ1EmM4pRwTgmJnd4i3xlzAHPQI8b561b9Q9cb7hhpEuas8woT7dhh66I6+1Fuzs6XNNa",
- "iI5bxc/1YywevhJJSdMLDIdOVkyvq8UsFcXc25bzlajtzHlGoRAcv2VzWrK5KiGdXx7uUXTvIK9IRFxd",
- "TydO6qh798Q4wLEJdcesHZr+by3Iox++Oydzt1LqkU0vtaCDvMCIO8ClvrQiVmby9nqUza/9wD/wE1gy",
- "zsz3ow88o5rOF1SxVM0rBfJbmlOewmwlyJHPpjmhmn7gPRE/eIMxyGMiZbXIWUouwqO42Zr2VkofwocP",
- "7w2DfPjwsRf+6B+cbqjoHrUDJFdMr0WlE5d2n0i4ojKLoK7qtGuEbC/N7Bp1Shxsy5Eurd/Bj4tqWpaq",
- "m4XZn35Z5mb6ARsql2NolowoLaQXgkYyWmxwfd8IZ3NJeuXvbFQKFPmtoOV7xvVHknyoDg6eAWmlJf7m",
- "ZI3hyW0JLcfRrbJEu04jnLhVqGCjJU1KugIVnb4GWuLq40FdoIsyzwl2a6VD+uQBBNVMwNNjeAEsHjdO",
- "7cLJndle/v5kfAr4CZcQ2xjp1Hj+b7teQYLkrZerk2TZW6VKrxOzt6OzUobF/crU16pWRib7cIxiK242",
- "gbuBtgCSriG9gAwvw0BR6u201d1H/NwJ50UHU/bSmM3gwpsN6GNbAKnKjDodgPJtN8VcgdY+r/4dXMD2",
- "XDQXI26SU97OdFZDGxU5NTiMDLOG29bB6C6+ix5jdmdZ+oRhTI7zbHFU84XvM7yR7Ql5D5s4xhStTNwh",
- "QlAZIYRl/gES3GKiBt6dWD82PaPeLOzJF3HzeNlPXJNGa3MR4HA2mGBsvxeAN1DFlSILqiAjwl2etNm8",
- "gRSrFF3BgO8pdHOOzJltuUYRyL5zL3rSiWX3QOudN1GUbePEzDnKKWC+GFZBP2En7u9Hsp50nMGMYE0E",
- "R7BFjmpSnXJghQ6VLXezveQ9hFqcgUHyRuHwaLQpEmo2a6r8vU68/ur38igd4F+Ynb7rMtJpELIO7rjW",
- "V428zO3u07jj1l46KlRz+Sj02o64SDSduCyq2HIIjgpQBjms7MRtY88oTaZ8s0AGj5+Xy5xxIEks+k2V",
- "EimzF3ObY8aNAUY/fkKI9T2R0RBibBygjREiBEzeiHBv8tVNkOQu05962BhbCv6GeCqgzW8yKo8ojQhn",
- "fCAzzUsA6lIm6vOrk7iDYAjjU2LE3CXNjZhzTtQGSO9qDKqtnYswLkb5eEid3eH6swfLjeZkj6LbzCbU",
- "mTzScYVuB8a7VYnYEiiklzN9a1oNnaVjhh44vodo9VVwqeZWCHQ8EU3dGWf57bXQ2mdz/yRrRPq0uSXq",
- "UzNjvD/EP9FVGqBf3xFcX4N52z2uo0Z6O3bZvgEU6E8xUWz2SN812nfAKsgBNeKkpUEkFzGHuVHsAcXt",
- "me8WWO54z4jy7eMgIC5hxZSGxnVlTiXvi33ocBfFe81CLIdnp0u5NPN7J0Qto+39ORu+C6f54DO4FBqS",
- "JZNKJ+j3i07BNPpeoUX5vWkaVxTaIXdb4oNlcdmAw17ANslYXsX51Y3744kZ9k3thFHV4gK2qA4CTddk",
- "gSVpook4O4a2uVo7J/zaTvg1vbf5jtsNpqkZWBp2aY/xmeyLjuTdJQ4iDBhjjv6qDZJ0h4DEg/8Ech27",
- "ihMoDXZzZqbhbJfrsbeZMg97l6EUYDF8RllI0bkE1vLOWTDMPjDmHtNBRZf+vYGBPUDLkmWbjiPQQh00",
- "F+mNrH1/Y7ZDBVxdB2wPBQKnXyw1VYJqX45utFtbm4eHc5uNosx5+wpzKBDCoZjyleX6hDKsjeWP9tHq",
- "HGj+I2z/YdridCbX08nd/IYxWjuIe2j9tl7eKJ0xIGb9SK0wwA1JTstSikuaJ867OsSaUlw61sTm3hn7",
- "wKIu7sM7/+749VuH/vV0kuZAZVKrCoOzwnblZzMrew97YIP4ylXG4PE6u1Ulg8Wv78eGHtmrNbgqQYE2",
- "2qtq0Hjbg63oPLTLeFx+r7/VBQbsFHcECKCs4wON78qGB9ohAXpJWe6dRh7bgRg6Tm5caYyoVAgB3Dm0",
- "EESIknsVN73dHd8dDXftkUnhWDvqGBW2VJcigndTsowKib4oZNWCYk0C6xLoCydeFYnZfonKWRp3MPKF",
- "MszBbeDINCbYeEAZNRArNhCH5BULYJlmaoSh20EyGCNKTF/fYoh2C+FqrFac/V4BYRlwbT5J3JWdjYpF",
- "IJyruX+cGt2hP5YDbN3TDfi76BhhPY7uiYdI7FYwwjBVD92T2mT2E63dMeaHwB9/g2h3OGLvSNwRqXb8",
- "4bjZpgyt2+GmsCRqX/4ZxrDls/bXY/XGqysMMjBGtL4qU8lSij8gbueheRzJW/cVSBhmTf4BfBa5/tMV",
- "MbV3pykT24w+uNxD2k3ohWpH6Ae4Hlc+iElhtQfvnqXcLrUtd9jKC4kzTJjLNbfwG4ZxOPfy33J6taCx",
- "UhhGyTA4HTfRz5YjWQviO3vaO583c0VhZiQIpNZtmb3RVYJsrpT0bw/fUmGww45WFRrNALk21AmmNviV",
- "KxEBU/Erym3VTNPPbiXXW4F1fpleV0LifUwV93lnkLKC5nHNIUPqt++vZmzFbM3ISkFQlNABssV2LRe5",
- "wo42vtyQ5nRJDqZB2VO3Ghm7ZIotcsAWh7bFgiqU5LUjqu5ipgdcrxU2fzqi+brimYRMr5UlrBKkVurQ",
- "vKkjNwvQVwCcHGC7w5fkK4xZKXYJjw0V3fk8OTp8iU5X+8dB7ABwxWF3SZMMxcl/OHES52MM2lkYRnA7",
- "qLPo7UJb0XtYcO3YTbbrmL2ELZ2s27+XCsrpCuJpEsUenGxfXE10pHXowjNbjlZpKbaE6fj4oKmRTwM5",
- "n0b8WTRIKoqC6cJFNpQoDD81FQftoB6crW3ryuJ4vPxHDBCWPj7SMSIf1mlqz7fYrDGM+4YW0CbrlFB7",
- "CTdnTejeV7Iip/4qP9YJqssDWdqYsczUUc3BSP6SlJJxjYZFpZfJ30i6ppKmRvzNhtBNFl8/j9RGapdD",
- "4TdD/MHpLkGBvIyTXg6wvdchXF/yFRc8KYxEyR43OdbBrhyMZMazxbxE7yYL7gY9VikzUJJBdqta7EYD",
- "SX0nxuM7AN6RFev53IgfbzyzB+fMSsbZg1ZmhX5599ppGYWQscIuzXZ3GocELRlcYuJafJEMzDuuhcxH",
- "rcJdsP9zIw9e5QzUMr+XY4bAtxXLs380d0Y65eUk5ek66vdfmI6/NuV/6ynbfRytI7KmnEMeBWfPzF/9",
- "2Ro5/f8pxo5TMD6ybbdsnJ1uZ3IN4m00PVJ+QENepnMzQEjVdhJ9nXWZr0RGcJymaEXDZf1KeEEJrd8r",
- "UDp2aQ8/2MwP9O8Yu8BWcCLAM9SqZ+QH+3zHGkjrTj1qs6yocns/G7IVSOd4rMpc0GxKDJzz745fEzuq",
- "7WNrWdoKUitU5tqz6Nj1QYWbcTmEvixlPL95PJzdCZdm1kpjiQulaVHGrq6YFue+Ad6PCX2dqOaF1JmR",
- "E6thK6+/2UEMPyyZLIxmWkOzMh55wvxHa5quUXVtSZNhlh9f+sxzpQoqntcFTOsiNbjvDN6u+pktfjYl",
- "wtgXV0zZVxvgEtq3ZeqrY8508rdn2tOTFeeWU6IyetfVxtuQ3SNnA9reHRrFrEP4GyouSlQyhZtWgjvD",
- "XtGqD92ycr1S5/ZWcV1707/Gk1IuOEux5kLwTkSNsnsBYkysYER5iq4zym9xt0MjmytazK5OJ3JUHCxv",
- "5wWhI1zfWRl8NYtqucP+qfGpgTXVZAVaOckG2dQXLHT+EsYVuKJD+BhIICeFbMVfUEJGQ3pJ7fq9IRth",
- "7vyAAvy9+fbGmUeYVHrBOCpCjmwuf9V6NLBAvTbaE9NkJUC5+bSv5qv3ps8Mr6dnsPk48wXtEYYNX5hp",
- "21hdH9Sxj9y5SJlp+8q0JTbrsP65laZoBz0uSzdoNNWoXuFYycVBAkciMIl3gQfEreGH0Haw286QO56n",
- "htHgEgN2UOI53GOMunplpwztJc0ry1HYgthUl+j9SsYjaLxmHJrnFiIHRBo9EnBhcL8O9FOppNqqgKNk",
- "2jnQHKN0MYGmtHPR3hVUZ4GRJDhHP8bwMjaFNwcER92gUdwo39avPBjuDpSJV/i8jCNkv4wmalVOicow",
- "7bhTWDMmOIzg9iVp2wdAfxv0dSLbXUtqd85NTqKhm2SpiOmb320grWwQWiifhUxSvJodnBdRjyZTxngq",
- "FnkkH+yk/hhUq8UU8cUW/43VWBomiYsS3zhPyYeEseONFdY2pJ66aZgpUWyVjKcECvO7k6MZ+nYc1vS/",
- "VxbLxaqNyAPXQtklXsI1igmW74zEDu819wqHWZleXzvGrCDhq6ijvVZfmGuLAzxDepXE0BtdF8Te7Q8Y",
- "Lm09xVNnIDcwqABD7cFmwxtDGYLpYEIr1e5eiaakKV/Rlwm2HnUMgk0vsHWw7RN6UdfOUEqBzSgwn3u9",
- "x6lkPQUXYe8kqM9V6SP0o0+EIyVlLnbXCIs+ZV3KbD+JeUwyXbPA3Um4RFQEEptJr0Dgbg7pJSIHyfS2",
- "jtts/IX24zowiuEarMK9Au7KcLdTDEcnOi2XkGp2uSfx+z+MstwkFU+9Om3fLgjywFmdOONfWryhlt8g",
- "tCsveyc+QdWMO6MzlPZ5AdtHirS4IVpYbuoZ9Tb3JZECWFEkMSwiVCzwYO1/5wtmquYMpIIP9Nnu0BRz",
- "GqzoG1xjuOVYniUJDa827BjyUsQMiFFjma43uvCDOSBDueH9mprDp9cJljBVdTX2+inFII/D2Indem9X",
- "7r4mpunXLi9/cxOU/83fybGj2Cc6m5rD6GC8ojLzLaIas1fGk4Fsq27+sk0TZ3Gkl/XIrEnL6KfwRuoc",
- "YPJNmgvF+CoZymBqZ0KEr/xgvAd9E1isFPFagnS1xrV/ATXRwqdx7MJjFyncizS3IYIarNpnkRu88fuu",
- "udKMxZ2off/WxbLCCRIJBTXYyeDi8fCYu4j9yn73Oau+uE+nlFYErufXZO/NYZ+Qw1SPiCHXL4k7Lffn",
- "wt7GVGGc26ccVOwWMjekDJ1YpRRZldoDOtwY4E260Xf8d4iSqJaf9mfZU9hyrHjxOrhZcAHbuVWa0jXl",
- "TemR9ra2xQjtHIKbfJ3VvlcrLq6w5is7gdW94PlnWkLTSSlEngx4rU77l6m7e+CCpReQEXN2+FD2QFVf",
- "8hU6S+qwxNV66y8PlyVwyB7PCDG2VFHqrY9QtMuIdQbnj/Su8Tc4albZ+gbOSJt94PEsDPui9B3lmwez",
- "W6opMMLvjkNZIHtuK28GLnJLehWpcT32ea5IzKBbd7hhKotFTEu55dW1Ufu7b6hFWD+8dLDH/rloWXW2",
- "UE4nTiAk3LN1FzhIb2jd9a9TjJ0ezgOlWqWgP8/RC9Ci7QDtxxC+cU30iTvsUdCLMR6FeFEP0x1dGpYg",
- "WBGHIKrkt8PfiISle97+yRMc4MmTqWv629P2Z2N9PXkS3ZkP5sxovQLmxo1xzD+G4so2djqQwtBZj4rl",
- "2T7GaCWkNNUqMeXiV5e686fUy/zVmsj9repKB97EjdpdBCRMZK6twYOhglSTEVkmrlskpwQPm7SSTG/x",
- "RpG3qNiv0ZvaP9ROGPe0ZJ2D7lKg7SPuLiOqcdk0727/IOzjcIU569GJrbGg/3cbWpQ5uI3yzaPFX+HZ",
- "355nB88O/7r428GLgxSev3h5cEBfPqeHL58dwtO/vXh+AIfLr18unmZPnz9dPH/6/OsXL9Nnzw8Xz79+",
- "+ddH/tFri2jzoPT/xqKyyfHb0+TcINvQhJasfsfDsLEvUElT3InGJsknR/6n/+l32CwVRQPe/zpx6XGT",
- "tdalOprPr66uZmGX+QpttESLKl3P/Tj99xPentapO/bKBa6ozcowrICL6ljhGL+9++7snBy/PZ01DDM5",
- "mhzMDmaHWAe6BE5LNjmaPMOfcPescd3njtkmR5+up5P5GmiOxcHNHwVoyVL/SV3R1QrkzFXqND9dPp37",
- "yP/8k7NPr3d9m4dFb+afWmZ8tqcn1gWZf/LXXXa3bt0nce6LoMNILIaHtC+EzT+hPTj4exuNT3rDsuu5",
- "dz+5Hu6lnfmn5umra7sLc4i5jmwqFw1eypoaex1fBFX2V7PxfAY5U+2X0mouOs0M95her+pnwILL80fv",
- "e+qXBUQ8pMjz/q2Rhh/3r0V5q30j0N8fJC8/fjqcHh5c/8UIbPfni2fXI33AzQum5KyWxiMbfuw8Ev/0",
- "4OC/2fuvz2844506dytMFinX+y3NiM9uxLEPH27sU44eeCM4iT0YrqeTFw85+1NuWJ7mBFsG9376S/8L",
- "v+DiivuW5hSvioLKrd/GqiUU/ON+eFbQlUILTLJLqmHyEU38WHh/QLjgQ7s3Fi74evAX4fJQwuXzeFb5",
- "6Q03+Oc/4y/i9HMTp2dW3I0Xp06Vswn0c/sCSaPh9crLriCayY859XTXg3tdCfsD6N77gZM7ipg/7SnB",
- "/9775PnB84fDoF0b8UfYkjdCk+8x7PWZ7tlx22eXJtSxjLKsx+RW/IPS34psu4NChVqVLuk1opcsGDco",
- "90+X/tscvff9LmBLbCjYu/zd+7Ztfej6jjLgs32K8IsM+SJDpB3+2cMNfwbykqVAzqEohaSS5VvyC6+v",
- "LN3erMuyaJpde+v3ZJqxRlKRwQp44gRWshDZ1peraQG8AOua7ikq80/tmpPW/TXoljrB3+uncPpIL7bk",
- "9KSnwdhuXUn77RabdizGiE3YRXGnZdiVRQPG2C42NxNZCU0sFTI3qS+C54vguZPyMnrzxPSXqDXhHTnd",
- "M3nq7+7GbrdT3R96jM3xp27X/7Ivu38RCV9Ewu1Fwg8Q2Yy4a52QiDDdbTy9fQGBmVdZt3I7pi/45lVO",
- "JVEw1k1xjBCdc+IhpMRDG2lRWlkbjXICG6bwJZLIgt2v3fZFxH0RcZ9R1Gq/oGkrIje2dC5gW9Cytm/U",
- "utKZuLI1b6JSEcvB0tzVjsNqbnUmhhbEA2guOJGf3Y2+fIsvorPMqHGaFWBUqlrWmc4+bbXJmzUQmif8",
- "VozjACgqcBRbJJEGVwcUpILbB686sTaH2RtrE8aE7O8VoERztHE4TqatYItbxkhJwjvrX/3YyPUOX3r9",
- "alXr7/kVZTpZCuluDiGF+lkYGmg+d9UdOr/aO9jBj0GGRvzXeV2LN/qxm1sS++pSP3yjJnksTMbClarT",
- "sN5/NATHUm5uEZvcoqP5HJPq10Lp+eR6+qmTdxR+/FjT+FN9vjpaX3+8/v8BAAD//+/wZ2fprwAA",
+ "H4sIAAAAAAAC/+x9a3PcNrbgX0H13Co/ttkt+ZEZqyp1V7ETjzaO47KUubvX9iZo8nQ3RiTAAKDUHa/+",
+ "+xYOABIkwW7qMcp1XX+y1cTj4ODg4LzxeZKKohQcuFaTo8+TkkpagAaJf9E0FRXXCcvMXxmoVLJSM8En",
+ "R/4bUVoyvppMJ8z8WlK9nkwnnBbQtDH9pxMJv1dMQjY50rKC6USlayioGVhvS9O6HmmTrETihji2Q5y8",
+ "mlzt+ECzTIJSfSh/5vmWMJ7mVQZES8oVTc0nRS6ZXhO9Zoq4zoRxIjgQsSR63WpMlgzyTM38In+vQG6D",
+ "VbrJh5d01YCYSJFDH86XolgwDh4qqIGqN4RoQTJYYqM11cTMYGD1DbUgCqhM12Qp5B5QLRAhvMCrYnL0",
+ "YaKAZyBxt1JgF/jfpQT4AxJN5Qr05NM0trilBploVkSWduKwL0FVuVYE2+IaV+wCODG9ZuSnSmmyAEI5",
+ "ef/DS/L06dMXZiEF1RoyR2SDq2pmD9dku0+OJhnV4D/3aY3mKyEpz5K6/fsfXuL8p26BY1tRpSB+WI7N",
+ "F3LyamgBvmOEhBjXsMJ9aFG/6RE5FM3PC1gKCSP3xDa+000J5/9TdyWlOl2XgnEd2ReCX4n9HOVhQfdd",
+ "PKwGoNW+NJiSZtAPB8mLT58Pp4cHV3/5cJz8p/vz+dOrkct/WY+7BwPRhmklJfB0m6wkUDwta8r7+Hjv",
+ "6EGtRZVnZE0vcPNpgaze9SWmr2WdFzSvDJ2wVIrjfCUUoY6MMljSKtfET0wqnhs2ZUZz1E6YIqUUFyyD",
+ "bGq47+WapWuSUmWHwHbkkuW5ocFKQTZEa/HV7ThMVyFKDFw3wgcu6L8uMpp17cEEbJAbJGkuFCRa7Lme",
+ "/I1DeUbCC6W5q9T1LitytgaCk5sP9rJF3HFD03m+JRr3NSNUEUr81TQlbEm2oiKXuDk5O8f+bjUGawUx",
+ "SMPNad2j5vAOoa+HjAjyFkLkQDkiz5+7Psr4kq0qCYpcrkGv3Z0nQZWCKyBi8U9Itdn2/3X681siJPkJ",
+ "lKIreEfTcwI8FdnwHrtJYzf4P5UwG16oVUnT8/h1nbOCRUD+iW5YURWEV8UCpNkvfz9oQSToSvIhgOyI",
+ "e+isoJv+pGey4ilubjNtS1AzpMRUmdPtjJwsSUE33x5MHTiK0DwnJfCM8RXRGz4opJm594OXSFHxbIQM",
+ "o82GBbemKiFlSwYZqUfZAYmbZh88jF8PnkayCsDxgwyCU8+yBxwOmwjNmKNrvpCSriAgmRn5xXEu/KrF",
+ "OfCawZHFFj+VEi6YqFTdaQBGnHq3eM2FhqSUsGQRGjt16DDcw7Zx7LVwAk4quKaMQ2Y4LwItNFhONAhT",
+ "MOFuZaZ/RS+ogm+eDV3gzdeRu78U3V3fueOjdhsbJfZIRu5F89Ud2LjY1Oo/QvkL51ZsldifexvJVmfm",
+ "KlmyHK+Zf5r982ioFDKBFiL8xaPYilNdSTj6yB+bv0hCTjXlGZWZ+aWwP/1U5ZqdspX5Kbc/vRErlp6y",
+ "1QAya1ij2hR2K+w/Zrw4O9abqNLwRojzqgwXlLa00sWWnLwa2mQ75nUJ87hWZUOt4mzjNY3r9tCbeiMH",
+ "gBzEXUlNw3PYSjDQ0nSJ/2yWSE90Kf8w/5RlHsOpIWB30aJRwBkLjssyZyk12HvvPpuv5vSDVQ9o02KO",
+ "N+nR5wC2UooSpGZ2UFqWSS5SmidKU40j/ZuE5eRo8pd5Y1WZ2+5qHkz+xvQ6xU5GELXCTULL8hpjvDMC",
+ "jdrBJQxnxk/IHyy/Q1GIcbt7hoaY4b05XFCuZ40i0mIE9cn94GZq8G1lGIvvjmI1iHBiGy5AWbnWNnyg",
+ "SIB6gmgliFYUM1e5WNQ/PDwuywaD+P24LC0+UCYEhuIWbJjS6hEunzZHKJzn5NWMvA7HRgFb8HxrbgUr",
+ "Y5hLYemuK3d91RYjt4ZmxAeK4HYKOTNb49FghPe7oDhUFtYiN+LOXloxjf/u2oZkZn4f1fnLILEQt8PE",
+ "heqTw5zVXPCXQGV52KGcPuE4I86MHHf73oxszChxgrkRrezcTzvuDjzWKLyUtLQAui/2EmUcVS/byMJ6",
+ "S246ktFFYQ7OcEBrCNWNz9re8xCFBEmhA8N3uUjP7+C8L8w4/WOHw5M10Awkyaimwbly5yV+WWPHv2M/",
+ "5AggIxL9z/gfmhPz2RC+4Yt2WKOpM6RfEdjVM6PgWrHZzmQaoOItSGF1WmJ00WtB+bKZvMcjLFrG8Ijv",
+ "rRpNsIdfhFl6YyQ7Xgh5M3rpEAInjemPUDNqcFymnZ3FplWZOPxEzAe2QWegxtvSlyJDDHWHj+GqhYVT",
+ "Tf8FWFBm1LvAQnugu8aCKEqWwx2c1zVV6/4ijD739Ak5/fvx88Mnvz55/o1RSEopVpIWZLHVoMhDJ0YT",
+ "pbc5POqvDOXZKtfx0b955g1G7XH3YggBrscec6LOwHAGizFizaMGuldMmeusWNwJHofWmjWzZMRBksHe",
+ "VV53ec0023CJciuru9AbQEohI1YMPB1apCJPLkAqJiIG6XeuBXEtvCxRdn+30JJLqoiZGw1sFc9AzmLE",
+ "pTccQWMaCrXvLrRDn214gxs3IJWSbnvot+uNrM7NO2Zf2sj39hpFSpCJ3nCSwaJatcTOpRQFoSTDjsjz",
+ "34oMjMpQqTtgdM1gDTBmI0IQ6EJUmlDCRQaoX1QqzgIHvFNoFkdrvg65ql7bK3YBRpZNabVaa1KVBG3V",
+ "va1tOiY0tZuS4HWoBox5tRXWtrLTWc9HLoFmRsYFTsTCWcycLQ8XSdHQrr0P3THgiNTfgquUIgWljG5i",
+ "Jc69oPl2dpf1Djwh4AhwPQtRgiypvCGwWmia7wEU28TArSUmZ2bsQz1u+l0b2J083EYqjXpiqcCIZ+Z0",
+ "56BhCIUjcXIBEs1t/9L985PcdPuqcsAZ7oSMM1aglsMpFwpSwTMVHSynSif7jq1p1JKEzAqCkxI7qTjw",
+ "gKb9hiptja6MZygVW3aD81gV3EwxDPDgjWJG/oe/TPpjp4ZPclWp+mZRVVkKqSGLrYHDZsdcb2FTzyWW",
+ "wdj19aUFqRTsG3kIS8H4Dll2JRZBVNcmCueV6C8OFXlzD2yjqGwB0SBiFyCnvlWA3dAhOACIUaHqnkg4",
+ "THUop/ZCTidKi7I0508nFa/7DaHp1LY+1r80bfvERXXD1zMBZnbtYXKQX1rMWlfwmhrxFUcmBT03dxMK",
+ "o9Y63IfZHMZEMZ5CsovyzbE8Na3CI7DnkA7oAS7YJJitczg69BslukEi2LMLQwseUEreUalZykqUJH6E",
+ "7Z1bNLoTRI0bJANNmZG2gw/IwJH31v2JNfd3x7yZoDVKCO2D35NCI8vJmcILow38OWzRyvnO+pHPAu/z",
+ "HUiKkVHN6aacIKDeO2Uu5LAJbGiq86255vQatuQSJBBVLQqmtQ0MaAuSWpRJOEBUN98xo7OOWB+s34Ex",
+ "5ppTHCpYXn8rphMrtuyG76wjuLTQ4QSmUoh8hBW5h4woBKOszKQUZteZi0PxwQqeklpAOiEGTWM183yg",
+ "WmjGFZD/IyqSUo4CWKWhvhGERDaL16+ZwVxg9ZzOntxgCHIowMqV+OXx4+7CHz92e84UWcKlD94yDbvo",
+ "ePwYtaR3QunW4boDjdcct5MIb0ejhbkonAzX5Smzvaq9G3nMTr7rDO4nxTOllCNcs/xbM4DOydyMWXtI",
+ "I2uq1vvXjuOOMmoEQ8fWbfddCrG8IxtY3HmPyonzx5tWZFlxC1SlnDqCLipv0BDLaR2gYQOzrfO+KrA3",
+ "/n9NnU1tMm287raBuZCbz58iIiXLNrHgigw2sU1xZwzVqQdG99gqiHq0kDOLZSS+CuR57pbW4R2kAHOo",
+ "1ZqVZsgmFmSroRVH+n8f/vvRh+PkP2nyx0Hy4n/MP31+dvXoce/HJ1fffvv/2j89vfr20b//W0y0Vpot",
+ "4qbLvxtEiyVxPH7DT7h1PiyFtArZ1sl5Ynn/cGsJkEGp17HAzVKCQt5oAzBLvW42FaBjRCmluAA+JWwG",
+ "sy6PzVagvDUpB7rEAEJUKsQYh2Z9Hiy9eeIIsB4uZBQji9EPuueQNvE0G60j396B9GIHIrKNT6+tK/tV",
+ "LMOoV3dQ1FZpKPoGL9v11wFx/70XlnuHSvCccUgKwWEbTfRgHH7Cj7He9r4b6IySx1DfrjLRgr8DVnue",
+ "MZt5W/zibgcM/l3tlL6Dze+O27F1hvG+aKuBvCSUpDlDS47gSssq1R85RV0xINeIK8hrwMPWg5e+Sdxc",
+ "EbEmuKE+cqoMDmsNMmoDX0LkzvoBwBsRVLVagdIdqXkJ8JG7VoyTijONcxVmvxK7YSVI9MfMbMuCbsmS",
+ "5mjs+AOkIItKt+VIvPWUZnnuDK9mGiKWHznVhgcpTX5i/GyDw/noP08zHPSlkOc1FuJX1Ao4KKaSON9/",
+ "bb8i+3fLX7urAHNE7GfPb+6b73vYY0FzDvKTV07HOnmFgnRjcu3Bfm92uILxJEpkRjAqGMfY6w5tkYdG",
+ "HfAE9Kgx3rpd/8j1hhtCuqA5y4zwdBNy6LK43lm0p6NDNa2N6JhV/Fo/xVz+K5GUND1Hj+9kxfS6WsxS",
+ "Ucy9bjlfiVrPnGcUCsHxWzanJZurEtL5xeEeQfcW/IpE2NXVdOK4jrpzS4wbOLag7py1QdP/rQV58Pr7",
+ "MzJ3O6Ue2AhaO3QQ+hgxB7jonpbHyizeZoDZEOKP/CN/BUvGmfl+9JFnVNP5giqWqnmlQH5Hc8pTmK0E",
+ "OfIBQ6+oph95j8UPJmkGoVqkrBY5S8l5eBU3R9Mm3vRH+PjxgyGQjx8/9dwf/YvTTRU9o3aC5JLptah0",
+ "4jILEgmXVGYR0FUdWY4j27ygXbNOiRvbUqTLXHDjx1k1LUvVDTTtL78sc7P8gAyVC6M0W0aUFtIzQcMZ",
+ "LTS4v2+F07kkvfRpKZUCRX4raPmBcf2JJB+rg4OnQFqRl785XmNocltCy3B0o0DYrtEIF24FKthoSZOS",
+ "rkBFl6+Blrj7eFEXaKLMc4LdWhGfPj4Ch2oW4PExvAEWjmtHr+HiTm0vnyIaXwJ+wi3ENoY7NZb/m+5X",
+ "EAN64+3qxJH2dqnS68Sc7eiqlCFxvzN15tjK8GTvjlFsxc0hcEl2CyDpGtJzyDDfB4pSb6et7t7j5244",
+ "zzqYsnlxNkgNkzfQxrYAUpUZdTIA5dtuFL0CrX3qwHs4h+2ZaHI/rhM23w7mVkMHFSk1uIwMsYbH1o3R",
+ "3XznPcYA1rL0MdEY/+fJ4qimC99n+CDbG/IODnGMKFrBxkOIoDKCCEv8Ayi4wULNeLci/djyjHizsDdf",
+ "xMzjeT9xTRqpzXmAw9VgDLX9XgAm2YpLRRZUQUaEyw+1AcsBF6sUXcGA7Sk0c44MC26ZRnGQffde9KYT",
+ "y+6F1rtvoiDbxolZc5RSwHwxpIJ2wo7f389kLem4ghnBsg8OYYscxaQ65MAyHSpb5mabxz4EWpyAQfJG",
+ "4PBgtDESSjZrqnzqKmb4+rM8Sgb4Fwbg78q3Oglc1kEab51N5Xlu95zGDbc2r6pQTX5VaLUdkSs1nbgo",
+ "qth2CI4CUAY5rOzCbWNPKE0yQLNBBo6fl8uccSBJzPtNlRIps7nHzTXj5gAjHz8mxNqeyOgRYmQcgI0e",
+ "IhyYvBXh2eSr6wDJXTID9WOjbyn4G+KhgDa+yYg8ojQsnPGByDTPAagLmajvr07gDg5DGJ8Sw+YuaG7Y",
+ "nDOiNoP0sn9QbO3k+jgf5aMhcXaH6c9eLNdak72KbrKaUGbyQMcFuh0Q7xYlYlugEF9O9a1xNXSXjpl6",
+ "4PoewtXDIG/oRgB0LBFNaR2n+e3V0Np3c/8ma1j6tEmE9aGZMdofop/oLg3gr28IrjN93nWv66iS3vZd",
+ "tpOcAvkpxorNGembRvsGWAU5oESctCSI5DxmMDeCPSC7PfXdAs0dU6ko3z4KHOISVkxpaExX5lbyttj7",
+ "dndRTN0WYjm8Ol3KpVnfeyFqHm1TBK37Llzmva/gQmhIlkwqnaDdL7oE0+gHhRrlD6ZpXFBou9xtFROW",
+ "xXkDTnsO2yRjeRWnVzfvj6/MtG9rI4yqFuewRXEQaLomC6y6Ew3E2TG1jdXaueA3dsFv6J2td9xpME3N",
+ "xNKQS3uOL+RcdDjvLnYQIcAYcfR3bRClOxgkXvyvINexbKNAaLCHMzMNZ7tMj73DlPmxdylKARTDd5Qd",
+ "KbqWQFveuQqG0QdG3WM6KFrTzxsYOAO0LFm26RgC7aiD6iK9lrbvk4I7WMDddYPtwUBg9IuFpkpQ7fzv",
+ "Rrq15Yd4uLbZKMyctbO0Q4YQTsWUL57XR5QhbazwtA9XZ0DzH2H7D9MWlzO5mk5uZzeM4dqNuAfX7+rt",
+ "jeIZHWLWjtRyA1wT5bQspbigeeKsq0OkKcWFI01s7o2x98zq4ja8s++P37xz4F9NJ2kOVCa1qDC4KmxX",
+ "fjGrsqnmAwfEF+cyCo+X2a0oGWx+nQIcWmQv1+AKIQXSaK9wQ2NtD46is9Au4375vfZW5xiwS9zhIICy",
+ "9g80tivrHmi7BOgFZbk3GnloB3zouLhx1T+iXCEc4NauhcBDlNwpu+md7vjpaKhrD08K59pRqqmw1cgU",
+ "EbwbkmVESLRFIakWFMsuWJNAnznxqkjM8UtUztK4gZEvlCEObh1HpjHBxgPCqBmxYgN+SF6xYCzTTI1Q",
+ "dDtABnNEkelLeAzhbiFcGdmKs98rICwDrs0niaeyc1CxzoUzNfevUyM79OdyA1vzdDP8bWSMsORI98ZD",
+ "IHYLGKGbqgfuq1pl9gutzTHmh8Aefw1vdzhj70rc4al29OGo2YYMrdvuprDqa5//GcKwFcL2l5z1yqur",
+ "fTIwR7SELFPJUoo/IK7noXociVv3RVYYRk3+AXwWSf/pspjautNUwm1mH9zuIekmtEK1PfQDVI87H/ik",
+ "sKCFN89SbrfaVnRsxYXECSaM5Zrb8RuCcTD34t9yermgsWofRsgwMB033s+WIVkL4jt73DubN3N1b2Yk",
+ "cKTWbZnN6CpBNikl/ezhGwoMdtrRokIjGSDVhjLB1Dq/ciUiw1T8knJbGNT0s0fJ9VZgjV+m16WQmI+p",
+ "4jbvDFJW0DwuOWSI/Xb+asZWzJbFrBQEdRfdQLaesKUiV7vS+pcb1JwsycE0qOzqdiNjF0yxRQ7Y4tC2",
+ "WFCFnLw2RNVdzPKA67XC5k9GNF9XPJOQ6bWyiFWC1EIdqje152YB+hKAkwNsd/iCPESflWIX8Mhg0d3P",
+ "k6PDF2h0tX8cxC4AV/92FzfJkJ38h2MncTpGp50dwzBuN+osml1oi5YPM64dp8l2HXOWsKXjdfvPUkE5",
+ "XUE8TKLYA5Pti7uJhrQOXnhmK+4qLcWWMB2fHzQ1/Gkg5tOwPwsGSUVRMF04z4YShaGnpqiindQPZ8v3",
+ "uso/Hi7/ER2EpfePdJTI+zWa2vsttmp0476lBbTROiXUJuHmrHHd+2Jd5MSn8mMppLoCksWNmcssHcUc",
+ "9OQvSSkZ16hYVHqZ/I2kayppatjfbAjcZPHNs0j5p3bFF349wO8d7xIUyIs46uUA2XsZwvUlD7ngSWE4",
+ "SvaoibEOTuWgJzMeLeY5ejdYcPfQY4UyM0oySG5Vi9xowKlvRXh8x4C3JMV6Pdeix2uv7N4ps5Jx8qCV",
+ "2aFf3r9xUkYhZKywS3PcncQhQUsGFxi4Ft8kM+Yt90Lmo3bhNtD/uZ4HL3IGYpk/yzFF4LuK5dk/mpyR",
+ "TgU9SXm6jtr9F6bjr02F43rJ9hxH64isKeeQR4ezd+av/m6N3P7/FGPnKRgf2bZbGc8ut7O4BvA2mB4o",
+ "P6FBL9O5mSDEajuIvo66zFciIzhPU7SiobJ+sb+ghNbvFSgdS9rDDzbyA+07Ri+wFZwI8Ayl6hl5bV8o",
+ "WQNp5dSjNMuKKrf52ZCtQDrDY1XmgmZTYsY5+/74DbGz2j62XKetILVCYa69io5eH1S4GRdD6CtvxuOb",
+ "x4+zO+DSrFppLHGhNC3KWOqKaXHmG2B+TGjrRDEvxM6MvLIStvLym53E0MOSycJIpvVolscjTZj/aE3T",
+ "NYquLW4yTPLjS595qlRBUfe6RmtdpAbPnYHbVT+zxc+mRBj94pIp+zAFXEA7W6ZOHXOqk8+eaS9PVpxb",
+ "Sony6F2pjTdBuwfOOrS9OTQKWQfx1xRclKhkCtetBHeKvaJVH7pl5XrV3G1WcV1e1D84lFIuOEux5kLw",
+ "FEYNsnvkYoyvYER5iq4xyh9xd0IjhytazK4OJ3JYHCxv5xmhQ1zfWBl8NZtqqcP+qfE1hTXVZAVaOc4G",
+ "2dTXZHT2EsYVuKJD+N5JwCeFbPlfkENGXXpJbfq9Jhlh7PyAAPyD+fbWqUcYVHrOOApCDm0uftVaNLAG",
+ "vzbSE9NkJUC59bRT89UH02eG6ekZbD7NfM1+HMO6L8yyra+uP9Sx99w5T5lp+9K0JTbqsP65FaZoJz0u",
+ "SzdpNNSo3uFYycVBBEc8MIk3gQfIrccPR9tBbjtd7nifGkKDC3TYQYn3cI8w6uqVnUq7FzSvLEVhC2JD",
+ "XaL5lYxHwHjDODQvSkQuiDR6JeDG4Hkd6KdSSbUVAUfxtDOgOXrpYgxNaWeive1QnQ1GlOAa/RzD29gU",
+ "3hxgHHWDRnCjfFs/ZGGoOxAmXuILOg6R/TKaKFU5ISrDsONOYc0Y4zCM21fdbV8A/WPQl4lsdy2pPTnX",
+ "uYmGMslSEZM3v99AWlkntFA+CpmkmJod3BdRi2ZTIjayDWGZWo9aDBFfbPHfWI2lYZQ4L/G145S8Sxg7",
+ "XltgbY/UEzcNMSWKrZLxmEBmfnt0NFPfjMKa/ndKYrlYtQG551oou9hLuEcxxvK94dhhXnOvcJjl6XXa",
+ "MUYFCV8oHvW1OmGuzQ7wDulVEkNrdF3ze7c9YLh69xRvnYHYwKACDLUXm3VvDEUIpoMBrVS7vBJNSVO+",
+ "os8TbMnt2Ag2vMCW+ravBEZNO0MhBTaiwHzu9R4nkvUEXBx7J0J9rEofoB99IBwpKXO+u4ZZ9DHrQmb7",
+ "QcxjgumaDe4uwgWi4iCxlfQKBO6mkF4gchBMb+u4zcYntB/XjlF012AV7hVwV4a7HWI4OtBpuYRUs4s9",
+ "gd//YYTlJqh46sVp+zxDEAfO6sAZ/5jkNaX8BqBdcdk74QmqZtwanKGwz3PYPlCkRQ3RwnJTT6g3yZdE",
+ "DGBFkcSQiFAxx4PV/50tmKmaMhAL3tFnu0NTzGmwom+QxnDDuTxJEhqmNuyY8kLEFIhRc5mu10r4wRiQ",
+ "odjwfk3N4dvrFZYwVXU19vq1yCCOw+iJ3Xpvly5fE8P0a5OXz9wE5X/zOTl2FvsKaVNzGA2Ml1RmvkVU",
+ "YvbCeDIQbdWNX7Zh4iwO9LKemTVhGf0Q3kidAwy+SXOhGF8lQxFM7UiI8CEj9PegbQKLlSJcS5Cu1rj2",
+ "j7wmWvgwjl1w7EKFe3TnJkhQg1X7LHCDGb/vm5RmLO5E7RO/zpcVLpBIKKiBTgaJx8Nz7kL2S/vdx6z6",
+ "4j6dUlqRcT29Jnszh31ADlM9JIZUvyTuttwfC3sTVYVxbp9yULEsZG5QGRqxSimyKrUXdHgwwKt0o3P8",
+ "d7CSqJSf9lfZE9hyrHjxJsgsOIft3ApN6ZrypvRI+1jbYoR2DUEmX2e371SLiwus+couYHUncP6ZmtB0",
+ "UgqRJwNWq5N+MnX3DJyz9BwyYu4O78oeqOpLHqKxpHZLXK63Pnm4LIFD9mhGiNGlilJvvYeiXUasMzl/",
+ "oHfNv8FZs8rWN3BK2uwjj0dh2Eezb8nf/DC7uZoCw/xuOZUdZE+28mYgkVvSy0iN67EvkEV8Bt26ww1R",
+ "WShiUsoNU9dGne++ohYh/TDpYI/+c97S6myhnI6fQEi4Y+0uMJBeU7vrp1OMXR6uA7lapaC/ztEb0MLt",
+ "AO7HIL4xTfSRO2xR0IsxFoV4UQ/THU0aFiFYEYcgqOS3w9+IhKV7wf/xY5zg8eOpa/rbk/Zno309fhw9",
+ "mfdmzGg9dObmjVHMP4b8ytZ3OhDC0NmPiuXZPsJoBaQ01Sox5OJXF7rzp9TL/NWqyP2j6koHXseM2t0E",
+ "RExkra3Jg6mCUJMRUSauWySmBC+btJJMbzGjyGtU7Ndopvbr2gjjXs+sY9BdCLR9p95FRDUmm+Zp8dfC",
+ "vn9XmLsejdgaC/p/v6FFmYM7KN8+WPwVnv7tWXbw9PCvi78dPD9I4dnzFwcH9MUzevji6SE8+dvzZwdw",
+ "uPzmxeJJ9uTZk8WzJ8++ef4iffrscPHsmxd/feDf9baANm9m/28sKpscvztJzgywDU5oyep3PAwZ+wKV",
+ "NMWTaHSSfHLkf/qf/oTNUlE0w/tfJy48brLWulRH8/nl5eUs7DJfoY6WaFGl67mfp/9+wruTOnTHplzg",
+ "jtqoDEMKuKmOFI7x2/vvT8/I8buTWUMwk6PJwexgdoh1oEvgtGSTo8lT/AlPzxr3fe6IbXL0+Wo6ma+B",
+ "5lgc3PxRgJYs9Z/UJV2tQM5cpU7z08WTuff8zz87/fRq17d5WPRm/rmlxmd7emJdkPlnn+6yu3Urn8SZ",
+ "L4IOI6EYntK+EDb/jPrg4O9tMD7rDcuu5t785Hq4l3bmn5unr67sKcwhZjqyoVw0eClravR1fPRU2V/N",
+ "wfMR5Ey1X0qrqegkM9Rjer2snwELkuePPvTELzsQ8SPhUTN01JyE1kwNs9OygjCfu2blrfYNQ/9wkLz4",
+ "9Plwenhw9RfDsN2fz59ejbQBN4+0ktOaG49s+KnzDv6Tg4P/Zk/cPrvminfK3C03WaRc73c0Iz66Eec+",
+ "vL+5Tzha4A3jJPZiuJpOnt/n6k+4IXmaE2wZ5P30t/4Xfs7FJfctzS1eFQWVW3+MVYsp+Mf98K6gK4Ua",
+ "mGQXVMPkE6r4Mff+AHPBt4SvzVzwgeSvzOW+mMuX8XL0k2se8C9/xV/Z6ZfGTk8tuxvPTp0oZwPo5/YF",
+ "kkbC65WXXUE0kh9j6umuB/e6HPY16N77gZNbspg/7SnB/97n5NnBs/uDoF0b8UfYkrdCkx/Q7fWFntlx",
+ "x2eXJNTRjLKsR+SW/YPS34lsuwNDhVqVLug1IpcsGDcg92+X/tscvff9zmFLrCvYm/zd+7Zteejqljzg",
+ "i32K8CsP+cpDpJ3+6f1NfwrygqVAzqAohaSS5VvyC69Tlm6u1mVZNMyuffR7PM1oI6nIYAU8cQwrWYhs",
+ "68vVtAY8B2ua7gkq88/tmpPW/DVolnqFv9dP4fSBXmzJyaueBGO7dTntd1ts2tEYIzphF8SdmmGXFw0o",
+ "Y7vI3CxkJTSxWMjcor4ynq+M51bCy+jDE5NfotqEN+R07+Spz92NZbdT3Z96jM7xpx7X/7Ivu39lCV9Z",
+ "ws1ZwmuIHEY8tY5JRIjuJpbePoPAyKusW7kdwxd88yqnkigYa6Y4xhGdceI+uMR9K2lRXFkdjXICG6bw",
+ "JZLIht2t3vaVxX1lcV+Q12o/o2kLItfWdM5hW9Cy1m/UutKZuLQ1b6JcEcvB0tzVjsNqbnUkhhbED9Ak",
+ "OJGfXUZfvsUX0VlmxDjNCjAiVc3rTGcfttrEzZoRmif8VozjBMgqcBZbJJEGqQMKUsHtg1cdX5uD7K3V",
+ "CWNM9vcKkKM53DgYJ9OWs8VtY6Qk4a3lr75v5GqHLb1+tar19/ySMp0shXSZQ4ihfhSGBprPXXWHzq9N",
+ "XmfvCyarBj8GsRvxX+d1ld7ox27USeyrCwrxjZqwsjBMC/ewDtD68MlsBRZ5c9vbRB0dzecYbr8WSs8n",
+ "V9PPnYik8OOnGvuf65vX7cLVp6v/HwAA//9tX0dx5rAAAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index e0e6ee849..ff7643fdf 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -612,6 +612,13 @@ type CompileResponse struct {
Result string `json:"result"`
}
+// DisassembleResponse defines model for DisassembleResponse.
+type DisassembleResponse struct {
+
+ // disassembled Teal code
+ Result string `json:"result"`
+}
+
// DryrunResponse defines model for DryrunResponse.
type DryrunResponse struct {
Error string `json:"error"`
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index 97c7308bd..b819ded7d 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -53,6 +53,9 @@ type ServerInterface interface {
// Compile TEAL source code to binary, produce its hash
// (POST /v2/teal/compile)
TealCompile(ctx echo.Context) error
+ // Disassemble program bytes into the TEAL source code.
+ // (POST /v2/teal/disassemble)
+ TealDisassemble(ctx echo.Context) error
// Provide debugging information for a transaction (or group).
// (POST /v2/teal/dryrun)
TealDryrun(ctx echo.Context) error
@@ -538,6 +541,29 @@ func (w *ServerInterfaceWrapper) TealCompile(ctx echo.Context) error {
return err
}
+// TealDisassemble converts echo context to params.
+func (w *ServerInterfaceWrapper) TealDisassemble(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
+ }
+
+ var err error
+
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.TealDisassemble(ctx)
+ return err
+}
+
// TealDryrun converts echo context to params.
func (w *ServerInterfaceWrapper) TealDryrun(ctx echo.Context) error {
@@ -726,6 +752,7 @@ func RegisterHandlers(router interface {
router.GET("/v2/status", wrapper.GetStatus, m...)
router.GET("/v2/status/wait-for-block-after/:round", wrapper.WaitForBlock, m...)
router.POST("/v2/teal/compile", wrapper.TealCompile, m...)
+ router.POST("/v2/teal/disassemble", wrapper.TealDisassemble, m...)
router.POST("/v2/teal/dryrun", wrapper.TealDryrun, m...)
router.POST("/v2/transactions", wrapper.RawTransaction, m...)
router.GET("/v2/transactions/params", wrapper.TransactionParams, m...)
@@ -737,200 +764,203 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+y9e3fbOJIo/lXw0+45Sbyi5Lx6Jj6nz/7ccT+8k6RzYvfs7LZzuyGyJGFMAhwAtKXO",
- "9Xe/BwWABElQkh9JOt3+K7GIR6FQKBTq+WGUiqIUHLhWo4MPo5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPB",
- "Rwf+G1FaMr4YjUfM/FpSvRyNR5wW0LQx/ccjCf+qmIRsdKBlBeORSpdQUDOwXpemdT3SKlmIxA1xaIc4",
- "PhpdbfhAs0yCUn0of+T5mjCe5lUGREvKFU3NJ0UumV4SvWSKuM6EcSI4EDEnetlqTOYM8kxN/CL/VYFc",
- "B6t0kw8v6aoBMZEihz6cL0UxYxw8VFADVW8I0YJkMMdGS6qJmcHA6htqQRRQmS7JXMgtoFogQniBV8Xo",
- "4OeRAp6BxN1KgV3gf+cS4DdINJUL0KP349ji5hpkolkRWdqxw74EVeVaEWyLa1ywC+DE9JqQ15XSZAaE",
- "cvLuu5fk6dOnL8xCCqo1ZI7IBlfVzB6uyXYfHYwyqsF/7tMazRdCUp4ldft3373E+U/cAndtRZWC+GE5",
- "NF/I8dHQAnzHCAkxrmGB+9CiftMjciian2cwFxJ23BPb+E43JZz/s+5KSnW6LAXjOrIvBL8S+znKw4Lu",
- "m3hYDUCrfWkwJc2gP+8nL95/eDx+vH/1bz8fJv/r/nz+9GrH5b+sx92CgWjDtJISeLpOFhIonpYl5X18",
- "vHP0oJaiyjOypBe4+bRAVu/6EtPXss4LmleGTlgqxWG+EIpQR0YZzGmVa+InJhXPDZsyozlqJ0yRUooL",
- "lkE2Ntz3csnSJUmpskNgO3LJ8tzQYKUgG6K1+Oo2HKarECUGrhvhAxf0+0VGs64tmIAVcoMkzYWCRIst",
- "15O/cSjPSHihNHeVut5lRU6XQHBy88Fetog7bmg6z9dE475mhCpCib+axoTNyVpU5BI3J2fn2N+txmCt",
- "IAZpuDmte9Qc3iH09ZARQd5MiBwoR+T5c9dHGZ+zRSVBkcsl6KW78ySoUnAFRMz+Cak22/5fJz++IUKS",
- "16AUXcBbmp4T4KnIhvfYTRq7wf+phNnwQi1Kmp7Hr+ucFSwC8mu6YkVVEF4VM5Bmv/z9oAWRoCvJhwCy",
- "I26hs4Ku+pOeyoqnuLnNtC1BzZASU2VO1xNyPCcFXX29P3bgKELznJTAM8YXRK/4oJBm5t4OXiJFxbMd",
- "ZBhtNiy4NVUJKZszyEg9ygZI3DTb4GH8evA0klUAjh9kEJx6li3gcFhFaMYcXfOFlHQBAclMyE+Oc+FX",
- "Lc6B1wyOzNb4qZRwwUSl6k4DMOLUm8VrLjQkpYQ5i9DYiUOH4R62jWOvhRNwUsE1ZRwyw3kRaKHBcqJB",
- "mIIJNz9m+lf0jCr46tnQBd583XH356K76xt3fKfdxkaJPZKRe9F8dQc2Lja1+u/w+AvnVmyR2J97G8kW",
- "p+YqmbMcr5l/mv3zaKgUMoEWIvzFo9iCU11JODjje+YvkpATTXlGZWZ+KexPr6tcsxO2MD/l9qdXYsHS",
- "E7YYQGYNa/Q1hd0K+48ZL86O9Sr6aHglxHlVhgtKW6/S2ZocHw1tsh3zuoR5WD9lw1fF6cq/NK7bQ6/q",
- "jRwAchB3JTUNz2EtwUBL0zn+s5ojPdG5/M38U5Z5DKeGgN1Fi0oBpyw4LMucpdRg7537bL6a0w/2eUCb",
- "FlO8SQ8+BLCVUpQgNbOD0rJMcpHSPFGaahzp3yXMRwejf5s2WpWp7a6mweSvTK8T7GQEUSvcJLQsrzHG",
- "WyPQqA1cwnBm/IT8wfI7FIUYt7tnaIgZ3pvDBeV60jxEWoygPrk/u5kafFsZxuK787AaRDixDWegrFxr",
- "Gz5QJEA9QbQSRCuKmYtczOofHh6WZYNB/H5YlhYfKBMCQ3ELVkxp9QiXT5sjFM5zfDQh34djo4AteL42",
- "t4KVMcylMHfXlbu+ao2RW0Mz4gNFcDuFnJit8WgwwvtdUBw+FpYiN+LOVloxjX9wbUMyM7/v1PnLILEQ",
- "t8PEhc8nhzn7csFfgifLww7l9AnHKXEm5LDb92ZkY0aJE8yNaGXjftpxN+CxRuGlpKUF0H2xlyjj+PSy",
- "jSyst+SmOzK6KMzBGQ5oDaG68Vnbeh6ikCApdGD4Jhfp+R2c95kZp3/scHiyBJqBJBnVNDhX7rzEL2vs",
- "+AP2Q44AMiLR/4j/oTkxnw3hG75ohzUvdYb0KwK9emYeuFZstjOZBvjwFqSwb1pi3qLXgvJlM3mPR1i0",
- "7MIjvrXPaII9/CLM0hsl2eFMyJvRS4cQOGlUf4SaUYPjMu7sLDatysThJ6I+sA06AzXWlr4UGWKoO3wM",
- "Vy0snGj6EbCgzKh3gYX2QHeNBVGULIc7OK9Lqpb9RZj33NMn5OSHw+ePn/zy5PlX5kFSSrGQtCCztQZF",
- "Hjoxmii9zuFRf2Uoz1a5jo/+1TOvMGqPuxVDCHA99i4n6hQMZ7AYI1Y9aqA7kmtZ3YVQDVIKGXniI+lo",
- "kYo8uQCpmIhoa9+6FsS18Bdt2f3dQksuqSJmbtQ+VTwDOYlhXq84gsY0FGrbRWGHPl3xBjduQColXfd2",
- "wK43sjo37y570ka+V2YoUoJM9IqTDGbVoiWTzaUoCCUZdkSG+EZkYOTpSt0BF2gGa4AxGxGCQGei0oQS",
- "LjJA4btScf4wYLpBnTGqunXIcvTS3j8zMIJeSqvFUhPzihaxrW06JjS1m5LgXaEGNF21itK2stNZs0Au",
- "gWZGAAROxMypk5yiCxdJUQutvYHZcaeISNyCq5QiBaWM4G7Fsa2g+XZ2l/UGPCHgCHA9C1GCzKm8IbBa",
- "aJpvARTbxMCtxQmng+tDvdv0mzawO3m4jVQa2d1SgZFdzOnOQcMQCnfEyQVI1EV91P3zk9x0+6pywFLs",
- "buBTVuATgFMuFKSCZyo6WE6VTrYdW9OoJSaYFQQnJXZSceCBZ+grqrTVSDKeocho2Q3OY9+nZophgAdv",
- "FDPy3/1l0h87NXySq0rVN4uqylJIDVlsDRxWG+Z6A6t6LjEPxq6vLy1IpWDbyENYCsZ3yLIrsQiiun6/",
- "O5V9f3H4yjX3wDqKyhYQDSI2AXLiWwXYDa1lA4CY90XdEwmHqQ7l1Ca68UhpUZbm/Omk4nW/ITSd2NaH",
- "+qembZ+4qG74eibAzK49TA7yS4tZayddUiPb4cikoOfmbkJJzapO+zCbw5goxlNINlG+OZYnplV4BLYc",
- "0gEh2XliBLN1DkeHfqNEN0gEW3ZhaMEDEvtbKjVLWYmSxN9gfefP/e4E0Zc/yUBTlkNGgg/IwJH31v2J",
- "1YV3x7yZoLWTENoHvyeFRpaTM4UXRhv4c1ijCvCtNbKeBqbZO5AUI6Oa0005QUC96cZcyGETWNFU52tz",
- "zeklrMklSCCqmhVMa2s1bwuSWpRJOED04bphRqc6sAZKvwO76DJOcKhgef2tGI+s2LIZvtOO4NJChxOY",
- "SiHyHVSsPWREIdhJBUtKYXadOScNb8n3lNQC0gkxqDeqmecD1UIzroD8j6hISjkKYJWG+kYQEtksXr9m",
- "BnOB1XM6ZWuDIcihACtX4pe9ve7C9/bcnjNF5nDpPZtMwy469vbwlfRWKN06XHfw4jXH7TjC2/FFby4K",
- "J8N1ecpk6+vejbzLTr7tDO4nxTOllCNcs/xbM4DOyVztsvaQRpZULbevHcfdSaERDB1bt913KcT8jhRE",
- "ccs2Pk6csdq0IvOKW6Aq5Z4jaL/xCg0xH9feC9Zr2Vq2qwJ74/+X1CmcRuPGJG0bmAu5+fw+IlKybBXz",
- "PMhgFdsUd8bwOfXAvD3WCqLmHuTMYh5xPgJ5nruldXgHKcAcarVkpRmycZRYa2g5Wf6fh/958PNh8r80",
- "+W0/efEf0/cfnl092uv9+OTq66//b/unp1dfP/rPf4+J1kqzWVyv94NBtJgTx+NX/JhbzfxcSPsgWzs5",
- "T8w/PdxaAmRQ6mXMq7GUoJA3Wu/EUi+bTQXoKFFKKS6AjwmbwKTLY7MFKK9NyoHO0bsOHxViF2tffR4s",
- "vXniCLAeLmQnRhajH7RdIW3iaTavjnx9B9KLHYjINj79a13Zr2IeuoS6g6LWSkPRV3jZrr8MiPvvvLDc",
- "O1SC54xDUggO62gUBOPwGj/Getv7bqAzSh5DfbuPiRb8HbDa8+yymbfFL+52wODf1hbbO9j87rgdXWfo",
- "DIu6GshLQkmaM9TkCK60rFJ9xim+FQNyjdhJ/At4WHvw0jeJqysi2gQ31BmnyuCwfkFGdeBziNxZ3wF4",
- "JYKqFgtQuiM1zwHOuGvFOKk40zhXYfYrsRtWgkRjxcS2LOiazGmOyo7fQAoyq3RbjsRbT2mW507xaqYh",
- "Yn7GqTY8SGnymvHTFQ7nXeM8zXDQl0Ke11iIX1EL4KCYSuJ8/3v7Fdm/W/7SXQUYQGE/e37zqfm+hz3m",
- "UeYgPz5yb6zjIxSkG5VrD/ZPpocrGE+iRGYEo4JxdEzu0BZ5aJ4DnoAeNcpbt+tnXK+4IaQLmrPMCE83",
- "IYcui+udRXs6OlTT2oiOWsWv9X3MHr4QSUnTczSHjhZML6vZJBXF1L8tpwtRvzOnGYVCcPyWTWnJpqqE",
- "dHrxeIugewt+RSLs6mo8clxH3bkmxg0cW1B3zlqh6f/Wgjz4/ttTMnU7pR5Y91I7dOAXGFEHONeXlsXK",
- "LN6GR1n/2jN+xo9gzjgz3w/OeEY1nc6oYqmaVgrkNzSnPIXJQpAD701zRDU94z0WPxjBGPgxkbKa5Swl",
- "5+FV3BxNG5XSH+Hs7GdDIGdn73vmj/7F6aaKnlE7QXLJ9FJUOnFu94mESyqzCOiqdrvGkW3QzKZZx8SN",
- "bSnSufW78eOsmpal6nph9pdflrlZfkCGyvkYmi0jSgvpmaDhjBYa3N83wr25JL30MRuVAkV+LWj5M+P6",
- "PUnOqv39p0Babom/Ol5jaHJdQktxdCMv0a7SCBduBSpYaUmTki5ARZevgZa4+3hRF6iizHOC3VrukN55",
- "AIdqFuDxMbwBFo5ru3bh4k5sLx8/GV8CfsItxDaGOzWa/5vuV+AgeePt6jhZ9nap0svEnO3oqpQhcb8z",
- "dVjVwvBkb45RbMHNIXARaDMg6RLSc8gwGAaKUq/Hre7e4uduOM86mLJBY9aDCyMbUMc2A1KVGXUyAOXr",
- "rou5Aq29X/07OIf1qWgCI67jU972dFZDBxUpNbiMDLGGx9aN0d18Zz1G786y9A7D6BznyeKgpgvfZ/gg",
- "2xvyDg5xjChanrhDiKAygghL/AMouMFCzXi3Iv3Y8ox4M7M3X0TN43k/cU0aqc1ZgMPVoIOx/V4ARqCK",
- "S0VmVEFGhAuetN68ARerFF3AgO4pVHPu6DPbUo3iINvuvehNJ+bdC61330RBto0Ts+YopYD5YkgF9YQd",
- "u7+fyWrScQUTgjkRHMJmOYpJtcuBZTpUttTNNsh7CLQ4AYPkjcDhwWhjJJRsllT5uE4Mf/VneScZ4CN6",
- "p28KRjoOTNZBjGsdauR5bvecxhW3NuioUE3wUai13SGQaDxyXlSx7RAcBaAMcljYhdvGnlAaT/lmgwwc",
- "P87nOeNAkpj1myolUmYDc5trxs0BRj7eI8TqnsjOI8TIOAAbLUQ4MHkjwrPJF9cBkjtPf+rHRttS8DfE",
- "XQGtf5MReURpWDjjA55pngNQ5zJR318dxx0chjA+JobNXdDcsDmnRG0G6YXGoNjaCYRxNspHQ+LsBtWf",
- "vViutSZ7Fd1kNaHM5IGOC3QbIN4sSsS2QCG+3NO3xtXQXbrL1APX9xCuHgZBNTcCoKOJaPLOuJff1hda",
- "+27u32QNSx83UaLeNTNG+0P0E92lAfz1FcF1GMzb7nUdfaS3bZftCKBAfoqxYnNG+qrRvgJWQQ4oESct",
- "CSI5jynMjWAPyG5PfLfg5Y5xRpSvHwUGcQkLpjQ0qitzK3ld7Kc2d1GMaxZiPrw6Xcq5Wd87IWoebePn",
- "rPkuXOYnX8GF0JDMmVQ6Qb1fdAmm0XcKX5TfmaZxQaFtcrcpPlgW5w047Tmsk4zlVZxe3bx/OzLTvqmV",
- "MKqancMaxUGg6ZLMMCVN1BFnw9TWV2vjgl/ZBb+id7be3U6DaWomloZc2nN8Ieeiw3k3sYMIAcaIo79r",
- "gyjdwCDx4j+CXMdCcQKhwR7OzDScbFI99g5T5sfe9FAKoBi+o+xI0bUEr+WNq2DofWCee0wHGV36cQMD",
- "Z4CWJctWHUWgHXXwuUiv9dr3EbMdLODuusG2YCBQ+sVcUyWodnB0I93a3Dw8XNtkJ8yctkOYQ4YQTsWU",
- "zyzXR5QhbUx/tA1Xp0Dzv8H676YtLmd0NR7dTm8Yw7UbcQuu39bbG8UzGsSsHqllBrgmymlZSnFB88Rp",
- "V4dIU4oLR5rY3CtjPzGri+vwTr89fPXWgX81HqU5UJnUosLgqrBd+cWsysZhDxwQn7nKPHi8zG5FyWDz",
- "6/jYUCN7uQSXJSiQRntZDRpte3AUnYZ2HrfLb9W3OsOAXeIGAwGUtX2g0V1Z80DbJEAvKMu90shDO2BD",
- "x8XtlhojyhXCAW5tWggsRMmdspve6Y6fjoa6tvCkcK4NeYwKm6pLEcG7LllGhERdFJJqQTEngVUJ9JkT",
- "r4rEHL9E5SyNKxj5TBni4NZwZBoTbDwgjJoRKzZgh+QVC8YyzdQOD90OkMEcUWT6/BZDuJsJl2O14uxf",
- "FRCWAdfmk8RT2TmomATCqZr716mRHfpzuYGteroZ/jYyRpiPo3vjIRCbBYzQTNUD96h+MvuF1uoY80Og",
- "j7+GtTucsXclbrBUO/pw1GxdhpZtc1OYErXP/wxh2PRZ2/Ox+serSwwyMEc0vypTyVyK3yD+zsPnccRv",
- "3WcgYeg1+RvwSST8p8tiau1Okya2mX1wu4ekm1AL1bbQD1A97nxgk8JsD149S7ndapvusOUXEieY0Jdr",
- "asdvCMbB3PN/y+nljMZSYRghw8B02Fg/W4pkLYjv7HHvdN7MJYWZkMCQWrdlNqKrBNmElPSjh28oMNhp",
- "dxYVGskAqTaUCcbW+JUrERmm4peU26yZpp89Sq63Aqv8Mr0uhcR4TBXXeWeQsoLmcckhQ+y341cztmA2",
- "Z2SlIEhK6AayyXYtFbnEjta+3KDmeE72x0HaU7cbGbtgis1ywBaPbYsZVcjJa0VU3cUsD7heKmz+ZIfm",
- "y4pnEjK9VBaxSpBaqMPnTW25mYG+BOBkH9s9fkEeos1KsQt4ZLDo7ufRweMXqHS1f+zHLgCXHHYTN8mQ",
- "nfy3YydxOkajnR3DMG436iQaXWgzeg8zrg2nyXbd5SxhS8frtp+lgnK6gLibRLEFJtsXdxMVaR288Mym",
- "o1VaijVhOj4/aGr404DPp2F/FgySiqJgunCWDSUKQ09NxkE7qR/O5rZ1aXE8XP4jGghLbx/pPCI/rdLU",
- "3m+xVaMZ9w0toI3WMaE2CDdnjeneZ7Iixz6UH/ME1emBLG7MXGbpKOagJX9OSsm4xodFpefJX0m6pJKm",
- "hv1NhsBNZl89i+RGaqdD4dcD/JPjXYICeRFHvRwgey9DuL7kIRc8KQxHyR41PtbBqRy0ZMa9xTxH7zoL",
- "bh56V6HMjJIMklvVIjcacOpbER7fMOAtSbFez7Xo8dor++SUWck4edDK7NBP7145KaMQMpbYpTnuTuKQ",
- "oCWDC3Rci2+SGfOWeyHznXbhNtB/XsuDFzkDscyf5dhD4JuK5dnfm5iRTno5SXm6jOr9Z6bjL03633rJ",
- "9hxH84gsKeeQR4ezd+Yv/m6N3P7/FLvOUzC+Y9tu2ji73M7iGsDbYHqg/IQGvUznZoIQq20n+trrMl+I",
- "jOA8TdKKhsr6mfCCFFr/qkDpWNAefrCeH6jfMe8Cm8GJAM9Qqp6Q7235jiWQVkw9SrOsqHIbnw3ZAqRT",
- "PFZlLmg2Jmac028PXxE7q+1jc1naDFILFObaq+i864MMN7v5EPq0lHH/5t3H2exwaVatNKa4UJoWZSx0",
- "xbQ49Q0wPibUdaKYF2JnQo6shK28/GYnMfQwZ7Iwkmk9muXxSBPmP1rTdImia4ubDJP87qnPPFWqION5",
- "ncC0TlKD587A7bKf2eRnYyLM++KSKVu1AS6gHS1Th465p5OPnmkvT1acW0qJ8uhNoY03QbsHzhq0vTo0",
- "ClkH8dcUXJSoZArXzQR3gr2iWR+6aeV6qc5tVHGde9NX40kpF5ylmHMhqBNRg+wqQOxiK9ghPUVXGeWP",
- "uDuhkcMVTWZXuxM5LA6mt/OM0CGur6wMvppNtdRh/9RYamBJNVmAVo6zQTb2CQudvoRxBS7pEBYDCfik",
- "kC37C3LIqEkvqVW/1yQj9J0fEIC/M9/euOcROpWeM46CkEOb81+1Gg1MUK+N9MQ0WQhQbj3t0Hz1s+kz",
- "wfD0DFbvJz6hPY5hzRdm2dZW1x/q0FvunKXMtH1p2hLrdVj/3HJTtJMelqWbNOpqVO9wLOXiIIIjFpjE",
- "q8AD5Nbjh6NtILeNJne8Tw2hwQUa7KDEe7hHGHX2yk4a2guaV5aisAWxri7R+ErGI2C8YhyacguRCyKN",
- "Xgm4MXheB/qpVFJtRcCdeNop0BytdDGGprRT0d52qM4GI0pwjX6O4W1sEm8OMI66QSO4Ub6uqzwY6g6E",
- "iZdYXsYhsp9GE6UqJ0Rl6HbcSawZYxyGcfuUtO0LoH8M+jKR7a4ltSfnOjfRUCRZKmLy5rcrSCtrhBbK",
- "eyGTFEOzg/siqtFkyjyeilke8Qc7qj8G2WrRRXy2xn9jOZaGUeKsxNf2U/ImYex4bYG1PVJP3DTElCi2",
- "SHbHBDLz26OjmfpmFNb0v1MSy8WiDcgnzoWyib2EexRjLN8ajh3GNfcSh1meXocdo1eQ8FnU8b1WB8y1",
- "2QHeIb1MYqiNrhNib9YHDKe2HuOtM+AbGGSAofZis+aNIQ/BdNChlWoXV6IpadJX9HmCzUcdG8G6F9g8",
- "2LaEXlS1M+RSYD0KzOde791Esp6Ai2NvRKj3VekD9DfvCEdKypztrmEWfcw6l9m+E/MuznTNBncX4RxR",
- "cZDYSnoJAjdTSM8ROXCmt3ncJrsHtB/WhlE012AW7gVwl4a77WK4s6PTfA6pZhdbHL//2wjLjVPx2IvT",
- "tnZB4AfOascZX2nxmlJ+A9Amv+yN8ARZM24NzpDb5zmsHyjSooZoYrmxJ9SbxEsiBjCjSGJIRKiY4cG+",
- "/50umKmaMhAL3tBnu0OTzGkwo28QxnDDuTxJEhqGNmyY8kLEHhA7zWW6XivgB31AhnzD+zk1h2+vI0xh",
- "qups7HUpxcCPw7wTu/neLl28Jrrp1yovH7kJyv/mY3LsLLZEZ5NzGBWMl1RmvkVUYvbCeDLgbdX1X7Zu",
- "4iwO9LyemTVuGX0X3kieA3S+SXOhGF8kQx5MbU+IsMoP2ntQN4HJShGuOUiXa1z7CqiJFt6NYxMcm1Dh",
- "KtLcBAlqMGufBW4w4vddE9KMyZ2orX/rbFnhAomEghroZBB4PDznJmS/tN+9z6pP7tNJpRUZ19NrsjVy",
- "2DvkMNVDYkj1c+Juy+2+sDd5qjDObSkHFYtC5gaVoRKrlCKrUntBhwcD/JNu5xj/DawkKuWn/VX2BLYc",
- "M168CiILzmE9tUJTuqS8ST3SPtY2GaFdQxDJ19ntO33FxQXWfGEXsLgTOD/nS2g8KoXIkwGt1XE/mLp7",
- "Bs5Zeg4ZMXeHN2UPZPUlD1FZUpslLpdrHzxclsAhezQhxLylilKvvYWinUasMzl/oDfNv8JZs8rmN3CP",
- "tMkZj3th2IrSt+RvfpjNXE2BYX63nMoOsiVaeTUQyC3pZSTH9a7luSI2g27e4YaoLBQxKeWGoWs7ne/+",
- "Qy1C+mHQwZb3z3nrVWcT5XTsBELCHb/uAgXpNV93/XCKXZeH60CuVinor3PnDWjhdgD3uyC+UU30kTus",
- "UdCzXTQK8aQepjuqNCxCMCMOQVDJr49/JRLmrrz93h5OsLc3dk1/fdL+bF5fe3vRk/nJlBmtKmBu3hjF",
- "/H3IrmxtpwMuDJ39qFiebSOMlkNKk60SXS5+ca47nyVf5i/2idw/qi514HXUqN1NQMRE1tqaPJgqcDXZ",
- "wcvEdYv4lOBlk1aS6TVGFPkXFfslGqn9fa2EcaUlax905wJti7g7j6hGZdPU3f5e2OJwhbnrUYmtMaH/",
- "tytalDm4g/L1g9lf4Olfn2X7Tx//ZfbX/ef7KTx7/mJ/n754Rh+/ePoYnvz1+bN9eDz/6sXsSfbk2ZPZ",
- "syfPvnr+In367PHs2Vcv/vLAF722gDYFpf+BSWWTw7fHyakBtsEJLVldx8OQsU9QSVM8ieZNko8O/E//",
- "vz9hk1QUzfD+15FzjxsttS7VwXR6eXk5CbtMF/hGS7So0uXUz9Ovn/D2uHbdsSEXuKPWK8OQAm6qI4VD",
- "/Pbu25NTcvj2eNIQzOhgtD/ZnzzGPNAlcFqy0cHoKf6Ep2eJ+z51xDY6+HA1Hk2XQHNMDm7+KEBLlvpP",
- "6pIuFiAnLlOn+eniydRb/qcf3Pv0yoy6iMVaWSeksHRuL4Gl03WhPck6GbUSQimXn2hcpwlz4iPP0DfE",
- "PvkMa6uRdZw1KUGOg3qsLjDKRoof/BxJnDxni0p2Kg/V2nyXQ5Ap8l8nP74hQpLXVuf+lqbnof9FrMC5",
- "Y2Wx+ubOS6NQi7Jt0mw0/bEaJbFMoJHy+42qaLjyfsNXDa/cT168//D8r1ejHQBBvaWr8PorzfNfbWko",
- "WKHyp11sWY2HKnuPG9VDp57yGG2y9dcwQ2Xdpu0J9CsXHH4d2gYHWHQfaJ6bhoJDbA/edwrJP9nf/wg1",
- "mcetUTxJfNbizs/ucKFtC9qtl9sdrrfob2iGWQNBabuUx1/sUo45mg4Mxyf2Rrsaj55/wXtzzA3PoTnB",
- "lkH8U/8W+Ymfc3HJfUsjzVRFQeUaZZUgtWkolV4N3lbTMA3b9ENLsZzd6i7rZaA8PtpyvT1QQ0yxnxig",
- "k+XNfK/zmKHqMSxarx5NyPdh75tVpm9gC+vTD1y2wWv9/t79qPfuYVvr0IosjwHTIvGNMPUsT7e9+Pp+",
- "T50k3TdKgh3kk7tBVp6Pmim08+gbrF+4A4O9x91Q7ccB8SaAt5Z02nkAPz7fte+34Jpo3QcfkSt/4cLa",
- "a5obOgmW2/HBtukW7oW4P40QVzsj2FobmGFok1iHaUSnH3x2jDsQ5Vx2kB2EuPClG/QNsjc87HCKRxOb",
- "6iJsczN24BwLtopnmLPkXjD72IJZP9lPDIwmhcvnE8YQhmWTDeg6BS5ayXuvlbXoC5W+/sTIGhS3DKTb",
- "Ba0b8MaeEOU48UfjmX9I4ckh7V5s+lOLTdaXb4Pg1MrE5Rw/h2UnCApvB7VMWo5ns7WnwzFRWN/f/FRK",
- "JiTT6zFhnGRgzh5aDIXEoOOmhLdzMgKO/319+A90PX19+A/yNdkf1yIYxmRFprfOPW0Z6HvQkRLz36wP",
- "a3Fgoyz0uxEwTmskDZSA18In00KkFXT19RDKVtauGBPPCroabZRExl+OtHhboakTTdmnIldE05aQd4Vf",
- "2i5VisCKpjpfE4r3z9r6/mJxcZ8Jq1MOvVubPxZvtGFGX1ciFjV2Xa+uSMh6ry7+QLngISp1GeewiMt2",
- "waSHjCgEN5Py7nf3i93dvlhKSmHONMOUCM194u+qFpBNdQEH7oDD6oT8j6jQ2cUWz4JYOk+cAZ17/ZxO",
- "AA3y8eZYuqzGzt5ed+F7e27PmSJzuEQOSjk27KJjb+8PILKu6iyKlHDBE461nS6ABB5y93Lr71pufb7/",
- "9ItdzQnIC5YCOYWiFJJKlq/JT7xOO3M7sbzmORUPEgFt5D89T/lGig7E91vZrru2aaYbybAVOBWoEOoS",
- "fO6tPG5y+Ju3PKYL8QHrauxNJ+j4Z60qdj/GPcPKJCakBxacb9bHR7vI5V+IIXTntFWRey2+Nx/7Boj6",
- "07z7NP40uzHTZ/vPPh0E4S68EZp8h+qyj8zSP6ruIE5WAbO5tkWlsZiErMUFIm5kKuaEjl2qUcx9uSZ1",
- "oJDhJ5YR2mIDfa5hZtiVX/yO9fM7lNmN0GUXvfd84Z4v3IovdAmq4QgYbq+mH9BUELKD3pH8xrT8A5kY",
- "A3uLFIU3uAgyB50ubRqCblhMhK347HfDPGVTjvg7tv8h0JEcubgWF/qBuct3DAjEjj/YSIyr8SgFGSG+",
- "H30+HPOZzTGss85s6EshoDmH+ezAdWJglz6dKe9z7rLeELOL14LyZTN5P0wH0XIXNsN7BF8PwT2m9q3L",
- "02yPl1vEH8Er3SfxTcgbFIfwgPvEfn9EtcfHvJE/9oLeCA7WLm0kVkuL9ybIWlzAaiqIFJ8FwRoeXYHW",
- "uOjQNjp+0CuWXU3rND1DQsVbbLBFqGhuatbUsGyrV2hZApXqxpf0dnPYaWfG46PQT6OVVajOJxQBxeDl",
- "mpbE/9jFjPjHtda1b+AlVct4fDdu0LrEIgymFZlX3G5VXS8KnXa8G4mYj2t1teEHYn5wxveIqgrsjf9f",
- "0uePn/zy5PlXo3G9YtfAYKP5/D4Sf8yyVTT1Bqx8BqGQnpzOEA/VA0VKuh7M2DOQ/Oo1yPPcF+puG0dI",
- "AeYiUktWfo7C42wWr73zg0G0mJM6I/Yx/6bmOxcg2RwLSNXn6RNnVJEAGZR6uTGVga1/Vepls6ngKjQy",
- "5VLGlFJcAB8TNoFJ14iULZrksjnQeZ1yRIhdXLzq82DpzRNHgPVwIbuIaG9j9INhhC4126dWRjSuUPYS",
- "8MiTHX78WTUV+rNoKt4InqAcA1x7mbqFls+ntcAsMeNAMVjXGOBCo0JQSBS/QralJjsJLjBopGnxQOty",
- "OEjGToxJqU6XVTn9gP/BiP2rJjbeFtSYWgXmJknmxLa4U9cUOyaRbW7jk0Q4paqYk9csleIQswm5a0St",
- "lYaiX27Rdv1lU6mG6JUjeM44JIXgsfwSP+LX1/gxmq8Izd0DndHxYKhvt0hOC/4OWO15dmF1t8Xv5Peh",
- "HL2VoN9ZrYSydu9DPwik/+a0tBLGNsek9fP0Q+tPZ2dwLdWy0pm4DPrafBAbz5Ztcadn643IwI7bTsES",
- "87vkIgOXtqJ/pGquERdJPX6bdh3hIKXVYqltucBoLdK6Y0JTexRszlW1LUmlbeWTsV0AobkEmq3JDIAT",
- "MTOLbif7JVTV9V+ROCxvjOdabOAqpUhBKciSsE7QJtDqZCAo+egNeELAEeB6FqIEmVN5Q2Atk9gMaLdA",
- "Xg1urWFzfKAP9W7Tb9rA7uThNlIJxDNEfNKIoszBPWoiKNwRJyhrs4+8f36Sm25fVWIpmki2UPv1lBWY",
- "74JTLhSkgmdqOKfvtmOLWXyDtSiw1Vf9SYlW+DADD1ytr6jSrhJSK/VhkAvaTLEhCfFQIi8z8t/rNF69",
- "sVPDL7mqVFMkyspekEXrb8Jqw1xvYFXPJebB2LVwZ2sDbxt5CEvB+HXZqCCrsA60P2a4yOIweIQ6USxS",
- "wz4EokHEJkBOfKsAu6GKZQAQphpE16lC25QT1O1VWpSlOX86qXjdbwhNJ7b1of6padsnLud0j3w9E6BC",
- "wdtBfmkxayvCLakiDg5S0HMnsy+c73sfZnMYE8V46lKhD8U1sQJOTKvwCGw5pF2xLzz+rXPWORwd+o0S",
- "3SARbNmFoQXHBM3fhVh43XdfV3H3EVXMbUE7EK8aQdP+Pb2kTCdzIV2aeaw5HrFWd7JYUaZdpXv3KtbC",
- "qYhd1XLLUNw4QT1EFToOWxB88IrZ/b6vipnqOyF3Mo43emwtiFkYqbhmPgLZnLdaxvz9WZrvped76fle",
- "er6Xnu+l53vp+V56vpeeP7b0/Hm8XUmSeD7tbcOxQCYy+iIl/C8oVuhTBvc0Qn8t8uMjwYjo5hxv9ILR",
- "QPOpq0KM7grRmpvWnT6saJya6RgnZU6NNAQr7YO6yYwq+OqZd8qoa0fatPeG15gGT5+Qkx8Onf+BdXgQ",
- "807bh74im9LrHB45b8E6L7V3GwROsVIleg1S//pJnUeJFebnLAeiDK6+xdZHcAG5keSt8ZOYt0j/dXQK",
- "NH/pcGOZEij9jcjWHboxy58iJtoU0xj8GacyUla3Tyc9HGuBpbVdnejeA+rqzt1T+rvf369tWxWvZRIv",
- "f7uJXGKuQz3/gVE99i5GM7OnHp3EleT9rBybIESOzBru9LsJWujWZXMHB9saocIdvy81wMAjPnrw8NiO",
- "fd0qwrQijuJWiWm0AJ44tpDMRLZ25dN9he8Wk7Wll4d5rK1rDK5wvDsGD9Ujw2URoyvd0vRg1eeFrUbU",
- "1VpgtpK6qNZn4Zu28O5Gtnlz4rCD1yE+t/VO7Q7XZxqBE8ZDIclCiqp8ZHNH8TU+iIuS8rVXghlJsahy",
- "V2QQPervllHXlbF6bNY/xobfcW/9cy14rTinwvbvFi1YT8vuL2Sk4hnIeNmaFVfXLMvcFEXfVqrEF23q",
- "rc7Nuwvn97vsXEprxV9pS9hFyqh3iqbfh7H9KW6EtzZH2wCD7XtlNQxhsvVikAHLwpuhk9TEXw1tfvqO",
- "Xp62StvvxlNXiZM7by2ULgHlsVpIi2SAMdelFDRLqcJIHQ76Usjzjyyw6tVxROuAYGImr76jsrm/J1vl",
- "Shx3J3Gy7VPvJsRUO8pWP/q8wmXjfXroAqNa2LhXBPxRFAHf+MOnCMVCip3DaXV+eCZ3YFP0Uq94lEtN",
- "S5sJdMjfLTgQLmfonVruesO3DXhBHk5rgIC8JJSkOUPzhOBKyyrVZ5yiArRTZbJj3PNq3WFR6qVvEtfB",
- "R1TkbqgzboSqOanVolGRag4Rg8d3AF5iU9ViAUp3OPEc4Iy7VoxjfWecC4t2JtYP1FzXhqNPbMuCrsmc",
- "5qjB/w2kIDPziAizw6A6UWmW586aaKYhYn7GqSY5GKb/mhmBzgznNU61hdzSXY2FgWLEtgxYEldCfG+/",
- "YsyFW77XGqFyy35u6q9/lmJ9SayevYP8+Mhlbjs+wmQ8jR2xB/snMy4VjCdRIjM3vrPHd2mLPDQynieg",
- "R41F0u36GTfCtBYEGT3VNyOHrhGgdxbt6ehQTWsjOrYCv9b3sajhhUjMk5EuzO8LppfVDMvl+Wji6ULU",
- "kcXTjEIhOH7LprRkU1VCOr14vEU+uAW/IhF2dX9z/3FU+CEdmNNSbzxmqO7u/cC9fAeJcn/f2XG3Oijd",
- "56K9z0V7n630Phft/e7e56K9z9R6n6n1z5qpdbJRQnTZTbbmTtQ91SYlElI7c83Aw2atLIt9qyTTE0JO",
- "l4b/U3MHwAVImpOUKisYcesnV7DFUhNVpSlAdnDGkxYkqSjcxA+b/9pn7lm1v/8UyP6jbh+rtwg4b78v",
- "iqr4CU1N5GtyNjob9UaSUIgLcDnXsHlWoanY9to67P9Xj/uj7G1dQddWubKkZQnmWlPVfM5SZlGeC/MY",
- "WIiOdx8X+AWkAc7mySBM2/S2iE/0inS+OdRFn8eE7v79fo3iXIfdbAb36WM+ep3u/obdHQ/cOHaPId6z",
- "jE/BMj470/gDZbq7T2r3O1tQaEhtZa29hSRVl2uL6J28jGTVyYY34wiQVpLpNd5wtGS/nIP5/3vDxxXI",
- "C3/5VTIfHYyWWpcH0ynmlV8KpacjczU131Tno7kf6MKO4C6XUrILzEn5/ur/BQAA//9/eRxT8xABAA==",
+ "H4sIAAAAAAAC/+y9e3fbOJIo/lXw0+45Sbyi5Lx6Jj6nz/7cST+8k6RzYvfs7LZzuyGyJGFMAhwAtKXO",
+ "9Xe/BwWABElQkl9Jp9t/JRbxKBQKhUI9P45SUZSCA9dqdPBxVFJJC9Ag8S+apqLiOmGZ+SsDlUpWaib4",
+ "6MB/I0pLxhej8YiZX0uql6PxiNMCmjam/3gk4V8Vk5CNDrSsYDxS6RIKagbW69K0rkdaJQuRuCEO7RBH",
+ "r0aXGz7QLJOgVB/KH3m+JoyneZUB0ZJyRVPzSZELppdEL5kirjNhnAgORMyJXrYakzmDPFMTv8h/VSDX",
+ "wSrd5MNLumxATKTIoQ/nS1HMGAcPFdRA1RtCtCAZzLHRkmpiZjCw+oZaEAVUpksyF3ILqBaIEF7gVTE6",
+ "+HmkgGcgcbdSYOf437kE+A0STeUC9OjDOLa4uQaZaFZElnbksC9BVblWBNviGhfsHDgxvSbkTaU0mQGh",
+ "nLz/7iV5+vTpC7OQgmoNmSOywVU1s4drst1HB6OMavCf+7RG84WQlGdJ3f79dy9x/mO3wF1bUaUgflgO",
+ "zRdy9GpoAb5jhIQY17DAfWhRv+kRORTNzzOYCwk77oltfKubEs7/WXclpTpdloJxHdkXgl+J/RzlYUH3",
+ "TTysBqDVvjSYkmbQn/eTFx8+Ph4/3r/8t58Pk/91fz5/ernj8l/W427BQLRhWkkJPF0nCwkUT8uS8j4+",
+ "3jt6UEtR5RlZ0nPcfFogq3d9ielrWec5zStDJyyV4jBfCEWoI6MM5rTKNfETk4rnhk2Z0Ry1E6ZIKcU5",
+ "yyAbG+57sWTpkqRU2SGwHblgeW5osFKQDdFafHUbDtNliBID17XwgQv6/SKjWdcWTMAKuUGS5kJBosWW",
+ "68nfOJRnJLxQmrtKXe2yIidLIDi5+WAvW8QdNzSd52uicV8zQhWhxF9NY8LmZC0qcoGbk7Mz7O9WY7BW",
+ "EIM03JzWPWoO7xD6esiIIG8mRA6UI/L8ueujjM/ZopKgyMUS9NLdeRJUKbgCImb/hFSbbf+v4x/fEiHJ",
+ "G1CKLuAdTc8I8FRkw3vsJo3d4P9Uwmx4oRYlTc/i13XOChYB+Q1dsaIqCK+KGUizX/5+0IJI0JXkQwDZ",
+ "EbfQWUFX/UlPZMVT3Nxm2pagZkiJqTKn6wk5mpOCrr7eHztwFKF5TkrgGeMLold8UEgzc28HL5Gi4tkO",
+ "Mow2GxbcmqqElM0ZZKQeZQMkbppt8DB+NXgaySoAxw8yCE49yxZwOKwiNGOOrvlCSrqAgGQm5CfHufCr",
+ "FmfAawZHZmv8VEo4Z6JSdacBGHHqzeI1FxqSUsKcRWjs2KHDcA/bxrHXwgk4qeCaMg6Z4bwItNBgOdEg",
+ "TMGEmx8z/St6RhV89WzoAm++7rj7c9Hd9Y07vtNuY6PEHsnIvWi+ugMbF5ta/Xd4/IVzK7ZI7M+9jWSL",
+ "E3OVzFmO18w/zf55NFQKmUALEf7iUWzBqa4kHJzyPfMXScixpjyjMjO/FPanN1Wu2TFbmJ9y+9NrsWDp",
+ "MVsMILOGNfqawm6F/ceMF2fHehV9NLwW4qwqwwWlrVfpbE2OXg1tsh3zqoR5WD9lw1fFycq/NK7aQ6/q",
+ "jRwAchB3JTUNz2AtwUBL0zn+s5ojPdG5/M38U5Z5DKeGgN1Fi0oBpyw4LMucpdRg7737bL6a0w/2eUCb",
+ "FlO8SQ8+BrCVUpQgNbOD0rJMcpHSPFGaahzp3yXMRwejf5s2WpWp7a6mweSvTa9j7GQEUSvcJLQsrzDG",
+ "OyPQqA1cwnBm/IT8wfI7FIUYt7tnaIgZ3pvDOeV60jxEWoygPrk/u5kafFsZxuK787AaRDixDWegrFxr",
+ "Gz5QJEA9QbQSRCuKmYtczOofHh6WZYNB/H5YlhYfKBMCQ3ELVkxp9QiXT5sjFM5z9GpCvg/HRgFb8Hxt",
+ "bgUrY5hLYe6uK3d91Rojt4ZmxAeK4HYKOTFb49FghPfboDh8LCxFbsSdrbRiGv/g2oZkZn7fqfOXQWIh",
+ "boeJC59PDnP25YK/BE+Whx3K6ROOU+JMyGG37/XIxowSJ5hr0crG/bTjbsBjjcILSUsLoPtiL1HG8ell",
+ "G1lYb8hNd2R0UZiDMxzQGkJ17bO29TxEIUFS6MDwTS7Ss1s47zMzTv/Y4fBkCTQDSTKqaXCu3HmJX9bY",
+ "8QfshxwBZESi/xH/Q3NiPhvCN3zRDmte6gzpVwR69cw8cK3YbGcyDfDhLUhh37TEvEWvBOXLZvIej7Bo",
+ "2YVHfGuf0QR7+EWYpTdKssOZkNejlw4hcNKo/gg1owbHZdzZWWxalYnDT0R9YBt0BmqsLX0pMsRQd/gY",
+ "rlpYONb0DrCgzKi3gYX2QLeNBVGULIdbOK9Lqpb9RZj33NMn5PiHw+ePn/zy5PlX5kFSSrGQtCCztQZF",
+ "Hjoxmii9zuFRf2Uoz1a5jo/+1TOvMGqPuxVDCHA99i4n6gQMZ7AYI1Y9aqB7xZS5zorZreBxaK1ZM0tG",
+ "HCQZbF3lVZfXTLMOlyjXsrqNdwNIKWREi4GnQ4tU5Mk5SMVERCH9zrUgroWXJcru7xZackEVMXOjgq3i",
+ "GchJjLj0iiNoTEOhtt2FduiTFW9w4wakUtJ1D/12vZHVuXl32Zc28r2+RpESZKJXnGQwqxYtsXMuRUEo",
+ "ybAj8vy3IgPzZKjULTC6ZrAGGLMRIQh0JipNKOEiA3xfVCrOAgesU6gWR22+DrmqXtordgZGlk1ptVhq",
+ "UpUEddW9rW06JjS1m5LgdagGlHm1Fta2stNZy0cugWZGxgVOxMxpzJwuDxdJUdGuvQ3dMeCI1N+Cq5Qi",
+ "BaXM28RKnFtB8+3sLusNeELAEeB6FqIEmVN5TWC10DTfAii2iYFbS0xOzdiHerfpN21gd/JwG6k0zxNL",
+ "BUY8M6c7Bw1DKNwRJ+cgUd12p/vnJ7nu9lXlgDHcCRknrMBXDqdcKEgFz1R0sJwqnWw7tqZRSxIyKwhO",
+ "Suyk4sADL+3XVGmrdGU8Q6nYshucxz7BzRTDAA/eKGbkv/vLpD92avgkV5WqbxZVlaWQGrLYGjisNsz1",
+ "Flb1XGIejF1fX1qQSsG2kYewFIzvkGVXYhFEda2icFaJ/uLwIW/ugXUUlS0gGkRsAuTYtwqwGxoEBwAx",
+ "T6i6JxIOUx3Kqa2Q45HSoizN+dNJxet+Q2g6tq0P9U9N2z5xUd3w9UyAmV17mBzkFxaz1hS8pEZ8xZFJ",
+ "Qc/M3YTCqNUO92E2hzFRjKeQbKJ8cyyPTavwCGw5pAPvAOdsEszWORwd+o0S3SARbNmFoQUPPEreUalZ",
+ "ykqUJP4G61vXaHQniCo3SAaaMiNtBx+QgSPvrfsTq+7vjnk9QWsnIbQPfk8KjSwnZwovjDbwZ7BGLec7",
+ "a0c+CazPtyApRkY1p5tygoB665S5kMMmsKKpztfmmtNLWJMLkEBUNSuY1tYxoC1IalEm4QDRt/mGGZ12",
+ "xNpg/Q7soq45xqGC5fW3YjyyYstm+E46gksLHU5gKoXId9Ai95ARhWAnLTMphdl15vxQvLOCp6QWkE6I",
+ "QdVYzTwfqBaacQXkf0RFUspRAKs01DeCkMhm8fo1M5gLrJ7T6ZMbDEEOBVi5Er/s7XUXvrfn9pwpMocL",
+ "77xlGnbRsbeHr6R3QunW4bqFF685bkcR3o5KC3NROBmuy1MmW5/2buRddvJdZ3A/KZ4ppRzhmuXfmAF0",
+ "TuZql7WHNLKkarl97TjuTkqNYOjYuu2+SyHmt6QDixvv8XHi7PGmFZlX3AJVKfccQROVV2iI+bh20LCO",
+ "2dZ4XxXYG/+/pE6nNho3VnfbwFzIzecPEZGSZauYc0UGq9imuDOGz6kH5u2xVhC1aCFnFvOIfxXIs9wt",
+ "rcM7SAHmUKslK82QjS/IWkPLj/T/PPzPg58Pk/+lyW/7yYv/mH74+Ozy0V7vxyeXX3/9f9s/Pb38+tF/",
+ "/ntMtFaazeKqyx8MosWcOB6/4kfcGh/mQtoH2drJeWL+6eHWEiCDUi9jjpulBIW80TpglnrZbCpAR4lS",
+ "SnEOfEzYBCZdHpstQHltUg50jg6E+KgQuxg06/Ng6c0TR4D1cCE7MbIY/aB5DmkTT7N5deTrW5Be7EBE",
+ "tvHpX+vKfhXz0OvVHRS1VhqKvsLLdv1lQNx/74Xl3qESPGcckkJwWEcDPRiHN/gx1tvedwOdUfIY6tt9",
+ "TLTg74DVnmeXzbwpfnG3Awb/rjZK38Lmd8ft6DpDf1/U1UBeEkrSnKEmR3ClZZXqU07xrRiQa8QU5F/A",
+ "w9qDl75JXF0R0Sa4oU45VQaH9QsyqgOfQ+TO+g7AKxFUtViA0h2peQ5wyl0rxknFmca5CrNfid2wEiTa",
+ "Yya2ZUHXZE5zVHb8BlKQWaXbciTeekqzPHeKVzMNEfNTTrXhQUqTN4yfrHA47/3naYaDvhDyrMZC/Ipa",
+ "AAfFVBLn+9/br8j+3fKX7irAGBH72fObT833PewxpzkH+dEr98Y6eoWCdKNy7cH+yfRwBeNJlMiMYFQw",
+ "jr7XHdoiD81zwBPQo0Z563b9lOsVN4R0TnOWGeHpOuTQZXG9s2hPR4dqWhvRUav4tX6ImfwXIilpeoYW",
+ "39GC6WU1m6SimPq35XQh6nfmNKNQCI7fsikt2VSVkE7PH28RdG/Ar0iEXV2OR47rqFvXxLiBYwvqzlkr",
+ "NP3fWpAH3397QqZup9QD60Frhw5cHyPqAOfd07JYmcXbCDDrQnzKT/krmDPOzPeDU55RTaczqliqppUC",
+ "+Q3NKU9hshDkwDsMvaKanvIeix8M0gxctUhZzXKWkrPwKm6Opg286Y9wevqzIZDT0w8980f/4nRTRc+o",
+ "nSC5YHopKp24yIJEwgWVWQR0VXuW48g2LmjTrGPixrYU6SIX3PhxVk3LUnUdTfvLL8vcLD8gQ+XcKM2W",
+ "EaWF9EzQcEYLDe7vW+HeXJJe+LCUSoEivxa0/Jlx/YEkp9X+/lMgLc/LXx2vMTS5LqGlOLqWI2xXaYQL",
+ "twIVrLSkSUkXoKLL10BL3H28qAtUUeY5wW4tj0/vH4FDNQvw+BjeAAvHlb3XcHHHtpcPEY0vAT/hFmIb",
+ "w50azf919yvwAb32dnX8SHu7VOllYs52dFXKkLjfmTpybGF4sjfHKLbg5hC4ILsZkHQJ6RlkGO8DRanX",
+ "41Z3b/FzN5xnHUzZuDjrpIbBG6hjmwGpyow6GYDyddeLXoHWPnTgPZzB+kQ0sR9XcZtvO3OroYOKlBpc",
+ "RoZYw2PrxuhuvrMeowNrWXqfaPT/82RxUNOF7zN8kO0NeQuHOEYULWfjIURQGUGEJf4BFFxjoWa8G5F+",
+ "bHlGvJnZmy+i5vG8n7gmjdTmLMDhatCH2n4vAINsxYUiM6ogI8LFh1qH5YCLVYouYED3FKo5d3QLbqlG",
+ "cZBt9170phPz7oXWu2+iINvGiVlzlFLAfDGkgnrCjt3fz2Q16biCCcG0Dw5hsxzFpNrlwDIdKlvqZhvH",
+ "PgRanIBB8kbg8GC0MRJKNkuqfOgqRvj6s7yTDHCHDvib4q2OApN1EMZbR1N5nts9p3HFrY2rKlQTXxVq",
+ "bXeIlRqPnBdVbDsERwEogxwWduG2sSeUJhig2SADx4/zec44kCRm/aZKiZTZ2OPmmnFzgJGP9wixuiey",
+ "8wgxMg7ARgsRDkzeivBs8sVVgOQumIH6sdG2FPwNcVdA699kRB5RGhbO+IBnmucA1LlM1PdXx3EHhyGM",
+ "j4lhc+c0N2zOKVGbQXrRPyi2dmJ9nI3y0ZA4u0H1Zy+WK63JXkXXWU0oM3mg4wLdBog3ixKxLVCIL/f0",
+ "rXE1dJfuMvXA9T2Eq4dB3NC1AOhoIprUOu7lt/WF1r6b+zdZw9LHTSCsd82M0f4Q/UR3aQB/fUVwHenz",
+ "rntdRx/pbdtlO8gpkJ9irNickb5qtK+AVZADSsRJS4JIzmIKcyPYA7LbY98teLljKBXl60eBQVzCgikN",
+ "jerK3EpeF/upzV0UQ7eFmA+vTpdybtb3XoiaR9sQQWu+C5f5yVdwLjQkcyaVTlDvF12CafSdwhfld6Zp",
+ "XFBom9xtFhOWxXkDTnsG6yRjeRWnVzfv316Zad/WShhVzc5gjeIg0HRJZph1J+qIs2Fq66u1ccGv7YJf",
+ "01tb726nwTQ1E0tDLu05vpBz0eG8m9hBhABjxNHftUGUbmCQePG/glzHoo0CocEezsw0nGxSPfYOU+bH",
+ "3vRQCqAYvqPsSNG1BK/ljatg6H1gnntMB0lr+nEDA2eAliXLVh1FoB118LlIr/Ta90HBHSzg7rrBtmAg",
+ "UPrFXFMlqHb8dyPd2vRDPFzbZCfMnLSjtEOGEE7FlE+e10eUIW3M8LQNVydA87/B+u+mLS5ndDke3Uxv",
+ "GMO1G3ELrt/V2xvFMxrErB6pZQa4IsppWUpxTvPEaVeHSFOKc0ea2NwrYz8xq4vr8E6+PXz9zoF/OR6l",
+ "OVCZ1KLC4KqwXfnFrMqGmg8cEJ+cyzx4vMxuRclg8+sQ4FAje7EElwgpkEZ7iRsabXtwFJ2Gdh63y2/V",
+ "tzrDgF3iBgMBlLV9oNFdWfNA2yRAzynLvdLIQztgQ8fF7Zb9I8oVwgFubFoILETJrbKb3umOn46Gurbw",
+ "pHCuDamaCpuNTBHBuy5ZRoREXRSSakEx7YJVCfSZE6+KxBy/ROUsjSsY+UwZ4uDWcGQaE2w8IIyaESs2",
+ "YIfkFQvGMs3UDg/dDpDBHFFk+hQeQ7ibCZdGtuLsXxUQlgHX5pPEU9k5qJjnwqma+9epkR36c7mBrXq6",
+ "Gf4mMkaYcqR74yEQmwWM0EzVA/dV/WT2C63VMeaHQB9/BWt3OGPvStxgqXb04ajZugwt2+amMOtrn/8Z",
+ "wrAZwrannPWPV5f7ZGCOaApZppK5FL9B/J2Hz+OI37pPssLQa/I34JNI+E+XxdTanSYTbjP74HYPSTeh",
+ "FqptoR+getz5wCaFCS28epZyu9U2o2PLLyROMKEv19SO3xCMg7nn/5bTixmNZfswQoaB6bCxfrYUyVoQ",
+ "39nj3um8mct7MyGBIbVuy2xEVwmyCSnpRw9fU2Cw0+4sKjSSAVJtKBOMrfErVyIyTMUvKLeJQU0/e5Rc",
+ "bwVW+WV6XQiJ8ZgqrvPOIGUFzeOSQ4bYb8evZmzBbFrMSkGQd9ENZPMJWypyuSutfblBzdGc7I+DzK5u",
+ "NzJ2zhSb5YAtHtsWM6qQk9eKqLqLWR5wvVTY/MkOzZcVzyRkeqksYpUgtVCHz5vacjMDfQHAyT62e/yC",
+ "PESblWLn8Mhg0d3Po4PHL1Dpav/Yj10ALv/tJm6SITv5b8dO4nSMRjs7hmHcbtRJNLrQJi0fZlwbTpPt",
+ "ustZwpaO120/SwXldAFxN4liC0y2L+4mKtI6eOGZzbirtBRrwnR8ftDU8KcBn0/D/iwYJBVFwXThLBtK",
+ "FIaemqSKdlI/nE3f6zL/eLj8RzQQlt4+0nlEflqlqb3fYqtGM+5bWkAbrWNCbRBuzhrTvU/WRY58KD+m",
+ "QqozIFncmLnM0lHMQUv+nJSScY0Pi0rPk7+SdEklTQ37mwyBm8y+ehZJ/9TO+MKvBvgnx7sEBfI8jno5",
+ "QPZehnB9yUMueFIYjpI9anysg1M5aMmMe4t5jt51Ftw89K5CmRklGSS3qkVuNODUNyI8vmHAG5JivZ4r",
+ "0eOVV/bJKbOScfKgldmhn96/dlJGIWQssUtz3J3EIUFLBufouBbfJDPmDfdC5jvtwk2g/7yWBy9yBmKZ",
+ "P8uxh8A3FcuzvzcxI50MepLydBnV+89Mx1+aDMf1ku05juYRWVLOIY8OZ+/MX/zdGrn9/yl2nadgfMe2",
+ "3cx4drmdxTWAt8H0QPkJDXqZzs0EIVbbTvS112W+EBnBeZqkFQ2V9ZP9BSm0/lWB0rGgPfxgPT9Qv2Pe",
+ "BTaDEwGeoVQ9Id/bCiVLIK2YepRmWVHlNj4bsgVIp3isylzQbEzMOCffHr4mdlbbx6brtBmkFijMtVfR",
+ "edcHGW528yH0mTfj/s27j7PZ4dKsWmlMcaE0LcpY6IppceIbYHxMqOtEMS/EzoS8shK28vKbncTQw5zJ",
+ "wkim9WiWxyNNmP9oTdMliq4tbjJM8runPvNUqYKk7nWO1jpJDZ47A7fLfmaTn42JMO+LC6ZsYQo4h3a0",
+ "TB065p5OPnqmvTxZcW4pJcqjN4U2XgftHjhr0Pbq0ChkHcRfUXBRopIpXDUT3DH2imZ96KaV62Vzt1HF",
+ "dXpRX3AopVxwlmLOhaAURg2yK3Kxi61gh/QUXWWUP+LuhEYOVzSZXe1O5LA4mN7OM0KHuL6yMvhqNtVS",
+ "h/1TYzWFJdVkAVo5zgbZ2OdkdPoSxhW4pENY7yTgk0K27C/IIaMmvaRW/V6RjNB3fkAA/s58e+ueR+hU",
+ "esY4CkIObc5/1Wo0MAe/NtIT02QhQLn1tEPz1c+mzwTD0zNYfZj4nP04hjVfmGVbW11/qENvuXOWMtP2",
+ "pWlLrNdh/XPLTdFOeliWbtKoq1G9w7GUi4MIjlhgEq8CD5Bbjx+OtoHcNprc8T41hAbnaLCDEu/hHmHU",
+ "2Ss7mXbPaV5ZisIWxLq6ROMrGY+A8ZpxaCpKRC6INHol4MbgeR3op1JJtRUBd+JpJ0BztNLFGJrSTkV7",
+ "06E6G4wowTX6OYa3sUm8OcA46gaN4Eb5ui5kYag7ECZeYgUdh8h+Gk2UqpwQlaHbcSexZoxxGMbts+62",
+ "L4D+MejLRLa7ltSenKvcREORZKmIyZvfriCtrBFaKO+FTFIMzQ7ui6hGs0kRG9mGME2tRy26iM/W+G8s",
+ "x9IwSpyV+Mp+St4kjB2vLLC2R+qJm4aYEsUWye6YQGZ+c3Q0U1+Pwpr+t0piuVi0AfnEuVA2sZdwj2KM",
+ "5VvDscO45l7iMMvT67Bj9AoSPlE8vtfqgLk2O8A7pJdJDLXRdc7vzfqA4ezdY7x1BnwDgwww1F5s1rwx",
+ "5CGYDjq0Uu3iSjQlTfqKPk+wKbdjI1j3Apvq21YJjKp2hlwKrEeB+dzrvZtI1hNwceyNCPW+Kn2A/uYd",
+ "4UhJmbPdNcyij1nnMtt3Yt7Fma7Z4O4inCMqDhJbSS9B4GYK6TkiB870No/bZPeA9sPaMIrmGszCvQDu",
+ "0nC3XQx3dnSazyHV7HyL4/d/G2G5cSoee3HalmcI/MBZ7Tjji0leUcpvANrkl70RniBrxo3BGXL7PIP1",
+ "A0Va1BBNLDf2hHqdeEnEAGYUSQyJCBUzPNj3v9MFM1VTBmLBG/psd2iSOQ1m9A3CGK45lydJQsPQhg1T",
+ "novYA2KnuUzXKwX8oA/IkG94P6fm8O31ClOYqjobe10tMvDjMO/Ebr63CxeviW76tcrLR26C8r/5mBw7",
+ "i61C2uQcRgXjBZWZbxGVmL0wngx4W3X9l62bOIsDPa9nZo1bRt+FN5LnAJ1v0lwoxhfJkAdT2xMiLGSE",
+ "9h7UTWCyUoRrDtLlGte+yGuihXfj2ATHJlS4ojvXQYIazNpngRuM+H3fhDRjcidqS/w6W1a4QCKhoAY6",
+ "GQQeD8+5Cdkv7Xfvs+qT+3RSaUXG9fSabI0c9g45TPWQGFL9nLjbcrsv7HWeKoxzW8pBxaKQuUFlqMQq",
+ "pciq1F7Q4cEA/6TbOcZ/AyuJSvlpf5U9gS3HjBevg8iCM1hPrdCULilvUo+0j7VNRmjXEETydXb7Vl9x",
+ "cYE1X9gFLG4Fzs/5EhqPSiHyZEBrddQPpu6egTOWnkFGzN3hTdkDWX3JQ1SW1GaJi+XaBw+XJXDIHk0I",
+ "MW+potRrb6FopxHrTM4f6E3zr3DWrLL5DdwjbXLK414Ytmj2DfmbH2YzV1NgmN8Np7KDbIlWXg0Eckt6",
+ "EclxvWsFsojNoJt3uCEqC0VMSrlm6NpO57v/UIuQfhh0sOX9c9Z61dlEOR07gZBwy6+7QEF6xdddP5xi",
+ "1+XhOpCrVQr669x5A1q4HcD9LohvVBN95A5rFPRsF41CPKmH6Y4qDYsQzIhDEFTy6+NfiYS5q+C/t4cT",
+ "7O2NXdNfn7Q/m9fX3l70ZH4yZUar0JmbN0Yxfx+yK1vb6YALQ2c/KpZn2wij5ZDSZKtEl4tfnOvOZ8mX",
+ "+Yt9IvePqksdeBU1ancTEDGRtbYmD6YKXE128DJx3SI+JXjZpJVkeo0RRf5FxX6JRmp/XythXPXM2gfd",
+ "uUDbOvXOI6pR2TSlxb8Xtv5dYe56VGJrTOj/7YoWZQ7uoHz9YPYXePrXZ9n+08d/mf11//l+Cs+ev9jf",
+ "py+e0ccvnj6GJ399/mwfHs+/ejF7kj159mT27Mmzr56/SJ8+ezx79tWLvzzwdb0toE3N7H9gUtnk8N1R",
+ "cmKAbXBCS1bX8TBk7BNU0hRPonmT5KMD/9P/70/YJBVFM7z/deTc40ZLrUt1MJ1eXFxMwi7TBb7REi2q",
+ "dDn18/TrJ7w7ql13bMgF7qj1yjCkgJvqSOEQv73/9viEHL47mjQEMzoY7U/2J48xD3QJnJZsdDB6ij/h",
+ "6Vnivk8dsY0OPl6OR9Ml0ByTg5s/CtCSpf6TuqCLBciJy9Rpfjp/MvWW/+lH9z69NKMuYrFW1gkprA7c",
+ "S2DpdF1oT7JORq2EUMrlJxrXacKc+Mgz9A2xTz7D2mpkHWVNSpCjoOSsC4yykeIHP0cSJ8/ZopKdykO1",
+ "Nt/lEGSK/Nfxj2+JkOSN1bm/o+lZ6H8Rq+HuWFmshLvz0ijUomybNBtNf6xGSSwTKM5s9jmg1FpV1HAi",
+ "LSsIIWn4quGV+8mLDx+f//VytAMgqLd0RWx/pXn+qy0NBStU/rTrSavxUPHycaN66JSMHqNNtv4aZqis",
+ "27Q9gX7lgsOvQ9vgAIvuA81z01BwiO3Bh06t/Cf7+3dQdnrcGsWTxGetX/3sFhfatqDdeLnd4XqL/oZm",
+ "mDUQlLZLefzFLuWIo+nAcHxib7TL8ej5F7w3R9zwHJoTbBnEP/VvkZ/4GRcX3Lc00kxVFFSuUVYJUpuG",
+ "Uunl4G01DdOwTT+2FMvZje6yXgbKo1dbrrcHaogp9hMDdLK8me91HjNUPYZ1+dWjCfk+7H294vsNbGEJ",
+ "/oHLNnit39+7d3rvHra1Dq3I8hgwLRLfCFPP8nTTi6/v99RJ0n2tJNhBPrlrZOW500yh3bLaQ/ULd2Cw",
+ "97gbqv04IN4E8NaSTjsP4N3zXft+C66J1n1wh1z5CxfW3tDc0Emw3I4Ptk23cC/E/WmEuNoZwdbawAxD",
+ "m8Q6TCM6/eizY9yCKOeyg+wgxIUv3aBvkL3hYYdTPJrYVBdhm+uxA+dYsFU8w5wl94LZXQtm/WQ/MTCa",
+ "FC6fTxhDGJZNNqCrFLhoJe+9UtaiL1T6+hMja1DcMpBuF7SuwRt7QpTjxHfGM/+QwpND2r3Y9KcWm6wv",
+ "3wbBqZWJyzl+DstOEBTeDmqZtBzPZmtPh2OisL6/+amUTEim12PCOMnAnD20GAqJQcdNCW/nZAQc//vm",
+ "8B/oevrm8B/ka7I/rkUwjMmKTG+de9oy0PegIyXmv1kf1uLARlnodyNgnNRIGigBr4VPpoVIK+jq6yGU",
+ "raxdMSaeFXQ12iiJjL8cafGmQlMnmrJPRa6Ipi0h7wq/tF2qFIEVTXW+JhTvn7X1/cXi4j4TVqccerc2",
+ "fyzeaMOMvq5ELGrsql5dkZD1Xl38gXLBQ1TqMs5hEZftgkkPGVEIrifl3e/uF7u7fbGUlMKcaYYpEZr7",
+ "xN9VLSCb6gIO3AGH1Qn5H1Ghs4stngWxdJ44Azr3+jmdABrk482xdFmNnb297sL39tyeM0XmcIEclHJs",
+ "2EXH3t4fQGRd1VkUKeGCJxxrO50DCTzk7uXW37Xc+nz/6Re7mmOQ5ywFcgJFKSSVLF+Tn3idduZmYnnN",
+ "cyoeJALayH96nvKNFB2I7zeyXXdt00w3kmErcCpQIdQl+Nxbedzk8DdveUwX4gPW1dibTtDxz1pV7H6M",
+ "e4aVSUxIDyw436yPXu0il38hhtCd01ZF7rX43tz1DRD1p3n/afxpdmOmz/affToIwl14KzT5DtVld8zS",
+ "71R3ECergNlc2aLSWExC1uICETcyFXNCxy7VKOa+XJM6UMjwE8sIbbGBPtcwM+zKL37H+vkdyuxG6LKL",
+ "3nu+cM8XbsQXugTVcAQMt1fTj2gqCNlB70h+Y1r+gUyMgb1FisIbXASZg06XNg1BNywmwlZ89rthnrIp",
+ "R/wt2/8Q6EiOXFyLC/3A3OU7BgRixx9sJMbleJSCjBDfjz4fjvnM5hjWWWc29KUQ0JzDfHbgOjGwS5/O",
+ "lPc5d1lviNnFK0H5spm8H6aDaLkNm+E9gq+G4B5T+9blabbHyy3ij+CV7pP4JuQtikN4wH1ivz+i2uMu",
+ "b+S7XtBbwcHapY3Eamnx3gRZiwtYTQWR4rMgWMOjK9AaFx3aRsePesWyy2mdpmdIqHiHDbYIFc1NzZoa",
+ "lm31Ci1LoFJd+5Lebg476cx49Cr002hlFarzCUVAMXi5oiXxP3YxI/5xrXXtG3hJ1TIe340btC6xCINp",
+ "ReYVt1tV14tCpx3vRiLm41pdbfiBmB+c8j2iqgJ74/+X9PnjJ788ef7VaFyv2DUw2Gg+f4jEH7NsFU29",
+ "ASufQSikJ6czxEP1QJGSrgcz9gwkv3oD8iz3hbrbxhFSgLmI1JKVn6PwOJvFa+/8YBAt5qTOiH3Ev6n5",
+ "zjlINscCUvV5+sQZVSRABqVebkxlYOtflXrZbCq4Co1MuZQxpRTnwMeETWDSNSJliya5bA50XqccEWIX",
+ "F6/6PFh688QRYD1cyC4i2rsY/WAYoUvN9qmVEY0rlL0EPPJkhx9/Vk2F/iyaireCJyjHANdepm6h5fNp",
+ "LTBLzDhQDNY1BrjQqBAUEsWvkG2pyU6CCwwaaVo80LocDpKxE2NSqtNlVU4/4n8wYv+yiY23BTWmVoG5",
+ "SZI5ti1u1TXFjklkm9v4JBFOqSrm5A1LpTjEbELuGlFrpaHol1u0XX/ZVKoheuUInjMOSSF4LL/Ej/j1",
+ "DX6M5itCc/dAZ3Q8GOrbLZLTgr8DVnueXVjdTfE7+X0oR28k6HdWK6Gs3fvQDwLpvzktrYSxzTFp/Tz9",
+ "2PrT2RlcS7WsdCYugr42H8TGs2Vb3OrZeisysOO2U7DE/C65yMClregfqZprxEVSj9+mXUc4SGm1WGpb",
+ "LjBai7TumNDUHgWbc1VtS1JpW/lkbOdAaC6BZmsyA+BEzMyi28l+CVV1/VckDssb47kWG7hKKVJQCrIk",
+ "rBO0CbQ6GQhKPnoDnhBwBLiehShB5lReE1jLJDYD2i2QV4Nba9gcH+hDvdv0mzawO3m4jVQC8QwRnzSi",
+ "KHNwj5oICnfECcra7I73z09y3e2rSixFE8kWar+esALzXXDKhYJU8EwN5/Tddmwxi2+wFgW2+qo/KdEK",
+ "H2bggav1NVXaVUJqpT4MckGbKTYkIR5K5GVG/nudxqs3dmr4JVeVaopEWdkLsmj9TVhtmOstrOq5xDwY",
+ "uxbubG3gbSMPYSkYvy4bFWQV1oH2xwwXWRwGj1AnikVq2IdANIjYBMixbxVgN1SxDADCVIPoOlVom3KC",
+ "ur1Ki7I0508nFa/7DaHp2LY+1D81bfvE5Zzuka9nAlQoeDvILyxmbUW4JVXEwUEKeuZk9oXzfe/DbA5j",
+ "ohhPXSr0obgmVsCxaRUegS2HtCv2hce/dc46h6NDv1GiGySCLbswtOCYoPm7EAuv+u7rKu7uUMXcFrQD",
+ "8aoRNO3f0wvKdDIX0qWZx5rjEWt1J4sVZdpVunevYi2cithVLbcMxY0T1ENUoeOwBcEHr5jd7/uqmKm+",
+ "E3In43ijx9aCmIWRimvmI5DNeatlzN+fpfleer6Xnu+l53vp+V56vpee76Xne+n5rqXnz+PtSpLE82lv",
+ "G44FMpHRFynhf0GxQp8yuKcR+muRHx8JRkQ353ijF4wGmk9dFWJ0V4jW3LTu9GFF49RMxzgpc2qkIVhp",
+ "H9RNZlTBV8+8U0ZdO9KmvTe8xjR4+oQc/3Do/A+sw4OYd9o+9BXZlF7n8Mh5C9Z5qb3bIHCKlSrRa5D6",
+ "10/qPEqsMD9nORBlcPUttn4F55AbSd4aP4l5i/RfRydA85cON5YpgdLfiGzdoRuz/Cliok0xjcGfcSoj",
+ "ZXX7dNLDsRZYWtvVie49oC5v3T2lv/v9/dq2VfFaJvHyt5vIJeY61PMfGNVj72I0M3vq0UlcSd7PyrEJ",
+ "QuTIrOFOv5ughW5dNndwsK0RKtzx+1IDDDziowcPj+3Y160iTCviKG6VmEYL4IljC8lMZGtXPt1X+G4x",
+ "2aac61ZGi55UG45FzWrr8t+DXPnzMM2ggvBGxhmSxypxXHKAhVqfqd0YaI0tHNHx0GAD7pqPDvG6EATi",
+ "OFHsOdlNhHVF9hZUDr5ncfcsLjiNnWubceeu12Uik+uxOCy2PczdbOl2UCQ8pA/VI8OyEKMr3VJmY2H7",
+ "hS241lXMYkKmum7g5+Fydrm7MrirEYcdvI5ivKkDfne4PuMI/MweCkkWUlTlI5sej69R51eUlK+9nt88",
+ "hosqd3VUMWjodnloXfyvJ0l6fdOwquqd10gFChnnN93+3aIFSwba/YWMVDwDGa/MteLqipXnT1a84cAb",
+ "qzH5unS91bl5d+H+fped13xt2yhtlU57oFqHybnu2pM7uY/U/XPcCO9sGsoBBtt3PG0YwvaLQQYsC2+G",
+ "Tt4mfzW0+el7ehFmgbotoXH3d7cRudcaaoE7kuTKiJFS0CylCoMROegLIc/uWJbUq6OIYhXBxGSF/VgM",
+ "80SZbBUqcdydRMp22JCbELOJKVvg7fMKl42D/aGL/Wxh417X+UfRdX7jD58iFGvFdg6nNWvgmdyBTdEL",
+ "veJRLjUtbbLjIZfe4EC4tMi36pzQG77toxCkGrY2VshLQkmaM7TACq60rFJ9yinaeDqFdDv+C95yNSxK",
+ "vfRN4mbGiBXQDXXKjVA1J7XlJypSzSFi0/0OwEtsqlosQOkOJ54DnHLXinEsYY9zYV3ixLq6m+vacPSJ",
+ "bVnQNZnTHI2Uv4EUZGYeEWECLLSYKM3y3DlMmGmImJ9yqkkOhum/YUagM8N5pXrtBGTprsbCQL11W+kw",
+ "ietZv7dfMazMLd8rxlF/bz/7AJDx56lHmrBsEPKjVy455dErzDfWuEr0YP9k9vOC8SRKZObGdy5HXdoi",
+ "D42M5wnoUeN04Xb9lBthWguCjJ7q65FD187ZO4v2dHSoprURHXOoX+uHWGKEhUjMk5EuzO8LppfVDCuC",
+ "+oQJ04WokydMMwqF4Pgtm9KSTVUJ6fT88Rb54Ab8ikTY1f3N/cexUoZ0YE5LvfGYhL+79wP38i3kAv99",
+ "JwDf6oN5n277Pt32fULm+3Tb97t7n277Phn1fTLqP2sy6slGCdElcNqaHlb3VJuUSEjtzDUDD5u1Esn2",
+ "rZJMTwg5WRr+T80dAOcgaU5SqqxgxK0rcMEWS01UlaYA2cEpT1qQpKJwEz9s/mufuafV/v5TIPuPun2s",
+ "3iLgvP2+KKriJzQ1ka/J6eh01BtJQiHOwaWVxOZZhd4wttfWYf+/etwfZW/rCrq2ypUlLUsw15qq5nOW",
+ "MovyXJjHwEJ0HJi5wC8gDXA2FRBh2mbwRnyi47fziaEuwUZM6O7f71eoP3jYTdhynyHrVgTsTXyqv2G3",
+ "xwM3jt1jiPcs41OwjM/ONP5AyTzv83b+zhYUGlJbiblvIEnVFSkjeicvI1l1suHNOAKklWR6jTccLdkv",
+ "Z2D+/8HwcQXy3F9+lcxHB6Ol1uXBdIqlM5ZC6enIXE3NN9X5aO4HurAjuMullOwc0+5+uPx/AQAA//9x",
+ "rSpBuRYBAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index e23c68083..50bb0a3ed 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -612,6 +612,13 @@ type CompileResponse struct {
Result string `json:"result"`
}
+// DisassembleResponse defines model for DisassembleResponse.
+type DisassembleResponse struct {
+
+ // disassembled Teal code
+ Result string `json:"result"`
+}
+
// DryrunResponse defines model for DryrunResponse.
type DryrunResponse struct {
Error string `json:"error"`
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 88660854e..4098bf0aa 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -766,12 +766,15 @@ func (v2 *Handlers) TealDryrun(ctx echo.Context) error {
req := ctx.Request()
buf := new(bytes.Buffer)
req.Body = http.MaxBytesReader(nil, req.Body, maxTealDryrunBytes)
- buf.ReadFrom(req.Body)
+ _, err := buf.ReadFrom(ctx.Request().Body)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
data := buf.Bytes()
var dr DryrunRequest
var gdr generated.DryrunRequest
- err := decode(protocol.JSONStrictHandle, data, &gdr)
+ err = decode(protocol.JSONStrictHandle, data, &gdr)
if err == nil {
dr, err = DryrunRequestFromGenerated(&gdr)
if err != nil {
@@ -1136,7 +1139,10 @@ func (v2 *Handlers) TealCompile(ctx echo.Context) error {
}
buf := new(bytes.Buffer)
ctx.Request().Body = http.MaxBytesReader(nil, ctx.Request().Body, maxTealSourceBytes)
- buf.ReadFrom(ctx.Request().Body)
+ _, err := buf.ReadFrom(ctx.Request().Body)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
source := buf.String()
ops, err := logic.AssembleString(source)
if err != nil {
@@ -1152,3 +1158,27 @@ func (v2 *Handlers) TealCompile(ctx echo.Context) error {
}
return ctx.JSON(http.StatusOK, response)
}
+
+// TealDisassemble disassembles the program bytecode in base64 into TEAL code.
+// (POST /v2/teal/disassemble)
+func (v2 *Handlers) TealDisassemble(ctx echo.Context) error {
+ // return early if teal compile is not allowed in node config
+ if !v2.Node.Config().EnableDeveloperAPI {
+ return ctx.String(http.StatusNotFound, "/teal/disassemble was not enabled in the configuration file by setting the EnableDeveloperAPI to true")
+ }
+ buf := new(bytes.Buffer)
+ ctx.Request().Body = http.MaxBytesReader(nil, ctx.Request().Body, maxTealSourceBytes)
+ _, err := buf.ReadFrom(ctx.Request().Body)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ sourceProgram := buf.Bytes()
+ program, err := logic.Disassemble(sourceProgram)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ response := generated.DisassembleResponse{
+ Result: program,
+ }
+ return ctx.JSON(http.StatusOK, response)
+}
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index a7f00b70c..e222a5f77 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -551,6 +551,68 @@ func TestTealCompile(t *testing.T) {
tealCompileTest(t, badProgramBytes, 400, true)
}
+func tealDisassembleTest(t *testing.T, program []byte, expectedCode int,
+ expectedString string, enableDeveloperAPI bool,
+) (response generatedV2.DisassembleResponse) {
+ numAccounts := 1
+ numTransactions := 1
+ offlineAccounts := true
+ mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
+ defer releasefunc()
+ dummyShutdownChan := make(chan struct{})
+ mockNode := makeMockNode(mockLedger, t.Name(), nil)
+ mockNode.config.EnableDeveloperAPI = enableDeveloperAPI
+ handler := v2.Handlers{
+ Node: mockNode,
+ Log: logging.Base(),
+ Shutdown: dummyShutdownChan,
+ }
+ e := echo.New()
+ req := httptest.NewRequest(http.MethodPost, "/", bytes.NewReader(program))
+ rec := httptest.NewRecorder()
+ c := e.NewContext(req, rec)
+ err := handler.TealDisassemble(c)
+ require.NoError(t, err)
+ require.Equal(t, expectedCode, rec.Code)
+
+ if rec.Code == 200 {
+ data := rec.Body.Bytes()
+ err = protocol.DecodeJSON(data, &response)
+ require.NoError(t, err, string(data))
+ require.Equal(t, expectedString, response.Result)
+ } else if rec.Code == 400 {
+ var response generatedV2.ErrorResponse
+ data := rec.Body.Bytes()
+ err = protocol.DecodeJSON(data, &response)
+ require.NoError(t, err, string(data))
+ require.Contains(t, response.Message, expectedString)
+ }
+ return
+}
+
+func TestTealDisassemble(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // nil program works, but results in invalid version text.
+ testProgram := []byte{}
+ tealDisassembleTest(t, testProgram, 200, "// invalid version\n", true)
+
+ // Test a valid program.
+ for ver := 1; ver < logic.AssemblerMaxVersion; ver++ {
+ goodProgram := `int 1`
+ ops, _ := logic.AssembleStringWithVersion(goodProgram, uint64(ver))
+ disassembledProgram, _ := logic.Disassemble(ops.Program)
+ tealDisassembleTest(t, ops.Program, 200, disassembledProgram, true)
+ }
+ // Test a nil program without the developer API flag.
+ tealDisassembleTest(t, testProgram, 404, "", false)
+
+ // Test bad program
+ badProgram := []byte{1, 99}
+ tealDisassembleTest(t, badProgram, 400, "invalid opcode", true)
+}
+
func tealDryrunTest(
t *testing.T, obj *generatedV2.DryrunRequest, format string,
expCode int, expResult string, enableDeveloperAPI bool,
diff --git a/daemon/algod/deadlockLogger.go b/daemon/algod/deadlockLogger.go
index 8ae3cfe02..62fc1af4c 100644
--- a/daemon/algod/deadlockLogger.go
+++ b/daemon/algod/deadlockLogger.go
@@ -56,7 +56,8 @@ func captureCallstack() []byte {
bufferSize := 256 * 1024
for {
buf = make([]byte, bufferSize)
- if runtime.Stack(buf, true) < bufferSize {
+ if writtenBytes := runtime.Stack(buf, true); writtenBytes < bufferSize {
+ buf = buf[:writtenBytes]
break
}
bufferSize *= 2
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index a3875f05e..d3b9ff2f5 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
+ "io"
"io/ioutil"
"net"
"net/http"
@@ -82,7 +83,13 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
maxLogAge = 0
}
}
- logWriter := logging.MakeCyclicFileWriter(liveLog, archive, cfg.LogSizeLimit, maxLogAge)
+
+ var logWriter io.Writer
+ if cfg.LogSizeLimit > 0 {
+ logWriter = logging.MakeCyclicFileWriter(liveLog, archive, cfg.LogSizeLimit, maxLogAge)
+ } else {
+ logWriter = os.Stdout
+ }
s.log.SetOutput(logWriter)
s.log.SetJSONFormatter()
s.log.SetLevel(logging.Level(cfg.BaseLoggerDebugLevel))
diff --git a/daemon/kmd/wallet/driver/ledger.go b/daemon/kmd/wallet/driver/ledger.go
index 36029d370..1d64a6ab1 100644
--- a/daemon/kmd/wallet/driver/ledger.go
+++ b/daemon/kmd/wallet/driver/ledger.go
@@ -145,6 +145,7 @@ func (lwd *LedgerWalletDriver) scanWalletsLocked() error {
newDevs = append(newDevs, LedgerUSB{
hiddev: dev,
+ info: info,
})
}
diff --git a/daemon/kmd/wallet/driver/ledger_hid.go b/daemon/kmd/wallet/driver/ledger_hid.go
index 32e3d8321..28ba4ce5c 100644
--- a/daemon/kmd/wallet/driver/ledger_hid.go
+++ b/daemon/kmd/wallet/driver/ledger_hid.go
@@ -21,7 +21,7 @@ import (
"fmt"
"os"
- "github.com/karalabe/hid"
+ "github.com/karalabe/usb"
)
const ledgerVendorID = 0x2c97
@@ -31,7 +31,8 @@ const ledgerUsagePage = 0xffa0
// the protocol used for sending messages to the application running on the
// Ledger hardware wallet.
type LedgerUSB struct {
- hiddev *hid.Device
+ hiddev usb.Device
+ info usb.DeviceInfo
}
// LedgerUSBError is a wrapper around the two-byte error code that the Ledger
@@ -196,21 +197,25 @@ func (l *LedgerUSB) Exchange(msg []byte) ([]byte, error) {
}
// USBInfo returns information about the underlying USB device.
-func (l *LedgerUSB) USBInfo() hid.DeviceInfo {
- return l.hiddev.DeviceInfo
+func (l *LedgerUSB) USBInfo() usb.DeviceInfo {
+ return l.info
}
// LedgerEnumerate returns all of the Ledger devices connected to this machine.
-func LedgerEnumerate() ([]hid.DeviceInfo, error) {
- if !hid.Supported() || os.Getenv("KMD_NOUSB") != "" {
+func LedgerEnumerate() ([]usb.DeviceInfo, error) {
+ if !usb.Supported() || os.Getenv("KMD_NOUSB") != "" {
return nil, fmt.Errorf("HID not supported")
}
- var infos []hid.DeviceInfo
+ var infos []usb.DeviceInfo
// The enumeration process is based on:
// https://github.com/LedgerHQ/blue-loader-python/blob/master/ledgerblue/comm.py#L212
- // we search for the Ledger Vendor id and igonre devices that don't have specific usagepage or interface
- for _, info := range hid.Enumerate(ledgerVendorID, 0) {
+ // we search for the Ledger Vendor id and ignore devices that don't have specific usagepage or interface
+ hids, err := usb.EnumerateHid(ledgerVendorID, 0)
+ if err != nil {
+ return []usb.DeviceInfo{}, err
+ }
+ for _, info := range hids {
if info.UsagePage != ledgerUsagePage && info.Interface != 0 {
continue
}
diff --git a/data/abi/abi_encode.go b/data/abi/abi_encode.go
index ac25588f0..b0f8b6c12 100644
--- a/data/abi/abi_encode.go
+++ b/data/abi/abi_encode.go
@@ -25,23 +25,6 @@ import (
"strings"
)
-// bigIntToBytes casts non-negative big integer to byte slice with specific byte length
-// DEPRECATED: THIS IS A WORKAROUND FOR `fillBytes` METHOD BEFORE GOLANG 1.15+
-// SHOULD BE REMOVED AFTER WE MOVE TO HIGHER VERSION
-func bigIntToBytes(x *big.Int, byteLen uint) ([]byte, error) {
- if x.Cmp(big.NewInt(0)) < 0 {
- return nil, fmt.Errorf("ABI: big Int To Bytes error: should pass in non-negative integer")
- }
- if uint(x.BitLen()) > byteLen*8 {
- return nil, fmt.Errorf("ABI: big Int To Bytes error: integer byte length > given byte length")
- }
-
- buffer := make([]byte, byteLen)
- intBytes := x.Bytes()
- copy(buffer[int(byteLen)-len(intBytes):], intBytes)
- return buffer, nil
-}
-
// typeCastToTuple cast an array-like ABI type into an ABI tuple type.
func (t Type) typeCastToTuple(tupLen ...int) (Type, error) {
var childT []Type
@@ -187,14 +170,13 @@ func encodeInt(intValue interface{}, bitSize uint16) ([]byte, error) {
return nil, fmt.Errorf("passed in numeric value should be non negative")
}
+ castedBytes := make([]byte, bitSize/8)
+
if bigInt.Cmp(new(big.Int).Lsh(big.NewInt(1), uint(bitSize))) >= 0 {
return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", bigInt.BitLen(), bitSize)
}
- castedBytes, err := bigIntToBytes(bigInt, uint(bitSize/8))
- if err != nil {
- return nil, err
- }
+ bigInt.FillBytes(castedBytes)
return castedBytes, nil
}
@@ -204,12 +186,8 @@ func inferToSlice(value interface{}) ([]interface{}, error) {
if reflectVal.Kind() != reflect.Slice && reflectVal.Kind() != reflect.Array {
return nil, fmt.Errorf("cannot infer an interface value as a slice of interface element")
}
- if reflectVal.IsNil() {
- if reflectVal.Kind() == reflect.Slice {
- return nil, nil
- }
- return nil, fmt.Errorf("cannot infer nil value for array kind interface")
- }
+ // * if input is a slice, with nil, then reflectVal.Len() == 0
+ // * if input is an array, it is not possible it is nil
values := make([]interface{}, reflectVal.Len())
for i := 0; i < reflectVal.Len(); i++ {
values[i] = reflectVal.Index(i).Interface()
@@ -572,7 +550,12 @@ func ParseArgJSONtoByteSlice(argTypes []string, jsonArgs []string, applicationAr
func ParseMethodSignature(methodSig string) (name string, argTypes []string, returnType string, err error) {
argsStart := strings.Index(methodSig, "(")
if argsStart == -1 {
- err = fmt.Errorf("Invalid method signature: %s", methodSig)
+ err = fmt.Errorf(`No parenthesis in method signature: "%s"`, methodSig)
+ return
+ }
+
+ if argsStart == 0 {
+ err = fmt.Errorf(`Method signature has no name: "%s"`, methodSig)
return
}
@@ -583,7 +566,7 @@ func ParseMethodSignature(methodSig string) (name string, argTypes []string, ret
depth++
} else if char == ')' {
if depth == 0 {
- err = fmt.Errorf("Unpaired parenthesis in method signature: %s", methodSig)
+ err = fmt.Errorf(`Unpaired parenthesis in method signature: "%s"`, methodSig)
return
}
depth--
@@ -595,7 +578,7 @@ func ParseMethodSignature(methodSig string) (name string, argTypes []string, ret
}
if argsEnd == -1 {
- err = fmt.Errorf("Invalid method signature: %s", methodSig)
+ err = fmt.Errorf(`Unpaired parenthesis in method signature: "%s"`, methodSig)
return
}
@@ -604,3 +587,31 @@ func ParseMethodSignature(methodSig string) (name string, argTypes []string, ret
returnType = methodSig[argsEnd+1:]
return
}
+
+// VerifyMethodSignature checks if a method signature and its referenced types can be parsed properly
+func VerifyMethodSignature(methodSig string) error {
+ _, argTypes, retType, err := ParseMethodSignature(methodSig)
+ if err != nil {
+ return err
+ }
+
+ for i, argType := range argTypes {
+ if IsReferenceType(argType) || IsTransactionType(argType) {
+ continue
+ }
+
+ _, err = TypeOf(argType)
+ if err != nil {
+ return fmt.Errorf("Error parsing argument type at index %d: %s", i, err.Error())
+ }
+ }
+
+ if retType != VoidReturnType {
+ _, err = TypeOf(retType)
+ if err != nil {
+ return fmt.Errorf("Error parsing return type: %s", err.Error())
+ }
+ }
+
+ return nil
+}
diff --git a/data/abi/abi_encode_test.go b/data/abi/abi_encode_test.go
index 620013c23..231c1a0e0 100644
--- a/data/abi/abi_encode_test.go
+++ b/data/abi/abi_encode_test.go
@@ -83,8 +83,8 @@ func TestEncodeValid(t *testing.T) {
randomInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- expected, err := bigIntToBytes(randomInt, uint(intSize/8))
- require.NoError(t, err, "big int to byte conversion error")
+ expected := make([]byte, intSize/8)
+ randomInt.FillBytes(expected)
uintEncode, err := uintType.Encode(randomInt)
require.NoError(t, err, "encoding from uint type fail")
@@ -122,9 +122,8 @@ func TestEncodeValid(t *testing.T) {
encodedUfixed, err := typeUfixed.Encode(randomInt)
require.NoError(t, err, "ufixed encode fail")
- expected, err := bigIntToBytes(randomInt, uint(size/8))
- require.NoError(t, err, "big int to byte conversion error")
-
+ expected := make([]byte, size/8)
+ randomInt.FillBytes(expected)
require.Equal(t, expected, encodedUfixed, "encode ufixed not match with expected")
}
// (2^[bitSize] - 1) / (10^[precision]) test
@@ -142,8 +141,8 @@ func TestEncodeValid(t *testing.T) {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- addrBytesExpected, err := bigIntToBytes(randomAddrInt, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
+ addrBytesExpected := make([]byte, addressByteSize)
+ randomAddrInt.FillBytes(addrBytesExpected)
addrBytesActual, err := addressType.Encode(addrBytesExpected)
require.NoError(t, err, "address encode fail")
@@ -422,8 +421,8 @@ func TestDecodeValid(t *testing.T) {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- expected, err := bigIntToBytes(randomAddrInt, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
+ expected := make([]byte, addressByteSize)
+ randomAddrInt.FillBytes(expected)
actual, err := addressType.Decode(expected)
require.NoError(t, err, "decoding address should not return error")
@@ -952,10 +951,8 @@ func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
for i := 0; i < addressTestCaseCount; i++ {
randAddrVal, err := rand.Int(rand.Reader, maxAddress)
require.NoError(t, err, "generate random value for address, should be no error")
-
- addrBytes, err := bigIntToBytes(randAddrVal, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
-
+ addrBytes := make([]byte, addressByteSize)
+ randAddrVal.FillBytes(addrBytes)
(*pool)[Address][i] = testUnit{serializedType: addressType.String(), value: addrBytes}
}
categorySelfRoundTripTest(t, (*pool)[Address])
@@ -1165,7 +1162,7 @@ func TestParseArgJSONtoByteSlice(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) {
- applicationArgs := make([][]byte, 0)
+ applicationArgs := [][]byte{}
err := ParseArgJSONtoByteSlice(test.argTypes, test.jsonArgs, &applicationArgs)
require.NoError(t, err)
require.Equal(t, test.expectedAppArgs, applicationArgs)
@@ -1224,3 +1221,59 @@ func TestParseMethodSignature(t *testing.T) {
})
}
}
+
+func TestInferToSlice(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var emptySlice []int
+ tests := []struct {
+ toBeInferred interface{}
+ length int
+ }{
+ {
+ toBeInferred: []int{},
+ length: 0,
+ },
+ {
+ toBeInferred: make([]int, 0),
+ length: 0,
+ },
+ {
+ toBeInferred: emptySlice,
+ length: 0,
+ },
+ {
+ toBeInferred: [0]int{},
+ length: 0,
+ },
+ {
+ toBeInferred: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
+ length: 32,
+ },
+ {
+ toBeInferred: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
+ length: 32,
+ },
+ }
+
+ for i, test := range tests {
+ inferredSlice, err := inferToSlice(test.toBeInferred)
+ require.NoError(t, err, "inferToSlice on testcase %d failed to successfully infer %v", i, test.toBeInferred)
+ require.Equal(t, test.length, len(inferredSlice), "inferToSlice on testcase %d inferred different length, expected %d", i, test.length)
+ }
+
+ // one more testcase for totally nil (with no type information) is bad, should not pass the test
+ _, err := inferToSlice(nil)
+ require.EqualError(
+ t, err,
+ "cannot infer an interface value as a slice of interface element",
+ "inferToSlice should return type inference error when passed in nil with unexpected Kind")
+
+ // one moar testcase for wrong typed nil is bad, should not pass the test
+ var nilPt *uint64 = nil
+ _, err = inferToSlice(nilPt)
+ require.EqualError(
+ t, err,
+ "cannot infer an interface value as a slice of interface element",
+ "inferToSlice should return type inference error when passing argument type other than slice or array")
+}
diff --git a/data/abi/abi_json_test.go b/data/abi/abi_json_test.go
index 0c3a006e3..49083fdea 100644
--- a/data/abi/abi_json_test.go
+++ b/data/abi/abi_json_test.go
@@ -31,17 +31,14 @@ func TestRandomAddressEquality(t *testing.T) {
upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
var addrBasics basics.Address
- var addrABI = make([]byte, addressByteSize)
+ var addrABI []byte = make([]byte, addressByteSize)
for testCaseIndex := 0; testCaseIndex < addressTestCaseCount; testCaseIndex++ {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- expected, err := bigIntToBytes(randomAddrInt, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
-
- copy(addrABI[:], expected)
- copy(addrBasics[:], expected)
+ randomAddrInt.FillBytes(addrBasics[:])
+ randomAddrInt.FillBytes(addrABI)
checkSumBasics := addrBasics.GetChecksum()
checkSumABI, err := addressCheckSum(addrABI)
diff --git a/data/abi/abi_type.go b/data/abi/abi_type.go
index f403916b2..aa4e0b75a 100644
--- a/data/abi/abi_type.go
+++ b/data/abi/abi_type.go
@@ -82,7 +82,7 @@ type Type struct {
// length for static array / tuple
/*
by ABI spec, len over binary array returns number of bytes
- the type is uint16, which allows for only lenth in [0, 2^16 - 1]
+ the type is uint16, which allows for only length in [0, 2^16 - 1]
representation of static length can only be constrained in uint16 type
*/
// NOTE may want to change back to uint32/uint64
@@ -136,7 +136,7 @@ func TypeOf(str string) (Type, error) {
stringMatches := staticArrayRegexp.FindStringSubmatch(str)
// match the string itself, array element type, then array length
if len(stringMatches) != 3 {
- return Type{}, fmt.Errorf("static array ill formated: %s", str)
+ return Type{}, fmt.Errorf(`static array ill formated: "%s"`, str)
}
// guaranteed that the length of array is existing
arrayLengthStr := stringMatches[2]
@@ -154,7 +154,7 @@ func TypeOf(str string) (Type, error) {
case strings.HasPrefix(str, "uint"):
typeSize, err := strconv.ParseUint(str[4:], 10, 16)
if err != nil {
- return Type{}, fmt.Errorf("ill formed uint type: %s", str)
+ return Type{}, fmt.Errorf(`ill formed uint type: "%s"`, str)
}
return makeUintType(int(typeSize))
case str == "byte":
@@ -163,7 +163,7 @@ func TypeOf(str string) (Type, error) {
stringMatches := ufixedRegexp.FindStringSubmatch(str)
// match string itself, then type-bitSize, and type-precision
if len(stringMatches) != 3 {
- return Type{}, fmt.Errorf("ill formed ufixed type: %s", str)
+ return Type{}, fmt.Errorf(`ill formed ufixed type: "%s"`, str)
}
// guaranteed that there are 2 uint strings in ufixed string
ufixedSize, err := strconv.ParseUint(stringMatches[1], 10, 16)
@@ -196,7 +196,7 @@ func TypeOf(str string) (Type, error) {
}
return MakeTupleType(tupleTypes)
default:
- return Type{}, fmt.Errorf("cannot convert a string %s to an ABI type", str)
+ return Type{}, fmt.Errorf(`cannot convert the string "%s" to an ABI type`, str)
}
}
@@ -493,3 +493,6 @@ func IsReferenceType(s string) bool {
return false
}
}
+
+// VoidReturnType is the ABI return type string for a method that does not return any value
+const VoidReturnType = "void"
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index 1e4b1393e..46f17c8ef 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -152,7 +152,7 @@ type (
FeeSink basics.Address `codec:"fees"`
// The RewardsPool accepts periodic injections from the
- // FeeSink and continually redistributes them to adresses as
+ // FeeSink and continually redistributes them to addresses as
// rewards.
RewardsPool basics.Address `codec:"rwd"`
diff --git a/data/ledger_test.go b/data/ledger_test.go
index 0483de994..6af896e97 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -156,9 +156,10 @@ func TestLedgerCirculation(t *testing.T) {
require.False(t, sourceAccount.IsZero())
require.False(t, destAccount.IsZero())
- data, err := realLedger.LookupAgreement(basics.Round(0), destAccount)
+ data, validThrough, _, err := realLedger.LookupAccount(basics.Round(0), destAccount)
+ require.Equal(t, basics.Round(0), validThrough)
require.NoError(t, err)
- baseDestValue := data.MicroAlgosWithRewards.Raw
+ baseDestValue := data.MicroAlgos.Raw
blk := genesisInitState.Block
totalsRound, totals, err := realLedger.LatestTotals()
@@ -191,12 +192,14 @@ func TestLedgerCirculation(t *testing.T) {
// test most recent round
if rnd < basics.Round(500) {
- data, err = realLedger.LookupAgreement(rnd, destAccount)
+ data, validThrough, _, err = realLedger.LookupAccount(rnd, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgosWithRewards.Raw)
- data, err = l.LookupAgreement(rnd, destAccount)
+ require.Equal(t, rnd, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw)
+ data, validThrough, _, err = realLedger.LookupAccount(rnd, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgosWithRewards.Raw)
+ require.Equal(t, rnd, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw)
roundCirculation, err := realLedger.OnlineTotals(rnd)
require.NoError(t, err)
@@ -207,12 +210,14 @@ func TestLedgerCirculation(t *testing.T) {
require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(510) {
// test one round ago
- data, err = realLedger.LookupAgreement(rnd-1, destAccount)
+ data, validThrough, _, err = realLedger.LookupAccount(rnd-1, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgosWithRewards.Raw)
- data, err = l.LookupAgreement(rnd-1, destAccount)
+ require.Equal(t, rnd-1, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw)
+ data, validThrough, _, err = l.LookupAccount(rnd-1, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgosWithRewards.Raw)
+ require.Equal(t, rnd-1, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw)
roundCirculation, err := realLedger.OnlineTotals(rnd - 1)
require.NoError(t, err)
@@ -223,12 +228,12 @@ func TestLedgerCirculation(t *testing.T) {
require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(520) {
// test one round in the future ( expected error )
- data, err = realLedger.LookupAgreement(rnd+1, destAccount)
+ data, _, _, err = realLedger.LookupAccount(rnd+1, destAccount)
require.Error(t, err)
- require.Equal(t, uint64(0), data.MicroAlgosWithRewards.Raw)
- data, err = l.LookupAgreement(rnd+1, destAccount)
+ require.Equal(t, uint64(0), data.MicroAlgos.Raw)
+ data, _, _, err = l.LookupAccount(rnd+1, destAccount)
require.Error(t, err)
- require.Equal(t, uint64(0), data.MicroAlgosWithRewards.Raw)
+ require.Equal(t, uint64(0), data.MicroAlgos.Raw)
_, err = realLedger.OnlineTotals(rnd + 1)
require.Error(t, err)
@@ -244,7 +249,6 @@ func TestLedgerCirculation(t *testing.T) {
require.Error(t, err)
}
}
- return
}
func TestLedgerSeed(t *testing.T) {
@@ -318,7 +322,6 @@ func TestLedgerSeed(t *testing.T) {
require.Equal(t, seed.elements[1].seed, expectedHdr.Seed)
}
}
- return
}
func TestConsensusVersion(t *testing.T) {
@@ -473,7 +476,7 @@ func TestLedgerErrorValidate(t *testing.T) {
var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
- proto, _ := config.Consensus[protocol.ConsensusCurrentVersion]
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
origProto := proto
defer func() {
config.Consensus[protocol.ConsensusCurrentVersion] = origProto
@@ -533,8 +536,8 @@ func TestLedgerErrorValidate(t *testing.T) {
// Add blocks to the ledger via EnsureValidatedBlock. This calls AddValidatedBlock, which simply
// passes the block to blockQueue. The returned error is handled by EnsureValidatedBlock, which reports
// in the form of logged error message.
+ wg.Add(1)
go func() {
- wg.Add(1)
i := 0
for blk := range blkChan1 {
i++
@@ -554,8 +557,8 @@ func TestLedgerErrorValidate(t *testing.T) {
// Add blocks to the ledger via EnsureBlock. This basically calls AddBlock, but handles
// the errors by logging them. Checking the logged messages to verify its behavior.
+ wg.Add(1)
go func() {
- wg.Add(1)
i := 0
for blk := range blkChan2 {
i++
@@ -565,8 +568,8 @@ func TestLedgerErrorValidate(t *testing.T) {
}()
// Add blocks directly to the ledger
+ wg.Add(1)
go func() {
- wg.Add(1)
i := 0
for blk := range blkChan3 {
i++
diff --git a/data/transactions/logic/.gitignore b/data/transactions/logic/.gitignore
deleted file mode 100644
index 24f8b4a36..000000000
--- a/data/transactions/logic/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-langspec.json
-teal.tmLanguage.json
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index b8dc148f3..96b771f7c 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -292,8 +292,8 @@ return stack matches the name of the input value.
| `!=` | A is not equal to B => {0 or 1} |
| `!` | A == 0 yields 1; else 0 |
| `len` | yields length of byte value A |
-| `itob` | converts uint64 A to big endian bytes |
-| `btoi` | converts bytes A as big endian to uint64 |
+| `itob` | converts uint64 A to big-endian byte array, always of length 8 |
+| `btoi` | converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8. |
| `%` | A modulo B. Fail if B == 0. |
| `\|` | A bitwise-or B |
| `&` | A bitwise-and B |
@@ -304,10 +304,10 @@ return stack matches the name of the input value.
| `divw` | A,B / C. Fail if C == 0 or if result overflows. |
| `divmodw` | W,X = (A,B / C,D); Y,Z = (A,B modulo C,D) |
| `expw` | A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. Fail if A == B == 0 or if the results exceeds 2^128-1 |
-| `getbit` | Bth bit of (byte-array or integer) A. |
-| `setbit` | Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C |
-| `getbyte` | Bth byte of A, as an integer |
-| `setbyte` | Copy of A with the Bth byte set to small integer (between 0..255) C |
+| `getbit` | Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails |
+| `setbit` | Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails |
+| `getbyte` | Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails |
+| `setbyte` | Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails |
| `concat` | join A and B |
### Byte Array Manipulation
@@ -436,7 +436,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 16 | TypeEnum | uint64 | | See table below |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset. |
+| 19 | AssetSender | []byte | | 32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset. |
| 20 | AssetReceiver | []byte | | 32 byte address |
| 21 | AssetCloseTo | []byte | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
@@ -457,7 +457,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 37 | ConfigAssetUnitName | []byte | v2 | Unit name of the asset |
| 38 | ConfigAssetName | []byte | v2 | The asset name |
| 39 | ConfigAssetURL | []byte | v2 | URL |
-| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to some unspecified asset metadata |
+| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to unspecified asset metadata |
| 41 | ConfigAssetManager | []byte | v2 | 32 byte address |
| 42 | ConfigAssetReserve | []byte | v2 | 32 byte address |
| 43 | ConfigAssetFreeze | []byte | v2 | 32 byte address |
@@ -527,7 +527,7 @@ Asset fields include `AssetHolding` and `AssetParam` fields that are used in the
| 4 | AssetName | []byte | | Asset name |
| 5 | AssetURL | []byte | | URL with additional info about the asset |
| 6 | AssetMetadataHash | []byte | | Arbitrary commitment |
-| 7 | AssetManager | []byte | | Manager commitment |
+| 7 | AssetManager | []byte | | Manager address |
| 8 | AssetReserve | []byte | | Reserve address |
| 9 | AssetFreeze | []byte | | Freeze address |
| 10 | AssetClawback | []byte | | Clawback address |
@@ -623,7 +623,8 @@ In v5, inner transactions may perform `pay`, `axfer`, `acfg`, and
with the next instruction with, for example, `balance` and
`min_balance` checks. In v6, inner transactions may also perform
`keyreg` and `appl` effects. Inner `appl` calls fail if they attempt
-to invoke a program with version less than v6.
+to invoke a program with version less than v4, or if they attempt to
+opt-in to an app with a ClearState Program less than v4.
In v5, only a subset of the transaction's header fields may be set: `Type`/`TypeEnum`,
`Sender`, and `Fee`. In v6, header fields `Note` and `RekeyTo` may
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index f6968ab9f..87b7c15d7 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -333,7 +333,8 @@ In v5, inner transactions may perform `pay`, `axfer`, `acfg`, and
with the next instruction with, for example, `balance` and
`min_balance` checks. In v6, inner transactions may also perform
`keyreg` and `appl` effects. Inner `appl` calls fail if they attempt
-to invoke a program with version less than v6.
+to invoke a program with version less than v4, or if they attempt to
+opt-in to an app with a ClearState Program less than v4.
In v5, only a subset of the transaction's header fields may be set: `Type`/`TypeEnum`,
`Sender`, and `Fee`. In v6, header fields `Note` and `RekeyTo` may
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 6ba7181fe..5ac8198bc 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -6,7 +6,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## err
- Opcode: 0x00
-- Stack: ... &rarr; ...
+- Stack: ... &rarr; _exits_
- Fail immediately.
## sha256
@@ -50,15 +50,15 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Opcode: 0x05 {uint8 curve index}
- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., uint64
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
-- **Cost**: 1700
+- **Cost**: Secp256k1=1700 Secp256r1=2500
- Availability: v5
`ECDSA` Curves:
| Index | Name | In | Notes |
| - | ------ | - | --------- |
-| 0 | Secp256k1 | | secp256k1 curve |
-| 1 | Secp256r1 | v7 | secp256r1 curve |
+| 0 | Secp256k1 | | secp256k1 curve, used in Bitcoin |
+| 1 | Secp256r1 | v7 | secp256r1 curve, NIST standard |
The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.
@@ -68,17 +68,9 @@ The 32 byte Y-component of a public key is the last element on the stack, preced
- Opcode: 0x06 {uint8 curve index}
- Stack: ..., A: []byte &rarr; ..., X: []byte, Y: []byte
- decompress pubkey A into components X, Y
-- **Cost**: 650
+- **Cost**: Secp256k1=650 Secp256r1=2400
- Availability: v5
-`ECDSA` Curves:
-
-| Index | Name | In | Notes |
-| - | ------ | - | --------- |
-| 0 | Secp256k1 | | secp256k1 curve |
-| 1 | Secp256r1 | v7 | secp256r1 curve |
-
-
The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.
## ecdsa_pk_recover v
@@ -89,14 +81,6 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- **Cost**: 2000
- Availability: v5
-`ECDSA` Curves:
-
-| Index | Name | In | Notes |
-| - | ------ | - | --------- |
-| 0 | Secp256k1 | | secp256k1 curve |
-| 1 | Secp256r1 | v7 | secp256r1 curve |
-
-
S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to deriver a public key. All values are big-endian encoded. The signed data must be 32 bytes long.
## +
@@ -193,13 +177,13 @@ Overflow is an error condition which halts execution and fails the transaction.
- Opcode: 0x16
- Stack: ..., A: uint64 &rarr; ..., []byte
-- converts uint64 A to big endian bytes
+- converts uint64 A to big-endian byte array, always of length 8
## btoi
- Opcode: 0x17
- Stack: ..., A: []byte &rarr; ..., uint64
-- converts bytes A as big endian to uint64
+- converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8.
`btoi` fails if the input is longer than 8 bytes.
@@ -396,7 +380,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 16 | TypeEnum | uint64 | | See table below |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset. |
+| 19 | AssetSender | []byte | | 32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset. |
| 20 | AssetReceiver | []byte | | 32 byte address |
| 21 | AssetCloseTo | []byte | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
@@ -417,7 +401,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 37 | ConfigAssetUnitName | []byte | v2 | Unit name of the asset |
| 38 | ConfigAssetName | []byte | v2 | The asset name |
| 39 | ConfigAssetURL | []byte | v2 | URL |
-| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to some unspecified asset metadata |
+| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to unspecified asset metadata |
| 41 | ConfigAssetManager | []byte | v2 | 32 byte address |
| 42 | ConfigAssetReserve | []byte | v2 | 32 byte address |
| 43 | ConfigAssetFreeze | []byte | v2 | 32 byte address |
@@ -443,19 +427,6 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 63 | StateProofPK | []byte | v6 | 64 byte state proof public key commitment |
-TypeEnum mapping:
-
-| Index | "Type" string | Description |
-| --- | --- | --- |
-| 0 | unknown | Unknown type. Invalid transaction |
-| 1 | pay | Payment |
-| 2 | keyreg | KeyRegistration |
-| 3 | acfg | AssetConfig |
-| 4 | axfer | AssetTransfer |
-| 5 | afrz | AssetFreeze |
-| 6 | appl | ApplicationCall |
-
-
FirstValidTime causes the program to fail. The field is reserved for future use.
## global f
@@ -591,7 +562,7 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
## bnz target
-- Opcode: 0x40 {int16 branch offset, big endian}
+- Opcode: 0x40 {int16 branch offset, big-endian}
- Stack: ..., A: uint64 &rarr; ...
- branch to TARGET if value A is not zero
@@ -601,7 +572,7 @@ At v2 it became allowed to branch to the end of the program exactly after the la
## bz target
-- Opcode: 0x41 {int16 branch offset, big endian}
+- Opcode: 0x41 {int16 branch offset, big-endian}
- Stack: ..., A: uint64 &rarr; ...
- branch to TARGET if value A is zero
- Availability: v2
@@ -610,7 +581,7 @@ See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.
## b target
-- Opcode: 0x42 {int16 branch offset, big endian}
+- Opcode: 0x42 {int16 branch offset, big-endian}
- Stack: ... &rarr; ...
- branch unconditionally to TARGET
- Availability: v2
@@ -620,7 +591,7 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
## return
- Opcode: 0x43
-- Stack: ..., A: uint64 &rarr; ...
+- Stack: ..., A: uint64 &rarr; _exits_
- use A as success value; end
- Availability: v2
@@ -667,7 +638,7 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
## select
- Opcode: 0x4d
-- Stack: ..., A, B, C &rarr; ..., A or B
+- Stack: ..., A, B, C: uint64 &rarr; ..., A or B
- selects one of two values based on top-of-stack: B if C != 0, else A
- Availability: v3
@@ -712,7 +683,7 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
- Opcode: 0x53
- Stack: ..., A, B: uint64 &rarr; ..., uint64
-- Bth bit of (byte-array or integer) A.
+- Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails
- Availability: v3
see explanation of bit ordering in setbit
@@ -721,7 +692,7 @@ see explanation of bit ordering in setbit
- Opcode: 0x54
- Stack: ..., A, B: uint64, C: uint64 &rarr; ..., any
-- Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C
+- Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails
- Availability: v3
When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.
@@ -730,14 +701,14 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
- Opcode: 0x55
- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
-- Bth byte of A, as an integer
+- Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails
- Availability: v3
## setbyte
- Opcode: 0x56
- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
-- Copy of A with the Bth byte set to small integer (between 0..255) C
+- Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails
- Availability: v3
## extract s l
@@ -780,9 +751,17 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
- Opcode: 0x5c {uint8 encoding index}
- Stack: ..., A: []byte &rarr; ..., []byte
- decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E
-- **Cost**: 25
+- **Cost**: 1 + 1 per 16 bytes
- Availability: v7
+`base64` Encodings:
+
+| Index | Name | Notes |
+| - | ------ | --------- |
+| 0 | URLEncoding | |
+| 1 | StdEncoding | |
+
+
Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See <a href="https://rfc-editor.org/rfc/rfc4648.html#section-4">RFC 4648</a> (sections 4 and 5). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\n` and `\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\r`, or `\n`.
## json_ref r
@@ -792,6 +771,15 @@ Decodes A using the base64 encoding E. Specify the encoding with an immediate ar
- return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A
- Availability: v7
+`json_ref` Types:
+
+| Index | Name | Type | Notes |
+| - | ------ | -- | --------- |
+| 0 | JSONString | []byte | |
+| 1 | JSONUint64 | uint64 | |
+| 2 | JSONObject | []byte | |
+
+
specify the return type with an immediate arg either as JSONUint64 or JSONString or JSONObject.
## balance
@@ -904,7 +892,7 @@ Deleting a key which is already absent has no effect on the application global s
- Availability: v2
- Mode: Application
-`asset_holding_get` Fields:
+`asset_holding` Fields:
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -922,7 +910,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or
- Availability: v2
- Mode: Application
-`asset_params_get` Fields:
+`asset_params` Fields:
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
@@ -933,7 +921,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or
| 4 | AssetName | []byte | | Asset name |
| 5 | AssetURL | []byte | | URL with additional info about the asset |
| 6 | AssetMetadataHash | []byte | | Arbitrary commitment |
-| 7 | AssetManager | []byte | | Manager commitment |
+| 7 | AssetManager | []byte | | Manager address |
| 8 | AssetReserve | []byte | | Reserve address |
| 9 | AssetFreeze | []byte | | Freeze address |
| 10 | AssetClawback | []byte | | Clawback address |
@@ -950,7 +938,7 @@ params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id. Return:
- Availability: v5
- Mode: Application
-`app_params_get` Fields:
+`app_params` Fields:
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -975,7 +963,7 @@ params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag
- Availability: v6
- Mode: Application
-`acct_params_get` Fields:
+`acct_params` Fields:
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -1022,7 +1010,7 @@ pushint args are not added to the intcblock during assembly processes
## callsub target
-- Opcode: 0x88 {int16 branch offset, big endian}
+- Opcode: 0x88 {int16 branch offset, big-endian}
- Stack: ... &rarr; ...
- branch unconditionally to TARGET, saving the next instruction on the call stack
- Availability: v4
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 8d1a59534..8c9d5955e 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -31,6 +31,7 @@ import (
"strconv"
"strings"
+ "github.com/algorand/go-algorand/data/abi"
"github.com/algorand/go-algorand/data/basics"
)
@@ -212,10 +213,10 @@ func (ref byteReference) makeNewReference(ops *OpStream, singleton bool, newInde
// OpStream is destination for program and scratch space
type OpStream struct {
Version uint64
- Trace io.Writer
- Warnings []error // informational warnings, shouldn't stop assembly
- Errors []*lineError // errors that should prevent final assembly
- Program []byte // Final program bytes. Will stay nil if any errors
+ Trace *strings.Builder
+ Warnings []error // informational warnings, shouldn't stop assembly
+ Errors []lineError // errors that should prevent final assembly
+ Program []byte // Final program bytes. Will stay nil if any errors
// Running bytes as they are assembled. jumps must be resolved
// and cblocks added before these bytes become a legal program.
@@ -229,8 +230,9 @@ type OpStream struct {
bytecRefs []byteReference // references to byte/addr pseudo-op constants, used for optimization
hasBytecBlock bool // prevent prepending bytecblock because asm has one
- // Keep a stack of the types of what we would push and pop to typecheck a program
- typeStack []StackType
+ // tracks information we know to be true at the point being assembled
+ known ProgramKnowledge
+ typeTracking bool
// current sourceLine during assembly
sourceLine int
@@ -247,56 +249,110 @@ type OpStream struct {
HasStatefulOps bool
}
-// createLabel inserts a label reference to point to the next
-// instruction, reporting an error for a duplicate.
-func (ops *OpStream) createLabel(label string) {
- if ops.labels == nil {
- ops.labels = make(map[string]int)
- }
- if _, ok := ops.labels[label]; ok {
- ops.errorf("duplicate label %#v", label)
+// newOpStream constructs OpStream instances ready to invoke assemble. A new
+// OpStream must be used for each call to assemble().
+func newOpStream(version uint64) OpStream {
+ return OpStream{
+ labels: make(map[string]int),
+ OffsetToLine: make(map[int]int),
+ typeTracking: true,
+ Version: version,
}
- ops.labels[label] = ops.pending.Len()
}
-// RecordSourceLine adds an entry to pc to line mapping
-func (ops *OpStream) RecordSourceLine() {
- if ops.OffsetToLine == nil {
- ops.OffsetToLine = make(map[int]int)
+// ProgramKnowledge tracks statically known information as we assemble
+type ProgramKnowledge struct {
+ // list of the types known to be on the value stack, based on specs of
+ // opcodes seen while assembling. In normal code, the tip of the stack must
+ // match the next opcode's Arg.Types, and is then replaced with its
+ // Return.Types. If `deadcode` is true, `stack` should be empty.
+ stack StackTypes
+
+ // bottom is the type given out when known is empty. It is StackNone at
+ // program start, so, for example, a `+` opcode at the start of a program
+ // fails. But when a label or callsub is encountered, `stack` is truncated
+ // and `bottom` becomes StackAny, because we don't track program state
+ // coming in from elsewhere. A `+` after a label succeeds, because the stack
+ // "virtually" contains an infinite list of StackAny.
+ bottom StackType
+
+ // deadcode indicates that the program is in deadcode, so no type checking
+ // errors should be reported.
+ deadcode bool
+}
+
+func (pgm *ProgramKnowledge) pop() StackType {
+ if len(pgm.stack) == 0 {
+ return pgm.bottom
}
- ops.OffsetToLine[ops.pending.Len()] = ops.sourceLine - 1
+ last := len(pgm.stack) - 1
+ t := pgm.stack[last]
+ pgm.stack = pgm.stack[:last]
+ return t
}
-// ReferToLabel records an opcode label refence to resolve later
-func (ops *OpStream) ReferToLabel(pc int, label string) {
- ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label})
+func (pgm *ProgramKnowledge) push(types ...StackType) {
+ pgm.stack = append(pgm.stack, types...)
+}
+
+func (pgm *ProgramKnowledge) deaden() {
+ pgm.stack = pgm.stack[:0]
+ pgm.deadcode = true
}
-type opTypeFunc func(ops *OpStream, immediates []string) (StackTypes, StackTypes)
+// label resets knowledge to reflect that control may enter from elsewhere.
+func (pgm *ProgramKnowledge) label() {
+ if pgm.deadcode {
+ pgm.reset()
+ }
+}
+
+// reset clears existing knowledge and permissively allows any stack value. It's intended to be invoked after encountering a label or pragma type tracking change.
+func (pgm *ProgramKnowledge) reset() {
+ pgm.stack = nil
+ pgm.bottom = StackAny
+ pgm.deadcode = false
+}
-// returns allows opcodes like `txn` to be specific about their return
-// value types, based on the field requested, rather than use Any as
-// specified by opSpec.
-func (ops *OpStream) returns(argTypes ...StackType) {
- for range argTypes {
- ops.tpop()
+// createLabel inserts a label to point to the next instruction, reporting an
+// error for a duplicate.
+func (ops *OpStream) createLabel(label string) {
+ if _, ok := ops.labels[label]; ok {
+ ops.errorf("duplicate label %#v", label)
}
- ops.tpusha(argTypes)
+ ops.labels[label] = ops.pending.Len()
+ ops.known.label()
+}
+
+// recordSourceLine adds an entry to pc to line mapping
+func (ops *OpStream) recordSourceLine() {
+ ops.OffsetToLine[ops.pending.Len()] = ops.sourceLine - 1
}
-func (ops *OpStream) tpusha(argType []StackType) {
- ops.typeStack = append(ops.typeStack, argType...)
+// referToLabel records an opcode label reference to resolve later
+func (ops *OpStream) referToLabel(pc int, label string) {
+ ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label})
}
-func (ops *OpStream) tpop() (argType StackType) {
- if len(ops.typeStack) == 0 {
- argType = StackNone
+type refineFunc func(pgm ProgramKnowledge, immediates []string) (StackTypes, StackTypes)
+
+// returns allows opcodes like `txn` to be specific about their return value
+// types, based on the field requested, rather than use Any as specified by
+// opSpec. It replaces the first StackAny found among the top len(spec.Return.Types) elements of the typestack.
+func (ops *OpStream) returns(spec *OpSpec, replacement StackType) {
+ if ops.known.deadcode {
return
}
- last := len(ops.typeStack) - 1
- argType = ops.typeStack[last]
- ops.typeStack = ops.typeStack[:last]
- return
+ end := len(ops.known.stack)
+ tip := ops.known.stack[end-len(spec.Return.Types):]
+ for i := range tip {
+ if tip[i] == StackAny {
+ tip[i] = replacement
+ return
+ }
+ }
+ // returns was called on an OpSpec with no StackAny in its Returns
+ panic(fmt.Sprintf("%+v", spec))
}
// Intc writes opcodes for loading a uint64 constant onto the stack.
@@ -320,7 +376,7 @@ func (ops *OpStream) Intc(constIndex uint) {
if constIndex >= uint(len(ops.intc)) {
ops.errorf("intc %d is not defined", constIndex)
} else {
- ops.trace("intc %d %d", constIndex, ops.intc[constIndex])
+ ops.trace("intc %d: %d", constIndex, ops.intc[constIndex])
}
}
@@ -394,24 +450,18 @@ func (ops *OpStream) ByteLiteral(val []byte) {
ops.Bytec(constIndex)
}
-func assembleInt(ops *OpStream, spec *OpSpec, args []string) error {
+func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("int needs one argument")
}
- // check friendly TypeEnum constants
- te, isTypeEnum := txnTypeConstToUint64[args[0]]
- if isTypeEnum {
- ops.Uint(te)
- return nil
- }
- // check raw transaction type strings
- tt, isTypeStr := txnTypeIndexes[args[0]]
- if isTypeStr {
- ops.Uint(tt)
+ // check txn type constants
+ i, ok := txnTypeMap[args[0]]
+ if ok {
+ ops.Uint(i)
return nil
}
// check OnCompetion constants
- oc, isOCStr := onCompletionConstToUint64[args[0]]
+ oc, isOCStr := onCompletionMap[args[0]]
if isOCStr {
ops.Uint(oc)
return nil
@@ -425,7 +475,7 @@ func assembleInt(ops *OpStream, spec *OpSpec, args []string) error {
}
// Explicit invocation of const lookup and push
-func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error {
+func asmIntC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("intc operation needs one argument")
}
@@ -436,7 +486,7 @@ func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error {
ops.Intc(uint(constIndex))
return nil
}
-func assembleByteC(ops *OpStream, spec *OpSpec, args []string) error {
+func asmByteC(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("bytec operation needs one argument")
}
@@ -628,7 +678,7 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte {base64,b64,base32,b32} ...
// byte 0x....
// byte "this is a string\n"
-func assembleByte(ops *OpStream, spec *OpSpec, args []string) error {
+func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
return ops.errorf("%s operation needs byte literal argument", spec.Name)
}
@@ -644,24 +694,31 @@ func assembleByte(ops *OpStream, spec *OpSpec, args []string) error {
}
// method "add(uint64,uint64)uint64"
-func assembleMethod(ops *OpStream, spec *OpSpec, args []string) error {
+func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
return ops.error("method requires a literal argument")
}
arg := args[0]
if len(arg) > 1 && arg[0] == '"' && arg[len(arg)-1] == '"' {
- val, err := parseStringLiteral(arg)
+ methodSig, err := parseStringLiteral(arg)
if err != nil {
return ops.error(err)
}
- hash := sha512.Sum512_256(val)
+ methodSigStr := string(methodSig)
+ err = abi.VerifyMethodSignature(methodSigStr)
+ if err != nil {
+ // Warn if an invalid signature is used. Don't return an error, since the ABI is not
+ // governed by the core protocol, so there may be changes to it that we don't know about
+ ops.warnf("Invalid ARC-4 ABI method signature for method op: %s", err.Error()) // nolint:errcheck
+ }
+ hash := sha512.Sum512_256(methodSig)
ops.ByteLiteral(hash[0:4])
return nil
}
return ops.error("Unable to parse method signature")
}
-func assembleIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
@@ -681,7 +738,7 @@ func assembleIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.WriteByte(spec.Opcode)
bvals := make([][]byte, 0, len(args))
rest := args
@@ -714,7 +771,7 @@ func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// addr A1EU...
// parses base32-with-checksum account address strings into a byte literal
-func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error {
+func asmAddr(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("addr operation needs one argument")
}
@@ -726,7 +783,7 @@ func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func assembleArg(ops *OpStream, spec *OpSpec, args []string) error {
+func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("arg operation needs one argument")
}
@@ -751,12 +808,12 @@ func assembleArg(ops *OpStream, spec *OpSpec, args []string) error {
return asmDefault(ops, &altSpec, args)
}
-func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error {
+func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("branch operation needs label argument")
}
- ops.ReferToLabel(ops.pending.Len(), args[0])
+ ops.referToLabel(ops.pending.Len(), args[0])
ops.pending.WriteByte(spec.Opcode)
// zero bytes will get replaced with actual offset in resolveLabels()
ops.pending.WriteByte(0)
@@ -764,7 +821,7 @@ func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func assembleSubstring(ops *OpStream, spec *OpSpec, args []string) error {
+func asmSubstring(ops *OpStream, spec *OpSpec, args []string) error {
err := asmDefault(ops, spec, args)
if err != nil {
return err
@@ -778,25 +835,7 @@ func assembleSubstring(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func txnFieldImm(name string, expectArray bool, ops *OpStream) (*txnFieldSpec, error) {
- fs, ok := TxnFieldSpecByName[name]
- if !ok {
- return nil, fmt.Errorf("unknown field: %#v", name)
- }
- if expectArray != fs.array {
- if expectArray {
- return nil, fmt.Errorf("found scalar field %#v while expecting array", name)
- }
- return nil, fmt.Errorf("found array field %#v while expecting scalar", name)
- }
- if fs.version > ops.Version {
- return nil,
- fmt.Errorf("field %#v available in version %d. Missed #pragma version?", name, fs.version)
- }
- return &fs, nil
-}
-
-func simpleImm(value string, label string) (uint64, error) {
+func simpleImm(value string, label string) (byte, error) {
res, err := strconv.ParseUint(value, 0, 64)
if err != nil {
return 0, fmt.Errorf("unable to parse %s %#v as integer", label, value)
@@ -804,352 +843,74 @@ func simpleImm(value string, label string) (uint64, error) {
if res > 255 {
return 0, fmt.Errorf("%s beyond 255: %d", label, res)
}
- return res, err
-}
-
-func asmTxn(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.error("txn expects one argument")
- }
- fs, err := txnFieldImm(args[0], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
+ return byte(res), err
}
// asmTxn2 delegates to asmTxn or asmTxna depending on number of operands
func asmTxn2(ops *OpStream, spec *OpSpec, args []string) error {
switch len(args) {
case 1:
- return asmTxn(ops, spec, args)
+ txn := OpsByName[1]["txn"] // v1 txn opcode does not have array names
+ return asmDefault(ops, &txn, args)
case 2:
txna := OpsByName[ops.Version]["txna"]
- return asmTxna(ops, &txna, args)
+ return asmDefault(ops, &txna, args)
default:
- return ops.error("txn expects one or two arguments")
+ return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
}
}
-// asmTxna also assemble asmItxna
-func asmTxna(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two immediate arguments", spec.Name)
- }
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- arrayFieldIdx, err := simpleImm(args[1], "array index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.returns(fs.ftype)
- return nil
-}
-
-// asmTxnas also assembles itxnas
-func asmTxnas(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one immediate argument", spec.Name)
- }
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
-func asmGtxn(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two arguments", spec.Name)
- }
- slot, err := simpleImm(args[0], "transaction index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- fs, err := txnFieldImm(args[1], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(slot))
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
func asmGtxn2(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 2 {
- return asmGtxn(ops, spec, args)
+ gtxn := OpsByName[1]["gtxn"] // v1 gtxn opcode does not have array names
+ return asmDefault(ops, &gtxn, args)
}
if len(args) == 3 {
gtxna := OpsByName[ops.Version]["gtxna"]
- return asmGtxna(ops, &gtxna, args)
+ return asmDefault(ops, &gtxna, args)
}
- return ops.errorf("%s expects two or three arguments", spec.Name)
-}
-
-//asmGtxna also assembles asmGitxna
-func asmGtxna(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 3 {
- return ops.errorf("%s expects three arguments", spec.Name)
- }
- slot, err := simpleImm(args[0], "transaction index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- fs, err := txnFieldImm(args[1], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- arrayFieldIdx, err := simpleImm(args[2], "array index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(slot))
- ops.pending.WriteByte(uint8(fs.field))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.returns(fs.ftype)
- return nil
-}
-
-// asmGtxnas also assembles gitxnas
-func asmGtxnas(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two immediate arguments", spec.Name)
- }
- slot, err := simpleImm(args[0], "transaction index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- fs, err := txnFieldImm(args[1], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(slot))
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
+ return ops.errorf("%s expects 2 or 3 immediate arguments", spec.Name)
}
func asmGtxns(ops *OpStream, spec *OpSpec, args []string) error {
+ if len(args) == 1 {
+ return asmDefault(ops, spec, args)
+ }
if len(args) == 2 {
gtxnsa := OpsByName[ops.Version]["gtxnsa"]
- return asmGtxnsa(ops, &gtxnsa, args)
- }
- if len(args) != 1 {
- return ops.errorf("%s expects one or two immediate arguments", spec.Name)
- }
- fs, err := txnFieldImm(args[0], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
-func asmGtxnsa(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two immediate arguments", spec.Name)
- }
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
+ return asmDefault(ops, &gtxnsa, args)
}
- arrayFieldIdx, err := simpleImm(args[1], "array index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.returns(fs.ftype)
- return nil
+ return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
}
-func asmGtxnsas(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one immediate argument", spec.Name)
- }
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
-// asmItxn delegates to asmItxnOnly or asmItxna depending on number of operands
func asmItxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 1 {
- return asmItxnOnly(ops, spec, args)
+ return asmDefault(ops, spec, args)
}
if len(args) == 2 {
itxna := OpsByName[ops.Version]["itxna"]
- return asmTxna(ops, &itxna, args)
+ return asmDefault(ops, &itxna, args)
}
- return ops.errorf("%s expects one or two arguments", spec.Name)
+ return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
}
-func asmItxnOnly(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, err := txnFieldImm(args[0], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
-// asmGitxn delegates to asmGtxn or asmGtxna depending on number of operands
+// asmGitxn substitutes gitxna's spec if there are 3 args
func asmGitxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 2 {
- return asmGtxn(ops, spec, args)
+ return asmDefault(ops, spec, args)
}
if len(args) == 3 {
itxna := OpsByName[ops.Version]["gitxna"]
- return asmGtxna(ops, &itxna, args)
+ return asmDefault(ops, &itxna, args)
}
- return ops.errorf("%s expects two or three arguments", spec.Name)
+ return ops.errorf("%s expects 2 or 3 immediate arguments", spec.Name)
}
-func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error {
+func asmItxnField(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.errorf("%s expects one argument", spec.Name)
}
- fs, ok := GlobalFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype)
- return nil
-}
-
-func assembleAssetHolding(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AssetHoldingFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
-}
-
-func assembleAssetParams(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AssetParamsFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
-}
-
-func assembleAppParams(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AppParamsFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
-}
-
-func assembleAcctParams(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AcctParamsFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
-}
-
-func asmTxField(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := TxnFieldSpecByName[args[0]]
+ fs, ok := txnFieldSpecByName[args[0]]
if !ok {
return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
}
@@ -1157,188 +918,150 @@ func asmTxField(ops *OpStream, spec *OpSpec, args []string) error {
return ops.errorf("%s %#v is not allowed.", spec.Name, args[0])
}
if fs.itxVersion > ops.Version {
- return ops.errorf("%s %#v available in version %d. Missed #pragma version?", spec.Name, args[0], fs.itxVersion)
+ return ops.errorf("%s %s field was introduced in TEAL v%d. Missed #pragma version?", spec.Name, args[0], fs.itxVersion)
}
ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
+ ops.pending.WriteByte(fs.Field())
return nil
}
-func assembleEcdsa(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
-
- cs, ok := EcdsaCurveSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if cs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], cs.version)
- }
-
- val := cs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- return nil
-}
-
-func assembleBase64Decode(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
-
- encoding, ok := base64EncodingSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown encoding: %#v", spec.Name, args[0])
- }
- if encoding.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], encoding.version)
- }
-
- val := encoding.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", encoding.field, encoding.ftype)
- ops.returns(encoding.ftype)
- return nil
-}
-
-func assembleJSONRef(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
-
- jsonSpec, ok := jsonRefSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unsupported JSON value type: %#v", spec.Name, args[0])
- }
- if jsonSpec.version > ops.Version {
- return ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], jsonSpec.version)
- }
-
- valueType := jsonSpec.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(valueType))
- ops.trace("%s (%s)", jsonSpec.field, jsonSpec.ftype)
- ops.returns(jsonSpec.ftype)
- return nil
-}
-
-type assembleFunc func(*OpStream, *OpSpec, []string) error
+type asmFunc func(*OpStream, *OpSpec, []string) error
// Basic assembly. Any extra bytes of opcode are encoded as byte immediates.
func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != spec.Details.Size-1 {
- return ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size-1)
+ expected := len(spec.OpDetails.Immediates)
+ if len(args) != expected {
+ if expected == 1 {
+ return ops.errorf("%s expects 1 immediate argument", spec.Name)
+ }
+ return ops.errorf("%s expects %d immediate arguments", spec.Name, expected)
}
ops.pending.WriteByte(spec.Opcode)
- for i := 0; i < spec.Details.Size-1; i++ {
- val, err := simpleImm(args[i], "argument")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
+ for i, imm := range spec.OpDetails.Immediates {
+ switch imm.kind {
+ case immByte:
+ if imm.Group != nil {
+ fs, ok := imm.Group.SpecByName(args[i])
+ if !ok {
+ return ops.errorf("%s unknown field: %#v", spec.Name, args[i])
+ }
+ // refine the typestack now, so it is maintained even if there's a version error
+ if fs.Type().Typed() {
+ ops.returns(spec, fs.Type())
+ }
+ if fs.Version() > ops.Version {
+ return ops.errorf("%s %s field was introduced in TEAL v%d. Missed #pragma version?",
+ spec.Name, args[i], fs.Version())
+ }
+ ops.pending.WriteByte(fs.Field())
+ } else {
+ // simple immediate that must be a number from 0-255
+ val, err := simpleImm(args[i], imm.Name)
+ if err != nil {
+ return ops.errorf("%s %w", spec.Name, err)
+ }
+ ops.pending.WriteByte(val)
+ }
+ default:
+ return ops.errorf("unable to assemble immKind %d", imm.kind)
}
- ops.pending.WriteByte(byte(val))
}
return nil
}
-func typeSwap(ops *OpStream, args []string) (StackTypes, StackTypes) {
- topTwo := oneAny.plus(oneAny)
- top := len(ops.typeStack) - 1
+func typeSwap(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ topTwo := StackTypes{StackAny, StackAny}
+ top := len(pgm.stack) - 1
if top >= 0 {
- topTwo[1] = ops.typeStack[top]
+ topTwo[1] = pgm.stack[top]
if top >= 1 {
- topTwo[0] = ops.typeStack[top-1]
+ topTwo[0] = pgm.stack[top-1]
}
}
reversed := StackTypes{topTwo[1], topTwo[0]}
- return topTwo, reversed
+ return nil, reversed
}
-func typeDig(ops *OpStream, args []string) (StackTypes, StackTypes) {
+func typeDig(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
if len(args) == 0 {
- return oneAny, oneAny
+ return nil, nil
}
n, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
- return oneAny, oneAny
+ return nil, nil
}
depth := int(n) + 1
anys := make(StackTypes, depth)
+ returns := make(StackTypes, depth+1)
for i := range anys {
anys[i] = StackAny
+ returns[i] = StackAny
}
- returns := anys.plus(oneAny)
- idx := len(ops.typeStack) - depth
+ returns[depth] = StackAny
+ idx := len(pgm.stack) - depth
if idx >= 0 {
- returns[len(returns)-1] = ops.typeStack[idx]
- for i := idx; i < len(ops.typeStack); i++ {
- returns[i-idx] = ops.typeStack[i]
+ returns[len(returns)-1] = pgm.stack[idx]
+ for i := idx; i < len(pgm.stack); i++ {
+ returns[i-idx] = pgm.stack[i]
}
}
return anys, returns
}
-func typeEquals(ops *OpStream, args []string) (StackTypes, StackTypes) {
- top := len(ops.typeStack) - 1
+func typeEquals(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ top := len(pgm.stack) - 1
if top >= 0 {
//Require arg0 and arg1 to have same type
- return StackTypes{ops.typeStack[top], ops.typeStack[top]}, oneInt
+ return StackTypes{pgm.stack[top], pgm.stack[top]}, nil
}
- return oneAny.plus(oneAny), oneInt
+ return nil, nil
}
-func typeDup(ops *OpStream, args []string) (StackTypes, StackTypes) {
- top := len(ops.typeStack) - 1
+func typeDup(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ top := len(pgm.stack) - 1
if top >= 0 {
- return StackTypes{ops.typeStack[top]}, StackTypes{ops.typeStack[top], ops.typeStack[top]}
+ return StackTypes{pgm.stack[top]}, StackTypes{pgm.stack[top], pgm.stack[top]}
}
- return StackTypes{StackAny}, oneAny.plus(oneAny)
+ return nil, nil
}
-func typeDupTwo(ops *OpStream, args []string) (StackTypes, StackTypes) {
- topTwo := oneAny.plus(oneAny)
- top := len(ops.typeStack) - 1
+func typeDupTwo(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ topTwo := StackTypes{StackAny, StackAny}
+ top := len(pgm.stack) - 1
if top >= 0 {
- topTwo[1] = ops.typeStack[top]
+ topTwo[1] = pgm.stack[top]
if top >= 1 {
- topTwo[0] = ops.typeStack[top-1]
+ topTwo[0] = pgm.stack[top-1]
}
}
- result := topTwo.plus(topTwo)
- return topTwo, result
+ return nil, append(topTwo, topTwo...)
}
-func typeSelect(ops *OpStream, args []string) (StackTypes, StackTypes) {
- selectArgs := twoAny.plus(oneInt)
- top := len(ops.typeStack) - 1
+func typeSelect(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ top := len(pgm.stack) - 1
if top >= 2 {
- if ops.typeStack[top-1] == ops.typeStack[top-2] {
- return selectArgs, StackTypes{ops.typeStack[top-1]}
+ if pgm.stack[top-1] == pgm.stack[top-2] {
+ return nil, StackTypes{pgm.stack[top-1]}
}
}
- return selectArgs, StackTypes{StackAny}
+ return nil, nil
}
-func typeSetBit(ops *OpStream, args []string) (StackTypes, StackTypes) {
- setBitArgs := oneAny.plus(twoInts)
- top := len(ops.typeStack) - 1
+func typeSetBit(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ top := len(pgm.stack) - 1
if top >= 2 {
- return setBitArgs, StackTypes{ops.typeStack[top-2]}
+ return nil, StackTypes{pgm.stack[top-2]}
}
- return setBitArgs, StackTypes{StackAny}
+ return nil, nil
}
-func typeCover(ops *OpStream, args []string) (StackTypes, StackTypes) {
+func typeCover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
if len(args) == 0 {
- return oneAny, oneAny
+ return nil, nil
}
n, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
- return oneAny, oneAny
+ return nil, nil
}
depth := int(n) + 1
anys := make(StackTypes, depth)
@@ -1349,24 +1072,27 @@ func typeCover(ops *OpStream, args []string) (StackTypes, StackTypes) {
for i := range returns {
returns[i] = StackAny
}
- idx := len(ops.typeStack) - depth
+ idx := len(pgm.stack) - depth
+ // This rotates all the types if idx is >= 0. But there's a potential
+ // improvement: when pgm.bottom is StackAny, and the cover is going "under"
+ // the known stack, the returns slice could still be partially populated
+ // based on pgm.stack.
if idx >= 0 {
- sv := ops.typeStack[len(ops.typeStack)-1]
- for i := idx; i < len(ops.typeStack)-1; i++ {
- returns[i-idx+1] = ops.typeStack[i]
+ returns[0] = pgm.stack[len(pgm.stack)-1]
+ for i := idx; i < len(pgm.stack)-1; i++ {
+ returns[i-idx+1] = pgm.stack[i]
}
- returns[len(returns)-depth] = sv
}
return anys, returns
}
-func typeUncover(ops *OpStream, args []string) (StackTypes, StackTypes) {
+func typeUncover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
if len(args) == 0 {
- return oneAny, oneAny
+ return nil, nil
}
n, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
- return oneAny, oneAny
+ return nil, nil
}
depth := int(n) + 1
anys := make(StackTypes, depth)
@@ -1377,37 +1103,39 @@ func typeUncover(ops *OpStream, args []string) (StackTypes, StackTypes) {
for i := range returns {
returns[i] = StackAny
}
- idx := len(ops.typeStack) - depth
+ idx := len(pgm.stack) - depth
+ // See precision comment in typeCover
if idx >= 0 {
- sv := ops.typeStack[idx]
- for i := idx + 1; i < len(ops.typeStack); i++ {
- returns[i-idx-1] = ops.typeStack[i]
+ returns[len(returns)-1] = pgm.stack[idx]
+ for i := idx + 1; i < len(pgm.stack); i++ {
+ returns[i-idx-1] = pgm.stack[i]
}
- returns[len(returns)-1] = sv
}
return anys, returns
}
-func typeTxField(ops *OpStream, args []string) (StackTypes, StackTypes) {
+func typeTxField(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
if len(args) != 1 {
- return oneAny, nil
+ return nil, nil
}
- fs, ok := TxnFieldSpecByName[args[0]]
+ fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return oneAny, nil
+ return nil, nil
}
return StackTypes{fs.ftype}, nil
}
-// keywords handle parsing and assembling special asm language constructs like 'addr'
-// We use OpSpec here, but somewhat degenerate, since they don't have opcodes or eval functions
+// keywords or "pseudo-ops" handle parsing and assembling special asm language
+// constructs like 'addr'. We use an OpSpec here, but it's somewhat degenerate,
+// since they don't have opcodes or eval functions. But it does need a lot of
+// OpSpec, in order to support assembly - Mode, typing info, etc.
var keywords = map[string]OpSpec{
- "int": {0, "int", nil, assembleInt, nil, nil, oneInt, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
- "byte": {0, "byte", nil, assembleByte, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
+ "int": {0, "int", nil, proto(":i"), 1, assembler(asmInt)},
+ "byte": {0, "byte", nil, proto(":b"), 1, assembler(asmByte)},
// parse basics.Address, actually just another []byte constant
- "addr": {0, "addr", nil, assembleAddr, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
+ "addr": {0, "addr", nil, proto(":b"), 1, assembler(asmAddr)},
// take a signature, hash it, and take first 4 bytes, actually just another []byte constant
- "method": {0, "method", nil, assembleMethod, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
+ "method": {0, "method", nil, proto(":b"), 1, assembler(asmMethod)},
}
type lineError struct {
@@ -1415,11 +1143,11 @@ type lineError struct {
Err error
}
-func (le *lineError) Error() string {
+func (le lineError) Error() string {
return fmt.Sprintf("%d: %s", le.Line, le.Err.Error())
}
-func (le *lineError) Unwrap() error {
+func (le lineError) Unwrap() error {
return le.Err
}
@@ -1516,34 +1244,38 @@ func (ops *OpStream) trace(format string, args ...interface{}) {
fmt.Fprintf(ops.Trace, format, args...)
}
-// checks (and pops) arg types from arg type stack
-func (ops *OpStream) checkStack(args StackTypes, returns StackTypes, instruction []string) {
+func (ops *OpStream) typeError(err error) {
+ if ops.typeTracking {
+ ops.error(err)
+ }
+}
+
+// trackStack checks that the typeStack has `args` on it, then pushes `returns` to it.
+func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction []string) {
+ // If in deadcode, allow anything. Maybe it's some sort of onchain data.
+ if ops.known.deadcode {
+ return
+ }
argcount := len(args)
- if argcount > len(ops.typeStack) {
- err := fmt.Errorf("%s expects %d stack arguments but stack height is %d", strings.Join(instruction, " "), argcount, len(ops.typeStack))
- if len(ops.labelReferences) > 0 {
- ops.warnf("%w; but branches have happened and assembler does not precisely track the stack in this case", err)
- } else {
- ops.error(err)
- }
+ if argcount > len(ops.known.stack) && ops.known.bottom == StackNone {
+ err := fmt.Errorf("%s expects %d stack arguments but stack height is %d",
+ strings.Join(instruction, " "), argcount, len(ops.known.stack))
+ ops.typeError(err)
} else {
firstPop := true
for i := argcount - 1; i >= 0; i-- {
argType := args[i]
- stype := ops.tpop()
+ stype := ops.known.pop()
if firstPop {
firstPop = false
- ops.trace("pops(%s", argType.String())
+ ops.trace("pops(%s", argType)
} else {
- ops.trace(", %s", argType.String())
+ ops.trace(", %s", argType)
}
if !typecheck(argType, stype) {
- err := fmt.Errorf("%s arg %d wanted type %s got %s", strings.Join(instruction, " "), i, argType.String(), stype.String())
- if len(ops.labelReferences) > 0 {
- ops.warnf("%w; but branches have happened and assembler does not precisely track types in this case", err)
- } else {
- ops.error(err)
- }
+ err := fmt.Errorf("%s arg %d wanted type %s got %s",
+ strings.Join(instruction, " "), i, argType, stype)
+ ops.typeError(err)
}
}
if !firstPop {
@@ -1552,11 +1284,11 @@ func (ops *OpStream) checkStack(args StackTypes, returns StackTypes, instruction
}
if len(returns) > 0 {
- ops.tpusha(returns)
- ops.trace(" pushes(%s", returns[0].String())
+ ops.known.push(returns...)
+ ops.trace(" pushes(%s", returns[0])
if len(returns) > 1 {
for _, rt := range returns[1:] {
- ops.trace(", %s", rt.String())
+ ops.trace(", %s", rt)
}
}
ops.trace(")")
@@ -1564,35 +1296,35 @@ func (ops *OpStream) checkStack(args StackTypes, returns StackTypes, instruction
}
// assemble reads text from an input and accumulates the program
-func (ops *OpStream) assemble(fin io.Reader) error {
+func (ops *OpStream) assemble(text string) error {
+ fin := strings.NewReader(text)
if ops.Version > LogicVersion && ops.Version != assemblerNoVersion {
return ops.errorf("Can not assemble version %d", ops.Version)
}
scanner := bufio.NewScanner(fin)
- ops.sourceLine = 0
for scanner.Scan() {
ops.sourceLine++
line := scanner.Text()
line = strings.TrimSpace(line)
if len(line) == 0 {
- ops.trace("%d: 0 line\n", ops.sourceLine)
+ ops.trace("%3d: 0 line\n", ops.sourceLine)
continue
}
if strings.HasPrefix(line, "//") {
- ops.trace("%d: // line\n", ops.sourceLine)
+ ops.trace("%3d: // line\n", ops.sourceLine)
continue
}
if strings.HasPrefix(line, "#pragma") {
- ops.trace("%d: #pragma line\n", ops.sourceLine)
+ ops.trace("%3d: #pragma line\n", ops.sourceLine)
ops.pragma(line)
continue
}
fields := fieldsFromLine(line)
if len(fields) == 0 {
- ops.trace("%d: no fields\n", ops.sourceLine)
+ ops.trace("%3d: no fields\n", ops.sourceLine)
continue
}
- // we're about to begin processing opcodes, so fix the Version
+ // we're about to begin processing opcodes, so settle the Version
if ops.Version == assemblerNoVersion {
ops.Version = AssemblerDefaultVersion
}
@@ -1602,7 +1334,7 @@ func (ops *OpStream) assemble(fin io.Reader) error {
ops.createLabel(opstring[:len(opstring)-1])
fields = fields[1:]
if len(fields) == 0 {
- // There was a label, not need to ops.trace this
+ ops.trace("%3d: label only\n", ops.sourceLine)
continue
}
opstring = fields[0]
@@ -1615,28 +1347,44 @@ func (ops *OpStream) assemble(fin io.Reader) error {
ok = false
}
}
+ if !ok {
+ // If the problem is only the version, it's useful to lookup the
+ // opcode from latest version, so we proceed with assembly well
+ // enough to report follow-on errors. Of course, we still have to
+ // bail out on the assembly as a whole.
+ spec, ok = OpsByName[AssemblerMaxVersion][opstring]
+ if !ok {
+ spec, ok = keywords[opstring]
+ }
+ ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
+ }
if ok {
ops.trace("%3d: %s\t", ops.sourceLine, opstring)
- ops.RecordSourceLine()
- if spec.Modes == runModeApplication {
+ ops.recordSourceLine()
+ if spec.Modes == modeApp {
ops.HasStatefulOps = true
}
- args, returns := spec.Args, spec.Returns
- if spec.Details.typeFunc != nil {
- args, returns = spec.Details.typeFunc(ops, fields[1:])
+ args, returns := spec.Arg.Types, spec.Return.Types
+ if spec.OpDetails.refine != nil {
+ nargs, nreturns := spec.OpDetails.refine(ops.known, fields[1:])
+ if nargs != nil {
+ args = nargs
+ }
+ if nreturns != nil {
+ returns = nreturns
+ }
}
- ops.checkStack(args, returns, fields)
+ ops.trackStack(args, returns, fields)
spec.asm(ops, &spec, fields[1:])
+ if spec.deadens() { // An unconditional branch deadens the following code
+ ops.known.deaden()
+ }
+ if spec.Name == "callsub" {
+ // since retsub comes back to the callsub, it is an entry point like a label
+ ops.known.label()
+ }
ops.trace("\n")
continue
- }
- // unknown opcode, let's report a good error if version problem
- spec, ok = OpsByName[AssemblerMaxVersion][opstring]
- if !ok {
- spec, ok = keywords[opstring]
- }
- if ok {
- ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
} else {
ops.errorf("unknown opcode: %s", opstring)
}
@@ -1656,7 +1404,6 @@ func (ops *OpStream) assemble(fin io.Reader) error {
ops.optimizeBytecBlock()
}
- // TODO: warn if expected resulting stack is not len==1 ?
ops.resolveLabels()
program := ops.prependCBlocks()
if ops.Errors != nil {
@@ -1708,6 +1455,22 @@ func (ops *OpStream) pragma(line string) error {
// ops.Version is already correct, or needed to be upped.
}
return nil
+ case "typetrack":
+ if len(fields) < 3 {
+ return ops.error("no typetrack value")
+ }
+ value := fields[2]
+ on, err := strconv.ParseBool(value)
+ if err != nil {
+ return ops.errorf("bad #pragma typetrack: %#v", value)
+ }
+ prev := ops.typeTracking
+ if !prev && on {
+ ops.known.reset()
+ }
+ ops.typeTracking = on
+
+ return nil
default:
return ops.errorf("unsupported pragma directive: %#v", key)
}
@@ -1718,7 +1481,7 @@ func (ops *OpStream) resolveLabels() {
raw := ops.pending.Bytes()
reported := make(map[string]bool)
for _, lr := range ops.labelReferences {
- ops.sourceLine = lr.sourceLine
+ ops.sourceLine = lr.sourceLine // so errors get reported where the label was used
dest, ok := ops.labels[lr.label]
if !ok {
if !reported[lr.label] {
@@ -1946,6 +1709,12 @@ func (ops *OpStream) optimizeConstants(refs []constReference, constBlock []inter
// update all indexes into ops.pending that have been shifted by the above line
+ // This is a huge optimization for long repetitive programs. Takes
+ // BenchmarkUintMath from 160sec to 19s.
+ if positionDelta == 0 {
+ continue
+ }
+
for i := range ops.intcRefs {
if ops.intcRefs[i].position > position {
ops.intcRefs[i].position += positionDelta
@@ -2049,17 +1818,17 @@ func (ops *OpStream) error(problem interface{}) error {
}
func (ops *OpStream) lineError(line int, problem interface{}) error {
- var le *lineError
+ var err lineError
switch p := problem.(type) {
case string:
- le = &lineError{Line: line, Err: errors.New(p)}
+ err = lineError{Line: line, Err: errors.New(p)}
case error:
- le = &lineError{Line: line, Err: p}
+ err = lineError{Line: line, Err: p}
default:
- le = &lineError{Line: line, Err: fmt.Errorf("%#v", p)}
+ err = lineError{Line: line, Err: fmt.Errorf("%#v", p)}
}
- ops.Errors = append(ops.Errors, le)
- return le
+ ops.Errors = append(ops.Errors, err)
+ return err
}
func (ops *OpStream) errorf(format string, a ...interface{}) error {
@@ -2125,9 +1894,8 @@ func AssembleString(text string) (*OpStream, error) {
// Note that AssemblerDefaultVersion is not the latest supported version,
// and therefore we might need to pass in explicitly a higher version.
func AssembleStringWithVersion(text string, version uint64) (*OpStream, error) {
- sr := strings.NewReader(text)
- ops := OpStream{Version: version}
- err := ops.assemble(sr)
+ ops := newOpStream(version)
+ err := ops.assemble(text)
return &ops, err
}
@@ -2140,13 +1908,11 @@ type disassembleState struct {
labelCount int
pendingLabels map[int]string
- // If we find a (back) jump to a label we did not generate
- // (because we didn't know about it yet), rerun is set to
- // true, and we make a second attempt to assemble once the
- // first attempt is done. The second attempt retains all the
- // labels found in the first pass. In effect, the first
- // attempt to assemble becomes a first-pass in a two-pass
- // assembly process that simply collects jump target labels.
+ // If we find a (back) jump to a label we did not generate (because we
+ // didn't know it was needed yet), rerun is set to true, and we make a
+ // second attempt to disassemble once the first attempt is done. The second
+ // attempt retains all the labels found in the first pass. In effect, the
+ // first attempt simply collects jump target labels for the second pass.
rerun bool
nextpc int
@@ -2172,29 +1938,131 @@ func (dis *disassembleState) outputLabelIfNeeded() (err error) {
return
}
-type disassembleFunc func(dis *disassembleState, spec *OpSpec) (string, error)
+// disassemble a single opcode at program[pc] according to spec
+func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
+ out := spec.Name
+ pc := dis.pc + 1
+ for _, imm := range spec.OpDetails.Immediates {
+ out += " "
+ switch imm.kind {
+ case immByte:
+ if pc >= len(dis.program) {
+ return "", fmt.Errorf("program end while reading immediate %s for %s",
+ imm.Name, spec.Name)
+ }
+ b := dis.program[pc]
+ if imm.Group != nil {
+ if int(b) >= len(imm.Group.Names) {
+ return "", fmt.Errorf("invalid immediate %s for %s: %d", imm.Name, spec.Name, b)
+ }
+ name := imm.Group.Names[b]
+ if name == "" {
+ return "", fmt.Errorf("invalid immediate %s for %s: %d", imm.Name, spec.Name, b)
+ }
+ out += name
+ } else {
+ out += fmt.Sprintf("%d", b)
+ }
+ if spec.Name == "intc" && int(b) < len(dis.intc) {
+ out += fmt.Sprintf(" // %d", dis.intc[b])
+ }
+ if spec.Name == "bytec" && int(b) < len(dis.bytec) {
+ out += fmt.Sprintf(" // %s", guessByteFormat(dis.bytec[b]))
+ }
-// Basic disasemble, and extra bytes of opcode are decoded as bytes integers.
-func disDefault(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + spec.Details.Size - 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
+ pc++
+ case immLabel:
+ offset := (uint(dis.program[pc]) << 8) | uint(dis.program[pc+1])
+ target := int(offset) + pc + 2
+ if target > 0xffff {
+ target -= 0x10000
+ }
+ var label string
+ if dis.numericTargets {
+ label = fmt.Sprintf("%d", target)
+ } else {
+ if known, ok := dis.pendingLabels[target]; ok {
+ label = known
+ } else {
+ dis.labelCount++
+ label = fmt.Sprintf("label%d", dis.labelCount)
+ dis.putLabel(label, target)
+ }
+ }
+ out += label
+ pc += 2
+ case immInt:
+ val, bytesUsed := binary.Uvarint(dis.program[pc:])
+ if bytesUsed <= 0 {
+ return "", fmt.Errorf("could not decode immediate %s for %s", imm.Name, spec.Name)
+ }
+ out += fmt.Sprintf("%d", val)
+ pc += bytesUsed
+ case immBytes:
+ length, bytesUsed := binary.Uvarint(dis.program[pc:])
+ if bytesUsed <= 0 {
+ return "", fmt.Errorf("could not decode immediate %s for %s", imm.Name, spec.Name)
+ }
+ pc += bytesUsed
+ end := uint64(pc) + length
+ if end > uint64(len(dis.program)) || end < uint64(pc) {
+ return "", fmt.Errorf("could not decode immediate %s for %s", imm.Name, spec.Name)
+ }
+ constant := dis.program[pc:end]
+ out += fmt.Sprintf("0x%s // %s", hex.EncodeToString(constant), guessByteFormat(constant))
+ pc = int(end)
+ case immInts:
+ intc, nextpc, err := parseIntcblock(dis.program, pc)
+ if err != nil {
+ return "", err
+ }
+
+ dis.intc = append(dis.intc, intc...)
+ for i, iv := range intc {
+ if i != 0 {
+ out += " "
+ }
+ out += fmt.Sprintf("%d", iv)
+ }
+ pc = nextpc
+ case immBytess:
+ bytec, nextpc, err := parseBytecBlock(dis.program, pc)
+ if err != nil {
+ return "", err
+ }
+ dis.bytec = append(dis.bytec, bytec...)
+ for i, bv := range bytec {
+ if i != 0 {
+ out += " "
+ }
+ out += fmt.Sprintf("0x%s", hex.EncodeToString(bv))
+ }
+ pc = nextpc
+ default:
+ return "", fmt.Errorf("unknown immKind %d", imm.kind)
+ }
}
- dis.nextpc = dis.pc + spec.Details.Size
- out := spec.Name
- for s := 1; s < spec.Details.Size; s++ {
- b := uint(dis.program[dis.pc+s])
- out += fmt.Sprintf(" %d", b)
+
+ if strings.HasPrefix(spec.Name, "intc_") {
+ b := spec.Name[len(spec.Name)-1] - byte('0')
+ if int(b) < len(dis.intc) {
+ out += fmt.Sprintf(" // %d", dis.intc[b])
+ }
}
+ if strings.HasPrefix(spec.Name, "bytec_") {
+ b := spec.Name[len(spec.Name)-1] - byte('0')
+ if int(b) < len(dis.intc) {
+ out += fmt.Sprintf(" // %s", guessByteFormat(dis.bytec[b]))
+ }
+ }
+ dis.nextpc = pc
return out, nil
}
var errShortIntcblock = errors.New("intcblock ran past end of program")
var errTooManyIntc = errors.New("intcblock with too many items")
-func parseIntcblock(program []byte, pc int) (intc []uint64, nextpc int, err error) {
- pos := pc + 1
+func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err error) {
numInts, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
err = fmt.Errorf("could not decode intcblock size at pc=%d", pos)
@@ -2250,8 +2118,7 @@ func checkIntConstBlock(cx *EvalContext) error {
var errShortBytecblock = errors.New("bytecblock ran past end of program")
var errTooManyItems = errors.New("bytecblock with too many items")
-func parseBytecBlock(program []byte, pc int) (bytec [][]byte, nextpc int, err error) {
- pos := pc + 1
+func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err error) {
numItems, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
err = fmt.Errorf("could not decode bytecblock size at pc=%d", pos)
@@ -2324,68 +2191,6 @@ func checkByteConstBlock(cx *EvalContext) error {
return nil
}
-func disIntcblock(dis *disassembleState, spec *OpSpec) (string, error) {
- intc, nextpc, err := parseIntcblock(dis.program, dis.pc)
- if err != nil {
- return "", err
- }
- dis.nextpc = nextpc
- out := spec.Name
- for _, iv := range intc {
- dis.intc = append(dis.intc, iv)
- out += fmt.Sprintf(" %d", iv)
- }
- return out, nil
-}
-
-func disIntc(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + spec.Details.Size - 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + spec.Details.Size
- var suffix string
- var b int
- switch spec.Opcode {
- case 0x22:
- suffix = "_0"
- b = 0
- case 0x23:
- suffix = "_1"
- b = 1
- case 0x24:
- suffix = "_2"
- b = 2
- case 0x25:
- suffix = "_3"
- b = 3
- case 0x21:
- b = int(dis.program[dis.pc+1])
- suffix = fmt.Sprintf(" %d", b)
- default:
- return "", fmt.Errorf("disIntc on %v", spec)
- }
- if b < len(dis.intc) {
- return fmt.Sprintf("intc%s // %d", suffix, dis.intc[b]), nil
- }
- return fmt.Sprintf("intc%s", suffix), nil
-}
-
-func disBytecblock(dis *disassembleState, spec *OpSpec) (string, error) {
- bytec, nextpc, err := parseBytecBlock(dis.program, dis.pc)
- if err != nil {
- return "", err
- }
- dis.nextpc = nextpc
- out := spec.Name
- for _, bv := range bytec {
- dis.bytec = append(dis.bytec, bv)
- out += fmt.Sprintf(" 0x%s", hex.EncodeToString(bv))
- }
- return out, nil
-}
-
func allPrintableASCII(bytes []byte) bool {
for _, b := range bytes {
if b < 32 || b > 126 {
@@ -2395,11 +2200,11 @@ func allPrintableASCII(bytes []byte) bool {
return true
}
func guessByteFormat(bytes []byte) string {
- var short basics.Address
+ var addr basics.Address
- if len(bytes) == len(short) {
- copy(short[:], bytes[:])
- return fmt.Sprintf("addr %s", short.String())
+ if len(bytes) == len(addr) {
+ copy(addr[:], bytes[:])
+ return fmt.Sprintf("addr %s", addr)
}
if allPrintableASCII(bytes) {
return fmt.Sprintf("%#v", string(bytes))
@@ -2407,292 +2212,6 @@ func guessByteFormat(bytes []byte) string {
return "0x" + hex.EncodeToString(bytes)
}
-func disBytec(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + spec.Details.Size - 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + spec.Details.Size
- var suffix string
- var b int
- switch spec.Opcode {
- case 0x28:
- suffix = "_0"
- b = 0
- case 0x29:
- suffix = "_1"
- b = 1
- case 0x2a:
- suffix = "_2"
- b = 2
- case 0x2b:
- suffix = "_3"
- b = 3
- case 0x27:
- b = int(dis.program[dis.pc+1])
- suffix = fmt.Sprintf(" %d", b)
- }
- if b < len(dis.bytec) {
- return fmt.Sprintf("bytec%s // %s", suffix, guessByteFormat(dis.bytec[b])), nil
- }
- return fmt.Sprintf("bytec%s", suffix), nil
-}
-
-func disPushInt(dis *disassembleState, spec *OpSpec) (string, error) {
- pos := dis.pc + 1
- val, bytesUsed := binary.Uvarint(dis.program[pos:])
- if bytesUsed <= 0 {
- return "", fmt.Errorf("could not decode int at pc=%d", pos)
- }
- dis.nextpc = pos + bytesUsed
- return fmt.Sprintf("%s %d", spec.Name, val), nil
-}
-func checkPushInt(cx *EvalContext) error {
- opPushInt(cx)
- return cx.err
-}
-
-func disPushBytes(dis *disassembleState, spec *OpSpec) (string, error) {
- pos := dis.pc + 1
- length, bytesUsed := binary.Uvarint(dis.program[pos:])
- if bytesUsed <= 0 {
- return "", fmt.Errorf("could not decode bytes length at pc=%d", pos)
- }
- pos += bytesUsed
- end := uint64(pos) + length
- if end > uint64(len(dis.program)) || end < uint64(pos) {
- return "", fmt.Errorf("pushbytes too long %d %d", end, pos)
- }
- bytes := dis.program[pos:end]
- dis.nextpc = int(end)
- return fmt.Sprintf("%s 0x%s // %s", spec.Name, hex.EncodeToString(bytes), guessByteFormat(bytes)), nil
-}
-func checkPushBytes(cx *EvalContext) error {
- opPushBytes(cx)
- return cx.err
-}
-
-// This is also used to disassemble gtxns, gtxnsas, txnas, itxn, itxnas
-func disTxn(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- txarg := dis.program[dis.pc+1]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, TxnFieldNames[txarg]), nil
-}
-
-// This is also used to disassemble gtxnsa
-func disTxna(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 2
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 3
- txarg := dis.program[dis.pc+1]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- arrayFieldIdx := dis.program[dis.pc+2]
- return fmt.Sprintf("%s %s %d", spec.Name, TxnFieldNames[txarg], arrayFieldIdx), nil
-}
-
-// disGtxn is also used to disassemble gtxnas, gitxn, gitxnas
-func disGtxn(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 2
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 3
- gi := dis.program[dis.pc+1]
- txarg := dis.program[dis.pc+2]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- return fmt.Sprintf("%s %d %s", spec.Name, gi, TxnFieldNames[txarg]), nil
-}
-
-// disGtxna is also used to disassemble gitxna
-func disGtxna(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 3
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 4
- gi := dis.program[dis.pc+1]
- txarg := dis.program[dis.pc+2]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- arrayFieldIdx := dis.program[dis.pc+3]
- return fmt.Sprintf("%s %d %s %d", spec.Name, gi, TxnFieldNames[txarg], arrayFieldIdx), nil
-}
-
-func disGlobal(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- garg := dis.program[dis.pc+1]
- if int(garg) >= len(GlobalFieldNames) {
- return "", fmt.Errorf("invalid global arg index %d at pc=%d", garg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, GlobalFieldNames[garg]), nil
-}
-
-func disBranch(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 2
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
-
- dis.nextpc = dis.pc + 3
- offset := (uint(dis.program[dis.pc+1]) << 8) | uint(dis.program[dis.pc+2])
- target := int(offset) + dis.pc + 3
- if target > 0xffff {
- target -= 0x10000
- }
- var label string
- if dis.numericTargets {
- label = fmt.Sprintf("%d", target)
- } else {
- if known, ok := dis.pendingLabels[target]; ok {
- label = known
- } else {
- dis.labelCount++
- label = fmt.Sprintf("label%d", dis.labelCount)
- dis.putLabel(label, target)
- }
- }
- return fmt.Sprintf("%s %s", spec.Name, label), nil
-}
-
-func disAssetHolding(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AssetHoldingFieldNames) {
- return "", fmt.Errorf("invalid asset holding arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AssetHoldingFieldNames[arg]), nil
-}
-
-func disAssetParams(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AssetParamsFieldNames) {
- return "", fmt.Errorf("invalid asset params arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AssetParamsFieldNames[arg]), nil
-}
-
-func disAppParams(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AppParamsFieldNames) {
- return "", fmt.Errorf("invalid app params arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AppParamsFieldNames[arg]), nil
-}
-
-func disAcctParams(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AcctParamsFieldNames) {
- return "", fmt.Errorf("invalid acct params arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AcctParamsFieldNames[arg]), nil
-}
-
-func disTxField(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid %s arg index %d at pc=%d", spec.Name, arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, TxnFieldNames[arg]), nil
-}
-
-func disEcdsa(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(EcdsaCurveNames) {
- return "", fmt.Errorf("invalid curve arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, EcdsaCurveNames[arg]), nil
-}
-
-func disBase64Decode(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- b64dArg := dis.program[dis.pc+1]
- if int(b64dArg) >= len(base64EncodingNames) {
- return "", fmt.Errorf("invalid base64_decode arg index %d at pc=%d", b64dArg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, base64EncodingNames[b64dArg]), nil
-}
-
-func disJSONRef(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
-
- jsonRefArg := dis.program[dis.pc+1]
- if int(jsonRefArg) >= len(jsonRefSpecByName) {
- return "", fmt.Errorf("invalid json_ref arg index %d at pc=%d", jsonRefArg, dis.pc)
- }
-
- return fmt.Sprintf("%s %s", spec.Name, jsonRefTypeNames[jsonRefArg]), nil
-}
-
type disInfo struct {
pcOffset []PCOffset
hasStatefulOps bool
@@ -2724,7 +2243,7 @@ func disassembleInstrumented(program []byte, labels map[int]string) (text string
return
}
op := opsByOpcode[version][program[dis.pc]]
- if op.Modes == runModeApplication {
+ if op.Modes == modeApp {
ds.hasStatefulOps = true
}
if op.Name == "" {
@@ -2742,7 +2261,7 @@ func disassembleInstrumented(program []byte, labels map[int]string) (text string
// Actually do the disassembly
var line string
- line, err = op.dis(&dis, &op)
+ line, err = disassemble(&dis, &op)
if err != nil {
return
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index c00247357..609366ed2 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -22,6 +22,7 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -145,6 +146,7 @@ substring 42 99
intc 0
intc 1
substring3
+#pragma typetrack false
bz there2
b there2
there2:
@@ -466,26 +468,33 @@ type Expect struct {
s string
}
-func testMatch(t testing.TB, actual, expected string) {
+func testMatch(t testing.TB, actual, expected string) bool {
t.Helper()
if strings.HasPrefix(expected, "...") && strings.HasSuffix(expected, "...") {
- require.Contains(t, actual, expected[3:len(expected)-3])
+ return assert.Contains(t, actual, expected[3:len(expected)-3])
} else if strings.HasPrefix(expected, "...") {
- require.Contains(t, actual+"^", expected[3:]+"^")
+ return assert.Contains(t, actual+"^", expected[3:]+"^")
} else if strings.HasSuffix(expected, "...") {
- require.Contains(t, "^"+actual, "^"+expected[:len(expected)-3])
+ return assert.Contains(t, "^"+actual, "^"+expected[:len(expected)-3])
} else {
- require.Equal(t, expected, actual)
+ return assert.Equal(t, expected, actual)
}
}
+func assemblyTrace(text string, ver uint64) string {
+ ops := newOpStream(ver)
+ ops.Trace = &strings.Builder{}
+ ops.assemble(text)
+ return ops.Trace.String()
+}
+
func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpStream {
t.Helper()
program := strings.ReplaceAll(source, ";", "\n")
ops, err := AssembleStringWithVersion(program, ver)
if len(expected) == 0 {
if len(ops.Errors) > 0 || err != nil || ops == nil || ops.Program == nil {
- t.Log(program)
+ t.Log(assemblyTrace(program, ver))
}
require.Empty(t, ops.Errors)
require.NoError(t, err)
@@ -497,7 +506,7 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
// And, while the disassembly may not match input
// exactly, the assembly of the disassembly should
// give the same bytecode
- ops2, err := AssembleStringWithVersion(dis, ver)
+ ops2, err := AssembleStringWithVersion(notrack(dis), ver)
if len(ops2.Errors) > 0 || err != nil || ops2 == nil || ops2.Program == nil {
t.Log(program)
t.Log(dis)
@@ -515,24 +524,34 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
if exp.l == 0 {
// line 0 means: "must match all"
require.Len(t, expected, 1)
+ fail := false
for _, err := range errors {
msg := err.Unwrap().Error()
- testMatch(t, msg, exp.s)
+ if !testMatch(t, msg, exp.s) {
+ fail = true
+ }
+ }
+ if fail {
+ t.Log(assemblyTrace(program, ver))
+ t.FailNow()
}
} else {
var found *lineError
for _, err := range errors {
if err.Line == exp.l {
- found = err
+ found = &err
break
}
}
if found == nil {
t.Log(fmt.Sprintf("Errors: %v", errors))
}
- require.NotNil(t, found, "No error on line %d", exp.l)
+ require.NotNil(t, found, "Error %s was not found on line %d", exp.s, exp.l)
msg := found.Unwrap().Error()
- testMatch(t, msg, exp.s)
+ if !testMatch(t, msg, exp.s) {
+ t.Log(assemblyTrace(program, ver))
+ t.FailNow()
+ }
}
}
require.Nil(t, ops.Program)
@@ -556,40 +575,46 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
func TestAssembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
- testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
- testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
- testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna found scalar field \"Sender\"...")
- testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
- testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
- testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna transaction index beyond 255: 256")
- testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna found scalar field \"Sender\"...")
- testLine(t, "txn Accounts 0", 1, "txn expects one argument")
- testLine(t, "txn Accounts 0 1", 2, "txn expects one or two arguments")
- testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects two immediate arguments")
- testLine(t, "txnas Accounts 1", AssemblerMaxVersion, "txnas expects one immediate argument")
+ testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna i beyond 255: 256")
+ testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna i beyond 255: 256")
+ testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: \"Sender\"")
+ testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna i beyond 255: 256")
+ testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna i beyond 255: 256")
+ testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna t beyond 255: 256")
+ testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: \"Sender\"")
+ testLine(t, "txn Accounts 0", 1, "txn expects 1 immediate argument")
+ testLine(t, "txn Accounts 0 1", 2, "txn expects 1 or 2 immediate arguments")
+ testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects 2 immediate arguments")
+ testLine(t, "txnas Accounts 1", AssemblerMaxVersion, "txnas expects 1 immediate argument")
testLine(t, "txna Accounts a", AssemblerMaxVersion, "txna unable to parse...")
- testLine(t, "gtxn 0 Sender 0", 1, "gtxn expects two arguments")
- testLine(t, "gtxn 0 Sender 1 2", 2, "gtxn expects two or three arguments")
- testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects three arguments")
+ testLine(t, "gtxn 0 Sender 0", 1, "gtxn expects 2 immediate arguments")
+ testLine(t, "gtxn 0 Sender 1 2", 2, "gtxn expects 2 or 3 immediate arguments")
+ testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects 3 immediate arguments")
testLine(t, "gtxna a Accounts 0", AssemblerMaxVersion, "gtxna unable to parse...")
testLine(t, "gtxna 0 Accounts a", AssemblerMaxVersion, "gtxna unable to parse...")
- testLine(t, "gtxnas Accounts 1 2", AssemblerMaxVersion, "gtxnas expects two immediate arguments")
+ testLine(t, "gtxnas Accounts 1 2", AssemblerMaxVersion, "gtxnas expects 2 immediate arguments")
testLine(t, "txn ABC", 2, "txn unknown field: \"ABC\"")
testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: \"ABC\"")
testLine(t, "gtxn a ABC", 2, "gtxn unable to parse...")
- testLine(t, "txn Accounts", AssemblerMaxVersion, "txn found array field \"Accounts\"...")
- testLine(t, "txn Accounts", 1, "txn found array field \"Accounts\"...")
+ testLine(t, "txn Accounts", 1, "txn unknown field: \"Accounts\"")
+ testLine(t, "txn Accounts", AssemblerMaxVersion, "txn unknown field: \"Accounts\"")
testLine(t, "txn Accounts 0", AssemblerMaxVersion, "")
- testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "gtxn found array field \"Accounts\"...")
- testLine(t, "gtxn 0 Accounts", 1, "gtxn found array field \"Accounts\"...")
+ testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "gtxn unknown field: \"Accounts\"...")
+ testLine(t, "gtxn 0 Accounts", 1, "gtxn unknown field: \"Accounts\"")
testLine(t, "gtxn 0 Accounts 1", AssemblerMaxVersion, "")
}
func TestAssembleGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
- testLine(t, "global", AssemblerMaxVersion, "global expects one argument")
+ testLine(t, "global", AssemblerMaxVersion, "global expects 1 immediate argument")
testLine(t, "global a", AssemblerMaxVersion, "global unknown field: \"a\"")
+ testProg(t, "global MinTxnFee; int 2; +", AssemblerMaxVersion)
+ testProg(t, "global ZeroAddress; byte 0x12; concat; len", AssemblerMaxVersion)
+ testProg(t, "global MinTxnFee; byte 0x12; concat", AssemblerMaxVersion,
+ Expect{3, "concat arg 0 wanted type []byte..."})
+ testProg(t, "int 2; global ZeroAddress; +", AssemblerMaxVersion,
+ Expect{3, "+ arg 1 wanted type uint64..."})
}
func TestAssembleDefault(t *testing.T) {
@@ -613,7 +638,7 @@ func TestOpUint(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops := OpStream{Version: v}
+ ops := newOpStream(v)
ops.Uint(0xcafebabe)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
@@ -631,7 +656,7 @@ func TestOpUint64(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
t.Parallel()
- ops := OpStream{Version: v}
+ ops := newOpStream(v)
ops.Uint(0xcafebabecafebabe)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
@@ -647,7 +672,7 @@ func TestOpBytes(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops := OpStream{Version: v}
+ ops := newOpStream(v)
ops.ByteLiteral([]byte("abcdef"))
prog := ops.prependCBlocks()
require.NotNil(t, prog)
@@ -1453,10 +1478,10 @@ func TestAssembleDisassembleCycle(t *testing.T) {
ops := testProg(t, source, v)
t2, err := Disassemble(ops.Program)
require.NoError(t, err)
- none := testProg(t, t2, assemblerNoVersion)
+ none := testProg(t, notrack(t2), assemblerNoVersion)
require.Equal(t, ops.Program[1:], none.Program[1:])
t3 := "// " + t2 // This comments out the #pragma version
- current := testProg(t, t3, AssemblerMaxVersion)
+ current := testProg(t, notrack(t3), AssemblerMaxVersion)
require.Equal(t, ops.Program[1:], current.Program[1:])
})
}
@@ -1522,7 +1547,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[2] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for txn")
source = `txna Accounts 0`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1530,7 +1555,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[2] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for txna")
source = `gtxn 0 Sender`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1538,7 +1563,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[3] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for gtxn")
source = `gtxna 0 Accounts 0`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1546,7 +1571,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[3] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for gtxna")
source = `global MinTxnFee`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1554,7 +1579,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[2] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid global arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for global")
ops.Program[0] = 0x11 // version
out, err := Disassemble(ops.Program)
@@ -1573,7 +1598,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[7] = 0x50 // holding field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid asset holding arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for asset_holding_get")
source = "int 0\nasset_params_get AssetTotal"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1581,7 +1606,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[4] = 0x50 // params field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid asset params arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for asset_params_get")
source = "int 0\nasset_params_get AssetTotal"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1591,17 +1616,22 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program = ops.Program[0 : len(ops.Program)-1]
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected asset_params_get opcode end: missing 1 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate f for asset_params_get")
source = "gtxna 0 Accounts 0"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
_, err = Disassemble(ops.Program)
require.NoError(t, err)
- ops.Program = ops.Program[0 : len(ops.Program)-2]
- _, err = Disassemble(ops.Program)
+ _, err = Disassemble(ops.Program[0 : len(ops.Program)-1])
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "program end while reading immediate i for gtxna")
+ _, err = Disassemble(ops.Program[0 : len(ops.Program)-2])
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "program end while reading immediate f for gtxna")
+ _, err = Disassemble(ops.Program[0 : len(ops.Program)-3])
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected gtxna opcode end: missing 2 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate t for gtxna")
source = "txna Accounts 0"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1611,7 +1641,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program = ops.Program[0 : len(ops.Program)-1]
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected txna opcode end: missing 1 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate i for txna")
source = "byte 0x4141\nsubstring 0 1"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1621,7 +1651,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program = ops.Program[0 : len(ops.Program)-1]
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected substring opcode end: missing 1 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate e for substring")
}
func TestAssembleVersions(t *testing.T) {
@@ -1671,21 +1701,28 @@ func TestAssembleAsset(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- introduction := OpsByName[LogicVersion]["asset_holding_get"].Version
- for v := introduction; v <= AssemblerMaxVersion; v++ {
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
testProg(t, "asset_holding_get ABC 1", v,
Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; asset_holding_get ABC 1", v,
Expect{2, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; int 1; asset_holding_get ABC 1", v,
- Expect{3, "asset_holding_get expects one argument"})
+ Expect{3, "asset_holding_get expects 1 immediate argument"})
testProg(t, "int 1; int 1; asset_holding_get ABC", v,
Expect{3, "asset_holding_get unknown field: \"ABC\""})
testProg(t, "byte 0x1234; asset_params_get ABC 1", v,
Expect{2, "asset_params_get ABC 1 arg 0 wanted type uint64..."})
- testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects one argument")
+ // Test that AssetUnitName is known to return bytes
+ testProg(t, "int 1; asset_params_get AssetUnitName; pop; int 1; +", v,
+ Expect{5, "+ arg 0 wanted type uint64..."})
+
+ // Test that AssetTotal is known to return uint64
+ testProg(t, "int 1; asset_params_get AssetTotal; pop; byte 0x12; concat", v,
+ Expect{5, "concat arg 0 wanted type []byte..."})
+
+ testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects 1 immediate argument")
testLine(t, "asset_params_get ABC", v, "asset_params_get unknown field: \"ABC\"")
}
}
@@ -2198,7 +2235,7 @@ func TestErrShortBytecblock(t *testing.T) {
text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8`
ops, err := AssembleStringWithVersion(text, 1)
require.NoError(t, err)
- _, _, err = parseIntcblock(ops.Program, 0)
+ _, _, err = parseIntcblock(ops.Program, 1)
require.Equal(t, err, errShortIntcblock)
var cx EvalContext
@@ -2207,6 +2244,52 @@ func TestErrShortBytecblock(t *testing.T) {
require.Equal(t, err, errShortIntcblock)
}
+func TestMethodWarning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ tests := []struct {
+ method string
+ pass bool
+ }{
+ {
+ method: "abc(uint64)void",
+ pass: true,
+ },
+ {
+ method: "abc(uint64)",
+ pass: false,
+ },
+ {
+ method: "abc(uint65)void",
+ pass: false,
+ },
+ {
+ method: "(uint64)void",
+ pass: false,
+ },
+ {
+ method: "abc(uint65,void",
+ pass: false,
+ },
+ }
+
+ for _, test := range tests {
+ for v := uint64(1); v <= AssemblerMaxVersion; v++ {
+ src := fmt.Sprintf("method \"%s\"\nint 1", test.method)
+ ops, err := AssembleStringWithVersion(src, v)
+ require.NoError(t, err)
+
+ if test.pass {
+ require.Len(t, ops.Warnings, 0)
+ continue
+ }
+
+ require.Len(t, ops.Warnings, 1)
+ require.Contains(t, ops.Warnings[0].Error(), "Invalid ARC-4 ABI method signature for method op")
+ }
+ }
+}
+
func TestBranchAssemblyTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -2219,9 +2302,8 @@ func TestBranchAssemblyTypeCheck(t *testing.T) {
btoi // [n]
`
- sr := strings.NewReader(text)
- ops := OpStream{Version: AssemblerMaxVersion}
- err := ops.assemble(sr)
+ ops := newOpStream(AssemblerMaxVersion)
+ err := ops.assemble(text)
require.NoError(t, err)
require.Empty(t, ops.Warnings)
@@ -2235,9 +2317,8 @@ flip: // [x]
btoi // [n]
`
- sr = strings.NewReader(text)
- ops = OpStream{Version: AssemblerMaxVersion}
- err = ops.assemble(sr)
+ ops = newOpStream(AssemblerMaxVersion)
+ err = ops.assemble(text)
require.NoError(t, err)
require.Empty(t, ops.Warnings)
}
@@ -2318,6 +2399,7 @@ func TestCoverAsm(t *testing.T) {
testProg(t, `int 4; byte "ayush"; int 5; cover 1; pop; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, Expect{5, "+ arg 1..."})
+ testProg(t, `int 4; cover junk`, AssemblerMaxVersion, Expect{2, "cover unable to parse n ..."})
}
func TestUncoverAsm(t *testing.T) {
@@ -2330,6 +2412,8 @@ func TestUncoverAsm(t *testing.T) {
}
func TestTxTypes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
testProg(t, "itxn_begin; itxn_field Sender", 5, Expect{2, "itxn_field Sender expects 1 stack argument..."})
testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, Expect{3, "...wanted type []byte got uint64"})
testProg(t, "itxn_begin; byte 0x56127823; itxn_field Sender", 5)
@@ -2338,3 +2422,106 @@ func TestTxTypes(t *testing.T) {
testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, Expect{3, "...wanted type uint64 got []byte"})
testProg(t, "itxn_begin; int 1; itxn_field Amount", 5)
}
+
+func TestBadInnerFields(t *testing.T) {
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{3, "...Note field was introduced in TEAL v6..."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{3, "...VotePK field was introduced in TEAL v6..."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{4, "...is not allowed."})
+
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 6)
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 6)
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, Expect{4, "...is not allowed."})
+}
+
+func TestTypeTracking(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ testProg(t, "+", LogicVersion, Expect{1, "+ expects 2 stack arguments..."})
+
+ // hitting a label in deadcode starts analyzing again, with unknown stack
+ testProg(t, "b end; label: +; end: b label", LogicVersion)
+
+ // callsub also wipes our stack knowledge, this tests shows why: it's properly typed
+ testProg(t, "callsub A; +; return; A: int 1; int 2; retsub", LogicVersion)
+
+ // but we do want to ensure we're not just treating the code after callsub as dead
+ testProg(t, "callsub A; int 1; concat; return; A: int 1; int 2; retsub", LogicVersion,
+ Expect{3, "concat arg 1 wanted..."})
+
+ // retsub deadens code, like any unconditional branch
+ testProg(t, "callsub A; +; return; A: int 1; int 2; retsub; concat", LogicVersion)
+
+ // Branching would have confused the old analysis, but the problem is local
+ // to a basic block, so it makes sense to report it.
+ testProg(t, `
+ int 1
+ b confusion
+label:
+ byte "john" // detectable mistake
+ int 2
+ +
+confusion:
+ b label
+`, LogicVersion, Expect{7, "+ arg 0 wanted type uint64..."})
+
+ // Unless that same error is in dead code.
+ testProg(t, `
+ int 1
+ b confusion
+label:
+ err // deadens the apparent error at +
+ byte "john"
+ int 2
+ +
+confusion:
+ b label
+`, LogicVersion)
+
+ // Unconditional branches also deaden
+ testProg(t, `
+ int 1
+ b confusion
+label:
+ b done // deadens the apparent error at +
+ byte "john"
+ int 2
+ +
+confusion:
+ b label
+done:
+`, LogicVersion)
+
+ // Turning type tracking off and then back on, allows any follow-on code.
+ testProg(t, `
+ int 1
+ int 2
+#pragma typetrack false
+ concat
+`, LogicVersion)
+
+ testProg(t, `
+ int 1
+ int 2
+#pragma typetrack false
+ concat
+#pragma typetrack true
+ concat
+`, LogicVersion)
+
+ // Declaring type tracking on consecutively does _not_ reset type tracking state.
+ testProg(t, `
+ int 1
+ int 2
+#pragma typetrack true
+ concat
+#pragma typetrack true
+ concat
+`, LogicVersion, Expect{5, "concat arg 1 wanted type []byte..."})
+}
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index b5917219d..e2503e887 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -360,9 +360,9 @@ func TestBackwardCompatGlobalFields(t *testing.T) {
for _, field := range fields {
text := fmt.Sprintf("global %s", field.field.String())
// check assembler fails if version before introduction
- testLine(t, text, assemblerNoVersion, "...available in version...")
+ testLine(t, text, assemblerNoVersion, "...was introduced in...")
for v := uint64(0); v < field.version; v++ {
- testLine(t, text, v, "...available in version...")
+ testLine(t, text, v, "...was introduced in...")
}
ops := testProg(t, text, AssemblerMaxVersion)
@@ -410,13 +410,13 @@ func TestBackwardCompatTxnFields(t *testing.T) {
field := fs.field.String()
for _, command := range tests {
text := fmt.Sprintf(command, field)
- asmError := "...available in version ..."
+ asmError := "...was introduced in ..."
if fs.array {
parts := strings.Split(text, " ")
op := parts[0]
- asmError = fmt.Sprintf("%s found array field %#v while expecting scalar", op, field)
+ asmError = fmt.Sprintf("%s unknown field: %#v", op, field)
}
- // check assembler fails if version before introduction
+ // check assembler fails in versions before introduction
testLine(t, text, assemblerNoVersion, asmError)
for v := uint64(0); v < fs.version; v++ {
testLine(t, text, v, asmError)
@@ -425,7 +425,7 @@ func TestBackwardCompatTxnFields(t *testing.T) {
ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
if fs.array {
// "txn Accounts" is invalid, so skip evaluation
- require.Error(t, err, asmError)
+ require.Error(t, err)
continue
} else {
require.NoError(t, err)
@@ -488,8 +488,8 @@ func TestBackwardCompatAssemble(t *testing.T) {
func TestExplicitConstants(t *testing.T) {
partitiontest.PartitionTest(t)
- require.Equal(t, 4096, MaxStringSize, "constant changed, move it to consensus params")
- require.Equal(t, 64, MaxByteMathSize, "constant changed, move it to consensus params")
- require.Equal(t, 1024, MaxLogSize, "constant changed, move it to consensus params")
- require.Equal(t, 32, MaxLogCalls, "constant changed, move it to consensus params")
+ require.Equal(t, 4096, maxStringSize, "constant changed, make it version dependent")
+ require.Equal(t, 64, maxByteMathSize, "constant changed, move it version dependent")
+ require.Equal(t, 1024, maxLogSize, "constant changed, move it version dependent")
+ require.Equal(t, 32, maxLogCalls, "constant changed, move it version dependent")
}
diff --git a/data/transactions/logic/debugger.go b/data/transactions/logic/debugger.go
index ffcd8be81..cae8f7111 100644
--- a/data/transactions/logic/debugger.go
+++ b/data/transactions/logic/debugger.go
@@ -56,6 +56,13 @@ type PCOffset struct {
Offset int `codec:"offset"`
}
+// CallFrame stores the label name and the line of the subroutine.
+// An array of CallFrames form the CallStack.
+type CallFrame struct {
+ FrameLine int `codec:"frameLine"`
+ LabelName string `codec:"labelname"`
+}
+
// DebugState is a representation of the evaluation context that we encode
// to json and send to tealdbg
type DebugState struct {
@@ -75,6 +82,7 @@ type DebugState struct {
Scratch []basics.TealValue `codec:"scratch"`
Error string `codec:"error"`
OpcodeBudget int `codec:"budget"`
+ CallStack []CallFrame `codec:"callstack"`
// global/local state changes are updated every step. Stateful TEAL only.
transactions.EvalDelta
@@ -87,7 +95,7 @@ func GetProgramID(program []byte) string {
return hex.EncodeToString(hash[:])
}
-func makeDebugState(cx *EvalContext) DebugState {
+func makeDebugState(cx *EvalContext) *DebugState {
disasm, dsInfo, err := disassembleInstrumented(cx.program, nil)
if err != nil {
// Report disassembly error as program text
@@ -95,11 +103,11 @@ func makeDebugState(cx *EvalContext) DebugState {
}
// initialize DebuggerState with immutable fields
- ds := DebugState{
+ ds := &DebugState{
ExecID: GetProgramID(cx.program),
Disassembly: disasm,
PCOffset: dsInfo.pcOffset,
- GroupIndex: int(cx.GroupIndex),
+ GroupIndex: int(cx.groupIndex),
TxnGroup: cx.TxnGroup,
Proto: cx.Proto,
}
@@ -107,7 +115,7 @@ func makeDebugState(cx *EvalContext) DebugState {
globals := make([]basics.TealValue, len(globalFieldSpecs))
for _, fs := range globalFieldSpecs {
// Don't try to grab app only fields when evaluating a signature
- if (cx.runModeFlags&runModeSignature) != 0 && fs.mode == runModeApplication {
+ if (cx.runModeFlags&modeSig) != 0 && fs.mode == modeApp {
continue
}
sv, err := cx.globalFieldToValue(fs)
@@ -118,8 +126,8 @@ func makeDebugState(cx *EvalContext) DebugState {
}
ds.Globals = globals
- if (cx.runModeFlags & runModeApplication) != 0 {
- ds.EvalDelta = cx.Txn.EvalDelta
+ if (cx.runModeFlags & modeApp) != 0 {
+ ds.EvalDelta = cx.txn.EvalDelta
}
return ds
@@ -192,14 +200,36 @@ func valueDeltaToValueDelta(vd *basics.ValueDelta) basics.ValueDelta {
}
}
-func (cx *EvalContext) refreshDebugState() *DebugState {
- ds := &cx.debugState
+// parseCallStack initializes an array of CallFrame objects from the raw
+// callstack.
+func (d *DebugState) parseCallstack(callstack []int) []CallFrame {
+ callFrames := make([]CallFrame, 0)
+ lines := strings.Split(d.Disassembly, "\n")
+ for _, pc := range callstack {
+ // The callsub is pc - 3 from the callstack pc
+ callsubLineNum := d.PCToLine(pc - 3)
+ callSubLine := strings.Fields(lines[callsubLineNum])
+ label := ""
+ if callSubLine[0] == "callsub" {
+ label = callSubLine[1]
+ }
+ callFrames = append(callFrames, CallFrame{
+ FrameLine: callsubLineNum,
+ LabelName: label,
+ })
+ }
+ return callFrames
+}
+
+func (cx *EvalContext) refreshDebugState(evalError error) *DebugState {
+ ds := cx.debugState
- // Update pc, line, error, stack, and scratch space
+ // Update pc, line, error, stack, scratch space, callstack,
+ // and opcode budget
ds.PC = cx.pc
ds.Line = ds.PCToLine(cx.pc)
- if cx.err != nil {
- ds.Error = cx.err.Error()
+ if evalError != nil {
+ ds.Error = evalError.Error()
}
stack := make([]basics.TealValue, len(cx.stack))
@@ -215,9 +245,10 @@ func (cx *EvalContext) refreshDebugState() *DebugState {
ds.Stack = stack
ds.Scratch = scratch
ds.OpcodeBudget = cx.remainingBudget()
+ ds.CallStack = ds.parseCallstack(cx.callstack)
- if (cx.runModeFlags & runModeApplication) != 0 {
- ds.EvalDelta = cx.Txn.EvalDelta
+ if (cx.runModeFlags & modeApp) != 0 {
+ ds.EvalDelta = cx.txn.EvalDelta
}
return ds
diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go
index 80e4a639a..060b953fc 100644
--- a/data/transactions/logic/debugger_test.go
+++ b/data/transactions/logic/debugger_test.go
@@ -174,3 +174,62 @@ func TestValueDeltaToValueDelta(t *testing.T) {
require.Equal(t, base64.StdEncoding.EncodeToString([]byte(vDelta.Bytes)), ans.Bytes)
require.Equal(t, vDelta.Uint, ans.Uint)
}
+
+var testCallStackProgram string = `intcblock 1
+callsub label1
+intc_0
+label1:
+callsub label2
+label2:
+intc_0
+`
+
+func TestParseCallstack(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ expectedCallFrames := []CallFrame{
+ {
+ FrameLine: 1,
+ LabelName: "label1",
+ },
+ {
+ FrameLine: 4,
+ LabelName: "label2",
+ },
+ }
+
+ dState := DebugState{
+ Disassembly: testCallStackProgram,
+ PCOffset: []PCOffset{{PC: 1, Offset: 18}, {PC: 4, Offset: 30}, {PC: 7, Offset: 45}, {PC: 8, Offset: 65}, {PC: 11, Offset: 88}},
+ }
+ callstack := []int{4, 8}
+
+ cfs := dState.parseCallstack(callstack)
+ require.Equal(t, expectedCallFrames, cfs)
+}
+
+func TestCallStackUpdate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ expectedCallFrames := []CallFrame{
+ {
+ FrameLine: 2,
+ LabelName: "label1",
+ },
+ {
+ FrameLine: 5,
+ LabelName: "label2",
+ },
+ }
+
+ testDbg := testDbgHook{}
+ ep := defaultEvalParams(nil)
+ ep.Debugger = &testDbg
+ testLogic(t, testCallStackProgram, AssemblerMaxVersion, ep)
+
+ require.Equal(t, 1, testDbg.register)
+ require.Equal(t, 1, testDbg.complete)
+ require.Greater(t, testDbg.update, 1)
+ require.Len(t, testDbg.state.Stack, 1)
+ require.Equal(t, testDbg.state.CallStack, expectedCallFrames)
+}
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 4cc312e6a..fdad62548 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -47,8 +47,8 @@ var opDocByName = map[string]string{
"!=": "A is not equal to B => {0 or 1}",
"!": "A == 0 yields 1; else 0",
"len": "yields length of byte value A",
- "itob": "converts uint64 A to big endian bytes",
- "btoi": "converts bytes A as big endian to uint64",
+ "itob": "converts uint64 A to big-endian byte array, always of length 8",
+ "btoi": "converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8.",
"%": "A modulo B. Fail if B == 0.",
"|": "A bitwise-or B",
"&": "A bitwise-and B",
@@ -132,10 +132,10 @@ var opDocByName = map[string]string{
"concat": "join A and B",
"substring": "A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails",
"substring3": "A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails",
- "getbit": "Bth bit of (byte-array or integer) A.",
- "setbit": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C",
- "getbyte": "Bth byte of A, as an integer",
- "setbyte": "Copy of A with the Bth byte set to small integer (between 0..255) C",
+ "getbit": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "setbit": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "getbyte": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
+ "setbyte": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"extract": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
"extract3": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
"extract_uint16": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
@@ -213,10 +213,10 @@ var opcodeImmediateNotes = map[string]string{
"gtxnas": "{uint8 transaction group index} {uint8 transaction field index}",
"gtxnsas": "{uint8 transaction field index}",
- "bnz": "{int16 branch offset, big endian}",
- "bz": "{int16 branch offset, big endian}",
- "b": "{int16 branch offset, big endian}",
- "callsub": "{int16 branch offset, big endian}",
+ "bnz": "{int16 branch offset, big-endian}",
+ "bz": "{int16 branch offset, big-endian}",
+ "b": "{int16 branch offset, big-endian}",
+ "callsub": "{int16 branch offset, big-endian}",
"load": "{uint8 position in scratch space to load from}",
"store": "{uint8 position in scratch space to store to}",
@@ -256,21 +256,6 @@ func OpImmediateNote(opName string) string {
return opcodeImmediateNotes[opName]
}
-var opcodeSpecialStackEffects = map[string]string{
- "dup": "..., A &rarr; ..., A, A",
- "dup2": "..., A, B &rarr; ..., A, B, A, B",
- "dig": "..., A, [N items] &rarr; ..., A, [N items], A",
- "swap": "..., A, B &rarr; ..., B, A",
- "select": "..., A, B, C &rarr; ..., A or B",
- "cover": "..., [N items], A &rarr; ..., A, [N items]",
- "uncover": "..., A, [N items] &rarr; ..., [N items], A",
-}
-
-// OpStackEffects returns a "stack pattern" for opcodes that do not have a derivable effect
-func OpStackEffects(opName string) string {
- return opcodeSpecialStackEffects[opName]
-}
-
// further documentation on the function of the opcode
var opDocExtras = map[string]string{
"ed25519verify": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
@@ -344,28 +329,30 @@ var OpGroups = map[string][]string{
"Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna", "itxnas", "gitxn", "gitxna", "gitxnas"},
}
-// OpCost indicates the cost of an operation over the range of
+// VerCost indicates the cost of an operation over the range of
// LogicVersions from From to To.
-type OpCost struct {
+type VerCost struct {
From int
To int
- Cost int
+ // Cost is a human readable string to describe costs. Simple opcodes are
+ // just an integer, but some opcodes have field or stack dependencies.
+ Cost string
}
-// OpAllCosts returns an array of the cost score for an op by version.
-// Each entry indicates the cost over a range of versions, so if the
-// cost has remained constant, there is only one result, otherwise
-// each entry shows the cost for a consecutive range of versions,
-// inclusive.
-func OpAllCosts(opName string) []OpCost {
- var costs []OpCost
+// OpAllCosts returns an array of the cost of an op by version. Each entry
+// indicates the cost over a range of versions, so if the cost has remained
+// constant, there is only one result, otherwise each entry shows the cost for a
+// consecutive range of versions, inclusive.
+func OpAllCosts(opName string) []VerCost {
+ var costs []VerCost
for v := 1; v <= LogicVersion; v++ {
- cost := OpsByName[v][opName].Details.Cost
- if cost == 0 {
+ spec, ok := OpsByName[v][opName]
+ if !ok {
continue
}
+ cost := spec.OpDetails.docCost()
if costs == nil || cost != costs[len(costs)-1].Cost {
- costs = append(costs, OpCost{v, v, cost})
+ costs = append(costs, VerCost{v, v, cost})
} else {
costs[len(costs)-1].To = v
}
@@ -408,99 +395,6 @@ func OnCompletionDescription(value uint64) string {
// OnCompletionPreamble describes what the OnCompletion constants represent.
const OnCompletionPreamble = "An application transaction must indicate the action to be taken following the execution of its approvalProgram or clearStateProgram. The constants below describe the available actions."
-var txnFieldDocs = map[string]string{
- "Type": "Transaction type as bytes",
- "TypeEnum": "See table below",
- "Sender": "32 byte address",
- "Fee": "microalgos",
- "FirstValid": "round number",
- "FirstValidTime": "Causes program to fail; reserved for future use",
- "LastValid": "round number",
- "Note": "Any data up to 1024 bytes",
- "Lease": "32 byte lease value",
- "RekeyTo": "32 byte Sender's new AuthAddr",
-
- "GroupIndex": "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1",
- "TxID": "The computed ID for this transaction. 32 bytes.",
-
- "Receiver": "32 byte address",
- "Amount": "microalgos",
- "CloseRemainderTo": "32 byte address",
-
- "VotePK": "32 byte address",
- "SelectionPK": "32 byte address",
- "StateProofPK": "64 byte state proof public key commitment",
- "VoteFirst": "The first round that the participation key is valid.",
- "VoteLast": "The last round that the participation key is valid.",
- "VoteKeyDilution": "Dilution for the 2-level participation key",
- "Nonparticipation": "Marks an account nonparticipating for rewards",
-
- "XferAsset": "Asset ID",
- "AssetAmount": "value in Asset's units",
- "AssetSender": "32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset.",
- "AssetReceiver": "32 byte address",
- "AssetCloseTo": "32 byte address",
-
- "ApplicationID": "ApplicationID from ApplicationCall transaction",
- "OnCompletion": "ApplicationCall transaction on completion action",
- "ApplicationArgs": "Arguments passed to the application in the ApplicationCall transaction",
- "NumAppArgs": "Number of ApplicationArgs",
- "Accounts": "Accounts listed in the ApplicationCall transaction",
- "NumAccounts": "Number of Accounts",
- "Assets": "Foreign Assets listed in the ApplicationCall transaction",
- "NumAssets": "Number of Assets",
- "Applications": "Foreign Apps listed in the ApplicationCall transaction",
- "NumApplications": "Number of Applications",
- "GlobalNumUint": "Number of global state integers in ApplicationCall",
- "GlobalNumByteSlice": "Number of global state byteslices in ApplicationCall",
- "LocalNumUint": "Number of local state integers in ApplicationCall",
- "LocalNumByteSlice": "Number of local state byteslices in ApplicationCall",
- "ApprovalProgram": "Approval program",
- "ClearStateProgram": "Clear state program",
- "ExtraProgramPages": "Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program.",
-
- "ConfigAsset": "Asset ID in asset config transaction",
- "ConfigAssetTotal": "Total number of units of this asset created",
- "ConfigAssetDecimals": "Number of digits to display after the decimal place when displaying the asset",
- "ConfigAssetDefaultFrozen": "Whether the asset's slots are frozen by default or not, 0 or 1",
- "ConfigAssetUnitName": "Unit name of the asset",
- "ConfigAssetName": "The asset name",
- "ConfigAssetURL": "URL",
- "ConfigAssetMetadataHash": "32 byte commitment to some unspecified asset metadata",
- "ConfigAssetManager": "32 byte address",
- "ConfigAssetReserve": "32 byte address",
- "ConfigAssetFreeze": "32 byte address",
- "ConfigAssetClawback": "32 byte address",
-
- "FreezeAsset": "Asset ID being frozen or un-frozen",
- "FreezeAssetAccount": "32 byte address of the account whose asset slot is being frozen or un-frozen",
- "FreezeAssetFrozen": "The new frozen value, 0 or 1",
-
- "Logs": "Log messages emitted by an application call (only with `itxn` in v5)",
- "NumLogs": "Number of Logs (only with `itxn` in v5)",
- "LastLog": "The last message emitted. Empty bytes if none were emitted",
- "CreatedAssetID": "Asset ID allocated by the creation of an ASA (only with `itxn` in v5)",
- "CreatedApplicationID": "ApplicationID allocated by the creation of an application (only with `itxn` in v5)",
-}
-
-var globalFieldDocs = map[string]string{
- "MinTxnFee": "microalgos",
- "MinBalance": "microalgos",
- "MaxTxnLife": "rounds",
- "ZeroAddress": "32 byte address of all zero bytes",
- "GroupSize": "Number of transactions in this atomic transaction group. At least 1",
- "LogicSigVersion": "Maximum supported version",
- "Round": "Current round number",
- "LatestTimestamp": "Last confirmed block UNIX timestamp. Fails if negative",
- "CurrentApplicationID": "ID of current application executing",
- "CreatorAddress": "Address of the creator of the current application",
- "CurrentApplicationAddress": "Address that the current application controls",
- "GroupID": "ID of the transaction group. 32 zero bytes if the transaction is not part of a group.",
- "OpcodeBudget": "The remaining cost that can be spent by opcodes in this program.",
- "CallerApplicationID": "The application ID of the application that called this application. 0 if this application is at the top-level.",
- "CallerApplicationAddress": "The application address of the application that called this application. ZeroAddress if this application is at the top-level.",
-}
-
func addExtra(original string, extra string) string {
if len(original) == 0 {
return extra
@@ -514,51 +408,3 @@ func addExtra(original string, extra string) string {
}
return original + sep + extra
}
-
-// AssetHoldingFieldDocs are notes on fields available in `asset_holding_get`
-var assetHoldingFieldDocs = map[string]string{
- "AssetBalance": "Amount of the asset unit held by this account",
- "AssetFrozen": "Is the asset frozen or not",
-}
-
-// assetParamsFieldDocs are notes on fields available in `asset_params_get`
-var assetParamsFieldDocs = map[string]string{
- "AssetTotal": "Total number of units of this asset",
- "AssetDecimals": "See AssetParams.Decimals",
- "AssetDefaultFrozen": "Frozen by default or not",
- "AssetUnitName": "Asset unit name",
- "AssetName": "Asset name",
- "AssetURL": "URL with additional info about the asset",
- "AssetMetadataHash": "Arbitrary commitment",
- "AssetManager": "Manager commitment",
- "AssetReserve": "Reserve address",
- "AssetFreeze": "Freeze address",
- "AssetClawback": "Clawback address",
- "AssetCreator": "Creator address",
-}
-
-// appParamsFieldDocs are notes on fields available in `app_params_get`
-var appParamsFieldDocs = map[string]string{
- "AppApprovalProgram": "Bytecode of Approval Program",
- "AppClearStateProgram": "Bytecode of Clear State Program",
- "AppGlobalNumUint": "Number of uint64 values allowed in Global State",
- "AppGlobalNumByteSlice": "Number of byte array values allowed in Global State",
- "AppLocalNumUint": "Number of uint64 values allowed in Local State",
- "AppLocalNumByteSlice": "Number of byte array values allowed in Local State",
- "AppExtraProgramPages": "Number of Extra Program Pages of code space",
- "AppCreator": "Creator address",
- "AppAddress": "Address for which this application has authority",
-}
-
-// acctParamsFieldDocs are notes on fields available in `app_params_get`
-var acctParamsFieldDocs = map[string]string{
- "AcctBalance": "Account balance in microalgos",
- "AcctMinBalance": "Minimum required blance for account, in microalgos",
- "AcctAuthAddr": "Address the account is rekeyed to.",
-}
-
-// EcdsaCurveDocs are notes on curves available in `ecdsa_` opcodes
-var EcdsaCurveDocs = map[string]string{
- "Secp256k1": "secp256k1 curve",
- "Secp256r1": "secp256r1 curve",
-}
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index 85b97aaad..f270d9162 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -40,15 +40,8 @@ func TestOpDocs(t *testing.T) {
assert.True(t, seen, "opDocByName is missing doc for %#v", op)
}
- require.Len(t, txnFieldDocs, len(TxnFieldNames))
require.Len(t, onCompletionDescriptions, len(OnCompletionNames))
- require.Len(t, globalFieldDocs, len(GlobalFieldNames))
- require.Len(t, assetHoldingFieldDocs, len(AssetHoldingFieldNames))
- require.Len(t, assetParamsFieldDocs, len(AssetParamsFieldNames))
- require.Len(t, appParamsFieldDocs, len(AppParamsFieldNames))
- require.Len(t, acctParamsFieldDocs, len(AcctParamsFieldNames))
require.Len(t, TypeNameDescriptions, len(TxnTypeNames))
- require.Len(t, EcdsaCurveDocs, len(EcdsaCurveNames))
}
// TestDocStragglers confirms that we don't have any docs laying
@@ -112,9 +105,9 @@ func TestAllImmediatesDocumented(t *testing.T) {
partitiontest.PartitionTest(t)
for _, op := range OpSpecs {
- count := len(op.Details.Immediates)
+ count := len(op.OpDetails.Immediates)
note := OpImmediateNote(op.Name)
- if count == 1 && op.Details.Immediates[0].kind >= immBytes {
+ if count == 1 && op.OpDetails.Immediates[0].kind >= immBytes {
// More elaborate than can be checked by easy count.
assert.NotEmpty(t, note)
continue
@@ -137,12 +130,12 @@ func TestOpAllCosts(t *testing.T) {
a := OpAllCosts("+")
require.Len(t, a, 1)
- require.Equal(t, 1, a[0].Cost)
+ require.Equal(t, "1", a[0].Cost)
a = OpAllCosts("sha256")
require.Len(t, a, 2)
for _, cost := range a {
- require.True(t, cost.Cost > 1)
+ require.True(t, cost.Cost != "0")
}
}
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index e889fef95..3a38c5887 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -46,23 +46,23 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-// EvalMaxVersion is the max version we can interpret and run
-const EvalMaxVersion = LogicVersion
+// evalMaxVersion is the max version we can interpret and run
+const evalMaxVersion = LogicVersion
-// The constants below control TEAL opcodes evaluation and MAY NOT be changed
-// without moving them into consensus parameters.
+// The constants below control opcode evaluation and MAY NOT be changed without
+// gating them by version. Old programs need to retain their old behavior.
-// MaxStringSize is the limit of byte string length in an AVM value
-const MaxStringSize = 4096
+// maxStringSize is the limit of byte string length in an AVM value
+const maxStringSize = 4096
-// MaxByteMathSize is the limit of byte strings supplied as input to byte math opcodes
-const MaxByteMathSize = 64
+// maxByteMathSize is the limit of byte strings supplied as input to byte math opcodes
+const maxByteMathSize = 64
-// MaxLogSize is the limit of total log size from n log calls in a program
-const MaxLogSize = 1024
+// maxLogSize is the limit of total log size from n log calls in a program
+const maxLogSize = 1024
-// MaxLogCalls is the limit of total log calls during a program execution
-const MaxLogCalls = 32
+// maxLogCalls is the limit of total log calls during a program execution
+const maxLogCalls = 32
// maxAppCallDepth is the limit on inner appl call depth
// To be clear, 0 would prevent inner appls, 1 would mean inner app calls cannot
@@ -70,6 +70,9 @@ const MaxLogCalls = 32
// you count the top-level app call.
const maxAppCallDepth = 8
+// maxStackDepth should not change unless controlled by a teal version change
+const maxStackDepth = 1000
+
// stackValue is the type for the operand stack.
// Each stackValue is either a valid []byte value or a uint64 value.
// If (.Bytes != nil) the stackValue is a []byte value, otherwise uint64 value.
@@ -78,39 +81,28 @@ type stackValue struct {
Bytes []byte
}
-func (sv *stackValue) argType() StackType {
+func (sv stackValue) argType() StackType {
if sv.Bytes != nil {
return StackBytes
}
return StackUint64
}
-func (sv *stackValue) typeName() string {
+func (sv stackValue) typeName() string {
if sv.Bytes != nil {
return "[]byte"
}
return "uint64"
}
-func (sv *stackValue) clone() stackValue {
- if sv.Bytes != nil {
- // clone stack value if Bytes
- bytesClone := make([]byte, len(sv.Bytes))
- copy(bytesClone, sv.Bytes)
- return stackValue{Bytes: bytesClone}
- }
- // otherwise no cloning is needed if Uint
- return stackValue{Uint: sv.Uint}
-}
-
-func (sv *stackValue) String() string {
+func (sv stackValue) String() string {
if sv.Bytes != nil {
return hex.EncodeToString(sv.Bytes)
}
return fmt.Sprintf("%d 0x%x", sv.Uint, sv.Uint)
}
-func (sv *stackValue) address() (addr basics.Address, err error) {
+func (sv stackValue) address() (addr basics.Address, err error) {
if len(sv.Bytes) != len(addr) {
return basics.Address{}, errors.New("not an address")
}
@@ -118,14 +110,14 @@ func (sv *stackValue) address() (addr basics.Address, err error) {
return
}
-func (sv *stackValue) uint() (uint64, error) {
+func (sv stackValue) uint() (uint64, error) {
if sv.Bytes != nil {
return 0, errors.New("not a uint64")
}
return sv.Uint, nil
}
-func (sv *stackValue) uintMaxed(max uint64) (uint64, error) {
+func (sv stackValue) uintMaxed(max uint64) (uint64, error) {
if sv.Bytes != nil {
return 0, fmt.Errorf("%#v is not a uint64", sv.Bytes)
}
@@ -135,7 +127,7 @@ func (sv *stackValue) uintMaxed(max uint64) (uint64, error) {
return sv.Uint, nil
}
-func (sv *stackValue) bool() (bool, error) {
+func (sv stackValue) bool() (bool, error) {
u64, err := sv.uint()
if err != nil {
return false, err
@@ -150,7 +142,7 @@ func (sv *stackValue) bool() (bool, error) {
}
}
-func (sv *stackValue) string(limit int) (string, error) {
+func (sv stackValue) string(limit int) (string, error) {
if sv.Bytes == nil {
return "", errors.New("not a byte array")
}
@@ -160,7 +152,14 @@ func (sv *stackValue) string(limit int) (string, error) {
return string(sv.Bytes), nil
}
-func stackValueFromTealValue(tv *basics.TealValue) (sv stackValue, err error) {
+func (sv stackValue) toTealValue() (tv basics.TealValue) {
+ if sv.argType() == StackBytes {
+ return basics.TealValue{Type: basics.TealBytesType, Bytes: string(sv.Bytes)}
+ }
+ return basics.TealValue{Type: basics.TealUintType, Uint: sv.Uint}
+}
+
+func stackValueFromTealValue(tv basics.TealValue) (sv stackValue, err error) {
switch tv.Type {
case basics.TealBytesType:
sv.Bytes = []byte(tv.Bytes)
@@ -177,10 +176,8 @@ func stackValueFromTealValue(tv *basics.TealValue) (sv stackValue, err error) {
// newly-introduced transaction fields from breaking assumptions made by older
// versions of TEAL. If one of the transactions in a group will execute a TEAL
// program whose version predates a given field, that field must not be set
-// anywhere in the transaction group, or the group will be rejected. In
-// addition, inner app calls must not call teal from before inner app calls were
-// introduced.
-func ComputeMinTealVersion(group []transactions.SignedTxnWithAD, inner bool) uint64 {
+// anywhere in the transaction group, or the group will be rejected.
+func ComputeMinTealVersion(group []transactions.SignedTxnWithAD) uint64 {
var minVersion uint64
for _, txn := range group {
if !txn.Txn.RekeyTo.IsZero() {
@@ -193,22 +190,10 @@ func ComputeMinTealVersion(group []transactions.SignedTxnWithAD, inner bool) uin
minVersion = appsEnabledVersion
}
}
- if inner {
- if minVersion < innerAppsEnabledVersion {
- minVersion = innerAppsEnabledVersion
- }
- }
}
return minVersion
}
-func (sv *stackValue) toTealValue() (tv basics.TealValue) {
- if sv.argType() == StackBytes {
- return basics.TealValue{Type: basics.TealBytesType, Bytes: string(sv.Bytes)}
- }
- return basics.TealValue{Type: basics.TealUintType, Uint: sv.Uint}
-}
-
// LedgerForLogic represents ledger API for Stateful TEAL program
type LedgerForLogic interface {
AccountData(addr basics.Address) (ledgercore.AccountData, error)
@@ -310,7 +295,16 @@ func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.Consens
}
}
- minTealVersion := ComputeMinTealVersion(txgroup, false)
+ // Make a simpler EvalParams that is good enough to evaluate LogicSigs.
+ if apps == 0 {
+ return &EvalParams{
+ TxnGroup: txgroup,
+ Proto: proto,
+ Specials: specials,
+ }
+ }
+
+ minTealVersion := ComputeMinTealVersion(txgroup)
var pooledApplicationBudget *int
var pooledAllowedInners *int
@@ -362,9 +356,10 @@ func feeCredit(txgroup []transactions.SignedTxnWithAD, minFee uint64) uint64 {
// NewInnerEvalParams creates an EvalParams to be used while evaluating an inner group txgroup
func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext) *EvalParams {
- minTealVersion := ComputeMinTealVersion(txg, true)
- // Can't happen currently, since innerAppsEnabledVersion > than any minimum
- // imposed otherwise. But is correct to check, in case of future restriction.
+ minTealVersion := ComputeMinTealVersion(txg)
+ // Can't happen currently, since earliest inner callable version is higher
+ // than any minimum imposed otherwise. But is correct to inherit a stronger
+ // restriction from above, in case of future restriction.
if minTealVersion < *caller.MinTealVersion {
minTealVersion = *caller.MinTealVersion
}
@@ -397,20 +392,20 @@ func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext)
return ep
}
-type opEvalFunc func(cx *EvalContext)
-type opCheckFunc func(cx *EvalContext) error
+type evalFunc func(cx *EvalContext) error
+type checkFunc func(cx *EvalContext) error
type runMode uint64
const (
- // runModeSignature is TEAL in LogicSig execution
- runModeSignature runMode = 1 << iota
+ // modeSig is LogicSig execution
+ modeSig runMode = 1 << iota
- // runModeApplication is TEAL in application/stateful mode
- runModeApplication
+ // modeApp is application/contract execution
+ modeApp
// local constant, run in any mode
- modeAny = runModeSignature | runModeApplication
+ modeAny = modeSig | modeApp
)
func (r runMode) Any() bool {
@@ -419,9 +414,9 @@ func (r runMode) Any() bool {
func (r runMode) String() string {
switch r {
- case runModeSignature:
+ case modeSig:
return "Signature"
- case runModeApplication:
+ case modeApp:
return "Application"
case modeAny:
return "Any"
@@ -430,7 +425,7 @@ func (r runMode) String() string {
return "Unknown"
}
-func (ep EvalParams) log() logging.Logger {
+func (ep *EvalParams) log() logging.Logger {
if ep.logger != nil {
return ep.logger
}
@@ -441,6 +436,11 @@ func (ep EvalParams) log() logging.Logger {
// package. For example, after a acfg transaction is processed, the AD created
// by the acfg is added to the EvalParams this way.
func (ep *EvalParams) RecordAD(gi int, ad transactions.ApplyData) {
+ if ep.created == nil {
+ // This is a simplified ep. It won't be used for app evaluation, and
+ // shares the TxnGroup memory with the caller. Don't touch anything!
+ return
+ }
ep.TxnGroup[gi].ApplyData = ad
if aid := ad.ConfigAsset; aid != 0 {
ep.created.asas = append(ep.created.asas, aid)
@@ -462,9 +462,9 @@ type EvalContext struct {
runModeFlags runMode
// the index of the transaction being evaluated
- GroupIndex int
- // the transaction being evaluated (initialized from GroupIndex + ep.TxnGroup)
- Txn *transactions.SignedTxnWithAD
+ groupIndex int
+ // the transaction being evaluated (initialized from groupIndex + ep.TxnGroup)
+ txn *transactions.SignedTxnWithAD
// Txn.EvalDelta maintains a summary of changes as we go. We used to
// compute this from the ledger after a full eval. But now apps can call
@@ -482,7 +482,6 @@ type EvalContext struct {
program []byte
pc int
nextpc int
- err error
intc []uint64
bytec [][]byte
version uint64
@@ -505,7 +504,7 @@ type EvalContext struct {
programHashCached crypto.Digest
// Stores state & disassembly for the optional debugger
- debugState DebugState
+ debugState *DebugState
}
// StackType describes the type of a value on the operand stack
@@ -528,6 +527,28 @@ const (
// StackTypes is an alias for a list of StackType with syntactic sugar
type StackTypes []StackType
+func parseStackTypes(spec string) StackTypes {
+ if spec == "" {
+ return nil
+ }
+ types := make(StackTypes, len(spec))
+ for i, letter := range spec {
+ switch letter {
+ case 'a':
+ types[i] = StackAny
+ case 'b':
+ types[i] = StackBytes
+ case 'i':
+ types[i] = StackUint64
+ case 'x':
+ types[i] = StackNone
+ default:
+ panic(spec)
+ }
+ }
+ return types
+}
+
func (st StackType) String() string {
switch st {
case StackNone:
@@ -591,33 +612,30 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
}
cx := EvalContext{
EvalParams: params,
- runModeFlags: runModeApplication,
- GroupIndex: gi,
- Txn: &params.TxnGroup[gi],
+ runModeFlags: modeApp,
+ groupIndex: gi,
+ txn: &params.TxnGroup[gi],
appID: aid,
}
- if cx.Proto.IsolateClearState && cx.Txn.Txn.OnCompletion == transactions.ClearStateOC {
+ if cx.Proto.IsolateClearState && cx.txn.Txn.OnCompletion == transactions.ClearStateOC {
if cx.PooledApplicationBudget != nil && *cx.PooledApplicationBudget < cx.Proto.MaxAppProgramCost {
return false, nil, ClearStateBudgetError{*cx.PooledApplicationBudget}
}
}
if cx.Trace != nil && cx.caller != nil {
- fmt.Fprintf(cx.Trace, "--- enter %d %s %v\n", aid, cx.Txn.Txn.OnCompletion, cx.Txn.Txn.ApplicationArgs)
+ fmt.Fprintf(cx.Trace, "--- enter %d %s %v\n", aid, cx.txn.Txn.OnCompletion, cx.txn.Txn.ApplicationArgs)
}
pass, err := eval(program, &cx)
if cx.Trace != nil && cx.caller != nil {
fmt.Fprintf(cx.Trace, "--- exit %d accept=%t\n", aid, pass)
}
- // update side effects. It is tempting, and maybe even a good idea, to store
- // the pointer to cx.scratch instead. Since we don't modify them again,
- // it's probably safe. However it may have poor GC characteristics (because
- // we'd be storing a pointer into a much larger structure, the cx), and
- // copying seems nice and clean.
- cx.pastScratch[cx.GroupIndex] = &scratchSpace{}
- *cx.pastScratch[cx.GroupIndex] = cx.scratch
+ // Save scratch for `gload`. We used to copy, but cx.scratch is quite large,
+ // about 8k, and caused measurable CPU and memory demands. Of course, these
+ // should never be changed by later transactions.
+ cx.pastScratch[cx.groupIndex] = &cx.scratch
return pass, &cx, err
}
@@ -633,11 +651,11 @@ func EvalApp(program []byte, gi int, aid basics.AppIndex, params *EvalParams) (b
func EvalSignature(gi int, params *EvalParams) (pass bool, err error) {
cx := EvalContext{
EvalParams: params,
- runModeFlags: runModeSignature,
- GroupIndex: gi,
- Txn: &params.TxnGroup[gi],
+ runModeFlags: modeSig,
+ groupIndex: gi,
+ txn: &params.TxnGroup[gi],
}
- return eval(cx.Txn.Lsig.Logic, &cx)
+ return eval(cx.txn.Lsig.Logic, &cx)
}
// eval implementation
@@ -660,7 +678,7 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
defer func() {
// Ensure we update the debugger before exiting
if cx.Debugger != nil {
- errDbg := cx.Debugger.Complete(cx.refreshDebugState())
+ errDbg := cx.Debugger.Complete(cx.refreshDebugState(err))
if err == nil {
err = errDbg
}
@@ -671,52 +689,53 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
err = errLogicSigNotSupported
return
}
- if cx.Txn.Lsig.Args != nil && len(cx.Txn.Lsig.Args) > transactions.EvalMaxArgs {
+ if cx.txn.Lsig.Args != nil && len(cx.txn.Lsig.Args) > transactions.EvalMaxArgs {
err = errTooManyArgs
return
}
version, vlen, err := versionCheck(program, cx.EvalParams)
if err != nil {
- cx.err = err
return false, err
}
cx.version = version
cx.pc = vlen
- cx.stack = make([]stackValue, 0, 10)
+ // 16 is chosen to avoid growth for small programs, and so that repeated
+ // doublings lead to a number just a bit above 1000, the max stack height.
+ cx.stack = make([]stackValue, 0, 16)
cx.program = program
- cx.Txn.EvalDelta.GlobalDelta = basics.StateDelta{}
- cx.Txn.EvalDelta.LocalDeltas = make(map[uint64]basics.StateDelta)
+ cx.txn.EvalDelta.GlobalDelta = basics.StateDelta{}
+ cx.txn.EvalDelta.LocalDeltas = make(map[uint64]basics.StateDelta)
if cx.Debugger != nil {
cx.debugState = makeDebugState(cx)
- if err = cx.Debugger.Register(cx.refreshDebugState()); err != nil {
- return
+ if derr := cx.Debugger.Register(cx.refreshDebugState(err)); derr != nil {
+ return false, derr
}
}
- for (cx.err == nil) && (cx.pc < len(cx.program)) {
+ for (err == nil) && (cx.pc < len(cx.program)) {
if cx.Debugger != nil {
- if err = cx.Debugger.Update(cx.refreshDebugState()); err != nil {
- return
+ if derr := cx.Debugger.Update(cx.refreshDebugState(err)); derr != nil {
+ return false, derr
}
}
- cx.step()
+ err = cx.step()
}
- if cx.err != nil {
+ if err != nil {
if cx.Trace != nil {
- fmt.Fprintf(cx.Trace, "%3d %s\n", cx.pc, cx.err)
+ fmt.Fprintf(cx.Trace, "%3d %s\n", cx.pc, err)
}
- return false, cx.err
+ return false, err
}
if len(cx.stack) != 1 {
if cx.Trace != nil {
fmt.Fprintf(cx.Trace, "end stack:\n")
for i, sv := range cx.stack {
- fmt.Fprintf(cx.Trace, "[%d] %s\n", i, sv.String())
+ fmt.Fprintf(cx.Trace, "[%d] %s\n", i, sv)
}
}
return false, fmt.Errorf("stack len is %d instead of 1", len(cx.stack))
@@ -733,14 +752,14 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
// these static checks include a cost estimate that must be low enough
// (controlled by params.Proto).
func CheckContract(program []byte, params *EvalParams) error {
- return check(program, params, runModeApplication)
+ return check(program, params, modeApp)
}
// CheckSignature should be faster than EvalSignature. It can perform static
// checks and reject programs that are invalid. Prior to v4, these static checks
// include a cost estimate that must be low enough (controlled by params.Proto).
func CheckSignature(gi int, params *EvalParams) error {
- return check(params.TxnGroup[gi].Lsig.Logic, params, runModeSignature)
+ return check(params.TxnGroup[gi].Lsig.Logic, params, modeSig)
}
func check(program []byte, params *EvalParams, mode runMode) (err error) {
@@ -775,9 +794,6 @@ func check(program []byte, params *EvalParams, mode runMode) (err error) {
cx.instructionStarts = make(map[int]bool)
maxCost := cx.remainingBudget()
- if version >= backBranchEnabledVersion {
- maxCost = math.MaxInt32
- }
staticCost := 0
for cx.pc < len(cx.program) {
prevpc := cx.pc
@@ -786,7 +802,7 @@ func check(program []byte, params *EvalParams, mode runMode) (err error) {
return fmt.Errorf("pc=%3d %w", cx.pc, err)
}
staticCost += stepCost
- if staticCost > maxCost {
+ if version < backBranchEnabledVersion && staticCost > maxCost {
return fmt.Errorf("pc=%3d static cost budget of %d exceeded", cx.pc, maxCost)
}
if cx.pc <= prevpc {
@@ -794,7 +810,7 @@ func check(program []byte, params *EvalParams, mode runMode) (err error) {
// without evaluation. It always goes forward,
// even if we're in v4 and the jump would go
// back.
- return fmt.Errorf("pc did not advance, stuck at %d", cx.pc)
+ return fmt.Errorf("pc=%3d pc did not advance", cx.pc)
}
}
return nil
@@ -805,15 +821,15 @@ func versionCheck(program []byte, params *EvalParams) (uint64, int, error) {
if err != nil {
return 0, 0, err
}
- if version > EvalMaxVersion {
- return 0, 0, fmt.Errorf("program version %d greater than max supported version %d", version, EvalMaxVersion)
+ if version > evalMaxVersion {
+ return 0, 0, fmt.Errorf("program version %d greater than max supported version %d", version, evalMaxVersion)
}
if version > params.Proto.LogicSigVersion {
return 0, 0, fmt.Errorf("program version %d greater than protocol supported version %d", version, params.Proto.LogicSigVersion)
}
if params.MinTealVersion == nil {
- minVersion := ComputeMinTealVersion(params.TxnGroup, params.caller != nil)
+ minVersion := ComputeMinTealVersion(params.TxnGroup)
params.MinTealVersion = &minVersion
}
if version < *params.MinTealVersion {
@@ -843,17 +859,14 @@ func boolToUint(x bool) uint64 {
return 0
}
-// MaxStackDepth should not change unless gated by a teal version change / consensus upgrade.
-const MaxStackDepth = 1000
-
func (cx *EvalContext) remainingBudget() int {
- if cx.runModeFlags == runModeSignature {
+ if cx.runModeFlags == modeSig {
return int(cx.Proto.LogicSigMaxCost) - cx.cost
}
// restrict clear state programs from using more than standard unpooled budget
// cx.Txn is not set during check()
- if cx.Proto.IsolateClearState && cx.Txn != nil && cx.Txn.Txn.OnCompletion == transactions.ClearStateOC {
+ if cx.Proto.IsolateClearState && cx.txn != nil && cx.txn.Txn.OnCompletion == transactions.ClearStateOC {
// Need not confirm that *cx.PooledApplicationBudget is also >0, as
// ClearState programs are only run if *cx.PooledApplicationBudget >
// MaxAppProgramCost at the start.
@@ -874,83 +887,90 @@ func (cx *EvalContext) remainingInners() int {
// allowed in a single txn. No consensus version should enable inner app
// calls without turning on EnableInnerTransactionPoolin, else inner calls
// could keep branching with "width" MaxInnerTransactions
- return cx.Proto.MaxInnerTransactions - len(cx.Txn.EvalDelta.InnerTxns)
+ return cx.Proto.MaxInnerTransactions - len(cx.txn.EvalDelta.InnerTxns)
}
-func (cx *EvalContext) step() {
+func (cx *EvalContext) step() error {
opcode := cx.program[cx.pc]
spec := &opsByOpcode[cx.version][opcode]
// this check also ensures TEAL versioning: v2 opcodes are not in opsByOpcode[1] array
if spec.op == nil {
- cx.err = fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
- return
+ return fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
}
if (cx.runModeFlags & spec.Modes) == 0 {
- cx.err = fmt.Errorf("%s not allowed in current mode", spec.Name)
- return
+ return fmt.Errorf("%s not allowed in current mode", spec.Name)
}
// check args for stack underflow and types
- if len(cx.stack) < len(spec.Args) {
- cx.err = fmt.Errorf("stack underflow in %s", spec.Name)
- return
+ if len(cx.stack) < len(spec.Arg.Types) {
+ return fmt.Errorf("stack underflow in %s", spec.Name)
}
- first := len(cx.stack) - len(spec.Args)
- for i, argType := range spec.Args {
+ first := len(cx.stack) - len(spec.Arg.Types)
+ for i, argType := range spec.Arg.Types {
if !opCompat(argType, cx.stack[first+i].argType()) {
- cx.err = fmt.Errorf("%s arg %d wanted %s but got %s", spec.Name, i, argType.String(), cx.stack[first+i].typeName())
- return
+ return fmt.Errorf("%s arg %d wanted %s but got %s", spec.Name, i, argType, cx.stack[first+i].typeName())
}
}
- deets := spec.Details
+ deets := &spec.OpDetails
if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
- cx.err = fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
- return
+ return fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
+ }
+
+ // It's something like a 5-10% overhead on our simplest instructions to make
+ // the Cost() call without the FullCost.compute() short-circuit, even
+ // though Cost() tries to exit fast. Use BenchmarkUintMath to test changes.
+ opcost := deets.FullCost.compute(cx.stack)
+ if opcost <= 0 {
+ opcost = deets.Cost(cx.program, cx.pc, cx.stack)
+ if opcost <= 0 {
+ return fmt.Errorf("%3d %s returned 0 cost", cx.pc, spec.Name)
+ }
}
- cx.cost += deets.Cost
+ cx.cost += opcost
if cx.PooledApplicationBudget != nil {
- *cx.PooledApplicationBudget -= deets.Cost
+ *cx.PooledApplicationBudget -= opcost
}
if cx.remainingBudget() < 0 {
// We're not going to execute the instruction, so give the cost back.
// This only matters if this is an inner ClearState - the caller should
// not be over debited. (Normally, failure causes total txtree failure.)
- cx.cost -= deets.Cost
+ cx.cost -= opcost
if cx.PooledApplicationBudget != nil {
- *cx.PooledApplicationBudget += deets.Cost
+ *cx.PooledApplicationBudget += opcost
}
- cx.err = fmt.Errorf("pc=%3d dynamic cost budget exceeded, executing %s: local program cost was %d",
+ return fmt.Errorf("pc=%3d dynamic cost budget exceeded, executing %s: local program cost was %d",
cx.pc, spec.Name, cx.cost)
- return
}
preheight := len(cx.stack)
- spec.op(cx)
+ err := spec.op(cx)
- if cx.err == nil {
+ if err == nil {
postheight := len(cx.stack)
- if spec.Name != "return" && postheight-preheight != len(spec.Returns)-len(spec.Args) {
- cx.err = fmt.Errorf("%s changed stack height improperly %d != %d",
- spec.Name, postheight-preheight, len(spec.Returns)-len(spec.Args))
- return
+ if postheight-preheight != len(spec.Return.Types)-len(spec.Arg.Types) && !spec.AlwaysExits() {
+ return fmt.Errorf("%s changed stack height improperly %d != %d",
+ spec.Name, postheight-preheight, len(spec.Return.Types)-len(spec.Arg.Types))
}
- first = postheight - len(spec.Returns)
- for i, argType := range spec.Returns {
+ first = postheight - len(spec.Return.Types)
+ for i, argType := range spec.Return.Types {
stackType := cx.stack[first+i].argType()
if !opCompat(argType, stackType) {
- cx.err = fmt.Errorf("%s produced %s but intended %s", spec.Name, cx.stack[first+i].typeName(), argType.String())
- return
+ if spec.AlwaysExits() { // We test in the loop because it's the uncommon case.
+ break
+ }
+ return fmt.Errorf("%s produced %s but intended %s", spec.Name, cx.stack[first+i].typeName(), argType)
}
- if stackType == StackBytes && len(cx.stack[first+i].Bytes) > MaxStringSize {
- cx.err = fmt.Errorf("%s produced a too big (%d) byte-array", spec.Name, len(cx.stack[first+i].Bytes))
- return
+ if stackType == StackBytes && len(cx.stack[first+i].Bytes) > maxStringSize {
+ return fmt.Errorf("%s produced a too big (%d) byte-array", spec.Name, len(cx.stack[first+i].Bytes))
}
}
}
+ // Delay checking and returning `err` so we have a chance to Trace the last instruction
+
if cx.Trace != nil {
// This code used to do a little disassembly on its
// own, but then it missed out on some nuances like
@@ -967,43 +987,41 @@ func (cx *EvalContext) step() {
// (changing the pc, for example) and this gives a big
// improvement of dryrun readability
dstate := &disassembleState{program: cx.program, pc: cx.pc, numericTargets: true, intc: cx.intc, bytec: cx.bytec}
- var sourceLine string
- sourceLine, err := spec.dis(dstate, spec)
- if err != nil {
- if cx.err == nil { // don't override an error from evaluation
- cx.err = err
+ sourceLine, inner := disassemble(dstate, spec)
+ if inner != nil {
+ if err != nil { // don't override an error from evaluation
+ return err
}
- return
+ return inner
}
var stackString string
if len(cx.stack) == 0 {
stackString = "<empty stack>"
} else {
num := 1
- if len(spec.Returns) > 1 {
- num = len(spec.Returns)
+ if len(spec.Return.Types) > 1 {
+ num = len(spec.Return.Types)
}
// check for nil error here, because we might not return
// values if we encounter an error in the opcode
- if cx.err == nil {
+ if err == nil {
if len(cx.stack) < num {
- cx.err = fmt.Errorf("stack underflow: expected %d, have %d", num, len(cx.stack))
- return
+ return fmt.Errorf("stack underflow: expected %d, have %d", num, len(cx.stack))
}
for i := 1; i <= num; i++ {
- stackString += fmt.Sprintf("(%s) ", cx.stack[len(cx.stack)-i].String())
+ stackString += fmt.Sprintf("(%s) ", cx.stack[len(cx.stack)-i])
}
}
}
fmt.Fprintf(cx.Trace, "%3d %s => %s\n", cx.pc, sourceLine, stackString)
}
- if cx.err != nil {
- return
+
+ if err != nil {
+ return err
}
- if len(cx.stack) > MaxStackDepth {
- cx.err = errors.New("stack overflow")
- return
+ if len(cx.stack) > maxStackDepth {
+ return errors.New("stack overflow")
}
if cx.nextpc != 0 {
cx.pc = cx.nextpc
@@ -1011,25 +1029,38 @@ func (cx *EvalContext) step() {
} else {
cx.pc += deets.Size
}
+ return nil
}
+// oneBlank is a boring stack provided to deets.Cost during checkStep. It is
+// good enough to allow Cost() to not crash. It would be incorrect to provide
+// this stack if there were linear cost opcodes before backBranchEnabledVersion,
+// because the static cost would be wrong. But then again, a static cost model
+// wouldn't work before backBranchEnabledVersion, so such an opcode is already
+// unacceptable. TestLinearOpcodes ensures.
+var oneBlank = []stackValue{{Bytes: []byte{}}}
+
func (cx *EvalContext) checkStep() (int, error) {
cx.instructionStarts[cx.pc] = true
opcode := cx.program[cx.pc]
spec := &opsByOpcode[cx.version][opcode]
if spec.op == nil {
- return 0, fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
+ return 0, fmt.Errorf("illegal opcode 0x%02x", opcode)
}
if (cx.runModeFlags & spec.Modes) == 0 {
return 0, fmt.Errorf("%s not allowed in current mode", spec.Name)
}
- deets := spec.Details
+ deets := spec.OpDetails
if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
- return 0, fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
+ return 0, fmt.Errorf("%s program ends short of immediate values", spec.Name)
+ }
+ opcost := deets.Cost(cx.program, cx.pc, oneBlank)
+ if opcost <= 0 {
+ return 0, fmt.Errorf("%s reported non-positive cost", spec.Name)
}
prevpc := cx.pc
- if deets.checkFunc != nil {
- err := deets.checkFunc(cx)
+ if deets.check != nil {
+ err := deets.check(cx)
if err != nil {
return 0, err
}
@@ -1045,21 +1076,19 @@ func (cx *EvalContext) checkStep() (int, error) {
if cx.Trace != nil {
fmt.Fprintf(cx.Trace, "%3d %s\n", prevpc, spec.Name)
}
- if cx.err == nil {
- for pc := prevpc + 1; pc < cx.pc; pc++ {
- if _, ok := cx.branchTargets[pc]; ok {
- return 0, fmt.Errorf("branch target %d is not an aligned instruction", pc)
- }
+ for pc := prevpc + 1; pc < cx.pc; pc++ {
+ if _, ok := cx.branchTargets[pc]; ok {
+ return 0, fmt.Errorf("branch target %d is not an aligned instruction", pc)
}
}
- return deets.Cost, nil
+ return opcost, nil
}
-func opErr(cx *EvalContext) {
- cx.err = errors.New("TEAL runtime encountered err opcode")
+func opErr(cx *EvalContext) error {
+ return errors.New("err opcode executed")
}
-func opReturn(cx *EvalContext) {
+func opReturn(cx *EvalContext) error {
// Achieve the end condition:
// Take the last element on the stack and make it the return value (only element on the stack)
// Move the pc to the end of the program
@@ -1067,24 +1096,26 @@ func opReturn(cx *EvalContext) {
cx.stack[0] = cx.stack[last]
cx.stack = cx.stack[:1]
cx.nextpc = len(cx.program)
+ return nil
}
-func opAssert(cx *EvalContext) {
+func opAssert(cx *EvalContext) error {
last := len(cx.stack) - 1
if cx.stack[last].Uint != 0 {
cx.stack = cx.stack[:last]
- return
+ return nil
}
- cx.err = fmt.Errorf("assert failed pc=%d", cx.pc)
+ return fmt.Errorf("assert failed pc=%d", cx.pc)
}
-func opSwap(cx *EvalContext) {
+func opSwap(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cx.stack[last], cx.stack[prev] = cx.stack[prev], cx.stack[last]
+ return nil
}
-func opSelect(cx *EvalContext) {
+func opSelect(cx *EvalContext) error {
last := len(cx.stack) - 1 // condition on top
prev := last - 1 // true is one down
pprev := prev - 1 // false below that
@@ -1093,29 +1124,33 @@ func opSelect(cx *EvalContext) {
cx.stack[pprev] = cx.stack[prev]
}
cx.stack = cx.stack[:prev]
+ return nil
}
-func opSHA256(cx *EvalContext) {
+func opSHA256(cx *EvalContext) error {
last := len(cx.stack) - 1
hash := sha256.Sum256(cx.stack[last].Bytes)
cx.stack[last].Bytes = hash[:]
+ return nil
}
// The NIST SHA3-256 is implemented for compatibility with ICON
-func opSHA3_256(cx *EvalContext) {
+func opSHA3_256(cx *EvalContext) error {
last := len(cx.stack) - 1
hash := sha3.Sum256(cx.stack[last].Bytes)
cx.stack[last].Bytes = hash[:]
+ return nil
}
// The Keccak256 variant of SHA-3 is implemented for compatibility with Ethereum
-func opKeccak256(cx *EvalContext) {
+func opKeccak256(cx *EvalContext) error {
last := len(cx.stack) - 1
hasher := sha3.NewLegacyKeccak256()
hasher.Write(cx.stack[last].Bytes)
hv := make([]byte, 0, hasher.Size())
hv = hasher.Sum(hv)
cx.stack[last].Bytes = hv
+ return nil
}
// This is the hash commonly used in Algorand in crypto/util.go Hash()
@@ -1124,30 +1159,32 @@ func opKeccak256(cx *EvalContext) {
// stability and portability in case the rest of Algorand ever moves
// to a different default hash. For stability of this language, at
// that time a new opcode should be made with the new hash.
-func opSHA512_256(cx *EvalContext) {
+func opSHA512_256(cx *EvalContext) error {
last := len(cx.stack) - 1
hash := sha512.Sum512_256(cx.stack[last].Bytes)
cx.stack[last].Bytes = hash[:]
+ return nil
}
-func opPlus(cx *EvalContext) {
+func opPlus(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
sum, carry := bits.Add64(cx.stack[prev].Uint, cx.stack[last].Uint, 0)
if carry > 0 {
- cx.err = errors.New("+ overflowed")
- return
+ return errors.New("+ overflowed")
}
cx.stack[prev].Uint = sum
cx.stack = cx.stack[:last]
+ return nil
}
-func opAddw(cx *EvalContext) {
+func opAddw(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
sum, carry := bits.Add64(cx.stack[prev].Uint, cx.stack[last].Uint, 0)
cx.stack[prev].Uint = carry
cx.stack[last].Uint = sum
+ return nil
}
func uint128(hi uint64, lo uint64) *big.Int {
@@ -1168,12 +1205,11 @@ func opDivModwImpl(hiNum, loNum, hiDen, loDen uint64) (hiQuo uint64, loQuo uint6
rem.Uint64()
}
-func opDivModw(cx *EvalContext) {
+func opDivModw(cx *EvalContext) error {
loDen := len(cx.stack) - 1
hiDen := loDen - 1
if cx.stack[loDen].Uint == 0 && cx.stack[hiDen].Uint == 0 {
- cx.err = errors.New("/ 0")
- return
+ return errors.New("/ 0")
}
loNum := loDen - 2
hiNum := loDen - 3
@@ -1183,62 +1219,64 @@ func opDivModw(cx *EvalContext) {
cx.stack[loNum].Uint = loQuo
cx.stack[hiDen].Uint = hiRem
cx.stack[loDen].Uint = loRem
+ return nil
}
-func opMinus(cx *EvalContext) {
+func opMinus(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
if cx.stack[last].Uint > cx.stack[prev].Uint {
- cx.err = errors.New("- would result negative")
- return
+ return errors.New("- would result negative")
}
cx.stack[prev].Uint -= cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opDiv(cx *EvalContext) {
+func opDiv(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
if cx.stack[last].Uint == 0 {
- cx.err = errors.New("/ 0")
- return
+ return errors.New("/ 0")
}
cx.stack[prev].Uint /= cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opModulo(cx *EvalContext) {
+func opModulo(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
if cx.stack[last].Uint == 0 {
- cx.err = errors.New("% 0")
- return
+ return errors.New("% 0")
}
cx.stack[prev].Uint = cx.stack[prev].Uint % cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opMul(cx *EvalContext) {
+func opMul(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
high, low := bits.Mul64(cx.stack[prev].Uint, cx.stack[last].Uint)
if high > 0 {
- cx.err = errors.New("* overflowed")
- return
+ return errors.New("* overflowed")
}
cx.stack[prev].Uint = low
cx.stack = cx.stack[:last]
+ return nil
}
-func opMulw(cx *EvalContext) {
+func opMulw(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
high, low := bits.Mul64(cx.stack[prev].Uint, cx.stack[last].Uint)
cx.stack[prev].Uint = high
cx.stack[last].Uint = low
+ return nil
}
-func opDivw(cx *EvalContext) {
+func opDivw(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
pprev := last - 2
@@ -1248,65 +1286,68 @@ func opDivw(cx *EvalContext) {
// These two clauses catch what will cause panics in bits.Div64, so we get
// nicer errors.
if y == 0 {
- cx.err = errors.New("divw 0")
- return
+ return errors.New("divw 0")
}
if y <= hi {
- cx.err = fmt.Errorf("divw overflow: %d <= %d", y, hi)
- return
+ return fmt.Errorf("divw overflow: %d <= %d", y, hi)
}
quo, _ := bits.Div64(hi, lo, y)
cx.stack = cx.stack[:prev] // pop 2
cx.stack[pprev].Uint = quo
+ return nil
}
-func opLt(cx *EvalContext) {
+func opLt(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cond := cx.stack[prev].Uint < cx.stack[last].Uint
cx.stack[prev].Uint = boolToUint(cond)
cx.stack = cx.stack[:last]
+ return nil
}
-func opGt(cx *EvalContext) {
+// opSwap, opLt, and opNot always succeed (return nil). So error checking elided in Gt,Le,Ge
+
+func opGt(cx *EvalContext) error {
opSwap(cx)
- opLt(cx)
+ return opLt(cx)
}
-func opLe(cx *EvalContext) {
+func opLe(cx *EvalContext) error {
opGt(cx)
- opNot(cx)
+ return opNot(cx)
}
-func opGe(cx *EvalContext) {
+func opGe(cx *EvalContext) error {
opLt(cx)
- opNot(cx)
+ return opNot(cx)
}
-func opAnd(cx *EvalContext) {
+func opAnd(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cond := (cx.stack[prev].Uint != 0) && (cx.stack[last].Uint != 0)
cx.stack[prev].Uint = boolToUint(cond)
cx.stack = cx.stack[:last]
+ return nil
}
-func opOr(cx *EvalContext) {
+func opOr(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cond := (cx.stack[prev].Uint != 0) || (cx.stack[last].Uint != 0)
cx.stack[prev].Uint = boolToUint(cond)
cx.stack = cx.stack[:last]
+ return nil
}
-func opEq(cx *EvalContext) {
+func opEq(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
ta := cx.stack[prev].argType()
tb := cx.stack[last].argType()
if ta != tb {
- cx.err = fmt.Errorf("cannot compare (%s to %s)", cx.stack[prev].typeName(), cx.stack[last].typeName())
- return
+ return fmt.Errorf("cannot compare (%s to %s)", cx.stack[prev].typeName(), cx.stack[last].typeName())
}
var cond bool
if ta == StackBytes {
@@ -1317,40 +1358,46 @@ func opEq(cx *EvalContext) {
cx.stack[prev].Uint = boolToUint(cond)
cx.stack[prev].Bytes = nil
cx.stack = cx.stack[:last]
+ return nil
}
-func opNeq(cx *EvalContext) {
- opEq(cx)
- opNot(cx)
+func opNeq(cx *EvalContext) error {
+ err := opEq(cx)
+ if err != nil {
+ return err
+ }
+ return opNot(cx)
}
-func opNot(cx *EvalContext) {
+func opNot(cx *EvalContext) error {
last := len(cx.stack) - 1
cond := cx.stack[last].Uint == 0
cx.stack[last].Uint = boolToUint(cond)
+ return nil
}
-func opLen(cx *EvalContext) {
+func opLen(cx *EvalContext) error {
last := len(cx.stack) - 1
cx.stack[last].Uint = uint64(len(cx.stack[last].Bytes))
cx.stack[last].Bytes = nil
+ return nil
}
-func opItob(cx *EvalContext) {
+func opItob(cx *EvalContext) error {
last := len(cx.stack) - 1
ibytes := make([]byte, 8)
binary.BigEndian.PutUint64(ibytes, cx.stack[last].Uint)
// cx.stack[last].Uint is not cleared out as optimization
// stackValue.argType() checks Bytes field first
cx.stack[last].Bytes = ibytes
+ return nil
}
-func opBtoi(cx *EvalContext) {
+func opBtoi(cx *EvalContext) error {
last := len(cx.stack) - 1
ibytes := cx.stack[last].Bytes
if len(ibytes) > 8 {
- cx.err = fmt.Errorf("btoi arg too long, got [%d]bytes", len(ibytes))
- return
+ return fmt.Errorf("btoi arg too long, got [%d]bytes", len(ibytes))
}
value := uint64(0)
for _, b := range ibytes {
@@ -1359,57 +1406,62 @@ func opBtoi(cx *EvalContext) {
}
cx.stack[last].Uint = value
cx.stack[last].Bytes = nil
+ return nil
}
-func opBitOr(cx *EvalContext) {
+func opBitOr(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cx.stack[prev].Uint = cx.stack[prev].Uint | cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opBitAnd(cx *EvalContext) {
+func opBitAnd(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cx.stack[prev].Uint = cx.stack[prev].Uint & cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opBitXor(cx *EvalContext) {
+func opBitXor(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cx.stack[prev].Uint = cx.stack[prev].Uint ^ cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opBitNot(cx *EvalContext) {
+func opBitNot(cx *EvalContext) error {
last := len(cx.stack) - 1
cx.stack[last].Uint = cx.stack[last].Uint ^ 0xffffffffffffffff
+ return nil
}
-func opShiftLeft(cx *EvalContext) {
+func opShiftLeft(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
if cx.stack[last].Uint > 63 {
- cx.err = fmt.Errorf("shl arg too big, (%d)", cx.stack[last].Uint)
- return
+ return fmt.Errorf("shl arg too big, (%d)", cx.stack[last].Uint)
}
cx.stack[prev].Uint = cx.stack[prev].Uint << cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opShiftRight(cx *EvalContext) {
+func opShiftRight(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
if cx.stack[last].Uint > 63 {
- cx.err = fmt.Errorf("shr arg too big, (%d)", cx.stack[last].Uint)
- return
+ return fmt.Errorf("shr arg too big, (%d)", cx.stack[last].Uint)
}
cx.stack[prev].Uint = cx.stack[prev].Uint >> cx.stack[last].Uint
cx.stack = cx.stack[:last]
+ return nil
}
-func opSqrt(cx *EvalContext) {
+func opSqrt(cx *EvalContext) error {
/*
It would not be safe to use math.Sqrt, because we would have to
convert our u64 to an f64, but f64 cannot represent all u64s exactly.
@@ -1434,13 +1486,14 @@ func opSqrt(cx *EvalContext) {
}
}
cx.stack[last].Uint = root >> 1
+ return nil
}
-func opBitLen(cx *EvalContext) {
+func opBitLen(cx *EvalContext) error {
last := len(cx.stack) - 1
if cx.stack[last].argType() == StackUint64 {
cx.stack[last].Uint = uint64(bits.Len64(cx.stack[last].Uint))
- return
+ return nil
}
length := len(cx.stack[last].Bytes)
idx := 0
@@ -1453,6 +1506,7 @@ func opBitLen(cx *EvalContext) {
}
cx.stack[last].Bytes = nil
cx.stack[last].Uint = uint64(idx)
+ return nil
}
func opExpImpl(base uint64, exp uint64) (uint64, error) {
@@ -1483,7 +1537,7 @@ func opExpImpl(base uint64, exp uint64) (uint64, error) {
return answer, nil
}
-func opExp(cx *EvalContext) {
+func opExp(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
@@ -1491,11 +1545,11 @@ func opExp(cx *EvalContext) {
base := cx.stack[prev].Uint
val, err := opExpImpl(base, exp)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[prev].Uint = val
cx.stack = cx.stack[:last]
+ return nil
}
func opExpwImpl(base uint64, exp uint64) (*big.Int, error) {
@@ -1519,17 +1573,15 @@ func opExpwImpl(base uint64, exp uint64) (*big.Int, error) {
bigbase := new(big.Int).SetUint64(base)
// safe to cast exp, because it is known to fit in int (it's < 128)
for i := 1; i < int(exp); i++ {
- next := answer.Mul(answer, bigbase)
- answer = next
+ answer.Mul(answer, bigbase)
if answer.BitLen() > 128 {
return &big.Int{}, fmt.Errorf("%d^%d overflow", base, exp)
}
}
return answer, nil
-
}
-func opExpw(cx *EvalContext) {
+func opExpw(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
@@ -1537,83 +1589,86 @@ func opExpw(cx *EvalContext) {
base := cx.stack[prev].Uint
val, err := opExpwImpl(base, exp)
if err != nil {
- cx.err = err
- return
+ return err
}
hi := new(big.Int).Rsh(val, 64).Uint64()
lo := val.Uint64()
cx.stack[prev].Uint = hi
cx.stack[last].Uint = lo
+ return nil
}
-func opBytesBinOp(cx *EvalContext, result *big.Int, op func(x, y *big.Int) *big.Int) {
+func opBytesBinOp(cx *EvalContext, result *big.Int, op func(x, y *big.Int) *big.Int) error {
last := len(cx.stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize || len(cx.stack[prev].Bytes) > MaxByteMathSize {
- cx.err = errors.New("math attempted on large byte-array")
- return
+ if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
+ return errors.New("math attempted on large byte-array")
}
rhs := new(big.Int).SetBytes(cx.stack[last].Bytes)
lhs := new(big.Int).SetBytes(cx.stack[prev].Bytes)
op(lhs, rhs) // op's receiver has already been bound to result
if result.Sign() < 0 {
- cx.err = errors.New("byte math would have negative result")
- return
+ return errors.New("byte math would have negative result")
}
cx.stack[prev].Bytes = result.Bytes()
cx.stack = cx.stack[:last]
+ return nil
}
-func opBytesPlus(cx *EvalContext) {
+func opBytesPlus(cx *EvalContext) error {
result := new(big.Int)
- opBytesBinOp(cx, result, result.Add)
+ return opBytesBinOp(cx, result, result.Add)
}
-func opBytesMinus(cx *EvalContext) {
+func opBytesMinus(cx *EvalContext) error {
result := new(big.Int)
- opBytesBinOp(cx, result, result.Sub)
+ return opBytesBinOp(cx, result, result.Sub)
}
-func opBytesDiv(cx *EvalContext) {
+func opBytesDiv(cx *EvalContext) error {
result := new(big.Int)
+ var inner error
checkDiv := func(x, y *big.Int) *big.Int {
if y.BitLen() == 0 {
- cx.err = errors.New("division by zero")
+ inner = errors.New("division by zero")
return new(big.Int)
}
return result.Div(x, y)
}
- opBytesBinOp(cx, result, checkDiv)
+ err := opBytesBinOp(cx, result, checkDiv)
+ if err != nil {
+ return err
+ }
+ return inner
}
-func opBytesMul(cx *EvalContext) {
+func opBytesMul(cx *EvalContext) error {
result := new(big.Int)
- opBytesBinOp(cx, result, result.Mul)
+ return opBytesBinOp(cx, result, result.Mul)
}
-func opBytesSqrt(cx *EvalContext) {
+func opBytesSqrt(cx *EvalContext) error {
last := len(cx.stack) - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize {
- cx.err = errors.New("math attempted on large byte-array")
- return
+ if len(cx.stack[last].Bytes) > maxByteMathSize {
+ return errors.New("math attempted on large byte-array")
}
val := new(big.Int).SetBytes(cx.stack[last].Bytes)
val.Sqrt(val)
cx.stack[last].Bytes = val.Bytes()
+ return nil
}
-func opBytesLt(cx *EvalContext) {
+func opBytesLt(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize || len(cx.stack[prev].Bytes) > MaxByteMathSize {
- cx.err = errors.New("math attempted on large byte-array")
- return
+ if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
+ return errors.New("math attempted on large byte-array")
}
rhs := new(big.Int).SetBytes(cx.stack[last].Bytes)
@@ -1621,30 +1676,36 @@ func opBytesLt(cx *EvalContext) {
cx.stack[prev].Bytes = nil
cx.stack[prev].Uint = boolToUint(lhs.Cmp(rhs) < 0)
cx.stack = cx.stack[:last]
+ return nil
}
-func opBytesGt(cx *EvalContext) {
+func opBytesGt(cx *EvalContext) error {
opSwap(cx)
- opBytesLt(cx)
+ return opBytesLt(cx)
}
-func opBytesLe(cx *EvalContext) {
- opBytesGt(cx)
- opNot(cx)
+func opBytesLe(cx *EvalContext) error {
+ err := opBytesGt(cx)
+ if err != nil {
+ return err
+ }
+ return opNot(cx)
}
-func opBytesGe(cx *EvalContext) {
- opBytesLt(cx)
- opNot(cx)
+func opBytesGe(cx *EvalContext) error {
+ err := opBytesLt(cx)
+ if err != nil {
+ return err
+ }
+ return opNot(cx)
}
-func opBytesEq(cx *EvalContext) {
+func opBytesEq(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize || len(cx.stack[prev].Bytes) > MaxByteMathSize {
- cx.err = errors.New("math attempted on large byte-array")
- return
+ if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
+ return errors.New("math attempted on large byte-array")
}
rhs := new(big.Int).SetBytes(cx.stack[last].Bytes)
@@ -1652,23 +1713,32 @@ func opBytesEq(cx *EvalContext) {
cx.stack[prev].Bytes = nil
cx.stack[prev].Uint = boolToUint(lhs.Cmp(rhs) == 0)
cx.stack = cx.stack[:last]
+ return nil
}
-func opBytesNeq(cx *EvalContext) {
- opBytesEq(cx)
- opNot(cx)
+func opBytesNeq(cx *EvalContext) error {
+ err := opBytesEq(cx)
+ if err != nil {
+ return err
+ }
+ return opNot(cx)
}
-func opBytesModulo(cx *EvalContext) {
+func opBytesModulo(cx *EvalContext) error {
result := new(big.Int)
+ var inner error
checkMod := func(x, y *big.Int) *big.Int {
if y.BitLen() == 0 {
- cx.err = errors.New("modulo by zero")
+ inner = errors.New("modulo by zero")
return new(big.Int)
}
return result.Mod(x, y)
}
- opBytesBinOp(cx, result, checkMod)
+ err := opBytesBinOp(cx, result, checkMod)
+ if err != nil {
+ return err
+ }
+ return inner
}
func zpad(smaller []byte, size int) []byte {
@@ -1700,28 +1770,31 @@ func opBytesBinaryLogicPrep(cx *EvalContext) ([]byte, []byte) {
return fresh, other
}
-func opBytesBitOr(cx *EvalContext) {
+func opBytesBitOr(cx *EvalContext) error {
a, b := opBytesBinaryLogicPrep(cx)
for i := range a {
a[i] = a[i] | b[i]
}
+ return nil
}
-func opBytesBitAnd(cx *EvalContext) {
+func opBytesBitAnd(cx *EvalContext) error {
a, b := opBytesBinaryLogicPrep(cx)
for i := range a {
a[i] = a[i] & b[i]
}
+ return nil
}
-func opBytesBitXor(cx *EvalContext) {
+func opBytesBitXor(cx *EvalContext) error {
a, b := opBytesBinaryLogicPrep(cx)
for i := range a {
a[i] = a[i] ^ b[i]
}
+ return nil
}
-func opBytesBitNot(cx *EvalContext) {
+func opBytesBitNot(cx *EvalContext) error {
last := len(cx.stack) - 1
fresh := make([]byte, len(cx.stack[last].Bytes))
@@ -1729,134 +1802,139 @@ func opBytesBitNot(cx *EvalContext) {
fresh[i] = ^b
}
cx.stack[last].Bytes = fresh
+ return nil
}
-func opBytesZero(cx *EvalContext) {
+func opBytesZero(cx *EvalContext) error {
last := len(cx.stack) - 1
length := cx.stack[last].Uint
- if length > MaxStringSize {
- cx.err = fmt.Errorf("bzero attempted to create a too large string")
- return
+ if length > maxStringSize {
+ return fmt.Errorf("bzero attempted to create a too large string")
}
cx.stack[last].Bytes = make([]byte, length)
+ return nil
}
-func opIntConstBlock(cx *EvalContext) {
- cx.intc, cx.nextpc, cx.err = parseIntcblock(cx.program, cx.pc)
+func opIntConstBlock(cx *EvalContext) error {
+ var err error
+ cx.intc, cx.nextpc, err = parseIntcblock(cx.program, cx.pc+1)
+ return err
}
-func opIntConstN(cx *EvalContext, n uint) {
- if n >= uint(len(cx.intc)) {
- cx.err = fmt.Errorf("intc [%d] beyond %d constants", n, len(cx.intc))
- return
+func opIntConstN(cx *EvalContext, n byte) error {
+ if int(n) >= len(cx.intc) {
+ return fmt.Errorf("intc [%d] beyond %d constants", n, len(cx.intc))
}
cx.stack = append(cx.stack, stackValue{Uint: cx.intc[n]})
+ return nil
}
-func opIntConstLoad(cx *EvalContext) {
- n := uint(cx.program[cx.pc+1])
- opIntConstN(cx, n)
+func opIntConstLoad(cx *EvalContext) error {
+ n := cx.program[cx.pc+1]
+ return opIntConstN(cx, n)
}
-func opIntConst0(cx *EvalContext) {
- opIntConstN(cx, 0)
+func opIntConst0(cx *EvalContext) error {
+ return opIntConstN(cx, 0)
}
-func opIntConst1(cx *EvalContext) {
- opIntConstN(cx, 1)
+func opIntConst1(cx *EvalContext) error {
+ return opIntConstN(cx, 1)
}
-func opIntConst2(cx *EvalContext) {
- opIntConstN(cx, 2)
+func opIntConst2(cx *EvalContext) error {
+ return opIntConstN(cx, 2)
}
-func opIntConst3(cx *EvalContext) {
- opIntConstN(cx, 3)
+func opIntConst3(cx *EvalContext) error {
+ return opIntConstN(cx, 3)
}
-func opPushInt(cx *EvalContext) {
- val, bytesUsed := binary.Uvarint(cx.program[cx.pc+1:])
+func opPushInt(cx *EvalContext) error {
+ pos := cx.pc + 1
+ val, bytesUsed := binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- cx.err = fmt.Errorf("could not decode int at pc=%d", cx.pc+1)
- return
+ return fmt.Errorf("could not decode int at program[%d]", pos)
}
sv := stackValue{Uint: val}
cx.stack = append(cx.stack, sv)
- cx.nextpc = cx.pc + 1 + bytesUsed
+ cx.nextpc = pos + bytesUsed
+ return nil
}
-func opByteConstBlock(cx *EvalContext) {
- cx.bytec, cx.nextpc, cx.err = parseBytecBlock(cx.program, cx.pc)
+func opByteConstBlock(cx *EvalContext) error {
+ var err error
+ cx.bytec, cx.nextpc, err = parseBytecBlock(cx.program, cx.pc+1)
+ return err
}
-func opByteConstN(cx *EvalContext, n uint) {
+func opByteConstN(cx *EvalContext, n uint) error {
if n >= uint(len(cx.bytec)) {
- cx.err = fmt.Errorf("bytec [%d] beyond %d constants", n, len(cx.bytec))
- return
+ return fmt.Errorf("bytec [%d] beyond %d constants", n, len(cx.bytec))
}
cx.stack = append(cx.stack, stackValue{Bytes: cx.bytec[n]})
+ return nil
}
-func opByteConstLoad(cx *EvalContext) {
+func opByteConstLoad(cx *EvalContext) error {
n := uint(cx.program[cx.pc+1])
- opByteConstN(cx, n)
+ return opByteConstN(cx, n)
}
-func opByteConst0(cx *EvalContext) {
- opByteConstN(cx, 0)
+func opByteConst0(cx *EvalContext) error {
+ return opByteConstN(cx, 0)
}
-func opByteConst1(cx *EvalContext) {
- opByteConstN(cx, 1)
+func opByteConst1(cx *EvalContext) error {
+ return opByteConstN(cx, 1)
}
-func opByteConst2(cx *EvalContext) {
- opByteConstN(cx, 2)
+func opByteConst2(cx *EvalContext) error {
+ return opByteConstN(cx, 2)
}
-func opByteConst3(cx *EvalContext) {
- opByteConstN(cx, 3)
+func opByteConst3(cx *EvalContext) error {
+ return opByteConstN(cx, 3)
}
-func opPushBytes(cx *EvalContext) {
+func opPushBytes(cx *EvalContext) error {
pos := cx.pc + 1
length, bytesUsed := binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- cx.err = fmt.Errorf("could not decode length at pc=%d", pos)
- return
+ return fmt.Errorf("could not decode length at program[%d]", pos)
}
pos += bytesUsed
end := uint64(pos) + length
if end > uint64(len(cx.program)) || end < uint64(pos) {
- cx.err = fmt.Errorf("pushbytes too long at pc=%d", pos)
- return
+ return fmt.Errorf("pushbytes too long at program[%d]", pos)
}
sv := stackValue{Bytes: cx.program[pos:end]}
cx.stack = append(cx.stack, sv)
cx.nextpc = int(end)
+ return nil
}
-func opArgN(cx *EvalContext, n uint64) {
- if n >= uint64(len(cx.Txn.Lsig.Args)) {
- cx.err = fmt.Errorf("cannot load arg[%d] of %d", n, len(cx.Txn.Lsig.Args))
- return
+func opArgN(cx *EvalContext, n uint64) error {
+ if n >= uint64(len(cx.txn.Lsig.Args)) {
+ return fmt.Errorf("cannot load arg[%d] of %d", n, len(cx.txn.Lsig.Args))
}
- val := nilToEmpty(cx.Txn.Lsig.Args[n])
+ val := nilToEmpty(cx.txn.Lsig.Args[n])
cx.stack = append(cx.stack, stackValue{Bytes: val})
+ return nil
}
-func opArg(cx *EvalContext) {
+func opArg(cx *EvalContext) error {
n := uint64(cx.program[cx.pc+1])
- opArgN(cx, n)
+ return opArgN(cx, n)
}
-func opArg0(cx *EvalContext) {
- opArgN(cx, 0)
+func opArg0(cx *EvalContext) error {
+ return opArgN(cx, 0)
}
-func opArg1(cx *EvalContext) {
- opArgN(cx, 1)
+func opArg1(cx *EvalContext) error {
+ return opArgN(cx, 1)
}
-func opArg2(cx *EvalContext) {
- opArgN(cx, 2)
+func opArg2(cx *EvalContext) error {
+ return opArgN(cx, 2)
}
-func opArg3(cx *EvalContext) {
- opArgN(cx, 3)
+func opArg3(cx *EvalContext) error {
+ return opArgN(cx, 3)
}
-func opArgs(cx *EvalContext) {
+func opArgs(cx *EvalContext) error {
last := len(cx.stack) - 1
n := cx.stack[last].Uint
// Pop the index and push the result back on the stack.
cx.stack = cx.stack[:last]
- opArgN(cx, n)
+ return opArgN(cx, n)
}
func branchTarget(cx *EvalContext) (int, error) {
@@ -1873,7 +1951,7 @@ func branchTarget(cx *EvalContext) (int, error) {
branchTooFar = target >= len(cx.program) || target < 0
}
if branchTooFar {
- return 0, errors.New("branch target beyond end of program")
+ return 0, fmt.Errorf("branch target %d outside of program", target)
}
return target, nil
@@ -1881,12 +1959,11 @@ func branchTarget(cx *EvalContext) (int, error) {
// checks any branch that is {op} {int16 be offset}
func checkBranch(cx *EvalContext) error {
- cx.nextpc = cx.pc + 3
target, err := branchTarget(cx)
if err != nil {
return err
}
- if target < cx.nextpc {
+ if target < cx.pc+3 {
// If a branch goes backwards, we should have already noted that an instruction began at that location.
if _, ok := cx.instructionStarts[target]; !ok {
return fmt.Errorf("back branch target %d is not an aligned instruction", target)
@@ -1895,7 +1972,7 @@ func checkBranch(cx *EvalContext) error {
cx.branchTargets[target] = true
return nil
}
-func opBnz(cx *EvalContext) {
+func opBnz(cx *EvalContext) error {
last := len(cx.stack) - 1
cx.nextpc = cx.pc + 3
isNonZero := cx.stack[last].Uint != 0
@@ -1903,14 +1980,14 @@ func opBnz(cx *EvalContext) {
if isNonZero {
target, err := branchTarget(cx)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.nextpc = target
}
+ return nil
}
-func opBz(cx *EvalContext) {
+func opBz(cx *EvalContext) error {
last := len(cx.stack) - 1
cx.nextpc = cx.pc + 3
isZero := cx.stack[last].Uint == 0
@@ -1918,97 +1995,100 @@ func opBz(cx *EvalContext) {
if isZero {
target, err := branchTarget(cx)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.nextpc = target
}
+ return nil
}
-func opB(cx *EvalContext) {
+func opB(cx *EvalContext) error {
target, err := branchTarget(cx)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.nextpc = target
+ return nil
}
-func opCallSub(cx *EvalContext) {
+func opCallSub(cx *EvalContext) error {
cx.callstack = append(cx.callstack, cx.pc+3)
- opB(cx)
+ return opB(cx)
}
-func opRetSub(cx *EvalContext) {
+func opRetSub(cx *EvalContext) error {
top := len(cx.callstack) - 1
if top < 0 {
- cx.err = errors.New("retsub with empty callstack")
- return
+ return errors.New("retsub with empty callstack")
}
target := cx.callstack[top]
cx.callstack = cx.callstack[:top]
cx.nextpc = target
+ return nil
}
-func opPop(cx *EvalContext) {
+func opPop(cx *EvalContext) error {
last := len(cx.stack) - 1
cx.stack = cx.stack[:last]
+ return nil
}
-func opDup(cx *EvalContext) {
+func opDup(cx *EvalContext) error {
last := len(cx.stack) - 1
sv := cx.stack[last]
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opDup2(cx *EvalContext) {
+func opDup2(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
cx.stack = append(cx.stack, cx.stack[prev:]...)
+ return nil
}
-func opDig(cx *EvalContext) {
+func opDig(cx *EvalContext) error {
depth := int(cx.program[cx.pc+1])
idx := len(cx.stack) - 1 - depth
// Need to check stack size explicitly here because checkArgs() doesn't understand dig
// so we can't expect our stack to be prechecked.
if idx < 0 {
- cx.err = fmt.Errorf("dig %d with stack size = %d", depth, len(cx.stack))
- return
+ return fmt.Errorf("dig %d with stack size = %d", depth, len(cx.stack))
}
sv := cx.stack[idx]
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opCover(cx *EvalContext) {
+func opCover(cx *EvalContext) error {
depth := int(cx.program[cx.pc+1])
topIdx := len(cx.stack) - 1
idx := topIdx - depth
// Need to check stack size explicitly here because checkArgs() doesn't understand cover
// so we can't expect our stack to be prechecked.
if idx < 0 {
- cx.err = fmt.Errorf("cover %d with stack size = %d", depth, len(cx.stack))
- return
+ return fmt.Errorf("cover %d with stack size = %d", depth, len(cx.stack))
}
sv := cx.stack[topIdx]
copy(cx.stack[idx+1:], cx.stack[idx:])
cx.stack[idx] = sv
+ return nil
}
-func opUncover(cx *EvalContext) {
+func opUncover(cx *EvalContext) error {
depth := int(cx.program[cx.pc+1])
topIdx := len(cx.stack) - 1
idx := topIdx - depth
// Need to check stack size explicitly here because checkArgs() doesn't understand uncover
// so we can't expect our stack to be prechecked.
if idx < 0 {
- cx.err = fmt.Errorf("uncover %d with stack size = %d", depth, len(cx.stack))
- return
+ return fmt.Errorf("uncover %d with stack size = %d", depth, len(cx.stack))
}
sv := cx.stack[idx]
copy(cx.stack[idx:], cx.stack[idx+1:])
cx.stack[topIdx] = sv
+ return nil
}
func (cx *EvalContext) assetHoldingToValue(holding *basics.AssetHolding, fs assetHoldingFieldSpec) (sv stackValue, err error) {
@@ -2018,14 +2098,13 @@ func (cx *EvalContext) assetHoldingToValue(holding *basics.AssetHolding, fs asse
case AssetFrozen:
sv.Uint = boolToUint(holding.Frozen)
default:
- err = fmt.Errorf("invalid asset_holding_get field %d", fs.field)
- return
+ return sv, fmt.Errorf("invalid asset_holding_get field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
- err = fmt.Errorf("%s expected field type is %s but got %s", fs.field.String(), fs.ftype.String(), sv.argType().String())
+ if fs.ftype != sv.argType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
- return
+ return sv, nil
}
func (cx *EvalContext) assetParamsToValue(params *basics.AssetParams, creator basics.Address, fs assetParamsFieldSpec) (sv stackValue, err error) {
@@ -2055,14 +2134,13 @@ func (cx *EvalContext) assetParamsToValue(params *basics.AssetParams, creator ba
case AssetCreator:
sv.Bytes = creator[:]
default:
- err = fmt.Errorf("invalid asset_params_get field %d", fs.field)
- return
+ return sv, fmt.Errorf("invalid asset_params_get field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
- err = fmt.Errorf("%s expected field type is %s but got %s", fs.field.String(), fs.ftype.String(), sv.argType().String())
+ if fs.ftype != sv.argType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
- return
+ return sv, nil
}
func (cx *EvalContext) appParamsToValue(params *basics.AppParams, fs appParamsFieldSpec) (sv stackValue, err error) {
@@ -2083,14 +2161,13 @@ func (cx *EvalContext) appParamsToValue(params *basics.AppParams, fs appParamsFi
sv.Uint = uint64(params.ExtraProgramPages)
default:
// The pseudo fields AppCreator and AppAddress are handled before this method
- err = fmt.Errorf("invalid app_params_get field %d", fs.field)
- return
+ return sv, fmt.Errorf("invalid app_params_get field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
- err = fmt.Errorf("%s expected field type is %s but got %s", fs.field.String(), fs.ftype.String(), sv.argType().String())
+ if fs.ftype != sv.argType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
- return
+ return sv, nil
}
// TxnFieldToTealValue is a thin wrapper for txnFieldToStack for external use
@@ -2100,7 +2177,10 @@ func TxnFieldToTealValue(txn *transactions.Transaction, groupIndex int, field Tx
}
var cx EvalContext
stxnad := &transactions.SignedTxnWithAD{SignedTxn: transactions.SignedTxn{Txn: *txn}}
- fs := txnFieldSpecByField[field]
+ fs, ok := txnFieldSpecByField(field)
+ if !ok {
+ return basics.TealValue{}, fmt.Errorf("invalid field %s", field)
+ }
sv, err := cx.txnFieldToStack(stxnad, &fs, arrayFieldIdx, groupIndex, inner)
return sv.toTealValue(), err
}
@@ -2119,8 +2199,8 @@ func (cx *EvalContext) getTxID(txn *transactions.Transaction, groupIndex int) tr
txid, ok := cx.EvalParams.txidCache[groupIndex]
if !ok {
if cx.caller != nil {
- innerOffset := len(cx.caller.Txn.EvalDelta.InnerTxns)
- txid = txn.InnerID(cx.caller.Txn.ID(), innerOffset+groupIndex)
+ innerOffset := len(cx.caller.txn.EvalDelta.InnerTxns)
+ txid = txn.InnerID(cx.caller.txn.ID(), innerOffset+groupIndex)
} else {
txid = txn.ID()
}
@@ -2132,7 +2212,7 @@ func (cx *EvalContext) getTxID(txn *transactions.Transaction, groupIndex int) tr
func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *txnFieldSpec, arrayFieldIdx uint64, groupIndex int, inner bool) (sv stackValue, err error) {
if fs.effects {
- if cx.runModeFlags == runModeSignature {
+ if cx.runModeFlags == modeSig {
return sv, fmt.Errorf("txn[%s] not allowed in current mode", fs.field)
}
if cx.version < txnEffectsVersion && !inner {
@@ -2182,7 +2262,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
case Type:
sv.Bytes = []byte(txn.Type)
case TypeEnum:
- sv.Uint = txnTypeIndexes[string(txn.Type)]
+ sv.Uint = txnTypeMap[string(txn.Type)]
case XferAsset:
sv.Uint = uint64(txn.XferAsset)
case AssetAmount:
@@ -2207,8 +2287,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
case ApplicationArgs:
if arrayFieldIdx >= uint64(len(txn.ApplicationArgs)) {
- err = fmt.Errorf("invalid ApplicationArgs index %d", arrayFieldIdx)
- return
+ return sv, fmt.Errorf("invalid ApplicationArgs index %d", arrayFieldIdx)
}
sv.Bytes = nilToEmpty(txn.ApplicationArgs[arrayFieldIdx])
case NumAppArgs:
@@ -2220,8 +2299,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
sv.Bytes = txn.Sender[:]
} else {
if arrayFieldIdx > uint64(len(txn.Accounts)) {
- err = fmt.Errorf("invalid Accounts index %d", arrayFieldIdx)
- return
+ return sv, fmt.Errorf("invalid Accounts index %d", arrayFieldIdx)
}
sv.Bytes = txn.Accounts[arrayFieldIdx-1][:]
}
@@ -2230,8 +2308,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
case Assets:
if arrayFieldIdx >= uint64(len(txn.ForeignAssets)) {
- err = fmt.Errorf("invalid Assets index %d", arrayFieldIdx)
- return
+ return sv, fmt.Errorf("invalid Assets index %d", arrayFieldIdx)
}
sv.Uint = uint64(txn.ForeignAssets[arrayFieldIdx])
case NumAssets:
@@ -2243,8 +2320,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
sv.Uint = uint64(txn.ApplicationID)
} else {
if arrayFieldIdx > uint64(len(txn.ForeignApps)) {
- err = fmt.Errorf("invalid Applications index %d", arrayFieldIdx)
- return
+ return sv, fmt.Errorf("invalid Applications index %d", arrayFieldIdx)
}
sv.Uint = uint64(txn.ForeignApps[arrayFieldIdx-1])
}
@@ -2302,8 +2378,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
case Logs:
if arrayFieldIdx >= uint64(len(stxn.EvalDelta.Logs)) {
- err = fmt.Errorf("invalid Logs index %d", arrayFieldIdx)
- return
+ return sv, fmt.Errorf("invalid Logs index %d", arrayFieldIdx)
}
sv.Bytes = nilToEmpty([]byte(stxn.EvalDelta.Logs[arrayFieldIdx]))
case NumLogs:
@@ -2320,18 +2395,17 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
sv.Uint = uint64(stxn.ApplyData.ApplicationID)
default:
- err = fmt.Errorf("invalid txn field %s", fs.field)
- return
+ return sv, fmt.Errorf("invalid txn field %s", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
- err = fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
+ if fs.ftype != sv.argType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
- return
+ return sv, nil
}
func (cx *EvalContext) fetchField(field TxnField, expectArray bool) (*txnFieldSpec, error) {
- fs, ok := txnFieldSpecByField[field]
+ fs, ok := txnFieldSpecByField(field)
if !ok || fs.version > cx.version {
return nil, fmt.Errorf("invalid txn field %d", field)
}
@@ -2365,12 +2439,12 @@ func (cx *EvalContext) opTxnImpl(gi uint64, src txnSource, field TxnField, ai ui
var group []transactions.SignedTxnWithAD
switch src {
case srcGroup:
- if fs.effects && gi >= uint64(cx.GroupIndex) {
+ if fs.effects && gi >= uint64(cx.groupIndex) {
// Test mode so that error is clearer
- if cx.runModeFlags == runModeSignature {
+ if cx.runModeFlags == modeSig {
return sv, fmt.Errorf("txn[%s] not allowed in current mode", fs.field)
}
- return sv, fmt.Errorf("txn effects can only be read from past txns %d %d", gi, cx.GroupIndex)
+ return sv, fmt.Errorf("txn effects can only be read from past txns %d %d", gi, cx.groupIndex)
}
group = cx.TxnGroup
case srcInner:
@@ -2394,77 +2468,77 @@ func (cx *EvalContext) opTxnImpl(gi uint64, src txnSource, field TxnField, ai ui
return sv, nil
}
-func opTxn(cx *EvalContext) {
- gi := uint64(cx.GroupIndex)
+func opTxn(cx *EvalContext) error {
+ gi := uint64(cx.groupIndex)
field := TxnField(cx.program[cx.pc+1])
sv, err := cx.opTxnImpl(gi, srcGroup, field, 0, false)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opTxna(cx *EvalContext) {
- gi := uint64(cx.GroupIndex)
+func opTxna(cx *EvalContext) error {
+ gi := uint64(cx.groupIndex)
field := TxnField(cx.program[cx.pc+1])
ai := uint64(cx.program[cx.pc+2])
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opTxnas(cx *EvalContext) {
+func opTxnas(cx *EvalContext) error {
last := len(cx.stack) - 1
- gi := uint64(cx.GroupIndex)
+ gi := uint64(cx.groupIndex)
field := TxnField(cx.program[cx.pc+1])
ai := cx.stack[last].Uint
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = sv
+ return nil
}
-func opGtxn(cx *EvalContext) {
+func opGtxn(cx *EvalContext) error {
gi := uint64(cx.program[cx.pc+1])
field := TxnField(cx.program[cx.pc+2])
sv, err := cx.opTxnImpl(gi, srcGroup, field, 0, false)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opGtxna(cx *EvalContext) {
+func opGtxna(cx *EvalContext) error {
gi := uint64(cx.program[cx.pc+1])
field := TxnField(cx.program[cx.pc+2])
ai := uint64(cx.program[cx.pc+3])
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opGtxnas(cx *EvalContext) {
+func opGtxnas(cx *EvalContext) error {
last := len(cx.stack) - 1
gi := uint64(cx.program[cx.pc+1])
@@ -2473,14 +2547,14 @@ func opGtxnas(cx *EvalContext) {
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = sv
+ return nil
}
-func opGtxns(cx *EvalContext) {
+func opGtxns(cx *EvalContext) error {
last := len(cx.stack) - 1
gi := cx.stack[last].Uint
@@ -2488,14 +2562,14 @@ func opGtxns(cx *EvalContext) {
sv, err := cx.opTxnImpl(gi, srcGroup, field, 0, false)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = sv
+ return nil
}
-func opGtxnsa(cx *EvalContext) {
+func opGtxnsa(cx *EvalContext) error {
last := len(cx.stack) - 1
gi := cx.stack[last].Uint
@@ -2504,14 +2578,14 @@ func opGtxnsa(cx *EvalContext) {
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = sv
+ return nil
}
-func opGtxnsas(cx *EvalContext) {
+func opGtxnsas(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
@@ -2521,39 +2595,39 @@ func opGtxnsas(cx *EvalContext) {
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[prev] = sv
cx.stack = cx.stack[:last]
+ return nil
}
-func opItxn(cx *EvalContext) {
+func opItxn(cx *EvalContext) error {
field := TxnField(cx.program[cx.pc+1])
sv, err := cx.opTxnImpl(0, srcInner, field, 0, false)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opItxna(cx *EvalContext) {
+func opItxna(cx *EvalContext) error {
field := TxnField(cx.program[cx.pc+1])
ai := uint64(cx.program[cx.pc+2])
sv, err := cx.opTxnImpl(0, srcInner, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opItxnas(cx *EvalContext) {
+func opItxnas(cx *EvalContext) error {
last := len(cx.stack) - 1
field := TxnField(cx.program[cx.pc+1])
@@ -2561,15 +2635,15 @@ func opItxnas(cx *EvalContext) {
sv, err := cx.opTxnImpl(0, srcInner, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = sv
+ return nil
}
func (cx *EvalContext) getLastInner() []transactions.SignedTxnWithAD {
- inners := cx.Txn.EvalDelta.InnerTxns
+ inners := cx.txn.EvalDelta.InnerTxns
// If there are no inners yet, return empty slice, which will result in error
if len(inners) == 0 {
return inners
@@ -2578,7 +2652,7 @@ func (cx *EvalContext) getLastInner() []transactions.SignedTxnWithAD {
}
func (cx *EvalContext) getLastInnerGroup() []transactions.SignedTxnWithAD {
- inners := cx.Txn.EvalDelta.InnerTxns
+ inners := cx.txn.EvalDelta.InnerTxns
// If there are no inners yet, return empty slice, which will result in error
if len(inners) == 0 {
return inners
@@ -2598,34 +2672,34 @@ func (cx *EvalContext) getLastInnerGroup() []transactions.SignedTxnWithAD {
return inners
}
-func opGitxn(cx *EvalContext) {
+func opGitxn(cx *EvalContext) error {
gi := uint64(cx.program[cx.pc+1])
field := TxnField(cx.program[cx.pc+2])
sv, err := cx.opTxnImpl(gi, srcInnerGroup, field, 0, false)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opGitxna(cx *EvalContext) {
+func opGitxna(cx *EvalContext) error {
gi := uint64(cx.program[cx.pc+1])
field := TxnField(cx.program[cx.pc+2])
ai := uint64(cx.program[cx.pc+3])
sv, err := cx.opTxnImpl(gi, srcInnerGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opGitxnas(cx *EvalContext) {
+func opGitxnas(cx *EvalContext) error {
last := len(cx.stack) - 1
gi := uint64(cx.program[cx.pc+1])
@@ -2634,31 +2708,27 @@ func opGitxnas(cx *EvalContext) {
sv, err := cx.opTxnImpl(gi, srcInnerGroup, field, ai, true)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = sv
+ return nil
}
func opGaidImpl(cx *EvalContext, giw uint64, opName string) (sv stackValue, err error) {
if giw >= uint64(len(cx.TxnGroup)) {
- err = fmt.Errorf("%s lookup TxnGroup[%d] but it only has %d", opName, giw, len(cx.TxnGroup))
- return
+ return sv, fmt.Errorf("%s lookup TxnGroup[%d] but it only has %d", opName, giw, len(cx.TxnGroup))
}
// Is now assured smalled than a len() so fits in int.
gi := int(giw)
- if gi > cx.GroupIndex {
- err = fmt.Errorf("%s can't get creatable ID of txn ahead of the current one (index %d) in the transaction group", opName, gi)
- return
+ if gi > cx.groupIndex {
+ return sv, fmt.Errorf("%s can't get creatable ID of txn ahead of the current one (index %d) in the transaction group", opName, gi)
}
- if gi == cx.GroupIndex {
- err = fmt.Errorf("%s is only for accessing creatable IDs of previous txns, use `global CurrentApplicationID` instead to access the current app's creatable ID", opName)
- return
+ if gi == cx.groupIndex {
+ return sv, fmt.Errorf("%s is only for accessing creatable IDs of previous txns, use `global CurrentApplicationID` instead to access the current app's creatable ID", opName)
}
if txn := cx.TxnGroup[gi].Txn; !(txn.Type == protocol.ApplicationCallTx || txn.Type == protocol.AssetConfigTx) {
- err = fmt.Errorf("can't use %s on txn that is not an app call nor an asset config txn with index %d", opName, gi)
- return
+ return sv, fmt.Errorf("can't use %s on txn that is not an app call nor an asset config txn with index %d", opName, gi)
}
if aid := cx.TxnGroup[gi].ApplyData.ConfigAsset; aid != 0 {
@@ -2667,51 +2737,41 @@ func opGaidImpl(cx *EvalContext, giw uint64, opName string) (sv stackValue, err
if aid := cx.TxnGroup[gi].ApplyData.ApplicationID; aid != 0 {
return stackValue{Uint: uint64(aid)}, nil
}
- err = fmt.Errorf("%s: index %d did not create anything", opName, gi)
- return
+ return sv, fmt.Errorf("%s: index %d did not create anything", opName, gi)
}
-func opGaid(cx *EvalContext) {
+func opGaid(cx *EvalContext) error {
gi := uint64(cx.program[cx.pc+1])
sv, err := opGaidImpl(cx, gi, "gaid")
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opGaids(cx *EvalContext) {
+func opGaids(cx *EvalContext) error {
last := len(cx.stack) - 1
gi := cx.stack[last].Uint
sv, err := opGaidImpl(cx, gi, "gaids")
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = sv
+ return nil
}
-func (cx *EvalContext) getRound() (rnd uint64, err error) {
- if cx.Ledger == nil {
- err = fmt.Errorf("ledger not available")
- return
- }
- return uint64(cx.Ledger.Round()), nil
+func (cx *EvalContext) getRound() uint64 {
+ return uint64(cx.Ledger.Round())
}
-func (cx *EvalContext) getLatestTimestamp() (timestamp uint64, err error) {
- if cx.Ledger == nil {
- err = fmt.Errorf("ledger not available")
- return
- }
+func (cx *EvalContext) getLatestTimestamp() (uint64, error) {
ts := cx.Ledger.LatestTimestamp()
if ts < 0 {
- err = fmt.Errorf("latest timestamp %d < 0", ts)
- return
+ return 0, fmt.Errorf("latest timestamp %d < 0", ts)
}
return uint64(ts), nil
}
@@ -2731,9 +2791,6 @@ func (cx *EvalContext) getApplicationAddress(app basics.AppIndex) basics.Address
}
func (cx *EvalContext) getCreatorAddress() ([]byte, error) {
- if cx.Ledger == nil {
- return nil, fmt.Errorf("ledger not available")
- }
_, creator, err := cx.Ledger.AppParams(cx.appID)
if err != nil {
return nil, fmt.Errorf("No params for current app")
@@ -2758,7 +2815,7 @@ func (cx *EvalContext) globalFieldToValue(fs globalFieldSpec) (sv stackValue, er
case LogicSigVersion:
sv.Uint = cx.Proto.LogicSigVersion
case Round:
- sv.Uint, err = cx.getRound()
+ sv.Uint = cx.getRound()
case LatestTimestamp:
sv.Uint, err = cx.getLatestTimestamp()
case CurrentApplicationID:
@@ -2769,7 +2826,7 @@ func (cx *EvalContext) globalFieldToValue(fs globalFieldSpec) (sv stackValue, er
case CreatorAddress:
sv.Bytes, err = cx.getCreatorAddress()
case GroupID:
- sv.Bytes = cx.Txn.Txn.Group[:]
+ sv.Bytes = cx.txn.Txn.Group[:]
case OpcodeBudget:
sv.Uint = uint64(cx.remainingBudget())
case CallerApplicationID:
@@ -2789,32 +2846,30 @@ func (cx *EvalContext) globalFieldToValue(fs globalFieldSpec) (sv stackValue, er
err = fmt.Errorf("invalid global field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
- err = fmt.Errorf("%s expected field type is %s but got %s", fs.field.String(), fs.ftype.String(), sv.argType().String())
+ if fs.ftype != sv.argType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
return sv, err
}
-func opGlobal(cx *EvalContext) {
+func opGlobal(cx *EvalContext) error {
globalField := GlobalField(cx.program[cx.pc+1])
- fs, ok := globalFieldSpecByField[globalField]
+ fs, ok := globalFieldSpecByField(globalField)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid global field %s", globalField)
- return
+ return fmt.Errorf("invalid global field %s", globalField)
}
if (cx.runModeFlags & fs.mode) == 0 {
- cx.err = fmt.Errorf("global[%s] not allowed in current mode", globalField)
- return
+ return fmt.Errorf("global[%s] not allowed in current mode", globalField)
}
sv, err := cx.globalFieldToValue(fs)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, sv)
+ return nil
}
// Msg is data meant to be signed and then verified with the
@@ -2838,22 +2893,20 @@ func (cx *EvalContext) programHash() crypto.Digest {
return cx.programHashCached
}
-func opEd25519Verify(cx *EvalContext) {
+func opEd25519Verify(cx *EvalContext) error {
last := len(cx.stack) - 1 // index of PK
prev := last - 1 // index of signature
pprev := prev - 1 // index of data
var sv crypto.SignatureVerifier
if len(cx.stack[last].Bytes) != len(sv) {
- cx.err = errors.New("invalid public key")
- return
+ return errors.New("invalid public key")
}
copy(sv[:], cx.stack[last].Bytes)
var sig crypto.Signature
if len(cx.stack[prev].Bytes) != len(sig) {
- cx.err = errors.New("invalid signature")
- return
+ return errors.New("invalid signature")
}
copy(sig[:], cx.stack[prev].Bytes)
@@ -2861,109 +2914,56 @@ func opEd25519Verify(cx *EvalContext) {
cx.stack[pprev].Uint = boolToUint(sv.Verify(msg, sig, cx.Proto.EnableBatchVerification))
cx.stack[pprev].Bytes = nil
cx.stack = cx.stack[:prev]
+ return nil
}
-func opEd25519VerifyBare(cx *EvalContext) {
+func opEd25519VerifyBare(cx *EvalContext) error {
last := len(cx.stack) - 1 // index of PK
prev := last - 1 // index of signature
pprev := prev - 1 // index of data
var sv crypto.SignatureVerifier
if len(cx.stack[last].Bytes) != len(sv) {
- cx.err = errors.New("invalid public key")
- return
+ return errors.New("invalid public key")
}
copy(sv[:], cx.stack[last].Bytes)
var sig crypto.Signature
if len(cx.stack[prev].Bytes) != len(sig) {
- cx.err = errors.New("invalid signature")
- return
+ return errors.New("invalid signature")
}
copy(sig[:], cx.stack[prev].Bytes)
cx.stack[pprev].Uint = boolToUint(sv.VerifyBytes(cx.stack[pprev].Bytes, sig, cx.Proto.EnableBatchVerification))
cx.stack[pprev].Bytes = nil
cx.stack = cx.stack[:prev]
+ return nil
}
-// leadingZeros needs to be replaced by big.Int.FillBytes
func leadingZeros(size int, b *big.Int) ([]byte, error) {
- data := b.Bytes()
- if size < len(data) {
- return nil, fmt.Errorf("insufficient buffer size: %d < %d", size, len(data))
- }
- if size == len(data) {
- return data, nil
+ byteLength := (b.BitLen() + 7) / 8
+ if size < byteLength {
+ return nil, fmt.Errorf("insufficient buffer size: %d < %d", size, byteLength)
}
-
buf := make([]byte, size)
- copy(buf[size-len(data):], data)
+ b.FillBytes(buf)
return buf, nil
}
-// polynomial returns x³ - 3x + b.
-//
-// TODO: remove this when go-algorand is updated to go 1.15+
-func polynomial(curve *elliptic.CurveParams, x *big.Int) *big.Int {
- x3 := new(big.Int).Mul(x, x)
- x3.Mul(x3, x)
-
- threeX := new(big.Int).Lsh(x, 1)
- threeX.Add(threeX, x)
-
- x3.Sub(x3, threeX)
- x3.Add(x3, curve.B)
- x3.Mod(x3, curve.P)
-
- return x3
-}
-
-// unmarshalCompressed converts a point, serialized by MarshalCompressed, into an x, y pair.
-// It is an error if the point is not in compressed form or is not on the curve.
-// On error, x = nil.
-//
-// TODO: remove this and replace usage with elliptic.UnmarshallCompressed when go-algorand is
-// updated to go 1.15+
-func unmarshalCompressed(curve elliptic.Curve, data []byte) (x, y *big.Int) {
- byteLen := (curve.Params().BitSize + 7) / 8
- if len(data) != 1+byteLen {
- return nil, nil
- }
- if data[0] != 2 && data[0] != 3 { // compressed form
- return nil, nil
- }
- p := curve.Params().P
- x = new(big.Int).SetBytes(data[1:])
- if x.Cmp(p) >= 0 {
- return nil, nil
- }
- // y² = x³ - 3x + b
- y = polynomial(curve.Params(), x)
- y = y.ModSqrt(y, p)
- if y == nil {
- return nil, nil
- }
- if byte(y.Bit(0)) != data[0]&1 {
- y.Neg(y).Mod(y, p)
- }
- if !curve.IsOnCurve(x, y) {
- return nil, nil
- }
- return
+var ecdsaVerifyCosts = []int{
+ Secp256k1: 1700,
+ Secp256r1: 2500,
}
-func opEcdsaVerify(cx *EvalContext) {
+func opEcdsaVerify(cx *EvalContext) error {
ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField[ecdsaCurve]
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid curve %d", ecdsaCurve)
- return
+ return fmt.Errorf("invalid curve %d", ecdsaCurve)
}
if fs.field != Secp256k1 && fs.field != Secp256r1 {
- cx.err = fmt.Errorf("unsupported curve %d", fs.field)
- return
+ return fmt.Errorf("unsupported curve %d", fs.field)
}
last := len(cx.stack) - 1 // index of PK y
@@ -2979,8 +2979,7 @@ func opEcdsaVerify(cx *EvalContext) {
msg := cx.stack[fifth].Bytes
if len(msg) != 32 {
- cx.err = fmt.Errorf("the signed data must be 32 bytes long, not %d", len(msg))
- return
+ return fmt.Errorf("the signed data must be 32 bytes long, not %d", len(msg))
}
x := new(big.Int).SetBytes(pkX)
@@ -3009,19 +3008,23 @@ func opEcdsaVerify(cx *EvalContext) {
cx.stack[fifth].Uint = boolToUint(result)
cx.stack[fifth].Bytes = nil
cx.stack = cx.stack[:fourth]
+ return nil
}
-func opEcdsaPkDecompress(cx *EvalContext) {
+var ecdsaDecompressCosts = []int{
+ Secp256k1: 650,
+ Secp256r1: 2400,
+}
+
+func opEcdsaPkDecompress(cx *EvalContext) error {
ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField[ecdsaCurve]
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid curve %d", ecdsaCurve)
- return
+ return fmt.Errorf("invalid curve %d", ecdsaCurve)
}
if fs.field != Secp256k1 && fs.field != Secp256r1 {
- cx.err = fmt.Errorf("unsupported curve %d", fs.field)
- return
+ return fmt.Errorf("unsupported curve %d", fs.field)
}
last := len(cx.stack) - 1 // compressed PK
@@ -3031,14 +3034,12 @@ func opEcdsaPkDecompress(cx *EvalContext) {
if fs.field == Secp256k1 {
x, y = secp256k1.DecompressPubkey(pubkey)
if x == nil {
- cx.err = fmt.Errorf("invalid pubkey")
- return
+ return fmt.Errorf("invalid pubkey")
}
} else if fs.field == Secp256r1 {
- x, y = unmarshalCompressed(elliptic.P256(), pubkey)
+ x, y = elliptic.UnmarshalCompressed(elliptic.P256(), pubkey)
if x == nil {
- cx.err = fmt.Errorf("invalid compressed pubkey")
- return
+ return fmt.Errorf("invalid compressed pubkey")
}
}
@@ -3046,31 +3047,28 @@ func opEcdsaPkDecompress(cx *EvalContext) {
cx.stack[last].Uint = 0
cx.stack[last].Bytes, err = leadingZeros(32, x)
if err != nil {
- cx.err = fmt.Errorf("x component zeroing failed: %s", err.Error())
- return
+ return fmt.Errorf("x component zeroing failed: %s", err.Error())
}
var sv stackValue
sv.Bytes, err = leadingZeros(32, y)
if err != nil {
- cx.err = fmt.Errorf("y component zeroing failed: %s", err.Error())
- return
+ return fmt.Errorf("y component zeroing failed: %s", err.Error())
}
cx.stack = append(cx.stack, sv)
+ return nil
}
-func opEcdsaPkRecover(cx *EvalContext) {
+func opEcdsaPkRecover(cx *EvalContext) error {
ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField[ecdsaCurve]
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid curve %d", ecdsaCurve)
- return
+ return fmt.Errorf("invalid curve %d", ecdsaCurve)
}
if fs.field != Secp256k1 {
- cx.err = fmt.Errorf("unsupported curve %d", fs.field)
- return
+ return fmt.Errorf("unsupported curve %d", fs.field)
}
last := len(cx.stack) - 1 // index of signature s
@@ -3084,8 +3082,7 @@ func opEcdsaPkRecover(cx *EvalContext) {
msg := cx.stack[fourth].Bytes
if recid > 3 {
- cx.err = fmt.Errorf("invalid recovery id: %d", recid)
- return
+ return fmt.Errorf("invalid recovery id: %d", recid)
}
signature := make([]byte, 0, len(sigR)+len(sigS)+1)
@@ -3095,62 +3092,61 @@ func opEcdsaPkRecover(cx *EvalContext) {
pk, err := secp256k1.RecoverPubkey(msg, signature)
if err != nil {
- cx.err = fmt.Errorf("pubkey recover failed: %s", err.Error())
- return
+ return fmt.Errorf("pubkey recover failed: %s", err.Error())
}
x, y := secp256k1.S256().Unmarshal(pk)
if x == nil {
- cx.err = fmt.Errorf("pubkey unmarshal failed")
- return
+ return fmt.Errorf("pubkey unmarshal failed")
}
cx.stack[fourth].Uint = 0
cx.stack[fourth].Bytes, err = leadingZeros(32, x)
if err != nil {
- cx.err = fmt.Errorf("x component zeroing failed: %s", err.Error())
- return
+ return fmt.Errorf("x component zeroing failed: %s", err.Error())
}
cx.stack[pprev].Uint = 0
cx.stack[pprev].Bytes, err = leadingZeros(32, y)
if err != nil {
- cx.err = fmt.Errorf("y component zeroing failed: %s", err.Error())
- return
+ return fmt.Errorf("y component zeroing failed: %s", err.Error())
}
cx.stack = cx.stack[:prev]
+ return nil
}
-func opLoad(cx *EvalContext) {
+func opLoad(cx *EvalContext) error {
n := cx.program[cx.pc+1]
cx.stack = append(cx.stack, cx.scratch[n])
+ return nil
}
-func opLoads(cx *EvalContext) {
+func opLoads(cx *EvalContext) error {
last := len(cx.stack) - 1
n := cx.stack[last].Uint
if n >= uint64(len(cx.scratch)) {
- cx.err = fmt.Errorf("invalid Scratch index %d", n)
- return
+ return fmt.Errorf("invalid Scratch index %d", n)
}
cx.stack[last] = cx.scratch[n]
+ return nil
}
-func opStore(cx *EvalContext) {
+func opStore(cx *EvalContext) error {
n := cx.program[cx.pc+1]
last := len(cx.stack) - 1
cx.scratch[n] = cx.stack[last]
cx.stack = cx.stack[:last]
+ return nil
}
-func opStores(cx *EvalContext) {
+func opStores(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
n := cx.stack[prev].Uint
if n >= uint64(len(cx.scratch)) {
- cx.err = fmt.Errorf("invalid Scratch index %d", n)
- return
+ return fmt.Errorf("invalid Scratch index %d", n)
}
cx.scratch[n] = cx.stack[last]
cx.stack = cx.stack[:prev]
+ return nil
}
func opGloadImpl(cx *EvalContext, gi int, scratchIdx byte, opName string) (stackValue, error) {
@@ -3164,70 +3160,67 @@ func opGloadImpl(cx *EvalContext, gi int, scratchIdx byte, opName string) (stack
if cx.TxnGroup[gi].Txn.Type != protocol.ApplicationCallTx {
return none, fmt.Errorf("can't use %s on non-app call txn with index %d", opName, gi)
}
- if gi == cx.GroupIndex {
+ if gi == cx.groupIndex {
return none, fmt.Errorf("can't use %s on self, use load instead", opName)
}
- if gi > cx.GroupIndex {
+ if gi > cx.groupIndex {
return none, fmt.Errorf("%s can't get future scratch space from txn with index %d", opName, gi)
}
return cx.pastScratch[gi][scratchIdx], nil
}
-func opGload(cx *EvalContext) {
+func opGload(cx *EvalContext) error {
gi := int(cx.program[cx.pc+1])
scratchIdx := cx.program[cx.pc+2]
scratchValue, err := opGloadImpl(cx, gi, scratchIdx, "gload")
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = append(cx.stack, scratchValue)
+ return nil
}
-func opGloads(cx *EvalContext) {
+func opGloads(cx *EvalContext) error {
last := len(cx.stack) - 1
gi := cx.stack[last].Uint
if gi >= uint64(len(cx.TxnGroup)) {
- cx.err = fmt.Errorf("gloads lookup TxnGroup[%d] but it only has %d", gi, len(cx.TxnGroup))
- return
+ return fmt.Errorf("gloads lookup TxnGroup[%d] but it only has %d", gi, len(cx.TxnGroup))
}
scratchIdx := cx.program[cx.pc+1]
scratchValue, err := opGloadImpl(cx, int(gi), scratchIdx, "gloads")
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = scratchValue
+ return nil
}
-func opGloadss(cx *EvalContext) {
+func opGloadss(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
gi := cx.stack[prev].Uint
if gi >= uint64(len(cx.TxnGroup)) {
- cx.err = fmt.Errorf("gloadss lookup TxnGroup[%d] but it only has %d", gi, len(cx.TxnGroup))
- return
+ return fmt.Errorf("gloadss lookup TxnGroup[%d] but it only has %d", gi, len(cx.TxnGroup))
}
scratchIdx := cx.stack[last].Uint
if scratchIdx >= 256 {
- cx.err = fmt.Errorf("gloadss scratch index >= 256 (%d)", scratchIdx)
- return
+ return fmt.Errorf("gloadss scratch index >= 256 (%d)", scratchIdx)
}
scratchValue, err := opGloadImpl(cx, int(gi), byte(scratchIdx), "gloadss")
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[prev] = scratchValue
cx.stack = cx.stack[:last]
+ return nil
}
-func opConcat(cx *EvalContext) {
+func opConcat(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
a := cx.stack[prev].Bytes
@@ -3238,45 +3231,44 @@ func opConcat(cx *EvalContext) {
copy(newvalue[len(a):], b)
cx.stack[prev].Bytes = newvalue
cx.stack = cx.stack[:last]
+ return nil
}
-func substring(x []byte, start, end int) (out []byte, err error) {
- out = x
+func substring(x []byte, start, end int) ([]byte, error) {
if end < start {
- err = errors.New("substring end before start")
- return
+ return nil, errors.New("substring end before start")
}
if start > len(x) || end > len(x) {
- err = errors.New("substring range beyond length of string")
- return
+ return nil, errors.New("substring range beyond length of string")
}
- out = x[start:end]
- err = nil
- return
+ return x[start:end], nil
}
-func opSubstring(cx *EvalContext) {
+func opSubstring(cx *EvalContext) error {
last := len(cx.stack) - 1
start := cx.program[cx.pc+1]
end := cx.program[cx.pc+2]
- cx.stack[last].Bytes, cx.err = substring(cx.stack[last].Bytes, int(start), int(end))
+ bytes, err := substring(cx.stack[last].Bytes, int(start), int(end))
+ cx.stack[last].Bytes = bytes
+ return err
}
-func opSubstring3(cx *EvalContext) {
+func opSubstring3(cx *EvalContext) error {
last := len(cx.stack) - 1 // end
prev := last - 1 // start
pprev := prev - 1 // bytes
start := cx.stack[prev].Uint
end := cx.stack[last].Uint
if start > math.MaxInt32 || end > math.MaxInt32 {
- cx.err = errors.New("substring range beyond length of string")
- return
+ return errors.New("substring range beyond length of string")
}
- cx.stack[pprev].Bytes, cx.err = substring(cx.stack[pprev].Bytes, int(start), int(end))
+ bytes, err := substring(cx.stack[pprev].Bytes, int(start), int(end))
+ cx.stack[pprev].Bytes = bytes
cx.stack = cx.stack[:prev]
+ return err
}
-func opGetBit(cx *EvalContext) {
+func opGetBit(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
idx := cx.stack[last].Uint
@@ -3285,8 +3277,7 @@ func opGetBit(cx *EvalContext) {
var bit uint64
if target.argType() == StackUint64 {
if idx > 63 {
- cx.err = errors.New("getbit index > 63 with with Uint")
- return
+ return errors.New("getbit index > 63 with with Uint")
}
mask := uint64(1) << idx
bit = (target.Uint & mask) >> idx
@@ -3294,8 +3285,7 @@ func opGetBit(cx *EvalContext) {
// indexing into a byteslice
byteIdx := idx / 8
if byteIdx >= uint64(len(target.Bytes)) {
- cx.err = errors.New("getbit index beyond byteslice")
- return
+ return errors.New("getbit index beyond byteslice")
}
byteVal := target.Bytes[byteIdx]
@@ -3311,9 +3301,10 @@ func opGetBit(cx *EvalContext) {
cx.stack[prev].Uint = bit
cx.stack[prev].Bytes = nil
cx.stack = cx.stack[:last]
+ return nil
}
-func opSetBit(cx *EvalContext) {
+func opSetBit(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
pprev := prev - 1
@@ -3323,14 +3314,12 @@ func opSetBit(cx *EvalContext) {
target := cx.stack[pprev]
if bit > 1 {
- cx.err = errors.New("setbit value > 1")
- return
+ return errors.New("setbit value > 1")
}
if target.argType() == StackUint64 {
if idx > 63 {
- cx.err = errors.New("setbit index > 63 with Uint")
- return
+ return errors.New("setbit index > 63 with Uint")
}
mask := uint64(1) << idx
if bit == uint64(1) {
@@ -3342,8 +3331,7 @@ func opSetBit(cx *EvalContext) {
// indexing into a byteslice
byteIdx := idx / 8
if byteIdx >= uint64(len(target.Bytes)) {
- cx.err = errors.New("setbit index beyond byteslice")
- return
+ return errors.New("setbit index beyond byteslice")
}
bitIdx := idx % 8
@@ -3363,9 +3351,10 @@ func opSetBit(cx *EvalContext) {
cx.stack[pprev].Bytes = scratch
}
cx.stack = cx.stack[:prev]
+ return nil
}
-func opGetByte(cx *EvalContext) {
+func opGetByte(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
@@ -3373,44 +3362,40 @@ func opGetByte(cx *EvalContext) {
target := cx.stack[prev]
if idx >= uint64(len(target.Bytes)) {
- cx.err = errors.New("getbyte index beyond array length")
- return
+ return errors.New("getbyte index beyond array length")
}
cx.stack[prev].Uint = uint64(target.Bytes[idx])
cx.stack[prev].Bytes = nil
cx.stack = cx.stack[:last]
+ return nil
}
-func opSetByte(cx *EvalContext) {
+func opSetByte(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
pprev := prev - 1
if cx.stack[last].Uint > 255 {
- cx.err = errors.New("setbyte value > 255")
- return
+ return errors.New("setbyte value > 255")
}
if cx.stack[prev].Uint >= uint64(len(cx.stack[pprev].Bytes)) {
- cx.err = errors.New("setbyte index beyond array length")
- return
+ return errors.New("setbyte index beyond array length")
}
// Copy to avoid modifying shared slice
cx.stack[pprev].Bytes = append([]byte(nil), cx.stack[pprev].Bytes...)
cx.stack[pprev].Bytes[cx.stack[prev].Uint] = byte(cx.stack[last].Uint)
cx.stack = cx.stack[:prev]
+ return nil
}
-func opExtractImpl(x []byte, start, length int) (out []byte, err error) {
- out = x
+func opExtractImpl(x []byte, start, length int) ([]byte, error) {
end := start + length
if start > len(x) || end > len(x) {
- err = errors.New("extract range beyond length of string")
- return
+ return nil, errors.New("extract range beyond length of string")
}
- out = x[start:end]
- return
+ return x[start:end], nil
}
-func opExtract(cx *EvalContext) {
+func opExtract(cx *EvalContext) error {
last := len(cx.stack) - 1
startIdx := cx.program[cx.pc+1]
lengthIdx := cx.program[cx.pc+2]
@@ -3419,55 +3404,61 @@ func opExtract(cx *EvalContext) {
if length == 0 {
length = len(cx.stack[last].Bytes) - int(startIdx)
}
- cx.stack[last].Bytes, cx.err = opExtractImpl(cx.stack[last].Bytes, int(startIdx), length)
+ bytes, err := opExtractImpl(cx.stack[last].Bytes, int(startIdx), length)
+ cx.stack[last].Bytes = bytes
+ return err
}
-func opExtract3(cx *EvalContext) {
+func opExtract3(cx *EvalContext) error {
last := len(cx.stack) - 1 // length
prev := last - 1 // start
byteArrayIdx := prev - 1 // bytes
startIdx := cx.stack[prev].Uint
lengthIdx := cx.stack[last].Uint
if startIdx > math.MaxInt32 || lengthIdx > math.MaxInt32 {
- cx.err = errors.New("extract range beyond length of string")
- return
+ return errors.New("extract range beyond length of string")
}
- cx.stack[byteArrayIdx].Bytes, cx.err = opExtractImpl(cx.stack[byteArrayIdx].Bytes, int(startIdx), int(lengthIdx))
+ bytes, err := opExtractImpl(cx.stack[byteArrayIdx].Bytes, int(startIdx), int(lengthIdx))
+ cx.stack[byteArrayIdx].Bytes = bytes
cx.stack = cx.stack[:prev]
+ return err
}
// We convert the bytes manually here because we need to accept "short" byte arrays.
// A single byte is a legal uint64 decoded this way.
-func convertBytesToInt(x []byte) (out uint64) {
- out = uint64(0)
+func convertBytesToInt(x []byte) uint64 {
+ out := uint64(0)
for _, b := range x {
out = out << 8
out = out | (uint64(b) & 0x0ff)
}
- return
+ return out
}
-func opExtractNBytes(cx *EvalContext, n int) {
+func opExtractNBytes(cx *EvalContext, n int) error {
last := len(cx.stack) - 1 // start
prev := last - 1 // bytes
startIdx := cx.stack[last].Uint
- cx.stack[prev].Bytes, cx.err = opExtractImpl(cx.stack[prev].Bytes, int(startIdx), n) // extract n bytes
-
- cx.stack[prev].Uint = convertBytesToInt(cx.stack[prev].Bytes)
+ bytes, err := opExtractImpl(cx.stack[prev].Bytes, int(startIdx), n) // extract n bytes
+ if err != nil {
+ return err
+ }
+ cx.stack[prev].Uint = convertBytesToInt(bytes)
cx.stack[prev].Bytes = nil
cx.stack = cx.stack[:last]
+ return nil
}
-func opExtract16Bits(cx *EvalContext) {
- opExtractNBytes(cx, 2) // extract 2 bytes
+func opExtract16Bits(cx *EvalContext) error {
+ return opExtractNBytes(cx, 2) // extract 2 bytes
}
-func opExtract32Bits(cx *EvalContext) {
- opExtractNBytes(cx, 4) // extract 4 bytes
+func opExtract32Bits(cx *EvalContext) error {
+ return opExtractNBytes(cx, 4) // extract 4 bytes
}
-func opExtract64Bits(cx *EvalContext) {
- opExtractNBytes(cx, 8) // extract 8 bytes
+func opExtract64Bits(cx *EvalContext) error {
+ return opExtractNBytes(cx, 8) // extract 8 bytes
}
// accountReference yields the address and Accounts offset designated by a
@@ -3485,16 +3476,16 @@ func opExtract64Bits(cx *EvalContext) {
func (cx *EvalContext) accountReference(account stackValue) (basics.Address, uint64, error) {
if account.argType() == StackUint64 {
- addr, err := cx.Txn.Txn.AddressByIndex(account.Uint, cx.Txn.Txn.Sender)
+ addr, err := cx.txn.Txn.AddressByIndex(account.Uint, cx.txn.Txn.Sender)
return addr, account.Uint, err
}
addr, err := account.address()
if err != nil {
return addr, 0, err
}
- idx, err := cx.Txn.Txn.IndexByAddress(addr, cx.Txn.Txn.Sender)
+ idx, err := cx.txn.Txn.IndexByAddress(addr, cx.txn.Txn.Sender)
- invalidIndex := uint64(len(cx.Txn.Txn.Accounts) + 1)
+ invalidIndex := uint64(len(cx.txn.Txn.Accounts) + 1)
// Allow an address for an app that was created in group
if err != nil && cx.version >= createdResourcesVersion {
for _, appID := range cx.created.apps {
@@ -3518,7 +3509,7 @@ func (cx *EvalContext) accountReference(account stackValue) (basics.Address, uin
func (cx *EvalContext) mutableAccountReference(account stackValue) (basics.Address, uint64, error) {
addr, accountIdx, err := cx.accountReference(account)
- if err == nil && accountIdx > uint64(len(cx.Txn.Txn.Accounts)) {
+ if err == nil && accountIdx > uint64(len(cx.txn.Txn.Accounts)) {
// There was no error, but accountReference has signaled that accountIdx
// is not for mutable ops (because it can't encode it in EvalDelta)
// This also tells us that account.address() will work.
@@ -3528,86 +3519,69 @@ func (cx *EvalContext) mutableAccountReference(account stackValue) (basics.Addre
return addr, accountIdx, err
}
-func opBalance(cx *EvalContext) {
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
+func opBalance(cx *EvalContext) error {
last := len(cx.stack) - 1 // account (index or actual address)
addr, _, err := cx.accountReference(cx.stack[last])
if err != nil {
- cx.err = err
- return
+ return err
}
account, err := cx.Ledger.AccountData(addr)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last].Bytes = nil
cx.stack[last].Uint = account.MicroAlgos.Raw
+ return nil
}
-func opMinBalance(cx *EvalContext) {
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
+func opMinBalance(cx *EvalContext) error {
last := len(cx.stack) - 1 // account (index or actual address)
addr, _, err := cx.accountReference(cx.stack[last])
if err != nil {
- cx.err = err
- return
+ return err
}
account, err := cx.Ledger.AccountData(addr)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last].Bytes = nil
cx.stack[last].Uint = account.MinBalance(cx.Proto).Raw
+ return nil
}
-func opAppOptedIn(cx *EvalContext) {
+func opAppOptedIn(cx *EvalContext) error {
last := len(cx.stack) - 1 // app
prev := last - 1 // account
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
addr, _, err := cx.accountReference(cx.stack[prev])
if err != nil {
- cx.err = err
- return
+ return err
}
app, err := appReference(cx, cx.stack[last].Uint, false)
if err != nil {
- cx.err = err
- return
+ return err
}
optedIn, err := cx.Ledger.OptedIn(addr, app)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[prev].Uint = boolToUint(optedIn)
cx.stack[prev].Bytes = nil
cx.stack = cx.stack[:last]
+ return nil
}
-func opAppLocalGet(cx *EvalContext) {
+func opAppLocalGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // state key
prev := last - 1 // account
@@ -3615,15 +3589,15 @@ func opAppLocalGet(cx *EvalContext) {
result, _, err := opAppLocalGetImpl(cx, 0, key, cx.stack[prev])
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[prev] = result
cx.stack = cx.stack[:last]
+ return nil
}
-func opAppLocalGetEx(cx *EvalContext) {
+func opAppLocalGetEx(cx *EvalContext) error {
last := len(cx.stack) - 1 // state key
prev := last - 1 // app id
pprev := prev - 1 // account
@@ -3633,8 +3607,7 @@ func opAppLocalGetEx(cx *EvalContext) {
result, ok, err := opAppLocalGetImpl(cx, appID, key, cx.stack[pprev])
if err != nil {
- cx.err = err
- return
+ return err
}
var isOk stackValue
@@ -3645,14 +3618,10 @@ func opAppLocalGetEx(cx *EvalContext) {
cx.stack[pprev] = result
cx.stack[prev] = isOk
cx.stack = cx.stack[:last]
+ return nil
}
func opAppLocalGetImpl(cx *EvalContext, appID uint64, key []byte, acct stackValue) (result stackValue, ok bool, err error) {
- if cx.Ledger == nil {
- err = fmt.Errorf("ledger not available")
- return
- }
-
addr, accountIdx, err := cx.accountReference(acct)
if err != nil {
return
@@ -3665,53 +3634,47 @@ func opAppLocalGetImpl(cx *EvalContext, appID uint64, key []byte, acct stackValu
tv, ok, err := cx.Ledger.GetLocal(addr, app, string(key), accountIdx)
if err != nil {
- cx.err = err
return
}
if ok {
- result, err = stackValueFromTealValue(&tv)
+ result, err = stackValueFromTealValue(tv)
}
return
}
func opAppGetGlobalStateImpl(cx *EvalContext, appIndex uint64, key []byte) (result stackValue, ok bool, err error) {
- if cx.Ledger == nil {
- err = fmt.Errorf("ledger not available")
- return
- }
-
app, err := appReference(cx, appIndex, true)
if err != nil {
return
}
- tv, ok, err := cx.Ledger.GetGlobal(app, string(key))
+ tv, ok, err := cx.Ledger.GetGlobal(app, string(key))
if err != nil {
return
}
if ok {
- result, err = stackValueFromTealValue(&tv)
+ result, err = stackValueFromTealValue(tv)
}
return
}
-func opAppGlobalGet(cx *EvalContext) {
+func opAppGlobalGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // state key
key := cx.stack[last].Bytes
result, _, err := opAppGetGlobalStateImpl(cx, 0, key)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack[last] = result
+ return nil
}
-func opAppGlobalGetEx(cx *EvalContext) {
+func opAppGlobalGetEx(cx *EvalContext) error {
last := len(cx.stack) - 1 // state key
prev := last - 1 // app
@@ -3719,8 +3682,7 @@ func opAppGlobalGetEx(cx *EvalContext) {
result, ok, err := opAppGetGlobalStateImpl(cx, cx.stack[prev].Uint, key)
if err != nil {
- cx.err = err
- return
+ return err
}
var isOk stackValue
@@ -3730,9 +3692,10 @@ func opAppGlobalGetEx(cx *EvalContext) {
cx.stack[prev] = result
cx.stack[last] = isOk
+ return nil
}
-func opAppLocalPut(cx *EvalContext) {
+func opAppLocalPut(cx *EvalContext) error {
last := len(cx.stack) - 1 // value
prev := last - 1 // state key
pprev := prev - 1 // account
@@ -3740,158 +3703,131 @@ func opAppLocalPut(cx *EvalContext) {
sv := cx.stack[last]
key := string(cx.stack[prev].Bytes)
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
addr, accountIdx, err := cx.mutableAccountReference(cx.stack[pprev])
if err != nil {
- cx.err = err
- return
+ return err
}
// if writing the same value, don't record in EvalDelta, matching ledger
// behavior with previous BuildEvalDelta mechanism
etv, ok, err := cx.Ledger.GetLocal(addr, cx.appID, key, accountIdx)
if err != nil {
- cx.err = err
- return
+ return err
}
tv := sv.toTealValue()
if !ok || tv != etv {
- if _, ok := cx.Txn.EvalDelta.LocalDeltas[accountIdx]; !ok {
- cx.Txn.EvalDelta.LocalDeltas[accountIdx] = basics.StateDelta{}
+ if _, ok := cx.txn.EvalDelta.LocalDeltas[accountIdx]; !ok {
+ cx.txn.EvalDelta.LocalDeltas[accountIdx] = basics.StateDelta{}
}
- cx.Txn.EvalDelta.LocalDeltas[accountIdx][key] = tv.ToValueDelta()
+ cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = tv.ToValueDelta()
}
err = cx.Ledger.SetLocal(addr, cx.appID, key, tv, accountIdx)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = cx.stack[:pprev]
+ return nil
}
-func opAppGlobalPut(cx *EvalContext) {
+func opAppGlobalPut(cx *EvalContext) error {
last := len(cx.stack) - 1 // value
prev := last - 1 // state key
sv := cx.stack[last]
key := string(cx.stack[prev].Bytes)
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
// if writing the same value, don't record in EvalDelta, matching ledger
// behavior with previous BuildEvalDelta mechanism
etv, ok, err := cx.Ledger.GetGlobal(cx.appID, key)
if err != nil {
- cx.err = err
- return
+ return err
}
tv := sv.toTealValue()
if !ok || tv != etv {
- cx.Txn.EvalDelta.GlobalDelta[key] = tv.ToValueDelta()
+ cx.txn.EvalDelta.GlobalDelta[key] = tv.ToValueDelta()
}
err = cx.Ledger.SetGlobal(cx.appID, key, tv)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = cx.stack[:prev]
+ return nil
}
-func opAppLocalDel(cx *EvalContext) {
+func opAppLocalDel(cx *EvalContext) error {
last := len(cx.stack) - 1 // key
prev := last - 1 // account
key := string(cx.stack[last].Bytes)
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
addr, accountIdx, err := cx.mutableAccountReference(cx.stack[prev])
if err != nil {
- cx.err = err
- return
+ return err
}
// if deleting a non-existent value, don't record in EvalDelta, matching
// ledger behavior with previous BuildEvalDelta mechanism
if _, ok, err := cx.Ledger.GetLocal(addr, cx.appID, key, accountIdx); ok {
if err != nil {
- cx.err = err
- return
+ return err
}
- if _, ok := cx.Txn.EvalDelta.LocalDeltas[accountIdx]; !ok {
- cx.Txn.EvalDelta.LocalDeltas[accountIdx] = basics.StateDelta{}
+ if _, ok := cx.txn.EvalDelta.LocalDeltas[accountIdx]; !ok {
+ cx.txn.EvalDelta.LocalDeltas[accountIdx] = basics.StateDelta{}
}
- cx.Txn.EvalDelta.LocalDeltas[accountIdx][key] = basics.ValueDelta{
+ cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = basics.ValueDelta{
Action: basics.DeleteAction,
}
}
err = cx.Ledger.DelLocal(addr, cx.appID, key, accountIdx)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = cx.stack[:prev]
+ return nil
}
-func opAppGlobalDel(cx *EvalContext) {
+func opAppGlobalDel(cx *EvalContext) error {
last := len(cx.stack) - 1 // key
key := string(cx.stack[last].Bytes)
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
// if deleting a non-existent value, don't record in EvalDelta, matching
// ledger behavior with previous BuildEvalDelta mechanism
if _, ok, err := cx.Ledger.GetGlobal(cx.appID, key); ok {
if err != nil {
- cx.err = err
- return
+ return err
}
- cx.Txn.EvalDelta.GlobalDelta[key] = basics.ValueDelta{
+ cx.txn.EvalDelta.GlobalDelta[key] = basics.ValueDelta{
Action: basics.DeleteAction,
}
}
err := cx.Ledger.DelGlobal(cx.appID, key)
if err != nil {
- cx.err = err
- return
+ return err
}
cx.stack = cx.stack[:last]
+ return nil
}
-// We have a difficult naming problem here. In some opcodes, TEAL
-// allows (and used to require) ASAs and Apps to to be referenced by
-// their "index" in an app call txn's foreign-apps or foreign-assets
-// arrays. That was a small integer, no more than 2 or so, and was
-// often called an "index". But it was not a basics.AssetIndex or
-// basics.ApplicationIndex.
+// We have a difficult naming problem here. Some opcodes allow (and used to
+// require) ASAs and Apps to to be referenced by their "index" in an app call
+// txn's foreign-apps or foreign-assets arrays. That was a small integer, no
+// more than 2 or so, and was often called an "index". But it was not a
+// basics.AssetIndex or basics.ApplicationIndex.
func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, error) {
if cx.version >= directRefEnabledVersion {
if ref == 0 || ref == uint64(cx.appID) {
return cx.appID, nil
}
- for _, appID := range cx.Txn.Txn.ForeignApps {
+ for _, appID := range cx.txn.Txn.ForeignApps {
if appID == basics.AppIndex(ref) {
return appID, nil
}
@@ -3907,8 +3843,8 @@ func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, e
// Allow use of indexes, but this comes last so that clear advice can be
// given to anyone who cares about semantics in the first few rounds of
// a new network - don't use indexes for references, use the App ID
- if ref <= uint64(len(cx.Txn.Txn.ForeignApps)) {
- return basics.AppIndex(cx.Txn.Txn.ForeignApps[ref-1]), nil
+ if ref <= uint64(len(cx.txn.Txn.ForeignApps)) {
+ return basics.AppIndex(cx.txn.Txn.ForeignApps[ref-1]), nil
}
} else {
// Old rules
@@ -3917,8 +3853,8 @@ func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, e
}
if foreign {
// In old versions, a foreign reference must be an index in ForeignAssets or 0
- if ref <= uint64(len(cx.Txn.Txn.ForeignApps)) {
- return basics.AppIndex(cx.Txn.Txn.ForeignApps[ref-1]), nil
+ if ref <= uint64(len(cx.txn.Txn.ForeignApps)) {
+ return basics.AppIndex(cx.txn.Txn.ForeignApps[ref-1]), nil
}
} else {
// Otherwise it's direct
@@ -3930,7 +3866,7 @@ func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, e
func asaReference(cx *EvalContext, ref uint64, foreign bool) (basics.AssetIndex, error) {
if cx.version >= directRefEnabledVersion {
- for _, assetID := range cx.Txn.Txn.ForeignAssets {
+ for _, assetID := range cx.txn.Txn.ForeignAssets {
if assetID == basics.AssetIndex(ref) {
return assetID, nil
}
@@ -3946,15 +3882,15 @@ func asaReference(cx *EvalContext, ref uint64, foreign bool) (basics.AssetIndex,
// Allow use of indexes, but this comes last so that clear advice can be
// given to anyone who cares about semantics in the first few rounds of
// a new network - don't use indexes for references, use the asa ID.
- if ref < uint64(len(cx.Txn.Txn.ForeignAssets)) {
- return basics.AssetIndex(cx.Txn.Txn.ForeignAssets[ref]), nil
+ if ref < uint64(len(cx.txn.Txn.ForeignAssets)) {
+ return basics.AssetIndex(cx.txn.Txn.ForeignAssets[ref]), nil
}
} else {
// Old rules
if foreign {
// In old versions, a foreign reference must be an index in ForeignAssets
- if ref < uint64(len(cx.Txn.Txn.ForeignAssets)) {
- return basics.AssetIndex(cx.Txn.Txn.ForeignAssets[ref]), nil
+ if ref < uint64(len(cx.txn.Txn.ForeignAssets)) {
+ return basics.AssetIndex(cx.txn.Txn.ForeignAssets[ref]), nil
}
} else {
// Otherwise it's direct
@@ -3965,32 +3901,24 @@ func asaReference(cx *EvalContext, ref uint64, foreign bool) (basics.AssetIndex,
}
-func opAssetHoldingGet(cx *EvalContext) {
+func opAssetHoldingGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // asset
prev := last - 1 // account
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
holdingField := AssetHoldingField(cx.program[cx.pc+1])
- fs, ok := assetHoldingFieldSpecByField[holdingField]
+ fs, ok := assetHoldingFieldSpecByField(holdingField)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid asset_holding_get field %d", holdingField)
- return
+ return fmt.Errorf("invalid asset_holding_get field %d", holdingField)
}
addr, _, err := cx.accountReference(cx.stack[prev])
if err != nil {
- cx.err = err
- return
+ return err
}
asset, err := asaReference(cx, cx.stack[last].Uint, false)
if err != nil {
- cx.err = err
- return
+ return err
}
var exist uint64 = 0
@@ -4000,34 +3928,27 @@ func opAssetHoldingGet(cx *EvalContext) {
exist = 1
value, err = cx.assetHoldingToValue(&holding, fs)
if err != nil {
- cx.err = err
- return
+ return err
}
}
cx.stack[prev] = value
cx.stack[last].Uint = exist
+ return nil
}
-func opAssetParamsGet(cx *EvalContext) {
+func opAssetParamsGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // asset
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
paramField := AssetParamsField(cx.program[cx.pc+1])
- fs, ok := assetParamsFieldSpecByField[paramField]
+ fs, ok := assetParamsFieldSpecByField(paramField)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid asset_params_get field %d", paramField)
- return
+ return fmt.Errorf("invalid asset_params_get field %d", paramField)
}
asset, err := asaReference(cx, cx.stack[last].Uint, true)
if err != nil {
- cx.err = err
- return
+ return err
}
var exist uint64 = 0
@@ -4037,34 +3958,27 @@ func opAssetParamsGet(cx *EvalContext) {
exist = 1
value, err = cx.assetParamsToValue(&params, creator, fs)
if err != nil {
- cx.err = err
- return
+ return err
}
}
cx.stack[last] = value
cx.stack = append(cx.stack, stackValue{Uint: exist})
+ return nil
}
-func opAppParamsGet(cx *EvalContext) {
+func opAppParamsGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // app
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
paramField := AppParamsField(cx.program[cx.pc+1])
- fs, ok := appParamsFieldSpecByField[paramField]
+ fs, ok := appParamsFieldSpecByField(paramField)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid app_params_get field %d", paramField)
- return
+ return fmt.Errorf("invalid app_params_get field %d", paramField)
}
app, err := appReference(cx, cx.stack[last].Uint, true)
if err != nil {
- cx.err = err
- return
+ return err
}
var exist uint64 = 0
@@ -4083,40 +3997,32 @@ func opAppParamsGet(cx *EvalContext) {
value, err = cx.appParamsToValue(&params, fs)
}
if err != nil {
- cx.err = err
- return
+ return err
}
}
cx.stack[last] = value
cx.stack = append(cx.stack, stackValue{Uint: exist})
+ return nil
}
-func opAcctParamsGet(cx *EvalContext) {
+func opAcctParamsGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // acct
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
addr, _, err := cx.accountReference(cx.stack[last])
if err != nil {
- cx.err = err
- return
+ return err
}
paramField := AcctParamsField(cx.program[cx.pc+1])
- fs, ok := acctParamsFieldSpecByField[paramField]
+ fs, ok := acctParamsFieldSpecByField(paramField)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid acct_params_get field %d", paramField)
- return
+ return fmt.Errorf("invalid acct_params_get field %d", paramField)
}
account, err := cx.Ledger.AccountData(addr)
if err != nil {
- cx.err = err
- return
+ return err
}
exist := boolToUint(account.MicroAlgos.Raw > 0)
@@ -4133,23 +4039,23 @@ func opAcctParamsGet(cx *EvalContext) {
}
cx.stack[last] = value
cx.stack = append(cx.stack, stackValue{Uint: exist})
+ return nil
}
-func opLog(cx *EvalContext) {
+func opLog(cx *EvalContext) error {
last := len(cx.stack) - 1
- if len(cx.Txn.EvalDelta.Logs) >= MaxLogCalls {
- cx.err = fmt.Errorf("too many log calls in program. up to %d is allowed", MaxLogCalls)
- return
+ if len(cx.txn.EvalDelta.Logs) >= maxLogCalls {
+ return fmt.Errorf("too many log calls in program. up to %d is allowed", maxLogCalls)
}
log := cx.stack[last]
cx.logSize += len(log.Bytes)
- if cx.logSize > MaxLogSize {
- cx.err = fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, MaxLogSize)
- return
+ if cx.logSize > maxLogSize {
+ return fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, maxLogSize)
}
- cx.Txn.EvalDelta.Logs = append(cx.Txn.EvalDelta.Logs, string(log.Bytes))
+ cx.txn.EvalDelta.Logs = append(cx.txn.EvalDelta.Logs, string(log.Bytes))
cx.stack = cx.stack[:last]
+ return nil
}
func authorizedSender(cx *EvalContext, addr basics.Address) error {
@@ -4202,31 +4108,28 @@ func addInnerTxn(cx *EvalContext) error {
stxn.Txn.Header = transactions.Header{
Sender: addr,
Fee: basics.MicroAlgos{Raw: fee},
- FirstValid: cx.Txn.Txn.FirstValid,
- LastValid: cx.Txn.Txn.LastValid,
+ FirstValid: cx.txn.Txn.FirstValid,
+ LastValid: cx.txn.Txn.LastValid,
}
cx.subtxns = append(cx.subtxns, stxn)
return nil
}
-func opTxBegin(cx *EvalContext) {
+func opTxBegin(cx *EvalContext) error {
if len(cx.subtxns) > 0 {
- cx.err = errors.New("itxn_begin without itxn_submit")
- return
+ return errors.New("itxn_begin without itxn_submit")
}
- if cx.Proto.IsolateClearState && cx.Txn.Txn.OnCompletion == transactions.ClearStateOC {
- cx.err = errors.New("clear state programs can not issue inner transactions")
- return
+ if cx.Proto.IsolateClearState && cx.txn.Txn.OnCompletion == transactions.ClearStateOC {
+ return errors.New("clear state programs can not issue inner transactions")
}
- cx.err = addInnerTxn(cx)
+ return addInnerTxn(cx)
}
-func opTxNext(cx *EvalContext) {
+func opItxnNext(cx *EvalContext) error {
if len(cx.subtxns) == 0 {
- cx.err = errors.New("itxn_next without itxn_begin")
- return
+ return errors.New("itxn_next without itxn_begin")
}
- cx.err = addInnerTxn(cx)
+ return addInnerTxn(cx)
}
// availableAccount is used instead of accountReference for more recent opcodes
@@ -4252,7 +4155,7 @@ func (cx *EvalContext) availableAsset(sv stackValue) (basics.AssetIndex, error)
aid := basics.AssetIndex(uint)
// Ensure that aid is in Foreign Assets
- for _, assetID := range cx.Txn.Txn.ForeignAssets {
+ for _, assetID := range cx.txn.Txn.ForeignAssets {
if assetID == aid {
return aid, nil
}
@@ -4280,7 +4183,7 @@ func (cx *EvalContext) availableApp(sv stackValue) (basics.AppIndex, error) {
aid := basics.AppIndex(uint)
// Ensure that aid is in Foreign Apps
- for _, appID := range cx.Txn.Txn.ForeignApps {
+ for _, appID := range cx.txn.Txn.ForeignApps {
if appID == aid {
return aid, nil
}
@@ -4312,13 +4215,13 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
if ok && ver <= cx.version {
txn.Type = protocol.TxType(txType)
} else {
- err = fmt.Errorf("%s is not a valid Type for itxn_field", txType)
+ return fmt.Errorf("%s is not a valid Type for itxn_field", txType)
}
case TypeEnum:
var i uint64
i, err = sv.uint()
if err != nil {
- return
+ return err
}
// i != 0 is so that the error reports 0 instead of Unknown
if i != 0 && i < uint64(len(TxnTypeNames)) {
@@ -4326,10 +4229,10 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
if ok && ver <= cx.version {
txn.Type = protocol.TxType(TxnTypeNames[i])
} else {
- err = fmt.Errorf("%s is not a valid Type for itxn_field", TxnTypeNames[i])
+ return fmt.Errorf("%s is not a valid Type for itxn_field", TxnTypeNames[i])
}
} else {
- err = fmt.Errorf("%d is not a valid TypeEnum", i)
+ return fmt.Errorf("%d is not a valid TypeEnum", i)
}
case Sender:
txn.Sender, err = cx.availableAccount(sv)
@@ -4470,7 +4373,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
var new basics.Address
new, err = cx.availableAccount(sv)
if err != nil {
- return
+ return err
}
if len(txn.Accounts) >= cx.Proto.MaxAppTxnAccounts {
return errors.New("too many foreign accounts")
@@ -4494,7 +4397,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
var new basics.AssetIndex
new, err = cx.availableAsset(sv)
if err != nil {
- return
+ return err
}
if len(txn.ForeignAssets) >= cx.Proto.MaxAppTxnForeignAssets {
return errors.New("too many foreign assets")
@@ -4504,7 +4407,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
var new basics.AppIndex
new, err = cx.availableApp(sv)
if err != nil {
- return
+ return err
}
if len(txn.ForeignApps) >= cx.Proto.MaxAppTxnForeignApps {
return errors.New("too many foreign apps")
@@ -4527,50 +4430,42 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
epp, err =
sv.uintMaxed(uint64(cx.Proto.MaxExtraAppProgramPages))
if err != nil {
- return
+ return err
}
txn.ExtraProgramPages = uint32(epp)
default:
- err = fmt.Errorf("invalid itxn_field %s", fs.field)
+ return fmt.Errorf("invalid itxn_field %s", fs.field)
}
return
}
-func opTxField(cx *EvalContext) {
+func opItxnField(cx *EvalContext) error {
itx := len(cx.subtxns) - 1
if itx < 0 {
- cx.err = errors.New("itxn_field without itxn_begin")
- return
+ return errors.New("itxn_field without itxn_begin")
}
last := len(cx.stack) - 1
field := TxnField(cx.program[cx.pc+1])
- fs, ok := txnFieldSpecByField[field]
+ fs, ok := txnFieldSpecByField(field)
if !ok || fs.itxVersion == 0 || fs.itxVersion > cx.version {
- cx.err = fmt.Errorf("invalid itxn_field %s", field)
- return
+ return fmt.Errorf("invalid itxn_field %s", field)
}
sv := cx.stack[last]
- cx.err = cx.stackIntoTxnField(sv, &fs, &cx.subtxns[itx].Txn)
+ err := cx.stackIntoTxnField(sv, &fs, &cx.subtxns[itx].Txn)
cx.stack = cx.stack[:last] // pop
+ return err
}
-func opTxSubmit(cx *EvalContext) {
- if cx.Ledger == nil {
- cx.err = fmt.Errorf("ledger not available")
- return
- }
-
+func opItxnSubmit(cx *EvalContext) error {
// Should rarely trigger, since itxn_next checks these too. (but that check
// must be imperfect, see its comment) In contrast to that check, subtxns is
// already populated here.
if len(cx.subtxns) > cx.remainingInners() || len(cx.subtxns) > cx.Proto.MaxTxGroupSize {
- cx.err = fmt.Errorf("too many inner transactions %d with %d left", len(cx.subtxns), cx.remainingInners())
- return
+ return fmt.Errorf("too many inner transactions %d with %d left", len(cx.subtxns), cx.remainingInners())
}
if len(cx.subtxns) == 0 {
- cx.err = errors.New("itxn_submit without itxn_begin")
- return
+ return errors.New("itxn_submit without itxn_begin")
}
// Check fees across the group first. Allows fee pooling in inner groups.
@@ -4583,8 +4478,7 @@ func opTxSubmit(cx *EvalContext) {
// See if the FeeCredit is enough to cover the shortfall
shortfall := groupFee - groupPaid
if cx.FeeCredit == nil || *cx.FeeCredit < shortfall {
- cx.err = fmt.Errorf("fee too small %#v", cx.subtxns)
- return
+ return fmt.Errorf("fee too small %#v", cx.subtxns)
}
*cx.FeeCredit -= shortfall
} else {
@@ -4601,7 +4495,7 @@ func opTxSubmit(cx *EvalContext) {
var parent transactions.Txid
isGroup := len(cx.subtxns) > 1
if isGroup {
- parent = cx.Txn.ID()
+ parent = cx.txn.ID()
}
for itx := range cx.subtxns {
// The goal is to follow the same invariants used by the
@@ -4610,61 +4504,81 @@ func opTxSubmit(cx *EvalContext) {
// is authorized, and WellFormed.
err := authorizedSender(cx, cx.subtxns[itx].Txn.Sender)
if err != nil {
- cx.err = err
- return
+ return err
}
// Recall that WellFormed does not care about individual
// transaction fees because of fee pooling. Checked above.
- cx.err = cx.subtxns[itx].Txn.WellFormed(*cx.Specials, *cx.Proto)
- if cx.err != nil {
- return
+ err = cx.subtxns[itx].Txn.WellFormed(*cx.Specials, *cx.Proto)
+ if err != nil {
+ return err
}
- // Disallow reentrancy and limit inner app call depth
+ // Disallow reentrancy, limit inner app call depth, and do version checks
if cx.subtxns[itx].Txn.Type == protocol.ApplicationCallTx {
if cx.appID == cx.subtxns[itx].Txn.ApplicationID {
- cx.err = fmt.Errorf("attempt to self-call")
- return
+ return fmt.Errorf("attempt to self-call")
}
depth := 0
for parent := cx.caller; parent != nil; parent = parent.caller {
if parent.appID == cx.subtxns[itx].Txn.ApplicationID {
- cx.err = fmt.Errorf("attempt to re-enter %d", parent.appID)
- return
+ return fmt.Errorf("attempt to re-enter %d", parent.appID)
}
depth++
}
if depth >= maxAppCallDepth {
- cx.err = fmt.Errorf("appl depth (%d) exceeded", depth)
- return
+ return fmt.Errorf("appl depth (%d) exceeded", depth)
}
- // Can't call version < innerAppsEnabledVersion, and apps with such
- // versions will always match, so just check approval program
- // version.
+ // Set program by txn, approval, or clear state
program := cx.subtxns[itx].Txn.ApprovalProgram
if cx.subtxns[itx].Txn.ApplicationID != 0 {
app, _, err := cx.Ledger.AppParams(cx.subtxns[itx].Txn.ApplicationID)
if err != nil {
- cx.err = err
- return
+ return err
}
program = app.ApprovalProgram
+ if cx.subtxns[itx].Txn.OnCompletion == transactions.ClearStateOC {
+ program = app.ClearStateProgram
+ }
}
+
+ // Can't call old versions in inner apps.
v, _, err := transactions.ProgramVersion(program)
if err != nil {
- cx.err = err
- return
+ return err
+ }
+ if v < cx.Proto.MinInnerApplVersion {
+ return fmt.Errorf("inner app call with version v%d < v%d",
+ v, cx.Proto.MinInnerApplVersion)
}
- if v < innerAppsEnabledVersion {
- cx.err = fmt.Errorf("inner app call with version %d < %d", v, innerAppsEnabledVersion)
- return
+
+ // Don't allow opt-in if the CSP is not runnable as an inner.
+ // This test can only fail for v4 and v5 approval programs,
+ // since v6 requires synchronized versions.
+ if cx.subtxns[itx].Txn.OnCompletion == transactions.OptInOC {
+ csp := cx.subtxns[itx].Txn.ClearStateProgram
+ if cx.subtxns[itx].Txn.ApplicationID != 0 {
+ app, _, err := cx.Ledger.AppParams(cx.subtxns[itx].Txn.ApplicationID)
+ if err != nil {
+ return err
+ }
+ csp = app.ClearStateProgram
+ }
+ csv, _, err := transactions.ProgramVersion(csp)
+ if err != nil {
+ return err
+ }
+ if csv < cx.Proto.MinInnerApplVersion {
+ return fmt.Errorf("inner app call opt-in with CSP v%d < v%d",
+ csv, cx.Proto.MinInnerApplVersion)
+ }
}
+
}
if isGroup {
- innerOffset := len(cx.Txn.EvalDelta.InnerTxns)
+ innerOffset := len(cx.txn.EvalDelta.InnerTxns)
group.TxGroupHashes = append(group.TxGroupHashes,
crypto.Digest(cx.subtxns[itx].Txn.InnerID(parent, innerOffset)))
}
@@ -4687,15 +4601,15 @@ func opTxSubmit(cx *EvalContext) {
for i := range ep.TxnGroup {
err := cx.Ledger.Perform(i, ep)
if err != nil {
- cx.err = err
- return
+ return err
}
// This is mostly a no-op, because Perform does its work "in-place", but
// RecordAD has some further responsibilities.
ep.RecordAD(i, ep.TxnGroup[i].ApplyData)
}
- cx.Txn.EvalDelta.InnerTxns = append(cx.Txn.EvalDelta.InnerTxns, ep.TxnGroup...)
+ cx.txn.EvalDelta.InnerTxns = append(cx.txn.EvalDelta.InnerTxns, ep.TxnGroup...)
cx.subtxns = nil
+ return nil
}
// PcDetails return PC and disassembled instructions at PC up to 2 opcodes back
@@ -4735,13 +4649,12 @@ func base64Decode(encoded []byte, encoding *base64.Encoding) ([]byte, error) {
return decoded[:n], err
}
-func opBase64Decode(cx *EvalContext) {
+func opBase64Decode(cx *EvalContext) error {
last := len(cx.stack) - 1
encodingField := Base64Encoding(cx.program[cx.pc+1])
- fs, ok := base64EncodingSpecByField[encodingField]
+ fs, ok := base64EncodingSpecByField(encodingField)
if !ok || fs.version > cx.version {
- cx.err = fmt.Errorf("invalid base64_decode encoding %d", encodingField)
- return
+ return fmt.Errorf("invalid base64_decode encoding %s", encodingField)
}
encoding := base64.URLEncoding
@@ -4749,7 +4662,12 @@ func opBase64Decode(cx *EvalContext) {
encoding = base64.StdEncoding
}
encoding = encoding.Strict()
- cx.stack[last].Bytes, cx.err = base64Decode(cx.stack[last].Bytes, encoding)
+ bytes, err := base64Decode(cx.stack[last].Bytes, encoding)
+ if err != nil {
+ return err
+ }
+ cx.stack[last].Bytes = bytes
+ return nil
}
func hasDuplicateKeys(jsonText []byte) (bool, map[string]json.RawMessage, error) {
dec := json.NewDecoder(bytes.NewReader(jsonText))
@@ -4801,56 +4719,57 @@ func parseJSON(jsonText []byte) (map[string]json.RawMessage, error) {
}
return parsed, nil
}
-func opJSONRef(cx *EvalContext) {
+func opJSONRef(cx *EvalContext) error {
// get json key
last := len(cx.stack) - 1
key := string(cx.stack[last].Bytes)
cx.stack = cx.stack[:last] // pop
+ expectedType := JSONRefType(cx.program[cx.pc+1])
+ fs, ok := jsonRefSpecByField(expectedType)
+ if !ok || fs.version > cx.version {
+ return fmt.Errorf("invalid json_ref type %s", expectedType)
+ }
+
// parse json text
last = len(cx.stack) - 1
parsed, err := parseJSON(cx.stack[last].Bytes)
if err != nil {
- cx.err = fmt.Errorf("error while parsing JSON text, %v", err)
- return
+ return fmt.Errorf("error while parsing JSON text, %v", err)
}
// get value from json
var stval stackValue
- _, ok := parsed[key]
+ _, ok = parsed[key]
if !ok {
- cx.err = fmt.Errorf("key %s not found in JSON text", key)
- return
+ return fmt.Errorf("key %s not found in JSON text", key)
}
- expectedType := JSONRefType(cx.program[cx.pc+1])
+
switch expectedType {
case JSONString:
var value string
err := json.Unmarshal(parsed[key], &value)
if err != nil {
- cx.err = err
- return
+ return err
}
stval.Bytes = []byte(value)
case JSONUint64:
var value uint64
err := json.Unmarshal(parsed[key], &value)
if err != nil {
- cx.err = err
- return
+ return err
}
stval.Uint = value
case JSONObject:
var value map[string]json.RawMessage
err := json.Unmarshal(parsed[key], &value)
if err != nil {
- cx.err = err
- return
+ return err
}
stval.Bytes = parsed[key]
default:
- cx.err = fmt.Errorf("unsupported json_ref return type, should not have reached here")
- return
+ return fmt.Errorf("unsupported json_ref return type %s", expectedType)
}
cx.stack[last] = stval
+ return nil
}
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index cb1a16408..2ccd94e3e 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -54,9 +54,9 @@ func TestCurrentInnerTypes(t *testing.T) {
// bad type
TestApp(t, "itxn_begin; byte \"pya\"; itxn_field Type; itxn_submit; int 1;", ep, "pya is not a valid Type")
// mixed up the int form for the byte form
- TestApp(t, Obfuscate("itxn_begin; int pay; itxn_field Type; itxn_submit; int 1;"), ep, "Type arg not a byte array")
+ TestApp(t, NoTrack("itxn_begin; int pay; itxn_field Type; itxn_submit; int 1;"), ep, "Type arg not a byte array")
// or vice versa
- TestApp(t, Obfuscate("itxn_begin; byte \"pay\"; itxn_field TypeEnum; itxn_submit; int 1;"), ep, "not a uint64")
+ TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field TypeEnum; itxn_submit; int 1;"), ep, "not a uint64")
// some bad types
TestApp(t, "itxn_begin; int 42; itxn_field TypeEnum; itxn_submit; int 1;", ep, "42 is not a valid TypeEnum")
@@ -101,7 +101,7 @@ func TestFieldTypes(t *testing.T) {
ep, _, _ := MakeSampleEnv()
TestApp(t, "itxn_begin; byte \"pay\"; itxn_field Sender;", ep, "not an address")
- TestApp(t, Obfuscate("itxn_begin; int 7; itxn_field Receiver;"), ep, "not an address")
+ TestApp(t, NoTrack("itxn_begin; int 7; itxn_field Receiver;"), ep, "not an address")
TestApp(t, "itxn_begin; byte \"\"; itxn_field CloseRemainderTo;", ep, "not an address")
TestApp(t, "itxn_begin; byte \"\"; itxn_field AssetSender;", ep, "not an address")
// can't really tell if it's an address, so 32 bytes gets further
@@ -111,10 +111,10 @@ func TestFieldTypes(t *testing.T) {
TestApp(t, "itxn_begin; byte \"GAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYZIZD42E\"; itxn_field AssetCloseTo;",
ep, "not an address")
- TestApp(t, Obfuscate("itxn_begin; byte \"pay\"; itxn_field Fee;"), ep, "not a uint64")
- TestApp(t, Obfuscate("itxn_begin; byte 0x01; itxn_field Amount;"), ep, "not a uint64")
- TestApp(t, Obfuscate("itxn_begin; byte 0x01; itxn_field XferAsset;"), ep, "not a uint64")
- TestApp(t, Obfuscate("itxn_begin; byte 0x01; itxn_field AssetAmount;"), ep, "not a uint64")
+ TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field Fee;"), ep, "not a uint64")
+ TestApp(t, NoTrack("itxn_begin; byte 0x01; itxn_field Amount;"), ep, "not a uint64")
+ TestApp(t, NoTrack("itxn_begin; byte 0x01; itxn_field XferAsset;"), ep, "not a uint64")
+ TestApp(t, NoTrack("itxn_begin; byte 0x01; itxn_field AssetAmount;"), ep, "not a uint64")
}
@@ -972,7 +972,7 @@ func TestApplSubmission(t *testing.T) {
// All zeros is v0, so we get a complaint, but that means lengths were ok when set.
TestApp(t, p+a+`int 600; bzero; itxn_field ApprovalProgram;
int 600; bzero; itxn_field ClearStateProgram;`+s, ep,
- "inner app call with version 0")
+ "inner app call with version v0 < v4")
TestApp(t, p+`int 601; bzero; itxn_field ApprovalProgram;
int 600; bzero; itxn_field ClearStateProgram;`+s, ep, "too long")
@@ -981,7 +981,7 @@ func TestApplSubmission(t *testing.T) {
TestApp(t, p+a+`int 1; itxn_field ExtraProgramPages
int 1200; bzero; itxn_field ApprovalProgram;
int 1200; bzero; itxn_field ClearStateProgram;`+s, ep,
- "inner app call with version 0")
+ "inner app call with version v0 < v4")
TestApp(t, p+`int 1; itxn_field ExtraProgramPages
int 1200; bzero; itxn_field ApprovalProgram;
int 1201; bzero; itxn_field ClearStateProgram;`+s, ep, "too long")
@@ -1071,44 +1071,46 @@ func TestCreateOldAppFails(t *testing.T) {
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
ledger.NewAccount(appAddr(888), 50_000)
- ops := TestProg(t, "int 1", InnerAppsEnabledVersion-1)
- old := "byte 0x" + hex.EncodeToString(ops.Program)
+ three := "byte 0x" + hex.EncodeToString(TestProg(t, "int 1", 3).Program)
TestApp(t, `
itxn_begin
int appl; itxn_field TypeEnum
-`+old+`; itxn_field ApprovalProgram
-`+old+`; itxn_field ClearStateProgram
-int 1; itxn_field GlobalNumUint
-int 2; itxn_field LocalNumByteSlice
-int 3; itxn_field LocalNumUint
+`+three+`; itxn_field ApprovalProgram
+`+three+`; itxn_field ClearStateProgram
itxn_submit
int 1
-`, ep, "inner app call with version 5")
+`, ep, "inner app call with version v3 < v4")
- ops = TestProg(t, "int 1", InnerAppsEnabledVersion)
- recent := "byte 0x" + hex.EncodeToString(ops.Program)
+ four := "byte 0x" + hex.EncodeToString(TestProg(t, "int 1", 4).Program)
TestApp(t, `
itxn_begin
int appl; itxn_field TypeEnum
-`+recent+`; itxn_field ApprovalProgram
-`+recent+`; itxn_field ClearStateProgram
-int 1; itxn_field GlobalNumUint
-int 2; itxn_field LocalNumByteSlice
-int 3; itxn_field LocalNumUint
+`+four+`; itxn_field ApprovalProgram
+`+four+`; itxn_field ClearStateProgram
itxn_submit
int 1
`, ep)
+ // Version synch is only enforced for v6 and up, since it was a new rule when 6 came out.
+ five := "byte 0x" + hex.EncodeToString(TestProg(t, "int 1", 5).Program)
+ six := "byte 0x" + hex.EncodeToString(TestProg(t, "int 1", 6).Program)
+
TestApp(t, `
itxn_begin
int appl; itxn_field TypeEnum
-`+old+`; itxn_field ApprovalProgram
-`+recent+`; itxn_field ClearStateProgram
-int 1; itxn_field GlobalNumUint
-int 2; itxn_field LocalNumByteSlice
-int 3; itxn_field LocalNumUint
+`+four+`; itxn_field ApprovalProgram
+`+five+`; itxn_field ClearStateProgram
+itxn_submit
+int 1
+`, ep)
+
+ TestApp(t, `
+itxn_begin
+int appl; itxn_field TypeEnum
+`+six+`; itxn_field ApprovalProgram
+`+five+`; itxn_field ClearStateProgram
itxn_submit
int 1
`, ep, "program version mismatch")
@@ -1116,11 +1118,8 @@ int 1
TestApp(t, `
itxn_begin
int appl; itxn_field TypeEnum
-`+recent+`; itxn_field ApprovalProgram
-`+old+`; itxn_field ClearStateProgram
-int 1; itxn_field GlobalNumUint
-int 2; itxn_field LocalNumByteSlice
-int 3; itxn_field LocalNumUint
+`+five+`; itxn_field ApprovalProgram
+`+six+`; itxn_field ClearStateProgram
itxn_submit
int 1
`, ep, "program version mismatch")
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index f38fb85cc..6d7734897 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -173,23 +173,10 @@ ed25519verify_bare`, pkStr), v)
}
}
-// bitIntFillBytes is a replacement for big.Int.FillBytes from future Go
-func bitIntFillBytes(b *big.Int, buf []byte) []byte {
- for i := range buf {
- buf[i] = 0
- }
- bytes := b.Bytes()
- if len(bytes) > len(buf) {
- panic(fmt.Sprintf("bitIntFillBytes: has %d but got %d buffer", len(bytes), len(buf)))
- }
- copy(buf[len(buf)-len(bytes):], bytes)
- return buf
-}
-
func keyToByte(tb testing.TB, b *big.Int) []byte {
k := make([]byte, 32)
require.NotPanics(tb, func() {
- k = bitIntFillBytes(b, k)
+ b.FillBytes(k)
})
return k
}
@@ -381,18 +368,6 @@ ecdsa_verify Secp256k1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
require.True(t, pass)
}
-// MarshalCompressed converts a point on the curve into the compressed form
-// specified in section 4.3.6 of ANSI X9.62.
-//
-// TODO: replace with elliptic.MarshalCompressed when updating to go 1.15+
-func marshalCompressed(curve elliptic.Curve, x, y *big.Int) []byte {
- byteLen := (curve.Params().BitSize + 7) / 8
- compressed := make([]byte, 1+byteLen)
- compressed[0] = byte(y.Bit(0)) | 2
- bitIntFillBytes(x, compressed[1:])
- return compressed
-}
-
func TestEcdsaWithSecp256r1(t *testing.T) {
if LogicVersion < fidoVersion {
return
@@ -403,7 +378,7 @@ func TestEcdsaWithSecp256r1(t *testing.T) {
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err)
- pk := marshalCompressed(elliptic.P256(), key.X, key.Y)
+ pk := elliptic.MarshalCompressed(elliptic.P256(), key.X, key.Y)
x := keyToByte(t, key.PublicKey.X)
y := keyToByte(t, key.PublicKey.Y)
@@ -437,9 +412,9 @@ byte 0x%s
t.Log("decompressTests i", i)
src := fmt.Sprintf(source, hex.EncodeToString(test.key), hex.EncodeToString(x), hex.EncodeToString(y))
if test.pass {
- testAcceptsWithField(t, src, 5, fidoVersion)
+ testAccepts(t, src, fidoVersion)
} else {
- testPanicsWithField(t, src, 5, fidoVersion)
+ testPanics(t, src, fidoVersion)
}
})
}
@@ -479,9 +454,9 @@ ecdsa_verify Secp256r1
t.Run(fmt.Sprintf("verify/pass=%v", test.pass), func(t *testing.T) {
src := fmt.Sprintf(source, test.data, hex.EncodeToString(test.r), hex.EncodeToString(s), hex.EncodeToString(x), hex.EncodeToString(y))
if test.pass {
- testAcceptsWithField(t, src, 5, fidoVersion)
+ testAccepts(t, src, fidoVersion)
} else {
- testRejectsWithField(t, src, 5, fidoVersion)
+ testRejects(t, src, fidoVersion)
}
})
}
@@ -505,6 +480,8 @@ ecdsa_verify Secp256r1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
// test compatibility with ethereum signatures
func TestEcdsaEthAddress(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
/*
pip install eth-keys pycryptodome
from eth_keys import keys
@@ -531,19 +508,61 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
testAccepts(t, progText, 5)
}
+func TestEcdsaCostVariation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // Doesn't matter if the actual verify returns true or false. Just confirm the cost depends on curve.
+ source := `
+global ZeroAddress // need 32 bytes
+byte "signature r"
+byte "signature s"
+byte "PK x"
+byte "PK y"
+ecdsa_verify Secp256k1
+!
+assert
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-1700-8) + `
+==
+`
+ testAccepts(t, source, 6) // Secp256k1 was 5, but OpcodeBudget is 6
+
+ source = `
+global ZeroAddress // need 32 bytes
+byte "signature r"
+byte "signature s"
+byte "PK x"
+byte "PK y"
+ecdsa_verify Secp256r1
+!
+assert
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-2500-8) + `
+==
+`
+ testAccepts(t, source, fidoVersion)
+}
+
func BenchmarkHash(b *testing.B) {
for _, hash := range []string{"sha256", "keccak256", "sha512_256"} {
- b.Run(hash+"-small", func(b *testing.B) { // hash 32 bytes
+ b.Run(hash+"-0w", func(b *testing.B) { // hash 0 bytes
+ benchmarkOperation(b, "", "byte 0x; "+hash+"; pop", "int 1")
+ })
+ b.Run(hash+"-32", func(b *testing.B) { // hash 32 bytes
benchmarkOperation(b, "int 32; bzero", hash, "pop; int 1")
})
- b.Run(hash+"-med", func(b *testing.B) { // hash 128 bytes
+ b.Run(hash+"-128", func(b *testing.B) { // hash 128 bytes
benchmarkOperation(b, "int 32; bzero",
"dup; concat; dup; concat;"+hash, "pop; int 1")
})
- b.Run(hash+"-big", func(b *testing.B) { // hash 512 bytes
+ b.Run(hash+"-512", func(b *testing.B) { // hash 512 bytes
benchmarkOperation(b, "int 32; bzero",
"dup; concat; dup; concat; dup; concat; dup; concat;"+hash, "pop; int 1")
})
+ b.Run(hash+"-4096", func(b *testing.B) { // hash 4k bytes
+ benchmarkOperation(b, "int 32; bzero",
+ "dup; concat; dup; concat; dup; concat; dup; concat; dup; concat; dup; concat; dup; concat;"+hash, "pop; int 1")
+ })
}
}
@@ -636,7 +655,7 @@ func benchmarkEcdsaGenData(b *testing.B, curve EcdsaCurve) (data []benchmarkEcds
if curve == Secp256k1 {
data[i].pk = secp256k1.CompressPubkey(key.PublicKey.X, key.PublicKey.Y)
} else if curve == Secp256r1 {
- data[i].pk = marshalCompressed(elliptic.P256(), key.PublicKey.X, key.PublicKey.Y)
+ data[i].pk = elliptic.MarshalCompressed(elliptic.P256(), key.PublicKey.X, key.PublicKey.Y)
}
d := []byte("testdata")
@@ -664,10 +683,9 @@ func benchmarkEcdsa(b *testing.B, source string, curve EcdsaCurve) {
if curve == Secp256k1 {
version = 5
} else if curve == Secp256r1 {
- version = 6
+ version = fidoVersion
}
- ops, err := AssembleStringWithVersion(source, version)
- require.NoError(b, err)
+ ops := testProg(b, source, version)
for i := 0; i < b.N; i++ {
data[i].programs = ops.Program
}
@@ -707,7 +725,7 @@ ecdsa_verify Secp256k1`
if LogicVersion >= fidoVersion {
b.Run("ecdsa_verify secp256r1", func(b *testing.B) {
source := `#pragma version ` + strconv.Itoa(fidoVersion) + `
- arg 0d
+ arg 0
arg 1
arg 2
arg 3
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 3d48c5805..730978fd8 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -191,8 +191,8 @@ bytec_0
log
`
tests := map[runMode]string{
- runModeSignature: opcodesRunModeAny + opcodesRunModeSignature,
- runModeApplication: opcodesRunModeAny + opcodesRunModeApplication,
+ modeSig: opcodesRunModeAny + opcodesRunModeSignature,
+ modeApp: opcodesRunModeAny + opcodesRunModeApplication,
}
ep, tx, ledger := makeSampleEnv()
@@ -227,7 +227,7 @@ log
t.Run(fmt.Sprintf("opcodes_mode=%d", mode), func(t *testing.T) {
ep.TxnGroup[0].Txn.ApplicationID = 100
ep.TxnGroup[0].Txn.ForeignAssets = []basics.AssetIndex{5} // needed since v4
- if mode == runModeSignature {
+ if mode == modeSig {
testLogic(t, test, AssemblerMaxVersion, ep)
} else {
testApp(t, test, ep)
@@ -237,9 +237,8 @@ log
// check err opcode work in both modes
source := "err"
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "encountered err")
- testApp(t, source, defaultEvalParams(nil), "encountered err")
- // require.NotContains(t, err.Error(), "not allowed in current mode")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "err opcode executed")
+ testApp(t, source, defaultEvalParams(nil), "err opcode executed")
// check that ed25519verify and arg is not allowed in stateful mode between v2-v4
disallowedV4 := []string{
@@ -293,9 +292,9 @@ log
"not allowed in current mode", "not allowed in current mode")
}
- require.Equal(t, runMode(1), runModeSignature)
- require.Equal(t, runMode(2), runModeApplication)
- require.True(t, modeAny == runModeSignature|runModeApplication)
+ require.Equal(t, runMode(1), modeSig)
+ require.Equal(t, runMode(2), modeApp)
+ require.True(t, modeAny == modeSig|modeApp)
require.True(t, modeAny.Any())
}
@@ -905,14 +904,14 @@ func TestAssets(t *testing.T) {
}
func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64) {
- for _, field := range AssetHoldingFieldNames {
- fs := AssetHoldingFieldSpecByName[field]
+ for _, field := range assetHoldingFieldNames {
+ fs := assetHoldingFieldSpecByName[field]
if fs.version <= version && !strings.Contains(assetsTestProgram, field) {
t.Errorf("TestAssets missing field %v", field)
}
}
- for _, field := range AssetParamsFieldNames {
- fs := AssetParamsFieldSpecByName[field]
+ for _, field := range assetParamsFieldNames {
+ fs := assetParamsFieldSpecByName[field]
if fs.version <= version && !strings.Contains(assetsTestProgram, field) {
t.Errorf("TestAssets missing field %v", field)
}
@@ -1093,7 +1092,7 @@ intc_1
`
params.URL = ""
ledger.NewAsset(txn.Txn.Sender, 55, params)
- testApp(t, source, now, "cannot compare ([]byte to uint64)")
+ testApp(t, notrack(source), now, "cannot compare ([]byte to uint64)")
}
func TestAppParams(t *testing.T) {
@@ -2197,12 +2196,12 @@ func TestEnumFieldErrors(t *testing.T) {
partitiontest.PartitionTest(t)
source := `txn Amount`
- origSpec := txnFieldSpecByField[Amount]
+ origSpec := txnFieldSpecs[Amount]
changed := origSpec
changed.ftype = StackBytes
- txnFieldSpecByField[Amount] = changed
+ txnFieldSpecs[Amount] = changed
defer func() {
- txnFieldSpecByField[Amount] = origSpec
+ txnFieldSpecs[Amount] = origSpec
}()
testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "Amount expected field type is []byte but got uint64")
@@ -2210,12 +2209,12 @@ func TestEnumFieldErrors(t *testing.T) {
source = `global MinTxnFee`
- origMinTxnFs := globalFieldSpecByField[MinTxnFee]
+ origMinTxnFs := globalFieldSpecs[MinTxnFee]
badMinTxnFs := origMinTxnFs
badMinTxnFs.ftype = StackBytes
- globalFieldSpecByField[MinTxnFee] = badMinTxnFs
+ globalFieldSpecs[MinTxnFee] = badMinTxnFs
defer func() {
- globalFieldSpecByField[MinTxnFee] = origMinTxnFs
+ globalFieldSpecs[MinTxnFee] = origMinTxnFs
}()
testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "MinTxnFee expected field type is []byte but got uint64")
@@ -2242,12 +2241,12 @@ int 55
asset_holding_get AssetBalance
assert
`
- origBalanceFs := assetHoldingFieldSpecByField[AssetBalance]
+ origBalanceFs := assetHoldingFieldSpecs[AssetBalance]
badBalanceFs := origBalanceFs
badBalanceFs.ftype = StackBytes
- assetHoldingFieldSpecByField[AssetBalance] = badBalanceFs
+ assetHoldingFieldSpecs[AssetBalance] = badBalanceFs
defer func() {
- assetHoldingFieldSpecByField[AssetBalance] = origBalanceFs
+ assetHoldingFieldSpecs[AssetBalance] = origBalanceFs
}()
testApp(t, source, ep, "AssetBalance expected field type is []byte but got uint64")
@@ -2256,12 +2255,12 @@ assert
asset_params_get AssetTotal
assert
`
- origTotalFs := assetParamsFieldSpecByField[AssetTotal]
+ origTotalFs := assetParamsFieldSpecs[AssetTotal]
badTotalFs := origTotalFs
badTotalFs.ftype = StackBytes
- assetParamsFieldSpecByField[AssetTotal] = badTotalFs
+ assetParamsFieldSpecs[AssetTotal] = badTotalFs
defer func() {
- assetParamsFieldSpecByField[AssetTotal] = origTotalFs
+ assetParamsFieldSpecs[AssetTotal] = origTotalFs
}()
testApp(t, source, ep, "AssetTotal expected field type is []byte but got uint64")
@@ -2279,11 +2278,6 @@ func TestReturnTypes(t *testing.T) {
}
ep, tx, ledger := makeSampleEnv()
- // This unit test reususes this `ep` willy-nilly. Would be nice to rewrite,
- // but for now, trun off budget pooling so that it doesn't get exhausted.
- ep.Proto.EnableAppCostPooling = false
- ep.PooledApplicationBudget = nil
-
tx.Type = protocol.ApplicationCallTx
tx.ApplicationID = 1
tx.ForeignApps = []basics.AppIndex{tx.ApplicationID}
@@ -2322,22 +2316,22 @@ func TestReturnTypes(t *testing.T) {
ledger.NewLocal(tx.Receiver, 1, string(key), algoValue)
ledger.NewAccount(appAddr(1), 1000000)
+ // We try to form a snippet that will test every opcode, by sandwiching it
+ // between arguments that correspond to the opcodes input types, and then
+ // check to see if the proper output types end up on the stack. But many
+ // opcodes require more specific inputs than a constant string or the number
+ // 1 for ints. Defaults are also supplied for immediate arguments. For
+ // opcodes that need to set up their own stack inputs, a ":" at the front of
+ // the string means "start with an empty stack".
specialCmd := map[string]string{
"txn": "txn Sender",
"txna": "txna ApplicationArgs 0",
"gtxn": "gtxn 0 Sender",
"gtxna": "gtxna 0 ApplicationArgs 0",
"global": "global MinTxnFee",
- "arg": "arg 0",
- "load": "load 0",
- "store": "store 0",
- "gload": "gload 0 0",
- "gloads": "gloads 0",
- "gloadss": "pop; pop; int 0; int 1; gloadss", // Needs txn index = 0 to work
- "gaid": "gaid 0",
- "dig": "dig 0",
- "cover": "cover 0",
- "uncover": "uncover 0",
+ "gaids": ": int 0; gaids",
+ "gloads": ": int 0; gloads 0", // Needs txn index = 0 to work
+ "gloadss": ": int 0; int 1; gloadss", // Needs txn index = 0 to work
"intc": "intcblock 0; intc 0",
"intc_0": "intcblock 0; intc_0",
"intc_1": "intcblock 0 0; intc_1",
@@ -2349,28 +2343,32 @@ func TestReturnTypes(t *testing.T) {
"bytec_2": "bytecblock 0x32 0x33 0x34; bytec_2",
"bytec_3": "bytecblock 0x32 0x33 0x34 0x35; bytec_3",
"substring": "substring 0 2",
- "asset_params_get": "asset_params_get AssetTotal",
+ "extract_uint32": ": byte 0x0102030405; int 1; extract_uint32",
+ "extract_uint64": ": byte 0x010203040506070809; int 1; extract_uint64",
+ "asset_params_get": "asset_params_get AssetUnitName",
"asset_holding_get": "asset_holding_get AssetBalance",
"gtxns": "gtxns Sender",
- "gtxnsa": "pop; int 0; gtxnsa ApplicationArgs 0",
- "pushint": "pushint 7272",
- "pushbytes": `pushbytes "jojogoodgorilla"`,
+ "gtxnsa": ": int 0; gtxnsa ApplicationArgs 0",
"app_params_get": "app_params_get AppGlobalNumUint",
"acct_params_get": "acct_params_get AcctMinBalance",
"extract": "extract 0 2",
"txnas": "txnas ApplicationArgs",
"gtxnas": "gtxnas 0 ApplicationArgs",
- "gtxnsas": "pop; pop; int 0; int 0; gtxnsas ApplicationArgs",
- "divw": "pop; pop; pop; int 1; int 2; int 3; divw",
- "args": "args",
- "itxn": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; itxn CreatedAssetID",
- "itxna": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; itxna Accounts 0",
- "itxnas": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; itxnas Accounts",
- "gitxn": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; gitxn 0 Sender",
- "gitxna": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; gitxna 0 Accounts 0",
- "gitxnas": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; gitxnas 0 Accounts",
- "base64_decode": `pushbytes "YWJjMTIzIT8kKiYoKSctPUB+"; base64_decode StdEncoding; pushbytes "abc123!?$*&()'-=@~"; ==; pushbytes "YWJjMTIzIT8kKiYoKSctPUB-"; base64_decode URLEncoding; pushbytes "abc123!?$*&()'-=@~"; ==; &&; assert`,
- "json_ref": "json_ref JSONUint64",
+ "gtxnsas": ": int 0; int 0; gtxnsas ApplicationArgs",
+ "divw": ": int 1; int 2; int 3; divw",
+
+ "itxn_field": "itxn_begin; itxn_field TypeEnum",
+ "itxn_next": "itxn_begin; int pay; itxn_field TypeEnum; itxn_next",
+ "itxn_submit": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit",
+ "itxn": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; itxn CreatedAssetID",
+ "itxna": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; itxna Accounts 0",
+ "itxnas": ": itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; int 0; itxnas Accounts",
+ "gitxn": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; gitxn 0 Sender",
+ "gitxna": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; gitxna 0 Accounts 0",
+ "gitxnas": ": itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; int 0; gitxnas 0 Accounts",
+
+ "base64_decode": `: byte "YWJjMTIzIT8kKiYoKSctPUB+"; base64_decode StdEncoding`,
+ "json_ref": `: byte "{\"k\": 7}"; byte "k"; json_ref JSONUint64`,
}
/* Make sure the specialCmd tests the opcode in question */
@@ -2378,8 +2376,13 @@ func TestReturnTypes(t *testing.T) {
assert.Contains(t, cmd, opcode)
}
- // these require special input data and tested separately
+ // these have strange stack semantics or require special input data /
+ // context, so they must be tested separately
skipCmd := map[string]bool{
+ "retsub": true,
+ "err": true,
+ "return": true,
+
"ed25519verify": true,
"ed25519verify_bare": true,
"ecdsa_verify": true,
@@ -2388,32 +2391,62 @@ func TestReturnTypes(t *testing.T) {
}
byName := OpsByName[LogicVersion]
- for _, m := range []runMode{runModeSignature, runModeApplication} {
- t.Run(fmt.Sprintf("m=%s", m.String()), func(t *testing.T) {
- for name, spec := range byName {
- if len(spec.Returns) == 0 || (m&spec.Modes) == 0 || skipCmd[name] {
- continue
+ for _, m := range []runMode{modeSig, modeApp} {
+ for name, spec := range byName {
+ // Only try an opcode in its modes
+ if (m & spec.Modes) == 0 {
+ continue
+ }
+ if skipCmd[name] {
+ continue
+ }
+ t.Run(fmt.Sprintf("mode=%s,opcode=%s", m, name), func(t *testing.T) {
+ provideStackInput := true
+ cmd := name
+ if special, ok := specialCmd[name]; ok {
+ if strings.HasPrefix(special, ":") {
+ cmd = special[1:]
+ provideStackInput = false
+ } else {
+ cmd = special
+ }
+ } else {
+ for _, imm := range spec.OpDetails.Immediates {
+ switch imm.kind {
+ case immByte:
+ cmd += " 0"
+ case immInt:
+ cmd += " 10"
+ case immInts:
+ cmd += " 11 12 13"
+ case immBytes:
+ cmd += " 0x123456"
+ case immBytess:
+ cmd += " 0x12 0x34 0x56"
+ case immLabel:
+ cmd += " done; done: ;"
+ default:
+ require.Fail(t, "bad immediate", "%s", imm)
+ }
+ }
}
var sb strings.Builder
- sb.Grow(64)
- for _, t := range spec.Args {
- sb.WriteString(typeToArg[t])
- }
- if cmd, ok := specialCmd[name]; ok {
- sb.WriteString(cmd + "\n")
- } else {
- sb.WriteString(name + "\n")
+ if provideStackInput {
+ for _, t := range spec.Arg.Types {
+ sb.WriteString(typeToArg[t])
+ }
}
- source := sb.String()
- ops := testProg(t, source, AssemblerMaxVersion)
+ sb.WriteString(cmd + "\n")
+ ops := testProg(t, sb.String(), AssemblerMaxVersion)
+
+ ep.reset() // for Trace and budget isolation
+ ep.pastScratch[0] = &scratchSpace{} // for gload
- // Setup as if evaluation is in tx1, since we want to test gaid
- // that must look back.
cx := EvalContext{
EvalParams: ep,
runModeFlags: m,
- GroupIndex: 1,
- Txn: &ep.TxnGroup[1],
+ groupIndex: 1,
+ txn: &ep.TxnGroup[1],
appID: 1,
}
@@ -2421,27 +2454,31 @@ func TestReturnTypes(t *testing.T) {
// This convinces them all to work. Revisit.
cx.TxnGroup[0].ConfigAsset = 100
- eval(ops.Program, &cx)
-
- assert.Equal(
- t,
- len(spec.Returns), len(cx.stack),
- fmt.Sprintf("\n%s%s expected to return %d values but stack is %#v", ep.Trace, spec.Name, len(spec.Returns), cx.stack),
- )
- for i := 0; i < len(spec.Returns); i++ {
- sp := len(cx.stack) - 1 - i
- if sp < 0 {
- continue // We only assert this above, not require.
+ // These little programs need not pass. Since the returned stack
+ // is checked for typing, we can't get hung up on whether it is
+ // exactly one positive int. But if it fails for any *other*
+ // reason, we're not doing a good test.
+ _, err := eval(ops.Program, &cx)
+ if err != nil {
+ // Allow the kinds of errors we expect, but fail for stuff
+ // that indicates the opcode itself failed.
+ reason := err.Error()
+ if reason != "stack finished with bytes not int" &&
+ !strings.HasPrefix(reason, "stack len is") {
+ require.NoError(t, err, "%s: %s\n%s", name, err, ep.Trace)
}
- stackType := cx.stack[sp].argType()
- retType := spec.Returns[i]
- assert.True(
+ }
+ require.Len(t, cx.stack, len(spec.Return.Types), "%s", ep.Trace)
+ for i := 0; i < len(spec.Return.Types); i++ {
+ stackType := cx.stack[i].argType()
+ retType := spec.Return.Types[i]
+ require.True(
t, typecheck(retType, stackType),
- fmt.Sprintf("%s expected to return %s but actual is %s", spec.Name, retType.String(), stackType.String()),
+ "%s expected to return %s but actual is %s", spec.Name, retType, stackType,
)
}
- }
- })
+ })
+ }
}
}
@@ -2539,6 +2576,8 @@ func appAddr(id int) basics.Address {
}
func TestAppInfo(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
ep, tx, ledger := makeSampleEnv()
require.Equal(t, 888, int(tx.ApplicationID))
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
@@ -2557,6 +2596,8 @@ func TestAppInfo(t *testing.T) {
}
func TestBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
ep := defaultEvalParams(nil)
source := `
global OpcodeBudget
@@ -2571,6 +2612,8 @@ int 695
}
func TestSelfMutate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
ep, _, ledger := makeSampleEnv()
/* In order to test the added protection of mutableAccountReference, we're
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index fc6cf1cbc..df4f43856 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -98,6 +98,8 @@ func makeTestProtoV(version uint64) *config.ConsensusParams {
EnableAppCostPooling: true,
EnableInnerTransactionPooling: true,
+ MinInnerApplVersion: 4,
+
SupportBecomeNonParticipatingTransactions: true,
}
}
@@ -455,7 +457,11 @@ int 1
+
`, 1)
- testAccepts(t, `
+ // This code accepts if run, but the assembler will complain because the
+ // "straightline" path has a typing error. That path is not taken because
+ // of the specific values used, so there is no runtime error. You could
+ // assemble this with "#pragma typetrack false", and it would accept.
+ code := `
int 1
int 2
int 1
@@ -470,7 +476,9 @@ planb:
after:
dup
pop
-`, 1)
+`
+ testProg(t, code, LogicVersion, Expect{12, "+ expects 2 stack arguments..."})
+ testAccepts(t, notrack(code), 1)
}
func TestV2Branches(t *testing.T) {
@@ -593,6 +601,7 @@ func TestDivw(t *testing.T) {
func TestUint128(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
x := uint128(0, 3)
require.Equal(t, x.String(), "3")
x = uint128(0, 0)
@@ -653,6 +662,7 @@ func TestDivModw(t *testing.T) {
func TestWideMath(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// 2^64 = 18446744073709551616, we use a bunch of numbers close to that below
pattern := `
int %d
@@ -697,11 +707,14 @@ int 1
}
func TestMulDiv(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
// Demonstrate a "function" that expects three u64s on stack,
// and calculates B*C/A. (Following opcode documentation
// convention, C is top-of-stack, B is below it, and A is
// below B.
+ t.Parallel()
muldiv := `
muldiv:
mulw // multiply B*C. puts TWO u64s on stack
@@ -1089,7 +1102,7 @@ func TestOnCompletionConstants(t *testing.T) {
}
require.Less(t, last, max, "too many OnCompletion constants, adjust max limit")
require.Equal(t, int(invalidOnCompletionConst), last)
- require.Equal(t, len(onCompletionConstToUint64), len(onCompletionDescriptions))
+ require.Equal(t, len(onCompletionMap), len(onCompletionDescriptions))
require.Equal(t, len(OnCompletionNames), last)
for v := NoOp; v < invalidOnCompletionConst; v++ {
require.Equal(t, v.String(), OnCompletionNames[int(v)])
@@ -1099,8 +1112,8 @@ func TestOnCompletionConstants(t *testing.T) {
for i := 0; i < last; i++ {
oc := OnCompletionConstType(i)
symbol := oc.String()
- require.Contains(t, onCompletionConstToUint64, symbol)
- require.Equal(t, uint64(i), onCompletionConstToUint64[symbol])
+ require.Contains(t, onCompletionMap, symbol)
+ require.Equal(t, uint64(i), onCompletionMap[symbol])
t.Run(symbol, func(t *testing.T) {
testAccepts(t, fmt.Sprintf("int %s; int %s; ==;", symbol, oc), 1)
})
@@ -1543,7 +1556,7 @@ func TestTxn(t *testing.T) {
}
for i, txnField := range TxnFieldNames {
- fs := txnFieldSpecByField[TxnField(i)]
+ fs := txnFieldSpecs[i]
// Ensure that each field appears, starting in the version it was introduced
for v := uint64(1); v <= uint64(LogicVersion); v++ {
if v < fs.version {
@@ -2477,7 +2490,7 @@ int 1`,
Type: protocol.PaymentTx,
},
},
- runMode: runModeApplication,
+ runMode: modeApp,
errContains: "can't use gload on non-app call txn with index 0",
}
@@ -2487,7 +2500,7 @@ int 1`,
Type: protocol.ApplicationCallTx,
},
},
- runMode: runModeSignature,
+ runMode: modeSig,
errContains: "gload not allowed in current mode",
}
@@ -2508,7 +2521,7 @@ int 1`,
}
switch failCase.runMode {
- case runModeApplication:
+ case modeApp:
testAppBytes(t, program, ep, failCase.errContains)
default:
testLogicBytes(t, program, ep, failCase.errContains, failCase.errContains)
@@ -2839,7 +2852,7 @@ func TestShortBytecblock2(t *testing.T) {
const panicString = "out of memory, buffer overrun, stack overflow, divide by zero, halt and catch fire"
-func opPanic(cx *EvalContext) {
+func opPanic(cx *EvalContext) error {
panic(panicString)
}
func checkPanic(cx *EvalContext) error {
@@ -2855,13 +2868,16 @@ func TestPanic(t *testing.T) {
ops := testProg(t, `int 1`, v)
var hackedOpcode int
var oldSpec OpSpec
+ // Find an unused opcode to temporarily convert to a panicing opcde,
+ // and append it to program.
for opcode, spec := range opsByOpcode[v] {
if spec.op == nil {
hackedOpcode = opcode
oldSpec = spec
opsByOpcode[v][opcode].op = opPanic
opsByOpcode[v][opcode].Modes = modeAny
- opsByOpcode[v][opcode].Details.checkFunc = checkPanic
+ opsByOpcode[v][opcode].OpDetails.FullCost.baseCost = 1
+ opsByOpcode[v][opcode].OpDetails.check = checkPanic
ops.Program = append(ops.Program, byte(opcode))
break
}
@@ -2905,7 +2921,7 @@ func TestProgramTooNew(t *testing.T) {
t.Parallel()
var program [12]byte
- vlen := binary.PutUvarint(program[:], EvalMaxVersion+1)
+ vlen := binary.PutUvarint(program[:], evalMaxVersion+1)
testLogicBytes(t, program[:vlen], defaultEvalParams(nil),
"greater than max supported", "greater than max supported")
}
@@ -2924,10 +2940,10 @@ func TestProgramProtoForbidden(t *testing.T) {
t.Parallel()
var program [12]byte
- vlen := binary.PutUvarint(program[:], EvalMaxVersion)
+ vlen := binary.PutUvarint(program[:], evalMaxVersion)
ep := defaultEvalParams(nil)
ep.Proto = &config.ConsensusParams{
- LogicSigVersion: EvalMaxVersion - 1,
+ LogicSigVersion: evalMaxVersion - 1,
}
testLogicBytes(t, program[:vlen], ep, "greater than protocol", "greater than protocol")
}
@@ -2983,7 +2999,7 @@ int 1`, v)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 200 // clobber the branch offset to be beyond the end of the program
testLogicBytes(t, ops.Program, defaultEvalParams(nil),
- "beyond end of program", "beyond end of program")
+ "outside of program", "outside of program")
})
}
}
@@ -3006,7 +3022,7 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[6] = 0x70 // clobber hi byte of branch offset
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "beyond", "beyond")
+ testLogicBytes(t, ops.Program, defaultEvalParams(nil), "outside", "outside")
})
}
branches := []string{
@@ -3027,7 +3043,7 @@ intc_1
ops.Program[7] = 0xf0 // clobber the branch offset - highly negative
ops.Program[8] = 0xff // clobber the branch offset
testLogicBytes(t, ops.Program, defaultEvalParams(nil),
- "branch target beyond", "branch target beyond")
+ "outside of program", "outside of program")
})
}
}
@@ -3337,9 +3353,9 @@ func benchmarkBasicProgram(b *testing.B, source string) {
}
// Rather than run b.N times, build a program that runs the operation
-// 2000 times, and does so for b.N / 2000 tuns. This lets us amortize
+// 2000 times, and does so for b.N / 2000 runs. This lets us amortize
// away the creation and teardown of the evaluation system. We report
-// the "waste/op" as the number of extra instructions that are run
+// the "extra/op" as the number of extra instructions that are run
// during the "operation". They are presumed to be fast (15/ns), so
// the idea is that you can subtract that out from the reported speed
func benchmarkOperation(b *testing.B, prefix string, operation string, suffix string) {
@@ -3349,11 +3365,12 @@ func benchmarkOperation(b *testing.B, prefix string, operation string, suffix st
source = strings.ReplaceAll(source, ";", "\n")
ops := testProg(b, source, AssemblerMaxVersion)
evalLoop(b, runs, ops.Program)
- b.ReportMetric(float64(inst)*15.0, "waste/op")
+ b.ReportMetric(float64(inst), "extra/op")
}
func BenchmarkUintMath(b *testing.B) {
benches := [][]string{
+ {"dup", "int 23423", "dup; pop", ""},
{"pop1", "", "int 1234576; pop", "int 1"},
{"pop", "", "int 1234576; int 6712; pop; pop", "int 1"},
{"add", "", "int 1234576; int 6712; +; pop", "int 1"},
@@ -3370,6 +3387,7 @@ func BenchmarkUintMath(b *testing.B) {
}
for _, bench := range benches {
b.Run(bench[0], func(b *testing.B) {
+ b.ReportAllocs()
benchmarkOperation(b, bench[1], bench[2], bench[3])
})
}
@@ -3383,11 +3401,11 @@ func BenchmarkUintCmp(b *testing.B) {
})
}
}
-func BenchmarkBigLogic(b *testing.B) {
+func BenchmarkByteLogic(b *testing.B) {
benches := [][]string{
- {"b&", "byte 0x01234576", "byte 0x01ffffffffffffff; b&", "pop; int 1"},
- {"b|", "byte 0x0ffff1234576", "byte 0x1202; b|", "pop; int 1"},
- {"b^", "byte 0x01234576", "byte 0x0223627389; b^", "pop; int 1"},
+ {"b&", "", "byte 0x012345678901feab; byte 0x01ffffffffffffff; b&; pop", "int 1"},
+ {"b|", "", "byte 0x0ffff1234576abef; byte 0x1202120212021202; b|; pop", "int 1"},
+ {"b^", "", "byte 0x0ffff1234576abef; byte 0x1202120212021202; b^; pop", "int 1"},
{"b~", "byte 0x0123457673624736", "b~", "pop; int 1"},
{"b&big",
@@ -3398,7 +3416,7 @@ func BenchmarkBigLogic(b *testing.B) {
"byte 0x0123457601234576012345760123457601234576012345760123457601234576",
"byte 0xffffff01ffffffffffffff01234576012345760123457601234576; b|",
"pop; int 1"},
- {"b^big", "", // u256*u256
+ {"b^big", "", // u256^u256
`byte 0x123457601234576012345760123457601234576012345760123457601234576a
byte 0xf123457601234576012345760123457601234576012345760123457601234576; b^; pop`,
"int 1"},
@@ -3408,12 +3426,13 @@ func BenchmarkBigLogic(b *testing.B) {
}
for _, bench := range benches {
b.Run(bench[0], func(b *testing.B) {
+ b.ReportAllocs()
benchmarkOperation(b, bench[1], bench[2], bench[3])
})
}
}
-func BenchmarkBigMath(b *testing.B) {
+func BenchmarkByteMath(b *testing.B) {
benches := [][]string{
{"bpop", "", "byte 0x01ffffffffffffff; pop", "int 1"},
@@ -3464,18 +3483,13 @@ func BenchmarkBase64Decode(b *testing.B) {
bigStd := strings.Repeat(medStd, 4)
bigURL := strings.Repeat(medURL, 4)
- tags := []string{"small", "medium", "large"}
- stds := []string{smallStd, medStd, bigStd}
- urls := []string{smallURL, medURL, bigURL}
+ tags := []string{"0", "64", "1024", "4096"}
+ stds := []string{"", smallStd, medStd, bigStd}
+ urls := []string{"", smallURL, medURL, bigURL}
ops := []string{
- "",
- "len",
+ "int 1; int 2; +; pop",
"b~",
"int 1; pop",
- "keccak256",
- "sha256",
- "sha512_256",
- "sha3_256",
"base64_decode StdEncoding",
"base64_decode URLEncoding",
}
@@ -3548,24 +3562,6 @@ func BenchmarkCheckx5(b *testing.B) {
}
}
-func TestStackValues(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- t.Parallel()
-
- actual := oneInt.plus(oneInt)
- require.Equal(t, twoInts, actual)
-
- actual = oneInt.plus(oneAny)
- require.Equal(t, StackTypes{StackUint64, StackAny}, actual)
-
- actual = twoInts.plus(oneBytes)
- require.Equal(t, StackTypes{StackUint64, StackUint64, StackBytes}, actual)
-
- actual = oneInt.plus(oneBytes).plus(oneAny)
- require.Equal(t, StackTypes{StackUint64, StackBytes, StackAny}, actual)
-}
-
func TestEvalVersions(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -3600,7 +3596,7 @@ func TestStackOverflow(t *testing.T) {
t.Parallel()
source := "int 1; int 2;"
- for i := 1; i < MaxStackDepth/2; i++ {
+ for i := 1; i < maxStackDepth/2; i++ {
source += "dup2;"
}
testAccepts(t, source+"return", 2)
@@ -3692,6 +3688,7 @@ func TestArgType(t *testing.T) {
func TestApplicationsDisallowOldTeal(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
const source = "int 1"
txn := makeSampleTxn()
@@ -3711,6 +3708,7 @@ func TestApplicationsDisallowOldTeal(t *testing.T) {
func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
const source = "int 1"
// Construct a group of two payments, no rekeying
@@ -3753,7 +3751,7 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
ep.TxnGroup = transactions.WrapSignedTxnsWithAD(cse.group)
// Computed MinTealVersion should be == validFromVersion
- calc := ComputeMinTealVersion(ep.TxnGroup, false)
+ calc := ComputeMinTealVersion(ep.TxnGroup)
require.Equal(t, calc, cse.validFromVersion)
// Should fail for all versions < validFromVersion
@@ -3889,10 +3887,23 @@ func TestAllowedOpcodesV3(t *testing.T) {
require.Len(t, tests, cnt)
}
-func TestRekeyFailsOnOldVersion(t *testing.T) {
+// TestLinearOpcodes ensures we don't have a linear cost opcode (which
+// inherently requires a dynamic cost model) before backBranchEnabledVersion,
+// which introduced our dynamic model.
+func TestLinearOpcodes(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
+ for _, spec := range OpSpecs {
+ if spec.Version < backBranchEnabledVersion {
+ require.Zero(t, spec.OpDetails.FullCost.chunkCost, spec)
+ }
+ }
+}
+func TestRekeyFailsOnOldVersion(t *testing.T) {
+ partitiontest.PartitionTest(t)
t.Parallel()
+
for v := uint64(0); v < rekeyingEnabledVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `int 1`, v)
@@ -3905,40 +3916,29 @@ func TestRekeyFailsOnOldVersion(t *testing.T) {
}
}
-func obfuscate(program string) string {
+func notrack(program string) string {
// Put a prefix on the program that does nothing interesting,
// but prevents assembly from detecting type errors. Allows
// evaluation testing of a program that would be rejected by
// assembler.
- if strings.Contains(program, "obfuscate") {
+ pragma := "#pragma typetrack false\n"
+ if strings.Contains(program, pragma) {
return program // Already done. Tests sometimes use at multiple levels
}
- return "int 0;bnz obfuscate;obfuscate:;" + program
+ return pragma + program
}
type evalTester func(pass bool, err error) bool
-func testEvaluation(t *testing.T, program string, introduced uint64, tester evalTester, xtras ...uint64) error {
+func testEvaluation(t *testing.T, program string, introduced uint64, tester evalTester) error {
t.Helper()
- numXtras := len(xtras)
- require.LessOrEqual(t, numXtras, 1, "can handle at most 1 extra parameter but provided %d", numXtras)
- withField := false
- var introducedField uint64
- if numXtras == 1 {
- withField = true
- introducedField = xtras[0]
- }
-
var outer error
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
t.Helper()
if v < introduced {
- testProg(t, obfuscate(program), v, Expect{0, "...was introduced..."})
- return
- } else if withField && v < introducedField {
- testProg(t, obfuscate(program), v, Expect{0, "...available in version..."})
+ testProg(t, notrack(program), v, Expect{0, "...was introduced..."})
return
}
ops := testProg(t, program, v)
@@ -3989,19 +3989,6 @@ func testRejects(t *testing.T, program string, introduced uint64) {
return !pass && err == nil
})
}
-func testRejectsWithField(t *testing.T, program string, introducedOpcode, introducedField uint64) {
- t.Helper()
- testEvaluation(t, program, introducedOpcode, func(pass bool, err error) bool {
- // Returned False, but didn't panic
- return !pass && err == nil
- }, introducedField)
-}
-func testAcceptsWithField(t *testing.T, program string, introducedOpcode, introducedField uint64) {
- t.Helper()
- testEvaluation(t, program, introducedOpcode, func(pass bool, err error) bool {
- return pass && err == nil
- }, introducedField)
-}
func testPanics(t *testing.T, program string, introduced uint64) error {
t.Helper()
return testEvaluation(t, program, introduced, func(pass bool, err error) bool {
@@ -4009,13 +3996,6 @@ func testPanics(t *testing.T, program string, introduced uint64) error {
return !pass && err != nil
})
}
-func testPanicsWithField(t *testing.T, program string, introducedOpcode, introducedField uint64) error {
- t.Helper()
- return testEvaluation(t, program, introducedOpcode, func(pass bool, err error) bool {
- // TEAL panic! not just reject at exit
- return !pass && err != nil
- }, introducedField)
-}
func TestAssert(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -4024,8 +4004,8 @@ func TestAssert(t *testing.T) {
testAccepts(t, "int 1;assert;int 1", 3)
testRejects(t, "int 1;assert;int 0", 3)
testPanics(t, "int 0;assert;int 1", 3)
- testPanics(t, obfuscate("assert;int 1"), 3)
- testPanics(t, obfuscate(`byte "john";assert;int 1`), 3)
+ testPanics(t, notrack("assert;int 1"), 3)
+ testPanics(t, notrack(`byte "john";assert;int 1`), 3)
}
func TestBits(t *testing.T) {
@@ -4110,7 +4090,7 @@ func TestSwap(t *testing.T) {
t.Parallel()
testAccepts(t, "int 1; byte 0x1234; swap; int 1; ==; assert; byte 0x1234; ==", 3)
- testPanics(t, obfuscate("int 1; swap; int 1; return"), 3)
+ testPanics(t, notrack("int 1; swap; int 1; return"), 3)
}
func TestSelect(t *testing.T) {
@@ -4130,7 +4110,7 @@ func TestDig(t *testing.T) {
t.Parallel()
testAccepts(t, "int 3; int 2; int 1; dig 1; int 2; ==; return", 3)
- testPanics(t, obfuscate("int 3; int 2; int 1; dig 11; int 2; ==; return"), 3)
+ testPanics(t, notrack("int 3; int 2; int 1; dig 11; int 2; ==; return"), 3)
}
func TestCover(t *testing.T) {
@@ -4140,8 +4120,8 @@ func TestCover(t *testing.T) {
testAccepts(t, "int 4; int 3; int 2; int 1; cover 1; int 2; ==; return", 5)
testAccepts(t, "int 4; int 3; int 2; int 1; cover 2; int 2; ==; return", 5)
testAccepts(t, "int 4; int 3; int 2; int 1; cover 2; pop; pop; int 1; ==; return", 5)
- testPanics(t, obfuscate("int 4; int 3; int 2; int 1; cover 11; int 2; ==; return"), 5)
- testPanics(t, obfuscate("int 4; int 3; int 2; int 1; cover 4; int 2; ==; return"), 5)
+ testPanics(t, notrack("int 4; int 3; int 2; int 1; cover 11; int 2; ==; return"), 5)
+ testPanics(t, notrack("int 4; int 3; int 2; int 1; cover 4; int 2; ==; return"), 5)
}
func TestUncover(t *testing.T) {
@@ -4153,8 +4133,8 @@ func TestUncover(t *testing.T) {
testAccepts(t, "int 4; int 3; int 2; int 1; uncover 3; pop; int 1; ==; return", 5)
testAccepts(t, "int 4; int 3; int 2; int 1; uncover 3; pop; pop; int 2; ==; return", 5)
testAccepts(t, "int 1; int 3; int 2; int 1; uncover 3; pop; pop; int 2; ==; return", 5)
- testPanics(t, obfuscate("int 4; int 3; int 2; int 1; uncover 11; int 3; ==; return"), 5)
- testPanics(t, obfuscate("int 4; int 3; int 2; int 1; uncover 4; int 2; ==; return"), 5)
+ testPanics(t, notrack("int 4; int 3; int 2; int 1; uncover 11; int 3; ==; return"), 5)
+ testPanics(t, notrack("int 4; int 3; int 2; int 1; uncover 4; int 2; ==; return"), 5)
}
func TestPush(t *testing.T) {
@@ -4456,7 +4436,7 @@ func TestBytesCompare(t *testing.T) {
testAccepts(t, "byte 0x11; byte 0x00; b!=", 4)
testAccepts(t, "byte 0x0011; byte 0x1100; b!=", 4)
- testPanics(t, obfuscate("byte 0x11; int 17; b!="), 4)
+ testPanics(t, notrack("byte 0x11; int 17; b!="), 4)
}
func TestBytesBits(t *testing.T) {
@@ -4492,6 +4472,7 @@ func TestBytesBits(t *testing.T) {
func TestBytesConversions(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
testAccepts(t, "byte 0x11; byte 0x10; b+; btoi; int 0x21; ==", 4)
testAccepts(t, "byte 0x0011; byte 0x10; b+; btoi; int 0x21; ==", 4)
}
@@ -4520,15 +4501,15 @@ func TestLog(t *testing.T) {
loglen: 2,
},
{
- source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log;`, MaxLogCalls)),
- loglen: MaxLogCalls,
+ source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log;`, maxLogCalls)),
+ loglen: maxLogCalls,
},
{
source: `int 1; loop: byte "a logging message"; log; int 1; +; dup; int 30; <=; bnz loop;`,
loglen: 30,
},
{
- source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", MaxLogSize)),
+ source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", maxLogSize)),
loglen: 1,
},
}
@@ -4538,7 +4519,7 @@ func TestLog(t *testing.T) {
delta := testApp(t, s.source, ep)
require.Len(t, delta.Logs, s.loglen)
if i == len(testCases)-1 {
- require.Equal(t, strings.Repeat("a", MaxLogSize), delta.Logs[0])
+ require.Equal(t, strings.Repeat("a", maxLogSize), delta.Logs[0])
} else {
for _, l := range delta.Logs {
require.Equal(t, "a logging message", l)
@@ -4553,45 +4534,45 @@ func TestLog(t *testing.T) {
errContains string
}{
{
- source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", MaxLogSize+1)),
- errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
- runMode: runModeApplication,
+ source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", maxLogSize+1)),
+ errContains: fmt.Sprintf("> %d bytes limit", maxLogSize),
+ runMode: modeApp,
},
{
source: fmt.Sprintf(`byte "%s"; log; byte "%s"; log; byte "%s"; log; int 1`, msg, msg, msg),
- errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
- runMode: runModeApplication,
+ errContains: fmt.Sprintf("> %d bytes limit", maxLogSize),
+ runMode: modeApp,
},
{
- source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log;`, MaxLogCalls+1)),
+ source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log;`, maxLogCalls+1)),
errContains: "too many log calls",
- runMode: runModeApplication,
+ runMode: modeApp,
},
{
source: `int 1; loop: byte "a"; log; int 1; +; dup; int 35; <; bnz loop;`,
errContains: "too many log calls",
- runMode: runModeApplication,
+ runMode: modeApp,
},
{
source: fmt.Sprintf(`int 1; loop: byte "%s"; log; int 1; +; dup; int 6; <; bnz loop;`, strings.Repeat(`a`, 400)),
- errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
- runMode: runModeApplication,
+ errContains: fmt.Sprintf("> %d bytes limit", maxLogSize),
+ runMode: modeApp,
},
{
source: `load 0; log`,
errContains: "log arg 0 wanted []byte but got uint64",
- runMode: runModeApplication,
+ runMode: modeApp,
},
{
source: `byte "a logging message"; log; int 1`,
errContains: "log not allowed in current mode",
- runMode: runModeSignature,
+ runMode: modeSig,
},
}
for _, c := range failCases {
switch c.runMode {
- case runModeApplication:
+ case modeApp:
testApp(t, c.source, ep, c.errContains)
default:
testLogic(t, c.source, AssemblerMaxVersion, ep, c.errContains, c.errContains)
@@ -4631,6 +4612,7 @@ func TestPcDetails(t *testing.T) {
})
}
}
+
func TestOpBase64Decode(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -4761,6 +4743,51 @@ By Herman Melville`, "",
}
}
+func TestBase64CostVariation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+byte ""
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-1) + ` // base64_decode cost = 1
+==
+`
+ testAccepts(t, source, fidoVersion)
+
+ source = `
+byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-5) + ` // base64_decode cost = 5 (64 bytes -> 1 + 64/16)
+==
+`
+ testAccepts(t, source, fidoVersion)
+
+ source = `
+byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-5) + ` // base64_decode cost = 5 (60 bytes -> 1 + ceil(60/16))
+==
+`
+ testAccepts(t, source, fidoVersion)
+
+ source = `
+byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_AA=="
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-6) + ` // base64_decode cost = 6 (68 bytes -> 1 + ceil(68/16))
+==
+`
+ testAccepts(t, source, fidoVersion)
+}
+
func TestHasDuplicateKeys(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -4821,7 +4848,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 0;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "== arg 0 wanted type uint64 got []byte"}},
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -4829,7 +4856,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 18446744073709551615; //max uint64 value
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "== arg 0 wanted type uint64 got []byte"}},
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5154,3 +5181,8 @@ func TestOpJSONRef(t *testing.T) {
}
}
+
+func TestTypeComplaints(t *testing.T) {
+ testProg(t, "err; store 0", AssemblerMaxVersion)
+ testProg(t, "int 1; return; store 0", AssemblerMaxVersion)
+}
diff --git a/data/transactions/logic/export_test.go b/data/transactions/logic/export_test.go
index 6575fa3bc..1a1a21ce2 100644
--- a/data/transactions/logic/export_test.go
+++ b/data/transactions/logic/export_test.go
@@ -36,11 +36,10 @@ var MakeSampleTxn = makeSampleTxn
var MakeSampleTxnGroup = makeSampleTxnGroup
var MakeTestProto = makeTestProto
var MakeTestProtoV = makeTestProtoV
-var Obfuscate = obfuscate
+var NoTrack = notrack
var TestApp = testApp
var TestAppBytes = testAppBytes
var TestApps = testApps
var TestProg = testProg
-const InnerAppsEnabledVersion = innerAppsEnabledVersion
const CreatedResourcesVersion = createdResourcesVersion
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index f42e84d50..0954c3229 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -25,6 +25,40 @@ import (
//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType -output=fields_string.go
+// FieldSpec unifies the various specs for assembly, disassembly, and doc generation.
+type FieldSpec interface {
+ Field() byte
+ Type() StackType
+ OpVersion() uint64
+ Note() string
+ Version() uint64
+}
+
+// fieldSpecMap is something that yields a FieldSpec, given a name for the field
+type fieldSpecMap interface {
+ get(name string) (FieldSpec, bool)
+}
+
+// FieldGroup binds all the info for a field (names, int value, spec access) so
+// they can be attached to opcodes and used by doc generation
+type FieldGroup struct {
+ Name string
+ Doc string
+ Names []string
+ specs fieldSpecMap
+}
+
+// SpecByName returns a FieldsSpec for a name, respecting the "sparseness" of
+// the Names array to hide some names
+func (fg *FieldGroup) SpecByName(name string) (FieldSpec, bool) {
+ if fs, ok := fg.specs.get(name); ok {
+ if fg.Names[fs.Field()] != "" {
+ return fs, true
+ }
+ }
+ return nil, false
+}
+
// TxnField is an enum type for `txn` and `gtxn`
type TxnField int
@@ -167,31 +201,27 @@ const (
// StateProofPK Transaction.StateProofPK
StateProofPK
- invalidTxnField // fence for some setup that loops from Sender..invalidTxnField
+ invalidTxnField // compile-time constant for number of fields
)
-// FieldSpec unifies the various specs for presentation
-type FieldSpec interface {
- Type() StackType
- OpVersion() uint64
- Note() string
- Version() uint64
+func txnFieldSpecByField(f TxnField) (txnFieldSpec, bool) {
+ if int(f) >= len(txnFieldSpecs) {
+ return txnFieldSpec{}, false
+ }
+ return txnFieldSpecs[f], true
}
-// TxnFieldNames are arguments to the 'txn' and 'txnById' opcodes
-var TxnFieldNames []string
-
-var txnFieldSpecByField map[TxnField]txnFieldSpec
+// TxnFieldNames are arguments to the 'txn' family of opcodes.
+var TxnFieldNames [invalidTxnField]string
-// TxnFieldSpecByName gives access to the field specs by field name
-var TxnFieldSpecByName tfNameSpecMap
+var txnFieldSpecByName = make(tfNameSpecMap, len(TxnFieldNames))
// simple interface used by doc generator for fields versioning
type tfNameSpecMap map[string]txnFieldSpec
-func (s tfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s tfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
}
type txnFieldSpec struct {
@@ -201,104 +231,181 @@ type txnFieldSpec struct {
version uint64 // When this field become available to txn/gtxn. 0=always
itxVersion uint64 // When this field become available to itxn_field. 0=never
effects bool // Is this a field on the "effects"? That is, something in ApplyData
+ doc string
}
-func (fs *txnFieldSpec) Type() StackType {
+func (fs txnFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs txnFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *txnFieldSpec) OpVersion() uint64 {
+func (fs txnFieldSpec) OpVersion() uint64 {
return 0
}
-
-func (fs *txnFieldSpec) Version() uint64 {
+func (fs txnFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *txnFieldSpec) Note() string {
- note := txnFieldDocs[fs.field.String()]
+func (fs txnFieldSpec) Note() string {
+ note := fs.doc
if fs.effects {
note = addExtra(note, "Application mode only")
}
return note
}
-var txnFieldSpecs = []txnFieldSpec{
- {Sender, StackBytes, false, 0, 5, false},
- {Fee, StackUint64, false, 0, 5, false},
- {FirstValid, StackUint64, false, 0, 0, false},
- {FirstValidTime, StackUint64, false, 0, 0, false},
- {LastValid, StackUint64, false, 0, 0, false},
- {Note, StackBytes, false, 0, 6, false},
- {Lease, StackBytes, false, 0, 0, false},
- {Receiver, StackBytes, false, 0, 5, false},
- {Amount, StackUint64, false, 0, 5, false},
- {CloseRemainderTo, StackBytes, false, 0, 5, false},
- {VotePK, StackBytes, false, 0, 6, false},
- {SelectionPK, StackBytes, false, 0, 6, false},
- {VoteFirst, StackUint64, false, 0, 6, false},
- {VoteLast, StackUint64, false, 0, 6, false},
- {VoteKeyDilution, StackUint64, false, 0, 6, false},
- {Type, StackBytes, false, 0, 5, false},
- {TypeEnum, StackUint64, false, 0, 5, false},
- {XferAsset, StackUint64, false, 0, 5, false},
- {AssetAmount, StackUint64, false, 0, 5, false},
- {AssetSender, StackBytes, false, 0, 5, false},
- {AssetReceiver, StackBytes, false, 0, 5, false},
- {AssetCloseTo, StackBytes, false, 0, 5, false},
- {GroupIndex, StackUint64, false, 0, 0, false},
- {TxID, StackBytes, false, 0, 0, false},
- {ApplicationID, StackUint64, false, 2, 6, false},
- {OnCompletion, StackUint64, false, 2, 6, false},
- {ApplicationArgs, StackBytes, true, 2, 6, false},
- {NumAppArgs, StackUint64, false, 2, 0, false},
- {Accounts, StackBytes, true, 2, 6, false},
- {NumAccounts, StackUint64, false, 2, 0, false},
- {ApprovalProgram, StackBytes, false, 2, 6, false},
- {ClearStateProgram, StackBytes, false, 2, 6, false},
- {RekeyTo, StackBytes, false, 2, 6, false},
- {ConfigAsset, StackUint64, false, 2, 5, false},
- {ConfigAssetTotal, StackUint64, false, 2, 5, false},
- {ConfigAssetDecimals, StackUint64, false, 2, 5, false},
- {ConfigAssetDefaultFrozen, StackUint64, false, 2, 5, false},
- {ConfigAssetUnitName, StackBytes, false, 2, 5, false},
- {ConfigAssetName, StackBytes, false, 2, 5, false},
- {ConfigAssetURL, StackBytes, false, 2, 5, false},
- {ConfigAssetMetadataHash, StackBytes, false, 2, 5, false},
- {ConfigAssetManager, StackBytes, false, 2, 5, false},
- {ConfigAssetReserve, StackBytes, false, 2, 5, false},
- {ConfigAssetFreeze, StackBytes, false, 2, 5, false},
- {ConfigAssetClawback, StackBytes, false, 2, 5, false},
- {FreezeAsset, StackUint64, false, 2, 5, false},
- {FreezeAssetAccount, StackBytes, false, 2, 5, false},
- {FreezeAssetFrozen, StackUint64, false, 2, 5, false},
- {Assets, StackUint64, true, 3, 6, false},
- {NumAssets, StackUint64, false, 3, 0, false},
- {Applications, StackUint64, true, 3, 6, false},
- {NumApplications, StackUint64, false, 3, 0, false},
- {GlobalNumUint, StackUint64, false, 3, 6, false},
- {GlobalNumByteSlice, StackUint64, false, 3, 6, false},
- {LocalNumUint, StackUint64, false, 3, 6, false},
- {LocalNumByteSlice, StackUint64, false, 3, 6, false},
- {ExtraProgramPages, StackUint64, false, 4, 6, false},
- {Nonparticipation, StackUint64, false, 5, 6, false},
+var txnFieldSpecs = [...]txnFieldSpec{
+ {Sender, StackBytes, false, 0, 5, false, "32 byte address"},
+ {Fee, StackUint64, false, 0, 5, false, "microalgos"},
+ {FirstValid, StackUint64, false, 0, 0, false, "round number"},
+ {FirstValidTime, StackUint64, false, 0, 0, false, "Causes program to fail; reserved for future use"},
+ {LastValid, StackUint64, false, 0, 0, false, "round number"},
+ {Note, StackBytes, false, 0, 6, false, "Any data up to 1024 bytes"},
+ {Lease, StackBytes, false, 0, 0, false, "32 byte lease value"},
+ {Receiver, StackBytes, false, 0, 5, false, "32 byte address"},
+ {Amount, StackUint64, false, 0, 5, false, "microalgos"},
+ {CloseRemainderTo, StackBytes, false, 0, 5, false, "32 byte address"},
+ {VotePK, StackBytes, false, 0, 6, false, "32 byte address"},
+ {SelectionPK, StackBytes, false, 0, 6, false, "32 byte address"},
+ {VoteFirst, StackUint64, false, 0, 6, false, "The first round that the participation key is valid."},
+ {VoteLast, StackUint64, false, 0, 6, false, "The last round that the participation key is valid."},
+ {VoteKeyDilution, StackUint64, false, 0, 6, false, "Dilution for the 2-level participation key"},
+ {Type, StackBytes, false, 0, 5, false, "Transaction type as bytes"},
+ {TypeEnum, StackUint64, false, 0, 5, false, "See table below"},
+ {XferAsset, StackUint64, false, 0, 5, false, "Asset ID"},
+ {AssetAmount, StackUint64, false, 0, 5, false, "value in Asset's units"},
+ {AssetSender, StackBytes, false, 0, 5, false,
+ "32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset."},
+ {AssetReceiver, StackBytes, false, 0, 5, false, "32 byte address"},
+ {AssetCloseTo, StackBytes, false, 0, 5, false, "32 byte address"},
+ {GroupIndex, StackUint64, false, 0, 0, false,
+ "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1"},
+ {TxID, StackBytes, false, 0, 0, false, "The computed ID for this transaction. 32 bytes."},
+ {ApplicationID, StackUint64, false, 2, 6, false, "ApplicationID from ApplicationCall transaction"},
+ {OnCompletion, StackUint64, false, 2, 6, false, "ApplicationCall transaction on completion action"},
+ {ApplicationArgs, StackBytes, true, 2, 6, false,
+ "Arguments passed to the application in the ApplicationCall transaction"},
+ {NumAppArgs, StackUint64, false, 2, 0, false, "Number of ApplicationArgs"},
+ {Accounts, StackBytes, true, 2, 6, false, "Accounts listed in the ApplicationCall transaction"},
+ {NumAccounts, StackUint64, false, 2, 0, false, "Number of Accounts"},
+ {ApprovalProgram, StackBytes, false, 2, 6, false, "Approval program"},
+ {ClearStateProgram, StackBytes, false, 2, 6, false, "Clear state program"},
+ {RekeyTo, StackBytes, false, 2, 6, false, "32 byte Sender's new AuthAddr"},
+ {ConfigAsset, StackUint64, false, 2, 5, false, "Asset ID in asset config transaction"},
+ {ConfigAssetTotal, StackUint64, false, 2, 5, false, "Total number of units of this asset created"},
+ {ConfigAssetDecimals, StackUint64, false, 2, 5, false,
+ "Number of digits to display after the decimal place when displaying the asset"},
+ {ConfigAssetDefaultFrozen, StackUint64, false, 2, 5, false,
+ "Whether the asset's slots are frozen by default or not, 0 or 1"},
+ {ConfigAssetUnitName, StackBytes, false, 2, 5, false, "Unit name of the asset"},
+ {ConfigAssetName, StackBytes, false, 2, 5, false, "The asset name"},
+ {ConfigAssetURL, StackBytes, false, 2, 5, false, "URL"},
+ {ConfigAssetMetadataHash, StackBytes, false, 2, 5, false,
+ "32 byte commitment to unspecified asset metadata"},
+ {ConfigAssetManager, StackBytes, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetReserve, StackBytes, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetFreeze, StackBytes, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetClawback, StackBytes, false, 2, 5, false, "32 byte address"},
+ {FreezeAsset, StackUint64, false, 2, 5, false, "Asset ID being frozen or un-frozen"},
+ {FreezeAssetAccount, StackBytes, false, 2, 5, false,
+ "32 byte address of the account whose asset slot is being frozen or un-frozen"},
+ {FreezeAssetFrozen, StackUint64, false, 2, 5, false, "The new frozen value, 0 or 1"},
+ {Assets, StackUint64, true, 3, 6, false, "Foreign Assets listed in the ApplicationCall transaction"},
+ {NumAssets, StackUint64, false, 3, 0, false, "Number of Assets"},
+ {Applications, StackUint64, true, 3, 6, false, "Foreign Apps listed in the ApplicationCall transaction"},
+ {NumApplications, StackUint64, false, 3, 0, false, "Number of Applications"},
+ {GlobalNumUint, StackUint64, false, 3, 6, false, "Number of global state integers in ApplicationCall"},
+ {GlobalNumByteSlice, StackUint64, false, 3, 6, false, "Number of global state byteslices in ApplicationCall"},
+ {LocalNumUint, StackUint64, false, 3, 6, false, "Number of local state integers in ApplicationCall"},
+ {LocalNumByteSlice, StackUint64, false, 3, 6, false, "Number of local state byteslices in ApplicationCall"},
+ {ExtraProgramPages, StackUint64, false, 4, 6, false,
+ "Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program."},
+ {Nonparticipation, StackUint64, false, 5, 6, false, "Marks an account nonparticipating for rewards"},
// "Effects" Last two things are always going to: 0, true
- {Logs, StackBytes, true, 5, 0, true},
- {NumLogs, StackUint64, false, 5, 0, true},
- {CreatedAssetID, StackUint64, false, 5, 0, true},
- {CreatedApplicationID, StackUint64, false, 5, 0, true},
- {LastLog, StackBytes, false, 6, 0, true},
- {StateProofPK, StackBytes, false, 6, 6, false},
-}
-
-// TxnaFieldNames are arguments to the 'txna' opcode
-// It need not be fast, as it's only used for doc generation.
-func TxnaFieldNames() []string {
- var names []string
- for _, fs := range txnFieldSpecs {
+ {Logs, StackBytes, true, 5, 0, true, "Log messages emitted by an application call (only with `itxn` in v5)"},
+ {NumLogs, StackUint64, false, 5, 0, true, "Number of Logs (only with `itxn` in v5)"},
+ {CreatedAssetID, StackUint64, false, 5, 0, true,
+ "Asset ID allocated by the creation of an ASA (only with `itxn` in v5)"},
+ {CreatedApplicationID, StackUint64, false, 5, 0, true,
+ "ApplicationID allocated by the creation of an application (only with `itxn` in v5)"},
+ {LastLog, StackBytes, false, 6, 0, true, "The last message emitted. Empty bytes if none were emitted"},
+
+ // Not an effect. Just added after the effects fields.
+ {StateProofPK, StackBytes, false, 6, 6, false, "64 byte state proof public key commitment"},
+}
+
+// TxnFields contains info on the arguments to the txn* family of opcodes
+var TxnFields = FieldGroup{
+ "txn", "Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/))",
+ TxnFieldNames[:],
+ txnFieldSpecByName,
+}
+
+// TxnScalarFields narrows TxnFields to only have the names of scalar fetching opcodes
+var TxnScalarFields = FieldGroup{
+ "txn", "",
+ txnScalarFieldNames(),
+ txnFieldSpecByName,
+}
+
+// txnScalarFieldNames are txn field names that return scalars. Return value is
+// a "sparse" slice, the names appear at their usual index, array slots are set
+// to "". They are laid out this way so that it is possible to get the name
+// from the index value.
+func txnScalarFieldNames() []string {
+ names := make([]string, len(txnFieldSpecs))
+ for i, fs := range txnFieldSpecs {
+ if fs.array {
+ names[i] = ""
+ } else {
+ names[i] = fs.field.String()
+ }
+ }
+ return names
+}
+
+// TxnArrayFields narrows TxnFields to only have the names of array fetching opcodes
+var TxnArrayFields = FieldGroup{
+ "txna", "",
+ txnaFieldNames(),
+ txnFieldSpecByName,
+}
+
+// txnaFieldNames are txn field names that return arrays. Return value is a
+// "sparse" slice, the names appear at their usual index, non-array slots are
+// set to "". They are laid out this way so that it is possible to get the name
+// from the index value.
+func txnaFieldNames() []string {
+ names := make([]string, len(txnFieldSpecs))
+ for i, fs := range txnFieldSpecs {
if fs.array {
- names = append(names, fs.field.String())
+ names[i] = fs.field.String()
+ } else {
+ names[i] = ""
+ }
+ }
+ return names
+}
+
+// ItxnSettableFields collects info for itxn_field opcode
+var ItxnSettableFields = FieldGroup{
+ "itxn_field", "",
+ itxnSettableFieldNames(),
+ txnFieldSpecByName,
+}
+
+// itxnSettableFieldNames are txn field names that can be set by
+// itxn_field. Return value is a "sparse" slice, the names appear at their usual
+// index, unsettable slots are set to "". They are laid out this way so that it is
+// possible to get the name from the index value.
+func itxnSettableFieldNames() []string {
+ names := make([]string, len(txnFieldSpecs))
+ for i, fs := range txnFieldSpecs {
+ if fs.itxVersion == 0 {
+ names[i] = ""
+ } else {
+ names[i] = fs.field.String()
}
}
return names
@@ -314,7 +421,7 @@ var innerTxnTypes = map[string]uint64{
}
// TxnTypeNames is the values of Txn.Type in enum order
-var TxnTypeNames = []string{
+var TxnTypeNames = [...]string{
string(protocol.UnknownTx),
string(protocol.PaymentTx),
string(protocol.KeyRegistrationTx),
@@ -324,37 +431,34 @@ var TxnTypeNames = []string{
string(protocol.ApplicationCallTx),
}
-// map TxnTypeName to its enum index, for `txn TypeEnum`
-var txnTypeIndexes map[string]uint64
-
-// map symbolic name to uint64 for assembleInt
-var txnTypeConstToUint64 map[string]uint64
+// map txn type names (long and short) to index/enum value
+var txnTypeMap = make(map[string]uint64)
// OnCompletionConstType is the same as transactions.OnCompletion
type OnCompletionConstType transactions.OnCompletion
const (
// NoOp = transactions.NoOpOC
- NoOp OnCompletionConstType = OnCompletionConstType(transactions.NoOpOC)
+ NoOp = OnCompletionConstType(transactions.NoOpOC)
// OptIn = transactions.OptInOC
- OptIn OnCompletionConstType = OnCompletionConstType(transactions.OptInOC)
+ OptIn = OnCompletionConstType(transactions.OptInOC)
// CloseOut = transactions.CloseOutOC
- CloseOut OnCompletionConstType = OnCompletionConstType(transactions.CloseOutOC)
+ CloseOut = OnCompletionConstType(transactions.CloseOutOC)
// ClearState = transactions.ClearStateOC
- ClearState OnCompletionConstType = OnCompletionConstType(transactions.ClearStateOC)
+ ClearState = OnCompletionConstType(transactions.ClearStateOC)
// UpdateApplication = transactions.UpdateApplicationOC
- UpdateApplication OnCompletionConstType = OnCompletionConstType(transactions.UpdateApplicationOC)
+ UpdateApplication = OnCompletionConstType(transactions.UpdateApplicationOC)
// DeleteApplication = transactions.DeleteApplicationOC
- DeleteApplication OnCompletionConstType = OnCompletionConstType(transactions.DeleteApplicationOC)
+ DeleteApplication = OnCompletionConstType(transactions.DeleteApplicationOC)
// end of constants
- invalidOnCompletionConst OnCompletionConstType = DeleteApplication + 1
+ invalidOnCompletionConst = DeleteApplication + 1
)
// OnCompletionNames is the string names of Txn.OnCompletion, array index is the const value
-var OnCompletionNames []string
+var OnCompletionNames [invalidOnCompletionConst]string
-// onCompletionConstToUint64 map symbolic name to uint64 for assembleInt
-var onCompletionConstToUint64 map[string]uint64
+// onCompletionMap maps symbolic name to uint64 for assembleInt
+var onCompletionMap map[string]uint64
// GlobalField is an enum for `global` opcode
type GlobalField uint64
@@ -405,67 +509,89 @@ const (
// CallerApplicationAddress The Address of the caller app, else ZeroAddress
CallerApplicationAddress
- invalidGlobalField
+ invalidGlobalField // compile-time constant for number of fields
)
// GlobalFieldNames are arguments to the 'global' opcode
-var GlobalFieldNames []string
+var GlobalFieldNames [invalidGlobalField]string
type globalFieldSpec struct {
field GlobalField
ftype StackType
mode runMode
version uint64
+ doc string
}
-func (fs *globalFieldSpec) Type() StackType {
+func (fs globalFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs globalFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *globalFieldSpec) OpVersion() uint64 {
+func (fs globalFieldSpec) OpVersion() uint64 {
return 0
}
-
-func (fs *globalFieldSpec) Version() uint64 {
+func (fs globalFieldSpec) Version() uint64 {
return fs.version
}
-func (fs *globalFieldSpec) Note() string {
- note := globalFieldDocs[fs.field.String()]
- if fs.mode == runModeApplication {
+func (fs globalFieldSpec) Note() string {
+ note := fs.doc
+ if fs.mode == modeApp {
note = addExtra(note, "Application mode only.")
}
// There are no Signature mode only globals
return note
}
-var globalFieldSpecs = []globalFieldSpec{
- {MinTxnFee, StackUint64, modeAny, 0}, // version 0 is the same as TEAL v1 (initial TEAL release)
- {MinBalance, StackUint64, modeAny, 0},
- {MaxTxnLife, StackUint64, modeAny, 0},
- {ZeroAddress, StackBytes, modeAny, 0},
- {GroupSize, StackUint64, modeAny, 0},
- {LogicSigVersion, StackUint64, modeAny, 2},
- {Round, StackUint64, runModeApplication, 2},
- {LatestTimestamp, StackUint64, runModeApplication, 2},
- {CurrentApplicationID, StackUint64, runModeApplication, 2},
- {CreatorAddress, StackBytes, runModeApplication, 3},
- {CurrentApplicationAddress, StackBytes, runModeApplication, 5},
- {GroupID, StackBytes, modeAny, 5},
- {OpcodeBudget, StackUint64, modeAny, 6},
- {CallerApplicationID, StackUint64, runModeApplication, 6},
- {CallerApplicationAddress, StackBytes, runModeApplication, 6},
+var globalFieldSpecs = [...]globalFieldSpec{
+ // version 0 is the same as TEAL v1 (initial TEAL release)
+ {MinTxnFee, StackUint64, modeAny, 0, "microalgos"},
+ {MinBalance, StackUint64, modeAny, 0, "microalgos"},
+ {MaxTxnLife, StackUint64, modeAny, 0, "rounds"},
+ {ZeroAddress, StackBytes, modeAny, 0, "32 byte address of all zero bytes"},
+ {GroupSize, StackUint64, modeAny, 0,
+ "Number of transactions in this atomic transaction group. At least 1"},
+ {LogicSigVersion, StackUint64, modeAny, 2, "Maximum supported version"},
+ {Round, StackUint64, modeApp, 2, "Current round number"},
+ {LatestTimestamp, StackUint64, modeApp, 2,
+ "Last confirmed block UNIX timestamp. Fails if negative"},
+ {CurrentApplicationID, StackUint64, modeApp, 2, "ID of current application executing"},
+ {CreatorAddress, StackBytes, modeApp, 3,
+ "Address of the creator of the current application"},
+ {CurrentApplicationAddress, StackBytes, modeApp, 5,
+ "Address that the current application controls"},
+ {GroupID, StackBytes, modeAny, 5,
+ "ID of the transaction group. 32 zero bytes if the transaction is not part of a group."},
+ {OpcodeBudget, StackUint64, modeAny, 6,
+ "The remaining cost that can be spent by opcodes in this program."},
+ {CallerApplicationID, StackUint64, modeApp, 6,
+ "The application ID of the application that called this application. 0 if this application is at the top-level."},
+ {CallerApplicationAddress, StackBytes, modeApp, 6,
+ "The application address of the application that called this application. ZeroAddress if this application is at the top-level."},
+}
+
+func globalFieldSpecByField(f GlobalField) (globalFieldSpec, bool) {
+ if int(f) >= len(globalFieldSpecs) {
+ return globalFieldSpec{}, false
+ }
+ return globalFieldSpecs[f], true
}
-var globalFieldSpecByField map[GlobalField]globalFieldSpec
-
-// GlobalFieldSpecByName gives access to the field specs by field name
-var GlobalFieldSpecByName gfNameSpecMap
+var globalFieldSpecByName = make(gfNameSpecMap, len(GlobalFieldNames))
type gfNameSpecMap map[string]globalFieldSpec
-func (s gfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s gfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// GlobalFields has info on the global opcode's immediate
+var GlobalFields = FieldGroup{
+ "global", "Fields",
+ GlobalFieldNames[:],
+ globalFieldSpecByName,
}
// EcdsaCurve is an enum for `ecdsa_` opcodes
@@ -476,50 +602,59 @@ const (
Secp256k1 EcdsaCurve = iota
// Secp256r1 curve
Secp256r1
- invalidEcdsaCurve
+ invalidEcdsaCurve // compile-time constant for number of fields
)
-// EcdsaCurveNames are arguments to the 'ecdsa_' opcode
-var EcdsaCurveNames []string
+var ecdsaCurveNames [invalidEcdsaCurve]string
type ecdsaCurveSpec struct {
field EcdsaCurve
version uint64
+ doc string
}
-func (fs *ecdsaCurveSpec) Type() StackType {
- return StackNone // Will not show, since all are the same
+func (fs ecdsaCurveSpec) Field() byte {
+ return byte(fs.field)
}
-
-func (fs *ecdsaCurveSpec) OpVersion() uint64 {
+func (fs ecdsaCurveSpec) Type() StackType {
+ return StackNone // Will not show, since all are untyped
+}
+func (fs ecdsaCurveSpec) OpVersion() uint64 {
return 5
}
-
-func (fs *ecdsaCurveSpec) Version() uint64 {
+func (fs ecdsaCurveSpec) Version() uint64 {
return fs.version
}
+func (fs ecdsaCurveSpec) Note() string {
+ return fs.doc
+}
-func (fs *ecdsaCurveSpec) Note() string {
- note := EcdsaCurveDocs[fs.field.String()]
- return note
+var ecdsaCurveSpecs = [...]ecdsaCurveSpec{
+ {Secp256k1, 5, "secp256k1 curve, used in Bitcoin"},
+ {Secp256r1, fidoVersion, "secp256r1 curve, NIST standard"},
}
-var ecdsaCurveSpecs = []ecdsaCurveSpec{
- {Secp256k1, 5},
- {Secp256r1, fidoVersion},
+func ecdsaCurveSpecByField(c EcdsaCurve) (ecdsaCurveSpec, bool) {
+ if int(c) >= len(ecdsaCurveSpecs) {
+ return ecdsaCurveSpec{}, false
+ }
+ return ecdsaCurveSpecs[c], true
}
-var ecdsaCurveSpecByField map[EcdsaCurve]ecdsaCurveSpec
+var ecdsaCurveSpecByName = make(ecdsaCurveNameSpecMap, len(ecdsaCurveNames))
-// EcdsaCurveSpecByName gives access to the field specs by field name
-var EcdsaCurveSpecByName ecDsaCurveNameSpecMap
+type ecdsaCurveNameSpecMap map[string]ecdsaCurveSpec
-// simple interface used by doc generator for fields versioning
-type ecDsaCurveNameSpecMap map[string]ecdsaCurveSpec
+func (s ecdsaCurveNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
-func (s ecDsaCurveNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+// EcdsaCurves collects details about the constants used to describe EcdsaCurves
+var EcdsaCurves = FieldGroup{
+ "ECDSA", "Curves",
+ ecdsaCurveNames[:],
+ ecdsaCurveSpecByName,
}
// Base64Encoding is an enum for the `base64decode` opcode
@@ -530,47 +665,59 @@ const (
URLEncoding Base64Encoding = iota
// StdEncoding represents the standard encoding of the RFC
StdEncoding
- invalidBase64Alphabet
+ invalidBase64Encoding // compile-time constant for number of fields
)
-// After running `go generate` these strings will be available:
-var base64EncodingNames [2]string = [...]string{URLEncoding.String(), StdEncoding.String()}
+var base64EncodingNames [invalidBase64Encoding]string
type base64EncodingSpec struct {
field Base64Encoding
- ftype StackType
version uint64
}
-var base64EncodingSpecs = []base64EncodingSpec{
- {URLEncoding, StackBytes, 6},
- {StdEncoding, StackBytes, 6},
+var base64EncodingSpecs = [...]base64EncodingSpec{
+ {URLEncoding, 6},
+ {StdEncoding, 6},
}
-var base64EncodingSpecByField map[Base64Encoding]base64EncodingSpec
-var base64EncodingSpecByName base64EncodingSpecMap
+func base64EncodingSpecByField(e Base64Encoding) (base64EncodingSpec, bool) {
+ if int(e) >= len(base64EncodingSpecs) {
+ return base64EncodingSpec{}, false
+ }
+ return base64EncodingSpecs[e], true
+}
+
+var base64EncodingSpecByName = make(base64EncodingSpecMap, len(base64EncodingNames))
type base64EncodingSpecMap map[string]base64EncodingSpec
-func (fs *base64EncodingSpec) Type() StackType {
- return fs.ftype
+func (fs base64EncodingSpec) Field() byte {
+ return byte(fs.field)
}
-
-func (fs *base64EncodingSpec) OpVersion() uint64 {
+func (fs base64EncodingSpec) Type() StackType {
+ return StackAny // Will not show in docs, since all are untyped
+}
+func (fs base64EncodingSpec) OpVersion() uint64 {
return 6
}
-
-func (fs *base64EncodingSpec) Version() uint64 {
+func (fs base64EncodingSpec) Version() uint64 {
return fs.version
}
-
-func (fs *base64EncodingSpec) Note() string {
+func (fs base64EncodingSpec) Note() string {
note := "" // no doc list?
return note
}
-func (s base64EncodingSpecMap) getExtraFor(name string) (extra string) {
- // Uses 6 here because base64_decode fields were introduced in 6
- return
+
+func (s base64EncodingSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// Base64Encodings describes the base64_encode immediate
+var Base64Encodings = FieldGroup{
+ "base64", "Encodings",
+ base64EncodingNames[:],
+ base64EncodingSpecByName,
}
// JSONRefType is an enum for the `json_ref` opcode
@@ -583,11 +730,10 @@ const (
JSONUint64
// JSONObject represents json object
JSONObject
- invalidJSONRefType
+ invalidJSONRefType // compile-time constant for number of fields
)
-// After running `go generate` these strings will be available:
-var jsonRefTypeNames [3]string = [...]string{JSONString.String(), JSONUint64.String(), JSONObject.String()}
+var jsonRefTypeNames [invalidJSONRefType]string
type jsonRefSpec struct {
field JSONRefType
@@ -595,23 +741,50 @@ type jsonRefSpec struct {
version uint64
}
-var jsonRefSpecs = []jsonRefSpec{
+var jsonRefSpecs = [...]jsonRefSpec{
{JSONString, StackBytes, fidoVersion},
{JSONUint64, StackUint64, fidoVersion},
{JSONObject, StackBytes, fidoVersion},
}
-var jsonRefSpecByField map[JSONRefType]jsonRefSpec
-var jsonRefSpecByName jsonRefSpecMap
+func jsonRefSpecByField(r JSONRefType) (jsonRefSpec, bool) {
+ if int(r) >= len(jsonRefSpecs) {
+ return jsonRefSpec{}, false
+ }
+ return jsonRefSpecs[r], true
+}
+
+var jsonRefSpecByName = make(jsonRefSpecMap, len(jsonRefTypeNames))
type jsonRefSpecMap map[string]jsonRefSpec
-func (s jsonRefSpecMap) getExtraFor(name string) (extra string) {
- // Uses 6 here because base64_decode fields were introduced in 6
- if s[name].version > 6 {
- extra = fmt.Sprintf("LogicSigVersion >= %d.", s[name].version)
- }
- return
+func (fs jsonRefSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs jsonRefSpec) Type() StackType {
+ return fs.ftype
+}
+func (fs jsonRefSpec) OpVersion() uint64 {
+ return fidoVersion
+}
+func (fs jsonRefSpec) Version() uint64 {
+ return fs.version
+}
+func (fs jsonRefSpec) Note() string {
+ note := "" // no doc list?
+ return note
+}
+
+func (s jsonRefSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// JSONRefTypes describes the json_ref immediate
+var JSONRefTypes = FieldGroup{
+ "json_ref", "Types",
+ jsonRefTypeNames[:],
+ jsonRefSpecByName,
}
// AssetHoldingField is an enum for `asset_holding_get` opcode
@@ -622,50 +795,60 @@ const (
AssetBalance AssetHoldingField = iota
// AssetFrozen AssetHolding.Frozen
AssetFrozen
- invalidAssetHoldingField
+ invalidAssetHoldingField // compile-time constant for number of fields
)
-// AssetHoldingFieldNames are arguments to the 'asset_holding_get' opcode
-var AssetHoldingFieldNames []string
+var assetHoldingFieldNames [invalidAssetHoldingField]string
type assetHoldingFieldSpec struct {
field AssetHoldingField
ftype StackType
version uint64
+ doc string
}
-func (fs *assetHoldingFieldSpec) Type() StackType {
+func (fs assetHoldingFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs assetHoldingFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *assetHoldingFieldSpec) OpVersion() uint64 {
+func (fs assetHoldingFieldSpec) OpVersion() uint64 {
return 2
}
-
-func (fs *assetHoldingFieldSpec) Version() uint64 {
+func (fs assetHoldingFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *assetHoldingFieldSpec) Note() string {
- note := assetHoldingFieldDocs[fs.field.String()]
- return note
+func (fs assetHoldingFieldSpec) Note() string {
+ return fs.doc
}
-var assetHoldingFieldSpecs = []assetHoldingFieldSpec{
- {AssetBalance, StackUint64, 2},
- {AssetFrozen, StackUint64, 2},
+var assetHoldingFieldSpecs = [...]assetHoldingFieldSpec{
+ {AssetBalance, StackUint64, 2, "Amount of the asset unit held by this account"},
+ {AssetFrozen, StackUint64, 2, "Is the asset frozen or not"},
}
-var assetHoldingFieldSpecByField map[AssetHoldingField]assetHoldingFieldSpec
+func assetHoldingFieldSpecByField(f AssetHoldingField) (assetHoldingFieldSpec, bool) {
+ if int(f) >= len(assetHoldingFieldSpecs) {
+ return assetHoldingFieldSpec{}, false
+ }
+ return assetHoldingFieldSpecs[f], true
+}
-// AssetHoldingFieldSpecByName gives access to the field specs by field name
-var AssetHoldingFieldSpecByName ahfNameSpecMap
+var assetHoldingFieldSpecByName = make(ahfNameSpecMap, len(assetHoldingFieldNames))
type ahfNameSpecMap map[string]assetHoldingFieldSpec
-func (s ahfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s ahfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// AssetHoldingFields describes asset_holding_get's immediates
+var AssetHoldingFields = FieldGroup{
+ "asset_holding", "Fields",
+ assetHoldingFieldNames[:],
+ assetHoldingFieldSpecByName,
}
// AssetParamsField is an enum for `asset_params_get` opcode
@@ -698,60 +881,70 @@ const (
// AssetCreator is not *in* the Params, but it is uniquely determined.
AssetCreator
- invalidAssetParamsField
+ invalidAssetParamsField // compile-time constant for number of fields
)
-// AssetParamsFieldNames are arguments to the 'asset_params_get' opcode
-var AssetParamsFieldNames []string
+var assetParamsFieldNames [invalidAssetParamsField]string
type assetParamsFieldSpec struct {
field AssetParamsField
ftype StackType
version uint64
+ doc string
}
-func (fs *assetParamsFieldSpec) Type() StackType {
+func (fs assetParamsFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs assetParamsFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *assetParamsFieldSpec) OpVersion() uint64 {
+func (fs assetParamsFieldSpec) OpVersion() uint64 {
return 2
}
-
-func (fs *assetParamsFieldSpec) Version() uint64 {
+func (fs assetParamsFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *assetParamsFieldSpec) Note() string {
- note := assetParamsFieldDocs[fs.field.String()]
- return note
+func (fs assetParamsFieldSpec) Note() string {
+ return fs.doc
}
-var assetParamsFieldSpecs = []assetParamsFieldSpec{
- {AssetTotal, StackUint64, 2},
- {AssetDecimals, StackUint64, 2},
- {AssetDefaultFrozen, StackUint64, 2},
- {AssetUnitName, StackBytes, 2},
- {AssetName, StackBytes, 2},
- {AssetURL, StackBytes, 2},
- {AssetMetadataHash, StackBytes, 2},
- {AssetManager, StackBytes, 2},
- {AssetReserve, StackBytes, 2},
- {AssetFreeze, StackBytes, 2},
- {AssetClawback, StackBytes, 2},
- {AssetCreator, StackBytes, 5},
+var assetParamsFieldSpecs = [...]assetParamsFieldSpec{
+ {AssetTotal, StackUint64, 2, "Total number of units of this asset"},
+ {AssetDecimals, StackUint64, 2, "See AssetParams.Decimals"},
+ {AssetDefaultFrozen, StackUint64, 2, "Frozen by default or not"},
+ {AssetUnitName, StackBytes, 2, "Asset unit name"},
+ {AssetName, StackBytes, 2, "Asset name"},
+ {AssetURL, StackBytes, 2, "URL with additional info about the asset"},
+ {AssetMetadataHash, StackBytes, 2, "Arbitrary commitment"},
+ {AssetManager, StackBytes, 2, "Manager address"},
+ {AssetReserve, StackBytes, 2, "Reserve address"},
+ {AssetFreeze, StackBytes, 2, "Freeze address"},
+ {AssetClawback, StackBytes, 2, "Clawback address"},
+ {AssetCreator, StackBytes, 5, "Creator address"},
}
-var assetParamsFieldSpecByField map[AssetParamsField]assetParamsFieldSpec
+func assetParamsFieldSpecByField(f AssetParamsField) (assetParamsFieldSpec, bool) {
+ if int(f) >= len(assetParamsFieldSpecs) {
+ return assetParamsFieldSpec{}, false
+ }
+ return assetParamsFieldSpecs[f], true
+}
-// AssetParamsFieldSpecByName gives access to the field specs by field name
-var AssetParamsFieldSpecByName apfNameSpecMap
+var assetParamsFieldSpecByName = make(apfNameSpecMap, len(assetParamsFieldNames))
type apfNameSpecMap map[string]assetParamsFieldSpec
-func (s apfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s apfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// AssetParamsFields describes asset_params_get's immediates
+var AssetParamsFields = FieldGroup{
+ "asset_params", "Fields",
+ assetParamsFieldNames[:],
+ assetParamsFieldSpecByName,
}
// AppParamsField is an enum for `app_params_get` opcode
@@ -779,58 +972,68 @@ const (
// AppAddress is also not *in* the Params, but can be derived
AppAddress
- invalidAppParamsField
+ invalidAppParamsField // compile-time constant for number of fields
)
-// AppParamsFieldNames are arguments to the 'app_params_get' opcode
-var AppParamsFieldNames []string
+var appParamsFieldNames [invalidAppParamsField]string
type appParamsFieldSpec struct {
field AppParamsField
ftype StackType
version uint64
+ doc string
}
-func (fs *appParamsFieldSpec) Type() StackType {
+func (fs appParamsFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs appParamsFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *appParamsFieldSpec) OpVersion() uint64 {
+func (fs appParamsFieldSpec) OpVersion() uint64 {
return 5
}
-
-func (fs *appParamsFieldSpec) Version() uint64 {
+func (fs appParamsFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *appParamsFieldSpec) Note() string {
- note := appParamsFieldDocs[fs.field.String()]
- return note
+func (fs appParamsFieldSpec) Note() string {
+ return fs.doc
}
-var appParamsFieldSpecs = []appParamsFieldSpec{
- {AppApprovalProgram, StackBytes, 5},
- {AppClearStateProgram, StackBytes, 5},
- {AppGlobalNumUint, StackUint64, 5},
- {AppGlobalNumByteSlice, StackUint64, 5},
- {AppLocalNumUint, StackUint64, 5},
- {AppLocalNumByteSlice, StackUint64, 5},
- {AppExtraProgramPages, StackUint64, 5},
- {AppCreator, StackBytes, 5},
- {AppAddress, StackBytes, 5},
+var appParamsFieldSpecs = [...]appParamsFieldSpec{
+ {AppApprovalProgram, StackBytes, 5, "Bytecode of Approval Program"},
+ {AppClearStateProgram, StackBytes, 5, "Bytecode of Clear State Program"},
+ {AppGlobalNumUint, StackUint64, 5, "Number of uint64 values allowed in Global State"},
+ {AppGlobalNumByteSlice, StackUint64, 5, "Number of byte array values allowed in Global State"},
+ {AppLocalNumUint, StackUint64, 5, "Number of uint64 values allowed in Local State"},
+ {AppLocalNumByteSlice, StackUint64, 5, "Number of byte array values allowed in Local State"},
+ {AppExtraProgramPages, StackUint64, 5, "Number of Extra Program Pages of code space"},
+ {AppCreator, StackBytes, 5, "Creator address"},
+ {AppAddress, StackBytes, 5, "Address for which this application has authority"},
}
-var appParamsFieldSpecByField map[AppParamsField]appParamsFieldSpec
+func appParamsFieldSpecByField(f AppParamsField) (appParamsFieldSpec, bool) {
+ if int(f) >= len(appParamsFieldSpecs) {
+ return appParamsFieldSpec{}, false
+ }
+ return appParamsFieldSpecs[f], true
+}
-// AppParamsFieldSpecByName gives access to the field specs by field name
-var AppParamsFieldSpecByName appNameSpecMap
+var appParamsFieldSpecByName = make(appNameSpecMap, len(appParamsFieldNames))
// simple interface used by doc generator for fields versioning
type appNameSpecMap map[string]appParamsFieldSpec
-func (s appNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s appNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// AppParamsFields describes app_params_get's immediates
+var AppParamsFields = FieldGroup{
+ "app_params", "Fields",
+ appParamsFieldNames[:],
+ appParamsFieldSpecByName,
}
// AcctParamsField is an enum for `acct_params_get` opcode
@@ -844,199 +1047,146 @@ const (
//AcctAuthAddr is the rekeyed address if any, else ZeroAddress
AcctAuthAddr
- invalidAcctParamsField
+ invalidAcctParamsField // compile-time constant for number of fields
)
-// AcctParamsFieldNames are arguments to the 'acct_params_get' opcode
-var AcctParamsFieldNames []string
+var acctParamsFieldNames [invalidAcctParamsField]string
type acctParamsFieldSpec struct {
field AcctParamsField
ftype StackType
version uint64
+ doc string
}
-func (fs *acctParamsFieldSpec) Type() StackType {
+func (fs acctParamsFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs acctParamsFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *acctParamsFieldSpec) OpVersion() uint64 {
+func (fs acctParamsFieldSpec) OpVersion() uint64 {
return 6
}
-
-func (fs *acctParamsFieldSpec) Version() uint64 {
+func (fs acctParamsFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *acctParamsFieldSpec) Note() string {
- note := acctParamsFieldDocs[fs.field.String()]
- return note
+func (fs acctParamsFieldSpec) Note() string {
+ return fs.doc
}
-var acctParamsFieldSpecs = []acctParamsFieldSpec{
- {AcctBalance, StackUint64, 6},
- {AcctMinBalance, StackUint64, 6},
- {AcctAuthAddr, StackBytes, 6},
+var acctParamsFieldSpecs = [...]acctParamsFieldSpec{
+ {AcctBalance, StackUint64, 6, "Account balance in microalgos"},
+ {AcctMinBalance, StackUint64, 6, "Minimum required balance for account, in microalgos"},
+ {AcctAuthAddr, StackBytes, 6, "Address the account is rekeyed to."},
}
-var acctParamsFieldSpecByField map[AcctParamsField]acctParamsFieldSpec
+func acctParamsFieldSpecByField(f AcctParamsField) (acctParamsFieldSpec, bool) {
+ if int(f) >= len(acctParamsFieldSpecs) {
+ return acctParamsFieldSpec{}, false
+ }
+ return acctParamsFieldSpecs[f], true
+}
-// AcctParamsFieldSpecByName gives access to the field specs by field name
-var AcctParamsFieldSpecByName acctNameSpecMap
+var acctParamsFieldSpecByName = make(acctNameSpecMap, len(acctParamsFieldNames))
-// simple interface used by doc generator for fields versioning
type acctNameSpecMap map[string]acctParamsFieldSpec
-func (s acctNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s acctNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
}
-func init() {
- TxnFieldNames = make([]string, int(invalidTxnField))
- for fi := Sender; fi < invalidTxnField; fi++ {
- TxnFieldNames[fi] = fi.String()
- }
- txnFieldSpecByField = make(map[TxnField]txnFieldSpec, len(TxnFieldNames))
- for i, s := range txnFieldSpecs {
- if int(s.field) != i {
- panic("txnFieldSpecs disjoint with TxnField enum")
- }
- txnFieldSpecByField[s.field] = s
- }
- TxnFieldSpecByName = make(map[string]txnFieldSpec, len(TxnFieldNames))
- for i, tfn := range TxnFieldNames {
- TxnFieldSpecByName[tfn] = txnFieldSpecByField[TxnField(i)]
- }
+// AcctParamsFields describes acct_params_get's immediates
+var AcctParamsFields = FieldGroup{
+ "acct_params", "Fields",
+ acctParamsFieldNames[:],
+ acctParamsFieldSpecByName,
+}
- GlobalFieldNames = make([]string, int(invalidGlobalField))
- for i := MinTxnFee; i < invalidGlobalField; i++ {
- GlobalFieldNames[i] = i.String()
- }
- globalFieldSpecByField = make(map[GlobalField]globalFieldSpec, len(GlobalFieldNames))
- for i, s := range globalFieldSpecs {
- if int(s.field) != i {
- panic("globalFieldSpecs disjoint with GlobalField enum")
+func init() {
+ equal := func(x int, y int) {
+ if x != y {
+ panic(fmt.Sprintf("%d != %d", x, y))
}
- globalFieldSpecByField[s.field] = s
- }
- GlobalFieldSpecByName = make(gfNameSpecMap, len(GlobalFieldNames))
- for i, gfn := range GlobalFieldNames {
- GlobalFieldSpecByName[gfn] = globalFieldSpecByField[GlobalField(i)]
}
- EcdsaCurveNames = make([]string, int(invalidEcdsaCurve))
- for i := Secp256k1; i < invalidEcdsaCurve; i++ {
- EcdsaCurveNames[i] = i.String()
- }
- ecdsaCurveSpecByField = make(map[EcdsaCurve]ecdsaCurveSpec, len(EcdsaCurveNames))
- for _, s := range ecdsaCurveSpecs {
- ecdsaCurveSpecByField[s.field] = s
- }
-
- EcdsaCurveSpecByName = make(ecDsaCurveNameSpecMap, len(EcdsaCurveNames))
- for i, ahfn := range EcdsaCurveNames {
- EcdsaCurveSpecByName[ahfn] = ecdsaCurveSpecByField[EcdsaCurve(i)]
- }
-
- base64EncodingSpecByField = make(map[Base64Encoding]base64EncodingSpec, len(base64EncodingNames))
- for _, s := range base64EncodingSpecs {
- base64EncodingSpecByField[s.field] = s
- }
-
- base64EncodingSpecByName = make(base64EncodingSpecMap, len(base64EncodingNames))
- for i, encoding := range base64EncodingNames {
- base64EncodingSpecByName[encoding] = base64EncodingSpecByField[Base64Encoding(i)]
+ equal(len(txnFieldSpecs), len(TxnFieldNames))
+ for i, s := range txnFieldSpecs {
+ equal(int(s.field), i)
+ TxnFieldNames[s.field] = s.field.String()
+ txnFieldSpecByName[s.field.String()] = s
}
- base64EncodingSpecByField = make(map[Base64Encoding]base64EncodingSpec, len(base64EncodingNames))
- for _, s := range base64EncodingSpecs {
- base64EncodingSpecByField[s.field] = s
+ equal(len(globalFieldSpecs), len(GlobalFieldNames))
+ for i, s := range globalFieldSpecs {
+ equal(int(s.field), i)
+ GlobalFieldNames[s.field] = s.field.String()
+ globalFieldSpecByName[s.field.String()] = s
}
- base64EncodingSpecByName = make(base64EncodingSpecMap, len(base64EncodingNames))
- for i, encoding := range base64EncodingNames {
- base64EncodingSpecByName[encoding] = base64EncodingSpecByField[Base64Encoding(i)]
+ equal(len(ecdsaCurveSpecs), len(ecdsaCurveNames))
+ for i, s := range ecdsaCurveSpecs {
+ equal(int(s.field), i)
+ ecdsaCurveNames[s.field] = s.field.String()
+ ecdsaCurveSpecByName[s.field.String()] = s
}
- jsonRefSpecByField = make(map[JSONRefType]jsonRefSpec, len(jsonRefTypeNames))
- for _, s := range jsonRefSpecs {
- jsonRefSpecByField[s.field] = s
+ equal(len(base64EncodingSpecs), len(base64EncodingNames))
+ for i, s := range base64EncodingSpecs {
+ equal(int(s.field), i)
+ base64EncodingNames[i] = s.field.String()
+ base64EncodingSpecByName[s.field.String()] = s
}
- jsonRefSpecByName = make(jsonRefSpecMap, len(jsonRefTypeNames))
- for i, typename := range jsonRefTypeNames {
- jsonRefSpecByName[typename] = jsonRefSpecByField[JSONRefType(i)]
+ equal(len(jsonRefSpecs), len(jsonRefTypeNames))
+ for i, s := range jsonRefSpecs {
+ equal(int(s.field), i)
+ jsonRefTypeNames[i] = s.field.String()
+ jsonRefSpecByName[s.field.String()] = s
}
- AssetHoldingFieldNames = make([]string, int(invalidAssetHoldingField))
- for i := AssetBalance; i < invalidAssetHoldingField; i++ {
- AssetHoldingFieldNames[i] = i.String()
- }
- assetHoldingFieldSpecByField = make(map[AssetHoldingField]assetHoldingFieldSpec, len(AssetHoldingFieldNames))
- for _, s := range assetHoldingFieldSpecs {
- assetHoldingFieldSpecByField[s.field] = s
- }
- AssetHoldingFieldSpecByName = make(ahfNameSpecMap, len(AssetHoldingFieldNames))
- for i, ahfn := range AssetHoldingFieldNames {
- AssetHoldingFieldSpecByName[ahfn] = assetHoldingFieldSpecByField[AssetHoldingField(i)]
+ equal(len(assetHoldingFieldSpecs), len(assetHoldingFieldNames))
+ for i, s := range assetHoldingFieldSpecs {
+ equal(int(s.field), i)
+ assetHoldingFieldNames[i] = s.field.String()
+ assetHoldingFieldSpecByName[s.field.String()] = s
}
- AssetParamsFieldNames = make([]string, int(invalidAssetParamsField))
- for i := AssetTotal; i < invalidAssetParamsField; i++ {
- AssetParamsFieldNames[i] = i.String()
- }
- assetParamsFieldSpecByField = make(map[AssetParamsField]assetParamsFieldSpec, len(AssetParamsFieldNames))
- for _, s := range assetParamsFieldSpecs {
- assetParamsFieldSpecByField[s.field] = s
- }
- AssetParamsFieldSpecByName = make(apfNameSpecMap, len(AssetParamsFieldNames))
- for i, apfn := range AssetParamsFieldNames {
- AssetParamsFieldSpecByName[apfn] = assetParamsFieldSpecByField[AssetParamsField(i)]
+ equal(len(assetParamsFieldSpecs), len(assetParamsFieldNames))
+ for i, s := range assetParamsFieldSpecs {
+ equal(int(s.field), i)
+ assetParamsFieldNames[i] = s.field.String()
+ assetParamsFieldSpecByName[s.field.String()] = s
}
- AppParamsFieldNames = make([]string, int(invalidAppParamsField))
- for i := AppApprovalProgram; i < invalidAppParamsField; i++ {
- AppParamsFieldNames[i] = i.String()
- }
- appParamsFieldSpecByField = make(map[AppParamsField]appParamsFieldSpec, len(AppParamsFieldNames))
- for _, s := range appParamsFieldSpecs {
- appParamsFieldSpecByField[s.field] = s
- }
- AppParamsFieldSpecByName = make(appNameSpecMap, len(AppParamsFieldNames))
- for i, apfn := range AppParamsFieldNames {
- AppParamsFieldSpecByName[apfn] = appParamsFieldSpecByField[AppParamsField(i)]
+ equal(len(appParamsFieldSpecs), len(appParamsFieldNames))
+ for i, s := range appParamsFieldSpecs {
+ equal(int(s.field), i)
+ appParamsFieldNames[i] = s.field.String()
+ appParamsFieldSpecByName[s.field.String()] = s
}
- AcctParamsFieldNames = make([]string, int(invalidAcctParamsField))
- for i := AcctBalance; i < invalidAcctParamsField; i++ {
- AcctParamsFieldNames[i] = i.String()
- }
- acctParamsFieldSpecByField = make(map[AcctParamsField]acctParamsFieldSpec, len(AcctParamsFieldNames))
- for _, s := range acctParamsFieldSpecs {
- acctParamsFieldSpecByField[s.field] = s
- }
- AcctParamsFieldSpecByName = make(acctNameSpecMap, len(AcctParamsFieldNames))
- for i, apfn := range AcctParamsFieldNames {
- AcctParamsFieldSpecByName[apfn] = acctParamsFieldSpecByField[AcctParamsField(i)]
+ equal(len(acctParamsFieldSpecs), len(acctParamsFieldNames))
+ for i, s := range acctParamsFieldSpecs {
+ equal(int(s.field), i)
+ acctParamsFieldNames[i] = s.field.String()
+ acctParamsFieldSpecByName[s.field.String()] = s
}
- txnTypeIndexes = make(map[string]uint64, len(TxnTypeNames))
+ txnTypeMap = make(map[string]uint64)
for i, tt := range TxnTypeNames {
- txnTypeIndexes[tt] = uint64(i)
+ txnTypeMap[tt] = uint64(i)
}
-
- txnTypeConstToUint64 = make(map[string]uint64, len(TxnTypeNames))
- for tt, v := range txnTypeIndexes {
- symbol := TypeNameDescriptions[tt]
- txnTypeConstToUint64[symbol] = v
+ for k, v := range TypeNameDescriptions {
+ txnTypeMap[v] = txnTypeMap[k]
}
- OnCompletionNames = make([]string, int(invalidOnCompletionConst))
- onCompletionConstToUint64 = make(map[string]uint64, len(OnCompletionNames))
+ onCompletionMap = make(map[string]uint64, len(OnCompletionNames))
for oc := NoOp; oc < invalidOnCompletionConst; oc++ {
symbol := oc.String()
OnCompletionNames[oc] = symbol
- onCompletionConstToUint64[symbol] = uint64(oc)
+ onCompletionMap[symbol] = uint64(oc)
}
+
}
diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go
index 4e3cdbc88..cd2298463 100644
--- a/data/transactions/logic/fields_string.go
+++ b/data/transactions/logic/fields_string.go
@@ -221,12 +221,11 @@ func _() {
_ = x[ClearState-3]
_ = x[UpdateApplication-4]
_ = x[DeleteApplication-5]
- _ = x[invalidOnCompletionConst-6]
}
-const _OnCompletionConstType_name = "NoOpOptInCloseOutClearStateUpdateApplicationDeleteApplicationinvalidOnCompletionConst"
+const _OnCompletionConstType_name = "NoOpOptInCloseOutClearStateUpdateApplicationDeleteApplication"
-var _OnCompletionConstType_index = [...]uint8{0, 4, 9, 17, 27, 44, 61, 85}
+var _OnCompletionConstType_index = [...]uint8{0, 4, 9, 17, 27, 44, 61}
func (i OnCompletionConstType) String() string {
if i >= OnCompletionConstType(len(_OnCompletionConstType_index)-1) {
@@ -259,10 +258,10 @@ func _() {
var x [1]struct{}
_ = x[URLEncoding-0]
_ = x[StdEncoding-1]
- _ = x[invalidBase64Alphabet-2]
+ _ = x[invalidBase64Encoding-2]
}
-const _Base64Encoding_name = "URLEncodingStdEncodinginvalidBase64Alphabet"
+const _Base64Encoding_name = "URLEncodingStdEncodinginvalidBase64Encoding"
var _Base64Encoding_index = [...]uint8{0, 11, 22, 43}
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index b6d1a4989..4a8128c87 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -45,9 +45,9 @@ func TestGlobalFieldsVersions(t *testing.T) {
for _, field := range fields {
text := fmt.Sprintf("global %s", field.field.String())
// check assembler fails if version before introduction
- testLine(t, text, assemblerNoVersion, "...available in version...")
+ testLine(t, text, assemblerNoVersion, "...was introduced in...")
for v := uint64(0); v < field.version; v++ {
- testLine(t, text, v, "...available in version...")
+ testLine(t, text, v, "...was introduced in...")
}
testLine(t, text, field.version, "")
@@ -108,7 +108,7 @@ func TestTxnFieldVersions(t *testing.T) {
// TEAL version
txn.Txn.RekeyTo = basics.Address{}
txgroup := makeSampleTxnGroup(txn)
- asmDefaultError := "...available in version ..."
+ asmDefaultError := "...was introduced in ..."
for _, fs := range fields {
field := fs.field.String()
for _, command := range tests {
@@ -176,7 +176,7 @@ func TestTxnEffectsAvailable(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- for _, fs := range txnFieldSpecByField {
+ for _, fs := range txnFieldSpecs {
if !fs.effects {
continue
}
@@ -225,7 +225,7 @@ func TestAssetParamsFieldsVersions(t *testing.T) {
ep, _, _ := makeSampleEnv()
ep.Proto.LogicSigVersion = v
if field.version > v {
- testProg(t, text, v, Expect{3, "...available in version..."})
+ testProg(t, text, v, Expect{3, "...was introduced in..."})
ops := testProg(t, text, field.version) // assemble in the future
ops.Program[0] = byte(v)
testAppBytes(t, ops.Program, ep, "invalid asset_params_get field")
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
new file mode 100644
index 000000000..7a6cef2d9
--- /dev/null
+++ b/data/transactions/logic/langspec.json
@@ -0,0 +1,2222 @@
+{
+ "EvalMaxVersion": 7,
+ "LogicSigVersion": 6,
+ "Ops": [
+ {
+ "Opcode": 0,
+ "Name": "err",
+ "Size": 1,
+ "Doc": "Fail immediately.",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 1,
+ "Name": "sha256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "SHA256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 2,
+ "Name": "keccak256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "Keccak256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 3,
+ "Name": "sha512_256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "SHA512_256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 4,
+ "Name": "ed25519verify",
+ "Args": "BBB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey =\u003e {0 or 1}",
+ "DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 5,
+ "Name": "ecdsa_verify",
+ "Args": "BBBBB",
+ "Returns": "U",
+ "Size": 2,
+ "Doc": "for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey =\u003e {0 or 1}",
+ "DocExtra": "The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.",
+ "ImmediateNote": "{uint8 curve index}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 6,
+ "Name": "ecdsa_pk_decompress",
+ "Args": "B",
+ "Returns": "BB",
+ "Size": 2,
+ "Doc": "decompress pubkey A into components X, Y",
+ "DocExtra": "The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.",
+ "ImmediateNote": "{uint8 curve index}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 7,
+ "Name": "ecdsa_pk_recover",
+ "Args": "BUBB",
+ "Returns": "BB",
+ "Size": 2,
+ "Doc": "for (data A, recovery id B, signature C, D) recover a public key",
+ "DocExtra": "S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to derive a public key. All values are big-endian encoded. The signed data must be 32 bytes long.",
+ "ImmediateNote": "{uint8 curve index}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 8,
+ "Name": "+",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A plus B. Fail on overflow.",
+ "DocExtra": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 9,
+ "Name": "-",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A minus B. Fail if B \u003e A.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 10,
+ "Name": "/",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A divided by B (truncated division). Fail if B == 0.",
+ "DocExtra": "`divmodw` is available to divide the two-element values produced by `mulw` and `addw`.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 11,
+ "Name": "*",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A times B. Fail on overflow.",
+ "DocExtra": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 12,
+ "Name": "\u003c",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A less than B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 13,
+ "Name": "\u003e",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A greater than B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 14,
+ "Name": "\u003c=",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A less than or equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 15,
+ "Name": "\u003e=",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A greater than or equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 16,
+ "Name": "\u0026\u0026",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is not zero and B is not zero =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 17,
+ "Name": "||",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is not zero or B is not zero =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 18,
+ "Name": "==",
+ "Args": "..",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 19,
+ "Name": "!=",
+ "Args": "..",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is not equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 20,
+ "Name": "!",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A == 0 yields 1; else 0",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 21,
+ "Name": "len",
+ "Args": "B",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "yields length of byte value A",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 22,
+ "Name": "itob",
+ "Args": "U",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "converts uint64 A to big-endian byte array, always of length 8",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 23,
+ "Name": "btoi",
+ "Args": "B",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "converts big-endian byte array A to uint64. Fails if len(A) \u003e 8. Padded by leading 0s if len(A) \u003c 8.",
+ "DocExtra": "`btoi` fails if the input is longer than 8 bytes.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 24,
+ "Name": "%",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A modulo B. Fail if B == 0.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 25,
+ "Name": "|",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A bitwise-or B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 26,
+ "Name": "\u0026",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A bitwise-and B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 27,
+ "Name": "^",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A bitwise-xor B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 28,
+ "Name": "~",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "bitwise invert value A",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 29,
+ "Name": "mulw",
+ "Args": "UU",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "A times B as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 30,
+ "Name": "addw",
+ "Args": "UU",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "A plus B as a 128-bit result. X is the carry-bit, Y is the low-order 64 bits.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 31,
+ "Name": "divmodw",
+ "Args": "UUUU",
+ "Returns": "UUUU",
+ "Size": 1,
+ "Doc": "W,X = (A,B / C,D); Y,Z = (A,B modulo C,D)",
+ "DocExtra": "The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 32,
+ "Name": "intcblock",
+ "Size": 0,
+ "Doc": "prepare block of uint64 constants for use by intc",
+ "DocExtra": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
+ "ImmediateNote": "{varuint length} [{varuint value}, ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 33,
+ "Name": "intc",
+ "Returns": "U",
+ "Size": 2,
+ "Doc": "Ith constant from intcblock",
+ "ImmediateNote": "{uint8 int constant index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 34,
+ "Name": "intc_0",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 0 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 35,
+ "Name": "intc_1",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 1 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 36,
+ "Name": "intc_2",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 2 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 37,
+ "Name": "intc_3",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 3 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 38,
+ "Name": "bytecblock",
+ "Size": 0,
+ "Doc": "prepare block of byte-array constants for use by bytec",
+ "DocExtra": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
+ "ImmediateNote": "{varuint length} [({varuint value length} bytes), ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 39,
+ "Name": "bytec",
+ "Returns": "B",
+ "Size": 2,
+ "Doc": "Ith constant from bytecblock",
+ "ImmediateNote": "{uint8 byte constant index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 40,
+ "Name": "bytec_0",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 0 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 41,
+ "Name": "bytec_1",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 1 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 42,
+ "Name": "bytec_2",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 2 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 43,
+ "Name": "bytec_3",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 3 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 44,
+ "Name": "arg",
+ "Returns": "B",
+ "Size": 2,
+ "Doc": "Nth LogicSig argument",
+ "ImmediateNote": "{uint8 arg index N}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 45,
+ "Name": "arg_0",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 0",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 46,
+ "Name": "arg_1",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 1",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 47,
+ "Name": "arg_2",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 2",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 48,
+ "Name": "arg_3",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 3",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 49,
+ "Name": "txn",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of current transaction",
+ "DocExtra": "FirstValidTime causes the program to fail. The field is reserved for future use.",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 50,
+ "Name": "global",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "MinTxnFee",
+ "MinBalance",
+ "MaxTxnLife",
+ "ZeroAddress",
+ "GroupSize",
+ "LogicSigVersion",
+ "Round",
+ "LatestTimestamp",
+ "CurrentApplicationID",
+ "CreatorAddress",
+ "CurrentApplicationAddress",
+ "GroupID",
+ "OpcodeBudget",
+ "CallerApplicationID",
+ "CallerApplicationAddress"
+ ],
+ "ArgEnumTypes": "UUUBUUUUUBBBUUB",
+ "Doc": "global field F",
+ "ImmediateNote": "{uint8 global field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 51,
+ "Name": "gtxn",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the Tth transaction in the current group",
+ "DocExtra": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 52,
+ "Name": "load",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "Ith scratch space value. All scratch spaces are 0 at program start.",
+ "ImmediateNote": "{uint8 position in scratch space to load from}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 53,
+ "Name": "store",
+ "Args": ".",
+ "Size": 2,
+ "Doc": "store A to the Ith scratch space",
+ "ImmediateNote": "{uint8 position in scratch space to store to}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 54,
+ "Name": "txna",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F of the current transaction",
+ "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 55,
+ "Name": "gtxna",
+ "Returns": ".",
+ "Size": 4,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F from the Tth transaction in the current group",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 56,
+ "Name": "gtxns",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the Ath transaction in the current group",
+ "DocExtra": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 57,
+ "Name": "gtxnsa",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F from the Ath transaction in the current group",
+ "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 58,
+ "Name": "gload",
+ "Returns": ".",
+ "Size": 3,
+ "Doc": "Ith scratch space value of the Tth transaction in the current group",
+ "DocExtra": "`gload` fails unless the requested transaction is an ApplicationCall and T \u003c GroupIndex.",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 position in scratch space to load from}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 59,
+ "Name": "gloads",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "Ith scratch space value of the Ath transaction in the current group",
+ "DocExtra": "`gloads` fails unless the requested transaction is an ApplicationCall and A \u003c GroupIndex.",
+ "ImmediateNote": "{uint8 position in scratch space to load from}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 60,
+ "Name": "gaid",
+ "Returns": "U",
+ "Size": 2,
+ "Doc": "ID of the asset or application created in the Tth transaction of the current group",
+ "DocExtra": "`gaid` fails unless the requested transaction created an asset or application and T \u003c GroupIndex.",
+ "ImmediateNote": "{uint8 transaction group index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 61,
+ "Name": "gaids",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "ID of the asset or application created in the Ath transaction of the current group",
+ "DocExtra": "`gaids` fails unless the requested transaction created an asset or application and A \u003c GroupIndex.",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 62,
+ "Name": "loads",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "Ath scratch space value. All scratch spaces are 0 at program start.",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 63,
+ "Name": "stores",
+ "Args": "U.",
+ "Size": 1,
+ "Doc": "store B to the Ath scratch space",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 64,
+ "Name": "bnz",
+ "Args": "U",
+ "Size": 3,
+ "Doc": "branch to TARGET if value A is not zero",
+ "DocExtra": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 65,
+ "Name": "bz",
+ "Args": "U",
+ "Size": 3,
+ "Doc": "branch to TARGET if value A is zero",
+ "DocExtra": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 66,
+ "Name": "b",
+ "Size": 3,
+ "Doc": "branch unconditionally to TARGET",
+ "DocExtra": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 67,
+ "Name": "return",
+ "Args": "U",
+ "Size": 1,
+ "Doc": "use A as success value; end",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 68,
+ "Name": "assert",
+ "Args": "U",
+ "Size": 1,
+ "Doc": "immediately fail unless A is a non-zero number",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 72,
+ "Name": "pop",
+ "Args": ".",
+ "Size": 1,
+ "Doc": "discard A",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 73,
+ "Name": "dup",
+ "Args": ".",
+ "Returns": "..",
+ "Size": 1,
+ "Doc": "duplicate A",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 74,
+ "Name": "dup2",
+ "Args": "..",
+ "Returns": "....",
+ "Size": 1,
+ "Doc": "duplicate A and B",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 75,
+ "Name": "dig",
+ "Args": ".",
+ "Returns": "..",
+ "Size": 2,
+ "Doc": "Nth value from the top of the stack. dig 0 is equivalent to dup",
+ "ImmediateNote": "{uint8 depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 76,
+ "Name": "swap",
+ "Args": "..",
+ "Returns": "..",
+ "Size": 1,
+ "Doc": "swaps A and B on stack",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 77,
+ "Name": "select",
+ "Args": "..U",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "selects one of two values based on top-of-stack: B if C != 0, else A",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 78,
+ "Name": "cover",
+ "Args": ".",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth \u003c= N.",
+ "ImmediateNote": "{uint8 depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 79,
+ "Name": "uncover",
+ "Args": ".",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth \u003c= N.",
+ "ImmediateNote": "{uint8 depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 80,
+ "Name": "concat",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "join A and B",
+ "DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 81,
+ "Name": "substring",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 3,
+ "Doc": "A range of bytes from A starting at S up to but not including E. If E \u003c S, or either is larger than the array length, the program fails",
+ "ImmediateNote": "{uint8 start position} {uint8 end position}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 82,
+ "Name": "substring3",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A range of bytes from A starting at B up to but not including C. If C \u003c B, or either is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 83,
+ "Name": "getbit",
+ "Args": ".U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "DocExtra": "see explanation of bit ordering in setbit",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 84,
+ "Name": "setbit",
+ "Args": ".UU",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 85,
+ "Name": "getbyte",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 86,
+ "Name": "setbyte",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 87,
+ "Name": "extract",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 3,
+ "Doc": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
+ "ImmediateNote": "{uint8 start position} {uint8 length}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 88,
+ "Name": "extract3",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 89,
+ "Name": "extract_uint16",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 90,
+ "Name": "extract_uint32",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 91,
+ "Name": "extract_uint64",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 92,
+ "Name": "base64_decode",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 2,
+ "Doc": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
+ "DocExtra": "Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See \u003ca href=\"https://rfc-editor.org/rfc/rfc4648.html#section-4\"\u003eRFC 4648\u003c/a\u003e (sections 4 and 5). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
+ "ImmediateNote": "{uint8 encoding index}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 93,
+ "Name": "json_ref",
+ "Args": "BB",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A",
+ "DocExtra": "specify the return type with an immediate arg either as JSONUint64 or JSONString or JSONObject.",
+ "ImmediateNote": "{string return type}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 96,
+ "Name": "balance",
+ "Args": ".",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 97,
+ "Name": "app_opted_in",
+ "Args": ".U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if account A is opted in to application B, else 0",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 98,
+ "Name": "app_local_get",
+ "Args": ".B",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "local state of the key B in the current application in account A",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 99,
+ "Name": "app_local_get_ex",
+ "Args": ".UB",
+ "Returns": ".U",
+ "Size": 1,
+ "Doc": "X is the local state of application B, key C in account A. Y is 1 if key existed, else 0",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 100,
+ "Name": "app_global_get",
+ "Args": "B",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "global state of the key A in the current application",
+ "DocExtra": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 101,
+ "Name": "app_global_get_ex",
+ "Args": "UB",
+ "Returns": ".U",
+ "Size": 1,
+ "Doc": "X is the global state of application A, key B. Y is 1 if key existed, else 0",
+ "DocExtra": "params: Txn.ForeignApps offset (or, since v4, an _available_ application id), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 102,
+ "Name": "app_local_put",
+ "Args": ".B.",
+ "Size": 1,
+ "Doc": "write C to key B in account A's local state of the current application",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key, value.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 103,
+ "Name": "app_global_put",
+ "Args": "B.",
+ "Size": 1,
+ "Doc": "write B to key A in the global state of the current application",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 104,
+ "Name": "app_local_del",
+ "Args": ".B",
+ "Size": 1,
+ "Doc": "delete key B from account A's local state of the current application",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 105,
+ "Name": "app_global_del",
+ "Args": "B",
+ "Size": 1,
+ "Doc": "delete key A from the global state of the current application",
+ "DocExtra": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 112,
+ "Name": "asset_holding_get",
+ "Args": ".U",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AssetBalance",
+ "AssetFrozen"
+ ],
+ "ArgEnumTypes": "UU",
+ "Doc": "X is field F from account A's holding of asset B. Y is 1 if A is opted into B, else 0",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.",
+ "ImmediateNote": "{uint8 asset holding field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 113,
+ "Name": "asset_params_get",
+ "Args": "U",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AssetTotal",
+ "AssetDecimals",
+ "AssetDefaultFrozen",
+ "AssetUnitName",
+ "AssetName",
+ "AssetURL",
+ "AssetMetadataHash",
+ "AssetManager",
+ "AssetReserve",
+ "AssetFreeze",
+ "AssetClawback",
+ "AssetCreator"
+ ],
+ "ArgEnumTypes": "UUUBBBBBBBBB",
+ "Doc": "X is field F from asset A. Y is 1 if A exists, else 0",
+ "DocExtra": "params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.",
+ "ImmediateNote": "{uint8 asset params field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 114,
+ "Name": "app_params_get",
+ "Args": "U",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AppApprovalProgram",
+ "AppClearStateProgram",
+ "AppGlobalNumUint",
+ "AppGlobalNumByteSlice",
+ "AppLocalNumUint",
+ "AppLocalNumByteSlice",
+ "AppExtraProgramPages",
+ "AppCreator",
+ "AppAddress"
+ ],
+ "ArgEnumTypes": "BBUUUUUBB",
+ "Doc": "X is field F from app A. Y is 1 if A exists, else 0",
+ "DocExtra": "params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag (1 if the application existed and 0 otherwise), value.",
+ "ImmediateNote": "{uint8 app params field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 115,
+ "Name": "acct_params_get",
+ "Args": ".",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AcctBalance",
+ "AcctMinBalance",
+ "AcctAuthAddr"
+ ],
+ "ArgEnumTypes": "UUB",
+ "Doc": "X is field F from account A. Y is 1 if A owns positive algos, else 0",
+ "ImmediateNote": "{uint8 account params field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 120,
+ "Name": "min_balance",
+ "Args": ".",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 128,
+ "Name": "pushbytes",
+ "Returns": "B",
+ "Size": 0,
+ "Doc": "immediate BYTES",
+ "DocExtra": "pushbytes args are not added to the bytecblock during assembly processes",
+ "ImmediateNote": "{varuint length} {bytes}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 129,
+ "Name": "pushint",
+ "Returns": "U",
+ "Size": 0,
+ "Doc": "immediate UINT",
+ "DocExtra": "pushint args are not added to the intcblock during assembly processes",
+ "ImmediateNote": "{varuint int}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 132,
+ "Name": "ed25519verify_bare",
+ "Args": "BBB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 136,
+ "Name": "callsub",
+ "Size": 3,
+ "Doc": "branch unconditionally to TARGET, saving the next instruction on the call stack",
+ "DocExtra": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 137,
+ "Name": "retsub",
+ "Size": 1,
+ "Doc": "pop the top instruction from the call stack and branch to it",
+ "DocExtra": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 144,
+ "Name": "shl",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A times 2^B, modulo 2^64",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 145,
+ "Name": "shr",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A divided by 2^B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 146,
+ "Name": "sqrt",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "The largest integer I such that I^2 \u003c= A",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 147,
+ "Name": "bitlen",
+ "Args": ".",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "The highest set bit in A. If A is a byte-array, it is interpreted as a big-endian unsigned integer. bitlen of 0 is 0, bitlen of 8 is 4",
+ "DocExtra": "bitlen interprets arrays as big-endian integers, unlike setbit/getbit",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 148,
+ "Name": "exp",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A raised to the Bth power. Fail if A == B == 0 and on overflow",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 149,
+ "Name": "expw",
+ "Args": "UU",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. Fail if A == B == 0 or if the results exceeds 2^128-1",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 150,
+ "Name": "bsqrt",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "The largest integer I such that I^2 \u003c= A. A and I are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 151,
+ "Name": "divw",
+ "Args": "UUU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A,B / C. Fail if C == 0 or if result overflows.",
+ "DocExtra": "The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 152,
+ "Name": "sha3_256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "SHA3_256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 160,
+ "Name": "b+",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A plus B. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 161,
+ "Name": "b-",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A minus B. A and B are interpreted as big-endian unsigned integers. Fail on underflow.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 162,
+ "Name": "b/",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A divided by B (truncated division). A and B are interpreted as big-endian unsigned integers. Fail if B is zero.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 163,
+ "Name": "b*",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A times B. A and B are interpreted as big-endian unsigned integers.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 164,
+ "Name": "b\u003c",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is less than B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 165,
+ "Name": "b\u003e",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is greater than B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 166,
+ "Name": "b\u003c=",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is less than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 167,
+ "Name": "b\u003e=",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is greater than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 168,
+ "Name": "b==",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 169,
+ "Name": "b!=",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "0 if A is equal to B, else 1. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 170,
+ "Name": "b%",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A modulo B. A and B are interpreted as big-endian unsigned integers. Fail if B is zero.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 171,
+ "Name": "b|",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A bitwise-or B. A and B are zero-left extended to the greater of their lengths",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 172,
+ "Name": "b\u0026",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A bitwise-and B. A and B are zero-left extended to the greater of their lengths",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 173,
+ "Name": "b^",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A bitwise-xor B. A and B are zero-left extended to the greater of their lengths",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 174,
+ "Name": "b~",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A with all bits inverted",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 175,
+ "Name": "bzero",
+ "Args": "U",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "zero filled byte-array of length A",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 176,
+ "Name": "log",
+ "Args": "B",
+ "Size": 1,
+ "Doc": "write A to log state of the current application",
+ "DocExtra": "`log` fails if called more than MaxLogCalls times in a program, or if the sum of logged bytes exceeds 1024 bytes.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 177,
+ "Name": "itxn_begin",
+ "Size": 1,
+ "Doc": "begin preparation of a new inner transaction in a new transaction group",
+ "DocExtra": "`itxn_begin` initializes Sender to the application address; Fee to the minimum allowable, taking into account MinTxnFee and credit from overpaying in earlier transactions; FirstValid/LastValid to the values in the invoking transaction, and all other fields to zero or empty values.",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 178,
+ "Name": "itxn_field",
+ "Args": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "Note",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "Accounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "Applications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUBBUBBBUUUBUUUBBBUUBBBBBUUUUBBBBBBBBUBUUUUUUUUUB",
+ "Doc": "set field F of the current inner transaction to A",
+ "DocExtra": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made to extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 179,
+ "Name": "itxn_submit",
+ "Size": 1,
+ "Doc": "execute the current inner transaction group. Fail if executing this group would exceed the inner transaction limit, or if any transaction in the group fails.",
+ "DocExtra": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 180,
+ "Name": "itxn",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the last inner transaction",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 181,
+ "Name": "itxna",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F of the last inner transaction",
+ "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 182,
+ "Name": "itxn_next",
+ "Size": 1,
+ "Doc": "begin preparation of a new inner transaction in the same transaction group",
+ "DocExtra": "`itxn_next` initializes the transaction exactly as `itxn_begin` does",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 183,
+ "Name": "gitxn",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the Tth transaction in the last inner group submitted",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 184,
+ "Name": "gitxna",
+ "Returns": ".",
+ "Size": 4,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F from the Tth transaction in the last inner group submitted",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 192,
+ "Name": "txnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ath value of the array field F of the current transaction",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 193,
+ "Name": "gtxnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ath value of the array field F from the Tth transaction in the current group",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 194,
+ "Name": "gtxnsas",
+ "Args": "UU",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Bth value of the array field F from the Ath transaction in the current group",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 195,
+ "Name": "args",
+ "Args": "U",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "Ath LogicSig argument",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 196,
+ "Name": "gloadss",
+ "Args": "UU",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "Bth scratch space value of the Ath transaction in the current group",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 197,
+ "Name": "itxnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "Ath value of the array field F of the last inner transaction",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 198,
+ "Name": "gitxnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 3,
+ "Doc": "Ath value of the array field F from the Tth transaction in the last inner group submitted",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ }
+ ]
+}
diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go
index 760590309..5df6102e1 100644
--- a/data/transactions/logic/ledger_test.go
+++ b/data/transactions/logic/ledger_test.go
@@ -679,7 +679,7 @@ func (l *Ledger) appl(from basics.Address, appl transactions.ApplicationCallTxnF
if !pass {
return errors.New("Approval program failed")
}
- ad.EvalDelta = cx.Txn.EvalDelta
+ ad.EvalDelta = cx.txn.EvalDelta
switch appl.OnCompletion {
case transactions.NoOpOC:
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index a50c199a9..a486a8d3d 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -17,9 +17,10 @@
package logic
import (
+ "fmt"
"sort"
-
- "github.com/algorand/go-algorand/data/transactions"
+ "strconv"
+ "strings"
)
// LogicVersion defines default assembler and max eval versions
@@ -44,10 +45,8 @@ const backBranchEnabledVersion = 4
// using an index into arrays.
const directRefEnabledVersion = 4
-// innerAppsEnabledVersion is the version that allowed inner app calls. No old
-// apps should be called as inner apps. Set to ExtraProgramChecks version
-// because those checks protect from tricky ClearState Programs.
-const innerAppsEnabledVersion = transactions.ExtraProgramChecksVersion
+// innerAppsEnabledVersion is the version that allowed inner app calls.
+const innerAppsEnabledVersion = 6
// txnEffectsVersion is first version that allowed txn opcode to access
// "effects" (ApplyData info)
@@ -61,47 +60,210 @@ const createdResourcesVersion = 6
// experimental-
const fidoVersion = 7 // base64, json, secp256r1
-// opDetails records details such as non-standard costs, immediate
-// arguments, or dynamic layout controlled by a check function.
-type opDetails struct {
- Cost int
- Size int
- checkFunc opCheckFunc
- Immediates []immediate
- typeFunc opTypeFunc
+type linearCost struct {
+ baseCost int
+ chunkCost int
+ chunkSize int
+}
+
+// divideCeilUnsafely provides `math.Ceil` semantics using integer division. The technique avoids slower floating point operations as suggested in https://stackoverflow.com/a/2745086.
+// The method does _not_ check for divide-by-zero.
+func divideCeilUnsafely(numerator int, denominator int) int {
+ return (numerator + denominator - 1) / denominator
+}
+
+func (lc *linearCost) compute(stack []stackValue) int {
+ cost := lc.baseCost
+ if lc.chunkCost != 0 && lc.chunkSize != 0 {
+ // Uses divideCeilUnsafely rather than (len/size) to match how Ethereum discretizes hashing costs.
+ cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1].Bytes), lc.chunkSize)
+ }
+ return cost
+}
+
+func (lc *linearCost) docCost() string {
+ if *lc == (linearCost{}) {
+ return ""
+ }
+ if lc.chunkCost == 0 {
+ return strconv.Itoa(lc.baseCost)
+ }
+ if lc.chunkSize == 1 {
+ return fmt.Sprintf("%d + %d per byte", lc.baseCost, lc.chunkCost)
+ }
+ return fmt.Sprintf("%d + %d per %d bytes", lc.baseCost, lc.chunkCost, lc.chunkSize)
+}
+
+// OpDetails records details such as non-standard costs, immediate arguments, or
+// dynamic layout controlled by a check function. These objects are mostly built
+// with constructor functions, so it's cleaner to have defaults set here, rather
+// than in line after line of OpSpecs.
+type OpDetails struct {
+ asm asmFunc // assemble the op
+ check checkFunc // static check bytecode (and determine size)
+ refine refineFunc // refine arg/return types based on ProgramKnowledge at assembly time
+
+ Modes runMode // all modes that opcode can run in. i.e. (cx.mode & Modes) != 0 allows
+
+ FullCost linearCost // if non-zero, the cost of the opcode, no immediates matter
+ Size int // if non-zero, the known size of opcode. if 0, check() determines.
+ Immediates []immediate // details of each immediate arg to opcode
+}
+
+func (d *OpDetails) docCost() string {
+ cost := d.FullCost.docCost()
+ if cost != "" {
+ return cost
+ }
+ found := false
+ for _, imm := range d.Immediates {
+ if imm.fieldCosts != nil {
+ if found {
+ panic("two cost dependent fields")
+ }
+ found = true
+ group := imm.Group
+ for _, name := range group.Names {
+ fs, ok := group.SpecByName(name)
+ if !ok {
+ continue
+ }
+ cost += fmt.Sprintf(" %s=%d", name, imm.fieldCosts[fs.Field()])
+ }
+ }
+ }
+ return cost
+}
+
+// Cost computes the cost of the opcode, given details about how it is used,
+// both static (the program, which can be used to find the immediate values
+// supplied), and dynamic (the stack, which can be used to find the run-time
+// arguments supplied). Cost is used at run-time. docCost returns similar
+// information in human-readable form.
+func (d *OpDetails) Cost(program []byte, pc int, stack []stackValue) int {
+ cost := d.FullCost.compute(stack)
+ if cost != 0 {
+ return cost
+ }
+ for i := range d.Immediates {
+ if d.Immediates[i].fieldCosts != nil {
+ cost += d.Immediates[i].fieldCosts[program[pc+1+i]]
+ }
+ }
+ return cost
+}
+
+func opDefault() OpDetails {
+ return OpDetails{asmDefault, nil, nil, modeAny, linearCost{baseCost: 1}, 1, nil}
+}
+
+func constants(asm asmFunc, checker checkFunc, name string, kind immKind) OpDetails {
+ return OpDetails{asm, checker, nil, modeAny, linearCost{baseCost: 1}, 0, []immediate{imm(name, kind)}}
+}
+
+func opBranch() OpDetails {
+ d := opDefault()
+ d.asm = asmBranch
+ d.check = checkBranch
+ d.Size = 3
+ d.Immediates = []immediate{imm("target", immLabel)}
+ return d
+}
+
+func assembler(asm asmFunc) OpDetails {
+ d := opDefault()
+ d.asm = asm
+ return d
+}
+
+func (d OpDetails) assembler(asm asmFunc) OpDetails {
+ clone := d
+ clone.asm = asm
+ return clone
+}
+
+func costly(cost int) OpDetails {
+ d := opDefault()
+ d.FullCost.baseCost = cost
+ return d
+}
+
+func (d OpDetails) costs(cost int) OpDetails {
+ clone := d
+ clone.FullCost = linearCost{baseCost: cost}
+ return clone
}
-var opDefault = opDetails{1, 1, nil, nil, nil}
-var opBranch = opDetails{1, 3, checkBranch, []immediate{{"target", immLabel}}, nil}
+func only(m runMode) OpDetails {
+ d := opDefault()
+ d.Modes = m
+ return d
+}
+
+func (d OpDetails) only(m runMode) OpDetails {
+ clone := d
+ clone.Modes = m
+ return clone
+}
-func costly(cost int) opDetails {
- return opDetails{cost, 1, nil, nil, nil}
+func (d OpDetails) costByLength(initial, perChunk, chunkSize int) OpDetails {
+ clone := d
+ clone.FullCost = costByLength(initial, perChunk, chunkSize).FullCost
+ return clone
}
-func immediates(names ...string) opDetails {
- immediates := make([]immediate, len(names))
+func immediates(names ...string) OpDetails {
+ d := opDefault()
+ d.Size = len(names) + 1
+ d.Immediates = make([]immediate, len(names))
for i, name := range names {
- immediates[i] = immediate{name, immByte}
+ d.Immediates[i] = imm(name, immByte)
}
- return opDetails{1, 1 + len(immediates), nil, immediates, nil}
+ return d
}
-func stacky(typer opTypeFunc, imms ...string) opDetails {
+func stacky(typer refineFunc, imms ...string) OpDetails {
d := immediates(imms...)
- d.typeFunc = typer
+ d.refine = typer
return d
}
-func varies(checker opCheckFunc, name string, kind immKind) opDetails {
- return opDetails{1, 0, checker, []immediate{{name, kind}}, nil}
+// field is used to create an OpDetails for an opcode with a single field
+func field(immediate string, group *FieldGroup) OpDetails {
+ opd := immediates(immediate)
+ opd.Immediates[0].Group = group
+ return opd
}
-func costlyImm(cost int, names ...string) opDetails {
- opd := immediates(names...)
- opd.Cost = cost
+// field is used to annotate an existing immediate with group info
+func (d OpDetails) field(name string, group *FieldGroup) OpDetails {
+ for i := range d.Immediates {
+ if d.Immediates[i].Name == name {
+ d.Immediates[i].Group = group
+ return d
+ }
+ }
+ panic(name)
+}
+
+func costByField(immediate string, group *FieldGroup, costs []int) OpDetails {
+ opd := immediates(immediate).costs(0)
+ opd.Immediates[0].Group = group
+ fieldCosts := make([]int, 256)
+ copy(fieldCosts, costs)
+ opd.Immediates[0].fieldCosts = fieldCosts
return opd
}
+func costByLength(initial int, perChunk int, chunkSize int) OpDetails {
+ if initial < 1 || perChunk <= 0 || chunkSize < 1 || chunkSize > maxStringSize {
+ panic("bad cost configuration")
+ }
+ d := opDefault()
+ d.FullCost = linearCost{initial, perChunk, chunkSize}
+ return d
+}
+
// immType describes the immediate arguments to an opcode
type immKind byte
@@ -115,35 +277,76 @@ const (
)
type immediate struct {
- Name string
- kind immKind
+ Name string
+ kind immKind
+ Group *FieldGroup
+
+ // If non-nil, always 256 long, so cost can be checked before eval
+ fieldCosts []int
+}
+
+func imm(name string, kind immKind) immediate {
+ return immediate{name, kind, nil, nil}
+}
+
+type typedList struct {
+ Types StackTypes
+ Effects string
+}
+
+// Proto describes the "stack behavior" of an opcode, what it pops as arguments
+// and pushes onto the stack as return values.
+type Proto struct {
+ Arg typedList // what gets popped from the stack
+ Return typedList // what gets pushed to the stack
+}
+
+func proto(signature string, effects ...string) Proto {
+ parts := strings.Split(signature, ":")
+ if len(parts) != 2 {
+ panic(signature)
+ }
+ var argEffect, retEffect string
+ switch len(effects) {
+ case 0:
+ // will be generated
+ case 1:
+ retEffect = effects[0]
+ case 2:
+ argEffect = effects[0]
+ retEffect = effects[1]
+ default:
+ panic(effects)
+ }
+ return Proto{
+ Arg: typedList{parseStackTypes(parts[0]), argEffect},
+ Return: typedList{parseStackTypes(parts[1]), retEffect},
+ }
}
// OpSpec defines an opcode
type OpSpec struct {
- Opcode byte
- Name string
- op opEvalFunc // evaluate the op
- asm assembleFunc // assemble the op
- dis disassembleFunc // disassemble the op
- Args StackTypes // what gets popped from the stack
- Returns StackTypes // what gets pushed to the stack
- Version uint64 // TEAL version opcode introduced
- Modes runMode // if non-zero, then (mode & Modes) != 0 to allow
- Details opDetails // Special cost or bytecode layout considerations
-}
-
-var oneBytes = StackTypes{StackBytes}
-var twoBytes = StackTypes{StackBytes, StackBytes}
-var threeBytes = StackTypes{StackBytes, StackBytes, StackBytes}
-var byteInt = StackTypes{StackBytes, StackUint64}
-var byteIntInt = StackTypes{StackBytes, StackUint64, StackUint64}
-var oneInt = StackTypes{StackUint64}
-var twoInts = StackTypes{StackUint64, StackUint64}
-var oneAny = StackTypes{StackAny}
-var twoAny = StackTypes{StackAny, StackAny}
-var anyInt = StackTypes{StackAny, StackUint64}
-var anyIntInt = StackTypes{StackAny, StackUint64, StackUint64}
+ Opcode byte
+ Name string
+ op evalFunc // evaluate the op
+ Proto
+ Version uint64 // TEAL version opcode introduced
+ OpDetails // Special cost or bytecode layout considerations
+}
+
+// AlwaysExits is true iff the opcode always ends the program.
+func (spec *OpSpec) AlwaysExits() bool {
+ return len(spec.Return.Types) == 1 && spec.Return.Types[0] == StackNone
+}
+
+func (spec *OpSpec) deadens() bool {
+ switch spec.Name {
+ case "b", "callsub", "retsub", "err", "return":
+ return true
+ default:
+ return false
+ }
+}
// OpSpecs is the table of operations that can be assembled and evaluated.
//
@@ -152,211 +355,225 @@ var anyIntInt = StackTypes{StackAny, StackUint64, StackUint64}
// Note: assembly can specialize an Any return type if known at
// assembly-time, with ops.returns()
var OpSpecs = []OpSpec{
- {0x00, "err", opErr, asmDefault, disDefault, nil, nil, 1, modeAny, opDefault},
- {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(7)},
- {0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(26)},
- {0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(9)},
+ {0x00, "err", opErr, proto(":x"), 1, opDefault()},
+ {0x01, "sha256", opSHA256, proto("b:b"), 1, costly(7)},
+ {0x02, "keccak256", opKeccak256, proto("b:b"), 1, costly(26)},
+ {0x03, "sha512_256", opSHA512_256, proto("b:b"), 1, costly(9)},
// Cost of these opcodes increases in TEAL version 2 based on measured
// performance. Should be able to run max hashes during stateful TEAL
// and achieve reasonable TPS. Same opcode for different TEAL versions
// is OK.
- {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(35)},
- {0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(130)},
- {0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(45)},
-
- {0x04, "ed25519verify", opEd25519Verify, asmDefault, disDefault, threeBytes, oneInt, 1, runModeSignature, costly(1900)},
- {0x04, "ed25519verify", opEd25519Verify, asmDefault, disDefault, threeBytes, oneInt, 5, modeAny, costly(1900)},
-
- {0x05, "ecdsa_verify", opEcdsaVerify, assembleEcdsa, disEcdsa, threeBytes.plus(twoBytes), oneInt, 5, modeAny, costlyImm(1700, "v")},
- {0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, assembleEcdsa, disEcdsa, oneBytes, twoBytes, 5, modeAny, costlyImm(650, "v")},
- {0x07, "ecdsa_pk_recover", opEcdsaPkRecover, assembleEcdsa, disEcdsa, oneBytes.plus(oneInt).plus(twoBytes), twoBytes, 5, modeAny, costlyImm(2000, "v")},
-
- {0x08, "+", opPlus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x09, "-", opMinus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x0a, "/", opDiv, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x0b, "*", opMul, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x0c, "<", opLt, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x0d, ">", opGt, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x0e, "<=", opLe, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x0f, ">=", opGe, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x10, "&&", opAnd, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x11, "||", opOr, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x12, "==", opEq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, stacky(typeEquals)},
- {0x13, "!=", opNeq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, stacky(typeEquals)},
- {0x14, "!", opNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opDefault},
- {0x15, "len", opLen, asmDefault, disDefault, oneBytes, oneInt, 1, modeAny, opDefault},
- {0x16, "itob", opItob, asmDefault, disDefault, oneInt, oneBytes, 1, modeAny, opDefault},
- {0x17, "btoi", opBtoi, asmDefault, disDefault, oneBytes, oneInt, 1, modeAny, opDefault},
- {0x18, "%", opModulo, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x19, "|", opBitOr, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x1a, "&", opBitAnd, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x1b, "^", opBitXor, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x1c, "~", opBitNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opDefault},
- {0x1d, "mulw", opMulw, asmDefault, disDefault, twoInts, twoInts, 1, modeAny, opDefault},
- {0x1e, "addw", opAddw, asmDefault, disDefault, twoInts, twoInts, 2, modeAny, opDefault},
- {0x1f, "divmodw", opDivModw, asmDefault, disDefault, twoInts.plus(twoInts), twoInts.plus(twoInts), 4, modeAny, costly(20)},
-
- {0x20, "intcblock", opIntConstBlock, assembleIntCBlock, disIntcblock, nil, nil, 1, modeAny, varies(checkIntConstBlock, "uint ...", immInts)},
- {0x21, "intc", opIntConstLoad, assembleIntC, disIntc, nil, oneInt, 1, modeAny, immediates("i")},
- {0x22, "intc_0", opIntConst0, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x23, "intc_1", opIntConst1, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x24, "intc_2", opIntConst2, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x25, "intc_3", opIntConst3, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x26, "bytecblock", opByteConstBlock, assembleByteCBlock, disBytecblock, nil, nil, 1, modeAny, varies(checkByteConstBlock, "bytes ...", immBytess)},
- {0x27, "bytec", opByteConstLoad, assembleByteC, disBytec, nil, oneBytes, 1, modeAny, immediates("i")},
- {0x28, "bytec_0", opByteConst0, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
- {0x29, "bytec_1", opByteConst1, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
- {0x2a, "bytec_2", opByteConst2, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
- {0x2b, "bytec_3", opByteConst3, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
- {0x2c, "arg", opArg, assembleArg, disDefault, nil, oneBytes, 1, runModeSignature, immediates("n")},
- {0x2d, "arg_0", opArg0, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
- {0x2e, "arg_1", opArg1, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
- {0x2f, "arg_2", opArg2, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
- {0x30, "arg_3", opArg3, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
- {0x31, "txn", opTxn, asmTxn, disTxn, nil, oneAny, 1, modeAny, immediates("f")},
+ {0x01, "sha256", opSHA256, proto("b:b"), 2, costly(35)},
+ {0x02, "keccak256", opKeccak256, proto("b:b"), 2, costly(130)},
+ {0x03, "sha512_256", opSHA512_256, proto("b:b"), 2, costly(45)},
+
+ /*
+ Tabling these changes until we offer unlimited global storage as there
+ is currently a useful pattern that requires hashes on long slices to
+ create logicsigs in apps.
+
+ {0x01, "sha256", opSHA256, proto("b:b"), unlimitedStorage, costByLength(12, 6, 8)},
+ {0x02, "keccak256", opKeccak256, proto("b:b"), unlimitedStorage, costByLength(58, 4, 8)},
+ {0x03, "sha512_256", opSHA512_256, proto("b:b"), 7, unlimitedStorage, costByLength(17, 5, 8)},
+ */
+
+ {0x04, "ed25519verify", opEd25519Verify, proto("bbb:i"), 1, costly(1900).only(modeSig)},
+ {0x04, "ed25519verify", opEd25519Verify, proto("bbb:i"), 5, costly(1900)},
+
+ {0x05, "ecdsa_verify", opEcdsaVerify, proto("bbbbb:i"), 5, costByField("v", &EcdsaCurves, ecdsaVerifyCosts)},
+ {0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, proto("b:bb"), 5, costByField("v", &EcdsaCurves, ecdsaDecompressCosts)},
+ {0x07, "ecdsa_pk_recover", opEcdsaPkRecover, proto("bibb:bb"), 5, field("v", &EcdsaCurves).costs(2000)},
+
+ {0x08, "+", opPlus, proto("ii:i"), 1, opDefault()},
+ {0x09, "-", opMinus, proto("ii:i"), 1, opDefault()},
+ {0x0a, "/", opDiv, proto("ii:i"), 1, opDefault()},
+ {0x0b, "*", opMul, proto("ii:i"), 1, opDefault()},
+ {0x0c, "<", opLt, proto("ii:i"), 1, opDefault()},
+ {0x0d, ">", opGt, proto("ii:i"), 1, opDefault()},
+ {0x0e, "<=", opLe, proto("ii:i"), 1, opDefault()},
+ {0x0f, ">=", opGe, proto("ii:i"), 1, opDefault()},
+ {0x10, "&&", opAnd, proto("ii:i"), 1, opDefault()},
+ {0x11, "||", opOr, proto("ii:i"), 1, opDefault()},
+ {0x12, "==", opEq, proto("aa:i"), 1, stacky(typeEquals)},
+ {0x13, "!=", opNeq, proto("aa:i"), 1, stacky(typeEquals)},
+ {0x14, "!", opNot, proto("i:i"), 1, opDefault()},
+ {0x15, "len", opLen, proto("b:i"), 1, opDefault()},
+ {0x16, "itob", opItob, proto("i:b"), 1, opDefault()},
+ {0x17, "btoi", opBtoi, proto("b:i"), 1, opDefault()},
+ {0x18, "%", opModulo, proto("ii:i"), 1, opDefault()},
+ {0x19, "|", opBitOr, proto("ii:i"), 1, opDefault()},
+ {0x1a, "&", opBitAnd, proto("ii:i"), 1, opDefault()},
+ {0x1b, "^", opBitXor, proto("ii:i"), 1, opDefault()},
+ {0x1c, "~", opBitNot, proto("i:i"), 1, opDefault()},
+ {0x1d, "mulw", opMulw, proto("ii:ii"), 1, opDefault()},
+ {0x1e, "addw", opAddw, proto("ii:ii"), 2, opDefault()},
+ {0x1f, "divmodw", opDivModw, proto("iiii:iiii"), 4, costly(20)},
+
+ {0x20, "intcblock", opIntConstBlock, proto(":"), 1, constants(asmIntCBlock, checkIntConstBlock, "uint ...", immInts)},
+ {0x21, "intc", opIntConstLoad, proto(":i"), 1, immediates("i").assembler(asmIntC)},
+ {0x22, "intc_0", opIntConst0, proto(":i"), 1, opDefault()},
+ {0x23, "intc_1", opIntConst1, proto(":i"), 1, opDefault()},
+ {0x24, "intc_2", opIntConst2, proto(":i"), 1, opDefault()},
+ {0x25, "intc_3", opIntConst3, proto(":i"), 1, opDefault()},
+ {0x26, "bytecblock", opByteConstBlock, proto(":"), 1, constants(asmByteCBlock, checkByteConstBlock, "bytes ...", immBytess)},
+ {0x27, "bytec", opByteConstLoad, proto(":b"), 1, immediates("i").assembler(asmByteC)},
+ {0x28, "bytec_0", opByteConst0, proto(":b"), 1, opDefault()},
+ {0x29, "bytec_1", opByteConst1, proto(":b"), 1, opDefault()},
+ {0x2a, "bytec_2", opByteConst2, proto(":b"), 1, opDefault()},
+ {0x2b, "bytec_3", opByteConst3, proto(":b"), 1, opDefault()},
+ {0x2c, "arg", opArg, proto(":b"), 1, immediates("n").only(modeSig).assembler(asmArg)},
+ {0x2d, "arg_0", opArg0, proto(":b"), 1, only(modeSig)},
+ {0x2e, "arg_1", opArg1, proto(":b"), 1, only(modeSig)},
+ {0x2f, "arg_2", opArg2, proto(":b"), 1, only(modeSig)},
+ {0x30, "arg_3", opArg3, proto(":b"), 1, only(modeSig)},
+ {0x31, "txn", opTxn, proto(":a"), 1, field("f", &TxnScalarFields)},
// It is ok to have the same opcode for different TEAL versions.
// This 'txn' asm command supports additional argument in version 2 and
// generates 'txna' opcode in that particular case
- {0x31, "txn", opTxn, asmTxn2, disTxn, nil, oneAny, 2, modeAny, immediates("f")},
- {0x32, "global", opGlobal, assembleGlobal, disGlobal, nil, oneAny, 1, modeAny, immediates("f")},
- {0x33, "gtxn", opGtxn, asmGtxn, disGtxn, nil, oneAny, 1, modeAny, immediates("t", "f")},
- {0x33, "gtxn", opGtxn, asmGtxn2, disGtxn, nil, oneAny, 2, modeAny, immediates("t", "f")},
- {0x34, "load", opLoad, asmDefault, disDefault, nil, oneAny, 1, modeAny, immediates("i")},
- {0x35, "store", opStore, asmDefault, disDefault, oneAny, nil, 1, modeAny, immediates("i")},
- {0x36, "txna", opTxna, asmTxna, disTxna, nil, oneAny, 2, modeAny, immediates("f", "i")},
- {0x37, "gtxna", opGtxna, asmGtxna, disGtxna, nil, oneAny, 2, modeAny, immediates("t", "f", "i")},
+ {0x31, "txn", opTxn, proto(":a"), 2, field("f", &TxnFields).assembler(asmTxn2)},
+ {0x32, "global", opGlobal, proto(":a"), 1, field("f", &GlobalFields)},
+ {0x33, "gtxn", opGtxn, proto(":a"), 1, immediates("t", "f").field("f", &TxnScalarFields)},
+ {0x33, "gtxn", opGtxn, proto(":a"), 2, immediates("t", "f").field("f", &TxnFields).assembler(asmGtxn2)},
+ {0x34, "load", opLoad, proto(":a"), 1, immediates("i")},
+ {0x35, "store", opStore, proto("a:"), 1, immediates("i")},
+ {0x36, "txna", opTxna, proto(":a"), 2, immediates("f", "i").field("f", &TxnArrayFields)},
+ {0x37, "gtxna", opGtxna, proto(":a"), 2, immediates("t", "f", "i").field("f", &TxnArrayFields)},
// Like gtxn, but gets txn index from stack, rather than immediate arg
- {0x38, "gtxns", opGtxns, asmGtxns, disTxn, oneInt, oneAny, 3, modeAny, immediates("f")},
- {0x39, "gtxnsa", opGtxnsa, asmGtxns, disTxna, oneInt, oneAny, 3, modeAny, immediates("f", "i")},
+ {0x38, "gtxns", opGtxns, proto("i:a"), 3, immediates("f").field("f", &TxnFields).assembler(asmGtxns)},
+ {0x39, "gtxnsa", opGtxnsa, proto("i:a"), 3, immediates("f", "i").field("f", &TxnArrayFields)},
// Group scratch space access
- {0x3a, "gload", opGload, asmDefault, disDefault, nil, oneAny, 4, runModeApplication, immediates("t", "i")},
- {0x3b, "gloads", opGloads, asmDefault, disDefault, oneInt, oneAny, 4, runModeApplication, immediates("i")},
+ {0x3a, "gload", opGload, proto(":a"), 4, immediates("t", "i").only(modeApp)},
+ {0x3b, "gloads", opGloads, proto("i:a"), 4, immediates("i").only(modeApp)},
// Access creatable IDs (consider deprecating, as txn CreatedAssetID, CreatedApplicationID should be enough
- {0x3c, "gaid", opGaid, asmDefault, disDefault, nil, oneInt, 4, runModeApplication, immediates("t")},
- {0x3d, "gaids", opGaids, asmDefault, disDefault, oneInt, oneInt, 4, runModeApplication, opDefault},
+ {0x3c, "gaid", opGaid, proto(":i"), 4, immediates("t").only(modeApp)},
+ {0x3d, "gaids", opGaids, proto("i:i"), 4, only(modeApp)},
// Like load/store, but scratch slot taken from TOS instead of immediate
- {0x3e, "loads", opLoads, asmDefault, disDefault, oneInt, oneAny, 5, modeAny, opDefault},
- {0x3f, "stores", opStores, asmDefault, disDefault, oneInt.plus(oneAny), nil, 5, modeAny, opDefault},
-
- {0x40, "bnz", opBnz, assembleBranch, disBranch, oneInt, nil, 1, modeAny, opBranch},
- {0x41, "bz", opBz, assembleBranch, disBranch, oneInt, nil, 2, modeAny, opBranch},
- {0x42, "b", opB, assembleBranch, disBranch, nil, nil, 2, modeAny, opBranch},
- {0x43, "return", opReturn, asmDefault, disDefault, oneInt, nil, 2, modeAny, opDefault},
- {0x44, "assert", opAssert, asmDefault, disDefault, oneInt, nil, 3, modeAny, opDefault},
- {0x48, "pop", opPop, asmDefault, disDefault, oneAny, nil, 1, modeAny, opDefault},
- {0x49, "dup", opDup, asmDefault, disDefault, oneAny, twoAny, 1, modeAny, stacky(typeDup)},
- {0x4a, "dup2", opDup2, asmDefault, disDefault, twoAny, twoAny.plus(twoAny), 2, modeAny, stacky(typeDupTwo)},
+ {0x3e, "loads", opLoads, proto("i:a"), 5, opDefault()},
+ {0x3f, "stores", opStores, proto("ia:"), 5, opDefault()},
+
+ {0x40, "bnz", opBnz, proto("i:"), 1, opBranch()},
+ {0x41, "bz", opBz, proto("i:"), 2, opBranch()},
+ {0x42, "b", opB, proto(":"), 2, opBranch()},
+ {0x43, "return", opReturn, proto("i:x"), 2, opDefault()},
+ {0x44, "assert", opAssert, proto("i:"), 3, opDefault()},
+ {0x48, "pop", opPop, proto("a:"), 1, opDefault()},
+ {0x49, "dup", opDup, proto("a:aa", "A, A"), 1, stacky(typeDup)},
+ {0x4a, "dup2", opDup2, proto("aa:aaaa", "A, B, A, B"), 2, stacky(typeDupTwo)},
// There must be at least one thing on the stack for dig, but
// it would be nice if we did better checking than that.
- {0x4b, "dig", opDig, asmDefault, disDefault, oneAny, twoAny, 3, modeAny, stacky(typeDig, "n")},
- {0x4c, "swap", opSwap, asmDefault, disDefault, twoAny, twoAny, 3, modeAny, stacky(typeSwap)},
- {0x4d, "select", opSelect, asmDefault, disDefault, twoAny.plus(oneInt), oneAny, 3, modeAny, stacky(typeSelect)},
- {0x4e, "cover", opCover, asmDefault, disDefault, oneAny, oneAny, 5, modeAny, stacky(typeCover, "n")},
- {0x4f, "uncover", opUncover, asmDefault, disDefault, oneAny, oneAny, 5, modeAny, stacky(typeUncover, "n")},
+ {0x4b, "dig", opDig, proto("a:aa", "A, [N items]", "A, [N items], A"), 3, stacky(typeDig, "n")},
+ {0x4c, "swap", opSwap, proto("aa:aa", "B, A"), 3, stacky(typeSwap)},
+ {0x4d, "select", opSelect, proto("aai:a", "A or B"), 3, stacky(typeSelect)},
+ {0x4e, "cover", opCover, proto("a:a", "[N items], A", "A, [N items]"), 5, stacky(typeCover, "n")},
+ {0x4f, "uncover", opUncover, proto("a:a", "A, [N items]", "[N items], A"), 5, stacky(typeUncover, "n")},
// byteslice processing / StringOps
- {0x50, "concat", opConcat, asmDefault, disDefault, twoBytes, oneBytes, 2, modeAny, opDefault},
- {0x51, "substring", opSubstring, assembleSubstring, disDefault, oneBytes, oneBytes, 2, modeAny, immediates("s", "e")},
- {0x52, "substring3", opSubstring3, asmDefault, disDefault, byteIntInt, oneBytes, 2, modeAny, opDefault},
- {0x53, "getbit", opGetBit, asmDefault, disDefault, anyInt, oneInt, 3, modeAny, opDefault},
- {0x54, "setbit", opSetBit, asmDefault, disDefault, anyIntInt, oneAny, 3, modeAny, stacky(typeSetBit)},
- {0x55, "getbyte", opGetByte, asmDefault, disDefault, byteInt, oneInt, 3, modeAny, opDefault},
- {0x56, "setbyte", opSetByte, asmDefault, disDefault, byteIntInt, oneBytes, 3, modeAny, opDefault},
- {0x57, "extract", opExtract, asmDefault, disDefault, oneBytes, oneBytes, 5, modeAny, immediates("s", "l")},
- {0x58, "extract3", opExtract3, asmDefault, disDefault, byteIntInt, oneBytes, 5, modeAny, opDefault},
- {0x59, "extract_uint16", opExtract16Bits, asmDefault, disDefault, byteInt, oneInt, 5, modeAny, opDefault},
- {0x5a, "extract_uint32", opExtract32Bits, asmDefault, disDefault, byteInt, oneInt, 5, modeAny, opDefault},
- {0x5b, "extract_uint64", opExtract64Bits, asmDefault, disDefault, byteInt, oneInt, 5, modeAny, opDefault},
- {0x5c, "base64_decode", opBase64Decode, assembleBase64Decode, disBase64Decode, oneBytes, oneBytes, fidoVersion, modeAny, costlyImm(25, "e")},
- {0x5d, "json_ref", opJSONRef, assembleJSONRef, disJSONRef, twoBytes, oneAny, fidoVersion, modeAny, immediates("r")},
-
- {0x60, "balance", opBalance, asmDefault, disDefault, oneInt, oneInt, 2, runModeApplication, opDefault},
- {0x60, "balance", opBalance, asmDefault, disDefault, oneAny, oneInt, directRefEnabledVersion, runModeApplication, opDefault},
- {0x61, "app_opted_in", opAppOptedIn, asmDefault, disDefault, twoInts, oneInt, 2, runModeApplication, opDefault},
- {0x61, "app_opted_in", opAppOptedIn, asmDefault, disDefault, oneAny.plus(oneInt), oneInt, directRefEnabledVersion, runModeApplication, opDefault},
- {0x62, "app_local_get", opAppLocalGet, asmDefault, disDefault, oneInt.plus(oneBytes), oneAny, 2, runModeApplication, opDefault},
- {0x62, "app_local_get", opAppLocalGet, asmDefault, disDefault, oneAny.plus(oneBytes), oneAny, directRefEnabledVersion, runModeApplication, opDefault},
- {0x63, "app_local_get_ex", opAppLocalGetEx, asmDefault, disDefault, twoInts.plus(oneBytes), oneAny.plus(oneInt), 2, runModeApplication, opDefault},
- {0x63, "app_local_get_ex", opAppLocalGetEx, asmDefault, disDefault, oneAny.plus(oneInt).plus(oneBytes), oneAny.plus(oneInt), directRefEnabledVersion, runModeApplication, opDefault},
- {0x64, "app_global_get", opAppGlobalGet, asmDefault, disDefault, oneBytes, oneAny, 2, runModeApplication, opDefault},
- {0x65, "app_global_get_ex", opAppGlobalGetEx, asmDefault, disDefault, oneInt.plus(oneBytes), oneAny.plus(oneInt), 2, runModeApplication, opDefault},
- {0x66, "app_local_put", opAppLocalPut, asmDefault, disDefault, oneInt.plus(oneBytes).plus(oneAny), nil, 2, runModeApplication, opDefault},
- {0x66, "app_local_put", opAppLocalPut, asmDefault, disDefault, oneAny.plus(oneBytes).plus(oneAny), nil, directRefEnabledVersion, runModeApplication, opDefault},
- {0x67, "app_global_put", opAppGlobalPut, asmDefault, disDefault, oneBytes.plus(oneAny), nil, 2, runModeApplication, opDefault},
- {0x68, "app_local_del", opAppLocalDel, asmDefault, disDefault, oneInt.plus(oneBytes), nil, 2, runModeApplication, opDefault},
- {0x68, "app_local_del", opAppLocalDel, asmDefault, disDefault, oneAny.plus(oneBytes), nil, directRefEnabledVersion, runModeApplication, opDefault},
- {0x69, "app_global_del", opAppGlobalDel, asmDefault, disDefault, oneBytes, nil, 2, runModeApplication, opDefault},
-
- {0x70, "asset_holding_get", opAssetHoldingGet, assembleAssetHolding, disAssetHolding, twoInts, oneAny.plus(oneInt), 2, runModeApplication, immediates("f")},
- {0x70, "asset_holding_get", opAssetHoldingGet, assembleAssetHolding, disAssetHolding, oneAny.plus(oneInt), oneAny.plus(oneInt), directRefEnabledVersion, runModeApplication, immediates("f")},
- {0x71, "asset_params_get", opAssetParamsGet, assembleAssetParams, disAssetParams, oneInt, oneAny.plus(oneInt), 2, runModeApplication, immediates("f")},
- {0x72, "app_params_get", opAppParamsGet, assembleAppParams, disAppParams, oneInt, oneAny.plus(oneInt), 5, runModeApplication, immediates("f")},
- {0x73, "acct_params_get", opAcctParamsGet, assembleAcctParams, disAcctParams, oneAny, oneAny.plus(oneInt), 6, runModeApplication, immediates("f")},
-
- {0x78, "min_balance", opMinBalance, asmDefault, disDefault, oneInt, oneInt, 3, runModeApplication, opDefault},
- {0x78, "min_balance", opMinBalance, asmDefault, disDefault, oneAny, oneInt, directRefEnabledVersion, runModeApplication, opDefault},
+ {0x50, "concat", opConcat, proto("bb:b"), 2, opDefault()},
+ {0x51, "substring", opSubstring, proto("b:b"), 2, immediates("s", "e").assembler(asmSubstring)},
+ {0x52, "substring3", opSubstring3, proto("bii:b"), 2, opDefault()},
+ {0x53, "getbit", opGetBit, proto("ai:i"), 3, opDefault()},
+ {0x54, "setbit", opSetBit, proto("aii:a"), 3, stacky(typeSetBit)},
+ {0x55, "getbyte", opGetByte, proto("bi:i"), 3, opDefault()},
+ {0x56, "setbyte", opSetByte, proto("bii:b"), 3, opDefault()},
+ {0x57, "extract", opExtract, proto("b:b"), 5, immediates("s", "l")},
+ {0x58, "extract3", opExtract3, proto("bii:b"), 5, opDefault()},
+ {0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, opDefault()},
+ {0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, opDefault()},
+ {0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, opDefault()},
+ {0x5c, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16)},
+ {0x5d, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes)},
+
+ {0x60, "balance", opBalance, proto("i:i"), 2, only(modeApp)},
+ {0x60, "balance", opBalance, proto("a:i"), directRefEnabledVersion, only(modeApp)},
+ {0x61, "app_opted_in", opAppOptedIn, proto("ii:i"), 2, only(modeApp)},
+ {0x61, "app_opted_in", opAppOptedIn, proto("ai:i"), directRefEnabledVersion, only(modeApp)},
+ {0x62, "app_local_get", opAppLocalGet, proto("ib:a"), 2, only(modeApp)},
+ {0x62, "app_local_get", opAppLocalGet, proto("ab:a"), directRefEnabledVersion, only(modeApp)},
+ {0x63, "app_local_get_ex", opAppLocalGetEx, proto("iib:ai"), 2, only(modeApp)},
+ {0x63, "app_local_get_ex", opAppLocalGetEx, proto("aib:ai"), directRefEnabledVersion, only(modeApp)},
+ {0x64, "app_global_get", opAppGlobalGet, proto("b:a"), 2, only(modeApp)},
+ {0x65, "app_global_get_ex", opAppGlobalGetEx, proto("ib:ai"), 2, only(modeApp)},
+ {0x66, "app_local_put", opAppLocalPut, proto("iba:"), 2, only(modeApp)},
+ {0x66, "app_local_put", opAppLocalPut, proto("aba:"), directRefEnabledVersion, only(modeApp)},
+ {0x67, "app_global_put", opAppGlobalPut, proto("ba:"), 2, only(modeApp)},
+ {0x68, "app_local_del", opAppLocalDel, proto("ib:"), 2, only(modeApp)},
+ {0x68, "app_local_del", opAppLocalDel, proto("ab:"), directRefEnabledVersion, only(modeApp)},
+ {0x69, "app_global_del", opAppGlobalDel, proto("b:"), 2, only(modeApp)},
+
+ {0x70, "asset_holding_get", opAssetHoldingGet, proto("ii:ai"), 2, field("f", &AssetHoldingFields).only(modeApp)},
+ {0x70, "asset_holding_get", opAssetHoldingGet, proto("ai:ai"), directRefEnabledVersion, field("f", &AssetHoldingFields).only(modeApp)},
+ {0x71, "asset_params_get", opAssetParamsGet, proto("i:ai"), 2, field("f", &AssetParamsFields).only(modeApp)},
+ {0x72, "app_params_get", opAppParamsGet, proto("i:ai"), 5, field("f", &AppParamsFields).only(modeApp)},
+ {0x73, "acct_params_get", opAcctParamsGet, proto("a:ai"), 6, field("f", &AcctParamsFields).only(modeApp)},
+
+ {0x78, "min_balance", opMinBalance, proto("i:i"), 3, only(modeApp)},
+ {0x78, "min_balance", opMinBalance, proto("a:i"), directRefEnabledVersion, only(modeApp)},
// Immediate bytes and ints. Smaller code size for single use of constant.
- {0x80, "pushbytes", opPushBytes, asmPushBytes, disPushBytes, nil, oneBytes, 3, modeAny, varies(checkPushBytes, "bytes", immBytes)},
- {0x81, "pushint", opPushInt, asmPushInt, disPushInt, nil, oneInt, 3, modeAny, varies(checkPushInt, "uint", immInt)},
+ {0x80, "pushbytes", opPushBytes, proto(":b"), 3, constants(asmPushBytes, opPushBytes, "bytes", immBytes)},
+ {0x81, "pushint", opPushInt, proto(":i"), 3, constants(asmPushInt, opPushInt, "uint", immInt)},
- {0x84, "ed25519verify_bare", opEd25519VerifyBare, asmDefault, disDefault, threeBytes, oneInt, 7, modeAny, costly(1900)},
+ {0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:i"), 7, costly(1900)},
// "Function oriented"
- {0x88, "callsub", opCallSub, assembleBranch, disBranch, nil, nil, 4, modeAny, opBranch},
- {0x89, "retsub", opRetSub, asmDefault, disDefault, nil, nil, 4, modeAny, opDefault},
+ {0x88, "callsub", opCallSub, proto(":"), 4, opBranch()},
+ {0x89, "retsub", opRetSub, proto(":"), 4, opDefault()},
// Leave a little room for indirect function calls, or similar
// More math
- {0x90, "shl", opShiftLeft, asmDefault, disDefault, twoInts, oneInt, 4, modeAny, opDefault},
- {0x91, "shr", opShiftRight, asmDefault, disDefault, twoInts, oneInt, 4, modeAny, opDefault},
- {0x92, "sqrt", opSqrt, asmDefault, disDefault, oneInt, oneInt, 4, modeAny, costly(4)},
- {0x93, "bitlen", opBitLen, asmDefault, disDefault, oneAny, oneInt, 4, modeAny, opDefault},
- {0x94, "exp", opExp, asmDefault, disDefault, twoInts, oneInt, 4, modeAny, opDefault},
- {0x95, "expw", opExpw, asmDefault, disDefault, twoInts, twoInts, 4, modeAny, costly(10)},
- {0x96, "bsqrt", opBytesSqrt, asmDefault, disDefault, oneBytes, oneBytes, 6, modeAny, costly(40)},
- {0x97, "divw", opDivw, asmDefault, disDefault, twoInts.plus(oneInt), oneInt, 6, modeAny, opDefault},
- {0x98, "sha3_256", opSHA3_256, asmDefault, disDefault, oneBytes, oneBytes, 7, modeAny, costly(130)},
+ {0x90, "shl", opShiftLeft, proto("ii:i"), 4, opDefault()},
+ {0x91, "shr", opShiftRight, proto("ii:i"), 4, opDefault()},
+ {0x92, "sqrt", opSqrt, proto("i:i"), 4, costly(4)},
+ {0x93, "bitlen", opBitLen, proto("a:i"), 4, opDefault()},
+ {0x94, "exp", opExp, proto("ii:i"), 4, opDefault()},
+ {0x95, "expw", opExpw, proto("ii:ii"), 4, costly(10)},
+ {0x96, "bsqrt", opBytesSqrt, proto("b:b"), 6, costly(40)},
+ {0x97, "divw", opDivw, proto("iii:i"), 6, opDefault()},
+ {0x98, "sha3_256", opSHA3_256, proto("b:b"), 7, costly(130)},
+
+ /* Will end up following keccak256 -
+ {0x98, "sha3_256", opSHA3_256, proto("b:b"), unlimitedStorage, costByLength(58, 4, 8)},},
+ */
// Byteslice math.
- {0xa0, "b+", opBytesPlus, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(10)},
- {0xa1, "b-", opBytesMinus, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(10)},
- {0xa2, "b/", opBytesDiv, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(20)},
- {0xa3, "b*", opBytesMul, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(20)},
- {0xa4, "b<", opBytesLt, asmDefault, disDefault, twoBytes, oneInt, 4, modeAny, opDefault},
- {0xa5, "b>", opBytesGt, asmDefault, disDefault, twoBytes, oneInt, 4, modeAny, opDefault},
- {0xa6, "b<=", opBytesLe, asmDefault, disDefault, twoBytes, oneInt, 4, modeAny, opDefault},
- {0xa7, "b>=", opBytesGe, asmDefault, disDefault, twoBytes, oneInt, 4, modeAny, opDefault},
- {0xa8, "b==", opBytesEq, asmDefault, disDefault, twoBytes, oneInt, 4, modeAny, opDefault},
- {0xa9, "b!=", opBytesNeq, asmDefault, disDefault, twoBytes, oneInt, 4, modeAny, opDefault},
- {0xaa, "b%", opBytesModulo, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(20)},
- {0xab, "b|", opBytesBitOr, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(6)},
- {0xac, "b&", opBytesBitAnd, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(6)},
- {0xad, "b^", opBytesBitXor, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(6)},
- {0xae, "b~", opBytesBitNot, asmDefault, disDefault, oneBytes, oneBytes, 4, modeAny, costly(4)},
- {0xaf, "bzero", opBytesZero, asmDefault, disDefault, oneInt, oneBytes, 4, modeAny, opDefault},
+ {0xa0, "b+", opBytesPlus, proto("bb:b"), 4, costly(10)},
+ {0xa1, "b-", opBytesMinus, proto("bb:b"), 4, costly(10)},
+ {0xa2, "b/", opBytesDiv, proto("bb:b"), 4, costly(20)},
+ {0xa3, "b*", opBytesMul, proto("bb:b"), 4, costly(20)},
+ {0xa4, "b<", opBytesLt, proto("bb:i"), 4, opDefault()},
+ {0xa5, "b>", opBytesGt, proto("bb:i"), 4, opDefault()},
+ {0xa6, "b<=", opBytesLe, proto("bb:i"), 4, opDefault()},
+ {0xa7, "b>=", opBytesGe, proto("bb:i"), 4, opDefault()},
+ {0xa8, "b==", opBytesEq, proto("bb:i"), 4, opDefault()},
+ {0xa9, "b!=", opBytesNeq, proto("bb:i"), 4, opDefault()},
+ {0xaa, "b%", opBytesModulo, proto("bb:b"), 4, costly(20)},
+ {0xab, "b|", opBytesBitOr, proto("bb:b"), 4, costly(6)},
+ {0xac, "b&", opBytesBitAnd, proto("bb:b"), 4, costly(6)},
+ {0xad, "b^", opBytesBitXor, proto("bb:b"), 4, costly(6)},
+ {0xae, "b~", opBytesBitNot, proto("b:b"), 4, costly(4)},
+ {0xaf, "bzero", opBytesZero, proto("i:b"), 4, opDefault()},
// AVM "effects"
- {0xb0, "log", opLog, asmDefault, disDefault, oneBytes, nil, 5, runModeApplication, opDefault},
- {0xb1, "itxn_begin", opTxBegin, asmDefault, disDefault, nil, nil, 5, runModeApplication, opDefault},
- {0xb2, "itxn_field", opTxField, asmTxField, disTxField, oneAny, nil, 5, runModeApplication, stacky(typeTxField, "f")},
- {0xb3, "itxn_submit", opTxSubmit, asmDefault, disDefault, nil, nil, 5, runModeApplication, opDefault},
- {0xb4, "itxn", opItxn, asmItxn, disTxn, nil, oneAny, 5, runModeApplication, immediates("f")},
- {0xb5, "itxna", opItxna, asmTxna, disTxna, nil, oneAny, 5, runModeApplication, immediates("f", "i")},
- {0xb6, "itxn_next", opTxNext, asmDefault, disDefault, nil, nil, 6, runModeApplication, opDefault},
- {0xb7, "gitxn", opGitxn, asmGitxn, disGtxn, nil, oneAny, 6, runModeApplication, immediates("t", "f")},
- {0xb8, "gitxna", opGitxna, asmGtxna, disGtxna, nil, oneAny, 6, runModeApplication, immediates("t", "f", "i")},
+ {0xb0, "log", opLog, proto("b:"), 5, only(modeApp)},
+ {0xb1, "itxn_begin", opTxBegin, proto(":"), 5, only(modeApp)},
+ {0xb2, "itxn_field", opItxnField, proto("a:"), 5, stacky(typeTxField, "f").field("f", &TxnFields).only(modeApp).assembler(asmItxnField)},
+ {0xb3, "itxn_submit", opItxnSubmit, proto(":"), 5, only(modeApp)},
+ {0xb4, "itxn", opItxn, proto(":a"), 5, field("f", &TxnScalarFields).only(modeApp).assembler(asmItxn)},
+ {0xb5, "itxna", opItxna, proto(":a"), 5, immediates("f", "i").field("f", &TxnArrayFields).only(modeApp)},
+ {0xb6, "itxn_next", opItxnNext, proto(":"), 6, only(modeApp)},
+ {0xb7, "gitxn", opGitxn, proto(":a"), 6, immediates("t", "f").field("f", &TxnFields).only(modeApp).assembler(asmGitxn)},
+ {0xb8, "gitxna", opGitxna, proto(":a"), 6, immediates("t", "f", "i").field("f", &TxnArrayFields).only(modeApp)},
// Dynamic indexing
- {0xc0, "txnas", opTxnas, asmTxnas, disTxn, oneInt, oneAny, 5, modeAny, immediates("f")},
- {0xc1, "gtxnas", opGtxnas, asmGtxnas, disGtxn, oneInt, oneAny, 5, modeAny, immediates("t", "f")},
- {0xc2, "gtxnsas", opGtxnsas, asmGtxnsas, disTxn, twoInts, oneAny, 5, modeAny, immediates("f")},
- {0xc3, "args", opArgs, asmDefault, disDefault, oneInt, oneBytes, 5, runModeSignature, opDefault},
- {0xc4, "gloadss", opGloadss, asmDefault, disDefault, twoInts, oneAny, 6, runModeApplication, opDefault},
- {0xc5, "itxnas", opItxnas, asmTxnas, disTxn, oneInt, oneAny, 6, runModeApplication, immediates("f")},
- {0xc6, "gitxnas", opGitxnas, asmGtxnas, disGtxn, oneInt, oneAny, 6, runModeApplication, immediates("t", "f")},
+ {0xc0, "txnas", opTxnas, proto("i:a"), 5, field("f", &TxnArrayFields)},
+ {0xc1, "gtxnas", opGtxnas, proto("i:a"), 5, immediates("t", "f").field("f", &TxnArrayFields)},
+ {0xc2, "gtxnsas", opGtxnsas, proto("ii:a"), 5, field("f", &TxnArrayFields)},
+ {0xc3, "args", opArgs, proto("i:b"), 5, only(modeSig)},
+ {0xc4, "gloadss", opGloadss, proto("ii:a"), 6, only(modeApp)},
+ {0xc5, "itxnas", opItxnas, proto("i:a"), 6, field("f", &TxnArrayFields).only(modeApp)},
+ {0xc6, "gitxnas", opGitxnas, proto("i:a"), 6, immediates("t", "f").field("f", &TxnArrayFields).only(modeApp)},
}
type sortByOpcode []OpSpec
@@ -414,7 +631,7 @@ func OpcodesByVersion(version uint64) []OpSpec {
// direct opcode bytes
var opsByOpcode [LogicVersion + 1][256]OpSpec
-// OpsByName map for each each version, mapping opcode name to OpSpec
+// OpsByName map for each version, mapping opcode name to OpSpec
var OpsByName [LogicVersion + 1]map[string]OpSpec
// Migration from TEAL v1 to TEAL v2.
@@ -441,7 +658,7 @@ func init() {
}
// Start from v2 TEAL and higher,
// copy lower version opcodes and overwrite matching version
- for v := uint64(2); v <= EvalMaxVersion; v++ {
+ for v := uint64(2); v <= evalMaxVersion; v++ {
OpsByName[v] = make(map[string]OpSpec, 256)
// Copy opcodes from lower version
diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go
index 955662ced..68a0ef9c9 100644
--- a/data/transactions/logic/opcodes_test.go
+++ b/data/transactions/logic/opcodes_test.go
@@ -31,7 +31,7 @@ func TestOpSpecs(t *testing.T) {
t.Parallel()
for _, spec := range OpSpecs {
- require.NotEmpty(t, spec.Details, spec)
+ require.NotEmpty(t, spec.OpDetails, spec)
}
}
@@ -42,10 +42,10 @@ func (os *OpSpec) equals(oso *OpSpec) bool {
if os.Name != oso.Name {
return false
}
- if !reflect.DeepEqual(os.Args, oso.Args) {
+ if !reflect.DeepEqual(os.Arg, oso.Arg) {
return false
}
- if !reflect.DeepEqual(os.Returns, oso.Returns) {
+ if !reflect.DeepEqual(os.Return, oso.Return) {
return false
}
if os.Version != oso.Version {
@@ -77,13 +77,15 @@ func TestOpcodesByVersionReordered(t *testing.T) {
OpSpecs[1] = OpSpecs[4]
OpSpecs[4] = tmp
- t.Run("TestOpcodesByVersion", TestOpcodesByVersion)
+ t.Run("TestOpcodesByVersion", testOpcodesByVersion)
}
func TestOpcodesByVersion(t *testing.T) {
- // partitiontest.PartitionTest(t)
- // has partitioning in the TestOpcodesByVersionReordered()
+ partitiontest.PartitionTest(t)
+ testOpcodesByVersion(t)
+}
+func testOpcodesByVersion(t *testing.T) {
// Make a copy of the OpSpecs to check if OpcodesByVersion will change it
OpSpecs2 := make([]OpSpec, len(OpSpecs))
for idx, opspec := range OpSpecs {
@@ -165,11 +167,10 @@ func TestOpcodesVersioningV2(t *testing.T) {
eq = a.Opcode == b.Opcode && a.Name == b.Name &&
reflect.ValueOf(a.op).Pointer() == reflect.ValueOf(b.op).Pointer() &&
reflect.ValueOf(a.asm).Pointer() == reflect.ValueOf(b.asm).Pointer() &&
- reflect.ValueOf(a.dis).Pointer() == reflect.ValueOf(b.dis).Pointer() &&
- reflect.DeepEqual(a.Args, b.Args) && reflect.DeepEqual(a.Returns, b.Returns) &&
+ reflect.DeepEqual(a.Arg, b.Arg) && reflect.DeepEqual(a.Return, b.Return) &&
a.Modes == b.Modes &&
- a.Details.Cost == b.Details.Cost && a.Details.Size == b.Details.Size &&
- reflect.ValueOf(a.Details.checkFunc).Pointer() == reflect.ValueOf(b.Details.checkFunc).Pointer()
+ a.OpDetails.FullCost == b.OpDetails.FullCost && a.OpDetails.Size == b.OpDetails.Size &&
+ reflect.ValueOf(a.OpDetails.check).Pointer() == reflect.ValueOf(b.OpDetails.check).Pointer()
return
}
// ensure v0 and v1 are the same
diff --git a/data/transactions/logic/sourcemap.go b/data/transactions/logic/sourcemap.go
new file mode 100644
index 000000000..3ffafd5e5
--- /dev/null
+++ b/data/transactions/logic/sourcemap.go
@@ -0,0 +1,95 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "bytes"
+ "strings"
+)
+
+// sourceMapVersion is currently 3.
+// Refer to the full specs of sourcemap here: https://sourcemaps.info/spec.html
+const sourceMapVersion = 3
+const b64table string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+
+// SourceMap contains details from the source to assembly process.
+// Currently contains the map between TEAL source line to
+// the assembled bytecode position and details about
+// the template variables contained in the source file.
+type SourceMap struct {
+ Version int `json:"version"`
+ File string `json:"file,omitempty"`
+ SourceRoot string `json:"sourceRoot,omitempty"`
+ Sources []string `json:"sources"`
+ Names []string `json:"names"`
+ Mapping string `json:"mapping"`
+}
+
+// GetSourceMap returns a struct containing details about
+// the assembled file and encoded mappings to the source file.
+func GetSourceMap(sourceNames []string, offsetToLine map[int]int) SourceMap {
+ maxPC := 0
+ for pc := range offsetToLine {
+ if pc > maxPC {
+ maxPC = pc
+ }
+ }
+
+ // Array where index is the PC and value is the line.
+ pcToLine := make([]string, maxPC+1)
+ for pc := range pcToLine {
+ if line, ok := offsetToLine[pc]; ok {
+ pcToLine[pc] = MakeSourceMapLine(0, 0, line, 0)
+ } else {
+ pcToLine[pc] = ""
+ }
+ }
+
+ // Encode the source map into a string
+ encodedMapping := strings.Join(pcToLine, ";")
+
+ return SourceMap{
+ Version: sourceMapVersion,
+ Sources: sourceNames,
+ Names: []string{}, // TEAL code does not generate any names.
+ Mapping: encodedMapping,
+ }
+}
+
+// intToVLQ writes out value to bytes.Buffer
+func intToVLQ(v int, buf *bytes.Buffer) {
+ v <<= 1
+ if v < 0 {
+ v = -v
+ v |= 1
+ }
+ for v >= 32 {
+ buf.WriteByte(b64table[32|(v&31)])
+ v >>= 5
+ }
+ buf.WriteByte(b64table[v])
+}
+
+// MakeSourceMapLine creates source map mapping's line entry
+func MakeSourceMapLine(tcol, sindex, sline, scol int) string {
+ buf := bytes.NewBuffer(nil)
+ intToVLQ(tcol, buf)
+ intToVLQ(sindex, buf)
+ intToVLQ(sline, buf)
+ intToVLQ(scol, buf)
+ return buf.String()
+}
diff --git a/data/transactions/logic/sourcemap_test.go b/data/transactions/logic/sourcemap_test.go
new file mode 100644
index 000000000..718535ec4
--- /dev/null
+++ b/data/transactions/logic/sourcemap_test.go
@@ -0,0 +1,64 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetSourceMap(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ sourceNames := []string{"test.teal"}
+ offsetToLine := map[int]int{
+ 1: 1,
+ 2: 2,
+ 5: 3,
+ }
+ actualSourceMap := GetSourceMap(sourceNames, offsetToLine)
+
+ a.Equal(sourceMapVersion, actualSourceMap.Version)
+ a.Equal(sourceNames, actualSourceMap.Sources)
+ a.Equal([]string{}, actualSourceMap.Names)
+
+ // Check encoding for each line.
+ splitMapping := strings.Split(actualSourceMap.Mapping, ";")
+ for pc := range splitMapping {
+ if line, ok := offsetToLine[pc]; ok {
+ a.Equal(MakeSourceMapLine(0, 0, line, 0), splitMapping[pc])
+ } else {
+ a.Equal("", splitMapping[pc])
+ }
+ }
+}
+
+func TestVLQ(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ a.Equal("AAAA", MakeSourceMapLine(0, 0, 0, 0))
+ a.Equal("AACA", MakeSourceMapLine(0, 0, 1, 0))
+ a.Equal("AAEA", MakeSourceMapLine(0, 0, 2, 0))
+ a.Equal("AAgBA", MakeSourceMapLine(0, 0, 16, 0))
+ a.Equal("AAggBA", MakeSourceMapLine(0, 0, 512, 0))
+ a.Equal("ADggBD", MakeSourceMapLine(0, -1, 512, -1))
+}
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
new file mode 100644
index 000000000..efc093602
--- /dev/null
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -0,0 +1,136 @@
+{
+ "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
+ "name": "Algorand TEAL",
+ "patterns": [
+ {
+ "include": "#invalid"
+ },
+ {
+ "include": "#comments"
+ },
+ {
+ "include": "#strings"
+ },
+ {
+ "include": "#literals"
+ },
+ {
+ "include": "#labels"
+ },
+ {
+ "include": "#keywords"
+ },
+ {
+ "include": "#pragmas"
+ }
+ ],
+ "repository": {
+ "comments": {
+ "name": "comment.line.double-slash.teal",
+ "begin": "//",
+ "end": "$"
+ },
+ "invalid": {
+ "patterns": [
+ {
+ "name": "invalid.illegal.teal",
+ "match": "^\\s+.*$"
+ }
+ ]
+ },
+ "keywords": {
+ "patterns": [
+ {
+ "match": "\\b(base64|b64|base32|b32)(?:\\(|\\s+)([a-zA-Z0-9\\+\\/\\=]+)(?:\\)|\\s?|$)",
+ "captures": {
+ "1": {
+ "name": "support.class.teal"
+ },
+ "2": {
+ "name": "string.quoted.triple.teal"
+ }
+ }
+ },
+ {
+ "match": "^(addr)\\s+([A-Z2-7\\=]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.teal"
+ },
+ "2": {
+ "name": "string.unquoted.teal"
+ }
+ }
+ },
+ {
+ "name": "keyword.control.teal",
+ "match": "^(assert|b|bnz|bz|callsub|cover|dig|dup|dup2|err|pop|retsub|return|select|swap|uncover)\\b"
+ },
+ {
+ "name": "keyword.other.teal",
+ "match": "^(int|byte|addr|arg|arg_0|arg_1|arg_2|arg_3|args|bytec|bytec_0|bytec_1|bytec_2|bytec_3|bytecblock|bzero|gaid|gaids|gload|gloads|gloadss|global|gtxn|gtxna|gtxnas|gtxns|gtxnsa|gtxnsas|intc|intc_0|intc_1|intc_2|intc_3|intcblock|load|loads|pushbytes|pushint|store|stores|txn|txna|txnas)\\b"
+ },
+ {
+ "name": "keyword.other.unit.teal",
+ "match": "^(acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|log|min_balance)\\b"
+ },
+ {
+ "name": "keyword.operator.teal",
+ "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
+ }
+ ]
+ },
+ "labels": {
+ "patterns": [
+ {
+ "name": "support.variable.teal",
+ "match": "^\\w+:.*$"
+ },
+ {
+ "match": "\\b(?\u003c=b|bz|bnz)\\s+(\\w+)\\b",
+ "captures": {
+ "1": {
+ "name": "support.variable.teal"
+ }
+ }
+ }
+ ]
+ },
+ "literals": {
+ "patterns": [
+ {
+ "name": "constant.numeric.teal",
+ "match": "\\b([0-9]+)\\b"
+ },
+ {
+ "name": "constant.numeric.teal",
+ "match": "\\b(?\u003c=int\\s+)(0x[0-9]+)\\b"
+ },
+ {
+ "name": "string.quoted.double.teal",
+ "match": "\\b(?\u003c=byte\\s+)(0x[0-9]+)\\b"
+ },
+ {
+ "name": "variable.parameter.teal",
+ "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|ApplicationArgs|NumAppArgs|Accounts|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|Assets|NumAssets|Applications|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|Logs|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr)\\b"
+ }
+ ]
+ },
+ "pragmas": {
+ "name": "support.function.teal",
+ "match": "^#pragma\\b.*$"
+ },
+ "strings": {
+ "name": "string.quoted.double.teal",
+ "begin": "\"",
+ "end": "\"",
+ "patterns": [
+ {
+ "name": "constant.character.escape.teal",
+ "match": "\\\\(x[0-9A-Fa-f]{2}|.|$)"
+ }
+ ]
+ }
+ },
+ "scopeName": "source.teal"
+}
diff --git a/data/transactions/logic/tlhc.py b/data/transactions/logic/tlhc.py
index f436b7810..ce8c8d793 100755
--- a/data/transactions/logic/tlhc.py
+++ b/data/transactions/logic/tlhc.py
@@ -69,11 +69,11 @@ def main():
out.write(code)
try:
out.close()
- except Exeption as e:
+ except Exception as e:
print(e)
try:
secretout.close()
- except Exeption as e:
+ except Exception as e:
print(e)
return
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 83721e5e1..a2c1de369 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -380,7 +380,7 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
} else {
// This will check version matching, but not downgrading. That
// depends on chain state (so we pass an empty AppParams)
- err := CheckContractVersions(tx.ApprovalProgram, tx.ClearStateProgram, basics.AppParams{})
+ err := CheckContractVersions(tx.ApprovalProgram, tx.ClearStateProgram, basics.AppParams{}, &proto)
if err != nil {
return err
}
@@ -713,14 +713,14 @@ func ProgramVersion(bytecode []byte) (version uint64, length int, err error) {
return version, vlen, nil
}
-// ExtraProgramChecksVersion is version of AVM programs that are subject to
-// extra test - approval and clear must match versions, and they may not be
-// downgraded
-const ExtraProgramChecksVersion = 6
+// syncProgramsVersion is version of AVM programs that are required to have
+// matching versions between approval and clearstate.
+const syncProgramsVersion = 6
// CheckContractVersions ensures that for v6 and higher two programs are version
-// matched, and that they are not a downgrade.
-func CheckContractVersions(approval []byte, clear []byte, previous basics.AppParams) error {
+// matched, and that they are not a downgrade. If proto.AllowV4InnerAppls, then
+// no downgrades are allowed, regardless of version.
+func CheckContractVersions(approval []byte, clear []byte, previous basics.AppParams, proto *config.ConsensusParams) error {
av, _, err := ProgramVersion(approval)
if err != nil {
return fmt.Errorf("bad ApprovalProgram: %v", err)
@@ -729,18 +729,30 @@ func CheckContractVersions(approval []byte, clear []byte, previous basics.AppPar
if err != nil {
return fmt.Errorf("bad ClearStateProgram: %v", err)
}
- if av >= ExtraProgramChecksVersion || cv >= ExtraProgramChecksVersion {
+ if av >= syncProgramsVersion || cv >= syncProgramsVersion {
if av != cv {
return fmt.Errorf("program version mismatch: %d != %d", av, cv)
}
}
- if len(previous.ApprovalProgram) != 0 { // if creation or in call from WellFormed() previous is empty
- pv, _, err := ProgramVersion(previous.ApprovalProgram)
+ // The downgrade check ensures that if app A opts its account into app B
+ // (which requires B's CSP to be a callable version), the CSP will STAY
+ // callable. That way, A can certainly ClearState its account out of B.
+ if len(previous.ApprovalProgram) != 0 { // in creation and in call from WellFormed() previous is empty
+ pav, _, err := ProgramVersion(previous.ApprovalProgram)
if err != nil {
return err
}
- if pv >= ExtraProgramChecksVersion && av < pv {
- return fmt.Errorf("program version downgrade: %d < %d", av, pv)
+ if pav >= proto.MinInnerApplVersion && av < pav {
+ return fmt.Errorf("approval program version downgrade: %d < %d", av, pav)
+ }
+ }
+ if len(previous.ClearStateProgram) != 0 {
+ pcv, _, err := ProgramVersion(previous.ClearStateProgram)
+ if err != nil {
+ return err
+ }
+ if pcv >= proto.MinInnerApplVersion && cv < pcv {
+ return fmt.Errorf("clearstate program version downgrade: %d < %d", cv, pcv)
}
}
return nil
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index f4d8deaf3..7f6fc2b9f 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -85,7 +85,7 @@ func PrepareGroupContext(group []transactions.SignedTxn, contextHdr bookkeeping.
},
consensusVersion: contextHdr.CurrentProtocol,
consensusParams: consensusParams,
- minTealVersion: logic.ComputeMinTealVersion(transactions.WrapSignedTxnsWithAD(group), false),
+ minTealVersion: logic.ComputeMinTealVersion(transactions.WrapSignedTxnsWithAD(group)),
signedGroupTxns: group,
}, nil
}
diff --git a/data/transactions/verify/verifiedTxnCache.go b/data/transactions/verify/verifiedTxnCache.go
index fdd502833..06798f3b4 100644
--- a/data/transactions/verify/verifiedTxnCache.go
+++ b/data/transactions/verify/verifiedTxnCache.go
@@ -128,7 +128,7 @@ func (v *verifiedTransactionCache) GetUnverifiedTranscationGroups(txnGroups [][]
for txnGroupIndex := 0; txnGroupIndex < len(txnGroups); txnGroupIndex++ {
signedTxnGroup := txnGroups[txnGroupIndex]
verifiedTxn := 0
- groupCtx.minTealVersion = logic.ComputeMinTealVersion(transactions.WrapSignedTxnsWithAD(signedTxnGroup), false)
+ groupCtx.minTealVersion = logic.ComputeMinTealVersion(transactions.WrapSignedTxnsWithAD(signedTxnGroup))
baseBucket := v.base
for txnIdx := 0; txnIdx < len(signedTxnGroup); txnIdx++ {
diff --git a/docker/build/cicd.ubuntu.Dockerfile b/docker/build/cicd.ubuntu.Dockerfile
index 00ec1afc4..011da3182 100644
--- a/docker/build/cicd.ubuntu.Dockerfile
+++ b/docker/build/cicd.ubuntu.Dockerfile
@@ -3,11 +3,12 @@ ARG ARCH="amd64"
FROM ${ARCH}/ubuntu:18.04
ARG GOLANG_VERSION
ARG ARCH="amd64"
+ARG GOARCH="amd64"
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y build-essential git libboost-all-dev wget sqlite3 autoconf jq bsdmainutils shellcheck awscli
WORKDIR /root
-RUN wget https://dl.google.com/go/go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz \
- && tar -xvf go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz && \
+RUN wget https://dl.google.com/go/go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz \
+ && tar -xvf go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz && \
mv go /usr/local
ENV GOROOT=/usr/local/go \
GOPATH=$HOME/go \
@@ -17,6 +18,7 @@ COPY . $GOPATH/src/github.com/algorand/go-algorand
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \
GOPROXY=https://proxy.golang.org,https://pkg.go.dev,https://goproxy.io,direct
WORKDIR $GOPATH/src/github.com/algorand/go-algorand
+RUN git config --global --add safe.directory '*'
RUN make clean
RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
mkdir -p $GOPATH/src/github.com/algorand/go-algorand
diff --git a/gen/generate.go b/gen/generate.go
index 05c2ecb97..dc76a485d 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -27,6 +27,7 @@ import (
"sort"
"sync"
"sync/atomic"
+ "time"
"github.com/algorand/go-deadlock"
@@ -301,6 +302,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
}()
}
+ createStart := time.Now()
creatingWalletsWaitGroup.Add(concurrentWalletGenerators)
for routinesCounter := 0; routinesCounter < concurrentWalletGenerators; routinesCounter++ {
go createWallet()
@@ -375,7 +377,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
err = ioutil.WriteFile(filepath.Join(outDir, config.GenesisJSONFile), append(jsonData, '\n'), 0666)
if (verbose) && (rootKeyCreated > 0 || partKeyCreated > 0) {
- fmt.Printf("Created %d new rootkeys and %d new partkeys.\n", rootKeyCreated, partKeyCreated)
+ fmt.Printf("Created %d new rootkeys and %d new partkeys in %s.\n", rootKeyCreated, partKeyCreated, time.Since(createStart))
fmt.Printf("NOTICE: Participation keys are valid for a period of %d rounds. After this many rounds the network will stall unless new keys are registered.\n", lastWalletValid-firstWalletValid)
}
diff --git a/go.mod b/go.mod
index c2d5d8409..c917e62ad 100644
--- a/go.mod
+++ b/go.mod
@@ -1,16 +1,16 @@
module github.com/algorand/go-algorand
-go 1.14
+go 1.16
require (
github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123
- github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d
- github.com/algorand/go-deadlock v0.2.1
+ github.com/algorand/go-codec/codec v1.1.8
+ github.com/algorand/go-deadlock v0.2.2
github.com/algorand/go-sumhash v0.1.0
- github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64
- github.com/algorand/msgp v1.1.49
- github.com/algorand/oapi-codegen v1.3.5-algorand5
- github.com/algorand/websocket v1.4.4
+ github.com/algorand/graphtrace v0.1.0
+ github.com/algorand/msgp v1.1.50
+ github.com/algorand/oapi-codegen v1.3.7
+ github.com/algorand/websocket v1.4.5
github.com/aws/aws-sdk-go v1.16.5
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e
github.com/cpuguy83/go-md2man v1.0.8 // indirect
@@ -27,10 +27,9 @@ require (
github.com/gopherjs/gopherwasm v1.0.1 // indirect
github.com/gorilla/context v1.1.1 // indirect
github.com/gorilla/mux v1.6.2
- github.com/gorilla/websocket v1.4.2 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmoiron/sqlx v1.2.0
- github.com/karalabe/hid v1.0.0
+ github.com/karalabe/usb v0.0.2
github.com/labstack/echo/v4 v4.1.17
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-sqlite3 v1.10.0
@@ -39,14 +38,13 @@ require (
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/olivere/elastic v6.2.14+incompatible
github.com/russross/blackfriday v1.5.2 // indirect
- github.com/sirupsen/logrus v1.4.2
+ github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.5 // indirect
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.7.1
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
- golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
- golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
- golang.org/x/text v0.3.3
+ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
+ golang.org/x/text v0.3.7
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
diff --git a/go.sum b/go.sum
index 77152d167..73e446a9f 100644
--- a/go.sum
+++ b/go.sum
@@ -1,28 +1,27 @@
github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123 h1:cnUjJ/iqUjJNbhUzgmxbfwHMVFnz+DLnNQx8uJcGaks=
github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
-github.com/algorand/go-codec v1.1.2 h1:QWS9YC3EEWBpJq5AqFPELcCJ2QPpTIg9aqR2K/sRDq4=
-github.com/algorand/go-codec v1.1.2/go.mod h1:A3YI4V24jUUnU1eNekNmx2fLi60FvlNssqOiUsyfNM8=
-github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d h1:W9MgGUodEl4Y4+CxeEr+T3fZ26kOcWA4yfqhjbFxxmI=
-github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d/go.mod h1:qm6LyXvDa1+uZJxaVg8X+OEjBqt/zDinDa2EohtTDxU=
-github.com/algorand/go-deadlock v0.2.1 h1:TQPQwWAB133bS5uwHpmrgH5hCMyZK5hnUW26aqWMvq4=
-github.com/algorand/go-deadlock v0.2.1/go.mod h1:HgdF2cwtBIBCL7qmUaozuG/UIZFR6PLpSMR58pvWiXE=
+github.com/algorand/go-codec v1.1.8 h1:XDSreeeZY8gMst6Edz4RBkl08/DGMJOeHYkoXL2B7wI=
+github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4=
+github.com/algorand/go-codec/codec v1.1.8 h1:lsFuhcOH2LiEhpBH3BVUUkdevVmwCRyvb7FCAAPeY6U=
+github.com/algorand/go-codec/codec v1.1.8/go.mod h1:tQ3zAJ6ijTps6V+wp8KsGDnPC2uhHVC7ANyrtkIY0bA=
+github.com/algorand/go-deadlock v0.2.2 h1:L7AKATSUCzoeVuOgpTipfCEjdUu5ECmlje8R7lP9DOY=
+github.com/algorand/go-deadlock v0.2.2/go.mod h1:Hat1OXKqKNUcN/iv74FjGhF4hsOE2l7gOgQ9ZVIq6Fk=
github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg=
github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc=
-github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64 h1:yvKeJdS/mvLRiyIxu8j5BQDXIzs1XbC9/22KycJnt3A=
-github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64/go.mod h1:qFtQmC+kmsfnLfS9j3xgKtzsWyozemL5ek1R4dWZa5c=
-github.com/algorand/msgp v1.1.49 h1:YBFRcYZNsD2WgzXONvzFrjv1/887pWzJSx874VL4P6g=
-github.com/algorand/msgp v1.1.49/go.mod h1:oyDY2SIeM1bytVYJTL88nt9kVeEBC00Avyqcnyrq/ec=
-github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI=
-github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY=
-github.com/algorand/websocket v1.4.4 h1:BL9atWs/7tkV73NCwiLZ5YqDENMBsSxozc5gDtPdsQ4=
-github.com/algorand/websocket v1.4.4/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8=
+github.com/algorand/graphtrace v0.1.0 h1:QemP1iT0W56SExD0NfiU6rsG34/v0Je6bg5UZnptEUM=
+github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc=
+github.com/algorand/msgp v1.1.50 h1:Mvsjs5LCE6HsXXbwJXD8ol1Y+c+QMoFNM4j0CY+mFGo=
+github.com/algorand/msgp v1.1.50/go.mod h1:R5sJrW9krk4YwNo+rs82Kq6V55q/zNgACwWqt3sQBM4=
+github.com/algorand/oapi-codegen v1.3.7 h1:TdXeGljgrnLXSCGPdeY6g6+i/G0Rr5CkjBgUJY6ht48=
+github.com/algorand/oapi-codegen v1.3.7/go.mod h1:UvOtAiP3hc0M2GUKBnZVTjLe3HKGDKh6y9rs3e3JyOg=
+github.com/algorand/websocket v1.4.5 h1:Cs6UTaCReAl02evYxmN8k57cNHmBILRcspfSxYg4AJE=
+github.com/algorand/websocket v1.4.5/go.mod h1:79n6FSZY08yQagHzE/YWZqTPBYfY5wc3IS+UTZe1W5c=
github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc=
github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
-github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4 h1:Fphwr1XDjkTR/KFbrrkLfY6D2CEOlHqFGomQQrxcHFs=
github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -44,7 +43,6 @@ github.com/getkin/kin-openapi v0.22.0 h1:J5IFyKd/5yuB6AZAgwK0CMBKnabWcmkowtsl6bR
github.com/getkin/kin-openapi v0.22.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-chi/chi v4.1.1+incompatible h1:MmTgB0R8Bt/jccxp+t6S/1VGIKdJw5J74CK/c9tTfA4=
github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -52,7 +50,6 @@ github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKo
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
@@ -66,8 +63,6 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
@@ -76,15 +71,12 @@ github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/karalabe/hid v1.0.0 h1:+/CIMNXhSU/zIJgnIvBD2nKHxS/bnRHhhs9xBryLpPo=
-github.com/karalabe/hid v1.0.0/go.mod h1:Vr51f8rUOLYrfrWDFlV12GGQgM5AT8sVh+2fY4MPeu8=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
+github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o=
github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI=
github.com/labstack/echo/v4 v4.1.17 h1:PQIBaRplyRy3OjwILGkPg89JRtH2x5bssi59G2EL3fo=
github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ=
@@ -94,10 +86,8 @@ github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54 h1:p8zN0Xu28xyEkPpqLbFXAnjdgBVvTJCpfOtoDf/+/RQ=
github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -124,30 +114,28 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4=
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -157,8 +145,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -166,43 +154,46 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200423205358-59e73619c742 h1:9OGWpORUXvk8AsaBJlpzzDx7Srv/rSK6rvjcsJq4rJo=
golang.org/x/tools v0.0.0-20200423205358-59e73619c742/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index c6ff9cc84..7ace8c55d 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -620,11 +620,11 @@ func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta ledgercore.State
au.accountsReadCond.Broadcast()
}
-// Totals returns the totals for a given round
-func (au *accountUpdates) Totals(rnd basics.Round) (totals ledgercore.AccountTotals, err error) {
+// OnlineTotals returns the online totals of all accounts at the end of round rnd.
+func (au *accountUpdates) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
- return au.totalsImpl(rnd)
+ return au.onlineTotalsImpl(rnd)
}
// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number
@@ -728,15 +728,15 @@ func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, c
return aul.au.getCreatorForRound(rnd, cidx, ctype, false /* don't sync */)
}
-// totalsImpl returns the totals for a given round
-func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals ledgercore.AccountTotals, err error) {
+// onlineTotalsImpl returns the online totals of all accounts at the end of round rnd.
+func (au *accountUpdates) onlineTotalsImpl(rnd basics.Round) (basics.MicroAlgos, error) {
offset, err := au.roundOffset(rnd)
if err != nil {
- return
+ return basics.MicroAlgos{}, err
}
- totals = au.roundTotals[offset]
- return
+ totals := au.roundTotals[offset]
+ return totals.Online.Money, nil
}
// latestTotalsImpl returns the totals of all accounts for the most recent round, as well as the round number
@@ -895,9 +895,10 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account
addResource := func(cidx basics.CreatableIndex, round basics.Round, res ledgercore.AccountResource) error {
foundRound, ok := foundResources[cidx]
if !ok { // first time seeing this cidx
- resourceCount++
foundResources[cidx] = round
- ledgercore.AssignAccountResourceToAccountData(cidx, res, &data)
+ if ledgercore.AssignAccountResourceToAccountData(cidx, res, &data) {
+ resourceCount++
+ }
return nil
}
// is this newer than current "found" rnd for this resource?
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index e2c577ee2..d2faf6ec7 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -274,7 +274,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
latest := au.latest()
require.Equal(t, latestRnd, latest)
- _, err := au.Totals(latest + 1)
+ _, err := au.OnlineTotals(latest + 1)
require.Error(t, err)
var validThrough basics.Round
@@ -283,7 +283,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
require.Equal(t, basics.Round(0), validThrough)
if base > 0 {
- _, err := au.Totals(base - 1)
+ _, err := au.OnlineTotals(base - 1)
require.Error(t, err)
_, validThrough, err = au.LookupWithoutRewards(base-1, ledgertesting.RandomAddress())
@@ -338,13 +338,9 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
bll := accts[rnd]
require.Equal(t, all, bll)
- totals, err := au.Totals(rnd)
+ totals, err := au.OnlineTotals(rnd)
require.NoError(t, err)
- require.Equal(t, totals.Online.Money.Raw, totalOnline)
- require.Equal(t, totals.Offline.Money.Raw, totalOffline)
- require.Equal(t, totals.NotParticipating.Money.Raw, totalNotPart)
- require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline)
- require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart)
+ require.Equal(t, totals.Raw, totalOnline)
d, validThrough, err := au.LookupWithoutRewards(rnd, ledgertesting.RandomAddress())
require.NoError(t, err)
@@ -457,7 +453,8 @@ func TestAcctUpdates(t *testing.T) {
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -565,7 +562,8 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -653,7 +651,8 @@ func BenchmarkBalancesChanges(b *testing.B) {
}
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(b, i-1, prevRound)
require.NoError(b, err)
newPool := totals[testPoolAddr]
@@ -786,7 +785,8 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
rewardLevel += rewardLevelDelta
updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1595,7 +1595,8 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1698,7 +1699,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1734,7 +1736,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1817,7 +1820,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1852,7 +1856,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1889,7 +1894,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -2034,7 +2040,8 @@ func TestAcctUpdatesResources(t *testing.T) {
updates.UpsertAssetResource(addr1, aidx4, creatorParams, ledgercore.AssetHoldingDelta{Holding: &basics.AssetHolding{Amount: 0}})
}
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
base := accts[i-1]
@@ -2224,7 +2231,8 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -2349,6 +2357,74 @@ func TestAcctUpdatesLookupRetry(t *testing.T) {
})
}
+// auCommitSync is a helper function calling the committing sequence similarly to what tracker registry does
+func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLedgerForTracker) {
+ _, maxLookback := au.committedUpTo(rnd)
+ dcc := &deferredCommitContext{
+ deferredCommitRange: deferredCommitRange{
+ lookback: maxLookback,
+ },
+ }
+ cdr := &dcc.deferredCommitRange
+ cdr = au.produceCommittingTask(rnd, ml.trackers.dbRound, cdr)
+ if cdr != nil {
+ func() {
+ dcc.deferredCommitRange = *cdr
+ ml.trackers.accountsWriting.Add(1)
+ defer ml.trackers.accountsWriting.Done()
+
+ // do not take any locks since all operations are synchronous
+ newBase := basics.Round(dcc.offset) + dcc.oldBase
+ dcc.newBase = newBase
+
+ err := au.prepareCommit(dcc)
+ require.NoError(t, err)
+ err = ml.trackers.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ err = au.commitRound(ctx, tx, dcc)
+ if err != nil {
+ return err
+ }
+ err = updateAccountsRound(tx, newBase)
+ return err
+ })
+ require.NoError(t, err)
+ ml.trackers.dbRound = newBase
+ au.postCommit(ml.trackers.ctx, dcc)
+ au.postCommitUnlocked(ml.trackers.ctx, dcc)
+ }()
+ }
+}
+
+type auNewBlockOpts struct {
+ updates ledgercore.AccountDeltas
+ version protocol.ConsensusVersion
+ protoParams config.ConsensusParams
+ knownCreatables map[basics.CreatableIndex]bool
+}
+
+func auNewBlock(t *testing.T, rnd basics.Round, au *accountUpdates, base map[basics.Address]basics.AccountData, data auNewBlockOpts) {
+ rewardLevel := uint64(0)
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, rnd-1, prevRound)
+ require.NoError(t, err)
+
+ newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, data.updates, rewardLevel, data.protoParams, base, prevTotals)
+
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(rnd),
+ },
+ }
+ blk.RewardsLevel = rewardLevel
+ blk.CurrentProtocol = data.version
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, data.updates.Len(), 0)
+ delta.Accts.MergeAccounts(data.updates)
+ delta.Creatables = creatablesFromUpdates(base, data.updates, data.knownCreatables)
+ delta.Totals = newTotals
+
+ au.newBlock(blk, delta)
+}
+
// TestAcctUpdatesLookupLatestCacheRetry simulates a situation when base account and resources are in a cache but
// account updates advances while calling lookupLatest
// The idea of the test:
@@ -2400,66 +2476,6 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
aidx2 := basics.AssetIndex(2)
knownCreatables := make(map[basics.CreatableIndex]bool)
- commitSync := func(rnd basics.Round) {
- _, maxLookback := au.committedUpTo(rnd)
- dcc := &deferredCommitContext{
- deferredCommitRange: deferredCommitRange{
- lookback: maxLookback,
- },
- }
- cdr := &dcc.deferredCommitRange
- cdr = au.produceCommittingTask(rnd, ml.trackers.dbRound, cdr)
- if cdr != nil {
- func() {
- dcc.deferredCommitRange = *cdr
- ml.trackers.accountsWriting.Add(1)
- defer ml.trackers.accountsWriting.Done()
-
- // do not take any locks since all operations are synchronous
- newBase := basics.Round(dcc.offset) + dcc.oldBase
- dcc.newBase = newBase
-
- err := au.prepareCommit(dcc)
- require.NoError(t, err)
- err = ml.trackers.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- err = au.commitRound(ctx, tx, dcc)
- if err != nil {
- return err
- }
- err = updateAccountsRound(tx, newBase)
- return err
- })
- require.NoError(t, err)
- ml.trackers.dbRound = newBase
- au.postCommit(ml.trackers.ctx, dcc)
- au.postCommitUnlocked(ml.trackers.ctx, dcc)
- }()
-
- }
- }
-
- newBlock := func(au *accountUpdates, rnd basics.Round, base map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas) {
- rewardLevel := uint64(0)
- prevTotals, err := au.Totals(basics.Round(rnd - 1))
- require.NoError(t, err)
-
- newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardLevel, protoParams, base, prevTotals)
-
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(rnd),
- },
- }
- blk.RewardsLevel = rewardLevel
- blk.CurrentProtocol = testProtocolVersion
- delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
- delta.Accts.MergeAccounts(updates)
- delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
- delta.Totals = newTotals
-
- au.newBlock(blk, delta)
- }
-
// the test 1 requires 2 blocks with different resource state, au requires MaxBalLookback block to start persisting
for i := basics.Round(1); i <= basics.Round(protoParams.MaxBalLookback+2); i++ {
var updates ledgercore.AccountDeltas
@@ -2476,10 +2492,11 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
accts = append(accts, newAccts)
// prepare block
- newBlock(au, i, base, updates)
+ opts := auNewBlockOpts{updates, testProtocolVersion, protoParams, knownCreatables}
+ auNewBlock(t, i, au, base, opts)
// commit changes synchroniously
- commitSync(i)
+ auCommitSync(t, i, au, ml)
}
// ensure rounds
@@ -2539,8 +2556,9 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
au.accountsMu.Lock()
au.cachedDBRound = oldCachedDBRound
au.accountsMu.Unlock()
- newBlock(au, rnd+1, accts[rnd], ledgercore.AccountDeltas{})
- commitSync(rnd + 1)
+ opts := auNewBlockOpts{ledgercore.AccountDeltas{}, testProtocolVersion, protoParams, knownCreatables}
+ auNewBlock(t, rnd+1, au, accts[rnd], opts)
+ auCommitSync(t, rnd+1, au, ml)
wg.Wait()
@@ -2550,3 +2568,91 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
require.Equal(t, uint64(100), ad.Assets[aidx1].Amount)
require.Equal(t, uint64(200), ad.Assets[aidx2].Amount)
}
+
+// TestAcctUpdatesLookupResources creates 3 assets, deletes one
+// and checks au.resources with deleted resources are not counted toward totals
+func TestAcctUpdatesLookupResources(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(1, true)}
+ pooldata := basics.AccountData{}
+ pooldata.MicroAlgos.Raw = 100 * 1000 * 1000 * 1000 * 1000
+ pooldata.Status = basics.NotParticipating
+ accts[0][testPoolAddr] = pooldata
+
+ sinkdata := basics.AccountData{}
+ sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
+ sinkdata.Status = basics.NotParticipating
+ accts[0][testSinkAddr] = sinkdata
+
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestAcctUpdatesLookupResources")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = 2
+ protoParams.SeedLookback = 1
+ protoParams.SeedRefreshInterval = 1
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, accts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ au := newAcctUpdates(t, ml, conf, ".")
+ defer au.close()
+
+ var addr1 basics.Address
+ for addr := range accts[0] {
+ if addr != testSinkAddr && addr != testPoolAddr {
+ addr1 = addr
+ break
+ }
+ }
+
+ aidx1 := basics.AssetIndex(1)
+ aidx2 := basics.AssetIndex(2)
+ aidx3 := basics.AssetIndex(3)
+ knownCreatables := make(map[basics.CreatableIndex]bool)
+
+ // test requires 5 blocks: 1 with aidx1, protoParams.MaxBalLookback empty blocks to commit the first one
+ // and 1 block with aidx2 and aidx3, and another one with aidx2 deleted
+ for i := basics.Round(1); i <= basics.Round(protoParams.MaxBalLookback+3); i++ {
+ var updates ledgercore.AccountDeltas
+
+ // add data
+ if i == 1 {
+ updates.Upsert(addr1, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 1000000}, TotalAssets: 1}})
+ updates.UpsertAssetResource(addr1, aidx1, ledgercore.AssetParamsDelta{}, ledgercore.AssetHoldingDelta{Holding: &basics.AssetHolding{Amount: 100}})
+ }
+ if i == basics.Round(protoParams.MaxBalLookback+2) {
+ updates.Upsert(addr1, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 1000000}, TotalAssets: 3}})
+ updates.UpsertAssetResource(addr1, aidx2, ledgercore.AssetParamsDelta{}, ledgercore.AssetHoldingDelta{Holding: &basics.AssetHolding{Amount: 200}})
+ updates.UpsertAssetResource(addr1, aidx3, ledgercore.AssetParamsDelta{}, ledgercore.AssetHoldingDelta{Holding: &basics.AssetHolding{Amount: 300}})
+ }
+ if i == basics.Round(protoParams.MaxBalLookback+3) {
+ updates.Upsert(addr1, ledgercore.AccountData{AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 1000000}, TotalAssets: 2}})
+ updates.UpsertAssetResource(addr1, aidx2, ledgercore.AssetParamsDelta{}, ledgercore.AssetHoldingDelta{Deleted: true})
+ }
+
+ base := accts[i-1]
+ newAccts := applyPartialDeltas(base, updates)
+ accts = append(accts, newAccts)
+
+ // prepare block
+ opts := auNewBlockOpts{updates, testProtocolVersion, protoParams, knownCreatables}
+ auNewBlock(t, i, au, base, opts)
+
+ if i <= basics.Round(protoParams.MaxBalLookback+1) {
+ auCommitSync(t, i, au, ml)
+ }
+ // do not commit two last blocks to keep data in memory deltas
+ }
+ data, rnd, _, err := au.lookupLatest(addr1)
+ require.NoError(t, err)
+ require.Equal(t, basics.Round(protoParams.MaxBalLookback+3), rnd)
+ require.Len(t, data.Assets, 2)
+ require.Contains(t, data.Assets, aidx1)
+ require.Contains(t, data.Assets, aidx3)
+ require.NotContains(t, data.Assets, aidx2)
+}
diff --git a/ledger/applications_test.go b/ledger/applications_test.go
index 235f18b82..364d9a9d5 100644
--- a/ledger/applications_test.go
+++ b/ledger/applications_test.go
@@ -33,12 +33,18 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
+// commitRound schedules a commit for known offset and dbRound
+// and waits for completion
func commitRound(offset uint64, dbRound basics.Round, l *Ledger) {
+ commitRoundLookback(dbRound+basics.Round(offset), l)
+}
+
+func commitRoundLookback(lookback basics.Round, l *Ledger) {
l.trackers.mu.Lock()
l.trackers.lastFlushTime = time.Time{}
l.trackers.mu.Unlock()
- l.trackers.scheduleCommit(l.Latest(), l.Latest()-(dbRound+basics.Round(offset)))
+ l.trackers.scheduleCommit(l.Latest(), l.Latest()-lookback)
// wait for the operation to complete. Once it does complete, the tr.lastFlushTime is going to be updated, so we can
// use that as an indicator.
for {
@@ -49,7 +55,6 @@ func commitRound(offset uint64, dbRound basics.Round, l *Ledger) {
break
}
time.Sleep(time.Millisecond)
-
}
}
diff --git a/ledger/apply/application.go b/ledger/apply/application.go
index 7b0e1eedf..e02af73c5 100644
--- a/ledger/apply/application.go
+++ b/ledger/apply/application.go
@@ -385,7 +385,7 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio
// If this txn is going to set new programs (either for creation or
// update), check that the programs are valid and not too expensive
if ac.ApplicationID == 0 || ac.OnCompletion == transactions.UpdateApplicationOC {
- err := transactions.CheckContractVersions(ac.ApprovalProgram, ac.ClearStateProgram, params)
+ err := transactions.CheckContractVersions(ac.ApprovalProgram, ac.ClearStateProgram, params, evalParams.Proto)
if err != nil {
return err
}
diff --git a/ledger/apply/asset.go b/ledger/apply/asset.go
index 25764c71a..53b4bbfaf 100644
--- a/ledger/apply/asset.go
+++ b/ledger/apply/asset.go
@@ -231,7 +231,7 @@ func putIn(balances Balances, addr basics.Address, asset basics.AssetIndex, amou
return err
}
if !ok {
- return fmt.Errorf("asset %v missing from %v", asset, addr)
+ return fmt.Errorf("receiver error: must optin, asset %v missing from %v", asset, addr)
}
if rcvHolding.Frozen && !bypassFreeze {
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 4b7bcb545..eb812937f 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -445,7 +445,8 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
diff --git a/ledger/internal/apptxn_test.go b/ledger/internal/apptxn_test.go
index daf56b465..a4d9da8e0 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/internal/apptxn_test.go
@@ -90,7 +90,7 @@ func TestPayAction(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &create, &fund, &payout1)
vb := endBlock(t, l, eval)
@@ -111,11 +111,11 @@ func TestPayAction(t *testing.T) {
// Build up Residue in RewardsState so it's ready to pay
for i := 1; i < 10; i++ {
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
endBlock(t, l, eval)
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
payout2 := txntest.Txn{
Type: "appl",
Sender: addrs[1],
@@ -162,17 +162,17 @@ func TestPayAction(t *testing.T) {
Receiver: ai.Address(),
Amount: 10 * 1000 * 1000000, // account min balance, plus fees
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &tenkalgos)
endBlock(t, l, eval)
beforepay := micros(t, l, ai.Address())
// Build up Residue in RewardsState so it's ready to pay again
for i := 1; i < 10; i++ {
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
endBlock(t, l, eval)
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, payout2.Noted("2"))
vb = endBlock(t, l, eval)
@@ -245,7 +245,7 @@ submit: itxn_submit
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &asa, &app)
vb := endBlock(t, l, eval)
@@ -262,7 +262,7 @@ submit: itxn_submit
// stay under 1M, to avoid rewards complications
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &fund)
endBlock(t, l, eval)
@@ -275,7 +275,7 @@ submit: itxn_submit
}
// Fail, because app account is not opted in.
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &fundgold, fmt.Sprintf("asset %d missing", asaIndex))
endBlock(t, l, eval)
@@ -292,7 +292,7 @@ submit: itxn_submit
}
// Tell the app to opt itself in.
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &optin)
endBlock(t, l, eval)
@@ -301,7 +301,7 @@ submit: itxn_submit
require.Equal(t, amount, uint64(0))
// Now, suceed, because opted in.
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &fundgold)
endBlock(t, l, eval)
@@ -317,7 +317,7 @@ submit: itxn_submit
ForeignAssets: []basics.AssetIndex{asaIndex},
Accounts: []basics.Address{addrs[0]},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &withdraw)
endBlock(t, l, eval)
@@ -325,7 +325,7 @@ submit: itxn_submit
require.True(t, in)
require.Equal(t, amount, uint64(10000))
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, withdraw.Noted("2"))
endBlock(t, l, eval)
@@ -333,7 +333,7 @@ submit: itxn_submit
require.True(t, in) // Zero left, but still opted in
require.Equal(t, amount, uint64(0))
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, withdraw.Noted("3"), "underflow on subtracting")
endBlock(t, l, eval)
@@ -350,7 +350,7 @@ submit: itxn_submit
Accounts: []basics.Address{addrs[0]},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &close)
endBlock(t, l, eval)
@@ -359,13 +359,13 @@ submit: itxn_submit
require.Equal(t, amount, uint64(0))
// Now, fail again, opted out
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, fundgold.Noted("2"), fmt.Sprintf("asset %d missing", asaIndex))
endBlock(t, l, eval)
// Do it all again, so we can test closeTo when we have a non-zero balance
// Tell the app to opt itself in.
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, optin.Noted("a"), fundgold.Noted("a"))
endBlock(t, l, eval)
@@ -373,7 +373,7 @@ submit: itxn_submit
require.Equal(t, uint64(20000), amount)
left, _ := holding(t, l, addrs[0], asaIndex)
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, close.Noted("a"))
endBlock(t, l, eval)
@@ -390,6 +390,10 @@ func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *ledger.L
func newTestLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) *ledger.Ledger {
var genHash crypto.Digest
crypto.RandBytes(genHash[:])
+ return newTestLedgerFull(t, balances, cv, genHash)
+}
+
+func newTestLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest) *ledger.Ledger {
genBlock, err := bookkeeping.MakeGenesisBlock(cv, balances, "test", genHash)
require.NoError(t, err)
require.False(t, genBlock.FeeSink.IsZero())
@@ -461,7 +465,7 @@ func TestClawbackAction(t *testing.T) {
AssetReceiver: addrs[1],
XferAsset: asaIndex,
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &asa, &app, &optin)
vb := endBlock(t, l, eval)
@@ -482,7 +486,7 @@ func TestClawbackAction(t *testing.T) {
ForeignAssets: []basics.AssetIndex{asaIndex},
Accounts: []basics.Address{addrs[0], addrs[1]},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
err := txgroup(t, l, eval, &overpay, &clawmove)
require.NoError(t, err)
endBlock(t, l, eval)
@@ -531,7 +535,7 @@ skipclose:
RekeyTo: appIndex.Address(),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &ezpayer, &rekey)
endBlock(t, l, eval)
@@ -541,7 +545,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2]}, // pay 2 from 0 (which was rekeyed)
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &useacct)
endBlock(t, l, eval)
@@ -558,7 +562,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[2], addrs[0]}, // pay 0 from 2
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &baduse, "unauthorized")
endBlock(t, l, eval)
@@ -571,7 +575,7 @@ skipclose:
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2], addrs[3]}, // close to 3
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &close)
endBlock(t, l, eval)
@@ -583,13 +587,13 @@ skipclose:
Receiver: addrs[0],
Amount: 10_000_000,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &payback)
endBlock(t, l, eval)
require.Equal(t, uint64(10_000_000), micros(t, l, addrs[0]))
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, useacct.Noted("2"), "unauthorized")
endBlock(t, l, eval)
}
@@ -655,7 +659,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
Amount: 1_000_000,
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &create, &rekey, &fund)
endBlock(t, l, eval)
@@ -665,7 +669,7 @@ func TestRekeyActionCloseAccount(t *testing.T) {
ApplicationID: appIndex,
Accounts: []basics.Address{addrs[0], addrs[2]},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &useacct, "unauthorized")
endBlock(t, l, eval)
}
@@ -716,7 +720,7 @@ func TestDuplicatePayAction(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &create, &fund, &paytwice, create.Noted("in same block"))
vb := endBlock(t, l, eval)
@@ -737,7 +741,7 @@ func TestDuplicatePayAction(t *testing.T) {
require.Equal(t, 188000, int(app))
// Now create another app, and see if it gets the index we expect.
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, create.Noted("again"))
vb = endBlock(t, l, eval)
@@ -782,12 +786,12 @@ func TestInnerTxnCount(t *testing.T) {
Accounts: []basics.Address{addrs[1]}, // pay self
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &create, &fund)
vb := endBlock(t, l, eval)
require.Equal(t, 2, int(vb.Block().TxnCounter))
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &payout1)
vb = endBlock(t, l, eval)
require.Equal(t, 4, int(vb.Block().TxnCounter))
@@ -907,7 +911,7 @@ submit: itxn_submit
Amount: 200_000, // exactly account min balance + one asset
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &app, &fund)
endBlock(t, l, eval)
@@ -918,7 +922,7 @@ submit: itxn_submit
ApplicationArgs: [][]byte{[]byte("create")},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
// Can't create an asset if you have exactly 200,000 and need to pay fee
txn(t, l, eval, &createAsa, "balance 199000 below min 200000")
// fund it some more and try again
@@ -947,7 +951,7 @@ submit: itxn_submit
ApplicationArgs: [][]byte{[]byte(a), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
ForeignAssets: []basics.AssetIndex{asaIndex},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
t.Log(a)
txn(t, l, eval, &check)
endBlock(t, l, eval)
@@ -960,7 +964,7 @@ submit: itxn_submit
ApplicationArgs: [][]byte{[]byte("freeze"), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
ForeignAssets: []basics.AssetIndex{asaIndex},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &nodice, "this transaction should be issued by the manager")
endBlock(t, l, eval)
@@ -1013,7 +1017,7 @@ func TestAsaDuringInit(t *testing.T) {
`,
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &prefund, &app)
vb := endBlock(t, l, eval)
@@ -1050,7 +1054,7 @@ func TestRekey(t *testing.T) {
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &app)
vb := endBlock(t, l, eval)
appIndex := vb.Block().Payset[0].ApplicationID
@@ -1067,7 +1071,7 @@ func TestRekey(t *testing.T) {
Sender: addrs[1],
ApplicationID: appIndex,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fund, &rekey)
txn(t, l, eval, rekey.Noted("2"), "unauthorized")
endBlock(t, l, eval)
@@ -1098,7 +1102,7 @@ func TestNote(t *testing.T) {
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &app)
vb := endBlock(t, l, eval)
appIndex := vb.Block().Payset[0].ApplicationID
@@ -1115,7 +1119,7 @@ func TestNote(t *testing.T) {
Sender: addrs[1],
ApplicationID: appIndex,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fund, &note)
vb = endBlock(t, l, eval)
alphabet := vb.Block().Payset[1].EvalDelta.InnerTxns[0].Txn.Note
@@ -1158,7 +1162,7 @@ nonpart:
}
// Create the app
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &app)
vb := endBlock(t, l, eval)
appIndex := vb.Block().Payset[0].ApplicationID
@@ -1171,7 +1175,7 @@ nonpart:
Receiver: appIndex.Address(),
Amount: 1_000_000_000,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &fund)
endBlock(t, l, eval)
@@ -1179,7 +1183,7 @@ nonpart:
// Build up Residue in RewardsState so it's ready to pay
for i := 1; i < 10; i++ {
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
endBlock(t, l, eval)
}
@@ -1190,7 +1194,7 @@ nonpart:
ApplicationID: appIndex,
ApplicationArgs: [][]byte{[]byte("pay")},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &pay)
endBlock(t, l, eval)
// 2000 was earned in rewards (- 1000 fee, -1 pay)
@@ -1203,7 +1207,7 @@ nonpart:
ApplicationID: appIndex,
ApplicationArgs: [][]byte{[]byte("nonpart")},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &nonpart)
endBlock(t, l, eval)
require.Equal(t, 999_999_999, int(micros(t, l, appIndex.Address())))
@@ -1211,10 +1215,10 @@ nonpart:
// Build up Residue in RewardsState so it's ready to pay AGAIN
// But expect no rewards
for i := 1; i < 100; i++ {
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
endBlock(t, l, eval)
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, pay.Noted("again"))
txn(t, l, eval, nonpart.Noted("again"), "cannot change online/offline")
endBlock(t, l, eval)
@@ -1243,7 +1247,7 @@ func TestInnerAppCall(t *testing.T) {
itxn_submit
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
index0 := vb.Block().Payset[0].ApplicationID
@@ -1261,7 +1265,7 @@ func TestInnerAppCall(t *testing.T) {
`),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &app1)
vb = endBlock(t, l, eval)
index1 := vb.Block().Payset[0].ApplicationID
@@ -1281,7 +1285,7 @@ func TestInnerAppCall(t *testing.T) {
ApplicationID: index1,
ForeignApps: []basics.AppIndex{index0},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fund0, &fund1, &call1)
endBlock(t, l, eval)
@@ -1332,7 +1336,7 @@ next2:
Receiver: calleeIndex.Address(),
Amount: 1_000_000,
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &callee, &fund)
vb := endBlock(t, l, eval)
require.Equal(t, calleeIndex, vb.Block().Payset[0].ApplicationID)
@@ -1363,7 +1367,7 @@ next2:
}
fund.Receiver = callerIndex.Address()
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &caller, &fund)
vb = endBlock(t, l, eval)
require.Equal(t, callerIndex, vb.Block().Payset[0].ApplicationID)
@@ -1374,7 +1378,7 @@ next2:
ApplicationID: callerIndex,
ForeignApps: []basics.AppIndex{calleeIndex},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &call)
vb = endBlock(t, l, eval)
tib := vb.Block().Payset[0]
@@ -1394,18 +1398,21 @@ next2:
// TestCreateAndUse checks that an ASA can be created in an early tx, and then
// used in a later app call tx (in the same group). This was not allowed until
-// v6, because of the strict adherence to the foreign-arrays rules.
+// teal 6 (v31), because of the strict adherence to the foreign-arrays rules.
func TestCreateAndUse(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- createapp := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ // At 30 the asset reference is illegal, then from v31 it works.
+ testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ createapp := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
itxn_begin
int axfer; itxn_field TypeEnum
int 0; itxn_field Amount
@@ -1414,141 +1421,153 @@ func TestCreateAndUse(t *testing.T) {
global CurrentApplicationAddress; itxn_field AssetReceiver
itxn_submit
`),
- }
- appIndex := basics.AppIndex(1)
+ }
+ appIndex := basics.AppIndex(1)
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appIndex.Address(),
+ Amount: 1_000_000,
+ }
- createasa := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
- },
- }
- asaIndex := basics.AssetIndex(3)
+ createasa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1000000,
+ Decimals: 3,
+ UnitName: "oz",
+ AssetName: "Gold",
+ URL: "https://gold.rush/",
+ },
+ }
+ asaIndex := basics.AssetIndex(3)
- use := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: basics.AppIndex(1),
- // The point of this test is to show the following (psychic) setting is unnecessary.
- //ForeignAssets: []basics.AssetIndex{asaIndex},
- }
+ use := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: basics.AppIndex(1),
+ // The point of this test is to show the following (psychic) setting is unnecessary.
+ //ForeignAssets: []basics.AssetIndex{asaIndex},
+ }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &createapp)
- txn(t, l, eval, &fund)
- err := txgroup(t, l, eval, &createasa, &use)
- require.NoError(t, err)
- vb := endBlock(t, l, eval)
+ dl.beginBlock()
+ dl.txn(&createapp)
+ dl.txn(&fund)
+ if ver == 30 {
+ dl.txgroup("invalid Asset reference", &createasa, &use)
+ dl.endBlock()
+ return
+ }
+ // v31 onward, create & use works
+ dl.txgroup("", &createasa, &use)
+ vb := dl.endBlock()
- require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
- require.Equal(t, asaIndex, vb.Block().Payset[2].ApplyData.ConfigAsset)
+ require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, asaIndex, vb.Block().Payset[2].ApplyData.ConfigAsset)
+ })
}
func TestGtxnEffects(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- createapp := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ // At 30 `gtxn CreatedAssetId is illegal, then from v31 it works.
+ testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ createapp := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
gtxn 0 CreatedAssetID
int 3
==
- assert
-`),
- }
- appIndex := basics.AppIndex(1)
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
+ assert`),
+ }
+ appIndex := basics.AppIndex(1)
- eval := nextBlock(t, l, true, nil)
- txns(t, l, eval, &createapp, &fund)
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appIndex.Address(),
+ Amount: 1_000_000,
+ }
- createasa := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
- },
- }
- asaIndex := basics.AssetIndex(3)
+ dl.beginBlock()
+ dl.txns(&createapp, &fund)
+
+ createasa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1000000,
+ Decimals: 3,
+ UnitName: "oz",
+ AssetName: "Gold",
+ URL: "https://gold.rush/",
+ },
+ }
+ asaIndex := basics.AssetIndex(3)
- see := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: basics.AppIndex(1),
- }
+ see := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: basics.AppIndex(1),
+ }
- err := txgroup(t, l, eval, &createasa, &see)
- require.NoError(t, err)
- vb := endBlock(t, l, eval)
+ if ver == 30 {
+ dl.txgroup("Unable to obtain effects from top-level transactions", &createasa, &see)
+ dl.endBlock()
+ return
+ }
+ dl.txgroup("", &createasa, &see)
+ vb := dl.endBlock()
- require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
- require.Equal(t, asaIndex, vb.Block().Payset[2].ApplyData.ConfigAsset)
+ require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
+ require.Equal(t, asaIndex, vb.Block().Payset[2].ApplyData.ConfigAsset)
+ })
}
func TestBasicReentry(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- app0 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ app0 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
itxn_begin
int appl
itxn_field TypeEnum
txn Applications 1
itxn_field ApplicationID
- itxn_submit
-`),
- }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &app0)
- vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ itxn_submit`),
+ }
+ vb := dl.fullBlock(&app0)
+ index0 := vb.Block().Payset[0].ApplicationID
- call1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[2],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index0},
- }
- eval = nextBlock(t, l, true, nil)
- txn(t, l, eval, &call1, "self-call")
- endBlock(t, l, eval)
+ call1 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: index0,
+ ForeignApps: []basics.AppIndex{index0},
+ }
+ dl.txn(&call1, "self-call")
+ })
}
func TestIndirectReentry(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1568,7 +1587,7 @@ func TestIndirectReentry(t *testing.T) {
itxn_submit
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
index0 := vb.Block().Payset[0].ApplicationID
@@ -1592,7 +1611,7 @@ func TestIndirectReentry(t *testing.T) {
itxn_submit
`),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &app1, &fund)
vb = endBlock(t, l, eval)
index1 := vb.Block().Payset[0].ApplicationID
@@ -1603,16 +1622,17 @@ func TestIndirectReentry(t *testing.T) {
ApplicationID: index0,
ForeignApps: []basics.AppIndex{index1, index0},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &call1, "attempt to re-enter")
endBlock(t, l, eval)
}
-// This tests a valid form of reentry (which may not be the correct word here).
+// TestValidAppReentry tests a valid form of reentry (which may not be the correct word here).
// When A calls B then returns to A then A calls C which calls B, the execution
// should not produce an error because B doesn't occur in the call stack twice.
func TestValidAppReentry(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -1639,7 +1659,7 @@ func TestValidAppReentry(t *testing.T) {
itxn_submit
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
index0 := vb.Block().Payset[0].ApplicationID
@@ -1661,7 +1681,7 @@ func TestValidAppReentry(t *testing.T) {
assert
`),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &app1, &fund0)
vb = endBlock(t, l, eval)
index1 := vb.Block().Payset[0].ApplicationID
@@ -1678,7 +1698,7 @@ func TestValidAppReentry(t *testing.T) {
itxn_submit
`),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &app2)
vb = endBlock(t, l, eval)
index2 := vb.Block().Payset[0].ApplicationID
@@ -1690,7 +1710,7 @@ func TestValidAppReentry(t *testing.T) {
Amount: 1_000_000,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &fund2)
_ = endBlock(t, l, eval)
@@ -1700,254 +1720,25 @@ func TestValidAppReentry(t *testing.T) {
ApplicationID: index0,
ForeignApps: []basics.AppIndex{index2, index1, index0},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &call1)
endBlock(t, l, eval)
}
-func TestMaxInnerTxFanOut(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- app0 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-`),
- }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &app0)
- vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
-
- fund0 := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: index0.Address(),
- Amount: 1_000_000,
- }
-
- app1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
- int 3
- int 3
- ==
- assert
-`),
- }
- eval = nextBlock(t, l, true, nil)
- txns(t, l, eval, &app1, &fund0)
- vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
-
- callTxGroup := make([]txntest.Txn, 16)
- for i := 0; i < 16; i++ {
- callTxGroup[i] = txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1},
- }
- }
- eval = nextBlock(t, l, true, nil)
- err := txgroup(t, l, eval, &callTxGroup[0], &callTxGroup[1], &callTxGroup[2], &callTxGroup[3], &callTxGroup[4], &callTxGroup[5], &callTxGroup[6], &callTxGroup[7], &callTxGroup[8], &callTxGroup[9], &callTxGroup[10], &callTxGroup[11], &callTxGroup[12], &callTxGroup[13], &callTxGroup[14], &callTxGroup[15])
- require.NoError(t, err)
-
- endBlock(t, l, eval)
-}
-
-func TestExceedMaxInnerTxFanOut(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- var program string
- for i := 0; i < 17; i++ {
- program += `
- itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
- itxn_submit
-`
- }
-
- // 17 inner txns
- app0 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(program),
- }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &app0)
- vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
-
- fund0 := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: index0.Address(),
- Amount: 1_000_000,
- }
-
- app1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
- int 3
- int 3
- ==
- assert
-`),
- }
- eval = nextBlock(t, l, true, nil)
- txns(t, l, eval, &app1, &fund0)
- vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
-
- callTxGroup := make([]txntest.Txn, 16)
- for i := 0; i < 16; i++ {
- callTxGroup[i] = txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1},
- }
- }
- eval = nextBlock(t, l, true, nil)
- err := txgroup(t, l, eval, &callTxGroup[0], &callTxGroup[1], &callTxGroup[2], &callTxGroup[3], &callTxGroup[4], &callTxGroup[5], &callTxGroup[6], &callTxGroup[7], &callTxGroup[8], &callTxGroup[9], &callTxGroup[10], &callTxGroup[11], &callTxGroup[12], &callTxGroup[13], &callTxGroup[14], &callTxGroup[15])
- require.Error(t, err)
-
- endBlock(t, l, eval)
-}
-
func TestMaxInnerTxForSingleAppCall(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
+ // v31 = inner appl
+ testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
- program := `
+ program := `
+txn ApplicationArgs 0
+btoi
+store 0
int 1
loop:
itxn_begin
@@ -1959,153 +1750,79 @@ itxn_submit
int 1
+
dup
-int 256
+load 0
<=
bnz loop
-int 257
+load 0
+int 1
++
==
assert
`
- // 256 inner txns
- app0 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(program),
- }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &app0)
- vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ app0 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(program),
+ }
+ vb := dl.fullBlock(&app0)
+ index0 := vb.Block().Payset[0].ApplicationID
- fund0 := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: index0.Address(),
- Amount: 1_000_000,
- }
+ fund0 := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: index0.Address(),
+ Amount: 1_000_000,
+ }
- app1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ app1 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
int 3
int 3
==
assert
`),
- }
- eval = nextBlock(t, l, true, nil)
- txns(t, l, eval, &app1, &fund0)
- vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
-
- callTxGroup := make([]txntest.Txn, 16)
- callTxGroup[0] = txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1},
- }
- for i := 1; i < 16; i++ {
- callTxGroup[i] = txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index1,
- ForeignApps: []basics.AppIndex{},
}
- }
- eval = nextBlock(t, l, true, nil)
- err := txgroup(t, l, eval, &callTxGroup[0], &callTxGroup[1], &callTxGroup[2], &callTxGroup[3], &callTxGroup[4], &callTxGroup[5], &callTxGroup[6], &callTxGroup[7], &callTxGroup[8], &callTxGroup[9], &callTxGroup[10], &callTxGroup[11], &callTxGroup[12], &callTxGroup[13], &callTxGroup[14], &callTxGroup[15])
- require.NoError(t, err)
-
- endBlock(t, l, eval)
-}
-
-func TestExceedMaxInnerTxForSingleAppCall(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- program := `
-int 1
-loop:
-itxn_begin
- int appl
- itxn_field TypeEnum
- txn Applications 1
- itxn_field ApplicationID
-itxn_submit
-int 1
-+
-dup
-int 257
-<=
-bnz loop
-int 258
-==
-assert
-`
+ vb = dl.fullBlock(&app1, &fund0)
+ index1 := vb.Block().Payset[0].ApplicationID
- // 257 inner txns
- app0 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(program),
- }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &app0)
- vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ callTxGroup := make([]*txntest.Txn, 16)
+ callTxGroup[0] = &txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: index0,
+ ForeignApps: []basics.AppIndex{index1},
+ ApplicationArgs: [][]byte{{1, 0}}, // 256 inner calls
+ }
+ for i := 1; i < 16; i++ {
+ callTxGroup[i] = &txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: index1,
+ Note: []byte{byte(i)},
+ }
+ }
+ dl.txgroup("", callTxGroup...)
- fund0 := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: index0.Address(),
- Amount: 1_000_000,
- }
+ // Can't do it twice in a single group
+ dl.txgroup("too many inner", callTxGroup[0], callTxGroup[0].Noted("another"))
- app1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
- int 3
- int 3
- ==
- assert
-`),
- }
- eval = nextBlock(t, l, true, nil)
- txns(t, l, eval, &app1, &fund0)
- vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
+ // Don't need all those extra top-levels to be allowed to do 256 in tx0
+ callTxGroup[0].Group = crypto.Digest{}
+ dl.fullBlock(callTxGroup[0])
- callTxGroup := make([]txntest.Txn, 16)
- callTxGroup[0] = txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1},
- }
- for i := 1; i < 16; i++ {
- callTxGroup[i] = txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index1,
- ForeignApps: []basics.AppIndex{},
- }
- }
- eval = nextBlock(t, l, true, nil)
- err := txgroup(t, l, eval, &callTxGroup[0], &callTxGroup[1], &callTxGroup[2], &callTxGroup[3], &callTxGroup[4], &callTxGroup[5], &callTxGroup[6], &callTxGroup[7], &callTxGroup[8], &callTxGroup[9], &callTxGroup[10], &callTxGroup[11], &callTxGroup[12], &callTxGroup[13], &callTxGroup[14], &callTxGroup[15])
- require.Error(t, err)
-
- endBlock(t, l, eval)
+ // Can't do 257 txns
+ callTxGroup[0].ApplicationArgs[0][1] = 1
+ dl.txn(callTxGroup[0], "too many inner")
+ })
}
func TestAbortWhenInnerAppCallFails(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2127,7 +1844,7 @@ int 1
assert
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
index0 := vb.Block().Payset[0].ApplicationID
@@ -2149,7 +1866,7 @@ assert
assert
`),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &app1, &fund0)
vb = endBlock(t, l, eval)
index1 := vb.Block().Payset[0].ApplicationID
@@ -2161,7 +1878,7 @@ assert
ForeignApps: []basics.AppIndex{index1},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &callTx, "logic eval error")
endBlock(t, l, eval)
}
@@ -2169,60 +1886,137 @@ assert
// TestInnerAppVersionCalling ensure that inner app calls must be the >=v6 apps
func TestInnerAppVersionCalling(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- five, err := logic.AssembleStringWithVersion("int 1", 5)
- require.NoError(t, err)
- six, err := logic.AssembleStringWithVersion("int 1", 6)
- require.NoError(t, err)
+ // 31 allowed inner appls. vFuture enables proto.AllowV4InnerAppls (presumed v33, below)
+ testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ three, err := logic.AssembleStringWithVersion("int 1", 3)
+ require.NoError(t, err)
+ five, err := logic.AssembleStringWithVersion("int 1", 5)
+ require.NoError(t, err)
+ six, err := logic.AssembleStringWithVersion("int 1", 6)
+ require.NoError(t, err)
+
+ create5 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: five.Program,
+ ClearStateProgram: five.Program,
+ }
- create5 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: five.Program,
- ClearStateProgram: five.Program,
- }
+ create6 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: six.Program,
+ ClearStateProgram: six.Program,
+ }
- create6 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: six.Program,
- ClearStateProgram: six.Program,
- }
+ create5with3 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: five.Program,
+ ClearStateProgram: three.Program,
+ }
- eval := nextBlock(t, l, true, nil)
- txns(t, l, eval, &create5, &create6)
- vb := endBlock(t, l, eval)
- v5id := vb.Block().Payset[0].ApplicationID
- v6id := vb.Block().Payset[1].ApplicationID
+ vb := dl.fullBlock(&create5, &create6, &create5with3)
+ v5id := vb.Block().Payset[0].ApplicationID
+ v6id := vb.Block().Payset[1].ApplicationID
+ v5withv3csp := vb.Block().Payset[2].ApplicationID
- call := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- // don't use main. do the test at creation time
- ApprovalProgram: `
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ // don't use main. do the test at creation time
+ ApprovalProgram: `
itxn_begin
int appl
itxn_field TypeEnum
txn Applications 1
itxn_field ApplicationID
itxn_submit`,
- ForeignApps: []basics.AppIndex{v5id},
- }
+ ForeignApps: []basics.AppIndex{v5id},
+ }
- eval = nextBlock(t, l, true, nil)
- txn(t, l, eval, &call, "inner app call with version 5")
- call.ForeignApps[0] = v6id
- txn(t, l, eval, &call, "overspend") // it tried to execute, but test doesn't bother funding
- endBlock(t, l, eval)
+ // optin is the same as call, except also sets OnCompletion to optin
+ optin := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ // don't use main. do the test at creation time
+ ApprovalProgram: `
+itxn_begin
+ int appl
+ itxn_field TypeEnum
+ txn Applications 1
+ itxn_field ApplicationID
+ int OptIn
+ itxn_field OnCompletion
+itxn_submit`,
+ ForeignApps: []basics.AppIndex{v5id},
+ }
+
+ // createAndOptin tries to create and optin to args[0], args[1] programs
+ createAndOptin := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ // don't use main. do the test at creation time
+ ApprovalProgram: `
+itxn_begin
+ int appl
+ itxn_field TypeEnum
+ txn ApplicationArgs 0
+ itxn_field ApprovalProgram
+ txn ApplicationArgs 1
+ itxn_field ClearStateProgram
+ int OptIn
+ itxn_field OnCompletion
+itxn_submit`,
+ }
+
+ if ver <= 32 {
+ dl.txn(&call, "inner app call with version v5 < v6")
+ call.ForeignApps[0] = v6id
+ dl.txn(&call, "overspend") // it tried to execute, but test doesn't bother funding
+
+ // Can't create a v3 app from inside an app, because creating one implicitly calls it
+ createAndOptin.ApplicationArgs = [][]byte{three.Program, three.Program}
+ dl.txn(&createAndOptin, "inner app call with version v3 < v6")
+
+ // nor v5 in proto ver 32
+ createAndOptin.ApplicationArgs = [][]byte{five.Program, five.Program}
+ dl.txn(&createAndOptin, "inner app call with version v5 < v6")
+
+ // 6 is good
+ createAndOptin.ApplicationArgs = [][]byte{six.Program, six.Program}
+ dl.txn(&createAndOptin, "overspend") // passed the checks, but is an overspend
+ } else {
+ // after 32 proto.AllowV4InnerAppls should be in effect, so calls and optins to v5 are ok
+ dl.txn(&call, "overspend") // it tried to execute, but test doesn't bother funding
+ dl.txn(&optin, "overspend") // it tried to execute, but test doesn't bother funding
+ optin.ForeignApps[0] = v5withv3csp // but we can't optin to a v5 if it has an old csp
+ dl.txn(&optin, "CSP v3 < v4") // rejected by the CSP version check, so it never executes
+
+ // Can't create a v3 app from inside an app, because creating one implicitly calls it
+ createAndOptin.ApplicationArgs = [][]byte{three.Program, five.Program}
+ dl.txn(&createAndOptin, "inner app call with version v3 < v4")
+ // Can't create and optin to a v5/v3 app from inside an app
+ createAndOptin.ApplicationArgs = [][]byte{five.Program, three.Program}
+ dl.txn(&createAndOptin, "inner app call opt-in with CSP v3 < v4")
+
+ createAndOptin.ApplicationArgs = [][]byte{five.Program, five.Program}
+ dl.txn(&createAndOptin, "overspend") // passed the checks, but is an overspend
+ }
+ })
}
func TestAppVersionMatching(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2242,42 +2036,39 @@ func TestAppVersionMatching(t *testing.T) {
ClearStateProgram: five.Program,
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &create)
endBlock(t, l, eval)
create.ClearStateProgram = six.Program
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &create, "version mismatch")
endBlock(t, l, eval)
create.ApprovalProgram = six.Program
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &create)
endBlock(t, l, eval)
create.ClearStateProgram = four.Program
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &create, "version mismatch")
endBlock(t, l, eval)
// four doesn't match five, but it doesn't have to
create.ApprovalProgram = five.Program
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &create)
endBlock(t, l, eval)
}
func TestAppDowngrade(t *testing.T) {
partitiontest.PartitionTest(t)
-
- genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
+ t.Parallel()
four, err := logic.AssembleStringWithVersion("int 1", 4)
require.NoError(t, err)
@@ -2286,59 +2077,67 @@ func TestAppDowngrade(t *testing.T) {
six, err := logic.AssembleStringWithVersion("int 1", 6)
require.NoError(t, err)
- create := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: four.Program,
- ClearStateProgram: four.Program,
- }
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ create := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: four.Program,
+ ClearStateProgram: four.Program,
+ }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &create)
- vb := endBlock(t, l, eval)
- app := vb.Block().Payset[0].ApplicationID
+ vb := dl.fullBlock(&create)
+ app := vb.Block().Payset[0].ApplicationID
- update := txntest.Txn{
- Type: "appl",
- ApplicationID: app,
- OnCompletion: transactions.UpdateApplicationOC,
- Sender: addrs[0],
- ApprovalProgram: four.Program,
- ClearStateProgram: four.Program,
- }
+ update := txntest.Txn{
+ Type: "appl",
+ ApplicationID: app,
+ OnCompletion: transactions.UpdateApplicationOC,
+ Sender: addrs[0],
+ ApprovalProgram: four.Program,
+ ClearStateProgram: four.Program,
+ }
- // No change - legal
- eval = nextBlock(t, l, true, nil)
- txn(t, l, eval, &update)
+ // No change - legal
+ dl.fullBlock(&update)
- // Upgrade just the approval. Sure (because under 6, no need to match)
- update.ApprovalProgram = five.Program
- txn(t, l, eval, &update)
+ // Upgrade just the approval. Sure (because under 6, no need to match)
+ update.ApprovalProgram = five.Program
+ dl.fullBlock(&update)
- // Upgrade just the clear state. Now they match
- update.ClearStateProgram = five.Program
- txn(t, l, eval, &update)
+ // Upgrade just the clear state. Now they match
+ update.ClearStateProgram = five.Program
+ dl.fullBlock(&update)
- // Downgrade (allowed pre 6)
- update.ClearStateProgram = four.Program
- txn(t, l, eval, update.Noted("actually a repeat of first upgrade"))
+ // Downgrade (allowed for pre 6 programs until AllowV4InnerAppls)
+ update.ClearStateProgram = four.Program
+ if ver <= 32 {
+ dl.fullBlock(update.Noted("actually a repeat of first upgrade"))
+ } else {
+ dl.txn(update.Noted("actually a repeat of first upgrade"), "clearstate program version downgrade")
+ }
- // Try to upgrade (at 6, must match)
- update.ApprovalProgram = six.Program
- txn(t, l, eval, &update, "version mismatch")
+ // Try to upgrade (at 6, must match)
+ update.ApprovalProgram = six.Program
+ dl.txn(&update, "version mismatch")
- // Do both
- update.ClearStateProgram = six.Program
- txn(t, l, eval, &update)
+ // Do both
+ update.ClearStateProgram = six.Program
+ dl.fullBlock(&update)
- // Try to downgrade. Fails because it was 6.
- update.ApprovalProgram = five.Program
- update.ClearStateProgram = five.Program
- txn(t, l, eval, update.Noted("repeat of 3rd update"), "downgrade")
+ // Try to downgrade. Fails because it was 6.
+ update.ApprovalProgram = five.Program
+ update.ClearStateProgram = five.Program
+ dl.txn(update.Noted("repeat of 3rd update"), "downgrade")
+ })
}
func TestCreatedAppsAreAvailable(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2362,7 +2161,7 @@ func TestCreatedAppsAreAvailable(t *testing.T) {
itxn_submit`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &createapp)
vb := endBlock(t, l, eval)
index0 := vb.Block().Payset[0].ApplicationID
@@ -2374,7 +2173,7 @@ func TestCreatedAppsAreAvailable(t *testing.T) {
Amount: 1_000_000,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &fund0)
endBlock(t, l, eval)
@@ -2385,7 +2184,7 @@ func TestCreatedAppsAreAvailable(t *testing.T) {
ForeignApps: []basics.AppIndex{},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &callTx)
endBlock(t, l, eval)
index1 := basics.AppIndex(1)
@@ -2397,13 +2196,14 @@ func TestCreatedAppsAreAvailable(t *testing.T) {
ForeignApps: []basics.AppIndex{},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &callTx)
endBlock(t, l, eval)
}
func TestInvalidAppsNotAccessible(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2420,7 +2220,7 @@ itxn_begin
itxn_field ApplicationID
itxn_submit`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
index0 := vb.Block().Payset[0].ApplicationID
@@ -2442,7 +2242,7 @@ int 2
assert
`),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &app1, &fund0)
endBlock(t, l, eval)
@@ -2453,13 +2253,14 @@ assert
ForeignApps: []basics.AppIndex{},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &callTx, "invalid App reference 2")
endBlock(t, l, eval)
}
func TestInvalidAssetsNotAccessible(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
l := newTestLedger(t, genBalances)
@@ -2499,7 +2300,7 @@ func TestInvalidAssetsNotAccessible(t *testing.T) {
},
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &createapp, &fund, &createasa)
endBlock(t, l, eval)
@@ -2509,7 +2310,7 @@ func TestInvalidAssetsNotAccessible(t *testing.T) {
ApplicationID: basics.AppIndex(1),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &use, "invalid Asset reference 3")
endBlock(t, l, eval)
}
@@ -2594,7 +2395,7 @@ func executeMegaContract(b *testing.B) {
}
}
- eval := nextBlock(b, l, true, nil)
+ eval := nextBlock(b, l)
txns(b, l, eval, funds...)
endBlock(b, l, eval)
@@ -2604,7 +2405,7 @@ func executeMegaContract(b *testing.B) {
ApprovalProgram: `int 1`,
}
- eval = nextBlock(b, l, true, nil)
+ eval = nextBlock(b, l)
err := txgroup(b, l, eval, &createapp, &app1, &app1, &app1, &app1, &app1, &app1)
require.NoError(b, err)
endBlock(b, l, eval)
@@ -2637,7 +2438,7 @@ func TestInnerClearState(t *testing.T) {
},
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &inner)
vb := endBlock(t, l, eval)
innerId := vb.Block().Payset[0].ApplicationID
@@ -2662,7 +2463,7 @@ itxn_submit
ForeignApps: []basics.AppIndex{innerId},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &outer)
vb = endBlock(t, l, eval)
outerId := vb.Block().Payset[0].ApplicationID
@@ -2681,7 +2482,7 @@ itxn_submit
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}},
ForeignApps: []basics.AppIndex{innerId},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
@@ -2693,7 +2494,7 @@ itxn_submit
})
call.ApplicationArgs = [][]byte{{byte(transactions.ClearStateOC)}}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &call)
endBlock(t, l, eval)
@@ -2725,7 +2526,7 @@ b top
`,
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &badCallee)
vb := endBlock(t, l, eval)
badId := vb.Block().Payset[0].ApplicationID
@@ -2767,7 +2568,7 @@ skip:
ForeignApps: []basics.AppIndex{badId},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &outer)
vb = endBlock(t, l, eval)
outerId := vb.Block().Payset[0].ApplicationID
@@ -2786,7 +2587,7 @@ skip:
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}},
ForeignApps: []basics.AppIndex{badId},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
@@ -2795,7 +2596,7 @@ skip:
// When doing a clear state, `call` checks that budget wasn't stolen
call.ApplicationArgs = [][]byte{{byte(transactions.ClearStateOC)}}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &call)
endBlock(t, l, eval)
@@ -2855,7 +2656,7 @@ log
},
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &inner, &waster)
vb := endBlock(t, l, eval)
innerId := vb.Block().Payset[0].ApplicationID
@@ -2888,7 +2689,7 @@ itxn_submit
`),
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &grouper)
vb = endBlock(t, l, eval)
grouperId := vb.Block().Payset[0].ApplicationID
@@ -2907,7 +2708,7 @@ itxn_submit
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}, {byte(transactions.OptInOC)}},
ForeignApps: []basics.AppIndex{wasterId, innerId},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
@@ -2915,7 +2716,7 @@ itxn_submit
require.Len(t, gAcct.AppLocalStates, 2)
call.ApplicationArgs = [][]byte{{byte(transactions.CloseOutOC)}, {byte(transactions.ClearStateOC)}}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txn(t, l, eval, &call, "ClearState execution with low OpcodeBudget")
vb = endBlock(t, l, eval)
require.Len(t, vb.Block().Payset, 0)
@@ -2970,7 +2771,7 @@ itxn_begin
itxn_submit
` + test.approval,
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
index0 := vb.Block().Payset[0].ApplicationID
@@ -2989,7 +2790,7 @@ itxn_submit
OnCompletion: transactions.OptInOC,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fund0, &optin)
vb = endBlock(t, l, eval)
@@ -3009,7 +2810,7 @@ itxn_submit
OnCompletion: transactions.ClearStateOC,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &clear)
vb = endBlock(t, l, eval)
@@ -3119,7 +2920,7 @@ check:
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &appA, &appB, &appC)
vb := endBlock(t, l, eval)
indexA := vb.Block().Payset[0].ApplicationID
@@ -3140,7 +2941,7 @@ check:
ForeignApps: []basics.AppIndex{indexB, indexC},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fundA, &callA)
endBlock(t, l, eval)
}
@@ -3232,7 +3033,7 @@ check:
`),
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &appA, &appB, &appC)
vb := endBlock(t, l, eval)
indexA := vb.Block().Payset[0].ApplicationID
@@ -3253,7 +3054,7 @@ check:
ForeignApps: []basics.AppIndex{indexB, indexC},
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &fundA, &callA)
endBlock(t, l, eval)
}
diff --git a/ledger/internal/double_test.go b/ledger/internal/double_test.go
new file mode 100644
index 000000000..84f1f092b
--- /dev/null
+++ b/ledger/internal/double_test.go
@@ -0,0 +1,178 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal_test
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/require"
+)
+
+// DoubleLedger allows for easy "Double Entry bookkeeping" as a way to write
+// fairly extensive ledger tests. In addition to simplifying the addition of
+// txns and txgroups to a ledger (and then allowing for inspection of the
+// created blocks), it also does a double check on correctness by marshalling
+// the created blocks, evaluating the transactions in a ledger copy, and
+// asserting that it comes out the same. During the insertion of those
+// transactions, the validator ledger is not in `generate` mode - so it
+// evaluates and validates, checking that the ApplyDatas that come from the
+// first ledger match the ADs created by the second. The validator ledger is
+// then temporarily placed in `generate` mode so that the entire block can be
+// generated in the copy second ledger, and compared.
+type DoubleLedger struct {
+ t *testing.T
+
+ generator *ledger.Ledger
+ validator *ledger.Ledger
+
+ eval *internal.BlockEvaluator
+}
+
+func (dl DoubleLedger) Close() {
+ dl.generator.Close()
+ dl.validator.Close()
+}
+
+// NewDoubleLedger creates a new DoubleLedger with the supplied balances and consensus version.
+func NewDoubleLedger(t *testing.T, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion) DoubleLedger {
+ g := newTestLedgerWithConsensusVersion(t, balances, cv)
+ v := newTestLedgerFull(t, balances, cv, g.GenesisHash())
+ return DoubleLedger{t, g, v, nil}
+}
+
+func (dl *DoubleLedger) beginBlock() *internal.BlockEvaluator {
+ dl.eval = nextBlock(dl.t, dl.generator)
+ return dl.eval
+}
+
+func (dl *DoubleLedger) txn(tx *txntest.Txn, problem ...string) {
+ dl.t.Helper()
+ if dl.eval == nil {
+ dl.beginBlock()
+ defer dl.endBlock()
+ }
+ txn(dl.t, dl.generator, dl.eval, tx, problem...)
+}
+
+func (dl *DoubleLedger) txns(txns ...*txntest.Txn) {
+ dl.t.Helper()
+ if dl.eval == nil {
+ dl.beginBlock()
+ defer dl.endBlock()
+ }
+ for _, tx := range txns {
+ dl.txn(tx)
+ }
+}
+
+func (dl *DoubleLedger) txgroup(problem string, txns ...*txntest.Txn) {
+ dl.t.Helper()
+ if dl.eval == nil {
+ dl.beginBlock()
+ defer dl.endBlock()
+ }
+ err := txgroup(dl.t, dl.generator, dl.eval, txns...)
+ if problem == "" {
+ require.NoError(dl.t, err)
+ } else {
+ require.Error(dl.t, err)
+ require.Contains(dl.t, err.Error(), problem)
+ }
+}
+
+func (dl *DoubleLedger) fullBlock(txs ...*txntest.Txn) *ledgercore.ValidatedBlock {
+ dl.t.Helper()
+ dl.beginBlock()
+ dl.txns(txs...)
+ return dl.endBlock()
+}
+
+func (dl *DoubleLedger) endBlock() *ledgercore.ValidatedBlock {
+ vb := endBlock(dl.t, dl.generator, dl.eval)
+ checkBlock(dl.t, dl.validator, vb)
+ dl.eval = nil // Ensure it's not used again
+ return vb
+}
+
+func checkBlock(t *testing.T, checkLedger *ledger.Ledger, vb *ledgercore.ValidatedBlock) {
+ bl := vb.Block()
+ msg := bl.MarshalMsg(nil)
+ var reconstituted bookkeeping.Block
+ _, err := reconstituted.UnmarshalMsg(msg)
+ require.NoError(t, err)
+
+ check := nextCheckBlock(t, checkLedger, reconstituted.RewardsState)
+ var group []transactions.SignedTxnWithAD
+ for _, stib := range reconstituted.Payset {
+ stxn, ad, err := reconstituted.BlockHeader.DecodeSignedTxn(stib)
+ require.NoError(t, err)
+ stad := transactions.SignedTxnWithAD{SignedTxn: stxn, ApplyData: ad}
+ // If txn we're looking at belongs in the current group, append
+ if group == nil || (!stxn.Txn.Group.IsZero() && group[0].Txn.Group == stxn.Txn.Group) {
+ group = append(group, stad)
+ } else if group != nil {
+ err := check.TransactionGroup(group)
+ require.NoError(t, err)
+ group = []transactions.SignedTxnWithAD{stad}
+ }
+ }
+ if group != nil {
+ err := check.TransactionGroup(group)
+ require.NoError(t, err, "%+v", reconstituted.Payset)
+ }
+ check.SetGenerate(true)
+ cb := endBlock(t, checkLedger, check)
+ check.SetGenerate(false)
+ require.Equal(t, vb.Block(), cb.Block())
+
+ // vb.Delta() need not actually be Equal, in the sense of require.Equal
+ // because the order of the records in Accts is determined by the way the
+ // cb.sdeltas map (and then the maps in there) is iterated when the
+ // StateDelta is constructed by roundCowState.deltas(). They should be
+ // semantically equivalent, but those fields are not exported, so checking
+ // equivalence is hard. If vb.Delta() is, in fact, different, even though
+ // vb.Block() is the same, then there is something seriously broken going
+ // on, that is unlikely to have anything to do with these tests. So upshot:
+ // we skip trying a complicated equality check.
+
+ // This is the part of checking Delta() equality that wouldn't work right.
+ // require.Equal(t, vb.Delta().Accts, cb.Delta().Accts)
+}
+
+func nextCheckBlock(t testing.TB, ledger *ledger.Ledger, rs bookkeeping.RewardsState) *internal.BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+
+ nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ nextHdr.RewardsState = rs
+ // follow nextBlock, which does this for determinism
+ nextHdr.TimeStamp = hdr.TimeStamp + 1
+ eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
+ Generate: false,
+ Validate: true, // Do the complete checks that a new txn would be subject to
+ })
+ require.NoError(t, err)
+ return eval
+}
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
index 3aa232391..79d22d189 100644
--- a/ledger/internal/eval_blackbox_test.go
+++ b/ledger/internal/eval_blackbox_test.go
@@ -31,7 +31,6 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/internal"
@@ -43,8 +42,6 @@ import (
"github.com/algorand/go-algorand/util/execpool"
)
-var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
var minFee basics.MicroAlgos
func init() {
@@ -54,10 +51,11 @@ func init() {
func TestBlockEvaluator(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genesisInitState, addrs, keys := ledgertesting.Genesis(10)
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
require.NoError(t, err)
defer l.Close()
@@ -224,6 +222,7 @@ func TestBlockEvaluator(t *testing.T) {
func TestRekeying(t *testing.T) {
partitiontest.PartitionTest(t)
+ // t.Parallel() NO! This test manipulates []protocol.Consensus
// Pretend rekeying is supported
actual := config.Consensus[protocol.ConsensusCurrentVersion]
@@ -237,7 +236,7 @@ func TestRekeying(t *testing.T) {
// Bring up a ledger
genesisInitState, addrs, keys := ledgertesting.Genesis(10)
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
require.NoError(t, err)
defer l.Close()
@@ -327,23 +326,26 @@ func TestRekeying(t *testing.T) {
// TODO: More tests
}
-func testEvalAppGroup(t *testing.T, schema basics.StateSchema) (*internal.BlockEvaluator, basics.Address, error) {
- genesisInitState, addrs, keys := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
-
- blkHeader, err := l.BlockHdr(basics.Round(0))
- require.NoError(t, err)
- newBlock := bookkeeping.MakeBlock(blkHeader)
-
- eval, err := internal.StartEvaluator(l, newBlock.BlockHeader, internal.EvaluatorOptions{
- Generate: true,
- Validate: true})
- require.NoError(t, err)
-
- ops, err := logic.AssembleString(`#pragma version 2
+// TestEvalAppState ensures txns in a group can't violate app state schema
+// limits the test ensures that commitToParent -> applyChild copies child's cow
+// state usage counts into parent and the usage counts correctly propagated from
+// parent cow to child cow and back. When limits are not violated, the test
+// ensures that the updates are correct.
+func TestEvalAppState(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v24 = apps
+ testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ appcall1 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ GlobalStateSchema: basics.StateSchema{NumByteSlice: 1},
+ ApprovalProgram: `#pragma version 2
txn ApplicationID
bz create
byte "caller"
@@ -355,121 +357,50 @@ create:
txn Sender
app_global_put
ok:
- int 1`)
- require.NoError(t, err, ops.Errors)
- approval := ops.Program
- ops, err = logic.AssembleString("#pragma version 2\nint 1")
- require.NoError(t, err)
- clear := ops.Program
-
- genHash := l.GenesisHash()
- header := transactions.Header{
- Sender: addrs[0],
- Fee: minFee,
- FirstValid: newBlock.Round(),
- LastValid: newBlock.Round(),
- GenesisHash: genHash,
- }
- appcall1 := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: header,
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- GlobalStateSchema: schema,
- ApprovalProgram: approval,
- ClearStateProgram: clear,
- },
- }
+ int 1`,
+ ClearStateProgram: "#pragma version 2\nint 1",
+ }
- appcall2 := transactions.Transaction{
- Type: protocol.ApplicationCallTx,
- Header: header,
- ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ appcall2 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
ApplicationID: 1,
- },
- }
-
- var group transactions.TxGroup
- group.TxGroupHashes = []crypto.Digest{crypto.HashObj(appcall1), crypto.HashObj(appcall2)}
- appcall1.Group = crypto.HashObj(group)
- appcall2.Group = crypto.HashObj(group)
- stxn1 := appcall1.Sign(keys[0])
- stxn2 := appcall2.Sign(keys[0])
-
- g := []transactions.SignedTxnWithAD{
- {
- SignedTxn: stxn1,
- ApplyData: transactions.ApplyData{
- EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
- "creator": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
- },
- ApplicationID: 1,
- },
- },
- {
- SignedTxn: stxn2,
- ApplyData: transactions.ApplyData{
- EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
- "caller": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
- }},
- },
- }
- txgroup := []transactions.SignedTxn{stxn1, stxn2}
- err = eval.TestTransactionGroup(txgroup)
- if err != nil {
- return eval, addrs[0], err
- }
- err = eval.TransactionGroup(g)
- return eval, addrs[0], err
-}
-
-// TestEvalAppStateCountsWithTxnGroup ensures txns in a group can't violate app state schema limits
-// the test ensures that
-// commitToParent -> applyChild copies child's cow state usage counts into parent
-// and the usage counts correctly propagated from parent cow to child cow and back
-func TestEvalAppStateCountsWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- _, _, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 1})
- require.Error(t, err)
- require.Contains(t, err.Error(), "store bytes count 2 exceeds schema bytes count 1")
-}
-
-// TestEvalAppAllocStateWithTxnGroup ensures roundCowState.deltas and applyStorageDelta
-// produce correct results when a txn group has storage allocate and storage update actions
-func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
- partitiontest.PartitionTest(t)
+ }
- eval, addr, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 2})
- require.NoError(t, err)
+ dl.beginBlock()
+ dl.txgroup("store bytes count 2 exceeds schema bytes count 1", &appcall1, &appcall2)
- vb, err := eval.GenerateBlock()
- require.NoError(t, err)
- deltas := vb.Delta()
+ appcall1.GlobalStateSchema = basics.StateSchema{NumByteSlice: 2}
+ dl.txgroup("", &appcall1, &appcall2)
+ vb := dl.endBlock()
+ deltas := vb.Delta()
- params, _ := deltas.Accts.GetAppParams(addr, 1)
- state := params.Params.GlobalState
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["caller"])
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
+ params, ok := deltas.Accts.GetAppParams(addrs[0], 1)
+ require.True(t, ok)
+ state := params.Params.GlobalState
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["caller"])
+ require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["creator"])
+ })
}
// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
-func nextBlock(t testing.TB, ledger *ledger.Ledger, generate bool, protoParams *config.ConsensusParams) *internal.BlockEvaluator {
+func nextBlock(t testing.TB, ledger *ledger.Ledger) *internal.BlockEvaluator {
rnd := ledger.Latest()
hdr, err := ledger.BlockHdr(rnd)
require.NoError(t, err)
nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
+ nextHdr.TimeStamp = hdr.TimeStamp + 1 // ensure deterministic tests
eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
- Generate: generate,
- Validate: false,
- ProtoParams: protoParams,
+ Generate: true,
+ Validate: true, // Do the complete checks that a new txn would be subject to
})
require.NoError(t, err)
return eval
}
func fillDefaults(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
- if txn.GenesisHash.IsZero() {
+ if txn.GenesisHash.IsZero() && ledger.GenesisProto().SupportGenesisHash {
txn.GenesisHash = ledger.GenesisHash()
}
if txn.FirstValid == 0 {
@@ -489,17 +420,7 @@ func txns(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, tx
func txn(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
t.Helper()
fillDefaults(t, ledger, eval, txn)
- stxn := txn.SignedTxn()
- err := eval.TestTransactionGroup([]transactions.SignedTxn{stxn})
- if err != nil {
- if len(problem) == 1 {
- require.Contains(t, err.Error(), problem[0])
- } else {
- require.NoError(t, err) // Will obviously fail
- }
- return
- }
- err = eval.Transaction(stxn, transactions.ApplyData{})
+ err := eval.Transaction(txn.SignedTxn(), transactions.ApplyData{})
if err != nil {
if len(problem) == 1 {
require.Contains(t, err.Error(), problem[0])
@@ -518,13 +439,7 @@ func txgroup(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator,
}
txgroup := txntest.SignedTxns(txns...)
- err := eval.TestTransactionGroup(txgroup)
- if err != nil {
- return err
- }
-
- err = eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
- return err
+ return eval.TransactionGroup(transactions.WrapSignedTxnsWithAD(txgroup))
}
func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) error {
@@ -534,8 +449,7 @@ func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalPr
require.NoError(t, err)
defer l.Close()
- protoParams := config.Consensus[consensusVersion]
- eval := nextBlock(t, l, false, &protoParams)
+ eval := nextBlock(t, l)
appcall1 := txntest.Txn{
Sender: addrs[0],
@@ -563,6 +477,7 @@ func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalPr
// budgets in a group txn and return an error if the budget is exceeded
func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
source := func(n int, m int) string {
return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
@@ -657,72 +572,65 @@ func asaParams(t testing.TB, ledger *ledger.Ledger, asset basics.AssetIndex) (ba
func TestGarbageClearState(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v24 = apps
+ testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "int 1",
- }
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "int 1",
+ ClearStateProgram: []byte{},
+ }
- eval := nextBlock(t, l, true, nil)
+ dl.txn(&createTxn, "invalid program (empty)")
- // Do this "by hand" so we can have an empty / garbage clear state, which
- // would have been papered over with txn()
- fillDefaults(t, l, eval, &createTxn)
- stxn := createTxn.SignedTxn()
- stxn.Txn.ClearStateProgram = nil
- err = eval.TestTransactionGroup([]transactions.SignedTxn{stxn})
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid program")
- err = eval.Transaction(stxn, transactions.ApplyData{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid program")
-
- stxn.Txn.ClearStateProgram = []byte{0xfe} // bad uvarint
- err = eval.TestTransactionGroup([]transactions.SignedTxn{stxn})
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid version")
+ createTxn.ClearStateProgram = []byte{0xfe} // bad uvarint
+ dl.txn(&createTxn, "invalid version")
+ })
}
func TestRewardsInAD(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
-
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v15 put rewards into ApplyData
+ testConsensusRange(t, 11, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
- payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
+ payTxn := txntest.Txn{Type: protocol.PaymentTx, Sender: addrs[0], Receiver: addrs[1]}
- // Build up Residue in RewardsState so it's ready to pay
- for i := 1; i < 10; i++ {
- eval := nextBlock(t, l, true, nil)
- endBlock(t, l, eval)
- }
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ dl.fullBlock()
+ }
- eval := nextBlock(t, l, true, nil)
- txn(t, l, eval, &payTxn)
- vb, err := eval.GenerateBlock()
- require.NoError(t, err)
- payInBlock := vb.Block().Payset[0]
- require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
- require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
- require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+ vb := dl.fullBlock(&payTxn)
+ payInBlock := vb.Block().Payset[0]
+ if ver >= 15 {
+ require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
+ require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+ } else {
+ require.EqualValues(t, 0, payInBlock.ApplyData.SenderRewards.Raw)
+ require.EqualValues(t, 0, payInBlock.ApplyData.ReceiverRewards.Raw)
+ }
+ })
}
func TestMinBalanceChanges(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genesisInitState, addrs, _ := ledgertesting.Genesis(10)
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
require.NoError(t, err)
defer l.Close()
@@ -751,7 +659,7 @@ func TestMinBalanceChanges(t *testing.T) {
ad5init, _, _, err := l.LookupLatest(addrs[5])
require.NoError(t, err)
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns(t, l, eval, &createTxn, &optInTxn)
endBlock(t, l, eval)
@@ -781,7 +689,7 @@ func TestMinBalanceChanges(t *testing.T) {
ConfigAsset: expectedID,
}
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
txns(t, l, eval, &optOutTxn, &closeTxn)
endBlock(t, l, eval)
@@ -797,50 +705,56 @@ func TestMinBalanceChanges(t *testing.T) {
// TestDeleteNonExistantKeys checks if the EvalDeltas from deleting missing keys are correct
func TestDeleteNonExistantKeys(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // teal v2 (apps)
+ testConsensusRange(t, 24, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
+ const appid basics.AppIndex = 1
- const appid basics.AppIndex = 1
-
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
byte "missing_global"
app_global_del
int 0
byte "missing_local"
app_local_del
`),
- }
+ }
- optInTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.OptInOC,
- }
+ optInTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appid,
+ OnCompletion: transactions.OptInOC,
+ }
- eval := nextBlock(t, l, true, nil)
- txns(t, l, eval, &createTxn, &optInTxn)
- vb := endBlock(t, l, eval)
- require.Len(t, vb.Block().Payset[1].EvalDelta.GlobalDelta, 0)
- require.Len(t, vb.Block().Payset[1].EvalDelta.LocalDeltas, 0)
+ vb := dl.fullBlock(&createTxn, &optInTxn)
+ require.Len(t, vb.Block().Payset[1].EvalDelta.GlobalDelta, 0)
+ // For a while, we encoded an empty localdelta
+ deltas := 1
+ if ver >= 27 {
+ deltas = 0
+ }
+ require.Len(t, vb.Block().Payset[1].EvalDelta.LocalDeltas, deltas)
+ })
}
// TestAppInsMinBalance checks that accounts with MaxAppsOptedIn are accepted by block evaluator
// and do not cause any MaximumMinimumBalance problems
func TestAppInsMinBalance(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
genesisInitState, addrs, _ := ledgertesting.Genesis(10)
genesisInitState.Block.CurrentProtocol = protocol.ConsensusV30
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
require.NoError(t, err)
defer l.Close()
@@ -884,7 +798,7 @@ func TestAppInsMinBalance(t *testing.T) {
txnsOptIn = append(txnsOptIn, &optInTxn)
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txns1 := append(txnsCreate, txnsOptIn...)
txns(t, l, eval, txns1...)
vb := endBlock(t, l, eval)
@@ -904,54 +818,165 @@ func TestAppInsMinBalance(t *testing.T) {
require.Equal(t, appParamsCount, 50)
}
-// TestLogsInBlock ensures that logs appear in the block properly
-func TestLogsInBlock(t *testing.T) {
+func TestDuplicates(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- genesisInitState, addrs, _ := ledgertesting.Genesis(10)
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ testConsensusRange(t, 11, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
- require.NoError(t, err)
- defer l.Close()
+ pay := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[1],
+ Amount: 10,
+ }
+ dl.txn(&pay)
+ dl.txn(&pay, "transaction already in ledger")
- const appid basics.AppIndex = 1
- createTxn := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: "byte \"APP\"\n log\n int 1",
- // Fail the clear state
- ClearStateProgram: "byte \"CLR\"\n log\n int 0",
- }
- eval := nextBlock(t, l, true, nil)
- txns(t, l, eval, &createTxn)
- vb := endBlock(t, l, eval)
- createInBlock := vb.Block().Payset[0]
- require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
+ // Test same transaction in a later block
+ dl.txn(&pay, "transaction already in ledger")
- optInTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.OptInOC,
+ // Change the note so it can go in again
+ pay.Note = []byte("1")
+ dl.txn(&pay)
+
+ // Change note again, but try the txn twice in same group
+ if dl.generator.GenesisProto().MaxTxGroupSize > 1 {
+ pay.Note = []byte("2")
+ dl.txgroup("transaction already in ledger", &pay, &pay)
+ }
+ })
+}
+
+var consensusByNumber = []protocol.ConsensusVersion{
+ "", "", "", "", "", "", "",
+ protocol.ConsensusV7,
+ protocol.ConsensusV8,
+ protocol.ConsensusV9,
+ protocol.ConsensusV10,
+ protocol.ConsensusV11, // first with viable payset commit type
+ protocol.ConsensusV12,
+ protocol.ConsensusV13,
+ protocol.ConsensusV14,
+ protocol.ConsensusV15, // rewards in AD
+ protocol.ConsensusV16,
+ protocol.ConsensusV17,
+ protocol.ConsensusV18,
+ protocol.ConsensusV19,
+ protocol.ConsensusV20,
+ protocol.ConsensusV21,
+ protocol.ConsensusV22,
+ protocol.ConsensusV23,
+ protocol.ConsensusV24, // teal v2 (apps)
+ protocol.ConsensusV25,
+ protocol.ConsensusV26,
+ protocol.ConsensusV27,
+ protocol.ConsensusV28,
+ protocol.ConsensusV29,
+ protocol.ConsensusV30, // teal v5 (inner txs)
+ protocol.ConsensusV31, // teal v6 (inner txs with appls)
+ protocol.ConsensusV32, // unlimited assets and apps
+ protocol.ConsensusFuture,
+}
+
+func TestContainsLatestVersion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // This confirms that the proto before future has no ApprovedUpgrades. Once
+ // it does, that new version should be added to consensusByNumber.
+ require.Len(t, config.Consensus[consensusByNumber[len(consensusByNumber)-2]].ApprovedUpgrades, 0)
+ // And no funny business with vFuture
+ require.Equal(t, protocol.ConsensusFuture, consensusByNumber[len(consensusByNumber)-1])
+}
+
+// testConsensusRange allows for running tests against a range of consensus
+// versions. Generally `start` will be the version that introduced the feature,
+// and `stop` will be 0 to indicate it should work right on up through vFuture.
+// `stop` will be an actual version number if we're confirming that something
+// STOPS working as of a particular version. When writing the test for a new
+// feature that is currently in vFuture, use the expected version number as
+// `start`. That will correspond to vFuture until a new consensus version is
+// created and inserted in consensusByNumber. At that point, your feature is
+// probably active in that version. (If it's being held in vFuture, just
+// increment your `start`.)
+func testConsensusRange(t *testing.T, start, stop int, test func(t *testing.T, ver int)) {
+ if stop == 0 { // Treat 0 as "future"
+ stop = len(consensusByNumber) - 1
+ }
+ for i := start; i <= stop; i++ {
+ var version string
+ if i == len(consensusByNumber)-1 {
+ version = "vFuture"
+ } else {
+ version = fmt.Sprintf("v%d", i)
+ }
+ t.Run(fmt.Sprintf("cv=%s", version), func(t *testing.T) { test(t, i) })
}
- eval = nextBlock(t, l, true, nil)
- txns(t, l, eval, &optInTxn)
- vb = endBlock(t, l, eval)
- optInInBlock := vb.Block().Payset[0]
- require.Equal(t, "APP", optInInBlock.ApplyData.EvalDelta.Logs[0])
+}
- clearTxn := txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: addrs[1],
- ApplicationID: appid,
- OnCompletion: transactions.ClearStateOC,
+func benchConsensusRange(b *testing.B, start, stop int, bench func(t *testing.B, ver int)) {
+ if stop == 0 { // Treat 0 as "future"
+ stop = len(consensusByNumber) - 1
}
- eval = nextBlock(t, l, true, nil)
- txns(t, l, eval, &clearTxn)
- vb = endBlock(t, l, eval)
- clearInBlock := vb.Block().Payset[0]
- // Logs do not appear if the ClearState failed
- require.Len(t, clearInBlock.ApplyData.EvalDelta.Logs, 0)
+ for i := start; i <= stop; i++ {
+ var version string
+ if i == len(consensusByNumber)-1 {
+ version = "vFuture"
+ } else {
+ version = fmt.Sprintf("v%d", i)
+ }
+ b.Run(fmt.Sprintf("cv=%s", version), func(b *testing.B) { bench(b, i) })
+ }
+}
+
+// TestLogsInBlock ensures that logs appear in the block properly
+func TestLogsInBlock(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // Run tests from v30 onward
+ testConsensusRange(t, 30, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "byte \"APP\"\n log\n int 1",
+ // Fail the clear state
+ ClearStateProgram: "byte \"CLR\"\n log\n int 0",
+ }
+ vb := dl.fullBlock(&createTxn)
+ createInBlock := vb.Block().Payset[0]
+ appId := createInBlock.ApplyData.ApplicationID
+ require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
+
+ optInTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[1],
+ ApplicationID: appId,
+ OnCompletion: transactions.OptInOC,
+ }
+ vb = dl.fullBlock(&optInTxn)
+ optInInBlock := vb.Block().Payset[0]
+ require.Equal(t, "APP", optInInBlock.ApplyData.EvalDelta.Logs[0])
+
+ clearTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[1],
+ ApplicationID: appId,
+ OnCompletion: transactions.ClearStateOC,
+ }
+ vb = dl.fullBlock(&clearTxn)
+ clearInBlock := vb.Block().Payset[0]
+ // Logs do not appear if the ClearState failed
+ require.Len(t, clearInBlock.ApplyData.EvalDelta.Logs, 0)
+ })
}
// TestGhostTransactions confirms that accounts that don't even exist
@@ -997,10 +1022,11 @@ func TestGhostTransactions(t *testing.T) {
*/
partitiontest.PartitionTest(t)
+ t.Parallel()
genesisInitState, addrs, _ := ledgertesting.Genesis(10)
- l, err := ledger.OpenLedger(logging.TestingLog(t), "", true, genesisInitState, config.GetDefaultLocal())
+ l, err := ledger.OpenLedger(logging.TestingLog(t), t.Name(), true, genesisInitState, config.GetDefaultLocal())
require.NoError(t, err)
defer l.Close()
@@ -1021,7 +1047,7 @@ func TestGhostTransactions(t *testing.T) {
},
}
- eval := nextBlock(t, l, true, nil)
+ eval := nextBlock(t, l)
txn(t, l, eval, &asa)
endBlock(t, l, eval)
@@ -1077,7 +1103,7 @@ func TestGhostTransactions(t *testing.T) {
}
for i, e := range ephemeral {
- eval = nextBlock(t, l, true, nil)
+ eval = nextBlock(t, l)
err := txgroup(t, l, eval, &benefactor, &e)
require.NoError(t, err, "i=%d %s", i, e.Type)
endBlock(t, l, eval)
diff --git a/ledger/internal/export_test.go b/ledger/internal/export_test.go
new file mode 100644
index 000000000..a772d53b9
--- /dev/null
+++ b/ledger/internal/export_test.go
@@ -0,0 +1,28 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal
+
+// Export for testing only. See
+// https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd for a
+// nice explanation. tl;dr: Since some of our testing is in logic_test package,
+// we export some extra things to make testing easier there. But we do it in a
+// _test.go file, so they are only exported during testing.
+
+// In order to generate a block
+func (eval *BlockEvaluator) SetGenerate(g bool) {
+ eval.generate = g
+}
diff --git a/ledger/internal/prefetcher/error.go b/ledger/internal/prefetcher/error.go
new file mode 100644
index 000000000..58b52f891
--- /dev/null
+++ b/ledger/internal/prefetcher/error.go
@@ -0,0 +1,43 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package prefetcher
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+// GroupTaskError indicates the group index of the unfulfilled resource
+type GroupTaskError struct {
+ err error
+ GroupIdx int64
+ Address *basics.Address
+ CreatableIndex basics.CreatableIndex
+ CreatableType basics.CreatableType
+}
+
+// Error satisfies builtin interface `error`
+func (err *GroupTaskError) Error() string {
+ return fmt.Sprintf("prefetch failed for groupIdx %d, address: %s, creatableIndex %d, creatableType %d, cause: %v",
+ err.GroupIdx, err.Address, err.CreatableIndex, err.CreatableType, err.err)
+}
+
+// Unwrap provides access to the underlying error
+func (err *GroupTaskError) Unwrap() error {
+ return err.err
+}
diff --git a/ledger/internal/prefetcher/prefetcher.go b/ledger/internal/prefetcher/prefetcher.go
index 24803ea2d..aa08d850e 100644
--- a/ledger/internal/prefetcher/prefetcher.go
+++ b/ledger/internal/prefetcher/prefetcher.go
@@ -69,7 +69,7 @@ type LoadedTransactionGroup struct {
// Err indicates whether any of the balances in this structure have failed to load. In case of an error, at least
// one of the entries in the balances would be uninitialized.
- Err error
+ Err *GroupTaskError
}
// accountPrefetcher used to prefetch accounts balances and resources before the evaluator is being called.
@@ -146,6 +146,7 @@ type preloaderTaskQueue struct {
type groupTaskDone struct {
groupIdx int64
err error
+ task *preloaderTask
}
func allocPreloaderQueue(count int, maxTxnGroupEntries int) preloaderTaskQueue {
@@ -408,7 +409,13 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
if done.err != nil {
// if there is an error, report the error to the output channel.
p.outChan <- LoadedTransactionGroup{
- Err: done.err,
+ Err: &GroupTaskError{
+ err: done.err,
+ GroupIdx: done.groupIdx,
+ Address: done.task.address,
+ CreatableIndex: done.task.creatableIndex,
+ CreatableType: done.task.creatableType,
+ },
}
return
}
@@ -463,14 +470,18 @@ func (gt *groupTask) markCompletionResource(idx int, res LoadedResourcesEntry, g
}
}
-func (gt *groupTask) markCompletionAcctError(err error, groupDoneCh chan groupTaskDone) {
+func (gt *groupTask) markCompletionAcctError(err error, task *preloaderTask, groupDoneCh chan groupTaskDone) {
for {
curVal := atomic.LoadInt64(&gt.incompleteCount)
if curVal <= 0 {
return
}
if atomic.CompareAndSwapInt64(&gt.incompleteCount, curVal, 0) {
- groupDoneCh <- groupTaskDone{groupIdx: gt.groupTaskIndex, err: err}
+ groupDoneCh <- groupTaskDone{
+ groupIdx: gt.groupTaskIndex,
+ err: err,
+ task: task,
+ }
return
}
}
@@ -558,6 +569,6 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
// in every case we get here, the task is gurenteed to be a non-nil.
for _, wt := range task.groups {
// notify the channel of the error.
- wt.markCompletionAcctError(err, groupDoneCh)
+ wt.markCompletionAcctError(err, task, groupDoneCh)
}
}
diff --git a/ledger/internal/prefetcher/prefetcher_alignment_test.go b/ledger/internal/prefetcher/prefetcher_alignment_test.go
index 015283508..ba14fbe03 100644
--- a/ledger/internal/prefetcher/prefetcher_alignment_test.go
+++ b/ledger/internal/prefetcher/prefetcher_alignment_test.go
@@ -259,7 +259,7 @@ func prefetch(t *testing.T, l prefetcher.Ledger, txn transactions.Transaction) l
loaded, ok := <-ch
require.True(t, ok)
- require.NoError(t, loaded.Err)
+ require.Nil(t, loaded.Err)
require.Equal(t, group, loaded.TxnGroup)
_, ok = <-ch
diff --git a/ledger/internal/prefetcher/prefetcher_test.go b/ledger/internal/prefetcher/prefetcher_test.go
index 2aa59cf78..87a2e9d63 100644
--- a/ledger/internal/prefetcher/prefetcher_test.go
+++ b/ledger/internal/prefetcher/prefetcher_test.go
@@ -18,6 +18,7 @@ package prefetcher_test
import (
"context"
+ "errors"
"testing"
"github.com/stretchr/testify/require"
@@ -49,19 +50,44 @@ func makeAddress(addressSeed int) (o basics.Address) {
const proto = protocol.ConsensusCurrentVersion
+type lookupError struct{}
+
+func (le lookupError) Error() string {
+ return "lookup error"
+}
+
+type assetLookupError struct{}
+
+func (ale assetLookupError) Error() string {
+ return "asset lookup error"
+}
+
+type getCreatorError struct{}
+
+func (gce getCreatorError) Error() string {
+ return "get creator error"
+}
+
type prefetcherTestLedger struct {
- round basics.Round
- balances map[basics.Address]ledgercore.AccountData
- creators map[basics.CreatableIndex]basics.Address
+ round basics.Round
+ balances map[basics.Address]ledgercore.AccountData
+ creators map[basics.CreatableIndex]basics.Address
+ errorTriggerAddress map[basics.Address]bool
}
+const errorTriggerCreatableIndex = 1000001
+const errorTriggerAssetIndex = 1000002
+
func (l *prefetcherTestLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
return bookkeeping.BlockHeader{}, nil
}
func (l *prefetcherTestLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
-func (l *prefetcherTestLedger) LookupWithoutRewards(_ basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
+func (l *prefetcherTestLedger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
+ if _, has := l.errorTriggerAddress[addr]; has {
+ return ledgercore.AccountData{}, l.round, lookupError{}
+ }
if data, has := l.balances[addr]; has {
return data, l.round, nil
}
@@ -71,9 +97,15 @@ func (l *prefetcherTestLedger) LookupApplication(rnd basics.Round, addr basics.A
return ledgercore.AppResource{}, nil
}
func (l *prefetcherTestLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx basics.AssetIndex) (ledgercore.AssetResource, error) {
+ if aidx == errorTriggerAssetIndex {
+ return ledgercore.AssetResource{}, assetLookupError{}
+ }
return ledgercore.AssetResource{}, nil
}
func (l *prefetcherTestLedger) GetCreatorForRound(_ basics.Round, cidx basics.CreatableIndex, _ basics.CreatableType) (basics.Address, bool, error) {
+ if cidx == errorTriggerCreatableIndex {
+ return basics.Address{}, false, getCreatorError{}
+ }
if addr, has := l.creators[cidx]; has {
return addr, true, nil
}
@@ -151,16 +183,13 @@ func compareLoadedResourcesEntries(t *testing.T, expected []prefetcher.LoadedRes
require.Equal(t, expectedForTest, actualForTest)
}
-func TestEvaluatorPrefetcher(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- rnd := basics.Round(5)
- var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+func getPrefetcherTestLedger(rnd basics.Round) *prefetcherTestLedger {
var ledger = &prefetcherTestLedger{
- round: rnd,
- balances: make(map[basics.Address]ledgercore.AccountData),
- creators: make(map[basics.CreatableIndex]basics.Address),
+ round: rnd,
+ balances: make(map[basics.Address]ledgercore.AccountData),
+ creators: make(map[basics.CreatableIndex]basics.Address),
+ errorTriggerAddress: make(map[basics.Address]bool),
}
ledger.balances[makeAddress(1)] = ledgercore.AccountData{
AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 100000000}},
@@ -168,6 +197,16 @@ func TestEvaluatorPrefetcher(t *testing.T) {
ledger.creators[1001] = makeAddress(2)
ledger.creators[2001] = makeAddress(15)
+ return ledger
+}
+
+func TestEvaluatorPrefetcher(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ ledger := getPrefetcherTestLedger(rnd)
type testCase struct {
name string
signedTxn transactions.SignedTxn
@@ -485,7 +524,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
loadedTxnGroup, ok := <-preloadedTxnGroupsCh
require.True(t, ok)
- require.NoError(t, loadedTxnGroup.Err)
+ require.Nil(t, loadedTxnGroup.Err)
compareLoadedAccountDataEntries(t, testCase.accounts, loadedTxnGroup.Accounts)
compareLoadedResourcesEntries(t, testCase.resources, loadedTxnGroup.Resources)
@@ -495,6 +534,144 @@ func TestEvaluatorPrefetcher(t *testing.T) {
}
}
+// Test for error from LookupAsset
+func TestAssetLookupError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ ledger := getPrefetcherTestLedger(rnd)
+ assetTransferTxn :=
+ transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: 1001,
+ AssetSender: makeAddress(2),
+ AssetReceiver: makeAddress(2),
+ AssetCloseTo: makeAddress(2),
+ },
+ },
+ }
+
+ errorReceived := false
+ groups := make([][]transactions.SignedTxnWithAD, 5)
+ for i := 0; i < 5; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, 2)
+ for j := 0; j < 2; j++ {
+ groups[i][j].SignedTxn = assetTransferTxn
+ if i == 2 {
+ // force an error in asset lookup only in the txn group at index 2
+ groups[i][j].SignedTxn.Txn.AssetTransferTxnFields.XferAsset = errorTriggerAssetIndex
+ }
+ }
+ }
+ preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+ for loadedTxnGroup := range preloadedTxnGroupsCh {
+ if loadedTxnGroup.Err != nil {
+ errorReceived = true
+ require.Equal(t, int64(2), loadedTxnGroup.Err.GroupIdx)
+ require.True(t, errors.Is(loadedTxnGroup.Err, assetLookupError{}))
+ require.Equal(t, makeAddress(2), *loadedTxnGroup.Err.Address)
+ require.Equal(t, errorTriggerAssetIndex, int(loadedTxnGroup.Err.CreatableIndex))
+ require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
+ }
+ }
+ require.True(t, errorReceived)
+}
+
+// Test for error from GetCreatorForRound
+func TestGetCreatorForRoundError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ ledger := getPrefetcherTestLedger(rnd)
+
+ createAssetTxn :=
+ transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ ConfigAsset: errorTriggerCreatableIndex,
+ },
+ },
+ }
+
+ errorReceived := false
+
+ groups := make([][]transactions.SignedTxnWithAD, 5)
+ for i := 0; i < 5; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, 10)
+ for j := 0; j < 10; j++ {
+ groups[i][j].SignedTxn = createAssetTxn
+ }
+ }
+ preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+
+ for loadedTxnGroup := range preloadedTxnGroupsCh {
+ if loadedTxnGroup.Err != nil {
+ errorReceived = true
+ require.True(t, errors.Is(loadedTxnGroup.Err, getCreatorError{}))
+ require.Nil(t, loadedTxnGroup.Err.Address)
+ require.Equal(t, errorTriggerCreatableIndex, int(loadedTxnGroup.Err.CreatableIndex))
+ require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
+ }
+ }
+ require.True(t, errorReceived)
+}
+
+// Test for error from LookupWithoutRewards
+func TestLookupWithoutRewards(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ ledger := getPrefetcherTestLedger(rnd)
+
+ createAssetTxn :=
+ transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ ConfigAsset: 1001,
+ },
+ },
+ }
+
+ errorReceived := false
+
+ groups := make([][]transactions.SignedTxnWithAD, 5)
+ for i := 0; i < 5; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, 10)
+ for j := 0; j < 10; j++ {
+ groups[i][j].SignedTxn = createAssetTxn
+ }
+ }
+ ledger.errorTriggerAddress[createAssetTxn.Txn.Sender] = true
+ preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+
+ for loadedTxnGroup := range preloadedTxnGroupsCh {
+ if loadedTxnGroup.Err != nil {
+ errorReceived = true
+ require.True(t, errors.Is(loadedTxnGroup.Err, lookupError{}))
+ require.Equal(t, makeAddress(1), *loadedTxnGroup.Err.Address)
+ require.Equal(t, 0, int(loadedTxnGroup.Err.CreatableIndex))
+ require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
+ }
+ }
+ require.True(t, errorReceived)
+}
+
func TestEvaluatorPrefetcherQueueExpansion(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/ledger/internal/txnbench_test.go b/ledger/internal/txnbench_test.go
new file mode 100644
index 000000000..9c92c896c
--- /dev/null
+++ b/ledger/internal/txnbench_test.go
@@ -0,0 +1,248 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package internal_test
+
+import (
+ "errors"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/stretchr/testify/require"
+)
+
+// BenchmarkTxnTypes compares the execution time of various txn types
+func BenchmarkTxnTypes(b *testing.B) {
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ benchConsensusRange(b, 30, 0, func(b *testing.B, ver int) {
+ l := newTestLedgerWithConsensusVersion(b, genBalances, consensusByNumber[ver])
+ defer l.Close()
+
+ createasa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1000000,
+ Decimals: 3,
+ UnitName: "oz",
+ AssetName: "Gold",
+ URL: "https://gold.rush/",
+
+ Manager: addrs[0],
+ Clawback: addrs[0],
+ Freeze: addrs[0],
+ Reserve: addrs[0],
+ },
+ }
+
+ eval := nextBlock(b, l)
+ txn(b, l, eval, &createasa)
+ vb := endBlock(b, l, eval)
+ asa := vb.Block().Payset[0].ApplyData.ConfigAsset
+ require.Positive(b, asa)
+
+ optin1 := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ AssetReceiver: addrs[1],
+ XferAsset: asa,
+ }
+ optin2 := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[2],
+ AssetReceiver: addrs[2],
+ XferAsset: asa,
+ }
+
+ eval = nextBlock(b, l)
+ txns(b, l, eval, &optin1, &optin2)
+ endBlock(b, l, eval)
+
+ createapp1 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "int 1",
+ }
+
+ eval = nextBlock(b, l)
+ txn(b, l, eval, &createapp1)
+ vb = endBlock(b, l, eval)
+ app1 := vb.Block().Payset[0].ApplyData.ApplicationID
+ require.Positive(b, app1)
+
+ createapp10 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: strings.Repeat("int 1\npop\n", 5) + "int 1",
+ }
+
+ eval = nextBlock(b, l)
+ txn(b, l, eval, &createapp10)
+ vb = endBlock(b, l, eval)
+ app10 := vb.Block().Payset[0].ApplyData.ApplicationID
+ require.Positive(b, app10)
+
+ createapp100 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: strings.Repeat("int 1\npop\n", 50) + "int 1",
+ }
+
+ eval = nextBlock(b, l)
+ txn(b, l, eval, &createapp100)
+ vb = endBlock(b, l, eval)
+ app100 := vb.Block().Payset[0].ApplyData.ApplicationID
+ require.Positive(b, app100)
+
+ createapp700 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: strings.Repeat("int 1\npop\n", 349) + "int 1",
+ }
+
+ eval = nextBlock(b, l)
+ txn(b, l, eval, &createapp700)
+ vb = endBlock(b, l, eval)
+ app700 := vb.Block().Payset[0].ApplyData.ApplicationID
+ require.Positive(b, app700)
+
+ createapp700s := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: strings.Repeat("int 1\n", 350) + strings.Repeat("pop\n", 349),
+ }
+
+ eval = nextBlock(b, l)
+ txn(b, l, eval, &createapp700s)
+ vb = endBlock(b, l, eval)
+ app700s := vb.Block().Payset[0].ApplyData.ApplicationID
+ require.Positive(b, app700s)
+
+ benches := []struct {
+ name string
+ txn txntest.Txn
+ }{
+ {"pay-self", txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[0],
+ }},
+ {"pay-other", txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[1],
+ }},
+ {"asa-self", txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[0],
+ AssetAmount: 1,
+ XferAsset: asa,
+ AssetReceiver: addrs[0],
+ }},
+ {"asa-other", txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[0],
+ XferAsset: asa,
+ AssetAmount: 10,
+ AssetReceiver: addrs[1],
+ }},
+ {"asa-clawback", txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[0],
+ XferAsset: asa,
+ AssetAmount: 1,
+ AssetSender: addrs[1],
+ AssetReceiver: addrs[2],
+ }},
+ {"afrz", txntest.Txn{
+ Type: "afrz",
+ Sender: addrs[0],
+ FreezeAsset: asa,
+ AssetFrozen: true,
+ FreezeAccount: addrs[1],
+ }},
+ {"acfg-big", txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ ConfigAsset: asa,
+ AssetParams: basics.AssetParams{
+ Manager: addrs[0],
+ Clawback: addrs[0],
+ Freeze: addrs[0],
+ Reserve: addrs[0],
+ },
+ }},
+ {"acfg-small", txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ ConfigAsset: asa,
+ AssetParams: basics.AssetParams{
+ Manager: addrs[0],
+ },
+ }},
+ {"call-1", txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: app1,
+ }},
+ {"call-10", txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: app10,
+ }},
+ {"call-100", txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: app100,
+ }},
+ {"call-700", txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: app700,
+ }},
+ {"call-700s", txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: app700s,
+ }},
+ }
+
+ for _, bench := range benches {
+ b.Run(bench.name, func(b *testing.B) {
+ b.ReportAllocs()
+ t := bench.txn
+ eval := nextBlock(b, l)
+ fillDefaults(b, l, eval, &t)
+ signed := t.SignedTxn()
+ for n := 0; n < b.N; n++ {
+ signed.Txn.Note = []byte(strconv.Itoa(n))
+ err := eval.Transaction(signed, transactions.ApplyData{})
+ if errors.Is(err, ledgercore.ErrNoSpace) {
+ endBlock(b, l, eval)
+ eval = nextBlock(b, l)
+ }
+ }
+ endBlock(b, l, eval)
+ })
+ }
+ })
+}
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 550cf388d..a48385ff0 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -560,11 +560,7 @@ func (l *Ledger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
func (l *Ledger) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- totals, err := l.accts.Totals(rnd)
- if err != nil {
- return basics.MicroAlgos{}, err
- }
- return totals.Online.Money, nil
+ return l.accts.OnlineTotals(rnd)
}
// CheckDup return whether a transaction is a duplicate one.
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 3b3956449..b48be3676 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -1689,6 +1689,52 @@ func TestLedgerMemoryLeak(t *testing.T) {
}
}
+// TestLookupAgreement ensures LookupAgreement returns empty data for offline accounts
+func TestLookupAgreement(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ var addrOnline, addrOffline basics.Address
+ for addr, ad := range genesisInitState.Accounts {
+ if addrOffline.IsZero() {
+ addrOffline = addr
+ ad.Status = basics.Offline
+ crypto.RandBytes(ad.VoteID[:]) // this is invalid but we set VoteID to ensure the account gets cleared
+ genesisInitState.Accounts[addr] = ad
+ } else if ad.Status == basics.Online {
+ addrOnline = addr
+ crypto.RandBytes(ad.VoteID[:])
+ genesisInitState.Accounts[addr] = ad
+ break
+ }
+ }
+
+ const inMem = true
+ log := logging.TestingLog(t)
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
+ require.NoError(t, err, "could not open ledger")
+ defer ledger.Close()
+
+ oad, err := ledger.LookupAgreement(0, addrOnline)
+ require.NoError(t, err)
+ require.NotEmpty(t, oad)
+ ad, _, _, err := ledger.LookupLatest(addrOnline)
+ require.NoError(t, err)
+ require.NotEmpty(t, ad)
+ require.Equal(t, oad, ad.OnlineAccountData())
+
+ require.NoError(t, err)
+ oad, err = ledger.LookupAgreement(0, addrOffline)
+ require.NoError(t, err)
+ require.Empty(t, oad)
+ ad, _, _, err = ledger.LookupLatest(addrOffline)
+ require.NoError(t, err)
+ require.NotEmpty(t, ad)
+ require.Equal(t, oad, ad.OnlineAccountData())
+}
+
func BenchmarkLedgerStartup(b *testing.B) {
log := logging.TestingLog(b)
tmpDir, err := ioutil.TempDir(os.TempDir(), "BenchmarkLedgerStartup")
diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go
index b452b9548..5f141cdbb 100644
--- a/ledger/ledgercore/accountdata.go
+++ b/ledger/ledgercore/accountdata.go
@@ -152,6 +152,11 @@ func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (m
// OnlineAccountData calculates the online account data given an AccountData, by adding the rewards.
func (u *AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLevel uint64) basics.OnlineAccountData {
+ if u.Status != basics.Online {
+ // if the account is not Online and agreement requests it for some reason, clear it out
+ return basics.OnlineAccountData{}
+ }
+
microAlgos, _, _ := basics.WithUpdatedRewards(
proto, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel,
)
diff --git a/ledger/ledgercore/accountresource.go b/ledger/ledgercore/accountresource.go
index b49a7e5eb..5ccb1e53d 100644
--- a/ledger/ledgercore/accountresource.go
+++ b/ledger/ledgercore/accountresource.go
@@ -40,31 +40,38 @@ type AppResource struct {
AppParams *basics.AppParams
}
-// AssignAccountResourceToAccountData assignes the Asset/App params/holdings contained
+// AssignAccountResourceToAccountData assigns the Asset/App params/holdings contained
// in the AccountResource to the given basics.AccountData, creating maps if necessary.
-func AssignAccountResourceToAccountData(cindex basics.CreatableIndex, resource AccountResource, ad *basics.AccountData) {
+// Returns true if the AccountResource contained a new or updated resource,
+// and false if the AccountResource contained no changes (indicating the resource was deleted).
+func AssignAccountResourceToAccountData(cindex basics.CreatableIndex, resource AccountResource, ad *basics.AccountData) (assigned bool) {
if resource.AssetParams != nil {
if ad.AssetParams == nil {
ad.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
}
ad.AssetParams[basics.AssetIndex(cindex)] = *resource.AssetParams
+ assigned = true
}
if resource.AssetHolding != nil {
if ad.Assets == nil {
ad.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
}
ad.Assets[basics.AssetIndex(cindex)] = *resource.AssetHolding
+ assigned = true
}
if resource.AppParams != nil {
if ad.AppParams == nil {
ad.AppParams = make(map[basics.AppIndex]basics.AppParams)
}
ad.AppParams[basics.AppIndex(cindex)] = *resource.AppParams
+ assigned = true
}
if resource.AppLocalState != nil {
if ad.AppLocalStates == nil {
ad.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState)
}
ad.AppLocalStates[basics.AppIndex(cindex)] = *resource.AppLocalState
+ assigned = true
}
+ return
}
diff --git a/ledger/tracker.go b/ledger/tracker.go
index ece28a79e..ca3bf894d 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -322,16 +322,15 @@ func (tr *trackerRegistry) committedUpTo(rnd basics.Round) basics.Round {
}
func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round) {
- tr.mu.RLock()
- dbRound := tr.dbRound
- tr.mu.RUnlock()
-
dcc := &deferredCommitContext{
deferredCommitRange: deferredCommitRange{
lookback: maxLookback,
},
}
cdr := &dcc.deferredCommitRange
+
+ tr.mu.RLock()
+ dbRound := tr.dbRound
for _, lt := range tr.trackers {
base := cdr.oldBase
offset := cdr.offset
@@ -351,8 +350,6 @@ func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round)
} else {
dcc = nil
}
-
- tr.mu.RLock()
// If we recently flushed, wait to aggregate some more blocks.
// ( unless we're creating a catchpoint, in which case we want to flush it right away
// so that all the instances of the catchpoint would contain exactly the same data )
diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go
index 731772a24..fe212ad9a 100644
--- a/ledger/tracker_test.go
+++ b/ledger/tracker_test.go
@@ -18,12 +18,18 @@ package ledger
import (
"bytes"
+ "context"
+ "database/sql"
+ "sync"
"testing"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
@@ -31,6 +37,13 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
+// commitRoundNext schedules a commit with as many rounds as possible
+func commitRoundNext(l *Ledger) {
+ // maxAcctLookback := l.trackers.cfg.MaxAcctLookback
+ maxAcctLookback := 320
+ commitRoundLookback(basics.Round(maxAcctLookback), l)
+}
+
// TestTrackerScheduleCommit checks catchpointTracker.produceCommittingTask does not increase commit offset relative
// to the value set by accountUpdates
func TestTrackerScheduleCommit(t *testing.T) {
@@ -123,3 +136,163 @@ func TestTrackerScheduleCommit(t *testing.T) {
dc := <-ml.trackers.deferredCommits
a.Equal(expectedOffset, dc.offset)
}
+
+type producePrepareBlockingTracker struct {
+ produceReleaseLock chan struct{}
+ prepareCommitEntryLock chan struct{}
+ prepareCommitReleaseLock chan struct{}
+ cancelTasks bool
+}
+
+// loadFromDisk is not implemented in the blockingTracker.
+func (bt *producePrepareBlockingTracker) loadFromDisk(ledgerForTracker, basics.Round) error {
+ return nil
+}
+
+// newBlock is not implemented in the blockingTracker.
+func (bt *producePrepareBlockingTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
+}
+
+// committedUpTo is a no-op in the blockingTracker; it always reports round 0.
+func (bt *producePrepareBlockingTracker) committedUpTo(committedRnd basics.Round) (minRound, lookback basics.Round) {
+ return 0, basics.Round(0)
+}
+
+func (bt *producePrepareBlockingTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ if bt.cancelTasks {
+ return nil
+ }
+
+ <-bt.produceReleaseLock
+ return dcr
+}
+
+// prepareCommit signals entry and then blocks until prepareCommitReleaseLock is released, for testing.
+func (bt *producePrepareBlockingTracker) prepareCommit(*deferredCommitContext) error {
+ bt.prepareCommitEntryLock <- struct{}{}
+ <-bt.prepareCommitReleaseLock
+ return nil
+}
+
+// commitRound is not used by the blockingTracker
+func (bt *producePrepareBlockingTracker) commitRound(context.Context, *sql.Tx, *deferredCommitContext) error {
+ return nil
+}
+
+func (bt *producePrepareBlockingTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+// postCommitUnlocked is a no-op in the blockingTracker.
+func (bt *producePrepareBlockingTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
+}
+
+// handleUnorderedCommit is not used by the blockingTracker
+func (bt *producePrepareBlockingTracker) handleUnorderedCommit(*deferredCommitContext) {
+}
+
+// close is not used by the blockingTracker
+func (bt *producePrepareBlockingTracker) close() {
+}
+
+func (bt *producePrepareBlockingTracker) reset() {
+ bt.prepareCommitEntryLock = make(chan struct{})
+ bt.prepareCommitReleaseLock = make(chan struct{})
+ bt.prepareCommitReleaseLock = make(chan struct{})
+ bt.cancelTasks = false
+}
+
+// TestTrackerDbRoundDataRace checks for dbRound data race
+// when commit scheduling relies on dbRound from the tracker registry but tracker's deltas
+// are used in calculations
+// 1. Add say 128 + MaxAcctLookback (MaxLookback) blocks and commit
+// 2. Add 2*MaxLookback blocks without committing
+// 3. Set a block in prepareCommit, and initiate the commit
+// 4. Set a block in produceCommittingTask, add a new block and resume the commit
+// 5. Resume produceCommittingTask
+// 6. The data race and panic happen in the block queue syncer thread
+func TestTrackerDbRoundDataRace(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Skip("For manual run when touching ledger locking")
+
+ a := require.New(t)
+
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 1)
+ const inMem = true
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Warn)
+ cfg := config.GetDefaultLocal()
+ ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
+ a.NoError(err, "could not open ledger")
+ defer ledger.Close()
+
+ stallingTracker := &producePrepareBlockingTracker{
+ // produceEntryLock: make(chan struct{}, 10),
+ produceReleaseLock: make(chan struct{}),
+ prepareCommitEntryLock: make(chan struct{}, 10),
+ prepareCommitReleaseLock: make(chan struct{}),
+ }
+ ledger.trackerMu.Lock()
+ ledger.trackers.mu.Lock()
+ ledger.trackers.trackers = append([]ledgerTracker{stallingTracker}, ledger.trackers.trackers...)
+ ledger.trackers.mu.Unlock()
+ ledger.trackerMu.Unlock()
+
+ close(stallingTracker.produceReleaseLock)
+ close(stallingTracker.prepareCommitReleaseLock)
+
+ targetRound := basics.Round(128) * 5
+ blk := genesisInitState.Block
+ for i := basics.Round(0); i < targetRound-1; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ err := ledger.AddBlock(blk, agreement.Certificate{})
+ a.NoError(err)
+ }
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ err = ledger.AddBlock(blk, agreement.Certificate{})
+ a.NoError(err)
+ commitRoundNext(ledger)
+ ledger.trackers.waitAccountsWriting()
+ lookback := 320
+ // lookback := cfg.MaxAcctLookback
+ a.Equal(targetRound-basics.Round(lookback), ledger.trackers.dbRound)
+
+ // build up some non-committed queue
+ stallingTracker.cancelTasks = true
+ for i := targetRound; i < 2*targetRound; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ err := ledger.AddBlock(blk, agreement.Certificate{})
+ a.NoError(err)
+ }
+ ledger.WaitForCommit(2*targetRound - 1)
+
+ stallingTracker.reset()
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ commitRoundNext(ledger)
+ wg.Done()
+ }()
+
+ <-stallingTracker.prepareCommitEntryLock
+ stallingTracker.produceReleaseLock = make(chan struct{})
+
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ err = ledger.AddBlock(blk, agreement.Certificate{})
+ a.NoError(err)
+ // the notifyCommit -> committedUpTo -> scheduleCommit chain
+ // is called right after the cond var, so wait until that moment
+ ledger.WaitForCommit(2 * targetRound)
+
+ // let the commit to complete
+ close(stallingTracker.prepareCommitReleaseLock)
+ wg.Wait()
+
+ // unblock the notifyCommit (scheduleCommit) goroutine
+ stallingTracker.cancelTasks = true
+ close(stallingTracker.produceReleaseLock)
+}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index a0107ba77..2fa488ec9 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -687,7 +687,7 @@ func (c *Client) RawAccountApplicationInformation(accountAddress string, applica
return
}
-// AccountAssetInformation gets account information about a given app.
+// AccountAssetInformation gets account information about a given asset.
func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64) (resp generatedV2.AccountAssetResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
@@ -696,7 +696,7 @@ func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64)
return
}
-// RawAccountAssetInformation gets account information about a given app.
+// RawAccountAssetInformation gets account information about a given asset.
func (c *Client) RawAccountAssetInformation(accountAddress string, assetID uint64) (accountResource modelV2.AccountAssetModel, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
diff --git a/libgoal/participation.go b/libgoal/participation.go
index d4b897d00..2dbbbde98 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -127,18 +127,6 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid)
- // Get the current protocol for ephemeral key parameters
- stat, err := c.Status()
- if err != nil {
- return
- }
-
- proto, ok := c.consensus[protocol.ConsensusVersion(stat.LastVersion)]
- if !ok {
- err = fmt.Errorf("consensus protocol %s not supported", stat.LastVersion)
- return
- }
-
// If output directory wasn't specified, store it in the current ledger directory.
if outDir == "" {
// Get the GenesisID for use in the participation key path
@@ -170,7 +158,7 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
}
if keyDilution == 0 {
- keyDilution = proto.DefaultKeyDilution
+ keyDilution = 1 + uint64(math.Sqrt(float64(lastRound-firstRound)))
}
// Fill the database with new participation keys
diff --git a/logging/telemetry.go b/logging/telemetry.go
index 2ed18b8e0..6ed62bf40 100644
--- a/logging/telemetry.go
+++ b/logging/telemetry.go
@@ -109,14 +109,12 @@ func ReadTelemetryConfigOrDefault(dataDir string, genesisID string) (cfg Telemet
configPath, err = config.GetConfigFilePath(TelemetryConfigFilename)
if err != nil {
- // In this case we don't know what to do since we couldn't
- // create the directory. Just create an ephemeral config.
- cfg = createTelemetryConfig()
- return
+ // If the path could not be opened do nothing, the IsNotExist error
+ // is handled below.
+ } else {
+ // Load the telemetry from the default config path
+ cfg, err = LoadTelemetryConfig(configPath)
}
-
- // Load the telemetry from the default config path
- cfg, err = LoadTelemetryConfig(configPath)
}
// If there was some error loading the configuration from the config path...
diff --git a/logging/telemetry_test.go b/logging/telemetry_test.go
index cd40cc512..a8a913831 100644
--- a/logging/telemetry_test.go
+++ b/logging/telemetry_test.go
@@ -17,6 +17,7 @@
package logging
import (
+ "encoding/json"
"fmt"
"os"
"testing"
@@ -213,6 +214,44 @@ func TestDetails(t *testing.T) {
a.Equal(details, data[0]["details"])
}
+func TestHeartbeatDetails(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ f := makeTelemetryTestFixture(logrus.InfoLevel)
+
+ var hb telemetryspec.HeartbeatEventDetails
+ hb.Info.Version = "v2"
+ hb.Info.VersionNum = "1234"
+ hb.Info.Channel = "alpha"
+ hb.Info.Branch = "br0"
+ hb.Info.CommitHash = "abcd"
+ hb.Metrics = map[string]float64{
+ "Hello": 38.8,
+ }
+ f.telem.logEvent(f.l, telemetryspec.ApplicationState, telemetryspec.HeartbeatEvent, hb)
+
+ data := f.hookData()
+ a.NotNil(data)
+ a.Len(data, 1)
+ a.Equal(hb, data[0]["details"])
+
+ // assert JSON serialization is backwards compatible
+ js, err := json.Marshal(data[0])
+ a.NoError(err)
+ var unjs map[string]interface{}
+ a.NoError(json.Unmarshal(js, &unjs))
+ a.Contains(unjs, "details")
+ ev := unjs["details"].(map[string]interface{})
+ Metrics := ev["Metrics"].(map[string]interface{})
+ m := ev["m"].(map[string]interface{})
+ a.Equal("v2", Metrics["version"].(string))
+ a.Equal("1234", Metrics["version-num"].(string))
+ a.Equal("alpha", Metrics["channel"].(string))
+ a.Equal("br0", Metrics["branch"].(string))
+ a.Equal("abcd", Metrics["commit-hash"].(string))
+ a.InDelta(38.8, m["Hello"].(float64), 0.01)
+}
+
type testMetrics struct {
val string
}
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index f721fd7bd..dcd3d231c 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -43,7 +43,14 @@ const HeartbeatEvent Event = "Heartbeat"
// HeartbeatEventDetails contains details for the HeartbeatEvent
type HeartbeatEventDetails struct {
- Metrics map[string]string
+ Info struct {
+ Version string `json:"version"`
+ VersionNum string `json:"version-num"`
+ Channel string `json:"channel"`
+ Branch string `json:"branch"`
+ CommitHash string `json:"commit-hash"`
+ } `json:"Metrics"` // backwards compatible name
+ Metrics map[string]float64 `json:"m"`
}
// CatchupStartEvent event
@@ -78,6 +85,8 @@ type BlockAcceptedEventDetails struct {
Round uint64
ValidatedAt time.Duration
PreValidated bool
+ PropBufLen uint64
+ VoteBufLen uint64
}
// TopAccountsEvent event
diff --git a/netdeploy/network.go b/netdeploy/network.go
index 007d90985..8f972625c 100644
--- a/netdeploy/network.go
+++ b/netdeploy/network.go
@@ -59,7 +59,7 @@ type Network struct {
// CreateNetworkFromTemplate uses the specified template to deploy a new private network
// under the specified root directory.
-func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, importKeys bool, nodeExitCallback nodecontrol.AlgodExitErrorCallback, consensus config.ConsensusProtocols) (Network, error) {
+func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, importKeys bool, nodeExitCallback nodecontrol.AlgodExitErrorCallback, consensus config.ConsensusProtocols, overrideDevMode bool) (Network, error) {
n := Network{
rootDir: rootDir,
nodeExitCallback: nodeExitCallback,
@@ -69,6 +69,12 @@ func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, impor
template, err := loadTemplate(templateFile)
if err == nil {
+ if overrideDevMode {
+ template.Genesis.DevMode = true
+ if len(template.Nodes) > 0 {
+ template.Nodes[0].IsRelay = false
+ }
+ }
err = template.Validate()
}
if err != nil {
diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go
index 6f4cd72ad..dc485879e 100644
--- a/netdeploy/networkTemplate.go
+++ b/netdeploy/networkTemplate.go
@@ -74,7 +74,16 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin
nodeDir := filepath.Join(targetFolder, cfg.Name)
err = os.Mkdir(nodeDir, os.ModePerm)
if err != nil {
- return
+ if !os.IsExist(err) {
+ return
+ }
+
+ // allow some flexibility around pre-existing directories to
+ // support docker and pre-mounted volumes.
+ if !util.IsEmpty(nodeDir) {
+ err = fmt.Errorf("duplicate node directory detected: %w", err)
+ return
+ }
}
_, err = util.CopyFile(genesisFile, filepath.Join(nodeDir, genesisFileName))
@@ -212,6 +221,11 @@ func (t NetworkTemplate) Validate() error {
if len(t.Nodes) > 1 && countRelayNodes(t.Nodes) == 0 {
return fmt.Errorf("invalid template: at least one relay is required when more than a single node presents")
}
+
+ if t.Genesis.DevMode && len(t.Nodes) != 1 {
+ return fmt.Errorf("invalid template: DevMode should only have a single node")
+ }
+
return nil
}
diff --git a/network/dialer.go b/network/dialer.go
index b674d6e53..8d7c18aaa 100644
--- a/network/dialer.go
+++ b/network/dialer.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/algorand/go-algorand/tools/network/dnssec"
+ "github.com/algorand/go-algorand/util"
)
type netDialer interface {
@@ -79,7 +80,7 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.
select {
case <-ctx.Done():
return nil, ctx.Err()
- case <-time.After(waitTime):
+ case <-util.NanoAfter(waitTime):
}
}
conn, err := d.innerDialContext(ctx, network, address)
diff --git a/network/rateLimitingTransport.go b/network/rateLimitingTransport.go
index 2ec611865..88c6fec6b 100644
--- a/network/rateLimitingTransport.go
+++ b/network/rateLimitingTransport.go
@@ -20,6 +20,8 @@ import (
"errors"
"net/http"
"time"
+
+ "github.com/algorand/go-algorand/util"
)
// rateLimitingTransport is the transport for execute a single HTTP transaction, obtaining the Response for a given Request.
@@ -57,17 +59,18 @@ func makeRateLimitingTransport(phonebook Phonebook, queueingTimeout time.Duratio
func (r *rateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response, err error) {
var waitTime time.Duration
var provisionalTime time.Time
- queueingTimedOut := time.After(r.queueingTimeout)
+ queueingDeadline := time.Now().Add(r.queueingTimeout)
for {
_, waitTime, provisionalTime = r.phonebook.GetConnectionWaitTime(req.Host)
if waitTime == 0 {
break // break out of the loop and proceed to the connection
}
- select {
- case <-time.After(waitTime):
- case <-queueingTimedOut:
- return nil, ErrConnectionQueueingTimeout
+ waitDeadline := time.Now().Add(waitTime)
+ if waitDeadline.Before(queueingDeadline) {
+ util.NanoSleep(waitTime)
+ continue
}
+ return nil, ErrConnectionQueueingTimeout
}
res, err = r.innerTransport.RoundTrip(req)
r.phonebook.UpdateConnectionTime(req.Host, provisionalTime)
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 8bde3040d..59de32eef 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -48,6 +48,7 @@ import (
"github.com/algorand/go-algorand/protocol"
tools_network "github.com/algorand/go-algorand/tools/network"
"github.com/algorand/go-algorand/tools/network/dnssec"
+ "github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/metrics"
)
@@ -1274,7 +1275,7 @@ func (wn *WebsocketNetwork) broadcastThread() {
}
}
select {
- case <-time.After(sleepDuration):
+ case <-util.NanoAfter(sleepDuration):
if (request != nil) && time.Now().After(requestDeadline) {
// message time have elapsed.
return true
diff --git a/node/node.go b/node/node.go
index 40f7679fe..f173853d4 100644
--- a/node/node.go
+++ b/node/node.go
@@ -511,7 +511,17 @@ func (node *AlgorandFullNode) BroadcastSignedTxGroup(txgroup []transactions.Sign
node.mu.Unlock()
}()
}
+ return node.broadcastSignedTxGroup(txgroup)
+}
+
+// BroadcastInternalSignedTxGroup broadcasts a transaction group that has already been signed.
+// It is originated internally, and in DevMode, it will not advance the round.
+func (node *AlgorandFullNode) BroadcastInternalSignedTxGroup(txgroup []transactions.SignedTxn) (err error) {
+ return node.broadcastSignedTxGroup(txgroup)
+}
+// broadcastSignedTxGroup broadcasts a transaction group that has already been signed.
+func (node *AlgorandFullNode) broadcastSignedTxGroup(txgroup []transactions.SignedTxn) (err error) {
lastRound := node.ledger.Latest()
var b bookkeeping.BlockHeader
b, err = node.ledger.BlockHdr(lastRound)
@@ -630,7 +640,7 @@ func (node *AlgorandFullNode) GetPendingTransaction(txID transactions.Txid) (res
}
found = true
- // Keep looking in the ledger..
+ // Keep looking in the ledger.
}
var maxLife basics.Round
@@ -641,10 +651,17 @@ func (node *AlgorandFullNode) GetPendingTransaction(txID transactions.Txid) (res
} else {
node.log.Errorf("node.GetPendingTransaction: cannot get consensus params for latest round %v", latest)
}
+
+ // Search from newest to oldest round up to the max life of a transaction.
maxRound := latest
minRound := maxRound.SubSaturate(maxLife)
- for r := minRound; r <= maxRound; r++ {
+ // Since we're using uint64, if the minRound is 0, we need to check for an underflow.
+ if minRound == 0 {
+ minRound++
+ }
+
+ for r := maxRound; r >= minRound; r-- {
tx, found, err := node.ledger.LookupTxid(txID, r)
if err != nil || !found {
continue
diff --git a/scripts/buildtools/go.mod b/scripts/buildtools/go.mod
index 054b4ca8a..cf29e9304 100644
--- a/scripts/buildtools/go.mod
+++ b/scripts/buildtools/go.mod
@@ -1,10 +1,10 @@
module github.com/algorand/go-algorand/scripts/buildtools
-go 1.14
+go 1.16
require (
- github.com/algorand/msgp v1.1.49
- github.com/algorand/oapi-codegen v1.3.5-algorand5
+ github.com/algorand/msgp v1.1.50
+ github.com/algorand/oapi-codegen v1.3.7
github.com/go-swagger/go-swagger v0.25.0
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/tools v0.1.1 // indirect
diff --git a/scripts/buildtools/go.sum b/scripts/buildtools/go.sum
index f7f042dd7..bf37828f3 100644
--- a/scripts/buildtools/go.sum
+++ b/scripts/buildtools/go.sum
@@ -21,10 +21,10 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/algorand/msgp v1.1.49 h1:YBFRcYZNsD2WgzXONvzFrjv1/887pWzJSx874VL4P6g=
-github.com/algorand/msgp v1.1.49/go.mod h1:oyDY2SIeM1bytVYJTL88nt9kVeEBC00Avyqcnyrq/ec=
-github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI=
-github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY=
+github.com/algorand/msgp v1.1.50 h1:Mvsjs5LCE6HsXXbwJXD8ol1Y+c+QMoFNM4j0CY+mFGo=
+github.com/algorand/msgp v1.1.50/go.mod h1:R5sJrW9krk4YwNo+rs82Kq6V55q/zNgACwWqt3sQBM4=
+github.com/algorand/oapi-codegen v1.3.7 h1:TdXeGljgrnLXSCGPdeY6g6+i/G0Rr5CkjBgUJY6ht48=
+github.com/algorand/oapi-codegen v1.3.7/go.mod h1:UvOtAiP3hc0M2GUKBnZVTjLe3HKGDKh6y9rs3e3JyOg=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
diff --git a/scripts/buildtools/install_buildtools.sh b/scripts/buildtools/install_buildtools.sh
index 630b99310..edd346b38 100755
--- a/scripts/buildtools/install_buildtools.sh
+++ b/scripts/buildtools/install_buildtools.sh
@@ -70,16 +70,14 @@ function install_go_module {
# Check for version to go.mod version
VERSION=$(get_go_version "$1")
- # TODO: When we switch to 1.16 this should be changed to use 'go install'
- # instead of 'go get': https://tip.golang.org/doc/go1.16#modules
if [ -z "$VERSION" ]; then
echo "Unable to install requested package '$1' (${MODULE}): no version listed in ${SCRIPTPATH}/go.mod"
exit 1
else
- OUTPUT=$(GO111MODULE=on go get "${MODULE}@${VERSION}" 2>&1)
+ OUTPUT=$(go install "${MODULE}@${VERSION}" 2>&1)
fi
if [ $? != 0 ]; then
- echo "error: executing \"go get ${MODULE}\" failed : ${OUTPUT}"
+ echo "error: executing \"go install ${MODULE}\" failed : ${OUTPUT}"
exit 1
fi
}
diff --git a/scripts/create_and_deploy_recipe.sh b/scripts/create_and_deploy_recipe.sh
index bca4cc808..3e1121277 100755
--- a/scripts/create_and_deploy_recipe.sh
+++ b/scripts/create_and_deploy_recipe.sh
@@ -20,6 +20,7 @@
# directory as the recipe file path.
set -e
+set -x
if [[ "${AWS_ACCESS_KEY_ID}" = "" || "${AWS_SECRET_ACCESS_KEY}" = "" ]]; then
echo "You need to export your AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY for this to work"
diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh
index a42b30709..1dd22eda4 100755
--- a/scripts/get_golang_version.sh
+++ b/scripts/get_golang_version.sh
@@ -11,9 +11,9 @@
# Our build task-runner `mule` will refer to this script and will automatically
# build a new image whenever the version number has been changed.
-BUILD=1.14.7
-MIN=1.14
-GO_MOD_SUPPORT=1.12
+BUILD=1.16.15
+MIN=1.16
+GO_MOD_SUPPORT=1.16
if [ "$1" = all ]
then
diff --git a/scripts/install_linux_deps.sh b/scripts/install_linux_deps.sh
index 78c42cd35..791cad080 100755
--- a/scripts/install_linux_deps.sh
+++ b/scripts/install_linux_deps.sh
@@ -5,7 +5,7 @@ set -e
DISTRIB=$ID
ARCH_DEPS="boost boost-libs expect jq autoconf shellcheck sqlite python-virtualenv"
-UBUNTU_DEPS="libtool libboost-math-dev expect jq autoconf shellcheck sqlite3 python3-venv"
+UBUNTU_DEPS="libtool libboost-math-dev expect jq autoconf shellcheck sqlite3 python3-venv build-essential"
FEDORA_DEPS="boost-devel expect jq autoconf ShellCheck sqlite python-virtualenv"
if [ "${DISTRIB}" = "arch" ]; then
diff --git a/scripts/travis/before_build.sh b/scripts/travis/before_build.sh
index b07efe33f..3dbb7339d 100755
--- a/scripts/travis/before_build.sh
+++ b/scripts/travis/before_build.sh
@@ -12,7 +12,6 @@ set -e
GOPATH=$(go env GOPATH)
export GOPATH
-export GO111MODULE=on
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
OS=$("${SCRIPTPATH}"/../ostype.sh)
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index b5d4c35f9..7cf8de405 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -36,6 +36,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/db"
)
@@ -138,7 +139,7 @@ func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64
if currentTps > float64(cfg.TxnPerSec) {
sleepSec := float64(totalSent)/float64(cfg.TxnPerSec) - localTimeDelta.Seconds()
sleepTime := time.Duration(int64(math.Round(sleepSec*1000))) * time.Millisecond
- time.Sleep(sleepTime)
+ util.NanoSleep(sleepTime)
}
}
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index c26981e61..71d787f79 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -36,6 +36,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util"
)
// CreatablesInfo has information about created assets, apps and opting in
@@ -882,7 +883,7 @@ func (pps *WorkerState) sendFromTo(
timeCredit -= took
if timeCredit > 0 {
time.Sleep(timeCredit)
- timeCredit = time.Duration(0)
+ timeCredit -= time.Since(now)
} else if timeCredit < -1000*time.Millisecond {
// cap the "time debt" to 1000 ms.
timeCredit = -1000 * time.Millisecond
@@ -1232,7 +1233,7 @@ func (t *throttler) maybeSleep(count int) {
desiredSeconds := float64(countsum) / t.xps
extraSeconds := desiredSeconds - dt.Seconds()
t.iterm += 0.1 * extraSeconds / float64(len(t.times))
- time.Sleep(time.Duration(int64(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times)))))
+ util.NanoSleep(time.Duration(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times))))
} else {
t.iterm *= 0.95
diff --git a/test/README.md b/test/README.md
index 15d82e07a..1b9f859d1 100644
--- a/test/README.md
+++ b/test/README.md
@@ -51,10 +51,4 @@ To run a specific test, run e2e.sh with -i interactive flag, and follow the inst
test/scripts/e2e.sh -i
```
-Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary.
-
-### Updating Indexer E2E test input
-
-Indexer `make e2e` runs tests using the output of go-algorand `e2e_subs` tests as input. The process for making new inputs available via S3 is manual.
-
-Test modifiers _must_ make a best-effort attempt to remember to upload new artifacts to S3 when modifying tests. Here's a step-by-step process overview: [https://github.com/algorand/indexer/blob/develop/misc/README.md](https://github.com/algorand/indexer/blob/develop/misc/README.md). \ No newline at end of file
+Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary. \ No newline at end of file
diff --git a/test/e2e-go/cli/goal/account_test.go b/test/e2e-go/cli/goal/account_test.go
index 67165a6d0..b01eb6fda 100644
--- a/test/e2e-go/cli/goal/account_test.go
+++ b/test/e2e-go/cli/goal/account_test.go
@@ -22,12 +22,15 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
const statusOffline = "[offline]"
const statusOnline = "[online]"
func TestAccountNew(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
@@ -54,6 +57,8 @@ func TestAccountNew(t *testing.T) {
}
func TestAccountNewDuplicateFails(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
@@ -70,6 +75,8 @@ func TestAccountNewDuplicateFails(t *testing.T) {
}
func TestAccountRename(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
@@ -101,6 +108,8 @@ func TestAccountRename(t *testing.T) {
// Importing an account multiple times should not be considered an error by goal
func TestAccountMultipleImportRootKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
diff --git a/test/e2e-go/cli/goal/clerk_test.go b/test/e2e-go/cli/goal/clerk_test.go
index 948eb1a61..851eb1913 100644
--- a/test/e2e-go/cli/goal/clerk_test.go
+++ b/test/e2e-go/cli/goal/clerk_test.go
@@ -24,9 +24,12 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
func TestClerkSendNoteEncoding(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
diff --git a/test/e2e-go/cli/goal/node_cleanup_test.go b/test/e2e-go/cli/goal/node_cleanup_test.go
index 8365adbe5..7ad3eaab0 100644
--- a/test/e2e-go/cli/goal/node_cleanup_test.go
+++ b/test/e2e-go/cli/goal/node_cleanup_test.go
@@ -23,9 +23,12 @@ import (
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
func TestGoalNodeCleanup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go
index 9b4143d43..a7dd6452e 100644
--- a/test/e2e-go/features/participation/participationRewards_test.go
+++ b/test/e2e-go/features/participation/participationRewards_test.go
@@ -358,7 +358,7 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1))
balanceOfRewardsPool, roundQueried := fixture.GetBalanceAndRound(rewardsAccount)
if roundQueried != rewardRecalcRound-1 {
- r.FailNow("got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
+ r.FailNow("", "got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
}
lastRoundBeforeRewardRecals, err := client.Block(rewardRecalcRound - 1)
r.NoError(err)
@@ -381,7 +381,7 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1))
balanceOfRewardsPool, roundQueried = fixture.GetBalanceAndRound(rewardsAccount)
if roundQueried != rewardRecalcRound-1 {
- r.FailNow("got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
+ r.FailNow("", "got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
}
lastRoundBeforeRewardRecals, err = client.Block(rewardRecalcRound - 1)
r.NoError(err)
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index 2d55d0b83..be10042fd 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -55,6 +55,7 @@ func TestAccountsCanSendMoney(t *testing.T) {
// this test checks that two accounts' balances stay up to date
// as they send each other money many times
func TestDevModeAccountsCanSendMoney(t *testing.T) {
+ partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
numberOfSends := 25
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index 38ddefbeb..e6e90b47c 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -93,7 +93,7 @@ func (f *LibGoalFixture) setup(test TestingTB, testName string, templateFile str
os.RemoveAll(f.rootDir)
templateFile = filepath.Join(f.testDataDir, templateFile)
importKeys := false // Don't automatically import root keys when creating folders, we'll import on-demand
- network, err := netdeploy.CreateNetworkFromTemplate("test", f.rootDir, templateFile, f.binDir, importKeys, f.nodeExitWithError, f.consensus)
+ network, err := netdeploy.CreateNetworkFromTemplate("test", f.rootDir, templateFile, f.binDir, importKeys, f.nodeExitWithError, f.consensus, false)
f.failOnError(err, "CreateNetworkFromTemplate failed: %v")
f.network = network
diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
index d91b780ae..43e51618e 100644
--- a/test/heapwatch/heapWatch.py
+++ b/test/heapwatch/heapWatch.py
@@ -253,7 +253,7 @@ class watcher:
for ad in self.they:
ad.get_blockinfo(snapshot_name, outdir=self.args.out)
if self.args.svg:
- logger.debug('snapped, processing...')
+ logger.debug('snapped, processing pprof...')
# make absolute and differential plots
for path, snappath in newsnapshots.items():
subprocess.call(['go', 'tool', 'pprof', '-sample_index=inuse_space', '-svg', '-output', snappath + '.inuse.svg', snappath])
@@ -278,7 +278,7 @@ def main():
ap.add_argument('--admin-token', default='', help='default algod admin-api token to use')
ap.add_argument('--tf-roles', default='relay', help='comma separated list of terraform roles to follow')
ap.add_argument('--tf-name-re', action='append', default=[], help='regexp to match terraform node names, may be repeated')
- ap.add_argument('--no-svg', dest='svg', default=True, action='store_false', help='do not automatically run `go tool pprof` to generate svg from collected data')
+ ap.add_argument('--svg', dest='svg', default=False, action='store_true', help='automatically run `go tool pprof` to generate performance profile svg from collected data')
ap.add_argument('-p', '--port', default='8580', help='algod port on each host in terraform-inventory')
ap.add_argument('-o', '--out', default=None, help='directory to write to')
ap.add_argument('--verbose', default=False, action='store_true')
diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml
index 2f4e3af80..eb585a3c9 100644
--- a/test/muleCI/mule.yaml
+++ b/test/muleCI/mule.yaml
@@ -13,6 +13,7 @@ agents:
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
+ - GOARCH=amd64
- name: cicd.centos.amd64
dockerFilePath: docker/build/cicd.centos.Dockerfile
image: algorand/go-algorand-ci-linux-centos
@@ -27,6 +28,20 @@ agents:
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
+ - name: cicd.centos8.amd64
+ dockerFilePath: docker/build/cicd.centos8.Dockerfile
+ image: algorand/go-algorand-ci-linux-centos8
+ version: scripts/configure_dev-deps.sh
+ arch: amd64
+ env:
+ - TRAVIS_BRANCH=${GIT_BRANCH}
+ - NETWORK=$NETWORK
+ - VERSION=$VERSION
+ - BUILD_NUMBER=$BUILD_NUMBER
+ - GOHOSTARCH=amd64
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ - ARCH=amd64
- name: cicd.ubuntu.arm64
dockerFilePath: docker/build/cicd.ubuntu.Dockerfile
image: algorand/go-algorand-ci-linux-ubuntu
@@ -41,11 +56,12 @@ agents:
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=arm64v8
- - name: cicd.alpine.arm
- dockerFilePath: docker/build/cicd.alpine.Dockerfile
+ - GOARCH=arm64
+ - name: cicd.ubuntu.arm
+ dockerFilePath: docker/build/cicd.ubuntu.Dockerfile
image: algorand/go-algorand-ci-linux
version: scripts/configure_dev-deps.sh
- arch: arm32v6
+ arch: arm32v7
env:
- TRAVIS_BRANCH=${GIT_BRANCH}
- NETWORK=$NETWORK
@@ -54,7 +70,8 @@ agents:
- GOHOSTARCH=arm
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- - ARCH=arm32v6
+ - ARCH=arm32v7
+ - GOARCH=armv6l
- name: docker-ubuntu
dockerFilePath: docker/build/docker.ubuntu.Dockerfile
image: algorand/go-algorand-docker-linux-ubuntu
@@ -96,12 +113,12 @@ tasks:
target: ci-build
- task: docker.Make
name: build.arm
- agent: cicd.alpine.arm
+ agent: cicd.ubuntu.arm
target: ci-build
- task: docker.Make
name: archive.amd64
- agent: cicd.centos.amd64
+ agent: cicd.centos8.amd64
target: archive
- task: docker.Make
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index a587bc201..0d4a63d7e 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -158,7 +158,30 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then
./timeout 200 ./e2e_basic_start_stop.sh
duration "e2e_basic_start_stop.sh"
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py}
+ echo "Current platform: ${E2E_PLATFORM}"
+
+ KEEP_TEMPS_CMD_STR=""
+
+ # If the platform is arm64, we want to pass "--keep-temps" into e2e_client_runner.py
+ # so that we can keep the temporary test artifact for use in the indexer e2e tests.
+ # The file is located at ${TEMPDIR}/net_done.tar.bz2
+ if [ $E2E_PLATFORM == "arm64" ]; then
+ KEEP_TEMPS_CMD_STR="--keep-temps"
+ fi
+
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${KEEP_TEMPS_CMD_STR} ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py}
+
+ # If the temporary artifact directory exists, then the test artifact needs to be created
+ if [ -d "${TEMPDIR}/net" ]; then
+ pushd "${TEMPDIR}" || exit 1
+ tar -j -c -f net_done.tar.bz2 --exclude node.log --exclude agreement.cdv net
+ rm -rf "${TEMPDIR}/net"
+ RSTAMP=$(TZ=UTC python -c 'import time; print("{:08x}".format(0xffffffff - int(time.time() - time.mktime((2020,1,1,0,0,0,-1,-1,-1)))))')
+ echo aws s3 cp --acl public-read "${TEMPDIR}/net_done.tar.bz2" s3://algorand-testdata/indexer/e2e4/"${RSTAMP}"/net_done.tar.bz2
+ aws s3 cp --acl public-read "${TEMPDIR}/net_done.tar.bz2" s3://algorand-testdata/indexer/e2e4/"${RSTAMP}"/net_done.tar.bz2
+ popd
+ fi
+
duration "parallel client runner"
for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
diff --git a/test/scripts/e2e_go_tests.sh b/test/scripts/e2e_go_tests.sh
index 560808a7c..b1ce3a355 100755
--- a/test/scripts/e2e_go_tests.sh
+++ b/test/scripts/e2e_go_tests.sh
@@ -6,7 +6,6 @@ set -e
set -o pipefail
export GOPATH=$(go env GOPATH)
-export GO111MODULE=on
# Needed for now because circleci doesn't use makefile yet
if [ -z "$(which gotestsum)" ]; then
diff --git a/test/scripts/e2e_subs/app-assets.sh b/test/scripts/e2e_subs/app-assets.sh
index 8b1fa6f5d..b582733d3 100755
--- a/test/scripts/e2e_subs/app-assets.sh
+++ b/test/scripts/e2e_subs/app-assets.sh
@@ -94,7 +94,7 @@ function asset-deposit {
}
function asset-optin {
- ${gcmd} asset send -a 0 "$@"
+ ${gcmd} asset optin "$@"
}
function clawback_addr {
@@ -175,7 +175,7 @@ appl "withdraw(uint64):void" --app-arg="int:800" --foreign-asset="$ASSETID" --fr
USER=$(${gcmd} account new | awk '{ print $6 }') #new account
${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$USER" #fund account
-asset-optin -f "$USER" -t "$USER" --assetid "$ASSETID" #opt in to asset
+asset-optin --assetid "$ASSETID" -a $USER #opt in to asset
# SET $USER as clawback address
${gcmd} asset config --manager $SMALL --assetid $ASSETID --new-clawback $USER
cb_addr=$(${gcmd} asset info --assetid $ASSETID | clawback_addr)
@@ -190,7 +190,7 @@ ${gcmd} asset send -f "$SMALL" -t "$USER" -a "1000" --assetid "$ASSETID" --clawb
USER2=$(${gcmd} account new | awk '{ print $6 }') #new account
${gcmd} clerk send -a 999000 -f "$ACCOUNT" -t "$USER2" #fund account
-asset-optin -f "$USER2" -t "$USER2" --assetid "$ASSETID" #opt in to asset
+asset-optin --assetid "$ASSETID" -a $USER2 #opt in to asset
# set $APPACCT as clawback address on asset
${gcmd} asset config --manager $SMALL --assetid $ASSETID --new-clawback $APPACCT
cb_addr=$(${gcmd} asset info --assetid $ASSETID | clawback_addr)
@@ -231,7 +231,7 @@ appl "create(uint64):void" --app-arg="int:1000000" --from="$SMALL"
# mint asset
APPASSETID=$(asset_ids "$APPACCT")
-asset-optin -f "$SMALL" -t "$SMALL" --assetid "$APPASSETID" #opt in to asset
+asset-optin --assetid "$APPASSETID" -a $SMALL #opt in to asset
appl "mint():void" --from="$SMALL" --foreign-asset="$APPASSETID" -o "$T/mint.tx"
payin 1000 -o "$T/pay1.tx"
cat "$T/mint.tx" "$T/pay1.tx" | ${gcmd} clerk group -i - -o "$T/group.tx"
diff --git a/test/scripts/e2e_subs/asset-misc.sh b/test/scripts/e2e_subs/asset-misc.sh
index 73c018eb8..866a214dc 100755
--- a/test/scripts/e2e_subs/asset-misc.sh
+++ b/test/scripts/e2e_subs/asset-misc.sh
@@ -31,9 +31,9 @@ ${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTC} --amount 1000000
${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTD} --amount 1000000
# opt in to asset
-${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNTB} -t ${ACCOUNTB} -a 0
-${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNTC} -t ${ACCOUNTC} -a 0
-${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNTD} -t ${ACCOUNTD} -a 0
+${gcmd} asset optin --assetid ${ASSET_ID} -a ${ACCOUNTB}
+${gcmd} asset optin --assetid ${ASSET_ID} -a ${ACCOUNTC}
+${gcmd} asset optin --assetid ${ASSET_ID} -a ${ACCOUNTD}
# fund asset
${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNT} -t ${ACCOUNTB} -a 1000
diff --git a/test/scripts/e2e_subs/assets-app-b.sh b/test/scripts/e2e_subs/assets-app-b.sh
index 63c1431a8..7aac4615d 100755
--- a/test/scripts/e2e_subs/assets-app-b.sh
+++ b/test/scripts/e2e_subs/assets-app-b.sh
@@ -3,7 +3,9 @@
#
# assets-app.sh and assets-app-b.sh both test the same TEAL app script, but in two separate parallelizeable chunks
-date '+assets-app-b start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
set -ex
set -o pipefail
@@ -30,7 +32,6 @@ wait $WA
wait $WB
wait $WC
-ZERO='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ'
SUPPLY=10000000
XFER1=1000
XFER2=42
@@ -44,12 +45,12 @@ ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
ERR_APP_OI_STR3='is not currently opted in'
ERR_APP_REJ_STR1='transaction rejected by ApprovalProgram'
-ERR_APP_REJ_STR2='TEAL runtime encountered err opcode'
+ERR_APP_REJ_STR2='err opcode executed'
ERR_APP_REJ_STR3='- would result negative'
### Reconfiguration, default-frozen, and clawback
-date '+assets-app wat4 %Y%m%d_%H%M%S'
+date "+$scriptname wat4 %Y%m%d_%H%M%S"
# create frozen
APP_ID=$(${gcmd} app interact execute --header ${DIR}/asa.json --from $CREATOR --approval-prog ${DIR}/asa_approve.teal --clear-prog ${DIR}/asa_clear.teal create --manager $MANAGER --reserve $CREATOR --freezer $MANAGER --clawback $MANAGER --supply $SUPPLY --default-frozen 1 | grep "$APP_CREATED_STR" | cut -d ' ' -f 6)
@@ -57,29 +58,28 @@ APP_ID=$(${gcmd} app interact execute --header ${DIR}/asa.json --from $CREATOR -
qcmd="${gcmd} app interact query --header ${DIR}/asa.json --app-id $APP_ID"
xcmd="${gcmd} app interact execute --header ${DIR}/asa.json --app-id $APP_ID"
+function assertContains {
+ if [[ $1 != *"$2"* ]]; then
+ echo "$1" does not contain "$2"
+ date "+$scriptname FAIL $3 %Y%m%d_%H%M%S"
+ false
+ fi
+}
+
# destroy bad manager F
RES=$(${xcmd} --from $CREATOR destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL non-manager should not be able to delete asset %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-manager should not be able to delete asset"
# optin alice
${xcmd} --from $ALICE opt-in
# xfer1 F
RES=$(${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to receive %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to receive"
# bad unfreeze F
RES=$(${xcmd} --from $ALICE freeze --frozen 0 --target $ALICE 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL non-freezer should not be able to unfreeze account %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-freezer should not be able to unfreeze account"
# set freezer alice
${xcmd} --from $MANAGER reconfigure --manager $MANAGER --reserve $CREATOR --freezer $ALICE --clawback $MANAGER
@@ -95,19 +95,13 @@ ${xcmd} --from $ALICE freeze --frozen 1 --target $ALICE
# xfer1 F
RES=$(${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL re-frozen account should not be able to receive %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "re-frozen account should not be able to receive"
-date '+assets-app wat6 %Y%m%d_%H%M%S'
+date "+$scriptname wat6 %Y%m%d_%H%M%S"
# closeout F
RES=$(${xcmd} --from $ALICE close-out --close-to $CREATOR 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to closeout w/o clear %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to closeout w/o clear"
# clear alice
${xcmd} --from $ALICE clear
@@ -120,10 +114,7 @@ ${xcmd} --from $MANAGER clawback --sender $CREATOR --receiver $BOB --amount $XFE
# destroy F
RES=$(${xcmd} --from $MANAGER destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL should not be able to delete asset while outstanding holdings exist %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "should not be able to delete asset while outstanding holdings exist"
# clawback
${xcmd} --from $MANAGER clawback --sender $BOB --receiver $CREATOR --amount $XFER1
@@ -134,4 +125,4 @@ ${xcmd} --from $MANAGER destroy
# clear bob
${xcmd} --from $BOB clear
-date '+assets-app-b done %Y%m%d_%H%M%S'
+date "+$scriptname done %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/assets-app.sh b/test/scripts/e2e_subs/assets-app.sh
index a295de681..1fa2d8f5c 100755
--- a/test/scripts/e2e_subs/assets-app.sh
+++ b/test/scripts/e2e_subs/assets-app.sh
@@ -3,7 +3,9 @@
#
# assets-app.sh and assets-app-b.sh both test the same TEAL app script, but in two separate parallelizeable chunks
-date '+assets-app start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
set -ex
set -o pipefail
@@ -30,7 +32,6 @@ wait $WA
wait $WB
wait $WC
-ZERO='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ'
SUPPLY=10000000
XFER1=1000
XFER2=42
@@ -44,7 +45,7 @@ ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
ERR_APP_OI_STR3='is not currently opted in'
ERR_APP_REJ_STR1='transaction rejected by ApprovalProgram'
-ERR_APP_REJ_STR2='TEAL runtime encountered err opcode'
+ERR_APP_REJ_STR2='err opcode executed'
ERR_APP_REJ_STR3='- would result negative'
### Basic reading, creation, deletion, transfers, and freezing
@@ -55,27 +56,32 @@ APP_ID=$(${gcmd} app interact execute --header ${DIR}/asa.json --from $CREATOR -
qcmd="${gcmd} app interact query --header ${DIR}/asa.json --app-id $APP_ID"
xcmd="${gcmd} app interact execute --header ${DIR}/asa.json --app-id $APP_ID"
-date '+assets-app created %Y%m%d_%H%M%S'
+date "+$scriptname created %Y%m%d_%H%M%S"
+
+function assertContains {
+ if [[ $1 != *"$2"* ]]; then
+ echo "$1" does not contain "$2"
+ date "+$scriptname FAIL $3 %Y%m%d_%H%M%S"
+ false
+ fi
+}
# read global
RES=$(${qcmd} total-supply)
if [[ $RES != $SUPPLY ]]; then
- date "+assets-app FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} creator-balance)
if [[ $RES != $SUPPLY ]]; then
- date "+assets-app FAIL expected creator to begin with $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected creator to begin with $SUPPLY %Y%m%d_%H%M%S"
false
fi
# read alice F
RES=$(${qcmd} --from $ALICE balance 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR1"* ]]; then
- date '+assets-app FAIL expected read of non-opted in account to fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR1" "expected read of non-opted in account to fail"
# optin alice
${xcmd} --from $ALICE opt-in
@@ -83,24 +89,21 @@ ${xcmd} --from $ALICE opt-in
# read alice
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != '0' ]]; then
- date '+assets-app FAIL expected opted-in account to start with no balance %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opted-in account to start with no balance %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} --from $ALICE frozen)
if [[ $RES != '0' ]]; then
- date '+assets-app FAIL expected opted-in account to be non-frozen %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opted-in account to be non-frozen %Y%m%d_%H%M%S"
false
fi
-date '+assets-app wat1 %Y%m%d_%H%M%S'
+date "+$scriptname wat1 %Y%m%d_%H%M%S"
# xfer0 creator -> bob F
RES=$(${xcmd} --from $CREATOR transfer --receiver $BOB --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR2"* ]]; then
- date '+assets-app FAIL transfer succeeded on account which has not opted in %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR2" "transfer succeeded on account which has not opted in"
# xfer1 (2) creator -> alice
${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 &
@@ -113,35 +116,26 @@ wait $WB
# read alice
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != $(( $XFER1 + $XFER1 )) ]]; then
- date "+assets-app FAIL transfer recipient does not have $XFER1 %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL transfer recipient does not have $XFER1 %Y%m%d_%H%M%S"
false
fi
# destroy F
RES=$(${xcmd} --from $CREATOR destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL should not be able to destroy asset while outstanding holdings exist %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "should not be able to destroy asset while outstanding holdings exist"
# freeze
${xcmd} --from $CREATOR freeze --frozen 1 --target $ALICE
# xfer2 alice -> creator F
RES=$(${xcmd} --from $ALICE transfer --receiver $CREATOR --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to send %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to send"
-date '+assets-app wat2 %Y%m%d_%H%M%S'
+date "+$scriptname wat2 %Y%m%d_%H%M%S"
# xfer1 creator -> alice F
RES=$(${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to receive %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to receive"
# unfreeze
${xcmd} --from $CREATOR freeze --frozen 0 --target $ALICE
@@ -151,30 +145,21 @@ ${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1
# xfer5 alice |-> alice F
RES=$(${xcmd} --from $ALICE close-out --close-to $ALICE 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL closing to self not permitted %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "closing to self not permitted"
# optin bob
${xcmd} --from $BOB opt-in
# xfer3 alice -> bob overdraw F
RES=$(${xcmd} --from $ALICE transfer --receiver $BOB --amount $XFER3 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date "+assets-app FAIL overdraws are not permitted %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "overdraws are not permitted"
# xfer4 alice -> creator |-> bob
${xcmd} --from $ALICE close-out --receiver $CREATOR --amount $XFER4 --close-to $BOB
# xfer5 bob |-> alice F
RES=$(${xcmd} --from $BOB close-out --close-to $ALICE 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR2"* ]]; then
- date "+assets-app FAIL transfer succeeded on account which has closed out %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR2" "transfer succeeded on account which has closed out"
# optin alice
${xcmd} --from $ALICE opt-in
@@ -187,35 +172,23 @@ ${xcmd} --from $ALICE clear
# clear alice F
RES=$(${xcmd} --from $ALICE clear 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR3"* ]]; then
- date "+assets-app FAIL should not be able to clear asset holding twice %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR3" "should not be able to clear asset holding twice"
# destroy
${xcmd} --from $CREATOR destroy
-date '+assets-app wat3 %Y%m%d_%H%M%S'
+date "+$scriptname wat3 %Y%m%d_%H%M%S"
# destroy F
RES=$(${xcmd} --from $CREATOR destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_CL_STR"* ]]; then
- date '+assets-app FAIL second deletion of application should fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_CL_STR" "second deletion of application should fail"
# optin alice F
RES=$(${xcmd} --from $ALICE opt-in 2>&1 || true)
-if [[ $RES != *"$ERR_APP_CL_STR"* ]]; then
- date '+assets-app FAIL optin of deleted application should fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_CL_STR" "optin of deleted application should fail"
# read global F
RES=$(${qcmd} total-supply 2>&1 || true)
-if [[ $RES != *"$ERR_APP_NE_STR"* ]]; then
- date '+assets-app FAIL read global of deleted application should fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_NE_STR" "read global of deleted application should fail"
-date '+assets-app done %Y%m%d_%H%M%S'
+date "+$scriptname done %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/create_destroy_optin_optout.sh b/test/scripts/e2e_subs/create_destroy_optin_optout.sh
index 281e78c22..79f683ee4 100755
--- a/test/scripts/e2e_subs/create_destroy_optin_optout.sh
+++ b/test/scripts/e2e_subs/create_destroy_optin_optout.sh
@@ -43,9 +43,9 @@ ${gcmd} clerk send -a 100000 -f $ACCOUNT -t $ACCOUNTD
ASSET_ID=$(${gcmd} asset create --creator ${ACCOUNT} --name cdcoin --unitname cdc --total 1337 | grep "Created" | awk '{ sub("\r", "", $NF); print $NF }')
# Asset - optin / optout / optin / optout
-${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB
+${gcmd} asset optin --assetid $ASSET_ID -a $ACCOUNTB
${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB -c $ACCOUNT
-${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB
+${gcmd} asset optin --assetid $ASSET_ID -a $ACCOUNTB
${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB -c $ACCOUNT
# Destroy the ASA
@@ -57,9 +57,9 @@ ${gcmd} asset destroy --manager ${ACCOUNT} --assetid ${ASSET_ID}
ASSET_ID=$(${gcmd} asset create --creator ${ACCOUNT} --name cdcoin --unitname cdc --total 1337 | grep "Created" | awk '{ sub("\r", "", $NF); print $NF }')
# Asset - optin / optout / optin
-${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB
+${gcmd} asset optin --assetid $ASSET_ID -a $ACCOUNTB
${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB -c $ACCOUNT
-${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB
+${gcmd} asset optin --assetid $ASSET_ID -a $ACCOUNTB
######################################
# Create an application, then delete #
diff --git a/test/scripts/e2e_subs/dex.sh b/test/scripts/e2e_subs/dex.sh
index 4881d073f..8ada24ace 100755
--- a/test/scripts/e2e_subs/dex.sh
+++ b/test/scripts/e2e_subs/dex.sh
@@ -26,7 +26,7 @@ $gcmd clerk send -a 10000000 -t "${ACCT_ACTOR}" -f "${ACCOUNT}"
echo "Created and funded accounts: creator ${ACCT_CREATOR}, actor ${ACCT_ACTOR}"
ASSETID=$(${gcmd} asset create --creator "${ACCT_CREATOR}" --total 100000 --unitname STOK --decimals 0 | grep "Created asset with asset index" | rev | cut -d ' ' -f 1 | rev)
-${gcmd} asset send -a 0 -f "${ACCT_ACTOR}" -t "${ACCT_ACTOR}" --creator "${ACCT_CREATOR}" --assetid "${ASSETID}"
+${gcmd} asset optin -a "${ACCT_ACTOR}" --creator "${ACCT_CREATOR}" --assetid "${ASSETID}"
echo "Created asset ${ASSETID}"
APPID=$(${gcmd} app create --creator "${ACCT_CREATOR}" --approval-prog "${DIR}/tealprogs/dex.teal" --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 16 --clear-prog <(printf '#pragma version 2\nint 1') | grep Created | awk '{ print $6 }')
diff --git a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
index 7978de676..b87b8ae1c 100755
--- a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
+++ b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
@@ -15,7 +15,6 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTD=$(${gcmd} account new|awk '{ print $6 }')
-ZERO_ADDRESS=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ
LEASE=uImiLf+mqOqs0BFsqIUHBh436N/z964X50e3P9Ii4ac=
# Fund ACCOUNTB
diff --git a/test/scripts/e2e_subs/e2e-app-real-assets-round.sh b/test/scripts/e2e_subs/e2e-app-real-assets-round.sh
index 19314fa8d..25a543300 100755
--- a/test/scripts/e2e_subs/e2e-app-real-assets-round.sh
+++ b/test/scripts/e2e_subs/e2e-app-real-assets-round.sh
@@ -28,7 +28,7 @@ APP_ID=$(${gcmd} app create --creator ${ACCOUNT} --foreign-asset $ASSET_ID --app
# Create another account, fund it, send it some asset
ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
${gcmd} clerk send -a 1000000 -f $ACCOUNT -t $ACCOUNTB
-${gcmd} asset send --assetid $ASSET_ID -a 0 -f $ACCOUNTB -t $ACCOUNTB
+${gcmd} asset optin --assetid $ASSET_ID -a $ACCOUNTB
${gcmd} asset send --assetid $ASSET_ID -a 17 -f $ACCOUNT -t $ACCOUNTB
# Call app from account B, do some checks on asset balance
diff --git a/test/scripts/e2e_subs/e2e-teal.sh b/test/scripts/e2e_subs/e2e-teal.sh
index a71c78a10..06caa5efb 100755
--- a/test/scripts/e2e_subs/e2e-teal.sh
+++ b/test/scripts/e2e_subs/e2e-teal.sh
@@ -11,6 +11,8 @@ WALLET=$1
gcmd="goal -w ${WALLET}"
+TEAL=test/scripts/e2e_subs/tealprogs
+
ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
# prints:
@@ -148,6 +150,16 @@ ${gcmd} clerk compile ${TEMPDIR}/true3.teal -o ${TEMPDIR}/true3.lsig
cp ${TEMPDIR}/true3.lsig ${TEMPDIR}/true2.lsig
printf '\x02' | dd of=${TEMPDIR}/true2.lsig bs=1 seek=0 count=1 conv=notrunc
+# Try to compile with source map, and check that map is correct.
+# Since the source map contains info about the file path,
+# we do this in place and clean up the file later.
+${gcmd} clerk compile ${TEAL}/quine.teal -m
+trap 'rm ${TEAL}/quine.teal.*' EXIT
+if ! diff ${TEAL}/quine.map ${TEAL}/quine.teal.tok.map; then
+ echo "produced source maps do not match"
+ exit 1
+fi
+
# compute the escrow account for the frankenstein program
ACCOUNT_TRUE=$(python -c 'import algosdk, sys; print(algosdk.logic.address(sys.stdin.buffer.read()))' < ${TEMPDIR}/true2.lsig)
# fund that escrow account
diff --git a/test/scripts/e2e_subs/limit-swap-test.sh b/test/scripts/e2e_subs/limit-swap-test.sh
index 17fa337d5..937fb013a 100755
--- a/test/scripts/e2e_subs/limit-swap-test.sh
+++ b/test/scripts/e2e_subs/limit-swap-test.sh
@@ -57,7 +57,7 @@ ${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_ASSET_TRADE
echo "make asset trader able to accept asset"
ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }')
-${gcmd} asset send -o ${TEMPDIR}/b-asset-init.tx -a 0 --assetid ${ASSET_ID} -t $ACCOUNT_ASSET_TRADER -f $ACCOUNT_ASSET_TRADER --validrounds $((${SETUP_ROUND} - ${ROUND} - 1))
+${gcmd} asset optin -o ${TEMPDIR}/b-asset-init.tx --assetid ${ASSET_ID} -a $ACCOUNT_ASSET_TRADER --validrounds $((${SETUP_ROUND} - ${ROUND} - 1))
${gcmd} clerk sign -i ${TEMPDIR}/b-asset-init.tx -p ${TEMPDIR}/limit-order-b.teal -o ${TEMPDIR}/b-asset-init.stx
@@ -103,7 +103,7 @@ ${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_ASSET_TRADE
echo "make asset trader able to accept asset"
ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }')
-${gcmd} asset send -o ${TEMPDIR}/b-asset-init.tx -a 0 --assetid ${ASSET_ID} -t $ACCOUNT_ASSET_TRADER -f $ACCOUNT_ASSET_TRADER --validrounds $((${SETUP_ROUND} - ${ROUND} - 1))
+${gcmd} asset optin -o ${TEMPDIR}/b-asset-init.tx --assetid ${ASSET_ID} -a $ACCOUNT_ASSET_TRADER --validrounds $((${SETUP_ROUND} - ${ROUND} - 1))
${gcmd} clerk sign -i ${TEMPDIR}/b-asset-init.tx -p ${TEMPDIR}/limit-order-b.teal -o ${TEMPDIR}/b-asset-init.stx
diff --git a/test/scripts/e2e_subs/sectok-app.sh b/test/scripts/e2e_subs/sectok-app.sh
index 2216266ce..46154723f 100755
--- a/test/scripts/e2e_subs/sectok-app.sh
+++ b/test/scripts/e2e_subs/sectok-app.sh
@@ -1,7 +1,9 @@
#!/usr/bin/env bash
# TIMEOUT=380
-date '+sectok-app start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
set -ex
set -o pipefail
@@ -30,7 +32,6 @@ wait $WB
wait $WC
# ${gcmd} clerk send -a 100000000 -f ${CREATOR} -t ${MANAGER}
-ZERO='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ'
SUPPLY=10000000
XFER1=1000
XFER2=42
@@ -45,7 +46,7 @@ ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
ERR_APP_OI_STR3='is not currently opted in'
ERR_APP_REJ_STR1='transaction rejected by ApprovalProgram'
-ERR_APP_REJ_STR2='TEAL runtime encountered err opcode'
+ERR_APP_REJ_STR2='err opcode executed'
ERR_APP_REJ_STR3='- would result negative'
# create
@@ -57,22 +58,27 @@ qcmd="${gcmd} app interact query --header ${DIR}/sectok.json --app-id ${APP_ID}"
# read global
RES=$(${qcmd} total-supply)
if [[ $RES != $SUPPLY ]]; then
- date "+sectok-app FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} reserve-supply)
if [[ $RES != $SUPPLY ]]; then
- date "+sectok-app FAIL expected reserve to begin with $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected reserve to begin with $SUPPLY %Y%m%d_%H%M%S"
false
fi
+function assertContains {
+ if [[ $1 != *"$2"* ]]; then
+ echo "$1" does not contain "'$2'"
+ date "+$scriptname FAIL $3 %Y%m%d_%H%M%S"
+ false
+ fi
+}
+
# read alice F
RES=$(${qcmd} --from $ALICE balance 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR1"* ]]; then
- date '+sectok-app FAIL expected read of non-opted in account to fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR1" "expected read of non-opted in account to fail"
# optin alice, bob, carol
${xcmd} --from $ALICE opt-in &
@@ -87,65 +93,41 @@ wait $WC
RES=$(${qcmd} --from $ALICE transfer-group)
if [[ $RES != '0' ]]; then
- date '+sectok-app FAIL expected opt-in account to start with transfer group 0 %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opt-in account to start with transfer group 0 %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != '0' ]]; then
- date '+sectok-app FAIL expected opt-in account to start with 0 balance %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opt-in account to start with 0 balance %Y%m%d_%H%M%S"
false
fi
# assorted transfer-admin restrictions
RES=$(${xcmd} --from $CREATOR set-transfer-group --target $ALICE --transfer-group 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL contract-admins cannot set transfer groups %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "contract-admins cannot set transfer groups"
RES=$(${xcmd} --from $CREATOR set-lock-until --target $ALICE --lock-until 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL contract-admins cannot set lock-until %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "contract-admins cannot set lock-until"
RES=$(${xcmd} --from $CREATOR set-max-balance --target $ALICE --max-balance $SUPPLY 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL contract-admins cannot set max balance %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "contract-admins cannot set max balance"
RES=$(${xcmd} --from $ALICE set-transfer-group --target $ALICE --transfer-group 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot set transfer groups %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot set transfer groups"
RES=$(${xcmd} --from $ALICE set-lock-until --target $ALICE --lock-until 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot set lock-until %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot set lock-until"
RES=$(${xcmd} --from $ALICE set-max-balance --target $ALICE --max-balance $SUPPLY 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot set max balance %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot set max balance"
# setting transfer-admin
RES=$(${xcmd} --from $ALICE freeze --target $ALICE --frozen 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot freeze accounts %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot freeze accounts"
RES=$(${xcmd} --from $ALICE set-transfer-admin --target $ALICE --status 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date '+sectok-app FAIL non-admins cannot set transfer admin status %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "non-admins cannot set transfer admin status"
${xcmd} --from $CREATOR set-transfer-admin --target $ALICE --status 1
${xcmd} --from $ALICE freeze --target $ALICE --frozen 1
@@ -153,10 +135,7 @@ ${xcmd} --from $ALICE set-max-balance --target $ALICE --max-balance $SUPPLY
${xcmd} --from $CREATOR set-transfer-admin --target $ALICE --status 0
RES=$(${xcmd} --from $ALICE freeze --target $ALICE --frozen 0 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins (revoked) cannot freeze accounts %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins (revoked) cannot freeze accounts"
# setting contract-admin
${xcmd} --from $CREATOR set-contract-admin --target $BOB --status 1
@@ -164,16 +143,10 @@ ${xcmd} --from $BOB set-transfer-admin --target $ALICE --status 1
${xcmd} --from $CREATOR set-contract-admin --target $BOB --status 0
RES=$(${xcmd} --from $BOB set-transfer-admin --target $ALICE --status 0 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date '+sectok-app FAIL non-admins cannot set transfer admin status %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "non-admins cannot set transfer admin status"
RES=$(${xcmd} --from $BOB set-contract-admin --target $BOB --status 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date '+sectok-app FAIL non-admins cannot set own contract admin status %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "non-admins cannot set own contract admin status"
# minting/burning
${xcmd} --from $CREATOR mint --target $ALICE --amount $XFER1 &
@@ -185,13 +158,13 @@ wait $WB
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != $(( $XFER1 + $XFER1 )) ]]; then
- date '+sectok-app FAIL minting twice did not produce the correct balance %Y%m%d_%H%M% S'
+ date "+$scriptname FAIL minting twice did not produce the correct balance %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} reserve-supply)
if [[ $RES != $(( $SUPPLY - $XFER1 - $XFER1 )) ]]; then
- date '+sectok-app FAIL minting twice did not produce the correct reserve balance %Y%m%d_%H%M% S'
+ date "+$scriptname FAIL minting twice did not produce the correct reserve balance %Y%m%d_%H%M%S"
false
fi
@@ -199,7 +172,7 @@ ${xcmd} --from $CREATOR burn --target $ALICE --amount $XFER1
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != $XFER1 ]]; then
- date '+sectok-app FAIL minting and then burning did not produce the correct balance %Y%m%d_%H%M% S'
+ date "+$scriptname FAIL minting and then burning did not produce the correct balance %Y%m%d_%H%M%S"
false
fi
@@ -209,10 +182,7 @@ ${xcmd} --from $CREATOR burn --target $ALICE --amount $XFER1
${xcmd} --from $CREATOR mint --target $CAROL --amount $XFER1
RES=$(${xcmd} --from $CAROL transfer --receiver $BOB --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date '+sectok-app FAIL new account should not be able to spend %Y%m%d_%H%M% S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "new account should not be able to spend"
${xcmd} --from $ALICE set-max-balance --target $CAROL --max-balance $SUPPLY &
WA=$!
@@ -236,18 +206,12 @@ wait $WA
wait $WB
RES=$(${xcmd} --from $CAROL transfer --receiver $BOB --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date '+sectok-app FAIL no transfers allowed without transfer rules %Y%m%d_%H%M% S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "no transfers allowed without transfer rules"
${xcmd} --from $ALICE set-transfer-rule --send-group 1 --receive-group 2 --lock-until 1
${xcmd} --from $CAROL transfer --receiver $BOB --amount $XFER2
RES=$(${xcmd} --from $BOB transfer --receiver $CAROL --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date '+sectok-app FAIL reverse transfer (by group) should fail %Y%m%d_%H%M% S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "reverse transfer (by group) should fail"
-date '+sectok-app done %Y%m%d_%H%M%S'
+date "+$scriptname done %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/teal-app-params.sh b/test/scripts/e2e_subs/teal-app-params.sh
index 6fc83e89e..9149cc6b3 100755
--- a/test/scripts/e2e_subs/teal-app-params.sh
+++ b/test/scripts/e2e_subs/teal-app-params.sh
@@ -34,6 +34,6 @@ APPID_2=$(${gcmd} app create --creator "$ACCOUNTB" --approval-prog=${TEAL}/quine
${gcmd} app call --app-id="$APPID_2" --from="$ACCOUNTB"
# Verify "app_params_get AppApprovalProgram" works on update
-${gcmd} app update --app-id="$APPID_2" --from="$ACCOUNTB" --approval-prog=${TEAL}/approve-all.teal --clear-prog=${TEAL}/approve-all.teal
+${gcmd} app update --app-id="$APPID_2" --from="$ACCOUNTB" --approval-prog=${TEAL}/approve-all5.teal --clear-prog=${TEAL}/approve-all5.teal
date "+${scriptname} OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/tealprogs/approve-all5.teal b/test/scripts/e2e_subs/tealprogs/approve-all5.teal
new file mode 100644
index 000000000..c4ee9e330
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/approve-all5.teal
@@ -0,0 +1,2 @@
+#pragma version 5
+ int 1
diff --git a/test/scripts/e2e_subs/tealprogs/quine.map b/test/scripts/e2e_subs/tealprogs/quine.map
new file mode 100644
index 000000000..02e426d14
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/quine.map
@@ -0,0 +1 @@
+{"version":3,"sources":["test/scripts/e2e_subs/tealprogs/quine.teal"],"names":[],"mapping":";AAOA;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAQA;AASA;;;AAUA;;;AAWA;AAYA;;AAaA;AAcA;;;AAeA;AAgBA;AAiBA;;AAkBA;;AAmBA;AAoBA;AAqBA"} \ No newline at end of file
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
index 9e37558fa..8200ee38c 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
@@ -3,7 +3,7 @@
"VersionModifier": "",
"ConsensusProtocol": "",
"FirstPartKeyRound": 0,
- "LastPartKeyRound": 3000000,
+ "LastPartKeyRound": 50000,
"PartKeyDilution": 0,
"Wallets": [
{
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/Makefile b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
new file mode 100644
index 000000000..bd62e7b67
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
@@ -0,0 +1,12 @@
+PARAMS=-w 10 -R 4 -N 10 -n 10 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: net.json genesis.json
+
+net.json: node.json nonPartNode.json
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json:
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py b/test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py
new file mode 100644
index 000000000..da7b1274a
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py
@@ -0,0 +1,30 @@
+node_types = {"R":4, "N":10, "NPN":1}
+node_size = {"R":"-Large", "N":"-Small", "NPN":"-Large"}
+regions = [
+ "AWS-US-EAST-2",
+ "AWS-US-WEST-1",
+ "AWS-EU-WEST-2",
+ "AWS-EU-CENTRAL-1"
+]
+
+f = open("topology.json", "w")
+f.write("{ \"Hosts\":\n [")
+
+region_count = len(regions)
+first = True
+for x in node_types:
+ node_type = x
+ node_count = node_types[x]
+ region_size = node_size[x]
+ for i in range(node_count):
+ node_name = node_type + str(i+1)
+ region = regions[i%region_count]
+ if (first ):
+ first = False
+ else:
+ f.write(",")
+ f.write ("\n {\n \"Name\": \"" + node_name + "\",\n \"Template\": \"" + region + region_size + "\"\n }" )
+
+f.write("\n ]\n}\n")
+f.close()
+
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/genesis.json b/test/testdata/deployednettemplates/recipes/txnsync/genesis.json
new file mode 100644
index 000000000..8f33dee69
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/genesis.json
@@ -0,0 +1,69 @@
+{
+ "NetworkName": "",
+ "VersionModifier": "",
+ "ConsensusProtocol": "",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 3000000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 50,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/net.json b/test/testdata/deployednettemplates/recipes/txnsync/net.json
new file mode 100644
index 000000000..4f1d38be8
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/net.json
@@ -0,0 +1,346 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N6",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N7",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N8",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N9",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N10",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/node.json b/test/testdata/deployednettemplates/recipes/txnsync/node.json
new file mode 100644
index 000000000..b408e2ff7
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/node.json
@@ -0,0 +1,11 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }",
+ "FractionApply": 1.0
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json b/test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json
new file mode 100644
index 000000000..8ab3b8bdd
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/recipe.json b/test/testdata/deployednettemplates/recipes/txnsync/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/relay.json b/test/testdata/deployednettemplates/recipes/txnsync/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/topology.json b/test/testdata/deployednettemplates/recipes/txnsync/topology.json
new file mode 100644
index 000000000..b873a0885
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/topology.json
@@ -0,0 +1,64 @@
+{ "Hosts":
+ [
+ {
+ "Name": "R1",
+ "Template": "AWS-US-EAST-2-Large"
+ },
+ {
+ "Name": "R2",
+ "Template": "AWS-US-WEST-1-Large"
+ },
+ {
+ "Name": "R3",
+ "Template": "AWS-EU-WEST-2-Large"
+ },
+ {
+ "Name": "R4",
+ "Template": "AWS-EU-CENTRAL-1-Large"
+ },
+ {
+ "Name": "N1",
+ "Template": "AWS-US-EAST-2-Small"
+ },
+ {
+ "Name": "N2",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "N3",
+ "Template": "AWS-EU-WEST-2-Small"
+ },
+ {
+ "Name": "N4",
+ "Template": "AWS-EU-CENTRAL-1-Small"
+ },
+ {
+ "Name": "N5",
+ "Template": "AWS-US-EAST-2-Small"
+ },
+ {
+ "Name": "N6",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "N7",
+ "Template": "AWS-EU-WEST-2-Small"
+ },
+ {
+ "Name": "N8",
+ "Template": "AWS-EU-CENTRAL-1-Small"
+ },
+ {
+ "Name": "N9",
+ "Template": "AWS-US-EAST-2-Small"
+ },
+ {
+ "Name": "N10",
+ "Template": "AWS-US-WEST-1-Small"
+ },
+ {
+ "Name": "NPN1",
+ "Template": "AWS-US-EAST-2-Large"
+ }
+ ]
+}
diff --git a/util/condvar/timedwait.go b/util/condvar/timedwait.go
index 9ed142f1b..a4605d099 100644
--- a/util/condvar/timedwait.go
+++ b/util/condvar/timedwait.go
@@ -20,6 +20,8 @@ import (
"sync"
"sync/atomic"
"time"
+
+ "github.com/algorand/go-algorand/util"
)
// TimedWait waits for sync.Cond c to be signaled, with a timeout.
@@ -33,7 +35,7 @@ func TimedWait(c *sync.Cond, timeout time.Duration) {
var done int32
go func() {
- <-time.After(timeout)
+ util.NanoSleep(timeout)
for atomic.LoadInt32(&done) == 0 {
c.Broadcast()
@@ -42,7 +44,7 @@ func TimedWait(c *sync.Cond, timeout time.Duration) {
// thread hasn't gotten around to calling c.Wait()
// yet, so the c.Broadcast() did not wake it up.
// Sleep for a second and check again.
- <-time.After(time.Second)
+ time.Sleep(time.Second)
}
}()
diff --git a/util/io.go b/util/io.go
index 453a98663..081c1d568 100644
--- a/util/io.go
+++ b/util/io.go
@@ -59,6 +59,17 @@ func FileExists(filePath string) bool {
return fileExists
}
+// IsEmpty recursively checks path for files and returns true if there are none.
+func IsEmpty(path string) bool {
+ err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
+ if info.IsDir() {
+ return nil
+ }
+ return os.ErrExist
+ })
+ return err == nil
+}
+
// ExeDir returns the absolute path to the current executing binary (not including the filename)
func ExeDir() (string, error) {
ex, err := os.Executable()
diff --git a/cmd/tealdbg/util_test.go b/util/io_test.go
index 85de0747d..353616c64 100644
--- a/cmd/tealdbg/util_test.go
+++ b/util/io_test.go
@@ -14,23 +14,28 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package main
+package util
import (
+ "os"
+ "path"
"testing"
+ "github.com/stretchr/testify/assert"
+
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
)
-func TestVLQ(t *testing.T) {
+func TestIsEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
- a := require.New(t)
- a.Equal("AAAA", MakeSourceMapLine(0, 0, 0, 0))
- a.Equal("AACA", MakeSourceMapLine(0, 0, 1, 0))
- a.Equal("AAEA", MakeSourceMapLine(0, 0, 2, 0))
- a.Equal("AAgBA", MakeSourceMapLine(0, 0, 16, 0))
- a.Equal("AAggBA", MakeSourceMapLine(0, 0, 512, 0))
- a.Equal("ADggBD", MakeSourceMapLine(0, -1, 512, -1))
+ testPath := path.Join(os.TempDir(), "this", "is", "a", "long", "path")
+ err := os.MkdirAll(testPath, os.ModePerm)
+ assert.NoError(t, err)
+ defer os.RemoveAll(testPath)
+ assert.True(t, IsEmpty(testPath))
+
+ _, err = os.Create(path.Join(testPath, "file.txt"))
+ assert.NoError(t, err)
+ assert.False(t, IsEmpty(testPath))
}
diff --git a/util/metrics/counter.go b/util/metrics/counter.go
index 1fb33f67e..73debb3e9 100644
--- a/util/metrics/counter.go
+++ b/util/metrics/counter.go
@@ -186,7 +186,7 @@ func (counter *Counter) WriteMetric(buf *strings.Builder, parentLabels string) {
}
// AddMetric adds the metric into the map
-func (counter *Counter) AddMetric(values map[string]string) {
+func (counter *Counter) AddMetric(values map[string]float64) {
counter.Lock()
defer counter.Unlock()
@@ -199,7 +199,10 @@ func (counter *Counter) AddMetric(values map[string]string) {
if len(l.labels) == 0 {
sum += float64(atomic.LoadUint64(&counter.intValue))
}
-
- values[counter.name] = strconv.FormatFloat(sum, 'f', -1, 32)
+ var suffix string
+ if len(l.formattedLabels) > 0 {
+ suffix = ":" + l.formattedLabels
+ }
+ values[sanitizeTelemetryName(counter.name+suffix)] = sum
}
}
diff --git a/util/metrics/gauge.go b/util/metrics/gauge.go
index f4e27b957..0cf60932d 100644
--- a/util/metrics/gauge.go
+++ b/util/metrics/gauge.go
@@ -174,7 +174,7 @@ func (gauge *Gauge) WriteMetric(buf *strings.Builder, parentLabels string) {
}
// AddMetric adds the metric into the map
-func (gauge *Gauge) AddMetric(values map[string]string) {
+func (gauge *Gauge) AddMetric(values map[string]float64) {
gauge.Lock()
defer gauge.Unlock()
@@ -183,6 +183,10 @@ func (gauge *Gauge) AddMetric(values map[string]string) {
}
for _, l := range gauge.valuesIndices {
- values[gauge.name] = strconv.FormatFloat(l.gauge, 'f', -1, 32)
+ var suffix string
+ if len(l.formattedLabels) > 0 {
+ suffix = ":" + l.formattedLabels
+ }
+ values[sanitizeTelemetryName(gauge.name+suffix)] = l.gauge
}
}
diff --git a/util/metrics/metrics_test.go b/util/metrics/metrics_test.go
index 8d526906e..03369c7a1 100644
--- a/util/metrics/metrics_test.go
+++ b/util/metrics/metrics_test.go
@@ -22,9 +22,11 @@ import (
"net"
"net/http"
"strings"
+ "testing"
"time"
"github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/require"
)
type MetricTest struct {
@@ -91,3 +93,22 @@ func (p *MetricTest) testMetricsHandler(w http.ResponseWriter, r *http.Request)
}
w.Write([]byte(""))
}
+
+func TestSanitizeTelemetryName(t *testing.T) {
+ for _, tc := range []struct{ in, out string }{
+ {in: "algod_counter_x", out: "algod_counter_x"},
+ {in: "algod_counter_x{a=b}", out: "algod_counter_x_a_b_"},
+ {in: "this_is1-a-name0", out: "this_is1-a-name0"},
+ {in: "myMetricName1:a=yes", out: "myMetricName1_a_yes"},
+ {in: "myMetricName1:a=yes,b=no", out: "myMetricName1_a_yes_b_no"},
+ {in: "0myMetricName1", out: "_myMetricName1"},
+ {in: "myMetricName1{hello=x}", out: "myMetricName1_hello_x_"},
+ {in: "myMetricName1.moreNames-n.3", out: "myMetricName1_moreNames-n_3"},
+ {in: "-my-metric-name", out: "_my-metric-name"},
+ {in: `label-counter:label="a label value"`, out: "label-counter_label__a_label_value_"},
+ } {
+ t.Run(tc.in, func(t *testing.T) {
+ require.Equal(t, tc.out, sanitizeTelemetryName(tc.in))
+ })
+ }
+}
diff --git a/util/metrics/registry.go b/util/metrics/registry.go
index 33b902da3..53ada420a 100644
--- a/util/metrics/registry.go
+++ b/util/metrics/registry.go
@@ -70,7 +70,7 @@ func (r *Registry) WriteMetrics(buf *strings.Builder, parentLabels string) {
}
// AddMetrics will add all the metrics that were registered to this registry
-func (r *Registry) AddMetrics(values map[string]string) {
+func (r *Registry) AddMetrics(values map[string]float64) {
r.metricsMu.Lock()
defer r.metricsMu.Unlock()
for _, m := range r.metrics {
diff --git a/util/metrics/registryCommon.go b/util/metrics/registryCommon.go
index e5046d806..2eb8d6c53 100644
--- a/util/metrics/registryCommon.go
+++ b/util/metrics/registryCommon.go
@@ -17,6 +17,7 @@
package metrics
import (
+ "regexp"
"strings"
"github.com/algorand/go-deadlock"
@@ -25,7 +26,7 @@ import (
// Metric represent any collectable metric
type Metric interface {
WriteMetric(buf *strings.Builder, parentLabels string)
- AddMetric(values map[string]string)
+ AddMetric(values map[string]float64)
}
// Registry represents a single set of metrics registry
@@ -33,3 +34,11 @@ type Registry struct {
metrics []Metric
metricsMu deadlock.Mutex
}
+
+var sanitizeTelemetryCharactersRegexp = regexp.MustCompile("(^[^a-zA-Z_]|[^a-zA-Z0-9_-])")
+
+// sanitizeTelemetryName ensures a metric name reported to telemetry doesn't contain any
+// non-alphanumeric characters (apart from - or _) and doesn't start with a number or a hyphen.
+func sanitizeTelemetryName(name string) string {
+ return sanitizeTelemetryCharactersRegexp.ReplaceAllString(name, "_")
+}
diff --git a/util/metrics/registry_test.go b/util/metrics/registry_test.go
index 3c60f09d8..aa4851630 100644
--- a/util/metrics/registry_test.go
+++ b/util/metrics/registry_test.go
@@ -14,8 +14,6 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-// +build telemetry
-
package metrics
import (
@@ -33,33 +31,32 @@ func TestWriteAdd(t *testing.T) {
counter := MakeCounter(MetricName{Name: "gauge-name", Description: "gauge description"})
counter.Add(12.34, nil)
- results := make(map[string]string)
+ labelCounter := MakeCounter(MetricName{Name: "label-counter", Description: "counter with labels"})
+ labelCounter.Add(5, map[string]string{"label": "a label value"})
+
+ results := make(map[string]float64)
DefaultRegistry().AddMetrics(results)
- require.Equal(t, 1, len(results))
- require.True(t, hasKey(results, "gauge-name"))
- require.Equal(t, "12.34", results["gauge-name"])
+ require.Equal(t, 2, len(results))
+ require.Contains(t, results, "gauge-name")
+ require.InDelta(t, 12.34, results["gauge-name"], 0.01)
+ require.Contains(t, results, "label-counter_label__a_label_value_")
+ require.InDelta(t, 5, results["label-counter_label__a_label_value_"], 0.01)
bufBefore := strings.Builder{}
DefaultRegistry().WriteMetrics(&bufBefore, "label")
require.True(t, bufBefore.Len() > 0)
- // Test that WriteMetrics does not change after adding a StringGauge
- stringGauge := MakeStringGauge()
- stringGauge.Set("string-key", "value")
-
DefaultRegistry().AddMetrics(results)
- require.True(t, hasKey(results, "string-key"))
- require.Equal(t, "value", results["string-key"])
- require.True(t, hasKey(results, "gauge-name"))
- require.Equal(t, "12.34", results["gauge-name"])
+ require.Contains(t, results, "gauge-name")
+ require.InDelta(t, 12.34, results["gauge-name"], 0.01)
// not included in string builder
bufAfter := strings.Builder{}
DefaultRegistry().WriteMetrics(&bufAfter, "label")
require.Equal(t, bufBefore.String(), bufAfter.String())
- stringGauge.Deregister(nil)
counter.Deregister(nil)
+ labelCounter.Deregister(nil)
}
diff --git a/util/metrics/stringGauge.go b/util/metrics/stringGauge.go
deleted file mode 100644
index c398533e4..000000000
--- a/util/metrics/stringGauge.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package metrics
-
-import (
- "strings"
-)
-
-// MakeStringGauge create a new StringGauge.
-func MakeStringGauge() *StringGauge {
- c := &StringGauge{
- values: make(map[string]string),
- }
- c.Register(nil)
- return c
-}
-
-// Register registers the StringGauge with the default/specific registry
-func (stringGauge *StringGauge) Register(reg *Registry) {
- if reg == nil {
- DefaultRegistry().Register(stringGauge)
- } else {
- reg.Register(stringGauge)
- }
-}
-
-// Deregister deregisters the StringGauge with the default/specific registry
-func (stringGauge *StringGauge) Deregister(reg *Registry) {
- if reg == nil {
- DefaultRegistry().Deregister(stringGauge)
- } else {
- reg.Deregister(stringGauge)
- }
-}
-
-// Set updates a key with a value.
-func (stringGauge *StringGauge) Set(key string, value string) {
- stringGauge.values[key] = value
-}
-
-// WriteMetric omit string gauges from the metrics report, not sure how they act with prometheus
-func (stringGauge *StringGauge) WriteMetric(buf *strings.Builder, parentLabels string) {
-}
-
-// AddMetric sets all the key value pairs in the provided map.
-func (stringGauge *StringGauge) AddMetric(values map[string]string) {
- for k, v := range stringGauge.values {
- values[k] = v
- }
-}
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go
index 8dc73ea3b..53cce7ba6 100644
--- a/util/metrics/tagcounter.go
+++ b/util/metrics/tagcounter.go
@@ -129,7 +129,7 @@ func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) {
// AddMetric is part of the Metric interface
// Copy the values in this TagCounter out into the string-string map.
-func (tc *TagCounter) AddMetric(values map[string]string) {
+func (tc *TagCounter) AddMetric(values map[string]float64) {
tagp := tc.tagptr.Load()
if tagp == nil {
return
@@ -146,6 +146,6 @@ func (tc *TagCounter) AddMetric(values map[string]string) {
} else {
name = tc.Name + "_" + tag
}
- values[name] = strconv.FormatUint(*tagcount, 10)
+ values[sanitizeTelemetryName(name)] = float64(*tagcount)
}
}
diff --git a/util/metrics/tagcounter_test.go b/util/metrics/tagcounter_test.go
index a2f8a87a0..b76202c53 100644
--- a/util/metrics/tagcounter_test.go
+++ b/util/metrics/tagcounter_test.go
@@ -46,7 +46,7 @@ func TestTagCounter(t *testing.T) {
tc.WriteMetric(&sb, "")
require.Equal(t, "", sb.String())
- result := make(map[string]string)
+ result := make(map[string]float64)
tc.AddMetric(result)
require.Equal(t, 0, len(result))
diff --git a/util/metrics/stringGaugeCommon.go b/util/sleep.go
index 40358b22d..0d3a60acf 100644
--- a/util/metrics/stringGaugeCommon.go
+++ b/util/sleep.go
@@ -14,14 +14,21 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package metrics
+//go:build !linux
+// +build !linux
+
+package util
import (
- "github.com/algorand/go-deadlock"
+ "time"
)
-// StringGauge represents a map of key value pairs available to be written with the AddMetric
-type StringGauge struct {
- deadlock.Mutex
- values map[string]string
+// NanoSleep sleeps for the given d duration.
+func NanoSleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// NanoAfter waits for the duration to elapse and then sends the current time on the returned channel.
+func NanoAfter(d time.Duration) <-chan time.Time {
+ return time.After(d)
}
diff --git a/util/sleep_linux.go b/util/sleep_linux.go
new file mode 100644
index 000000000..02d450303
--- /dev/null
+++ b/util/sleep_linux.go
@@ -0,0 +1,39 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package util
+
+import (
+ "time"
+)
+
+// NanoAfter waits for the duration to elapse and then sends the current time on the returned channel.
+func NanoAfter(d time.Duration) <-chan time.Time {
+	// The following is a workaround for the Go 1.16 bug, where timers are rounded up to the next millisecond resolution.
+	// The Go implementation of "time.After" avoids creating the goroutine until it's needed for writing the time
+ // to the channel. This is a pretty impressive implementation compared to the one below, since it's much more
+ // resource-efficient. For that reason, we'll keep calling the efficient implementation when timing is not
+	// critical (i.e., > 10ms).
+ if d > 10*time.Millisecond {
+ return time.After(d)
+ }
+ c := make(chan time.Time, 1)
+ go func() {
+ NanoSleep(d)
+ c <- time.Now()
+ }()
+ return c
+}
diff --git a/util/sleep_linux_32.go b/util/sleep_linux_32.go
new file mode 100644
index 000000000..1d155fac0
--- /dev/null
+++ b/util/sleep_linux_32.go
@@ -0,0 +1,35 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+//go:build linux && (arm || 386)
+// +build linux
+// +build arm 386
+
+package util
+
+import (
+ "syscall"
+ "time"
+)
+
+// NanoSleep sleeps for the given d duration.
+func NanoSleep(d time.Duration) {
+ timeSpec := &syscall.Timespec{
+ Nsec: int32(d.Nanoseconds() % time.Second.Nanoseconds()),
+ Sec: int32(d.Nanoseconds() / time.Second.Nanoseconds()),
+ }
+ syscall.Nanosleep(timeSpec, nil) // nolint:errcheck
+}
diff --git a/util/sleep_linux_64.go b/util/sleep_linux_64.go
new file mode 100644
index 000000000..2897ceaa1
--- /dev/null
+++ b/util/sleep_linux_64.go
@@ -0,0 +1,34 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+//go:build linux && !(arm || 386)
+// +build linux,!arm,!386
+
+package util
+
+import (
+ "syscall"
+ "time"
+)
+
+// NanoSleep sleeps for the given d duration.
+func NanoSleep(d time.Duration) {
+ timeSpec := &syscall.Timespec{
+ Nsec: d.Nanoseconds() % time.Second.Nanoseconds(),
+ Sec: d.Nanoseconds() / time.Second.Nanoseconds(),
+ }
+ syscall.Nanosleep(timeSpec, nil) // nolint:errcheck
+}