summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorchris erway <chris.erway@algorand.com>2022-04-19 13:44:35 -0400
committerchris erway <chris.erway@algorand.com>2022-04-19 13:44:35 -0400
commit2b54c5ecc8c38e9d17e992a5ed236a2ccada1e48 (patch)
tree4b32843040225e5c09c9570782316fc0379d40ae
parentf51cd4652c35427c1a4aa6f127551ac07e2bb771 (diff)
parent8a5155ff226909bdace8998a906142e842c52788 (diff)
Merge remote-tracking branch 'upstream/master' into feature/txnsync
-rw-r--r--.circleci/config.yml48
-rw-r--r--.github/workflows/build.yml2
-rw-r--r--.github/workflows/reviewdog.yml2
-rw-r--r--Makefile1
-rw-r--r--agreement/cryptoVerifier.go5
-rw-r--r--agreement/pseudonode.go9
-rw-r--r--cmd/algod/main.go19
-rw-r--r--cmd/opdoc/opdoc.go192
-rw-r--r--cmd/opdoc/tmLanguage.go39
-rw-r--r--cmd/tealdbg/cdtSession.go67
-rw-r--r--cmd/tealdbg/cdtSession_test.go2
-rw-r--r--cmd/tealdbg/cdtState.go68
-rw-r--r--cmd/tealdbg/cdtStateObjects.go67
-rw-r--r--cmd/tealdbg/cdtdbg_test.go6
-rw-r--r--cmd/tealdbg/debugger.go157
-rw-r--r--cmd/tealdbg/debugger_test.go198
-rw-r--r--cmd/tealdbg/local.go39
-rw-r--r--cmd/tealdbg/local_test.go430
-rw-r--r--cmd/tealdbg/webdbg.go19
-rw-r--r--config/consensus.go8
-rw-r--r--crypto/secp256k1/secp256_test.go2
-rw-r--r--daemon/algod/api/client/restClient.go2
-rw-r--r--daemon/algod/api/server/v2/dryrun.go2
-rw-r--r--daemon/algod/api/server/v2/dryrun_test.go54
-rw-r--r--daemon/kmd/wallet/driver/ledger.go1
-rw-r--r--daemon/kmd/wallet/driver/ledger_hid.go23
-rw-r--r--data/abi/abi_encode.go32
-rw-r--r--data/abi/abi_encode_test.go81
-rw-r--r--data/abi/abi_json_test.go9
-rw-r--r--data/bookkeeping/block.go2
-rw-r--r--data/ledger_test.go43
-rw-r--r--data/transactions/logic/.gitignore2
-rw-r--r--data/transactions/logic/README.md10
-rw-r--r--data/transactions/logic/TEAL_opcodes.md82
-rw-r--r--data/transactions/logic/assembler.go1007
-rw-r--r--data/transactions/logic/assembler_test.go113
-rw-r--r--data/transactions/logic/backwardCompat_test.go20
-rw-r--r--data/transactions/logic/debugger.go33
-rw-r--r--data/transactions/logic/debugger_test.go59
-rw-r--r--data/transactions/logic/doc.go181
-rw-r--r--data/transactions/logic/doc_test.go11
-rw-r--r--data/transactions/logic/eval.go274
-rw-r--r--data/transactions/logic/evalCrypto_test.go96
-rw-r--r--data/transactions/logic/evalStateful_test.go43
-rw-r--r--data/transactions/logic/eval_test.go177
-rw-r--r--data/transactions/logic/fields.go984
-rw-r--r--data/transactions/logic/fields_string.go9
-rw-r--r--data/transactions/logic/fields_test.go10
-rw-r--r--data/transactions/logic/langspec.json2222
-rw-r--r--data/transactions/logic/opcodes.go289
-rw-r--r--data/transactions/logic/opcodes_test.go10
-rw-r--r--data/transactions/logic/teal.tmLanguage.json136
-rw-r--r--gen/generate.go4
-rw-r--r--go.mod26
-rw-r--r--go.sum80
-rw-r--r--ledger/acctupdates.go16
-rw-r--r--ledger/acctupdates_test.go51
-rw-r--r--ledger/catchpointtracker_test.go3
-rw-r--r--ledger/internal/eval_blackbox_test.go8
-rw-r--r--ledger/internal/prefetcher/error.go43
-rw-r--r--ledger/internal/prefetcher/prefetcher.go21
-rw-r--r--ledger/internal/prefetcher/prefetcher_alignment_test.go2
-rw-r--r--ledger/internal/prefetcher/prefetcher_test.go203
-rw-r--r--ledger/ledger.go6
-rw-r--r--ledger/ledger_test.go46
-rw-r--r--ledger/ledgercore/accountdata.go5
-rw-r--r--libgoal/libgoal.go4
-rw-r--r--logging/telemetry_test.go39
-rw-r--r--logging/telemetryspec/event.go9
-rw-r--r--network/dialer.go3
-rw-r--r--network/rateLimitingTransport.go13
-rw-r--r--network/wsNetwork.go3
-rw-r--r--scripts/buildtools/go.mod6
-rw-r--r--scripts/buildtools/go.sum8
-rwxr-xr-xscripts/buildtools/install_buildtools.sh6
-rwxr-xr-xscripts/create_and_deploy_recipe.sh1
-rwxr-xr-xscripts/get_golang_version.sh6
-rwxr-xr-xscripts/install_linux_deps.sh2
-rwxr-xr-xscripts/travis/before_build.sh1
-rw-r--r--shared/pingpong/accounts.go3
-rw-r--r--shared/pingpong/pingpong.go5
-rw-r--r--test/README.md8
-rw-r--r--test/e2e-go/cli/goal/account_test.go9
-rw-r--r--test/e2e-go/cli/goal/clerk_test.go3
-rw-r--r--test/e2e-go/cli/goal/node_cleanup_test.go3
-rw-r--r--test/e2e-go/features/participation/participationRewards_test.go4
-rw-r--r--test/e2e-go/features/transactions/sendReceive_test.go1
-rwxr-xr-xtest/scripts/e2e.sh25
-rwxr-xr-xtest/scripts/e2e_go_tests.sh1
-rwxr-xr-xtest/scripts/e2e_subs/assets-app-b.sh51
-rwxr-xr-xtest/scripts/e2e_subs/assets-app.sh95
-rwxr-xr-xtest/scripts/e2e_subs/dynamic-fee-teal-test.sh1
-rwxr-xr-xtest/scripts/e2e_subs/sectok-app.sh106
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/Makefile12
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py30
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/genesis.json69
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/net.json311
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/node.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json5
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/recipe.json7
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/relay.json11
-rw-r--r--test/testdata/deployednettemplates/recipes/txnsync/topology.json44
-rw-r--r--util/condvar/timedwait.go6
-rw-r--r--util/metrics/counter.go9
-rw-r--r--util/metrics/gauge.go8
-rw-r--r--util/metrics/metrics_test.go21
-rw-r--r--util/metrics/registry.go2
-rw-r--r--util/metrics/registryCommon.go11
-rw-r--r--util/metrics/registry_test.go27
-rw-r--r--util/metrics/stringGauge.go64
-rw-r--r--util/metrics/stringGauge_test.go55
-rw-r--r--util/metrics/tagcounter.go4
-rw-r--r--util/metrics/tagcounter_test.go2
-rw-r--r--util/sleep.go (renamed from util/metrics/stringGaugeCommon.go)18
-rw-r--r--util/sleep_linux.go49
116 files changed, 6334 insertions, 2771 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 327b49719..34828ef5e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -138,7 +138,9 @@ workflows:
- << matrix.platform >>_build
filters:
<<: *filters-nightly
- context: slack-secrets
+ context:
+ - slack-secrets
+ - aws-secrets
- tests_verification_job:
name: << matrix.platform >>_<< matrix.job_type >>_verification
@@ -196,7 +198,7 @@ commands:
shell: bash.exe
command: |
choco install -y msys2 pacman make wget --force
- choco install -y golang --version=1.14.7 --force
+ choco install -y golang --version=1.16.15 --force
choco install -y python3 --version=3.7.3 --force
export msys2='cmd //C RefreshEnv.cmd '
export msys2+='& set MSYS=winsymlinks:nativestrict '
@@ -335,9 +337,6 @@ commands:
key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
paths:
- tmp/go-cache
- - upload_to_buildpulse:
- platform: << parameters.platform >>
- path: << parameters.result_path >>/<< parameters.result_subdir>>
upload_coverage:
description: Collect coverage reports and upload them
@@ -348,40 +347,6 @@ commands:
command: |
scripts/travis/upload_coverage.sh || true
- upload_to_buildpulse:
- description: Collect build reports and upload them
- parameters:
- platform:
- type: string
- path:
- type: string
- steps:
- - run:
- name: Send test results to BuildPulse
- when: always
- command: |
- set -e
- if ! ls << parameters.path >>/*/*.xml &> /dev/null; then exit 0; fi
- sed -i"" -e 's/classname="/classname="<< parameters.platform >>-/' << parameters.path >>/*/*.xml
- case "<< parameters.platform >>" in
- arm64)
- URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-arm64
- SUM=53f94c29ad162c2b9ebb1f4a2f967f5262c0459ee4a0c34332977d8c89aafc18
- ;;
- amd64)
- URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-linux-amd64
- SUM=4655e54d756580c0de0112cab488e6e08d0af75e9fc8caea2d63f9e13be8beb5
- ;;
- mac_amd64)
- URL=https://github.com/buildpulse/test-reporter/releases/download/v0.21.0-pre/test-reporter-darwin-amd64
- SUM=2f9e20a6f683c80f35d04e36bc57ecfe605bb48fee5a1b8d8f7c45094028eea3
- ;;
- esac
- curl -fsSL --retry 3 --retry-connrefused $URL > ./buildpulse-test-reporter
- echo "$SUM *buildpulse-test-reporter" | shasum -a 256 -c --status
- chmod +x ./buildpulse-test-reporter
- ./buildpulse-test-reporter submit << parameters.path >> --account-id 23182699 --repository-id 191266671 || true
-
generic_integration:
description: Run integration tests from build workspace, for re-use by different architectures
parameters:
@@ -432,6 +397,7 @@ commands:
export PARTITION_ID=${CIRCLE_NODE_INDEX}
export PARALLEL_FLAG="-p 1"
test/scripts/run_integration_tests.sh
+
- store_artifacts:
path: << parameters.result_path >>
destination: test-results
@@ -441,9 +407,6 @@ commands:
root: << parameters.result_path >>
paths:
- << parameters.result_subdir >>
- - upload_to_buildpulse:
- platform: << parameters.platform >>
- path: << parameters.result_path >>/<< parameters.result_subdir>>
tests_verification_command:
description: Check if all tests were run at least once and only once across all parallel runs
@@ -651,6 +614,7 @@ jobs:
working_directory: << pipeline.parameters.build_dir >>/project
environment:
E2E_TEST_FILTER: "SCRIPTS"
+ E2E_PLATFORM: << parameters.platform >>
steps:
- prepare_build_dir
- prepare_go
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 4bcee861e..4cc8eaba4 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -20,7 +20,7 @@ jobs:
- name: Install golang
uses: actions/setup-go@v2
with:
- go-version: '1.14.7'
+ go-version: '1.16.15'
- name: Build Test
run: |
export ALGORAND_DEADLOCK=enable
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 30b76064b..134685fc6 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -44,7 +44,7 @@ jobs:
- name: Install specific golang
uses: actions/setup-go@v2
with:
- go-version: '1.16.6'
+ go-version: '1.16.15'
- name: Create folders for golangci-lint
run: mkdir -p cicdtmp/golangci-lint
- name: Check if custom golangci-lint is already built
diff --git a/Makefile b/Makefile
index e62d24053..30dc3727e 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,6 @@ else
export GOPATH := $(shell go env GOPATH)
GOPATH1 := $(firstword $(subst :, ,$(GOPATH)))
endif
-export GO111MODULE := on
export GOPROXY := direct
SRCPATH := $(shell pwd)
ARCH := $(shell ./scripts/archtype.sh)
diff --git a/agreement/cryptoVerifier.go b/agreement/cryptoVerifier.go
index ff8a6d6aa..cf6c466e5 100644
--- a/agreement/cryptoVerifier.go
+++ b/agreement/cryptoVerifier.go
@@ -22,8 +22,12 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/metrics"
)
+var voteVerifierOutFullCounter = metrics.MakeCounter(
+ metrics.MetricName{Name: "algod_agreement_vote_verifier_responses_dropped", Description: "Number of voteVerifier responses dropped due to full channel"})
+
// TODO put these in config
const (
voteParallelism = 16
@@ -210,6 +214,7 @@ func (c *poolCryptoVerifier) voteFillWorker(toBundleWait chan<- bundleFuture) {
select {
case c.votes.out <- asyncVerifyVoteResponse{index: votereq.TaskIndex, err: err, cancelled: true}:
default:
+ voteVerifierOutFullCounter.Inc(nil)
c.log.Infof("poolCryptoVerifier.voteFillWorker unable to write failed enqueue response to output channel")
}
}
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index f52854d6a..bdaa2f359 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/logging/logspec"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/metrics"
)
// TODO put these in config
@@ -43,6 +44,9 @@ var errPseudonodeVerifierClosedChannel = errors.New("crypto verifier closed the
var errPseudonodeNoVotes = errors.New("no valid participation keys to generate votes for given round")
var errPseudonodeNoProposals = errors.New("no valid participation keys to generate proposals for given round")
+var pseudonodeBacklogFullByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_dropped_{TAG}", "Number of pseudonode tasks dropped per type")
+var pseudonodeResultTimeoutsByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_timeouts_{TAG}", "Number of pseudonode task result timeouts per type")
+
// A pseudonode creates proposals and votes with a KeyManager which holds participation keys.
//
// It constructs these messages as if they arrived from an external source and were verified.
@@ -176,6 +180,7 @@ func (n asyncPseudonode) MakeProposals(ctx context.Context, r round, p period) (
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
+ pseudonodeBacklogFullByType.Add("proposal", 1)
return nil, fmt.Errorf("unable to make proposal for (%d, %d): %w", r, p, errPseudonodeBacklogFull)
}
}
@@ -193,6 +198,7 @@ func (n asyncPseudonode) MakeVotes(ctx context.Context, r round, p period, s ste
return proposalTask.outputChannel(), nil
default:
proposalTask.close()
+ pseudonodeBacklogFullByType.Add("vote", 1)
return nil, fmt.Errorf("unable to make vote for (%d, %d, %d): %w", r, p, s, errPseudonodeBacklogFull)
}
}
@@ -474,6 +480,7 @@ verifiedVotesLoop:
return
case <-outputTimeout:
// we've been waiting for too long for this vote to be written to the output.
+ pseudonodeResultTimeoutsByType.Add("vote", 1)
t.node.log.Warnf("pseudonode.makeVotes: unable to write vote to output channel for round %d, period %d", t.round, t.period)
outputTimeout = nil
}
@@ -577,6 +584,7 @@ verifiedVotesLoop:
return
case <-outputTimeout:
// we've been waiting for too long for this vote to be written to the output.
+ pseudonodeResultTimeoutsByType.Add("pvote", 1)
t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal vote to output channel for round %d, period %d", t.round, t.period)
outputTimeout = nil
}
@@ -597,6 +605,7 @@ verifiedPayloadsLoop:
return
case <-outputTimeout:
// we've been waiting for too long for this vote to be written to the output.
+ pseudonodeResultTimeoutsByType.Add("ppayload", 1)
t.node.log.Warnf("pseudonode.makeProposals: unable to write proposal payload to output channel for round %d, period %d", t.round, t.period)
outputTimeout = nil
}
diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index b29d55c2f..bdab16458 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -84,12 +84,12 @@ func run() int {
}
version := config.GetCurrentVersion()
- heartbeatGauge := metrics.MakeStringGauge()
- heartbeatGauge.Set("version", version.String())
- heartbeatGauge.Set("version-num", strconv.FormatUint(version.AsUInt64(), 10))
- heartbeatGauge.Set("channel", version.Channel)
- heartbeatGauge.Set("branch", version.Branch)
- heartbeatGauge.Set("commit-hash", version.GetCommitHash())
+ var baseHeartbeatEvent telemetryspec.HeartbeatEventDetails
+ baseHeartbeatEvent.Info.Version = version.String()
+ baseHeartbeatEvent.Info.VersionNum = strconv.FormatUint(version.AsUInt64(), 10)
+ baseHeartbeatEvent.Info.Channel = version.Channel
+ baseHeartbeatEvent.Info.Branch = version.Branch
+ baseHeartbeatEvent.Info.CommitHash = version.GetCommitHash()
if *branchCheck {
fmt.Println(config.Branch)
@@ -339,12 +339,11 @@ func run() int {
defer ticker.Stop()
sendHeartbeat := func() {
- values := make(map[string]string)
+ values := make(map[string]float64)
metrics.DefaultRegistry().AddMetrics(values)
- heartbeatDetails := telemetryspec.HeartbeatEventDetails{
- Metrics: values,
- }
+ heartbeatDetails := baseHeartbeatEvent
+ heartbeatDetails.Metrics = values
log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.HeartbeatEvent, heartbeatDetails)
}
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 9b6fff979..6f3b45e6b 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -33,7 +33,6 @@ func opGroupMarkdownTable(names []string, out io.Writer) {
| - | -- |
`)
opSpecs := logic.OpsByName[logic.LogicVersion]
- // TODO: sort by logic.OpSpecs[].Opcode
for _, opname := range names {
spec, ok := opSpecs[opname]
if !ok {
@@ -49,15 +48,6 @@ func markdownTableEscape(x string) string {
return strings.ReplaceAll(x, "|", "\\|")
}
-func typeEnumTableMarkdown(out io.Writer) {
- fmt.Fprintf(out, "| Index | \"Type\" string | Description |\n")
- fmt.Fprintf(out, "| --- | --- | --- |\n")
- for i, name := range logic.TxnTypeNames {
- fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescriptions[name])
- }
- out.Write([]byte("\n"))
-}
-
func integerConstantsTableMarkdown(out io.Writer) {
fmt.Fprintf(out, "#### OnComplete\n\n")
fmt.Fprintf(out, "%s\n\n", logic.OnCompletionPreamble)
@@ -77,20 +67,22 @@ func integerConstantsTableMarkdown(out io.Writer) {
out.Write([]byte("\n"))
}
-type speccer interface {
- SpecByName(name string) logic.FieldSpec
-}
-
-func fieldSpecsMarkdown(out io.Writer, names []string, specs speccer) {
+func fieldGroupMarkdown(out io.Writer, group *logic.FieldGroup) {
showTypes := false
showVers := false
- spec0 := specs.SpecByName(names[0])
- opVer := spec0.OpVersion()
- for _, name := range names {
- if specs.SpecByName(name).Type() != logic.StackNone {
+ opVer := uint64(0)
+ for _, name := range group.Names {
+ spec, ok := group.SpecByName(name)
+ // reminder: group.Names can be "sparse" See: logic.TxnaFields
+ if !ok {
+ continue
+ }
+ if spec.Type().Typed() {
showTypes = true
}
- if specs.SpecByName(name).Version() != opVer {
+ if opVer == uint64(0) {
+ opVer = spec.Version()
+ } else if opVer != spec.Version() {
showVers = true
}
}
@@ -107,8 +99,11 @@ func fieldSpecsMarkdown(out io.Writer, names []string, specs speccer) {
headers += " Notes |\n"
widths += " --------- |\n"
fmt.Fprint(out, headers, widths)
- for i, name := range names {
- spec := specs.SpecByName(name)
+ for i, name := range group.Names {
+ spec, ok := group.SpecByName(name)
+ if !ok {
+ continue
+ }
str := fmt.Sprintf("| %d | %s", i, markdownTableEscape(name))
if showTypes {
str = fmt.Sprintf("%s | %s", str, markdownTableEscape(spec.Type().String()))
@@ -125,41 +120,6 @@ func fieldSpecsMarkdown(out io.Writer, names []string, specs speccer) {
fmt.Fprint(out, "\n")
}
-func transactionFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`txn` Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)):\n\n")
- fieldSpecsMarkdown(out, logic.TxnFieldNames, logic.TxnFieldSpecByName)
-}
-
-func globalFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`global` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.GlobalFieldNames, logic.GlobalFieldSpecByName)
-}
-
-func assetHoldingFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`asset_holding_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AssetHoldingFieldNames, logic.AssetHoldingFieldSpecByName)
-}
-
-func assetParamsFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`asset_params_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AssetParamsFieldNames, logic.AssetParamsFieldSpecByName)
-}
-
-func appParamsFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`app_params_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AppParamsFieldNames, logic.AppParamsFieldSpecByName)
-}
-
-func acctParamsFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`acct_params_get` Fields:\n\n")
- fieldSpecsMarkdown(out, logic.AcctParamsFieldNames, logic.AcctParamsFieldSpecByName)
-}
-
-func ecDsaCurvesMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`ECDSA` Curves:\n\n")
- fieldSpecsMarkdown(out, logic.EcdsaCurveNames, logic.EcdsaCurveSpecByName)
-}
-
func immediateMarkdown(op *logic.OpSpec) string {
markdown := ""
for _, imm := range op.Details.Immediates {
@@ -198,7 +158,7 @@ func stackMarkdown(op *logic.OpSpec) string {
return out + "\n"
}
-func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
+func opToMarkdown(out io.Writer, op *logic.OpSpec, groupDocWritten map[string]bool) (err error) {
ws := ""
opextra := logic.OpImmediateNote(op.Name)
if opextra != "" {
@@ -214,45 +174,34 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
fmt.Fprintf(out, "- **Cost**:\n")
for _, cost := range costs {
if cost.From == cost.To {
- fmt.Fprintf(out, " - %d (v%d)\n", cost.Cost, cost.To)
+ fmt.Fprintf(out, " - %s (v%d)\n", cost.Cost, cost.To)
} else {
if cost.To < logic.LogicVersion {
- fmt.Fprintf(out, " - %d (v%d - v%d)\n", cost.Cost, cost.From, cost.To)
+ fmt.Fprintf(out, " - %s (v%d - v%d)\n", cost.Cost, cost.From, cost.To)
} else {
- fmt.Fprintf(out, " - %d (since v%d)\n", cost.Cost, cost.From)
+ fmt.Fprintf(out, " - %s (since v%d)\n", cost.Cost, cost.From)
}
}
}
} else {
cost := costs[0].Cost
- if cost != 1 {
- fmt.Fprintf(out, "- **Cost**: %d\n", cost)
+ if cost != "1" {
+ fmt.Fprintf(out, "- **Cost**: %s\n", cost)
}
}
if op.Version > 1 {
fmt.Fprintf(out, "- Availability: v%d\n", op.Version)
}
if !op.Modes.Any() {
- fmt.Fprintf(out, "- Mode: %s\n", op.Modes.String())
+ fmt.Fprintf(out, "- Mode: %s\n", op.Modes)
}
- switch op.Name {
- case "global":
- globalFieldsMarkdown(out)
- case "txn":
- transactionFieldsMarkdown(out)
- fmt.Fprintf(out, "\nTypeEnum mapping:\n\n")
- typeEnumTableMarkdown(out)
- case "asset_holding_get":
- assetHoldingFieldsMarkdown(out)
- case "asset_params_get":
- assetParamsFieldsMarkdown(out)
- case "app_params_get":
- appParamsFieldsMarkdown(out)
- case "acct_params_get":
- acctParamsFieldsMarkdown(out)
- default:
- if strings.HasPrefix(op.Name, "ecdsa") {
- ecDsaCurvesMarkdown(out)
+
+ for i := range op.Details.Immediates {
+ group := op.Details.Immediates[i].Group
+ if group != nil && group.Doc != "" && !groupDocWritten[group.Name] {
+ fmt.Fprintf(out, "\n`%s` %s:\n\n", group.Name, group.Doc)
+ fieldGroupMarkdown(out, group)
+ groupDocWritten[group.Name] = true
}
}
ode := logic.OpDocExtra(op.Name)
@@ -265,8 +214,9 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
func opsToMarkdown(out io.Writer) (err error) {
out.Write([]byte("# Opcodes\n\nOps have a 'cost' of 1 unless otherwise specified.\n\n"))
opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ written := make(map[string]bool)
for _, spec := range opSpecs {
- err = opToMarkdown(out, &spec)
+ err = opToMarkdown(out, &spec, written)
if err != nil {
return
}
@@ -280,7 +230,6 @@ type OpRecord struct {
Name string
Args string `json:",omitempty"`
Returns string `json:",omitempty"`
- Cost int
Size int
ArgEnum []string `json:",omitempty"`
@@ -321,29 +270,39 @@ func typeString(types []logic.StackType) string {
return string(out)
}
-func fieldsAndTypes(names []string, specs speccer) ([]string, string) {
- types := make([]logic.StackType, len(names))
- for i, name := range names {
- types[i] = specs.SpecByName(name).Type()
+func fieldsAndTypes(group logic.FieldGroup) ([]string, string) {
+ // reminder: group.Names can be "sparse" See: logic.TxnaFields
+ fields := make([]string, 0, len(group.Names))
+ types := make([]logic.StackType, 0, len(group.Names))
+ for _, name := range group.Names {
+ if spec, ok := group.SpecByName(name); ok {
+ fields = append(fields, name)
+ types = append(types, spec.Type())
+ }
}
- return names, typeString(types)
+ return fields, typeString(types)
}
-func argEnums(name string) (names []string, types string) {
+func argEnums(name string) ([]string, string) {
switch name {
- case "txn", "gtxn", "gtxns", "itxn", "gitxn", "itxn_field":
- return fieldsAndTypes(logic.TxnFieldNames, logic.TxnFieldSpecByName)
+ case "txn", "gtxn", "gtxns", "itxn", "gitxn":
+ return fieldsAndTypes(logic.TxnFields)
+ case "itxn_field":
+ // itxn_field does not *return* a type depending on its immediate. It *takes* it.
+ // but until a consumer cares, ArgEnumTypes will be overloaded for that meaning.
+ return fieldsAndTypes(logic.ItxnSettableFields)
case "global":
- return
+ return fieldsAndTypes(logic.GlobalFields)
case "txna", "gtxna", "gtxnsa", "txnas", "gtxnas", "gtxnsas", "itxna", "gitxna":
- // Map is the whole txn field spec map. That's fine, we only lookup the given names.
- return fieldsAndTypes(logic.TxnaFieldNames(), logic.TxnFieldSpecByName)
+ return fieldsAndTypes(logic.TxnArrayFields)
case "asset_holding_get":
- return fieldsAndTypes(logic.AssetHoldingFieldNames, logic.AssetHoldingFieldSpecByName)
+ return fieldsAndTypes(logic.AssetHoldingFields)
case "asset_params_get":
- return fieldsAndTypes(logic.AssetParamsFieldNames, logic.AssetParamsFieldSpecByName)
+ return fieldsAndTypes(logic.AssetParamsFields)
case "app_params_get":
- return fieldsAndTypes(logic.AppParamsFieldNames, logic.AppParamsFieldSpecByName)
+ return fieldsAndTypes(logic.AppParamsFields)
+ case "acct_params_get":
+ return fieldsAndTypes(logic.AcctParamsFields)
default:
return nil, ""
}
@@ -357,7 +316,6 @@ func buildLanguageSpec(opGroups map[string][]string) *LanguageSpec {
records[i].Name = spec.Name
records[i].Args = typeString(spec.Args)
records[i].Returns = typeString(spec.Returns)
- records[i].Cost = spec.Details.Cost
records[i].Size = spec.Details.Size
records[i].ArgEnum, records[i].ArgEnumTypes = argEnums(spec.Name)
records[i].Doc = logic.OpDoc(spec.Name)
@@ -400,32 +358,22 @@ func main() {
integerConstantsTableMarkdown(constants)
constants.Close()
- txnfields := create("txn_fields.md")
- fieldSpecsMarkdown(txnfields, logic.TxnFieldNames, logic.TxnFieldSpecByName)
- txnfields.Close()
-
- globalfields := create("global_fields.md")
- fieldSpecsMarkdown(globalfields, logic.GlobalFieldNames, logic.GlobalFieldSpecByName)
- globalfields.Close()
-
- assetholding := create("asset_holding_fields.md")
- fieldSpecsMarkdown(assetholding, logic.AssetHoldingFieldNames, logic.AssetHoldingFieldSpecByName)
- assetholding.Close()
-
- assetparams := create("asset_params_fields.md")
- fieldSpecsMarkdown(assetparams, logic.AssetParamsFieldNames, logic.AssetParamsFieldSpecByName)
- assetparams.Close()
-
- appparams := create("app_params_fields.md")
- fieldSpecsMarkdown(appparams, logic.AppParamsFieldNames, logic.AppParamsFieldSpecByName)
- appparams.Close()
-
- acctparams, _ := os.Create("acct_params_fields.md")
- fieldSpecsMarkdown(acctparams, logic.AcctParamsFieldNames, logic.AcctParamsFieldSpecByName)
- acctparams.Close()
+ written := make(map[string]bool)
+ opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ for _, spec := range opSpecs {
+ for _, imm := range spec.Details.Immediates {
+ if imm.Group != nil && !written[imm.Group.Name] {
+ out := create(strings.ToLower(imm.Group.Name) + "_fields.md")
+ fieldGroupMarkdown(out, imm.Group)
+ out.Close()
+ written[imm.Group.Name] = true
+ }
+ }
+ }
langspecjs := create("langspec.json")
enc := json.NewEncoder(langspecjs)
+ enc.SetIndent("", " ")
enc.Encode(buildLanguageSpec(opGroups))
langspecjs.Close()
diff --git a/cmd/opdoc/tmLanguage.go b/cmd/opdoc/tmLanguage.go
index 204068a62..e66be5789 100644
--- a/cmd/opdoc/tmLanguage.go
+++ b/cmd/opdoc/tmLanguage.go
@@ -18,6 +18,7 @@ package main
import (
"fmt"
+ "sort"
"strings"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -122,15 +123,31 @@ func buildSyntaxHighlight() *tmLanguage {
},
}
var allNamedFields []string
- allNamedFields = append(allNamedFields, logic.TxnFieldNames...)
- allNamedFields = append(allNamedFields, logic.GlobalFieldNames...)
- allNamedFields = append(allNamedFields, logic.AssetHoldingFieldNames...)
- allNamedFields = append(allNamedFields, logic.AssetParamsFieldNames...)
- allNamedFields = append(allNamedFields, logic.OnCompletionNames...)
+ allNamedFields = append(allNamedFields, logic.TxnTypeNames[:]...)
+ allNamedFields = append(allNamedFields, logic.OnCompletionNames[:]...)
+ accumulated := make(map[string]bool)
+ opSpecs := logic.OpcodesByVersion(logic.LogicVersion)
+ for _, spec := range opSpecs {
+ for _, imm := range spec.Details.Immediates {
+ if imm.Group != nil && !accumulated[imm.Group.Name] {
+ allNamedFields = append(allNamedFields, imm.Group.Names...)
+ accumulated[imm.Group.Name] = true
+ }
+ }
+ }
+
+ var seen = make(map[string]bool, len(allNamedFields))
+ var dedupe = make([]string, 0, len(allNamedFields))
+ for _, name := range allNamedFields {
+ if name != "" && !seen[name] {
+ dedupe = append(dedupe, name)
+ }
+ seen[name] = true
+ }
literals.Patterns = append(literals.Patterns, pattern{
Name: "variable.parameter.teal",
- Match: fmt.Sprintf("\\b(%s)\\b", strings.Join(allNamedFields, "|")),
+ Match: fmt.Sprintf("\\b(%s)\\b", strings.Join(dedupe, "|")),
})
tm.Repository["literals"] = literals
@@ -153,7 +170,15 @@ func buildSyntaxHighlight() *tmLanguage {
},
}
var allArithmetics []string
- for grp, names := range logic.OpGroups {
+
+ var keys []string
+ for key := range logic.OpGroups {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ for _, grp := range keys {
+ names := logic.OpGroups[grp]
+ sort.Strings(names)
switch grp {
case "Flow Control":
keywords.Patterns = append(keywords.Patterns, pattern{
diff --git a/cmd/tealdbg/cdtSession.go b/cmd/tealdbg/cdtSession.go
index e880296a3..3ae5675b3 100644
--- a/cmd/tealdbg/cdtSession.go
+++ b/cmd/tealdbg/cdtSession.go
@@ -168,8 +168,8 @@ func (s *cdtSession) websocketHandler(w http.ResponseWriter, r *http.Request) {
// set pc and line to 0 to workaround Register ack
state.Update(cdtStateUpdate{
dbgState.Stack, dbgState.Scratch,
- 0, 0, "",
- dbgState.OpcodeBudget, s.debugger.GetStates(nil),
+ 0, 0, "", dbgState.OpcodeBudget, dbgState.CallStack,
+ s.debugger.GetStates(nil),
})
hash := sha256.Sum256([]byte(state.disassembly)) // some random hash
@@ -247,7 +247,7 @@ func (s *cdtSession) websocketHandler(w http.ResponseWriter, r *http.Request) {
state.Update(cdtStateUpdate{
dbgState.Stack, dbgState.Scratch,
dbgState.PC, dbgState.Line, dbgState.Error,
- dbgState.OpcodeBudget, appState,
+ dbgState.OpcodeBudget, dbgState.CallStack, appState,
})
dbgStateMu.Unlock()
@@ -473,14 +473,26 @@ func (s *cdtSession) handleCdtRequest(req *cdt.ChromeRequest, state *cdtState) (
response = cdt.ChromeResponse{ID: req.ID, Result: empty}
case "Debugger.stepOut":
state.lastAction.Store("step")
- state.pauseOnCompeted.SetTo(true)
- s.debugger.Resume()
+ if len(state.callStack) == 0 {
+ // If we are not in a subroutine, pause at the end so user can
+ // inspect the final state of the program.
+ state.pauseOnCompleted.SetTo(true)
+ }
+ s.debugger.StepOut()
+ if state.completed.IsSet() {
+ evDestroyed := s.makeContextDestroyedEvent()
+ events = append(events, &evDestroyed)
+ }
+ response = cdt.ChromeResponse{ID: req.ID, Result: empty}
+ case "Debugger.stepOver":
+ state.lastAction.Store("step")
+ s.debugger.StepOver()
if state.completed.IsSet() {
evDestroyed := s.makeContextDestroyedEvent()
events = append(events, &evDestroyed)
}
response = cdt.ChromeResponse{ID: req.ID, Result: empty}
- case "Debugger.stepOver", "Debugger.stepInto":
+ case "Debugger.stepInto":
state.lastAction.Store("step")
s.debugger.Step()
if state.completed.IsSet() {
@@ -497,7 +509,7 @@ func (s *cdtSession) handleCdtRequest(req *cdt.ChromeRequest, state *cdtState) (
func (s *cdtSession) computeEvent(state *cdtState) (event interface{}) {
if state.completed.IsSet() {
- if state.pauseOnCompeted.IsSet() {
+ if state.pauseOnCompleted.IsSet() {
event = s.makeDebuggerPausedEvent(state)
return
}
@@ -571,22 +583,43 @@ func (s *cdtSession) makeDebuggerPausedEvent(state *cdtState) cdt.DebuggerPaused
},
}
sc := []cdt.DebuggerScope{scopeLocal, scopeGlobal}
- cf := cdt.DebuggerCallFrame{
- CallFrameID: "mainframe",
- FunctionName: "",
- Location: &cdt.DebuggerLocation{
- ScriptID: s.scriptID,
- LineNumber: state.line.Load(),
- ColumnNumber: 0,
+
+ cfs := []cdt.DebuggerCallFrame{
+ {
+ CallFrameID: "mainframe",
+ FunctionName: "main",
+ Location: &cdt.DebuggerLocation{
+ ScriptID: s.scriptID,
+ LineNumber: state.line.Load(),
+ ColumnNumber: 0,
+ },
+ URL: s.scriptURL,
+ ScopeChain: sc,
},
- URL: s.scriptURL,
- ScopeChain: sc,
+ }
+ for i := range state.callStack {
+ cf := cdt.DebuggerCallFrame{
+ CallFrameID: "mainframe",
+ FunctionName: state.callStack[i].LabelName,
+ Location: &cdt.DebuggerLocation{
+ ScriptID: s.scriptID,
+ LineNumber: state.line.Load(),
+ ColumnNumber: 0,
+ },
+ URL: s.scriptURL,
+ ScopeChain: sc,
+ }
+ // Set the previous call frame line number
+ cfs[0].Location.LineNumber = state.callStack[i].FrameLine
+ // We have to prepend the newest frame for it to appear first
+ // in the debugger...
+ cfs = append([]cdt.DebuggerCallFrame{cf}, cfs...)
}
evPaused := cdt.DebuggerPausedEvent{
Method: "Debugger.paused",
Params: cdt.DebuggerPausedParams{
- CallFrames: []cdt.DebuggerCallFrame{cf},
+ CallFrames: cfs,
Reason: "other",
HitBreakpoints: make([]string, 0),
},
diff --git a/cmd/tealdbg/cdtSession_test.go b/cmd/tealdbg/cdtSession_test.go
index e670572a8..1668c1a03 100644
--- a/cmd/tealdbg/cdtSession_test.go
+++ b/cmd/tealdbg/cdtSession_test.go
@@ -436,7 +436,7 @@ func TestCdtSessionStateToEvent(t *testing.T) {
// if completed and pause on completed then pause
state.completed.SetTo(true)
- state.pauseOnCompeted.SetTo(true)
+ state.pauseOnCompleted.SetTo(true)
e = s.computeEvent(&state)
_, ok = (e).(cdt.DebuggerPausedEvent)
require.True(t, ok)
diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go
index 457def831..21273982f 100644
--- a/cmd/tealdbg/cdtState.go
+++ b/cmd/tealdbg/cdtState.go
@@ -42,19 +42,20 @@ type cdtState struct {
globals []basics.TealValue
// mutable program state
- mu deadlock.Mutex
- stack []basics.TealValue
- scratch []basics.TealValue
- pc atomicInt
- line atomicInt
- err atomicString
+ mu deadlock.Mutex
+ stack []basics.TealValue
+ scratch []basics.TealValue
+ pc atomicInt
+ line atomicInt
+ err atomicString
+ callStack []logic.CallFrame
AppState
// debugger states
- lastAction atomicString
- pauseOnError atomicBool
- pauseOnCompeted atomicBool
- completed atomicBool
+ lastAction atomicString
+ pauseOnError atomicBool
+ pauseOnCompleted atomicBool
+ completed atomicBool
}
type cdtStateUpdate struct {
@@ -64,6 +65,7 @@ type cdtStateUpdate struct {
line int
err string
opcodeBudget int
+ callStack []logic.CallFrame
AppState
}
@@ -110,37 +112,7 @@ func (s *cdtState) Update(state cdtStateUpdate) {
s.AppState = state.AppState
// We need to dynamically override opcodeBudget with the proper value each step.
s.globals[logic.OpcodeBudget].Uint = uint64(state.opcodeBudget)
-}
-
-const localScopeObjID = "localScopeObjId"
-const globalScopeObjID = "globalScopeObjID"
-const globalsObjID = "globalsObjID"
-const txnObjID = "txnObjID"
-const gtxnObjID = "gtxnObjID"
-const stackObjID = "stackObjID"
-const scratchObjID = "scratchObjID"
-const tealErrorID = "tealErrorID"
-const appGlobalObjID = "appGlobalObjID"
-const appLocalsObjID = "appLocalsObjID"
-const txnArrayFieldObjID = "txnArrayField"
-const logsObjID = "logsObjID"
-const innerTxnsObjID = "innerTxnsObjID"
-
-type objectDescFn func(s *cdtState, preview bool) []cdt.RuntimePropertyDescriptor
-
-var objectDescMap = map[string]objectDescFn{
- globalScopeObjID: makeGlobalScope,
- localScopeObjID: makeLocalScope,
- globalsObjID: makeGlobals,
- txnObjID: makeTxn,
- gtxnObjID: makeTxnGroup,
- stackObjID: makeStack,
- scratchObjID: makeScratch,
- tealErrorID: makeTealError,
- appGlobalObjID: makeAppGlobalState,
- appLocalsObjID: makeAppLocalsState,
- logsObjID: makeLogsState,
- innerTxnsObjID: makeInnerTxnsState,
+ s.callStack = state.callStack
}
func (s *cdtState) getObjectDescriptor(objID string, preview bool) (desc []cdt.RuntimePropertyDescriptor, err error) {
@@ -591,8 +563,6 @@ func makeGlobalsPreview(globals []basics.TealValue) cdt.RuntimeObjectPreview {
return p
}
-var gtxnObjIDPrefix = fmt.Sprintf("%s_gid_", gtxnObjID)
-
func encodeGroupTxnID(groupIndex int) string {
return gtxnObjIDPrefix + strconv.Itoa(groupIndex)
}
@@ -606,10 +576,6 @@ func decodeGroupTxnID(objID string) (int, bool) {
return 0, false
}
-var logObjIDPrefix = fmt.Sprintf("%s_id", logsObjID)
-var innerTxnObjIDPrefix = fmt.Sprintf("%s_id", innerTxnsObjID)
-var innerNestedTxnObjIDPrefix = fmt.Sprintf("%s_nested", innerTxnsObjID)
-
func encodeNestedObjID(groupIndexes []int, prefix string) string {
encodedElements := []string{prefix}
for _, i := range groupIndexes {
@@ -695,8 +661,6 @@ func decodeArraySlice(objID string) (string, int, int, bool) {
return "", 0, 0, false
}
-var appGlobalObjIDPrefix = fmt.Sprintf("%s_", appGlobalObjID)
-
func encodeAppGlobalAppID(key string) string {
return appGlobalObjIDPrefix + key
}
@@ -710,8 +674,6 @@ func decodeAppGlobalAppID(objID string) (uint64, bool) {
return 0, false
}
-var appLocalsObjIDPrefix = fmt.Sprintf("%s_", appLocalsObjID)
-
func encodeAppLocalsAddr(addr string) string {
return appLocalsObjIDPrefix + addr
}
@@ -723,8 +685,6 @@ func decodeAppLocalsAddr(objID string) (string, bool) {
return "", false
}
-var appLocalAppIDPrefix = fmt.Sprintf("%s__", appLocalsObjID)
-
func encodeAppLocalsAppID(addr string, appID string) string {
return fmt.Sprintf("%s%s_%s", appLocalAppIDPrefix, addr, appID)
}
@@ -740,8 +700,6 @@ func decodeAppLocalsAppID(objID string) (string, uint64, bool) {
return "", 0, false
}
-var txnArrayFieldPrefix = fmt.Sprintf("%s__", txnArrayFieldObjID)
-
func encodeTxnArrayField(groupIndex int, field int) string {
return fmt.Sprintf("%s%d_%d", txnArrayFieldPrefix, groupIndex, field)
}
diff --git a/cmd/tealdbg/cdtStateObjects.go b/cmd/tealdbg/cdtStateObjects.go
new file mode 100644
index 000000000..b6daf39f1
--- /dev/null
+++ b/cmd/tealdbg/cdtStateObjects.go
@@ -0,0 +1,67 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "github.com/algorand/go-algorand/cmd/tealdbg/cdt"
+)
+
+// Object IDs
+const (
+ localScopeObjID = "localScopeObjId"
+ globalScopeObjID = "globalScopeObjID"
+ globalsObjID = "globalsObjID"
+ txnObjID = "txnObjID"
+ gtxnObjID = "gtxnObjID"
+ stackObjID = "stackObjID"
+ scratchObjID = "scratchObjID"
+ tealErrorID = "tealErrorID"
+ appGlobalObjID = "appGlobalObjID"
+ appLocalsObjID = "appLocalsObjID"
+ txnArrayFieldObjID = "txnArrayField"
+ logsObjID = "logsObjID"
+ innerTxnsObjID = "innerTxnsObjID"
+)
+
+// Object Prefix IDs
+const (
+ gtxnObjIDPrefix = gtxnObjID + "_gid_"
+ logObjIDPrefix = logsObjID + "_id"
+ innerTxnObjIDPrefix = innerTxnsObjID + "_id"
+ innerNestedTxnObjIDPrefix = innerTxnsObjID + "_nested"
+ appGlobalObjIDPrefix = appGlobalObjID + "_"
+ appLocalsObjIDPrefix = appLocalsObjID + "_"
+ appLocalAppIDPrefix = appLocalsObjID + "__"
+ txnArrayFieldPrefix = txnArrayFieldObjID + "__"
+)
+
+type objectDescFn func(s *cdtState, preview bool) []cdt.RuntimePropertyDescriptor
+
+var objectDescMap = map[string]objectDescFn{
+ globalScopeObjID: makeGlobalScope,
+ localScopeObjID: makeLocalScope,
+ globalsObjID: makeGlobals,
+ txnObjID: makeTxn,
+ gtxnObjID: makeTxnGroup,
+ stackObjID: makeStack,
+ scratchObjID: makeScratch,
+ tealErrorID: makeTealError,
+ appGlobalObjID: makeAppGlobalState,
+ appLocalsObjID: makeAppLocalsState,
+ logsObjID: makeLogsState,
+ innerTxnsObjID: makeInnerTxnsState,
+}
diff --git a/cmd/tealdbg/cdtdbg_test.go b/cmd/tealdbg/cdtdbg_test.go
index 3e6acf8b1..41164a3af 100644
--- a/cmd/tealdbg/cdtdbg_test.go
+++ b/cmd/tealdbg/cdtdbg_test.go
@@ -106,6 +106,12 @@ type MockDebugControl struct {
func (c *MockDebugControl) Step() {
}
+func (c *MockDebugControl) StepOver() {
+}
+
+func (c *MockDebugControl) StepOut() {
+}
+
func (c *MockDebugControl) Resume() {
}
diff --git a/cmd/tealdbg/debugger.go b/cmd/tealdbg/debugger.go
index 4d7421f93..34957f2c7 100644
--- a/cmd/tealdbg/debugger.go
+++ b/cmd/tealdbg/debugger.go
@@ -53,6 +53,8 @@ type DebugAdapter interface {
// Control interface for execution control
type Control interface {
Step()
+ StepOver()
+ StepOut()
Resume()
SetBreakpoint(line int) error
RemoveBreakpoint(line int) error
@@ -89,19 +91,54 @@ type programMeta struct {
states AppState
}
-// breakpointLine is a source line number with a couple special values:
-// -1 do not break
-// 0 break at next instruction
-// N break at line N
-type breakpointLine int
+// debugConfig contains information about control execution and breakpoints.
+type debugConfig struct {
+ NoBreak bool `json:"nobreak"`
+ StepBreak bool `json:"stepbreak"`
+ StepOutOver bool `json:"stepover"`
-const (
- noBreak breakpointLine = -1
- stepBreak breakpointLine = 0
-)
+ ActiveBreak map[int]struct{} `json:"activebreak"`
+ CallDepth int `json:"calldepth"`
+}
-type debugConfig struct {
- BreakAtLine breakpointLine `json:"breakatline"`
+func makeDebugConfig() debugConfig {
+ dc := debugConfig{}
+ dc.ActiveBreak = make(map[int]struct{})
+ return dc
+}
+
+func (dc *debugConfig) setNoBreak() {
+ dc.NoBreak = true
+}
+
+func (dc *debugConfig) setStepBreak() {
+ dc.StepBreak = true
+}
+
+func (dc *debugConfig) setStepOutOver(callDepth int) {
+ dc.StepOutOver = true
+ dc.CallDepth = callDepth
+}
+
+// setActiveBreak does not validate the line number, so it must only be
+// called from session.setBreakpoint(), which performs that check.
+func (dc *debugConfig) setActiveBreak(line int) {
+ dc.ActiveBreak[line] = struct{}{}
+}
+
+// isBreak checks if Update() should break at this line and callDepth.
+func (dc *debugConfig) isBreak(line int, callDepth int) bool {
+ if dc.StepBreak {
+ return true
+ }
+
+ _, ok := dc.ActiveBreak[line]
+ if !dc.StepOutOver || dc.CallDepth == callDepth {
+ // If we are in stepOver or stepOut, then make sure we check
+ // callstack depth before breaking at this line.
+ return ok
+ }
+ return false
}
type session struct {
@@ -111,7 +148,8 @@ type session struct {
acknowledged chan bool
// debugConfigs holds information about this debugging session,
- // currently just when we want to break
+ // such as the breakpoints, initial call stack depth, and whether we want
+ // to step over/out/in.
debugConfig debugConfig
// notifications from eval
@@ -130,6 +168,8 @@ type session struct {
breakpoints []breakpoint
line atomicInt
+ callStack []logic.CallFrame
+
states AppState
}
@@ -146,9 +186,8 @@ func makeSession(disassembly string, line int) (s *session) {
s = new(session)
// Allocate a default debugConfig (don't break)
- s.debugConfig = debugConfig{
- BreakAtLine: noBreak,
- }
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setNoBreak()
// Allocate an acknowledgement and notifications channels
s.acknowledged = make(chan bool)
@@ -158,6 +197,7 @@ func makeSession(disassembly string, line int) (s *session) {
s.lines = strings.Split(disassembly, "\n")
s.breakpoints = make([]breakpoint, len(s.lines))
s.line.Store(line)
+ s.callStack = []logic.CallFrame{}
return
}
@@ -181,25 +221,69 @@ func (s *session) Step() {
func() {
s.mu.Lock()
defer s.mu.Unlock()
- s.debugConfig = debugConfig{BreakAtLine: stepBreak}
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setStepBreak()
}()
s.resume()
}
-func (s *session) Resume() {
- currentLine := s.line.Load()
+func (s *session) StepOver() {
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ // Get the first TEAL opcode in the line
+ currentOp := strings.Fields(s.lines[s.line.Load()])[0]
+ s.debugConfig = makeDebugConfig()
+
+ // Step over a function call (callsub op).
+ if currentOp == "callsub" && s.line.Load() < len(s.breakpoints) {
+ // Set a flag to check if we are in StepOver mode and to
+ // save our initial call depth so we can pass over breakpoints that
+ // are not on the correct call depth.
+ s.debugConfig.setStepOutOver(len(s.callStack))
+ err := s.setBreakpoint(s.line.Load() + 1)
+ if err != nil {
+ s.debugConfig.setStepBreak()
+ }
+ } else {
+ s.debugConfig.setStepBreak()
+ }
+ }()
+ s.resume()
+}
+
+func (s *session) StepOut() {
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.debugConfig = makeDebugConfig()
+ if len(s.callStack) == 0 {
+ s.debugConfig.setNoBreak()
+ } else {
+ callFrame := s.callStack[len(s.callStack)-1]
+ s.debugConfig.setStepOutOver(len(s.callStack) - 1)
+ err := s.setBreakpoint(callFrame.FrameLine + 1)
+ if err != nil {
+ s.debugConfig.setStepBreak()
+ }
+ }
+ }()
+ s.resume()
+}
+
+func (s *session) Resume() {
func() {
s.mu.Lock()
defer s.mu.Unlock()
- s.debugConfig = debugConfig{BreakAtLine: noBreak} // reset possible break after Step
- // find any active breakpoints and set next break
- if currentLine < len(s.breakpoints) {
- for line, state := range s.breakpoints[currentLine+1:] {
- if state.set && state.active {
- s.setBreakpoint(line + currentLine + 1)
- break
+ s.debugConfig = makeDebugConfig()
+ // find any active breakpoints and set break
+ for line, state := range s.breakpoints {
+ if state.set && state.active {
+ err := s.setBreakpoint(line)
+ if err != nil {
+ s.debugConfig.setStepBreak()
}
}
}
@@ -209,21 +293,30 @@ func (s *session) Resume() {
}
// setBreakpoint must be called with lock taken
+// Used for setting a breakpoint in step execution and adding bp to the session.
func (s *session) setBreakpoint(line int) error {
if line >= len(s.breakpoints) {
return fmt.Errorf("invalid bp line %d", line)
}
s.breakpoints[line] = breakpoint{set: true, active: true}
- s.debugConfig = debugConfig{BreakAtLine: breakpointLine(line)}
+ s.debugConfig.setActiveBreak(line)
return nil
}
func (s *session) SetBreakpoint(line int) error {
s.mu.Lock()
defer s.mu.Unlock()
+ // Reset all existing flags and breakpoints and set a new bp.
+ s.debugConfig = makeDebugConfig()
return s.setBreakpoint(line)
}
+func (s *session) setCallStack(callStack []logic.CallFrame) {
+ s.mu.Lock()
+ s.callStack = callStack
+ s.mu.Unlock()
+}
+
func (s *session) RemoveBreakpoint(line int) error {
s.mu.Lock()
defer s.mu.Unlock()
@@ -232,7 +325,8 @@ func (s *session) RemoveBreakpoint(line int) error {
return fmt.Errorf("invalid bp line %d", line)
}
if s.breakpoints[line].NonEmpty() {
- s.debugConfig = debugConfig{BreakAtLine: noBreak}
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setNoBreak()
s.breakpoints[line] = breakpoint{}
}
return nil
@@ -248,7 +342,8 @@ func (s *session) SetBreakpointsActive(active bool) {
}
}
if !active {
- s.debugConfig = debugConfig{BreakAtLine: noBreak}
+ s.debugConfig = makeDebugConfig()
+ s.debugConfig.setNoBreak()
}
}
@@ -478,8 +573,10 @@ func (d *Debugger) Update(state *logic.DebugState) error {
// copy state to prevent a data race between this go-routine and upcoming updates to the state
go func(localState logic.DebugState) {
// Check if we are triggered and acknowledge asynchronously
- if cfg.BreakAtLine != noBreak {
- if cfg.BreakAtLine == stepBreak || breakpointLine(localState.Line) == cfg.BreakAtLine {
+ if !cfg.NoBreak {
+ if cfg.isBreak(localState.Line, len(localState.CallStack)) {
+ // Copy callstack information
+ s.setCallStack(state.CallStack)
// Breakpoint hit! Inform the user
s.notifications <- Notification{"updated", localState}
} else {
diff --git a/cmd/tealdbg/debugger_test.go b/cmd/tealdbg/debugger_test.go
index b37f7fb35..4a390d461 100644
--- a/cmd/tealdbg/debugger_test.go
+++ b/cmd/tealdbg/debugger_test.go
@@ -53,7 +53,7 @@ func (d *testDbgAdapter) WaitForCompletion() {
<-d.done
}
-func (d *testDbgAdapter) SessionStarted(sid string, debugger Control, ch chan Notification) {
+func (d *testDbgAdapter) SessionStarted(_ string, debugger Control, ch chan Notification) {
d.debugger = debugger
d.notifications = ch
@@ -62,7 +62,7 @@ func (d *testDbgAdapter) SessionStarted(sid string, debugger Control, ch chan No
d.started = true
}
-func (d *testDbgAdapter) SessionEnded(sid string) {
+func (d *testDbgAdapter) SessionEnded(_ string) {
d.ended = true
}
@@ -84,7 +84,8 @@ func (d *testDbgAdapter) eventLoop() {
require.NotNil(d.t, n.DebugState.Scratch)
require.NotEmpty(d.t, n.DebugState.Disassembly)
require.NotEmpty(d.t, n.DebugState.ExecID)
- d.debugger.SetBreakpoint(n.DebugState.Line + 1)
+ err := d.debugger.SetBreakpoint(n.DebugState.Line + 1)
+ require.NoError(d.t, err)
}
d.debugger.Resume()
}
@@ -121,9 +122,8 @@ int 1
require.Equal(t, 3, da.eventCount) // register, update, complete
}
-func TestSession(t *testing.T) {
- partitiontest.PartitionTest(t)
- source := fmt.Sprintf("#pragma version %d\nint 1\ndup\n+\n", logic.LogicVersion)
+func createSessionFromSource(t *testing.T, program string) *session {
+ source := fmt.Sprintf(program, logic.LogicVersion)
ops, err := logic.AssembleStringWithVersion(source, logic.LogicVersion)
require.NoError(t, err)
disassembly, err := logic.Disassemble(ops.Program)
@@ -141,7 +141,14 @@ func TestSession(t *testing.T) {
s.programName = "test"
s.offsetToLine = ops.OffsetToLine
s.pcOffset = pcOffset
- err = s.SetBreakpoint(2)
+
+ return s
+}
+
+func TestSession(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ s := createSessionFromSource(t, "#pragma version %d\nint 1\ndup\n+\n")
+ err := s.SetBreakpoint(2)
require.NoError(t, err)
ackCount := 0
@@ -155,9 +162,9 @@ func TestSession(t *testing.T) {
s.Resume()
<-done
-
- require.Equal(t, breakpointLine(2), s.debugConfig.BreakAtLine)
+ require.Equal(t, map[int]struct{}{2: {}}, s.debugConfig.ActiveBreak)
require.Equal(t, breakpoint{true, true}, s.breakpoints[2])
+ require.Equal(t, true, s.debugConfig.isBreak(2, len(s.callStack)))
require.Equal(t, 1, ackCount)
s.SetBreakpointsActive(false)
@@ -166,7 +173,8 @@ func TestSession(t *testing.T) {
s.SetBreakpointsActive(true)
require.Equal(t, breakpoint{true, true}, s.breakpoints[2])
- s.RemoveBreakpoint(2)
+ err = s.RemoveBreakpoint(2)
+ require.NoError(t, err)
require.Equal(t, breakpoint{false, false}, s.breakpoints[2])
go ackFunc()
@@ -174,7 +182,7 @@ func TestSession(t *testing.T) {
s.Step()
<-done
- require.Equal(t, stepBreak, s.debugConfig.BreakAtLine)
+ require.Equal(t, true, s.debugConfig.StepBreak)
require.Equal(t, 2, ackCount)
data, err := s.GetSourceMap()
@@ -185,3 +193,171 @@ func TestSession(t *testing.T) {
require.NotEmpty(t, name)
require.Greater(t, len(data), 0)
}
+
+// Tests control functions for stepping over subroutines and checks
+// that call stack is inspected correctly.
+func TestCallStackControl(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ newTestCase := func() (*session, chan struct{}, func(), *int) {
+ s := createSessionFromSource(t, "#pragma version %d\nlab1:\nint 1\ncallsub lab1\ndup\n+\n")
+
+ ackCount := 0
+ done := make(chan struct{})
+ ackFunc := func() {
+ ackCount++
+ <-s.acknowledged
+ done <- struct{}{}
+
+ }
+
+ return s, done, ackFunc, &ackCount
+ }
+
+ cases := map[string]func(*testing.T){
+ "Check that step over on callsub line returns correct callstack depth": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+ s.setCallStack([]logic.CallFrame{{FrameLine: 2, LabelName: "lab1"}})
+ initialStackDepth := len(s.callStack)
+ s.line.Store(3)
+
+ go ackFunc()
+ s.StepOver()
+ <-done
+
+ require.Equal(t, map[int]struct{}{4: {}}, s.debugConfig.ActiveBreak)
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[4])
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, true, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, initialStackDepth, len(s.callStack))
+ },
+ "Breakpoint should not trigger at the wrong call stack height": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.setCallStack([]logic.CallFrame{{FrameLine: 2, LabelName: "lab1"}})
+ s.line.Store(3)
+
+ go ackFunc()
+ s.StepOver()
+ <-done
+
+ s.setCallStack([]logic.CallFrame{
+ {FrameLine: 2, LabelName: "lab1"},
+ {FrameLine: 2, LabelName: "lab1"},
+ })
+ require.Equal(t, false, s.debugConfig.isBreak(4, len(s.callStack)))
+
+ s.setCallStack([]logic.CallFrame{
+ {FrameLine: 2, LabelName: "lab1"},
+ })
+ require.Equal(t, true, s.debugConfig.isBreak(4, len(s.callStack)))
+ require.Equal(t, 1, *ackCount)
+ },
+ "Check step over on a non callsub line breaks at next line": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.line.Store(4)
+
+ go ackFunc()
+ s.StepOver()
+ <-done
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, true, s.debugConfig.StepBreak)
+ require.Equal(t, false, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 0, len(s.callStack))
+ },
+ "Check that step out when call stack depth is 1 sets breakpoint to the line after frame": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.setCallStack([]logic.CallFrame{{FrameLine: 2, LabelName: "lab1"}})
+ s.line.Store(4)
+
+ go ackFunc()
+ s.StepOut()
+ <-done
+
+ require.Equal(t, map[int]struct{}{3: {}}, s.debugConfig.ActiveBreak)
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[3])
+ require.Equal(t, true, s.debugConfig.isBreak(3, len(s.callStack)-1))
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, true, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 1, len(s.callStack))
+ },
+ "Check that step out when call stack depth is 0 sets NoBreak to true": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.setCallStack(nil)
+ s.line.Store(3)
+
+ go ackFunc()
+ s.StepOut()
+ <-done
+
+ require.Equal(t, true, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, false, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 0, len(s.callStack))
+ },
+ "Check that resume keeps track of every breakpoint": func(t *testing.T) {
+ s, done, ackFunc, ackCount := newTestCase()
+
+ s.line.Store(3)
+ err := s.RemoveBreakpoint(3)
+ require.NoError(t, err)
+ require.Equal(t, breakpoint{false, false}, s.breakpoints[2])
+ err = s.SetBreakpoint(2)
+ require.NoError(t, err)
+ err = s.SetBreakpoint(4)
+ require.NoError(t, err)
+
+ go ackFunc()
+ s.Resume()
+ <-done
+
+ require.Equal(t, map[int]struct{}{2: {}, 4: {}}, s.debugConfig.ActiveBreak)
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[2])
+ require.Equal(t, breakpoint{true, true}, s.breakpoints[4])
+ require.Equal(t, true, s.debugConfig.isBreak(2, len(s.callStack)))
+ require.Equal(t, false, s.debugConfig.isBreak(3, len(s.callStack)))
+ require.Equal(t, true, s.debugConfig.isBreak(4, len(s.callStack)))
+
+ require.Equal(t, false, s.debugConfig.NoBreak)
+ require.Equal(t, false, s.debugConfig.StepBreak)
+ require.Equal(t, false, s.debugConfig.StepOutOver)
+
+ require.Equal(t, 1, *ackCount)
+ require.Equal(t, 0, len(s.callStack))
+ },
+ }
+
+ for name, f := range cases {
+ t.Run(name, f)
+ }
+}
+
+func TestSourceMaps(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ s := createSessionFromSource(t, "#pragma version %d\nint 1\n")
+
+ // Source and source map checks
+ data, err := s.GetSourceMap()
+ require.NoError(t, err)
+ require.Greater(t, len(data), 0)
+
+ name, data := s.GetSource()
+ require.NotEmpty(t, name)
+ require.Greater(t, len(data), 0)
+}
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 6bf088e2f..8e92f66fd 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -530,17 +530,29 @@ func (r *LocalRunner) RunAll() error {
return fmt.Errorf("no program to debug")
}
+ configureDebugger := func(ep *logic.EvalParams) {
+ // Workaround for Go's nil/empty interfaces nil check after nil assignment, i.e.
+ // r.debugger = nil
+ // ep.Debugger = r.debugger
+ // if ep.Debugger != nil // FALSE
+ if r.debugger != nil {
+ ep.Debugger = r.debugger
+ }
+ }
+
txngroup := transactions.WrapSignedTxnsWithAD(r.txnGroup)
failed := 0
start := time.Now()
ep := logic.NewEvalParams(txngroup, &r.proto, &transactions.SpecialAddresses{})
- ep.Debugger = r.debugger
+ configureDebugger(ep)
var last error
for i := range r.runs {
run := &r.runs[i]
- r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
+ if r.debugger != nil {
+ r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
+ }
run.result.pass, run.result.err = run.eval(int(run.groupIndex), ep)
if run.result.err != nil {
@@ -554,26 +566,3 @@ func (r *LocalRunner) RunAll() error {
}
return nil
}
-
-// Run starts the first program in list
-func (r *LocalRunner) Run() (bool, error) {
- if len(r.runs) < 1 {
- return false, fmt.Errorf("no program to debug")
- }
-
- txngroup := transactions.WrapSignedTxnsWithAD(r.txnGroup)
-
- ep := logic.NewEvalParams(txngroup, &r.proto, &transactions.SpecialAddresses{})
-
- run := r.runs[0]
- // Workaround for Go's nil/empty interfaces nil check after nil assignment, i.e.
- // r.debugger = nil
- // ep.Debugger = r.debugger
- // if ep.Debugger != nil // FALSE
- if r.debugger != nil {
- r.debugger.SaveProgram(run.name, run.program, run.source, run.offsetToLine, run.states)
- ep.Debugger = r.debugger
- }
-
- return run.eval(int(run.groupIndex), ep)
-}
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index 99cfa1562..f39c9da8a 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -17,6 +17,7 @@
package main
import (
+ "encoding/base64"
"encoding/json"
"fmt"
"net/http"
@@ -34,6 +35,7 @@ import (
"github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -55,6 +57,55 @@ var txnSample string = `{
}
`
+type runAllResult struct {
+ invocationError error
+ results []evalResult
+}
+
+func runAllResultFromInvocation(lr LocalRunner) runAllResult {
+ err := lr.RunAll()
+ results := make([]evalResult, len(lr.runs))
+ for i := range results {
+ results[i] = lr.runs[i].result
+ }
+
+ return runAllResult{
+ invocationError: err,
+ results: results,
+ }
+}
+
+func (r runAllResult) allErrors() []error {
+ es := make([]error, len(r.results)+1)
+ es[0] = r.invocationError
+ for i := range r.results {
+ es[i+1] = r.results[i].err
+ }
+ return es
+}
+
+func allPassing(runCount int) runAllResult {
+ results := make([]evalResult, runCount)
+ for i := range results {
+ results[i].pass = true
+ }
+ return runAllResult{
+ invocationError: nil,
+ results: results,
+ }
+}
+
+func allErrors(es []error) assert.Comparison {
+ return func() bool {
+ for _, e := range es {
+ if e == nil {
+ return false
+ }
+ }
+ return true
+ }
+}
+
func TestTxnJSONInput(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -473,9 +524,7 @@ int 100
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- a.NoError(err)
- a.True(pass)
+ a.Equal(allPassing(len(local.runs)), runAllResultFromInvocation(*local))
// check relaxed - opted in for both
source = `#pragma version 2
@@ -496,9 +545,8 @@ int 1
err = local.Setup(&ds)
a.NoError(err)
- pass, err = local.Run()
- a.NoError(err)
- a.True(pass)
+ a.Equal(allPassing(len(local.runs)), runAllResultFromInvocation(*local))
+
ds.Painless = false
// check ForeignApp
@@ -516,9 +564,8 @@ byte 0x676c6f62616c // global
err = local.Setup(&ds)
a.NoError(err)
- pass, err = local.Run()
- a.Error(err)
- a.False(pass)
+ r := runAllResultFromInvocation(*local)
+ a.Condition(allErrors(r.allErrors()))
}
func TestDebugFromPrograms(t *testing.T) {
@@ -1136,9 +1183,8 @@ int 1`
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- a.NoError(err)
- a.True(pass)
+ r := runAllResultFromInvocation(*local)
+ a.Equal(allPassing(len(local.runs)), r)
}
func TestDebugFeePooling(t *testing.T) {
@@ -1195,12 +1241,20 @@ int 1`
// two testcase: success with enough fees and fail otherwise
var tests = []struct {
- pass bool
- fee uint64
+ fee uint64
+ expected func(LocalRunner, runAllResult)
}{
- {true, 2000},
- {false, 1500},
+ {2000, func(l LocalRunner, r runAllResult) {
+ a.Equal(allPassing(len(l.runs)), r)
+ }},
+ {1500, func(_ LocalRunner, r runAllResult) {
+ a.Condition(allErrors(r.allErrors()))
+ for _, result := range r.results {
+ a.False(result.pass)
+ }
+ }},
}
+
for _, test := range tests {
t.Run(fmt.Sprintf("fee=%d", test.fee), func(t *testing.T) {
@@ -1223,14 +1277,8 @@ int 1`
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- if test.pass {
- a.NoError(err)
- a.True(pass)
- } else {
- a.Error(err)
- a.False(pass)
- }
+ r := runAllResultFromInvocation(*local)
+ test.expected(*local, r)
})
}
}
@@ -1315,11 +1363,22 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
balanceBlob := protocol.EncodeMsgp(&br)
var tests = []struct {
- pass bool
additionalApps int
+ expected func(LocalRunner, runAllResult)
}{
- {false, 2},
- {true, 3},
+ {2, func(_ LocalRunner, r runAllResult) {
+ a.ErrorContains(r.results[0].err, "dynamic cost budget exceeded")
+
+ a.Equal(
+ allPassing(len(r.results)-1),
+ runAllResult{
+ invocationError: r.invocationError,
+ results: r.results[1:],
+ })
+ }},
+ {3, func(l LocalRunner, r runAllResult) {
+ a.Equal(allPassing(len(l.runs)), r)
+ }},
}
for _, test := range tests {
t.Run(fmt.Sprintf("txn-count=%d", test.additionalApps+1), func(t *testing.T) {
@@ -1347,15 +1406,7 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
err = local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
- if test.pass {
- a.NoError(err)
- a.True(pass)
- } else {
- a.Error(err)
- a.Contains(err.Error(), "dynamic cost budget exceeded")
- a.False(pass)
- }
+ test.expected(*local, runAllResultFromInvocation(*local))
})
}
}
@@ -1455,7 +1506,310 @@ func TestGroupTxnIdx(t *testing.T) {
err := local.Setup(&ds)
a.NoError(err)
- pass, err := local.Run()
+ r := runAllResultFromInvocation(*local)
+ a.Equal(allPassing(len(local.runs)), r)
+}
+
+func TestRunAllGloads(t *testing.T) {
+
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ sourceA := `#pragma version 6
+
+ txn ApplicationID
+ bz handle_createapp
+
+ int 99
+ store 1
+
+ itxn_begin
+ int acfg
+ itxn_field TypeEnum
+ int 1000000
+ itxn_field ConfigAssetTotal
+ int 3
+ itxn_field ConfigAssetDecimals
+ byte base64 AA==
+ itxn_field ConfigAssetUnitName
+ byte base64(AAAAAAAAAAA=)
+ itxn_field ConfigAssetName
+ pushbytes 0x0000000000000000
+ itxn_field ConfigAssetURL
+ global CurrentApplicationAddress
+ dup
+ dup2
+ itxn_field ConfigAssetManager
+ itxn_field ConfigAssetReserve
+ itxn_field ConfigAssetFreeze
+ itxn_field ConfigAssetClawback
+ itxn_submit
+
+ handle_createapp:
+ int 1`
+
+ sourceB := `#pragma version 6
+
+ txn ApplicationID
+ bz handle_createapp
+
+ gload 2 1
+ itob
+ log
+
+ handle_createapp:
+ int 1`
+
+ ops, err := logic.AssembleString(sourceA)
+ a.NoError(err)
+ progA := base64.StdEncoding.EncodeToString(ops.Program)
+
+ ops, err = logic.AssembleString(sourceB)
+ a.NoError(err)
+ progB := base64.StdEncoding.EncodeToString(ops.Program)
+
+ // Transaction group with 4 transactions
+ // 1. Payment txn to app A
+ // 2. Payment txn to app B
+ // 3. App call to app A
+ // 4. App call to app B with gload on app A scratch slot
+ ddrBlob := `{
+ "accounts": [
+ {
+ "address": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "amount": 4000001724861773,
+ "amount-without-pending-rewards": 4000001724861773,
+ "min-balance": 100000,
+ "participation": {
+ "selection-participation-key": "S3YIZ2TNGSl1plq93eXsXsRhJRfCyIMKq0sq12++C8Y=",
+ "state-proof-key": "4BqeyojB23ZEj7Ddf9MKtIHBKFFYKhIYEwctoSuL9iXXdQ6R5lWzIJ5Sun5wHJhE9Rk5/wjjTeiCFJPEJVafrA==",
+ "vote-first-valid": 0,
+ "vote-key-dilution": 10000,
+ "vote-last-valid": 3000000,
+ "vote-participation-key": "qmkEl2AbMO/KKK+iOgIhSB3Q/4WXftoucPUvEYFaWbo="
+ },
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 3999997773,
+ "round": 41,
+ "status": "Online",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 0,
+ "total-created-apps": 0,
+ "total-created-assets": 0
+ },
+ {
+ "address": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "amount": 74198032,
+ "amount-without-pending-rewards": 74198032,
+ "assets": [
+ {
+ "amount": 1000000,
+ "asset-id": 45,
+ "is-frozen": false
+ },
+ {
+ "amount": 1000000,
+ "asset-id": 50,
+ "is-frozen": false
+ }
+ ],
+ "created-assets": [
+ {
+ "index": 45,
+ "params": {
+ "clawback": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "creator": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "decimals": 3,
+ "freeze": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "manager": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "name-b64": "AAAAAAAAAAA=",
+ "reserve": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "total": 1000000,
+ "unit-name-b64": "AA==",
+ "url-b64": "AAAAAAAAAAA="
+ }
+ },
+ {
+ "index": 50,
+ "params": {
+ "clawback": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "creator": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "decimals": 3,
+ "freeze": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "manager": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "name-b64": "AAAAAAAAAAA=",
+ "reserve": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "total": 1000000,
+ "unit-name-b64": "AA==",
+ "url-b64": "AAAAAAAAAAA="
+ }
+ }
+ ],
+ "min-balance": 300000,
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 32,
+ "round": 41,
+ "status": "Offline",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 2,
+ "total-created-apps": 0,
+ "total-created-assets": 2
+ },
+ {
+ "address": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "amount": 4000001724861773,
+ "amount-without-pending-rewards": 4000001724861773,
+ "min-balance": 100000,
+ "participation": {
+ "selection-participation-key": "S3YIZ2TNGSl1plq93eXsXsRhJRfCyIMKq0sq12++C8Y=",
+ "state-proof-key": "4BqeyojB23ZEj7Ddf9MKtIHBKFFYKhIYEwctoSuL9iXXdQ6R5lWzIJ5Sun5wHJhE9Rk5/wjjTeiCFJPEJVafrA==",
+ "vote-first-valid": 0,
+ "vote-key-dilution": 10000,
+ "vote-last-valid": 3000000,
+ "vote-participation-key": "qmkEl2AbMO/KKK+iOgIhSB3Q/4WXftoucPUvEYFaWbo="
+ },
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 3999997773,
+ "round": 41,
+ "status": "Online",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 0,
+ "total-created-apps": 0,
+ "total-created-assets": 0
+ },
+ {
+ "address": "KLWQTWPJXUAPVZNANKGGTTFGPPJZDOLGOCBCBRHR53C6J2FDYF2GBABCRU",
+ "amount": 27300019,
+ "amount-without-pending-rewards": 27300019,
+ "min-balance": 100000,
+ "pending-rewards": 0,
+ "reward-base": 1,
+ "rewards": 19,
+ "round": 41,
+ "status": "Offline",
+ "total-apps-opted-in": 0,
+ "total-assets-opted-in": 0,
+ "total-created-apps": 0,
+ "total-created-assets": 0
+ }
+ ],
+ "apps": [
+ {
+ "id": 39,
+ "params": {
+ "approval-program": "%s",
+ "clear-state-program": "BoEB",
+ "creator": "5Z2LOJJCA52LM6I6FLS3DLRBG7UWDEQ2RS2Y76Z66QPUNLAGGJIDDX7BII",
+ "global-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ },
+ "local-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ }
+ }
+ },
+ {
+ "id": 41,
+ "params": {
+ "approval-program": "%s",
+ "clear-state-program": "BoEB",
+ "creator": "5P7Y556QIE3UCBNWJ7GXPNDCV6CLZF5VDEZ2PTTGNY5PQ2OBA4D6GXZFZA",
+ "global-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ },
+ "local-state-schema": {
+ "num-byte-slice": 1,
+ "num-uint": 1
+ }
+ }
+ }
+ ],
+ "latest-timestamp": 1646848841,
+ "protocol-version": "future",
+ "round": 41,
+ "sources": null,
+ "txns": [
+ {
+ "sig": "EPT8gSZDv20jj+bRwoqeqt7js8pquiYoH+pK4tl+qzujseK6+3QiFJV0qFU6p2xlrLNvsbqHBMmbOGjX9HUmAQ==",
+ "txn": {
+ "amt": 41300000,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "rcv": "55VWZPQYI3VTDONPQX2RD77F2VULZ3SXPTIZ42QXO7TETRU5TJ5VZYLT44",
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "pay"
+ }
+ },
+ {
+ "sig": "Wmphf7cw//QSlNg0WD1VjFRwtVh6KOo/hFxdwD57aW/swuNCUN7L5ew0BS1vWOp2C6eVzZPK145b+H2A2PziBg==",
+ "txn": {
+ "amt": 7700000,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "rcv": "KLWQTWPJXUAPVZNANKGGTTFGPPJZDOLGOCBCBRHR53C6J2FDYF2GBABCRU",
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "pay"
+ }
+ },
+ {
+ "sig": "IyrYrbX6yaQfUcNHmArTWptV3WI9fdUbRT4K7q6KaCoub5L/dRRV6bFcLAcNZKTXNLYR+d4/GYz6XFhfFBp+DQ==",
+ "txn": {
+ "apid": 39,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "appl"
+ }
+ },
+ {
+ "sig": "H1TQRug7WG3tjGae3bXzDiAoXbILByvc9//J+imkFgaAHW5UPzvJGtn7yVpr8tInYVPnnTF+l88TXY/ANUB2CQ==",
+ "txn": {
+ "apid": 41,
+ "fee": 1000,
+ "fv": 40,
+ "gen": "sandnet-v1",
+ "gh": "2m5E5yOWZvqfj2FL3GRJOo+Pq2tJMRH8LbpCwQRPRDY=",
+ "grp": "SY3swywpYP2hEQqZCKSM6uvqHgI34063jST7KPiKjBg=",
+ "lv": 1040,
+ "snd": "KQCXQJRLGPOQCMGM6ZH2WOVCQXYMH4XVYBJFTNPY4YW3CAVN3DB72N6ODA",
+ "type": "appl"
+ }
+ }
+ ]
+ }`
+
+ // Fill in the format string with the base64-encoded program bytes
+ ddrBlob = fmt.Sprintf(ddrBlob, progA, progB)
+
+ ds := DebugParams{
+ Proto: string(protocol.ConsensusCurrentVersion),
+ DdrBlob: []byte(ddrBlob),
+ GroupIndex: 4,
+ RunMode: "application",
+ }
+
+ local := MakeLocalRunner(nil)
+ err = local.Setup(&ds)
+ a.NoError(err)
+
+ err = local.RunAll()
a.NoError(err)
- a.True(pass)
}
diff --git a/cmd/tealdbg/webdbg.go b/cmd/tealdbg/webdbg.go
index 3f99eea70..fe6058d9b 100644
--- a/cmd/tealdbg/webdbg.go
+++ b/cmd/tealdbg/webdbg.go
@@ -171,11 +171,20 @@ func (a *WebPageFrontend) configHandler(w http.ResponseWriter, r *http.Request)
}
// Extract PC from config
- line := req.debugConfig.BreakAtLine
- if line == noBreak {
- s.debugger.RemoveBreakpoint(int(line))
- } else {
- s.debugger.SetBreakpoint(int(line))
+ for line := range req.debugConfig.ActiveBreak {
+ if req.debugConfig.NoBreak {
+ err := s.debugger.RemoveBreakpoint(int(line))
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ } else {
+ err := s.debugger.SetBreakpoint(int(line))
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ }
}
w.WriteHeader(http.StatusOK)
diff --git a/config/consensus.go b/config/consensus.go
index ecc9c72e6..120eb9496 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -118,6 +118,14 @@ type ConsensusParams struct {
//
// Rewards are received by whole reward units. Fractions of
// RewardUnits do not receive rewards.
+ //
+ // Ensure both considerations below are taken into account if RewardUnit is planned for change:
+ // 1. RewardUnits should not be changed without touching all accounts to apply their rewards
+ // based on the old RewardUnits and then use the new RewardUnits for all subsequent calculations.
+ // 2. Having a consistent RewardUnit is also important for preserving
+ // a constant amount of total algos in the system:
+ // the block header tracks how many reward units worth of algos are in existence
+ // and have logically received rewards.
RewardUnit uint64
// RewardsRateRefreshInterval is the number of rounds after which the
diff --git a/crypto/secp256k1/secp256_test.go b/crypto/secp256k1/secp256_test.go
index 3ee7d2c0b..5da4e593c 100644
--- a/crypto/secp256k1/secp256_test.go
+++ b/crypto/secp256k1/secp256_test.go
@@ -63,6 +63,8 @@ func compactSigCheck(t *testing.T, sig []byte) {
}
func TestSignatureValidity(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
pubkey, seckey := generateKeyPair()
msg := csprngEntropy(32)
sig, err := Sign(msg, seckey)
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index 7f8b2d412..596a8d458 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -422,7 +422,7 @@ func (client RestClient) AccountInformation(address string) (response v1.Account
func (client RestClient) AccountInformationV2(address string, includeCreatables bool) (response generatedV2.Account, err error) {
var infoParams accountInformationParams
if includeCreatables {
- infoParams = accountInformationParams{Exclude: "", Format: "json"}
+ infoParams = accountInformationParams{Exclude: "none", Format: "json"}
} else {
infoParams = accountInformationParams{Exclude: "all", Format: "json"}
}
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index e0036516f..38f902ed3 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -511,7 +511,7 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
result.AppCallTrace = &debug.history
result.GlobalDelta = StateDeltaToStateDelta(delta.GlobalDelta)
if len(delta.LocalDeltas) > 0 {
- localDeltas := make([]generated.AccountStateDelta, len(delta.LocalDeltas))
+ localDeltas := make([]generated.AccountStateDelta, 0, len(delta.LocalDeltas))
for k, v := range delta.LocalDeltas {
ldaddr, err2 := stxn.Txn.AddressByIndex(k, stxn.Txn.Sender)
if err2 != nil {
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index 99246ec71..287e47d19 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -563,27 +563,27 @@ func TestDryrunLocal1(t *testing.T) {
if response.Txns[0].LocalDeltas == nil {
t.Fatal("empty local delta")
}
- addrFound := false
+
+ // Should be a single account
+ assert.Len(t, *response.Txns[0].LocalDeltas, 1)
+
+ lds := (*response.Txns[0].LocalDeltas)[0]
+ assert.Equal(t, lds.Address, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
+
valueFound := false
- for _, lds := range *response.Txns[0].LocalDeltas {
- if lds.Address == "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ" {
- addrFound = true
- for _, ld := range lds.Delta {
- if ld.Key == b64("foo") {
- valueFound = true
- assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
- assert.Equal(t, *ld.Value.Bytes, b64("bar"))
+ for _, ld := range lds.Delta {
+ if ld.Key == b64("foo") {
+ valueFound = true
+ assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
+ assert.Equal(t, *ld.Value.Bytes, b64("bar"))
- }
- }
}
}
- if !addrFound {
- t.Error("no local delta for AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
- }
+
if !valueFound {
t.Error("no local delta for value foo")
}
+
if t.Failed() {
logResponse(t, &response)
}
@@ -644,24 +644,22 @@ func TestDryrunLocal1A(t *testing.T) {
if response.Txns[0].LocalDeltas == nil {
t.Fatal("empty local delta")
}
- addrFound := false
+
+ assert.Len(t, *response.Txns[0].LocalDeltas, 1)
+
+ lds := (*response.Txns[0].LocalDeltas)[0]
+ assert.Equal(t, lds.Address, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
+
valueFound := false
- for _, lds := range *response.Txns[0].LocalDeltas {
- if lds.Address == "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ" {
- addrFound = true
- for _, ld := range lds.Delta {
- if ld.Key == b64("foo") {
- valueFound = true
- assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
- assert.Equal(t, *ld.Value.Bytes, b64("bar"))
+ for _, ld := range lds.Delta {
+ if ld.Key == b64("foo") {
+ valueFound = true
+ assert.Equal(t, ld.Value.Action, uint64(basics.SetBytesAction))
+ assert.Equal(t, *ld.Value.Bytes, b64("bar"))
- }
- }
}
}
- if !addrFound {
- t.Error("no local delta for AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ")
- }
+
if !valueFound {
t.Error("no local delta for value foo")
}
diff --git a/daemon/kmd/wallet/driver/ledger.go b/daemon/kmd/wallet/driver/ledger.go
index 36029d370..1d64a6ab1 100644
--- a/daemon/kmd/wallet/driver/ledger.go
+++ b/daemon/kmd/wallet/driver/ledger.go
@@ -145,6 +145,7 @@ func (lwd *LedgerWalletDriver) scanWalletsLocked() error {
newDevs = append(newDevs, LedgerUSB{
hiddev: dev,
+ info: info,
})
}
diff --git a/daemon/kmd/wallet/driver/ledger_hid.go b/daemon/kmd/wallet/driver/ledger_hid.go
index 32e3d8321..28ba4ce5c 100644
--- a/daemon/kmd/wallet/driver/ledger_hid.go
+++ b/daemon/kmd/wallet/driver/ledger_hid.go
@@ -21,7 +21,7 @@ import (
"fmt"
"os"
- "github.com/karalabe/hid"
+ "github.com/karalabe/usb"
)
const ledgerVendorID = 0x2c97
@@ -31,7 +31,8 @@ const ledgerUsagePage = 0xffa0
// the protocol used for sending messages to the application running on the
// Ledger hardware wallet.
type LedgerUSB struct {
- hiddev *hid.Device
+ hiddev usb.Device
+ info usb.DeviceInfo
}
// LedgerUSBError is a wrapper around the two-byte error code that the Ledger
@@ -196,21 +197,25 @@ func (l *LedgerUSB) Exchange(msg []byte) ([]byte, error) {
}
// USBInfo returns information about the underlying USB device.
-func (l *LedgerUSB) USBInfo() hid.DeviceInfo {
- return l.hiddev.DeviceInfo
+func (l *LedgerUSB) USBInfo() usb.DeviceInfo {
+ return l.info
}
// LedgerEnumerate returns all of the Ledger devices connected to this machine.
-func LedgerEnumerate() ([]hid.DeviceInfo, error) {
- if !hid.Supported() || os.Getenv("KMD_NOUSB") != "" {
+func LedgerEnumerate() ([]usb.DeviceInfo, error) {
+ if !usb.Supported() || os.Getenv("KMD_NOUSB") != "" {
return nil, fmt.Errorf("HID not supported")
}
- var infos []hid.DeviceInfo
+ var infos []usb.DeviceInfo
// The enumeration process is based on:
// https://github.com/LedgerHQ/blue-loader-python/blob/master/ledgerblue/comm.py#L212
- // we search for the Ledger Vendor id and igonre devices that don't have specific usagepage or interface
- for _, info := range hid.Enumerate(ledgerVendorID, 0) {
+ // we search for the Ledger Vendor id and ignore devices that don't have specific usagepage or interface
+ hids, err := usb.EnumerateHid(ledgerVendorID, 0)
+ if err != nil {
+ return []usb.DeviceInfo{}, err
+ }
+ for _, info := range hids {
if info.UsagePage != ledgerUsagePage && info.Interface != 0 {
continue
}
diff --git a/data/abi/abi_encode.go b/data/abi/abi_encode.go
index cdc2b7df3..b0f8b6c12 100644
--- a/data/abi/abi_encode.go
+++ b/data/abi/abi_encode.go
@@ -25,23 +25,6 @@ import (
"strings"
)
-// bigIntToBytes casts non-negative big integer to byte slice with specific byte length
-// DEPRECATED: THIS IS A WORKAROUND FOR `fillBytes` METHOD BEFORE GOLANG 1.15+
-// SHOULD BE REMOVED AFTER WE MOVE TO HIGHER VERSION
-func bigIntToBytes(x *big.Int, byteLen uint) ([]byte, error) {
- if x.Cmp(big.NewInt(0)) < 0 {
- return nil, fmt.Errorf("ABI: big Int To Bytes error: should pass in non-negative integer")
- }
- if uint(x.BitLen()) > byteLen*8 {
- return nil, fmt.Errorf("ABI: big Int To Bytes error: integer byte length > given byte length")
- }
-
- buffer := make([]byte, byteLen)
- intBytes := x.Bytes()
- copy(buffer[int(byteLen)-len(intBytes):], intBytes)
- return buffer, nil
-}
-
// typeCastToTuple cast an array-like ABI type into an ABI tuple type.
func (t Type) typeCastToTuple(tupLen ...int) (Type, error) {
var childT []Type
@@ -187,14 +170,13 @@ func encodeInt(intValue interface{}, bitSize uint16) ([]byte, error) {
return nil, fmt.Errorf("passed in numeric value should be non negative")
}
+ castedBytes := make([]byte, bitSize/8)
+
if bigInt.Cmp(new(big.Int).Lsh(big.NewInt(1), uint(bitSize))) >= 0 {
return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", bigInt.BitLen(), bitSize)
}
- castedBytes, err := bigIntToBytes(bigInt, uint(bitSize/8))
- if err != nil {
- return nil, err
- }
+ bigInt.FillBytes(castedBytes)
return castedBytes, nil
}
@@ -204,12 +186,8 @@ func inferToSlice(value interface{}) ([]interface{}, error) {
if reflectVal.Kind() != reflect.Slice && reflectVal.Kind() != reflect.Array {
return nil, fmt.Errorf("cannot infer an interface value as a slice of interface element")
}
- if reflectVal.IsNil() {
- if reflectVal.Kind() == reflect.Slice {
- return nil, nil
- }
- return nil, fmt.Errorf("cannot infer nil value for array kind interface")
- }
+ // * if input is a slice, with nil, then reflectVal.Len() == 0
+ // * if input is an array, it is not possible it is nil
values := make([]interface{}, reflectVal.Len())
for i := 0; i < reflectVal.Len(); i++ {
values[i] = reflectVal.Index(i).Interface()
diff --git a/data/abi/abi_encode_test.go b/data/abi/abi_encode_test.go
index 620013c23..231c1a0e0 100644
--- a/data/abi/abi_encode_test.go
+++ b/data/abi/abi_encode_test.go
@@ -83,8 +83,8 @@ func TestEncodeValid(t *testing.T) {
randomInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- expected, err := bigIntToBytes(randomInt, uint(intSize/8))
- require.NoError(t, err, "big int to byte conversion error")
+ expected := make([]byte, intSize/8)
+ randomInt.FillBytes(expected)
uintEncode, err := uintType.Encode(randomInt)
require.NoError(t, err, "encoding from uint type fail")
@@ -122,9 +122,8 @@ func TestEncodeValid(t *testing.T) {
encodedUfixed, err := typeUfixed.Encode(randomInt)
require.NoError(t, err, "ufixed encode fail")
- expected, err := bigIntToBytes(randomInt, uint(size/8))
- require.NoError(t, err, "big int to byte conversion error")
-
+ expected := make([]byte, size/8)
+ randomInt.FillBytes(expected)
require.Equal(t, expected, encodedUfixed, "encode ufixed not match with expected")
}
// (2^[bitSize] - 1) / (10^[precision]) test
@@ -142,8 +141,8 @@ func TestEncodeValid(t *testing.T) {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- addrBytesExpected, err := bigIntToBytes(randomAddrInt, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
+ addrBytesExpected := make([]byte, addressByteSize)
+ randomAddrInt.FillBytes(addrBytesExpected)
addrBytesActual, err := addressType.Encode(addrBytesExpected)
require.NoError(t, err, "address encode fail")
@@ -422,8 +421,8 @@ func TestDecodeValid(t *testing.T) {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- expected, err := bigIntToBytes(randomAddrInt, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
+ expected := make([]byte, addressByteSize)
+ randomAddrInt.FillBytes(expected)
actual, err := addressType.Decode(expected)
require.NoError(t, err, "decoding address should not return error")
@@ -952,10 +951,8 @@ func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
for i := 0; i < addressTestCaseCount; i++ {
randAddrVal, err := rand.Int(rand.Reader, maxAddress)
require.NoError(t, err, "generate random value for address, should be no error")
-
- addrBytes, err := bigIntToBytes(randAddrVal, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
-
+ addrBytes := make([]byte, addressByteSize)
+ randAddrVal.FillBytes(addrBytes)
(*pool)[Address][i] = testUnit{serializedType: addressType.String(), value: addrBytes}
}
categorySelfRoundTripTest(t, (*pool)[Address])
@@ -1165,7 +1162,7 @@ func TestParseArgJSONtoByteSlice(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) {
- applicationArgs := make([][]byte, 0)
+ applicationArgs := [][]byte{}
err := ParseArgJSONtoByteSlice(test.argTypes, test.jsonArgs, &applicationArgs)
require.NoError(t, err)
require.Equal(t, test.expectedAppArgs, applicationArgs)
@@ -1224,3 +1221,59 @@ func TestParseMethodSignature(t *testing.T) {
})
}
}
+
+func TestInferToSlice(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var emptySlice []int
+ tests := []struct {
+ toBeInferred interface{}
+ length int
+ }{
+ {
+ toBeInferred: []int{},
+ length: 0,
+ },
+ {
+ toBeInferred: make([]int, 0),
+ length: 0,
+ },
+ {
+ toBeInferred: emptySlice,
+ length: 0,
+ },
+ {
+ toBeInferred: [0]int{},
+ length: 0,
+ },
+ {
+ toBeInferred: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
+ length: 32,
+ },
+ {
+ toBeInferred: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
+ length: 32,
+ },
+ }
+
+ for i, test := range tests {
+ inferredSlice, err := inferToSlice(test.toBeInferred)
+ require.NoError(t, err, "inferToSlice on testcase %d failed to successfully infer %v", i, test.toBeInferred)
+ require.Equal(t, test.length, len(inferredSlice), "inferToSlice on testcase %d inferred different length, expected %d", i, test.length)
+ }
+
+ // one more testcase: untyped nil (no type information) should not pass inference
+ _, err := inferToSlice(nil)
+ require.EqualError(
+ t, err,
+ "cannot infer an interface value as a slice of interface element",
+ "inferToSlice should return type inference error when passed in nil with unexpected Kind")
+
+ // one more testcase: a wrongly typed nil should not pass inference
+ var nilPt *uint64 = nil
+ _, err = inferToSlice(nilPt)
+ require.EqualError(
+ t, err,
+ "cannot infer an interface value as a slice of interface element",
+ "inferToSlice should return type inference error when passing argument type other than slice or array")
+}
diff --git a/data/abi/abi_json_test.go b/data/abi/abi_json_test.go
index 0c3a006e3..49083fdea 100644
--- a/data/abi/abi_json_test.go
+++ b/data/abi/abi_json_test.go
@@ -31,17 +31,14 @@ func TestRandomAddressEquality(t *testing.T) {
upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
var addrBasics basics.Address
- var addrABI = make([]byte, addressByteSize)
+ var addrABI []byte = make([]byte, addressByteSize)
for testCaseIndex := 0; testCaseIndex < addressTestCaseCount; testCaseIndex++ {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- expected, err := bigIntToBytes(randomAddrInt, uint(addressByteSize))
- require.NoError(t, err, "big int to byte conversion error")
-
- copy(addrABI[:], expected)
- copy(addrBasics[:], expected)
+ randomAddrInt.FillBytes(addrBasics[:])
+ randomAddrInt.FillBytes(addrABI)
checkSumBasics := addrBasics.GetChecksum()
checkSumABI, err := addressCheckSum(addrABI)
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index 1e4b1393e..46f17c8ef 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -152,7 +152,7 @@ type (
FeeSink basics.Address `codec:"fees"`
// The RewardsPool accepts periodic injections from the
- // FeeSink and continually redistributes them to adresses as
+ // FeeSink and continually redistributes them to addresses as
// rewards.
RewardsPool basics.Address `codec:"rwd"`
diff --git a/data/ledger_test.go b/data/ledger_test.go
index 0483de994..6af896e97 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -156,9 +156,10 @@ func TestLedgerCirculation(t *testing.T) {
require.False(t, sourceAccount.IsZero())
require.False(t, destAccount.IsZero())
- data, err := realLedger.LookupAgreement(basics.Round(0), destAccount)
+ data, validThrough, _, err := realLedger.LookupAccount(basics.Round(0), destAccount)
+ require.Equal(t, basics.Round(0), validThrough)
require.NoError(t, err)
- baseDestValue := data.MicroAlgosWithRewards.Raw
+ baseDestValue := data.MicroAlgos.Raw
blk := genesisInitState.Block
totalsRound, totals, err := realLedger.LatestTotals()
@@ -191,12 +192,14 @@ func TestLedgerCirculation(t *testing.T) {
// test most recent round
if rnd < basics.Round(500) {
- data, err = realLedger.LookupAgreement(rnd, destAccount)
+ data, validThrough, _, err = realLedger.LookupAccount(rnd, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgosWithRewards.Raw)
- data, err = l.LookupAgreement(rnd, destAccount)
+ require.Equal(t, rnd, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw)
+ data, validThrough, _, err = realLedger.LookupAccount(rnd, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgosWithRewards.Raw)
+ require.Equal(t, rnd, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw)
roundCirculation, err := realLedger.OnlineTotals(rnd)
require.NoError(t, err)
@@ -207,12 +210,14 @@ func TestLedgerCirculation(t *testing.T) {
require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(510) {
// test one round ago
- data, err = realLedger.LookupAgreement(rnd-1, destAccount)
+ data, validThrough, _, err = realLedger.LookupAccount(rnd-1, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgosWithRewards.Raw)
- data, err = l.LookupAgreement(rnd-1, destAccount)
+ require.Equal(t, rnd-1, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw)
+ data, validThrough, _, err = l.LookupAccount(rnd-1, destAccount)
require.NoError(t, err)
- require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgosWithRewards.Raw)
+ require.Equal(t, rnd-1, validThrough)
+ require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw)
roundCirculation, err := realLedger.OnlineTotals(rnd - 1)
require.NoError(t, err)
@@ -223,12 +228,12 @@ func TestLedgerCirculation(t *testing.T) {
require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(520) {
// test one round in the future ( expected error )
- data, err = realLedger.LookupAgreement(rnd+1, destAccount)
+ data, _, _, err = realLedger.LookupAccount(rnd+1, destAccount)
require.Error(t, err)
- require.Equal(t, uint64(0), data.MicroAlgosWithRewards.Raw)
- data, err = l.LookupAgreement(rnd+1, destAccount)
+ require.Equal(t, uint64(0), data.MicroAlgos.Raw)
+ data, _, _, err = l.LookupAccount(rnd+1, destAccount)
require.Error(t, err)
- require.Equal(t, uint64(0), data.MicroAlgosWithRewards.Raw)
+ require.Equal(t, uint64(0), data.MicroAlgos.Raw)
_, err = realLedger.OnlineTotals(rnd + 1)
require.Error(t, err)
@@ -244,7 +249,6 @@ func TestLedgerCirculation(t *testing.T) {
require.Error(t, err)
}
}
- return
}
func TestLedgerSeed(t *testing.T) {
@@ -318,7 +322,6 @@ func TestLedgerSeed(t *testing.T) {
require.Equal(t, seed.elements[1].seed, expectedHdr.Seed)
}
}
- return
}
func TestConsensusVersion(t *testing.T) {
@@ -473,7 +476,7 @@ func TestLedgerErrorValidate(t *testing.T) {
var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
- proto, _ := config.Consensus[protocol.ConsensusCurrentVersion]
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
origProto := proto
defer func() {
config.Consensus[protocol.ConsensusCurrentVersion] = origProto
@@ -533,8 +536,8 @@ func TestLedgerErrorValidate(t *testing.T) {
// Add blocks to the ledger via EnsureValidatedBlock. This calls AddValidatedBlock, which simply
// passes the block to blockQueue. The returned error is handled by EnsureValidatedBlock, which reports
// in the form of logged error message.
+ wg.Add(1)
go func() {
- wg.Add(1)
i := 0
for blk := range blkChan1 {
i++
@@ -554,8 +557,8 @@ func TestLedgerErrorValidate(t *testing.T) {
// Add blocks to the ledger via EnsureBlock. This basically calls AddBlock, but handles
// the errors by logging them. Checking the logged messages to verify its behavior.
+ wg.Add(1)
go func() {
- wg.Add(1)
i := 0
for blk := range blkChan2 {
i++
@@ -565,8 +568,8 @@ func TestLedgerErrorValidate(t *testing.T) {
}()
// Add blocks directly to the ledger
+ wg.Add(1)
go func() {
- wg.Add(1)
i := 0
for blk := range blkChan3 {
i++
diff --git a/data/transactions/logic/.gitignore b/data/transactions/logic/.gitignore
deleted file mode 100644
index 24f8b4a36..000000000
--- a/data/transactions/logic/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-langspec.json
-teal.tmLanguage.json
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 71d15cb46..769b3b9cd 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -292,8 +292,8 @@ return stack matches the name of the input value.
| `!=` | A is not equal to B => {0 or 1} |
| `!` | A == 0 yields 1; else 0 |
| `len` | yields length of byte value A |
-| `itob` | converts uint64 A to big endian bytes |
-| `btoi` | converts bytes A as big endian to uint64 |
+| `itob` | converts uint64 A to big-endian byte array, always of length 8 |
+| `btoi` | converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8. |
| `%` | A modulo B. Fail if B == 0. |
| `\|` | A bitwise-or B |
| `&` | A bitwise-and B |
@@ -436,7 +436,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 16 | TypeEnum | uint64 | | See table below |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset. |
+| 19 | AssetSender | []byte | | 32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset. |
| 20 | AssetReceiver | []byte | | 32 byte address |
| 21 | AssetCloseTo | []byte | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
@@ -457,7 +457,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 37 | ConfigAssetUnitName | []byte | v2 | Unit name of the asset |
| 38 | ConfigAssetName | []byte | v2 | The asset name |
| 39 | ConfigAssetURL | []byte | v2 | URL |
-| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to some unspecified asset metadata |
+| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to unspecified asset metadata |
| 41 | ConfigAssetManager | []byte | v2 | 32 byte address |
| 42 | ConfigAssetReserve | []byte | v2 | 32 byte address |
| 43 | ConfigAssetFreeze | []byte | v2 | 32 byte address |
@@ -527,7 +527,7 @@ Asset fields include `AssetHolding` and `AssetParam` fields that are used in the
| 4 | AssetName | []byte | | Asset name |
| 5 | AssetURL | []byte | | URL with additional info about the asset |
| 6 | AssetMetadataHash | []byte | | Arbitrary commitment |
-| 7 | AssetManager | []byte | | Manager commitment |
+| 7 | AssetManager | []byte | | Manager address |
| 8 | AssetReserve | []byte | | Reserve address |
| 9 | AssetFreeze | []byte | | Freeze address |
| 10 | AssetClawback | []byte | | Clawback address |
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index c16a5017a..86e5f16fd 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -50,15 +50,15 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Opcode: 0x05 {uint8 curve index}
- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., uint64
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
-- **Cost**: 1700
+- **Cost**: Secp256k1=1700 Secp256r1=2500
- Availability: v5
`ECDSA` Curves:
| Index | Name | In | Notes |
| - | ------ | - | --------- |
-| 0 | Secp256k1 | | secp256k1 curve |
-| 1 | Secp256r1 | v7 | secp256r1 curve |
+| 0 | Secp256k1 | | secp256k1 curve, used in Bitcoin |
+| 1 | Secp256r1 | v7 | secp256r1 curve, NIST standard |
The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.
@@ -68,17 +68,9 @@ The 32 byte Y-component of a public key is the last element on the stack, preced
- Opcode: 0x06 {uint8 curve index}
- Stack: ..., A: []byte &rarr; ..., X: []byte, Y: []byte
- decompress pubkey A into components X, Y
-- **Cost**: 650
+- **Cost**: Secp256k1=650 Secp256r1=2400
- Availability: v5
-`ECDSA` Curves:
-
-| Index | Name | In | Notes |
-| - | ------ | - | --------- |
-| 0 | Secp256k1 | | secp256k1 curve |
-| 1 | Secp256r1 | v7 | secp256r1 curve |
-
-
The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.
## ecdsa_pk_recover v
@@ -89,14 +81,6 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- **Cost**: 2000
- Availability: v5
-`ECDSA` Curves:
-
-| Index | Name | In | Notes |
-| - | ------ | - | --------- |
-| 0 | Secp256k1 | | secp256k1 curve |
-| 1 | Secp256r1 | v7 | secp256r1 curve |
-
-
S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to deriver a public key. All values are big-endian encoded. The signed data must be 32 bytes long.
## +
@@ -193,13 +177,13 @@ Overflow is an error condition which halts execution and fails the transaction.
- Opcode: 0x16
- Stack: ..., A: uint64 &rarr; ..., []byte
-- converts uint64 A to big endian bytes
+- converts uint64 A to big-endian byte array, always of length 8
## btoi
- Opcode: 0x17
- Stack: ..., A: []byte &rarr; ..., uint64
-- converts bytes A as big endian to uint64
+- converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8.
`btoi` fails if the input is longer than 8 bytes.
@@ -396,7 +380,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 16 | TypeEnum | uint64 | | See table below |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset. |
+| 19 | AssetSender | []byte | | 32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset. |
| 20 | AssetReceiver | []byte | | 32 byte address |
| 21 | AssetCloseTo | []byte | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
@@ -417,7 +401,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 37 | ConfigAssetUnitName | []byte | v2 | Unit name of the asset |
| 38 | ConfigAssetName | []byte | v2 | The asset name |
| 39 | ConfigAssetURL | []byte | v2 | URL |
-| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to some unspecified asset metadata |
+| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to unspecified asset metadata |
| 41 | ConfigAssetManager | []byte | v2 | 32 byte address |
| 42 | ConfigAssetReserve | []byte | v2 | 32 byte address |
| 43 | ConfigAssetFreeze | []byte | v2 | 32 byte address |
@@ -443,19 +427,6 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 63 | StateProofPK | []byte | v6 | 64 byte state proof public key commitment |
-TypeEnum mapping:
-
-| Index | "Type" string | Description |
-| --- | --- | --- |
-| 0 | unknown | Unknown type. Invalid transaction |
-| 1 | pay | Payment |
-| 2 | keyreg | KeyRegistration |
-| 3 | acfg | AssetConfig |
-| 4 | axfer | AssetTransfer |
-| 5 | afrz | AssetFreeze |
-| 6 | appl | ApplicationCall |
-
-
FirstValidTime causes the program to fail. The field is reserved for future use.
## global f
@@ -591,7 +562,7 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
## bnz target
-- Opcode: 0x40 {int16 branch offset, big endian}
+- Opcode: 0x40 {int16 branch offset, big-endian}
- Stack: ..., A: uint64 &rarr; ...
- branch to TARGET if value A is not zero
@@ -601,7 +572,7 @@ At v2 it became allowed to branch to the end of the program exactly after the la
## bz target
-- Opcode: 0x41 {int16 branch offset, big endian}
+- Opcode: 0x41 {int16 branch offset, big-endian}
- Stack: ..., A: uint64 &rarr; ...
- branch to TARGET if value A is zero
- Availability: v2
@@ -610,7 +581,7 @@ See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.
## b target
-- Opcode: 0x42 {int16 branch offset, big endian}
+- Opcode: 0x42 {int16 branch offset, big-endian}
- Stack: ... &rarr; ...
- branch unconditionally to TARGET
- Availability: v2
@@ -780,9 +751,17 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
- Opcode: 0x5c {uint8 encoding index}
- Stack: ..., A: []byte &rarr; ..., []byte
- decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E
-- **Cost**: 25
+- **Cost**: 1 + 1 per 16 bytes
- Availability: v7
+`base64` Encodings:
+
+| Index | Name | Notes |
+| - | ------ | --------- |
+| 0 | URLEncoding | |
+| 1 | StdEncoding | |
+
+
Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See <a href="https://rfc-editor.org/rfc/rfc4648.html#section-4">RFC 4648</a> (sections 4 and 5). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\n` and `\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\r`, or `\n`.
## json_ref r
@@ -792,6 +771,15 @@ Decodes A using the base64 encoding E. Specify the encoding with an immediate ar
- return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A
- Availability: v7
+`json_ref` Types:
+
+| Index | Name | Type | Notes |
+| - | ------ | -- | --------- |
+| 0 | JSONString | []byte | |
+| 1 | JSONUint64 | uint64 | |
+| 2 | JSONObject | []byte | |
+
+
specify the return type with an immediate arg either as JSONUint64 or JSONString or JSONObject.
## balance
@@ -904,7 +892,7 @@ Deleting a key which is already absent has no effect on the application global s
- Availability: v2
- Mode: Application
-`asset_holding_get` Fields:
+`asset_holding` Fields:
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -922,7 +910,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or
- Availability: v2
- Mode: Application
-`asset_params_get` Fields:
+`asset_params` Fields:
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
@@ -933,7 +921,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or
| 4 | AssetName | []byte | | Asset name |
| 5 | AssetURL | []byte | | URL with additional info about the asset |
| 6 | AssetMetadataHash | []byte | | Arbitrary commitment |
-| 7 | AssetManager | []byte | | Manager commitment |
+| 7 | AssetManager | []byte | | Manager address |
| 8 | AssetReserve | []byte | | Reserve address |
| 9 | AssetFreeze | []byte | | Freeze address |
| 10 | AssetClawback | []byte | | Clawback address |
@@ -950,7 +938,7 @@ params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id. Return:
- Availability: v5
- Mode: Application
-`app_params_get` Fields:
+`app_params` Fields:
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -975,7 +963,7 @@ params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag
- Availability: v6
- Mode: Application
-`acct_params_get` Fields:
+`acct_params` Fields:
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -1022,7 +1010,7 @@ pushint args are not added to the intcblock during assembly processes
## callsub target
-- Opcode: 0x88 {int16 branch offset, big endian}
+- Opcode: 0x88 {int16 branch offset, big-endian}
- Stack: ... &rarr; ...
- branch unconditionally to TARGET, saving the next instruction on the call stack
- Availability: v4
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 733ef4a58..03c257f2e 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -275,29 +275,30 @@ func (ops *OpStream) ReferToLabel(pc int, label string) {
type opTypeFunc func(ops *OpStream, immediates []string) (StackTypes, StackTypes)
-// returns allows opcodes like `txn` to be specific about their return
-// value types, based on the field requested, rather than use Any as
-// specified by opSpec.
-func (ops *OpStream) returns(argTypes ...StackType) {
- for range argTypes {
- ops.tpop()
+// returns allows opcodes like `txn` to be specific about their return value
+// types, based on the field requested, rather than use Any as specified by
+// opSpec. It replaces the first StackAny among the top len(spec.Returns) elements of the typestack.
+func (ops *OpStream) returns(spec *OpSpec, replacement StackType) {
+ end := len(ops.typeStack)
+ tip := ops.typeStack[end-len(spec.Returns):]
+ for i := range tip {
+ if tip[i] == StackAny {
+ tip[i] = replacement
+ return
+ }
}
- ops.tpusha(argTypes)
-}
-
-func (ops *OpStream) tpusha(argType []StackType) {
- ops.typeStack = append(ops.typeStack, argType...)
+ // returns was called on an OpSpec with no StackAny in its Returns
+ panic(spec)
}
-func (ops *OpStream) tpop() (argType StackType) {
+func (ops *OpStream) tpop() StackType {
if len(ops.typeStack) == 0 {
- argType = StackNone
- return
+ return StackNone
}
last := len(ops.typeStack) - 1
- argType = ops.typeStack[last]
+ t := ops.typeStack[last]
ops.typeStack = ops.typeStack[:last]
- return
+ return t
}
// Intc writes opcodes for loading a uint64 constant onto the stack.
@@ -399,20 +400,14 @@ func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("int needs one argument")
}
- // check friendly TypeEnum constants
- te, isTypeEnum := txnTypeConstToUint64[args[0]]
- if isTypeEnum {
- ops.Uint(te)
- return nil
- }
- // check raw transaction type strings
- tt, isTypeStr := txnTypeIndexes[args[0]]
- if isTypeStr {
- ops.Uint(tt)
+ // check txn type constants
+ i, ok := txnTypeMap[args[0]]
+ if ok {
+ ops.Uint(i)
return nil
}
// check OnCompetion constants
- oc, isOCStr := onCompletionConstToUint64[args[0]]
+ oc, isOCStr := onCompletionMap[args[0]]
if isOCStr {
ops.Uint(oc)
return nil
@@ -786,25 +781,7 @@ func asmSubstring(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func txnFieldImm(name string, expectArray bool, ops *OpStream) (*txnFieldSpec, error) {
- fs, ok := TxnFieldSpecByName[name]
- if !ok {
- return nil, fmt.Errorf("unknown field: %#v", name)
- }
- if expectArray != fs.array {
- if expectArray {
- return nil, fmt.Errorf("found scalar field %#v while expecting array", name)
- }
- return nil, fmt.Errorf("found array field %#v while expecting scalar", name)
- }
- if fs.version > ops.Version {
- return nil,
- fmt.Errorf("field %#v available in version %d. Missed #pragma version?", name, fs.version)
- }
- return &fs, nil
-}
-
-func simpleImm(value string, label string) (uint64, error) {
+func simpleImm(value string, label string) (byte, error) {
res, err := strconv.ParseUint(value, 0, 64)
if err != nil {
return 0, fmt.Errorf("unable to parse %s %#v as integer", label, value)
@@ -812,352 +789,74 @@ func simpleImm(value string, label string) (uint64, error) {
if res > 255 {
return 0, fmt.Errorf("%s beyond 255: %d", label, res)
}
- return res, err
-}
-
-func asmTxn(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.error("txn expects one argument")
- }
- fs, err := txnFieldImm(args[0], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
+ return byte(res), err
}
// asmTxn2 delegates to asmTxn or asmTxna depending on number of operands
func asmTxn2(ops *OpStream, spec *OpSpec, args []string) error {
switch len(args) {
case 1:
- return asmTxn(ops, spec, args)
+ txn := OpsByName[1]["txn"] // v1 txn opcode does not have array names
+ return asmDefault(ops, &txn, args)
case 2:
txna := OpsByName[ops.Version]["txna"]
- return asmTxna(ops, &txna, args)
+ return asmDefault(ops, &txna, args)
default:
- return ops.error("txn expects one or two arguments")
- }
-}
-
-// asmTxna also assemble asmItxna
-func asmTxna(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two immediate arguments", spec.Name)
- }
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
+ return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
}
- arrayFieldIdx, err := simpleImm(args[1], "array index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.returns(fs.ftype)
- return nil
-}
-
-// asmTxnas also assembles itxnas
-func asmTxnas(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one immediate argument", spec.Name)
- }
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
-func asmGtxn(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two arguments", spec.Name)
- }
- slot, err := simpleImm(args[0], "transaction index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- fs, err := txnFieldImm(args[1], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(slot))
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
}
func asmGtxn2(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 2 {
- return asmGtxn(ops, spec, args)
+ gtxn := OpsByName[1]["gtxn"] // v1 gtxn opcode does not have array names
+ return asmDefault(ops, &gtxn, args)
}
if len(args) == 3 {
gtxna := OpsByName[ops.Version]["gtxna"]
- return asmGtxna(ops, &gtxna, args)
- }
- return ops.errorf("%s expects two or three arguments", spec.Name)
-}
-
-//asmGtxna also assembles asmGitxna
-func asmGtxna(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 3 {
- return ops.errorf("%s expects three arguments", spec.Name)
- }
- slot, err := simpleImm(args[0], "transaction index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- fs, err := txnFieldImm(args[1], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- arrayFieldIdx, err := simpleImm(args[2], "array index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(slot))
- ops.pending.WriteByte(uint8(fs.field))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.returns(fs.ftype)
- return nil
-}
-
-// asmGtxnas also assembles gitxnas
-func asmGtxnas(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two immediate arguments", spec.Name)
+ return asmDefault(ops, &gtxna, args)
}
- slot, err := simpleImm(args[0], "transaction index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- fs, err := txnFieldImm(args[1], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(slot))
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
+ return ops.errorf("%s expects 2 or 3 immediate arguments", spec.Name)
}
func asmGtxns(ops *OpStream, spec *OpSpec, args []string) error {
+ if len(args) == 1 {
+ return asmDefault(ops, spec, args)
+ }
if len(args) == 2 {
gtxnsa := OpsByName[ops.Version]["gtxnsa"]
- return asmGtxnsa(ops, &gtxnsa, args)
- }
- if len(args) != 1 {
- return ops.errorf("%s expects one or two immediate arguments", spec.Name)
- }
- fs, err := txnFieldImm(args[0], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
-func asmGtxnsa(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- return ops.errorf("%s expects two immediate arguments", spec.Name)
- }
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- arrayFieldIdx, err := simpleImm(args[1], "array index")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.pending.WriteByte(uint8(arrayFieldIdx))
- ops.returns(fs.ftype)
- return nil
-}
-
-func asmGtxnsas(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one immediate argument", spec.Name)
+ return asmDefault(ops, &gtxnsa, args)
}
- fs, err := txnFieldImm(args[0], true, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
+ return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
}
-// asmItxn delegates to asmItxnOnly or asmItxna depending on number of operands
func asmItxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 1 {
- return asmItxnOnly(ops, spec, args)
+ return asmDefault(ops, spec, args)
}
if len(args) == 2 {
itxna := OpsByName[ops.Version]["itxna"]
- return asmTxna(ops, &itxna, args)
+ return asmDefault(ops, &itxna, args)
}
- return ops.errorf("%s expects one or two arguments", spec.Name)
+ return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
}
-func asmItxnOnly(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, err := txnFieldImm(args[0], false, ops)
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
- }
-
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- ops.returns(fs.ftype)
- return nil
-}
-
-// asmGitxn delegates to asmGtxn or asmGtxna depending on number of operands
+// asmGitxn substitutes gitxna's spec if there are 3 args
func asmGitxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 2 {
- return asmGtxn(ops, spec, args)
+ return asmDefault(ops, spec, args)
}
if len(args) == 3 {
itxna := OpsByName[ops.Version]["gitxna"]
- return asmGtxna(ops, &itxna, args)
- }
- return ops.errorf("%s expects two or three arguments", spec.Name)
-}
-
-func asmGlobal(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := GlobalFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
+ return asmDefault(ops, &itxna, args)
}
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype)
- return nil
-}
-
-func asmAssetHolding(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AssetHoldingFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
-}
-
-func asmAssetParams(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AssetParamsFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
-}
-
-func asmAppParams(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AppParamsFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
-}
-
-func asmAcctParams(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
- fs, ok := AcctParamsFieldSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if fs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], fs.version)
- }
-
- val := fs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", fs.field, fs.ftype)
- ops.returns(fs.ftype, StackUint64)
- return nil
+ return ops.errorf("%s expects 2 or 3 immediate arguments", spec.Name)
}
-func asmTxField(ops *OpStream, spec *OpSpec, args []string) error {
+func asmItxnField(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.errorf("%s expects one argument", spec.Name)
}
- fs, ok := TxnFieldSpecByName[args[0]]
+ fs, ok := txnFieldSpecByName[args[0]]
if !ok {
return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
}
@@ -1165,73 +864,10 @@ func asmTxField(ops *OpStream, spec *OpSpec, args []string) error {
return ops.errorf("%s %#v is not allowed.", spec.Name, args[0])
}
if fs.itxVersion > ops.Version {
- return ops.errorf("%s %#v available in version %d. Missed #pragma version?", spec.Name, args[0], fs.itxVersion)
+ return ops.errorf("%s %s field was introduced in TEAL v%d. Missed #pragma version?", spec.Name, args[0], fs.itxVersion)
}
ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(fs.field))
- return nil
-}
-
-func asmEcdsa(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
-
- cs, ok := EcdsaCurveSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
- }
- if cs.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], cs.version)
- }
-
- val := cs.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- return nil
-}
-
-func asmBase64Decode(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
-
- encoding, ok := base64EncodingSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unknown encoding: %#v", spec.Name, args[0])
- }
- if encoding.version > ops.Version {
- //nolint:errcheck // we continue to maintain typestack
- ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], encoding.version)
- }
-
- val := encoding.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(val))
- ops.trace("%s (%s)", encoding.field, encoding.ftype)
- ops.returns(encoding.ftype)
- return nil
-}
-
-func asmJSONRef(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
- }
-
- jsonSpec, ok := jsonRefSpecByName[args[0]]
- if !ok {
- return ops.errorf("%s unsupported JSON value type: %#v", spec.Name, args[0])
- }
- if jsonSpec.version > ops.Version {
- return ops.errorf("%s %s available in version %d. Missed #pragma version?", spec.Name, args[0], jsonSpec.version)
- }
-
- valueType := jsonSpec.field
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(uint8(valueType))
- ops.trace("%s (%s)", jsonSpec.field, jsonSpec.ftype)
- ops.returns(jsonSpec.ftype)
+ ops.pending.WriteByte(fs.Field())
return nil
}
@@ -1239,16 +875,42 @@ type asmFunc func(*OpStream, *OpSpec, []string) error
// Basic assembly. Any extra bytes of opcode are encoded as byte immediates.
func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != spec.Details.Size-1 {
- return ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size-1)
+ expected := len(spec.Details.Immediates)
+ if len(args) != expected {
+ if expected == 1 {
+ return ops.errorf("%s expects 1 immediate argument", spec.Name)
+ }
+ return ops.errorf("%s expects %d immediate arguments", spec.Name, expected)
}
ops.pending.WriteByte(spec.Opcode)
- for i := 0; i < spec.Details.Size-1; i++ {
- val, err := simpleImm(args[i], "argument")
- if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
+ for i, imm := range spec.Details.Immediates {
+ switch imm.kind {
+ case immByte:
+ if imm.Group != nil {
+ fs, ok := imm.Group.SpecByName(args[i])
+ if !ok {
+ return ops.errorf("%s unknown field: %#v", spec.Name, args[i])
+ }
+ // refine the typestack now, so it is maintained even if there's a version error
+ if fs.Type().Typed() {
+ ops.returns(spec, fs.Type())
+ }
+ if fs.Version() > ops.Version {
+ return ops.errorf("%s %s field was introduced in TEAL v%d. Missed #pragma version?",
+ spec.Name, args[i], fs.Version())
+ }
+ ops.pending.WriteByte(fs.Field())
+ } else {
+ // simple immediate that must be a number from 0-255
+ val, err := simpleImm(args[i], imm.Name)
+ if err != nil {
+ return ops.errorf("%s %w", spec.Name, err)
+ }
+ ops.pending.WriteByte(val)
+ }
+ default:
+ return ops.errorf("unable to assemble immKind %d", imm.kind)
}
- ops.pending.WriteByte(byte(val))
}
return nil
}
@@ -1400,22 +1062,24 @@ func typeTxField(ops *OpStream, args []string) (StackTypes, StackTypes) {
if len(args) != 1 {
return oneAny, nil
}
- fs, ok := TxnFieldSpecByName[args[0]]
+ fs, ok := txnFieldSpecByName[args[0]]
if !ok {
return oneAny, nil
}
return StackTypes{fs.ftype}, nil
}
-// keywords handle parsing and assembling special asm language constructs like 'addr'
-// We use OpSpec here, but somewhat degenerate, since they don't have opcodes or eval functions
+// keywords or "pseudo-ops" handle parsing and assembling special asm language
+// constructs like 'addr' We use an OpSpec here, but it's somewhat degenerate,
+// since they don't have opcodes or eval functions. But it does need a lot of
+// OpSpec, in order to support assembly - Mode, typing info, etc.
var keywords = map[string]OpSpec{
- "int": {0, "int", nil, asmInt, nil, nil, oneInt, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
- "byte": {0, "byte", nil, asmByte, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
+ "int": {0, "int", nil, asmInt, nil, nil, oneInt, 1, modeAny, opDetails{}},
+ "byte": {0, "byte", nil, asmByte, nil, nil, oneBytes, 1, modeAny, opDetails{}},
// parse basics.Address, actually just another []byte constant
- "addr": {0, "addr", nil, asmAddr, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
+ "addr": {0, "addr", nil, asmAddr, nil, nil, oneBytes, 1, modeAny, opDetails{}},
// take a signature, hash it, and take first 4 bytes, actually just another []byte constant
- "method": {0, "method", nil, asmMethod, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil, nil}},
+ "method": {0, "method", nil, asmMethod, nil, nil, oneBytes, 1, modeAny, opDetails{}},
}
type lineError struct {
@@ -1541,12 +1205,12 @@ func (ops *OpStream) checkStack(args StackTypes, returns StackTypes, instruction
stype := ops.tpop()
if firstPop {
firstPop = false
- ops.trace("pops(%s", argType.String())
+ ops.trace("pops(%s", argType)
} else {
- ops.trace(", %s", argType.String())
+ ops.trace(", %s", argType)
}
if !typecheck(argType, stype) {
- err := fmt.Errorf("%s arg %d wanted type %s got %s", strings.Join(instruction, " "), i, argType.String(), stype.String())
+ err := fmt.Errorf("%s arg %d wanted type %s got %s", strings.Join(instruction, " "), i, argType, stype)
if len(ops.labelReferences) > 0 {
ops.warnf("%w; but branches have happened and assembler does not precisely track types in this case", err)
} else {
@@ -1560,11 +1224,11 @@ func (ops *OpStream) checkStack(args StackTypes, returns StackTypes, instruction
}
if len(returns) > 0 {
- ops.tpusha(returns)
- ops.trace(" pushes(%s", returns[0].String())
+ ops.typeStack = append(ops.typeStack, returns...)
+ ops.trace(" pushes(%s", returns[0])
if len(returns) > 1 {
for _, rt := range returns[1:] {
- ops.trace(", %s", rt.String())
+ ops.trace(", %s", rt)
}
}
ops.trace(")")
@@ -2188,27 +1852,131 @@ func (dis *disassembleState) outputLabelIfNeeded() (err error) {
type disFunc func(dis *disassembleState, spec *OpSpec) (string, error)
-// Basic disasemble, and extra bytes of opcode are decoded as bytes integers.
+// Basic disassemble. Immediates are decoded based on info in the OpSpec.
func disDefault(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + spec.Details.Size - 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + spec.Details.Size
out := spec.Name
- for s := 1; s < spec.Details.Size; s++ {
- b := uint(dis.program[dis.pc+s])
- out += fmt.Sprintf(" %d", b)
+ pc := dis.pc + 1
+ for _, imm := range spec.Details.Immediates {
+ out += " "
+ switch imm.kind {
+ case immByte:
+ if pc >= len(dis.program) {
+ return "", fmt.Errorf("program end while reading immediate %s for %s",
+ imm.Name, spec.Name)
+ }
+ b := dis.program[pc]
+ if imm.Group != nil {
+ if int(b) >= len(imm.Group.Names) {
+ return "", fmt.Errorf("invalid immediate %s for %s: %d", imm.Name, spec.Name, b)
+ }
+ name := imm.Group.Names[b]
+ if name == "" {
+ return "", fmt.Errorf("invalid immediate %s for %s: %d", imm.Name, spec.Name, b)
+ }
+ out += name
+ } else {
+ out += fmt.Sprintf("%d", b)
+ }
+ if spec.Name == "intc" && int(b) < len(dis.intc) {
+ out += fmt.Sprintf(" // %d", dis.intc[b])
+ }
+ if spec.Name == "bytec" && int(b) < len(dis.bytec) {
+ out += fmt.Sprintf(" // %s", guessByteFormat(dis.bytec[b]))
+ }
+
+ pc++
+ case immLabel:
+			offset := (uint(dis.program[pc]) << 8) | uint(dis.program[pc+1]) // NOTE(review): no bounds check here, unlike the removed disBranch — a program truncated mid-label panics; consider checking pc+1 < len(dis.program) first
+ target := int(offset) + pc + 2
+ if target > 0xffff {
+ target -= 0x10000
+ }
+ var label string
+ if dis.numericTargets {
+ label = fmt.Sprintf("%d", target)
+ } else {
+ if known, ok := dis.pendingLabels[target]; ok {
+ label = known
+ } else {
+ dis.labelCount++
+ label = fmt.Sprintf("label%d", dis.labelCount)
+ dis.putLabel(label, target)
+ }
+ }
+ out += label
+ pc += 2
+ case immInt:
+ val, bytesUsed := binary.Uvarint(dis.program[pc:])
+ if bytesUsed <= 0 {
+ return "", fmt.Errorf("could not decode immediate %s for %s", imm.Name, spec.Name)
+ }
+ out += fmt.Sprintf("%d", val)
+ pc += bytesUsed
+ case immBytes:
+ length, bytesUsed := binary.Uvarint(dis.program[pc:])
+ if bytesUsed <= 0 {
+ return "", fmt.Errorf("could not decode immediate %s for %s", imm.Name, spec.Name)
+ }
+ pc += bytesUsed
+ end := uint64(pc) + length
+ if end > uint64(len(dis.program)) || end < uint64(pc) {
+ return "", fmt.Errorf("could not decode immediate %s for %s", imm.Name, spec.Name)
+ }
+ constant := dis.program[pc:end]
+ out += fmt.Sprintf("0x%s // %s", hex.EncodeToString(constant), guessByteFormat(constant))
+ pc = int(end)
+ case immInts:
+ intc, nextpc, err := parseIntcblock(dis.program, pc)
+ if err != nil {
+ return "", err
+ }
+
+ dis.intc = append(dis.intc, intc...)
+ for i, iv := range intc {
+ if i != 0 {
+ out += " "
+ }
+ out += fmt.Sprintf("%d", iv)
+ }
+ pc = nextpc
+ case immBytess:
+ bytec, nextpc, err := parseBytecBlock(dis.program, pc)
+ if err != nil {
+ return "", err
+ }
+ dis.bytec = append(dis.bytec, bytec...)
+ for i, bv := range bytec {
+ if i != 0 {
+ out += " "
+ }
+ out += fmt.Sprintf("0x%s", hex.EncodeToString(bv))
+ }
+ pc = nextpc
+ default:
+ return "", fmt.Errorf("unknown immKind %d", imm.kind)
+ }
+ }
+
+ if strings.HasPrefix(spec.Name, "intc_") {
+ b := spec.Name[len(spec.Name)-1] - byte('0')
+ if int(b) < len(dis.intc) {
+ out += fmt.Sprintf(" // %d", dis.intc[b])
+ }
}
+	if strings.HasPrefix(spec.Name, "bytec_") {
+		b := spec.Name[len(spec.Name)-1] - byte('0')
+		if int(b) < len(dis.bytec) {
+			out += fmt.Sprintf(" // %s", guessByteFormat(dis.bytec[b]))
+		}
+	}
+ dis.nextpc = pc
return out, nil
}
var errShortIntcblock = errors.New("intcblock ran past end of program")
var errTooManyIntc = errors.New("intcblock with too many items")
-func parseIntcblock(program []byte, pc int) (intc []uint64, nextpc int, err error) {
- pos := pc + 1
+func parseIntcblock(program []byte, pos int) (intc []uint64, nextpc int, err error) {
numInts, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
err = fmt.Errorf("could not decode intcblock size at pc=%d", pos)
@@ -2264,8 +2032,7 @@ func checkIntConstBlock(cx *EvalContext) error {
var errShortBytecblock = errors.New("bytecblock ran past end of program")
var errTooManyItems = errors.New("bytecblock with too many items")
-func parseBytecBlock(program []byte, pc int) (bytec [][]byte, nextpc int, err error) {
- pos := pc + 1
+func parseBytecBlock(program []byte, pos int) (bytec [][]byte, nextpc int, err error) {
numItems, bytesUsed := binary.Uvarint(program[pos:])
if bytesUsed <= 0 {
err = fmt.Errorf("could not decode bytecblock size at pc=%d", pos)
@@ -2338,68 +2105,6 @@ func checkByteConstBlock(cx *EvalContext) error {
return nil
}
-func disIntcblock(dis *disassembleState, spec *OpSpec) (string, error) {
- intc, nextpc, err := parseIntcblock(dis.program, dis.pc)
- if err != nil {
- return "", err
- }
- dis.nextpc = nextpc
- out := spec.Name
- for _, iv := range intc {
- dis.intc = append(dis.intc, iv)
- out += fmt.Sprintf(" %d", iv)
- }
- return out, nil
-}
-
-func disIntc(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + spec.Details.Size - 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + spec.Details.Size
- var suffix string
- var b int
- switch spec.Opcode {
- case 0x22:
- suffix = "_0"
- b = 0
- case 0x23:
- suffix = "_1"
- b = 1
- case 0x24:
- suffix = "_2"
- b = 2
- case 0x25:
- suffix = "_3"
- b = 3
- case 0x21:
- b = int(dis.program[dis.pc+1])
- suffix = fmt.Sprintf(" %d", b)
- default:
- return "", fmt.Errorf("disIntc on %v", spec)
- }
- if b < len(dis.intc) {
- return fmt.Sprintf("intc%s // %d", suffix, dis.intc[b]), nil
- }
- return fmt.Sprintf("intc%s", suffix), nil
-}
-
-func disBytecblock(dis *disassembleState, spec *OpSpec) (string, error) {
- bytec, nextpc, err := parseBytecBlock(dis.program, dis.pc)
- if err != nil {
- return "", err
- }
- dis.nextpc = nextpc
- out := spec.Name
- for _, bv := range bytec {
- dis.bytec = append(dis.bytec, bv)
- out += fmt.Sprintf(" 0x%s", hex.EncodeToString(bv))
- }
- return out, nil
-}
-
func allPrintableASCII(bytes []byte) bool {
for _, b := range bytes {
if b < 32 || b > 126 {
@@ -2409,11 +2114,11 @@ func allPrintableASCII(bytes []byte) bool {
return true
}
func guessByteFormat(bytes []byte) string {
- var short basics.Address
+ var addr basics.Address
- if len(bytes) == len(short) {
- copy(short[:], bytes[:])
- return fmt.Sprintf("addr %s", short.String())
+ if len(bytes) == len(addr) {
+ copy(addr[:], bytes[:])
+ return fmt.Sprintf("addr %s", addr)
}
if allPrintableASCII(bytes) {
return fmt.Sprintf("%#v", string(bytes))
@@ -2421,284 +2126,6 @@ func guessByteFormat(bytes []byte) string {
return "0x" + hex.EncodeToString(bytes)
}
-func disBytec(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + spec.Details.Size - 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + spec.Details.Size
- var suffix string
- var b int
- switch spec.Opcode {
- case 0x28:
- suffix = "_0"
- b = 0
- case 0x29:
- suffix = "_1"
- b = 1
- case 0x2a:
- suffix = "_2"
- b = 2
- case 0x2b:
- suffix = "_3"
- b = 3
- case 0x27:
- b = int(dis.program[dis.pc+1])
- suffix = fmt.Sprintf(" %d", b)
- }
- if b < len(dis.bytec) {
- return fmt.Sprintf("bytec%s // %s", suffix, guessByteFormat(dis.bytec[b])), nil
- }
- return fmt.Sprintf("bytec%s", suffix), nil
-}
-
-func disPushInt(dis *disassembleState, spec *OpSpec) (string, error) {
- pos := dis.pc + 1
- val, bytesUsed := binary.Uvarint(dis.program[pos:])
- if bytesUsed <= 0 {
- return "", fmt.Errorf("could not decode int at pc=%d", pos)
- }
- dis.nextpc = pos + bytesUsed
- return fmt.Sprintf("%s %d", spec.Name, val), nil
-}
-
-func disPushBytes(dis *disassembleState, spec *OpSpec) (string, error) {
- pos := dis.pc + 1
- length, bytesUsed := binary.Uvarint(dis.program[pos:])
- if bytesUsed <= 0 {
- return "", fmt.Errorf("could not decode bytes length at pc=%d", pos)
- }
- pos += bytesUsed
- end := uint64(pos) + length
- if end > uint64(len(dis.program)) || end < uint64(pos) {
- return "", fmt.Errorf("pushbytes too long %d %d", end, pos)
- }
- bytes := dis.program[pos:end]
- dis.nextpc = int(end)
- return fmt.Sprintf("%s 0x%s // %s", spec.Name, hex.EncodeToString(bytes), guessByteFormat(bytes)), nil
-}
-
-// This is also used to disassemble gtxns, gtxnsas, txnas, itxn, itxnas
-func disTxn(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- txarg := dis.program[dis.pc+1]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, TxnFieldNames[txarg]), nil
-}
-
-// This is also used to disassemble gtxnsa
-func disTxna(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 2
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 3
- txarg := dis.program[dis.pc+1]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- arrayFieldIdx := dis.program[dis.pc+2]
- return fmt.Sprintf("%s %s %d", spec.Name, TxnFieldNames[txarg], arrayFieldIdx), nil
-}
-
-// disGtxn is also used to disassemble gtxnas, gitxn, gitxnas
-func disGtxn(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 2
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 3
- gi := dis.program[dis.pc+1]
- txarg := dis.program[dis.pc+2]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- return fmt.Sprintf("%s %d %s", spec.Name, gi, TxnFieldNames[txarg]), nil
-}
-
-// disGtxna is also used to disassemble gitxna
-func disGtxna(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 3
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 4
- gi := dis.program[dis.pc+1]
- txarg := dis.program[dis.pc+2]
- if int(txarg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
- }
- arrayFieldIdx := dis.program[dis.pc+3]
- return fmt.Sprintf("%s %d %s %d", spec.Name, gi, TxnFieldNames[txarg], arrayFieldIdx), nil
-}
-
-func disGlobal(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- garg := dis.program[dis.pc+1]
- if int(garg) >= len(GlobalFieldNames) {
- return "", fmt.Errorf("invalid global arg index %d at pc=%d", garg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, GlobalFieldNames[garg]), nil
-}
-
-func disBranch(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 2
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
-
- dis.nextpc = dis.pc + 3
- offset := (uint(dis.program[dis.pc+1]) << 8) | uint(dis.program[dis.pc+2])
- target := int(offset) + dis.pc + 3
- if target > 0xffff {
- target -= 0x10000
- }
- var label string
- if dis.numericTargets {
- label = fmt.Sprintf("%d", target)
- } else {
- if known, ok := dis.pendingLabels[target]; ok {
- label = known
- } else {
- dis.labelCount++
- label = fmt.Sprintf("label%d", dis.labelCount)
- dis.putLabel(label, target)
- }
- }
- return fmt.Sprintf("%s %s", spec.Name, label), nil
-}
-
-func disAssetHolding(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AssetHoldingFieldNames) {
- return "", fmt.Errorf("invalid asset holding arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AssetHoldingFieldNames[arg]), nil
-}
-
-func disAssetParams(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AssetParamsFieldNames) {
- return "", fmt.Errorf("invalid asset params arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AssetParamsFieldNames[arg]), nil
-}
-
-func disAppParams(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AppParamsFieldNames) {
- return "", fmt.Errorf("invalid app params arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AppParamsFieldNames[arg]), nil
-}
-
-func disAcctParams(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(AcctParamsFieldNames) {
- return "", fmt.Errorf("invalid acct params arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, AcctParamsFieldNames[arg]), nil
-}
-
-func disTxField(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(TxnFieldNames) {
- return "", fmt.Errorf("invalid %s arg index %d at pc=%d", spec.Name, arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, TxnFieldNames[arg]), nil
-}
-
-func disEcdsa(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- arg := dis.program[dis.pc+1]
- if int(arg) >= len(EcdsaCurveNames) {
- return "", fmt.Errorf("invalid curve arg index %d at pc=%d", arg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, EcdsaCurveNames[arg]), nil
-}
-
-func disBase64Decode(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
- b64dArg := dis.program[dis.pc+1]
- if int(b64dArg) >= len(base64EncodingNames) {
- return "", fmt.Errorf("invalid base64_decode arg index %d at pc=%d", b64dArg, dis.pc)
- }
- return fmt.Sprintf("%s %s", spec.Name, base64EncodingNames[b64dArg]), nil
-}
-
-func disJSONRef(dis *disassembleState, spec *OpSpec) (string, error) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- }
- dis.nextpc = dis.pc + 2
-
- jsonRefArg := dis.program[dis.pc+1]
- if int(jsonRefArg) >= len(jsonRefSpecByName) {
- return "", fmt.Errorf("invalid json_ref arg index %d at pc=%d", jsonRefArg, dis.pc)
- }
-
- return fmt.Sprintf("%s %s", spec.Name, jsonRefTypeNames[jsonRefArg]), nil
-}
-
type disInfo struct {
pcOffset []PCOffset
hasStatefulOps bool
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 29a7a1aff..88ba667f4 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -556,40 +556,46 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
func TestAssembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
- testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
- testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
- testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna found scalar field \"Sender\"...")
- testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
- testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
- testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna transaction index beyond 255: 256")
- testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna found scalar field \"Sender\"...")
- testLine(t, "txn Accounts 0", 1, "txn expects one argument")
- testLine(t, "txn Accounts 0 1", 2, "txn expects one or two arguments")
- testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects two immediate arguments")
- testLine(t, "txnas Accounts 1", AssemblerMaxVersion, "txnas expects one immediate argument")
+ testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna i beyond 255: 256")
+ testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna i beyond 255: 256")
+ testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: \"Sender\"")
+ testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna i beyond 255: 256")
+ testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna i beyond 255: 256")
+ testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna t beyond 255: 256")
+ testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: \"Sender\"")
+ testLine(t, "txn Accounts 0", 1, "txn expects 1 immediate argument")
+ testLine(t, "txn Accounts 0 1", 2, "txn expects 1 or 2 immediate arguments")
+ testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects 2 immediate arguments")
+ testLine(t, "txnas Accounts 1", AssemblerMaxVersion, "txnas expects 1 immediate argument")
testLine(t, "txna Accounts a", AssemblerMaxVersion, "txna unable to parse...")
- testLine(t, "gtxn 0 Sender 0", 1, "gtxn expects two arguments")
- testLine(t, "gtxn 0 Sender 1 2", 2, "gtxn expects two or three arguments")
- testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects three arguments")
+ testLine(t, "gtxn 0 Sender 0", 1, "gtxn expects 2 immediate arguments")
+ testLine(t, "gtxn 0 Sender 1 2", 2, "gtxn expects 2 or 3 immediate arguments")
+ testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects 3 immediate arguments")
testLine(t, "gtxna a Accounts 0", AssemblerMaxVersion, "gtxna unable to parse...")
testLine(t, "gtxna 0 Accounts a", AssemblerMaxVersion, "gtxna unable to parse...")
- testLine(t, "gtxnas Accounts 1 2", AssemblerMaxVersion, "gtxnas expects two immediate arguments")
+ testLine(t, "gtxnas Accounts 1 2", AssemblerMaxVersion, "gtxnas expects 2 immediate arguments")
testLine(t, "txn ABC", 2, "txn unknown field: \"ABC\"")
testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: \"ABC\"")
testLine(t, "gtxn a ABC", 2, "gtxn unable to parse...")
- testLine(t, "txn Accounts", AssemblerMaxVersion, "txn found array field \"Accounts\"...")
- testLine(t, "txn Accounts", 1, "txn found array field \"Accounts\"...")
+ testLine(t, "txn Accounts", 1, "txn unknown field: \"Accounts\"")
+ testLine(t, "txn Accounts", AssemblerMaxVersion, "txn unknown field: \"Accounts\"")
testLine(t, "txn Accounts 0", AssemblerMaxVersion, "")
- testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "gtxn found array field \"Accounts\"...")
- testLine(t, "gtxn 0 Accounts", 1, "gtxn found array field \"Accounts\"...")
+ testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "gtxn unknown field: \"Accounts\"...")
+ testLine(t, "gtxn 0 Accounts", 1, "gtxn unknown field: \"Accounts\"")
testLine(t, "gtxn 0 Accounts 1", AssemblerMaxVersion, "")
}
func TestAssembleGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
- testLine(t, "global", AssemblerMaxVersion, "global expects one argument")
+ testLine(t, "global", AssemblerMaxVersion, "global expects 1 immediate argument")
testLine(t, "global a", AssemblerMaxVersion, "global unknown field: \"a\"")
+ testProg(t, "global MinTxnFee; int 2; +", AssemblerMaxVersion)
+ testProg(t, "global ZeroAddress; byte 0x12; concat; len", AssemblerMaxVersion)
+ testProg(t, "global MinTxnFee; byte 0x12; concat", AssemblerMaxVersion,
+ Expect{3, "concat arg 0 wanted type []byte..."})
+ testProg(t, "int 2; global ZeroAddress; +", AssemblerMaxVersion,
+ Expect{3, "+ arg 1 wanted type uint64..."})
}
func TestAssembleDefault(t *testing.T) {
@@ -1522,7 +1528,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[2] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for txn")
source = `txna Accounts 0`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1530,7 +1536,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[2] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for txna")
source = `gtxn 0 Sender`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1538,7 +1544,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[3] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for gtxn")
source = `gtxna 0 Accounts 0`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1546,7 +1552,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[3] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for gtxna")
source = `global MinTxnFee`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1554,7 +1560,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[2] = 0x50 // txn field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid global arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for global")
ops.Program[0] = 0x11 // version
out, err := Disassemble(ops.Program)
@@ -1573,7 +1579,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[7] = 0x50 // holding field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid asset holding arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for asset_holding_get")
source = "int 0\nasset_params_get AssetTotal"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1581,7 +1587,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program[4] = 0x50 // params field
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "invalid asset params arg index")
+ require.Contains(t, err.Error(), "invalid immediate f for asset_params_get")
source = "int 0\nasset_params_get AssetTotal"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1591,17 +1597,22 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program = ops.Program[0 : len(ops.Program)-1]
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected asset_params_get opcode end: missing 1 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate f for asset_params_get")
source = "gtxna 0 Accounts 0"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
_, err = Disassemble(ops.Program)
require.NoError(t, err)
- ops.Program = ops.Program[0 : len(ops.Program)-2]
- _, err = Disassemble(ops.Program)
+ _, err = Disassemble(ops.Program[0 : len(ops.Program)-1])
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "program end while reading immediate i for gtxna")
+ _, err = Disassemble(ops.Program[0 : len(ops.Program)-2])
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "program end while reading immediate f for gtxna")
+ _, err = Disassemble(ops.Program[0 : len(ops.Program)-3])
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected gtxna opcode end: missing 2 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate t for gtxna")
source = "txna Accounts 0"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1611,7 +1622,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program = ops.Program[0 : len(ops.Program)-1]
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected txna opcode end: missing 1 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate i for txna")
source = "byte 0x4141\nsubstring 0 1"
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
@@ -1621,7 +1632,7 @@ func TestAssembleDisassembleErrors(t *testing.T) {
ops.Program = ops.Program[0 : len(ops.Program)-1]
_, err = Disassemble(ops.Program)
require.Error(t, err)
- require.Contains(t, err.Error(), "unexpected substring opcode end: missing 1 bytes")
+ require.Contains(t, err.Error(), "program end while reading immediate e for substring")
}
func TestAssembleVersions(t *testing.T) {
@@ -1671,21 +1682,28 @@ func TestAssembleAsset(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- introduction := OpsByName[LogicVersion]["asset_holding_get"].Version
- for v := introduction; v <= AssemblerMaxVersion; v++ {
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
testProg(t, "asset_holding_get ABC 1", v,
Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; asset_holding_get ABC 1", v,
Expect{2, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; int 1; asset_holding_get ABC 1", v,
- Expect{3, "asset_holding_get expects one argument"})
+ Expect{3, "asset_holding_get expects 1 immediate argument"})
testProg(t, "int 1; int 1; asset_holding_get ABC", v,
Expect{3, "asset_holding_get unknown field: \"ABC\""})
testProg(t, "byte 0x1234; asset_params_get ABC 1", v,
Expect{2, "asset_params_get ABC 1 arg 0 wanted type uint64..."})
- testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects one argument")
+ // Test that AssetUnitName is known to return bytes
+ testProg(t, "int 1; asset_params_get AssetUnitName; pop; int 1; +", v,
+ Expect{5, "+ arg 0 wanted type uint64..."})
+
+ // Test that AssetTotal is known to return uint64
+ testProg(t, "int 1; asset_params_get AssetTotal; pop; byte 0x12; concat", v,
+ Expect{5, "concat arg 0 wanted type []byte..."})
+
+ testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects 1 immediate argument")
testLine(t, "asset_params_get ABC", v, "asset_params_get unknown field: \"ABC\"")
}
}
@@ -2198,7 +2216,7 @@ func TestErrShortBytecblock(t *testing.T) {
text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8`
ops, err := AssembleStringWithVersion(text, 1)
require.NoError(t, err)
- _, _, err = parseIntcblock(ops.Program, 0)
+ _, _, err = parseIntcblock(ops.Program, 1)
require.Equal(t, err, errShortIntcblock)
var cx EvalContext
@@ -2364,6 +2382,7 @@ func TestCoverAsm(t *testing.T) {
testProg(t, `int 4; byte "ayush"; int 5; cover 1; pop; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, Expect{5, "+ arg 1..."})
+ testProg(t, `int 4; cover junk`, AssemblerMaxVersion, Expect{2, "cover unable to parse n ..."})
}
func TestUncoverAsm(t *testing.T) {
@@ -2376,6 +2395,7 @@ func TestUncoverAsm(t *testing.T) {
}
func TestTxTypes(t *testing.T) {
+ partitiontest.PartitionTest(t)
testProg(t, "itxn_begin; itxn_field Sender", 5, Expect{2, "itxn_field Sender expects 1 stack argument..."})
testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, Expect{3, "...wanted type []byte got uint64"})
testProg(t, "itxn_begin; byte 0x56127823; itxn_field Sender", 5)
@@ -2384,3 +2404,20 @@ func TestTxTypes(t *testing.T) {
testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, Expect{3, "...wanted type uint64 got []byte"})
testProg(t, "itxn_begin; int 1; itxn_field Amount", 5)
}
+
+func TestBadInnerFields(t *testing.T) {
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{3, "...Note field was introduced in TEAL v6..."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{3, "...VotePK field was introduced in TEAL v6..."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{4, "...is not allowed."})
+
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, Expect{3, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 6)
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 6)
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, Expect{4, "...is not allowed."})
+}
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index b5917219d..e2503e887 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -360,9 +360,9 @@ func TestBackwardCompatGlobalFields(t *testing.T) {
for _, field := range fields {
text := fmt.Sprintf("global %s", field.field.String())
// check assembler fails if version before introduction
- testLine(t, text, assemblerNoVersion, "...available in version...")
+ testLine(t, text, assemblerNoVersion, "...was introduced in...")
for v := uint64(0); v < field.version; v++ {
- testLine(t, text, v, "...available in version...")
+ testLine(t, text, v, "...was introduced in...")
}
ops := testProg(t, text, AssemblerMaxVersion)
@@ -410,13 +410,13 @@ func TestBackwardCompatTxnFields(t *testing.T) {
field := fs.field.String()
for _, command := range tests {
text := fmt.Sprintf(command, field)
- asmError := "...available in version ..."
+ asmError := "...was introduced in ..."
if fs.array {
parts := strings.Split(text, " ")
op := parts[0]
- asmError = fmt.Sprintf("%s found array field %#v while expecting scalar", op, field)
+ asmError = fmt.Sprintf("%s unknown field: %#v", op, field)
}
- // check assembler fails if version before introduction
+ // check assembler fails in versions before introduction
testLine(t, text, assemblerNoVersion, asmError)
for v := uint64(0); v < fs.version; v++ {
testLine(t, text, v, asmError)
@@ -425,7 +425,7 @@ func TestBackwardCompatTxnFields(t *testing.T) {
ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
if fs.array {
// "txn Accounts" is invalid, so skip evaluation
- require.Error(t, err, asmError)
+ require.Error(t, err)
continue
} else {
require.NoError(t, err)
@@ -488,8 +488,8 @@ func TestBackwardCompatAssemble(t *testing.T) {
func TestExplicitConstants(t *testing.T) {
partitiontest.PartitionTest(t)
- require.Equal(t, 4096, MaxStringSize, "constant changed, move it to consensus params")
- require.Equal(t, 64, MaxByteMathSize, "constant changed, move it to consensus params")
- require.Equal(t, 1024, MaxLogSize, "constant changed, move it to consensus params")
- require.Equal(t, 32, MaxLogCalls, "constant changed, move it to consensus params")
+ require.Equal(t, 4096, maxStringSize, "constant changed, make it version dependent")
+	require.Equal(t, 64, maxByteMathSize, "constant changed, make it version dependent")
+	require.Equal(t, 1024, maxLogSize, "constant changed, make it version dependent")
+	require.Equal(t, 32, maxLogCalls, "constant changed, make it version dependent")
}
diff --git a/data/transactions/logic/debugger.go b/data/transactions/logic/debugger.go
index 7d7c95024..a9ff00400 100644
--- a/data/transactions/logic/debugger.go
+++ b/data/transactions/logic/debugger.go
@@ -56,6 +56,13 @@ type PCOffset struct {
Offset int `codec:"offset"`
}
+// CallFrame stores the label name and the line of the subroutine.
+// An array of CallFrames forms the CallStack.
+type CallFrame struct {
+ FrameLine int `codec:"frameLine"`
+ LabelName string `codec:"labelname"`
+}
+
// DebugState is a representation of the evaluation context that we encode
// to json and send to tealdbg
type DebugState struct {
@@ -75,6 +82,7 @@ type DebugState struct {
Scratch []basics.TealValue `codec:"scratch"`
Error string `codec:"error"`
OpcodeBudget int `codec:"budget"`
+ CallStack []CallFrame `codec:"callstack"`
// global/local state changes are updated every step. Stateful TEAL only.
transactions.EvalDelta
@@ -192,10 +200,32 @@ func valueDeltaToValueDelta(vd *basics.ValueDelta) basics.ValueDelta {
}
}
+// parseCallstack initializes an array of CallFrame objects from the raw
+// callstack.
+func (d *DebugState) parseCallstack(callstack []int) []CallFrame {
+ callFrames := make([]CallFrame, 0)
+ lines := strings.Split(d.Disassembly, "\n")
+ for _, pc := range callstack {
+ // The callsub is pc - 3 from the callstack pc
+ callsubLineNum := d.PCToLine(pc - 3)
+ callSubLine := strings.Fields(lines[callsubLineNum])
+ label := ""
+ if callSubLine[0] == "callsub" {
+ label = callSubLine[1]
+ }
+ callFrames = append(callFrames, CallFrame{
+ FrameLine: callsubLineNum,
+ LabelName: label,
+ })
+ }
+ return callFrames
+}
+
func (cx *EvalContext) refreshDebugState(evalError error) *DebugState {
ds := cx.debugState
- // Update pc, line, error, stack, and scratch space
+ // Update pc, line, error, stack, scratch space, callstack,
+ // and opcode budget
ds.PC = cx.pc
ds.Line = ds.PCToLine(cx.pc)
if evalError != nil {
@@ -215,6 +245,7 @@ func (cx *EvalContext) refreshDebugState(evalError error) *DebugState {
ds.Stack = stack
ds.Scratch = scratch
ds.OpcodeBudget = cx.remainingBudget()
+ ds.CallStack = ds.parseCallstack(cx.callstack)
if (cx.runModeFlags & runModeApplication) != 0 {
ds.EvalDelta = cx.txn.EvalDelta
diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go
index 80e4a639a..060b953fc 100644
--- a/data/transactions/logic/debugger_test.go
+++ b/data/transactions/logic/debugger_test.go
@@ -174,3 +174,62 @@ func TestValueDeltaToValueDelta(t *testing.T) {
require.Equal(t, base64.StdEncoding.EncodeToString([]byte(vDelta.Bytes)), ans.Bytes)
require.Equal(t, vDelta.Uint, ans.Uint)
}
+
+var testCallStackProgram string = `intcblock 1
+callsub label1
+intc_0
+label1:
+callsub label2
+label2:
+intc_0
+`
+
+func TestParseCallstack(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ expectedCallFrames := []CallFrame{
+ {
+ FrameLine: 1,
+ LabelName: "label1",
+ },
+ {
+ FrameLine: 4,
+ LabelName: "label2",
+ },
+ }
+
+ dState := DebugState{
+ Disassembly: testCallStackProgram,
+ PCOffset: []PCOffset{{PC: 1, Offset: 18}, {PC: 4, Offset: 30}, {PC: 7, Offset: 45}, {PC: 8, Offset: 65}, {PC: 11, Offset: 88}},
+ }
+ callstack := []int{4, 8}
+
+ cfs := dState.parseCallstack(callstack)
+ require.Equal(t, expectedCallFrames, cfs)
+}
+
+func TestCallStackUpdate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ expectedCallFrames := []CallFrame{
+ {
+ FrameLine: 2,
+ LabelName: "label1",
+ },
+ {
+ FrameLine: 5,
+ LabelName: "label2",
+ },
+ }
+
+ testDbg := testDbgHook{}
+ ep := defaultEvalParams(nil)
+ ep.Debugger = &testDbg
+ testLogic(t, testCallStackProgram, AssemblerMaxVersion, ep)
+
+ require.Equal(t, 1, testDbg.register)
+ require.Equal(t, 1, testDbg.complete)
+ require.Greater(t, testDbg.update, 1)
+ require.Len(t, testDbg.state.Stack, 1)
+ require.Equal(t, testDbg.state.CallStack, expectedCallFrames)
+}
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index ea714744d..9e2dc2def 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -47,8 +47,8 @@ var opDocByName = map[string]string{
"!=": "A is not equal to B => {0 or 1}",
"!": "A == 0 yields 1; else 0",
"len": "yields length of byte value A",
- "itob": "converts uint64 A to big endian bytes",
- "btoi": "converts bytes A as big endian to uint64",
+ "itob": "converts uint64 A to big-endian byte array, always of length 8",
+ "btoi": "converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8.",
"%": "A modulo B. Fail if B == 0.",
"|": "A bitwise-or B",
"&": "A bitwise-and B",
@@ -213,10 +213,10 @@ var opcodeImmediateNotes = map[string]string{
"gtxnas": "{uint8 transaction group index} {uint8 transaction field index}",
"gtxnsas": "{uint8 transaction field index}",
- "bnz": "{int16 branch offset, big endian}",
- "bz": "{int16 branch offset, big endian}",
- "b": "{int16 branch offset, big endian}",
- "callsub": "{int16 branch offset, big endian}",
+ "bnz": "{int16 branch offset, big-endian}",
+ "bz": "{int16 branch offset, big-endian}",
+ "b": "{int16 branch offset, big-endian}",
+ "callsub": "{int16 branch offset, big-endian}",
"load": "{uint8 position in scratch space to load from}",
"store": "{uint8 position in scratch space to store to}",
@@ -344,28 +344,30 @@ var OpGroups = map[string][]string{
"Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna", "itxnas", "gitxn", "gitxna", "gitxnas"},
}
-// OpCost indicates the cost of an operation over the range of
+// VerCost indicates the cost of an operation over the range of
// LogicVersions from From to To.
-type OpCost struct {
+type VerCost struct {
From int
To int
- Cost int
+ // Cost is a human readable string to describe costs. Simple opcodes are
+ // just an integer, but some opcodes have field or stack dependencies.
+ Cost string
}
-// OpAllCosts returns an array of the cost score for an op by version.
-// Each entry indicates the cost over a range of versions, so if the
-// cost has remained constant, there is only one result, otherwise
-// each entry shows the cost for a consecutive range of versions,
-// inclusive.
-func OpAllCosts(opName string) []OpCost {
- var costs []OpCost
+// OpAllCosts returns an array of the cost of an op by version. Each entry
+// indicates the cost over a range of versions, so if the cost has remained
+// constant, there is only one result, otherwise each entry shows the cost for a
+// consecutive range of versions, inclusive.
+func OpAllCosts(opName string) []VerCost {
+ var costs []VerCost
for v := 1; v <= LogicVersion; v++ {
- cost := OpsByName[v][opName].Details.Cost
- if cost == 0 {
+ spec, ok := OpsByName[v][opName]
+ if !ok {
continue
}
+ cost := spec.Details.docCost()
if costs == nil || cost != costs[len(costs)-1].Cost {
- costs = append(costs, OpCost{v, v, cost})
+ costs = append(costs, VerCost{v, v, cost})
} else {
costs[len(costs)-1].To = v
}
@@ -408,99 +410,6 @@ func OnCompletionDescription(value uint64) string {
// OnCompletionPreamble describes what the OnCompletion constants represent.
const OnCompletionPreamble = "An application transaction must indicate the action to be taken following the execution of its approvalProgram or clearStateProgram. The constants below describe the available actions."
-var txnFieldDocs = map[string]string{
- "Type": "Transaction type as bytes",
- "TypeEnum": "See table below",
- "Sender": "32 byte address",
- "Fee": "microalgos",
- "FirstValid": "round number",
- "FirstValidTime": "Causes program to fail; reserved for future use",
- "LastValid": "round number",
- "Note": "Any data up to 1024 bytes",
- "Lease": "32 byte lease value",
- "RekeyTo": "32 byte Sender's new AuthAddr",
-
- "GroupIndex": "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1",
- "TxID": "The computed ID for this transaction. 32 bytes.",
-
- "Receiver": "32 byte address",
- "Amount": "microalgos",
- "CloseRemainderTo": "32 byte address",
-
- "VotePK": "32 byte address",
- "SelectionPK": "32 byte address",
- "StateProofPK": "64 byte state proof public key commitment",
- "VoteFirst": "The first round that the participation key is valid.",
- "VoteLast": "The last round that the participation key is valid.",
- "VoteKeyDilution": "Dilution for the 2-level participation key",
- "Nonparticipation": "Marks an account nonparticipating for rewards",
-
- "XferAsset": "Asset ID",
- "AssetAmount": "value in Asset's units",
- "AssetSender": "32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset.",
- "AssetReceiver": "32 byte address",
- "AssetCloseTo": "32 byte address",
-
- "ApplicationID": "ApplicationID from ApplicationCall transaction",
- "OnCompletion": "ApplicationCall transaction on completion action",
- "ApplicationArgs": "Arguments passed to the application in the ApplicationCall transaction",
- "NumAppArgs": "Number of ApplicationArgs",
- "Accounts": "Accounts listed in the ApplicationCall transaction",
- "NumAccounts": "Number of Accounts",
- "Assets": "Foreign Assets listed in the ApplicationCall transaction",
- "NumAssets": "Number of Assets",
- "Applications": "Foreign Apps listed in the ApplicationCall transaction",
- "NumApplications": "Number of Applications",
- "GlobalNumUint": "Number of global state integers in ApplicationCall",
- "GlobalNumByteSlice": "Number of global state byteslices in ApplicationCall",
- "LocalNumUint": "Number of local state integers in ApplicationCall",
- "LocalNumByteSlice": "Number of local state byteslices in ApplicationCall",
- "ApprovalProgram": "Approval program",
- "ClearStateProgram": "Clear state program",
- "ExtraProgramPages": "Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program.",
-
- "ConfigAsset": "Asset ID in asset config transaction",
- "ConfigAssetTotal": "Total number of units of this asset created",
- "ConfigAssetDecimals": "Number of digits to display after the decimal place when displaying the asset",
- "ConfigAssetDefaultFrozen": "Whether the asset's slots are frozen by default or not, 0 or 1",
- "ConfigAssetUnitName": "Unit name of the asset",
- "ConfigAssetName": "The asset name",
- "ConfigAssetURL": "URL",
- "ConfigAssetMetadataHash": "32 byte commitment to some unspecified asset metadata",
- "ConfigAssetManager": "32 byte address",
- "ConfigAssetReserve": "32 byte address",
- "ConfigAssetFreeze": "32 byte address",
- "ConfigAssetClawback": "32 byte address",
-
- "FreezeAsset": "Asset ID being frozen or un-frozen",
- "FreezeAssetAccount": "32 byte address of the account whose asset slot is being frozen or un-frozen",
- "FreezeAssetFrozen": "The new frozen value, 0 or 1",
-
- "Logs": "Log messages emitted by an application call (only with `itxn` in v5)",
- "NumLogs": "Number of Logs (only with `itxn` in v5)",
- "LastLog": "The last message emitted. Empty bytes if none were emitted",
- "CreatedAssetID": "Asset ID allocated by the creation of an ASA (only with `itxn` in v5)",
- "CreatedApplicationID": "ApplicationID allocated by the creation of an application (only with `itxn` in v5)",
-}
-
-var globalFieldDocs = map[string]string{
- "MinTxnFee": "microalgos",
- "MinBalance": "microalgos",
- "MaxTxnLife": "rounds",
- "ZeroAddress": "32 byte address of all zero bytes",
- "GroupSize": "Number of transactions in this atomic transaction group. At least 1",
- "LogicSigVersion": "Maximum supported version",
- "Round": "Current round number",
- "LatestTimestamp": "Last confirmed block UNIX timestamp. Fails if negative",
- "CurrentApplicationID": "ID of current application executing",
- "CreatorAddress": "Address of the creator of the current application",
- "CurrentApplicationAddress": "Address that the current application controls",
- "GroupID": "ID of the transaction group. 32 zero bytes if the transaction is not part of a group.",
- "OpcodeBudget": "The remaining cost that can be spent by opcodes in this program.",
- "CallerApplicationID": "The application ID of the application that called this application. 0 if this application is at the top-level.",
- "CallerApplicationAddress": "The application address of the application that called this application. ZeroAddress if this application is at the top-level.",
-}
-
func addExtra(original string, extra string) string {
if len(original) == 0 {
return extra
@@ -514,51 +423,3 @@ func addExtra(original string, extra string) string {
}
return original + sep + extra
}
-
-// AssetHoldingFieldDocs are notes on fields available in `asset_holding_get`
-var assetHoldingFieldDocs = map[string]string{
- "AssetBalance": "Amount of the asset unit held by this account",
- "AssetFrozen": "Is the asset frozen or not",
-}
-
-// assetParamsFieldDocs are notes on fields available in `asset_params_get`
-var assetParamsFieldDocs = map[string]string{
- "AssetTotal": "Total number of units of this asset",
- "AssetDecimals": "See AssetParams.Decimals",
- "AssetDefaultFrozen": "Frozen by default or not",
- "AssetUnitName": "Asset unit name",
- "AssetName": "Asset name",
- "AssetURL": "URL with additional info about the asset",
- "AssetMetadataHash": "Arbitrary commitment",
- "AssetManager": "Manager commitment",
- "AssetReserve": "Reserve address",
- "AssetFreeze": "Freeze address",
- "AssetClawback": "Clawback address",
- "AssetCreator": "Creator address",
-}
-
-// appParamsFieldDocs are notes on fields available in `app_params_get`
-var appParamsFieldDocs = map[string]string{
- "AppApprovalProgram": "Bytecode of Approval Program",
- "AppClearStateProgram": "Bytecode of Clear State Program",
- "AppGlobalNumUint": "Number of uint64 values allowed in Global State",
- "AppGlobalNumByteSlice": "Number of byte array values allowed in Global State",
- "AppLocalNumUint": "Number of uint64 values allowed in Local State",
- "AppLocalNumByteSlice": "Number of byte array values allowed in Local State",
- "AppExtraProgramPages": "Number of Extra Program Pages of code space",
- "AppCreator": "Creator address",
- "AppAddress": "Address for which this application has authority",
-}
-
-// acctParamsFieldDocs are notes on fields available in `app_params_get`
-var acctParamsFieldDocs = map[string]string{
- "AcctBalance": "Account balance in microalgos",
- "AcctMinBalance": "Minimum required blance for account, in microalgos",
- "AcctAuthAddr": "Address the account is rekeyed to.",
-}
-
-// EcdsaCurveDocs are notes on curves available in `ecdsa_` opcodes
-var EcdsaCurveDocs = map[string]string{
- "Secp256k1": "secp256k1 curve",
- "Secp256r1": "secp256r1 curve",
-}
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index 85b97aaad..a0bbaa384 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -40,15 +40,8 @@ func TestOpDocs(t *testing.T) {
assert.True(t, seen, "opDocByName is missing doc for %#v", op)
}
- require.Len(t, txnFieldDocs, len(TxnFieldNames))
require.Len(t, onCompletionDescriptions, len(OnCompletionNames))
- require.Len(t, globalFieldDocs, len(GlobalFieldNames))
- require.Len(t, assetHoldingFieldDocs, len(AssetHoldingFieldNames))
- require.Len(t, assetParamsFieldDocs, len(AssetParamsFieldNames))
- require.Len(t, appParamsFieldDocs, len(AppParamsFieldNames))
- require.Len(t, acctParamsFieldDocs, len(AcctParamsFieldNames))
require.Len(t, TypeNameDescriptions, len(TxnTypeNames))
- require.Len(t, EcdsaCurveDocs, len(EcdsaCurveNames))
}
// TestDocStragglers confirms that we don't have any docs laying
@@ -137,12 +130,12 @@ func TestOpAllCosts(t *testing.T) {
a := OpAllCosts("+")
require.Len(t, a, 1)
- require.Equal(t, 1, a[0].Cost)
+ require.Equal(t, "1", a[0].Cost)
a = OpAllCosts("sha256")
require.Len(t, a, 2)
for _, cost := range a {
- require.True(t, cost.Cost > 1)
+ require.True(t, cost.Cost != "0")
}
}
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index b34c9a5e7..e2512cf04 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -46,23 +46,23 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-// EvalMaxVersion is the max version we can interpret and run
-const EvalMaxVersion = LogicVersion
+// evalMaxVersion is the max version we can interpret and run
+const evalMaxVersion = LogicVersion
-// The constants below control TEAL opcodes evaluation and MAY NOT be changed
-// without moving them into consensus parameters.
+// The constants below control opcode evaluation and MAY NOT be changed without
+// gating them by version. Old programs need to retain their old behavior.
-// MaxStringSize is the limit of byte string length in an AVM value
-const MaxStringSize = 4096
+// maxStringSize is the limit of byte string length in an AVM value
+const maxStringSize = 4096
-// MaxByteMathSize is the limit of byte strings supplied as input to byte math opcodes
-const MaxByteMathSize = 64
+// maxByteMathSize is the limit of byte strings supplied as input to byte math opcodes
+const maxByteMathSize = 64
-// MaxLogSize is the limit of total log size from n log calls in a program
-const MaxLogSize = 1024
+// maxLogSize is the limit of total log size from n log calls in a program
+const maxLogSize = 1024
-// MaxLogCalls is the limit of total log calls during a program execution
-const MaxLogCalls = 32
+// maxLogCalls is the limit of total log calls during a program execution
+const maxLogCalls = 32
// maxAppCallDepth is the limit on inner appl call depth
// To be clear, 0 would prevent inner appls, 1 would mean inner app calls cannot
@@ -70,6 +70,9 @@ const MaxLogCalls = 32
// you count the top-level app call.
const maxAppCallDepth = 8
+// maxStackDepth should not change unless controlled by a teal version change
+const maxStackDepth = 1000
+
// stackValue is the type for the operand stack.
// Each stackValue is either a valid []byte value or a uint64 value.
// If (.Bytes != nil) the stackValue is a []byte value, otherwise uint64 value.
@@ -395,7 +398,7 @@ func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext)
return ep
}
-type opEvalFunc func(cx *EvalContext) error
+type evalFunc func(cx *EvalContext) error
type opCheckFunc func(cx *EvalContext) error
type runMode uint64
@@ -775,9 +778,6 @@ func check(program []byte, params *EvalParams, mode runMode) (err error) {
cx.instructionStarts = make(map[int]bool)
maxCost := cx.remainingBudget()
- if version >= backBranchEnabledVersion {
- maxCost = math.MaxInt32
- }
staticCost := 0
for cx.pc < len(cx.program) {
prevpc := cx.pc
@@ -786,7 +786,7 @@ func check(program []byte, params *EvalParams, mode runMode) (err error) {
return fmt.Errorf("pc=%3d %w", cx.pc, err)
}
staticCost += stepCost
- if staticCost > maxCost {
+ if version < backBranchEnabledVersion && staticCost > maxCost {
return fmt.Errorf("pc=%3d static cost budget of %d exceeded", cx.pc, maxCost)
}
if cx.pc <= prevpc {
@@ -794,7 +794,7 @@ func check(program []byte, params *EvalParams, mode runMode) (err error) {
// without evaluation. It always goes forward,
// even if we're in v4 and the jump would go
// back.
- return fmt.Errorf("pc did not advance, stuck at %d", cx.pc)
+ return fmt.Errorf("pc=%3d pc did not advance", cx.pc)
}
}
return nil
@@ -805,8 +805,8 @@ func versionCheck(program []byte, params *EvalParams) (uint64, int, error) {
if err != nil {
return 0, 0, err
}
- if version > EvalMaxVersion {
- return 0, 0, fmt.Errorf("program version %d greater than max supported version %d", version, EvalMaxVersion)
+ if version > evalMaxVersion {
+ return 0, 0, fmt.Errorf("program version %d greater than max supported version %d", version, evalMaxVersion)
}
if version > params.Proto.LogicSigVersion {
return 0, 0, fmt.Errorf("program version %d greater than protocol supported version %d", version, params.Proto.LogicSigVersion)
@@ -843,9 +843,6 @@ func boolToUint(x bool) uint64 {
return 0
}
-// MaxStackDepth should not change unless gated by a teal version change / consensus upgrade.
-const MaxStackDepth = 1000
-
func (cx *EvalContext) remainingBudget() int {
if cx.runModeFlags == runModeSignature {
return int(cx.Proto.LogicSigMaxCost) - cx.cost
@@ -904,18 +901,29 @@ func (cx *EvalContext) step() error {
if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
return fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
}
- cx.cost += deets.Cost
+
+ // It's something like a 5-10% overhead on our simplest instructions to make
+ // the Cost() call without the FullCost.compute() short-circuit, even
+ // though Cost() tries to exit fast. Use BenchmarkUintMath to test changes.
+ opcost := deets.FullCost.compute(cx.stack)
+ if opcost <= 0 {
+ opcost = deets.Cost(cx.program, cx.pc, cx.stack)
+ if opcost <= 0 {
+ return fmt.Errorf("%3d %s returned 0 cost", cx.pc, spec.Name)
+ }
+ }
+ cx.cost += opcost
if cx.PooledApplicationBudget != nil {
- *cx.PooledApplicationBudget -= deets.Cost
+ *cx.PooledApplicationBudget -= opcost
}
if cx.remainingBudget() < 0 {
// We're not going to execute the instruction, so give the cost back.
// This only matters if this is an inner ClearState - the caller should
// not be over debited. (Normally, failure causes total txtree failure.)
- cx.cost -= deets.Cost
+ cx.cost -= opcost
if cx.PooledApplicationBudget != nil {
- *cx.PooledApplicationBudget += deets.Cost
+ *cx.PooledApplicationBudget += opcost
}
return fmt.Errorf("pc=%3d dynamic cost budget exceeded, executing %s: local program cost was %d",
cx.pc, spec.Name, cx.cost)
@@ -936,7 +944,7 @@ func (cx *EvalContext) step() error {
if !opCompat(argType, stackType) {
return fmt.Errorf("%s produced %s but intended %s", spec.Name, cx.stack[first+i].typeName(), argType)
}
- if stackType == StackBytes && len(cx.stack[first+i].Bytes) > MaxStringSize {
+ if stackType == StackBytes && len(cx.stack[first+i].Bytes) > maxStringSize {
return fmt.Errorf("%s produced a too big (%d) byte-array", spec.Name, len(cx.stack[first+i].Bytes))
}
}
@@ -993,7 +1001,7 @@ func (cx *EvalContext) step() error {
return err
}
- if len(cx.stack) > MaxStackDepth {
+ if len(cx.stack) > maxStackDepth {
return errors.New("stack overflow")
}
if cx.nextpc != 0 {
@@ -1005,19 +1013,31 @@ func (cx *EvalContext) step() error {
return nil
}
+// oneBlank is a boring stack provided to deets.Cost during checkStep. It is
+// good enough to allow Cost() to not crash. It would be incorrect to provide
+// this stack if there were linear cost opcodes before backBranchEnabledVersion,
+// because the static cost would be wrong. But then again, a static cost model
+// wouldn't work before backBranchEnabledVersion, so such an opcode is already
+// unacceptable. TestLinearOpcodes ensures.
+var oneBlank = []stackValue{{Bytes: []byte{}}}
+
func (cx *EvalContext) checkStep() (int, error) {
cx.instructionStarts[cx.pc] = true
opcode := cx.program[cx.pc]
spec := &opsByOpcode[cx.version][opcode]
if spec.op == nil {
- return 0, fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
+ return 0, fmt.Errorf("illegal opcode 0x%02x", opcode)
}
if (cx.runModeFlags & spec.Modes) == 0 {
return 0, fmt.Errorf("%s not allowed in current mode", spec.Name)
}
deets := spec.Details
if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
- return 0, fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
+ return 0, fmt.Errorf("%s program ends short of immediate values", spec.Name)
+ }
+ opcost := deets.Cost(cx.program, cx.pc, oneBlank)
+ if opcost <= 0 {
+ return 0, fmt.Errorf("%s reported non-positive cost", spec.Name)
}
prevpc := cx.pc
if deets.checkFunc != nil {
@@ -1042,11 +1062,11 @@ func (cx *EvalContext) checkStep() (int, error) {
return 0, fmt.Errorf("branch target %d is not an aligned instruction", pc)
}
}
- return deets.Cost, nil
+ return opcost, nil
}
func opErr(cx *EvalContext) error {
- return errors.New("TEAL runtime encountered err opcode")
+ return errors.New("err opcode executed")
}
func opReturn(cx *EvalContext) error {
@@ -1564,7 +1584,7 @@ func opBytesBinOp(cx *EvalContext, result *big.Int, op func(x, y *big.Int) *big.
last := len(cx.stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize || len(cx.stack[prev].Bytes) > MaxByteMathSize {
+ if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
@@ -1614,7 +1634,7 @@ func opBytesMul(cx *EvalContext) error {
func opBytesSqrt(cx *EvalContext) error {
last := len(cx.stack) - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize {
+ if len(cx.stack[last].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
@@ -1628,7 +1648,7 @@ func opBytesLt(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize || len(cx.stack[prev].Bytes) > MaxByteMathSize {
+ if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
@@ -1665,7 +1685,7 @@ func opBytesEq(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > MaxByteMathSize || len(cx.stack[prev].Bytes) > MaxByteMathSize {
+ if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
@@ -1769,7 +1789,7 @@ func opBytesBitNot(cx *EvalContext) error {
func opBytesZero(cx *EvalContext) error {
last := len(cx.stack) - 1
length := cx.stack[last].Uint
- if length > MaxStringSize {
+ if length > maxStringSize {
return fmt.Errorf("bzero attempted to create a too large string")
}
cx.stack[last].Bytes = make([]byte, length)
@@ -1778,19 +1798,19 @@ func opBytesZero(cx *EvalContext) error {
func opIntConstBlock(cx *EvalContext) error {
var err error
- cx.intc, cx.nextpc, err = parseIntcblock(cx.program, cx.pc)
+ cx.intc, cx.nextpc, err = parseIntcblock(cx.program, cx.pc+1)
return err
}
-func opIntConstN(cx *EvalContext, n uint) error {
- if n >= uint(len(cx.intc)) {
+func opIntConstN(cx *EvalContext, n byte) error {
+ if int(n) >= len(cx.intc) {
return fmt.Errorf("intc [%d] beyond %d constants", n, len(cx.intc))
}
cx.stack = append(cx.stack, stackValue{Uint: cx.intc[n]})
return nil
}
func opIntConstLoad(cx *EvalContext) error {
- n := uint(cx.program[cx.pc+1])
+ n := cx.program[cx.pc+1]
return opIntConstN(cx, n)
}
func opIntConst0(cx *EvalContext) error {
@@ -1807,19 +1827,20 @@ func opIntConst3(cx *EvalContext) error {
}
func opPushInt(cx *EvalContext) error {
- val, bytesUsed := binary.Uvarint(cx.program[cx.pc+1:])
+ pos := cx.pc + 1
+ val, bytesUsed := binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- return fmt.Errorf("could not decode int at pc=%d", cx.pc+1)
+ return fmt.Errorf("could not decode int at program[%d]", pos)
}
sv := stackValue{Uint: val}
cx.stack = append(cx.stack, sv)
- cx.nextpc = cx.pc + 1 + bytesUsed
+ cx.nextpc = pos + bytesUsed
return nil
}
func opByteConstBlock(cx *EvalContext) error {
var err error
- cx.bytec, cx.nextpc, err = parseBytecBlock(cx.program, cx.pc)
+ cx.bytec, cx.nextpc, err = parseBytecBlock(cx.program, cx.pc+1)
return err
}
@@ -1851,12 +1872,12 @@ func opPushBytes(cx *EvalContext) error {
pos := cx.pc + 1
length, bytesUsed := binary.Uvarint(cx.program[pos:])
if bytesUsed <= 0 {
- return fmt.Errorf("could not decode length at pc=%d", pos)
+ return fmt.Errorf("could not decode length at program[%d]", pos)
}
pos += bytesUsed
end := uint64(pos) + length
if end > uint64(len(cx.program)) || end < uint64(pos) {
- return fmt.Errorf("pushbytes too long at pc=%d", pos)
+ return fmt.Errorf("pushbytes too long at program[%d]", pos)
}
sv := stackValue{Bytes: cx.program[pos:end]}
cx.stack = append(cx.stack, sv)
@@ -1911,7 +1932,7 @@ func branchTarget(cx *EvalContext) (int, error) {
branchTooFar = target >= len(cx.program) || target < 0
}
if branchTooFar {
- return 0, errors.New("branch target beyond end of program")
+ return 0, fmt.Errorf("branch target %d outside of program", target)
}
return target, nil
@@ -1919,12 +1940,11 @@ func branchTarget(cx *EvalContext) (int, error) {
// checks any branch that is {op} {int16 be offset}
func checkBranch(cx *EvalContext) error {
- cx.nextpc = cx.pc + 3
target, err := branchTarget(cx)
if err != nil {
return err
}
- if target < cx.nextpc {
+ if target < cx.pc+3 {
// If a branch goes backwards, we should have already noted that an instruction began at that location.
if _, ok := cx.instructionStarts[target]; !ok {
return fmt.Errorf("back branch target %d is not an aligned instruction", target)
@@ -2062,7 +2082,7 @@ func (cx *EvalContext) assetHoldingToValue(holding *basics.AssetHolding, fs asse
return sv, fmt.Errorf("invalid asset_holding_get field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
+ if fs.ftype != sv.argType() {
return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
return sv, nil
@@ -2098,7 +2118,7 @@ func (cx *EvalContext) assetParamsToValue(params *basics.AssetParams, creator ba
return sv, fmt.Errorf("invalid asset_params_get field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
+ if fs.ftype != sv.argType() {
return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
return sv, nil
@@ -2125,7 +2145,7 @@ func (cx *EvalContext) appParamsToValue(params *basics.AppParams, fs appParamsFi
return sv, fmt.Errorf("invalid app_params_get field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
+ if fs.ftype != sv.argType() {
return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
return sv, nil
@@ -2138,7 +2158,10 @@ func TxnFieldToTealValue(txn *transactions.Transaction, groupIndex int, field Tx
}
var cx EvalContext
stxnad := &transactions.SignedTxnWithAD{SignedTxn: transactions.SignedTxn{Txn: *txn}}
- fs := txnFieldSpecByField[field]
+ fs, ok := txnFieldSpecByField(field)
+ if !ok {
+ return basics.TealValue{}, fmt.Errorf("invalid field %s", field)
+ }
sv, err := cx.txnFieldToStack(stxnad, &fs, arrayFieldIdx, groupIndex, inner)
return sv.toTealValue(), err
}
@@ -2220,7 +2243,7 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
case Type:
sv.Bytes = []byte(txn.Type)
case TypeEnum:
- sv.Uint = txnTypeIndexes[string(txn.Type)]
+ sv.Uint = txnTypeMap[string(txn.Type)]
case XferAsset:
sv.Uint = uint64(txn.XferAsset)
case AssetAmount:
@@ -2356,14 +2379,14 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
return sv, fmt.Errorf("invalid txn field %s", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
+ if fs.ftype != sv.argType() {
return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
return sv, nil
}
func (cx *EvalContext) fetchField(field TxnField, expectArray bool) (*txnFieldSpec, error) {
- fs, ok := txnFieldSpecByField[field]
+ fs, ok := txnFieldSpecByField(field)
if !ok || fs.version > cx.version {
return nil, fmt.Errorf("invalid txn field %d", field)
}
@@ -2804,8 +2827,8 @@ func (cx *EvalContext) globalFieldToValue(fs globalFieldSpec) (sv stackValue, er
err = fmt.Errorf("invalid global field %d", fs.field)
}
- if !typecheck(fs.ftype, sv.argType()) {
- err = fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
+ if fs.ftype != sv.argType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
}
return sv, err
@@ -2813,7 +2836,7 @@ func (cx *EvalContext) globalFieldToValue(fs globalFieldSpec) (sv stackValue, er
func opGlobal(cx *EvalContext) error {
globalField := GlobalField(cx.program[cx.pc+1])
- fs, ok := globalFieldSpecByField[globalField]
+ fs, ok := globalFieldSpecByField(globalField)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid global field %s", globalField)
}
@@ -2898,75 +2921,24 @@ func opEd25519VerifyBare(cx *EvalContext) error {
return nil
}
-// leadingZeros needs to be replaced by big.Int.FillBytes
func leadingZeros(size int, b *big.Int) ([]byte, error) {
- data := b.Bytes()
- if size < len(data) {
- return nil, fmt.Errorf("insufficient buffer size: %d < %d", size, len(data))
+ byteLength := (b.BitLen() + 7) / 8
+ if size < byteLength {
+ return nil, fmt.Errorf("insufficient buffer size: %d < %d", size, byteLength)
}
- if size == len(data) {
- return data, nil
- }
-
buf := make([]byte, size)
- copy(buf[size-len(data):], data)
+ b.FillBytes(buf)
return buf, nil
}
-// polynomial returns x³ - 3x + b.
-//
-// TODO: remove this when go-algorand is updated to go 1.15+
-func polynomial(curve *elliptic.CurveParams, x *big.Int) *big.Int {
- x3 := new(big.Int).Mul(x, x)
- x3.Mul(x3, x)
-
- threeX := new(big.Int).Lsh(x, 1)
- threeX.Add(threeX, x)
-
- x3.Sub(x3, threeX)
- x3.Add(x3, curve.B)
- x3.Mod(x3, curve.P)
-
- return x3
-}
-
-// unmarshalCompressed converts a point, serialized by MarshalCompressed, into an x, y pair.
-// It is an error if the point is not in compressed form or is not on the curve.
-// On error, x = nil.
-//
-// TODO: remove this and replace usage with elliptic.UnmarshallCompressed when go-algorand is
-// updated to go 1.15+
-func unmarshalCompressed(curve elliptic.Curve, data []byte) (x, y *big.Int) {
- byteLen := (curve.Params().BitSize + 7) / 8
- if len(data) != 1+byteLen {
- return nil, nil
- }
- if data[0] != 2 && data[0] != 3 { // compressed form
- return nil, nil
- }
- p := curve.Params().P
- x = new(big.Int).SetBytes(data[1:])
- if x.Cmp(p) >= 0 {
- return nil, nil
- }
- // y² = x³ - 3x + b
- y = polynomial(curve.Params(), x)
- y = y.ModSqrt(y, p)
- if y == nil {
- return nil, nil
- }
- if byte(y.Bit(0)) != data[0]&1 {
- y.Neg(y).Mod(y, p)
- }
- if !curve.IsOnCurve(x, y) {
- return nil, nil
- }
- return
+var ecdsaVerifyCosts = []int{
+ Secp256k1: 1700,
+ Secp256r1: 2500,
}
func opEcdsaVerify(cx *EvalContext) error {
ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField[ecdsaCurve]
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid curve %d", ecdsaCurve)
}
@@ -3020,9 +2992,14 @@ func opEcdsaVerify(cx *EvalContext) error {
return nil
}
+var ecdsaDecompressCosts = []int{
+ Secp256k1: 650,
+ Secp256r1: 2400,
+}
+
func opEcdsaPkDecompress(cx *EvalContext) error {
ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField[ecdsaCurve]
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid curve %d", ecdsaCurve)
}
@@ -3041,7 +3018,7 @@ func opEcdsaPkDecompress(cx *EvalContext) error {
return fmt.Errorf("invalid pubkey")
}
} else if fs.field == Secp256r1 {
- x, y = unmarshalCompressed(elliptic.P256(), pubkey)
+ x, y = elliptic.UnmarshalCompressed(elliptic.P256(), pubkey)
if x == nil {
return fmt.Errorf("invalid compressed pubkey")
}
@@ -3066,7 +3043,7 @@ func opEcdsaPkDecompress(cx *EvalContext) error {
func opEcdsaPkRecover(cx *EvalContext) error {
ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField[ecdsaCurve]
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid curve %d", ecdsaCurve)
}
@@ -3820,12 +3797,11 @@ func opAppGlobalDel(cx *EvalContext) error {
return nil
}
-// We have a difficult naming problem here. In some opcodes, TEAL
-// allows (and used to require) ASAs and Apps to to be referenced by
-// their "index" in an app call txn's foreign-apps or foreign-assets
-// arrays. That was a small integer, no more than 2 or so, and was
-// often called an "index". But it was not a basics.AssetIndex or
-// basics.ApplicationIndex.
+// We have a difficult naming problem here. Some opcodes allow (and used to
+// require) ASAs and Apps to to be referenced by their "index" in an app call
+// txn's foreign-apps or foreign-assets arrays. That was a small integer, no
+// more than 2 or so, and was often called an "index". But it was not a
+// basics.AssetIndex or basics.ApplicationIndex.
func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, error) {
if cx.version >= directRefEnabledVersion {
@@ -3911,7 +3887,7 @@ func opAssetHoldingGet(cx *EvalContext) error {
prev := last - 1 // account
holdingField := AssetHoldingField(cx.program[cx.pc+1])
- fs, ok := assetHoldingFieldSpecByField[holdingField]
+ fs, ok := assetHoldingFieldSpecByField(holdingField)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid asset_holding_get field %d", holdingField)
}
@@ -3946,7 +3922,7 @@ func opAssetParamsGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // asset
paramField := AssetParamsField(cx.program[cx.pc+1])
- fs, ok := assetParamsFieldSpecByField[paramField]
+ fs, ok := assetParamsFieldSpecByField(paramField)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid asset_params_get field %d", paramField)
}
@@ -3976,7 +3952,7 @@ func opAppParamsGet(cx *EvalContext) error {
last := len(cx.stack) - 1 // app
paramField := AppParamsField(cx.program[cx.pc+1])
- fs, ok := appParamsFieldSpecByField[paramField]
+ fs, ok := appParamsFieldSpecByField(paramField)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid app_params_get field %d", paramField)
}
@@ -4020,7 +3996,7 @@ func opAcctParamsGet(cx *EvalContext) error {
}
paramField := AcctParamsField(cx.program[cx.pc+1])
- fs, ok := acctParamsFieldSpecByField[paramField]
+ fs, ok := acctParamsFieldSpecByField(paramField)
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid acct_params_get field %d", paramField)
}
@@ -4050,13 +4026,13 @@ func opAcctParamsGet(cx *EvalContext) error {
func opLog(cx *EvalContext) error {
last := len(cx.stack) - 1
- if len(cx.txn.EvalDelta.Logs) >= MaxLogCalls {
- return fmt.Errorf("too many log calls in program. up to %d is allowed", MaxLogCalls)
+ if len(cx.txn.EvalDelta.Logs) >= maxLogCalls {
+ return fmt.Errorf("too many log calls in program. up to %d is allowed", maxLogCalls)
}
log := cx.stack[last]
cx.logSize += len(log.Bytes)
- if cx.logSize > MaxLogSize {
- return fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, MaxLogSize)
+ if cx.logSize > maxLogSize {
+ return fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, maxLogSize)
}
cx.txn.EvalDelta.Logs = append(cx.txn.EvalDelta.Logs, string(log.Bytes))
cx.stack = cx.stack[:last]
@@ -4130,7 +4106,7 @@ func opTxBegin(cx *EvalContext) error {
return addInnerTxn(cx)
}
-func opTxNext(cx *EvalContext) error {
+func opItxnNext(cx *EvalContext) error {
if len(cx.subtxns) == 0 {
return errors.New("itxn_next without itxn_begin")
}
@@ -4444,14 +4420,14 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
return
}
-func opTxField(cx *EvalContext) error {
+func opItxnField(cx *EvalContext) error {
itx := len(cx.subtxns) - 1
if itx < 0 {
return errors.New("itxn_field without itxn_begin")
}
last := len(cx.stack) - 1
field := TxnField(cx.program[cx.pc+1])
- fs, ok := txnFieldSpecByField[field]
+ fs, ok := txnFieldSpecByField(field)
if !ok || fs.itxVersion == 0 || fs.itxVersion > cx.version {
return fmt.Errorf("invalid itxn_field %s", field)
}
@@ -4461,7 +4437,7 @@ func opTxField(cx *EvalContext) error {
return err
}
-func opTxSubmit(cx *EvalContext) error {
+func opItxnSubmit(cx *EvalContext) error {
// Should rarely trigger, since itxn_next checks these too. (but that check
// must be imperfect, see its comment) In contrast to that check, subtxns is
// already populated here.
@@ -4630,9 +4606,9 @@ func base64Decode(encoded []byte, encoding *base64.Encoding) ([]byte, error) {
func opBase64Decode(cx *EvalContext) error {
last := len(cx.stack) - 1
encodingField := Base64Encoding(cx.program[cx.pc+1])
- fs, ok := base64EncodingSpecByField[encodingField]
+ fs, ok := base64EncodingSpecByField(encodingField)
if !ok || fs.version > cx.version {
- return fmt.Errorf("invalid base64_decode encoding %d", encodingField)
+ return fmt.Errorf("invalid base64_decode encoding %s", encodingField)
}
encoding := base64.URLEncoding
@@ -4703,6 +4679,12 @@ func opJSONRef(cx *EvalContext) error {
key := string(cx.stack[last].Bytes)
cx.stack = cx.stack[:last] // pop
+ expectedType := JSONRefType(cx.program[cx.pc+1])
+ fs, ok := jsonRefSpecByField(expectedType)
+ if !ok || fs.version > cx.version {
+ return fmt.Errorf("invalid json_ref type %s", expectedType)
+ }
+
// parse json text
last = len(cx.stack) - 1
parsed, err := parseJSON(cx.stack[last].Bytes)
@@ -4712,11 +4694,11 @@ func opJSONRef(cx *EvalContext) error {
// get value from json
var stval stackValue
- _, ok := parsed[key]
+ _, ok = parsed[key]
if !ok {
return fmt.Errorf("key %s not found in JSON text", key)
}
- expectedType := JSONRefType(cx.program[cx.pc+1])
+
switch expectedType {
case JSONString:
var value string
@@ -4740,7 +4722,7 @@ func opJSONRef(cx *EvalContext) error {
}
stval.Bytes = parsed[key]
default:
- return fmt.Errorf("unsupported json_ref return type, should not have reached here")
+ return fmt.Errorf("unsupported json_ref return type %s", expectedType)
}
cx.stack[last] = stval
return nil
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index f38fb85cc..2b291e808 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -173,23 +173,10 @@ ed25519verify_bare`, pkStr), v)
}
}
-// bitIntFillBytes is a replacement for big.Int.FillBytes from future Go
-func bitIntFillBytes(b *big.Int, buf []byte) []byte {
- for i := range buf {
- buf[i] = 0
- }
- bytes := b.Bytes()
- if len(bytes) > len(buf) {
- panic(fmt.Sprintf("bitIntFillBytes: has %d but got %d buffer", len(bytes), len(buf)))
- }
- copy(buf[len(buf)-len(bytes):], bytes)
- return buf
-}
-
func keyToByte(tb testing.TB, b *big.Int) []byte {
k := make([]byte, 32)
require.NotPanics(tb, func() {
- k = bitIntFillBytes(b, k)
+ b.FillBytes(k)
})
return k
}
@@ -381,18 +368,6 @@ ecdsa_verify Secp256k1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
require.True(t, pass)
}
-// MarshalCompressed converts a point on the curve into the compressed form
-// specified in section 4.3.6 of ANSI X9.62.
-//
-// TODO: replace with elliptic.MarshalCompressed when updating to go 1.15+
-func marshalCompressed(curve elliptic.Curve, x, y *big.Int) []byte {
- byteLen := (curve.Params().BitSize + 7) / 8
- compressed := make([]byte, 1+byteLen)
- compressed[0] = byte(y.Bit(0)) | 2
- bitIntFillBytes(x, compressed[1:])
- return compressed
-}
-
func TestEcdsaWithSecp256r1(t *testing.T) {
if LogicVersion < fidoVersion {
return
@@ -403,7 +378,7 @@ func TestEcdsaWithSecp256r1(t *testing.T) {
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err)
- pk := marshalCompressed(elliptic.P256(), key.X, key.Y)
+ pk := elliptic.MarshalCompressed(elliptic.P256(), key.X, key.Y)
x := keyToByte(t, key.PublicKey.X)
y := keyToByte(t, key.PublicKey.Y)
@@ -437,9 +412,9 @@ byte 0x%s
t.Log("decompressTests i", i)
src := fmt.Sprintf(source, hex.EncodeToString(test.key), hex.EncodeToString(x), hex.EncodeToString(y))
if test.pass {
- testAcceptsWithField(t, src, 5, fidoVersion)
+ testAccepts(t, src, fidoVersion)
} else {
- testPanicsWithField(t, src, 5, fidoVersion)
+ testPanics(t, src, fidoVersion)
}
})
}
@@ -479,9 +454,9 @@ ecdsa_verify Secp256r1
t.Run(fmt.Sprintf("verify/pass=%v", test.pass), func(t *testing.T) {
src := fmt.Sprintf(source, test.data, hex.EncodeToString(test.r), hex.EncodeToString(s), hex.EncodeToString(x), hex.EncodeToString(y))
if test.pass {
- testAcceptsWithField(t, src, 5, fidoVersion)
+ testAccepts(t, src, fidoVersion)
} else {
- testRejectsWithField(t, src, 5, fidoVersion)
+ testRejects(t, src, fidoVersion)
}
})
}
@@ -505,6 +480,8 @@ ecdsa_verify Secp256r1`, hex.EncodeToString(r), hex.EncodeToString(s), hex.Encod
// test compatibility with ethereum signatures
func TestEcdsaEthAddress(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
/*
pip install eth-keys pycryptodome
from eth_keys import keys
@@ -531,19 +508,61 @@ byte 0x5ce9454909639d2d17a3f753ce7d93fa0b9ab12e // addr
testAccepts(t, progText, 5)
}
+func TestEcdsaCostVariation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // Doesn't matter if it passes or fails. Just confirm the cost depends on curve.
+ source := `
+global ZeroAddress // need 32 bytes
+byte "signature r"
+byte "signature s"
+byte "PK x"
+byte "PK y"
+ecdsa_verify Secp256k1
+!
+assert
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-1700-8) + `
+==
+`
+ testAccepts(t, source, 6) // Secp256k1 was 5, but OpcodeBudget is 6
+
+ source = `
+global ZeroAddress // need 32 bytes
+byte "signature r"
+byte "signature s"
+byte "PK x"
+byte "PK y"
+ecdsa_verify Secp256r1
+!
+assert
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-2500-8) + `
+==
+`
+ testAccepts(t, source, fidoVersion)
+}
+
func BenchmarkHash(b *testing.B) {
for _, hash := range []string{"sha256", "keccak256", "sha512_256"} {
- b.Run(hash+"-small", func(b *testing.B) { // hash 32 bytes
+ b.Run(hash+"-0w", func(b *testing.B) { // hash 0 bytes
+ benchmarkOperation(b, "", "byte 0x; "+hash+"; pop", "int 1")
+ })
+ b.Run(hash+"-32", func(b *testing.B) { // hash 32 bytes
benchmarkOperation(b, "int 32; bzero", hash, "pop; int 1")
})
- b.Run(hash+"-med", func(b *testing.B) { // hash 128 bytes
+ b.Run(hash+"-128", func(b *testing.B) { // hash 128 bytes
benchmarkOperation(b, "int 32; bzero",
"dup; concat; dup; concat;"+hash, "pop; int 1")
})
- b.Run(hash+"-big", func(b *testing.B) { // hash 512 bytes
+ b.Run(hash+"-512", func(b *testing.B) { // hash 512 bytes
benchmarkOperation(b, "int 32; bzero",
"dup; concat; dup; concat; dup; concat; dup; concat;"+hash, "pop; int 1")
})
+ b.Run(hash+"-4096", func(b *testing.B) { // hash 4k bytes
+ benchmarkOperation(b, "int 32; bzero",
+ "dup; concat; dup; concat; dup; concat; dup; concat; dup; concat; dup; concat; dup; concat;"+hash, "pop; int 1")
+ })
}
}
@@ -636,7 +655,7 @@ func benchmarkEcdsaGenData(b *testing.B, curve EcdsaCurve) (data []benchmarkEcds
if curve == Secp256k1 {
data[i].pk = secp256k1.CompressPubkey(key.PublicKey.X, key.PublicKey.Y)
} else if curve == Secp256r1 {
- data[i].pk = marshalCompressed(elliptic.P256(), key.PublicKey.X, key.PublicKey.Y)
+ data[i].pk = elliptic.MarshalCompressed(elliptic.P256(), key.PublicKey.X, key.PublicKey.Y)
}
d := []byte("testdata")
@@ -664,10 +683,9 @@ func benchmarkEcdsa(b *testing.B, source string, curve EcdsaCurve) {
if curve == Secp256k1 {
version = 5
} else if curve == Secp256r1 {
- version = 6
+ version = fidoVersion
}
- ops, err := AssembleStringWithVersion(source, version)
- require.NoError(b, err)
+ ops := testProg(b, source, version)
for i := 0; i < b.N; i++ {
data[i].programs = ops.Program
}
@@ -707,7 +725,7 @@ ecdsa_verify Secp256k1`
if LogicVersion >= fidoVersion {
b.Run("ecdsa_verify secp256r1", func(b *testing.B) {
source := `#pragma version ` + strconv.Itoa(fidoVersion) + `
- arg 0d
+ arg 0
arg 1
arg 2
arg 3
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 86271a592..92a8f44aa 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -237,9 +237,8 @@ log
// check err opcode work in both modes
source := "err"
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "encountered err")
- testApp(t, source, defaultEvalParams(nil), "encountered err")
- // require.NotContains(t, err.Error(), "not allowed in current mode")
+ testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "err opcode executed")
+ testApp(t, source, defaultEvalParams(nil), "err opcode executed")
// check that ed25519verify and arg is not allowed in stateful mode between v2-v4
disallowedV4 := []string{
@@ -905,14 +904,14 @@ func TestAssets(t *testing.T) {
}
func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64) {
- for _, field := range AssetHoldingFieldNames {
- fs := AssetHoldingFieldSpecByName[field]
+ for _, field := range assetHoldingFieldNames {
+ fs := assetHoldingFieldSpecByName[field]
if fs.version <= version && !strings.Contains(assetsTestProgram, field) {
t.Errorf("TestAssets missing field %v", field)
}
}
- for _, field := range AssetParamsFieldNames {
- fs := AssetParamsFieldSpecByName[field]
+ for _, field := range assetParamsFieldNames {
+ fs := assetParamsFieldSpecByName[field]
if fs.version <= version && !strings.Contains(assetsTestProgram, field) {
t.Errorf("TestAssets missing field %v", field)
}
@@ -2197,12 +2196,12 @@ func TestEnumFieldErrors(t *testing.T) {
partitiontest.PartitionTest(t)
source := `txn Amount`
- origSpec := txnFieldSpecByField[Amount]
+ origSpec := txnFieldSpecs[Amount]
changed := origSpec
changed.ftype = StackBytes
- txnFieldSpecByField[Amount] = changed
+ txnFieldSpecs[Amount] = changed
defer func() {
- txnFieldSpecByField[Amount] = origSpec
+ txnFieldSpecs[Amount] = origSpec
}()
testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "Amount expected field type is []byte but got uint64")
@@ -2210,12 +2209,12 @@ func TestEnumFieldErrors(t *testing.T) {
source = `global MinTxnFee`
- origMinTxnFs := globalFieldSpecByField[MinTxnFee]
+ origMinTxnFs := globalFieldSpecs[MinTxnFee]
badMinTxnFs := origMinTxnFs
badMinTxnFs.ftype = StackBytes
- globalFieldSpecByField[MinTxnFee] = badMinTxnFs
+ globalFieldSpecs[MinTxnFee] = badMinTxnFs
defer func() {
- globalFieldSpecByField[MinTxnFee] = origMinTxnFs
+ globalFieldSpecs[MinTxnFee] = origMinTxnFs
}()
testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(nil), "MinTxnFee expected field type is []byte but got uint64")
@@ -2242,12 +2241,12 @@ int 55
asset_holding_get AssetBalance
assert
`
- origBalanceFs := assetHoldingFieldSpecByField[AssetBalance]
+ origBalanceFs := assetHoldingFieldSpecs[AssetBalance]
badBalanceFs := origBalanceFs
badBalanceFs.ftype = StackBytes
- assetHoldingFieldSpecByField[AssetBalance] = badBalanceFs
+ assetHoldingFieldSpecs[AssetBalance] = badBalanceFs
defer func() {
- assetHoldingFieldSpecByField[AssetBalance] = origBalanceFs
+ assetHoldingFieldSpecs[AssetBalance] = origBalanceFs
}()
testApp(t, source, ep, "AssetBalance expected field type is []byte but got uint64")
@@ -2256,12 +2255,12 @@ assert
asset_params_get AssetTotal
assert
`
- origTotalFs := assetParamsFieldSpecByField[AssetTotal]
+ origTotalFs := assetParamsFieldSpecs[AssetTotal]
badTotalFs := origTotalFs
badTotalFs.ftype = StackBytes
- assetParamsFieldSpecByField[AssetTotal] = badTotalFs
+ assetParamsFieldSpecs[AssetTotal] = badTotalFs
defer func() {
- assetParamsFieldSpecByField[AssetTotal] = origTotalFs
+ assetParamsFieldSpecs[AssetTotal] = origTotalFs
}()
testApp(t, source, ep, "AssetTotal expected field type is []byte but got uint64")
@@ -2577,6 +2576,8 @@ func appAddr(id int) basics.Address {
}
func TestAppInfo(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
ep, tx, ledger := makeSampleEnv()
require.Equal(t, 888, int(tx.ApplicationID))
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
@@ -2595,6 +2596,8 @@ func TestAppInfo(t *testing.T) {
}
func TestBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
ep := defaultEvalParams(nil)
source := `
global OpcodeBudget
@@ -2609,6 +2612,8 @@ int 695
}
func TestSelfMutate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
ep, _, ledger := makeSampleEnv()
/* In order to test the added protection of mutableAccountReference, we're
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index b405d7c14..93ea9eecc 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -593,6 +593,7 @@ func TestDivw(t *testing.T) {
func TestUint128(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
x := uint128(0, 3)
require.Equal(t, x.String(), "3")
x = uint128(0, 0)
@@ -653,6 +654,7 @@ func TestDivModw(t *testing.T) {
func TestWideMath(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// 2^64 = 18446744073709551616, we use a bunch of numbers close to that below
pattern := `
int %d
@@ -697,11 +699,14 @@ int 1
}
func TestMulDiv(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
// Demonstrate a "function" that expects three u64s on stack,
// and calculates B*C/A. (Following opcode documentation
// convention, C is top-of-stack, B is below it, and A is
// below B.
+ t.Parallel()
muldiv := `
muldiv:
mulw // multiply B*C. puts TWO u64s on stack
@@ -1089,7 +1094,7 @@ func TestOnCompletionConstants(t *testing.T) {
}
require.Less(t, last, max, "too many OnCompletion constants, adjust max limit")
require.Equal(t, int(invalidOnCompletionConst), last)
- require.Equal(t, len(onCompletionConstToUint64), len(onCompletionDescriptions))
+ require.Equal(t, len(onCompletionMap), len(onCompletionDescriptions))
require.Equal(t, len(OnCompletionNames), last)
for v := NoOp; v < invalidOnCompletionConst; v++ {
require.Equal(t, v.String(), OnCompletionNames[int(v)])
@@ -1099,8 +1104,8 @@ func TestOnCompletionConstants(t *testing.T) {
for i := 0; i < last; i++ {
oc := OnCompletionConstType(i)
symbol := oc.String()
- require.Contains(t, onCompletionConstToUint64, symbol)
- require.Equal(t, uint64(i), onCompletionConstToUint64[symbol])
+ require.Contains(t, onCompletionMap, symbol)
+ require.Equal(t, uint64(i), onCompletionMap[symbol])
t.Run(symbol, func(t *testing.T) {
testAccepts(t, fmt.Sprintf("int %s; int %s; ==;", symbol, oc), 1)
})
@@ -1543,7 +1548,7 @@ func TestTxn(t *testing.T) {
}
for i, txnField := range TxnFieldNames {
- fs := txnFieldSpecByField[TxnField(i)]
+ fs := txnFieldSpecs[i]
// Ensure that each field appears, starting in the version it was introduced
for v := uint64(1); v <= uint64(LogicVersion); v++ {
if v < fs.version {
@@ -2855,12 +2860,15 @@ func TestPanic(t *testing.T) {
ops := testProg(t, `int 1`, v)
var hackedOpcode int
var oldSpec OpSpec
+ // Find an unused opcode to temporarily convert to a panicing opcde,
+ // and append it to program.
for opcode, spec := range opsByOpcode[v] {
if spec.op == nil {
hackedOpcode = opcode
oldSpec = spec
opsByOpcode[v][opcode].op = opPanic
opsByOpcode[v][opcode].Modes = modeAny
+ opsByOpcode[v][opcode].Details.FullCost.baseCost = 1
opsByOpcode[v][opcode].Details.checkFunc = checkPanic
ops.Program = append(ops.Program, byte(opcode))
break
@@ -2905,7 +2913,7 @@ func TestProgramTooNew(t *testing.T) {
t.Parallel()
var program [12]byte
- vlen := binary.PutUvarint(program[:], EvalMaxVersion+1)
+ vlen := binary.PutUvarint(program[:], evalMaxVersion+1)
testLogicBytes(t, program[:vlen], defaultEvalParams(nil),
"greater than max supported", "greater than max supported")
}
@@ -2924,10 +2932,10 @@ func TestProgramProtoForbidden(t *testing.T) {
t.Parallel()
var program [12]byte
- vlen := binary.PutUvarint(program[:], EvalMaxVersion)
+ vlen := binary.PutUvarint(program[:], evalMaxVersion)
ep := defaultEvalParams(nil)
ep.Proto = &config.ConsensusParams{
- LogicSigVersion: EvalMaxVersion - 1,
+ LogicSigVersion: evalMaxVersion - 1,
}
testLogicBytes(t, program[:vlen], ep, "greater than protocol", "greater than protocol")
}
@@ -2983,7 +2991,7 @@ int 1`, v)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[7] = 200 // clobber the branch offset to be beyond the end of the program
testLogicBytes(t, ops.Program, defaultEvalParams(nil),
- "beyond end of program", "beyond end of program")
+ "outside of program", "outside of program")
})
}
}
@@ -3006,7 +3014,7 @@ int 1`, v)
require.NoError(t, err)
require.Equal(t, ops.Program, canonicalProgramBytes)
ops.Program[6] = 0x70 // clobber hi byte of branch offset
- testLogicBytes(t, ops.Program, defaultEvalParams(nil), "beyond", "beyond")
+ testLogicBytes(t, ops.Program, defaultEvalParams(nil), "outside", "outside")
})
}
branches := []string{
@@ -3027,7 +3035,7 @@ intc_1
ops.Program[7] = 0xf0 // clobber the branch offset - highly negative
ops.Program[8] = 0xff // clobber the branch offset
testLogicBytes(t, ops.Program, defaultEvalParams(nil),
- "branch target beyond", "branch target beyond")
+ "outside of program", "outside of program")
})
}
}
@@ -3385,11 +3393,11 @@ func BenchmarkUintCmp(b *testing.B) {
})
}
}
-func BenchmarkBigLogic(b *testing.B) {
+func BenchmarkByteLogic(b *testing.B) {
benches := [][]string{
- {"b&", "byte 0x01234576", "byte 0x01ffffffffffffff; b&", "pop; int 1"},
- {"b|", "byte 0x0ffff1234576", "byte 0x1202; b|", "pop; int 1"},
- {"b^", "byte 0x01234576", "byte 0x0223627389; b^", "pop; int 1"},
+ {"b&", "", "byte 0x012345678901feab; byte 0x01ffffffffffffff; b&; pop", "int 1"},
+ {"b|", "", "byte 0x0ffff1234576abef; byte 0x1202120212021202; b|; pop", "int 1"},
+ {"b^", "", "byte 0x0ffff1234576abef; byte 0x1202120212021202; b^; pop", "int 1"},
{"b~", "byte 0x0123457673624736", "b~", "pop; int 1"},
{"b&big",
@@ -3400,7 +3408,7 @@ func BenchmarkBigLogic(b *testing.B) {
"byte 0x0123457601234576012345760123457601234576012345760123457601234576",
"byte 0xffffff01ffffffffffffff01234576012345760123457601234576; b|",
"pop; int 1"},
- {"b^big", "", // u256*u256
+ {"b^big", "", // u256^u256
`byte 0x123457601234576012345760123457601234576012345760123457601234576a
byte 0xf123457601234576012345760123457601234576012345760123457601234576; b^; pop`,
"int 1"},
@@ -3416,7 +3424,7 @@ func BenchmarkBigLogic(b *testing.B) {
}
}
-func BenchmarkBigMath(b *testing.B) {
+func BenchmarkByteMath(b *testing.B) {
benches := [][]string{
{"bpop", "", "byte 0x01ffffffffffffff; pop", "int 1"},
@@ -3467,18 +3475,13 @@ func BenchmarkBase64Decode(b *testing.B) {
bigStd := strings.Repeat(medStd, 4)
bigURL := strings.Repeat(medURL, 4)
- tags := []string{"small", "medium", "large"}
- stds := []string{smallStd, medStd, bigStd}
- urls := []string{smallURL, medURL, bigURL}
+ tags := []string{"0", "64", "1024", "4096"}
+ stds := []string{"", smallStd, medStd, bigStd}
+ urls := []string{"", smallURL, medURL, bigURL}
ops := []string{
- "",
- "len",
+ "int 1; int 2; +; pop",
"b~",
"int 1; pop",
- "keccak256",
- "sha256",
- "sha512_256",
- "sha3_256",
"base64_decode StdEncoding",
"base64_decode URLEncoding",
}
@@ -3603,7 +3606,7 @@ func TestStackOverflow(t *testing.T) {
t.Parallel()
source := "int 1; int 2;"
- for i := 1; i < MaxStackDepth/2; i++ {
+ for i := 1; i < maxStackDepth/2; i++ {
source += "dup2;"
}
testAccepts(t, source+"return", 2)
@@ -3695,6 +3698,7 @@ func TestArgType(t *testing.T) {
func TestApplicationsDisallowOldTeal(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
const source = "int 1"
txn := makeSampleTxn()
@@ -3714,6 +3718,7 @@ func TestApplicationsDisallowOldTeal(t *testing.T) {
func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
const source = "int 1"
// Construct a group of two payments, no rekeying
@@ -3892,10 +3897,24 @@ func TestAllowedOpcodesV3(t *testing.T) {
require.Len(t, tests, cnt)
}
-func TestRekeyFailsOnOldVersion(t *testing.T) {
+// TestLinearOpcodes ensures we don't have a linear cost opcode (which
+// inherently requires a dynamic cost model) before backBranchEnabledVersion,
+// which introduced our dynamic model.
+func TestLinearOpcodes(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
+ for _, spec := range OpSpecs {
+ if spec.Version < backBranchEnabledVersion {
+ require.Zero(t, spec.Details.FullCost.chunkCost, spec)
+ }
+ }
+}
+
+func TestRekeyFailsOnOldVersion(t *testing.T) {
+ partitiontest.PartitionTest(t)
t.Parallel()
+
for v := uint64(0); v < rekeyingEnabledVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := testProg(t, `int 1`, v)
@@ -3921,18 +3940,9 @@ func obfuscate(program string) string {
type evalTester func(pass bool, err error) bool
-func testEvaluation(t *testing.T, program string, introduced uint64, tester evalTester, xtras ...uint64) error {
+func testEvaluation(t *testing.T, program string, introduced uint64, tester evalTester) error {
t.Helper()
- numXtras := len(xtras)
- require.LessOrEqual(t, numXtras, 1, "can handle at most 1 extra parameter but provided %d", numXtras)
- withField := false
- var introducedField uint64
- if numXtras == 1 {
- withField = true
- introducedField = xtras[0]
- }
-
var outer error
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
@@ -3940,9 +3950,6 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval
if v < introduced {
testProg(t, obfuscate(program), v, Expect{0, "...was introduced..."})
return
- } else if withField && v < introducedField {
- testProg(t, obfuscate(program), v, Expect{0, "...available in version..."})
- return
}
ops := testProg(t, program, v)
// Programs created with a previous assembler
@@ -3992,19 +3999,6 @@ func testRejects(t *testing.T, program string, introduced uint64) {
return !pass && err == nil
})
}
-func testRejectsWithField(t *testing.T, program string, introducedOpcode, introducedField uint64) {
- t.Helper()
- testEvaluation(t, program, introducedOpcode, func(pass bool, err error) bool {
- // Returned False, but didn't panic
- return !pass && err == nil
- }, introducedField)
-}
-func testAcceptsWithField(t *testing.T, program string, introducedOpcode, introducedField uint64) {
- t.Helper()
- testEvaluation(t, program, introducedOpcode, func(pass bool, err error) bool {
- return pass && err == nil
- }, introducedField)
-}
func testPanics(t *testing.T, program string, introduced uint64) error {
t.Helper()
return testEvaluation(t, program, introduced, func(pass bool, err error) bool {
@@ -4012,13 +4006,6 @@ func testPanics(t *testing.T, program string, introduced uint64) error {
return !pass && err != nil
})
}
-func testPanicsWithField(t *testing.T, program string, introducedOpcode, introducedField uint64) error {
- t.Helper()
- return testEvaluation(t, program, introducedOpcode, func(pass bool, err error) bool {
- // TEAL panic! not just reject at exit
- return !pass && err != nil
- }, introducedField)
-}
func TestAssert(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -4495,6 +4482,7 @@ func TestBytesBits(t *testing.T) {
func TestBytesConversions(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
testAccepts(t, "byte 0x11; byte 0x10; b+; btoi; int 0x21; ==", 4)
testAccepts(t, "byte 0x0011; byte 0x10; b+; btoi; int 0x21; ==", 4)
}
@@ -4523,15 +4511,15 @@ func TestLog(t *testing.T) {
loglen: 2,
},
{
- source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log;`, MaxLogCalls)),
- loglen: MaxLogCalls,
+ source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log;`, maxLogCalls)),
+ loglen: maxLogCalls,
},
{
source: `int 1; loop: byte "a logging message"; log; int 1; +; dup; int 30; <=; bnz loop;`,
loglen: 30,
},
{
- source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", MaxLogSize)),
+ source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", maxLogSize)),
loglen: 1,
},
}
@@ -4541,7 +4529,7 @@ func TestLog(t *testing.T) {
delta := testApp(t, s.source, ep)
require.Len(t, delta.Logs, s.loglen)
if i == len(testCases)-1 {
- require.Equal(t, strings.Repeat("a", MaxLogSize), delta.Logs[0])
+ require.Equal(t, strings.Repeat("a", maxLogSize), delta.Logs[0])
} else {
for _, l := range delta.Logs {
require.Equal(t, "a logging message", l)
@@ -4556,17 +4544,17 @@ func TestLog(t *testing.T) {
errContains string
}{
{
- source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", MaxLogSize+1)),
- errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
+ source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", maxLogSize+1)),
+ errContains: fmt.Sprintf("> %d bytes limit", maxLogSize),
runMode: runModeApplication,
},
{
source: fmt.Sprintf(`byte "%s"; log; byte "%s"; log; byte "%s"; log; int 1`, msg, msg, msg),
- errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
+ errContains: fmt.Sprintf("> %d bytes limit", maxLogSize),
runMode: runModeApplication,
},
{
- source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log;`, MaxLogCalls+1)),
+ source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log;`, maxLogCalls+1)),
errContains: "too many log calls",
runMode: runModeApplication,
},
@@ -4577,7 +4565,7 @@ func TestLog(t *testing.T) {
},
{
source: fmt.Sprintf(`int 1; loop: byte "%s"; log; int 1; +; dup; int 6; <; bnz loop;`, strings.Repeat(`a`, 400)),
- errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
+ errContains: fmt.Sprintf("> %d bytes limit", maxLogSize),
runMode: runModeApplication,
},
{
@@ -4634,6 +4622,7 @@ func TestPcDetails(t *testing.T) {
})
}
}
+
func TestOpBase64Decode(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -4764,6 +4753,51 @@ By Herman Melville`, "",
}
}
+func TestBase64CostVariation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+byte ""
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-1) + ` // base64_decode cost = 1
+==
+`
+ testAccepts(t, source, fidoVersion)
+
+ source = `
+byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-5) + ` // base64_decode cost = 5 (64 bytes -> 1 + 64/16)
+==
+`
+ testAccepts(t, source, fidoVersion)
+
+ source = `
+byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-5) + ` // base64_decode cost = 5 (60 bytes -> 1 + ceil(60/16))
+==
+`
+ testAccepts(t, source, fidoVersion)
+
+ source = `
+byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_AA=="
+base64_decode URLEncoding
+pop
+global OpcodeBudget
+int ` + fmt.Sprintf("%d", 20_000-3-6) + ` // base64_decode cost = 6 (68 bytes -> 1 + ceil(68/16))
+==
+`
+ testAccepts(t, source, fidoVersion)
+}
+
func TestHasDuplicateKeys(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -5157,3 +5191,8 @@ func TestOpJSONRef(t *testing.T) {
}
}
+
+func TestTypeComplaints(t *testing.T) {
+ t.Skip("Issue #3837")
+ testProg(t, "int 1; return; store 0", AssemblerMaxVersion)
+}
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index 330f58305..83efae00f 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -17,12 +17,48 @@
package logic
import (
+ "fmt"
+
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
)
//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType -output=fields_string.go
+// FieldSpec unifies the various specs for assembly, disassembly, and doc generation.
+type FieldSpec interface {
+ Field() byte
+ Type() StackType
+ OpVersion() uint64
+ Note() string
+ Version() uint64
+}
+
+// fieldSpecMap is something that yields a FieldSpec, given a name for the field
+type fieldSpecMap interface {
+ get(name string) (FieldSpec, bool)
+}
+
+// FieldGroup binds all the info for a field (names, int value, spec access) so
+// they can be attached to opcodes and used by doc generation
+type FieldGroup struct {
+ Name string
+ Doc string
+ Names []string
+ specs fieldSpecMap
+}
+
+// SpecByName returns a FieldsSpec for a name, respecting the "sparseness" of
+// the Names array to hide some names
+func (fg *FieldGroup) SpecByName(name string) (FieldSpec, bool) {
+ if fs, ok := fg.specs.get(name); ok {
+ if fg.Names[fs.Field()] != "" {
+ return fs, true
+ }
+ }
+ return nil, false
+}
+
// TxnField is an enum type for `txn` and `gtxn`
type TxnField int
@@ -165,31 +201,27 @@ const (
// StateProofPK Transaction.StateProofPK
StateProofPK
- invalidTxnField // fence for some setup that loops from Sender..invalidTxnField
+ invalidTxnField // compile-time constant for number of fields
)
-// FieldSpec unifies the various specs for presentation
-type FieldSpec interface {
- Type() StackType
- OpVersion() uint64
- Note() string
- Version() uint64
+func txnFieldSpecByField(f TxnField) (txnFieldSpec, bool) {
+ if int(f) >= len(txnFieldSpecs) {
+ return txnFieldSpec{}, false
+ }
+ return txnFieldSpecs[f], true
}
-// TxnFieldNames are arguments to the 'txn' and 'txnById' opcodes
-var TxnFieldNames []string
-
-var txnFieldSpecByField map[TxnField]txnFieldSpec
+// TxnFieldNames are arguments to the 'txn' family of opcodes.
+var TxnFieldNames [invalidTxnField]string
-// TxnFieldSpecByName gives access to the field specs by field name
-var TxnFieldSpecByName tfNameSpecMap
+var txnFieldSpecByName = make(tfNameSpecMap, len(TxnFieldNames))
// simple interface used by doc generator for fields versioning
type tfNameSpecMap map[string]txnFieldSpec
-func (s tfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s tfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
}
type txnFieldSpec struct {
@@ -199,104 +231,181 @@ type txnFieldSpec struct {
version uint64 // When this field become available to txn/gtxn. 0=always
itxVersion uint64 // When this field become available to itxn_field. 0=never
effects bool // Is this a field on the "effects"? That is, something in ApplyData
+ doc string
}
-func (fs *txnFieldSpec) Type() StackType {
+func (fs txnFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs txnFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *txnFieldSpec) OpVersion() uint64 {
+func (fs txnFieldSpec) OpVersion() uint64 {
return 0
}
-
-func (fs *txnFieldSpec) Version() uint64 {
+func (fs txnFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *txnFieldSpec) Note() string {
- note := txnFieldDocs[fs.field.String()]
+func (fs txnFieldSpec) Note() string {
+ note := fs.doc
if fs.effects {
note = addExtra(note, "Application mode only")
}
return note
}
-var txnFieldSpecs = []txnFieldSpec{
- {Sender, StackBytes, false, 0, 5, false},
- {Fee, StackUint64, false, 0, 5, false},
- {FirstValid, StackUint64, false, 0, 0, false},
- {FirstValidTime, StackUint64, false, 0, 0, false},
- {LastValid, StackUint64, false, 0, 0, false},
- {Note, StackBytes, false, 0, 6, false},
- {Lease, StackBytes, false, 0, 0, false},
- {Receiver, StackBytes, false, 0, 5, false},
- {Amount, StackUint64, false, 0, 5, false},
- {CloseRemainderTo, StackBytes, false, 0, 5, false},
- {VotePK, StackBytes, false, 0, 6, false},
- {SelectionPK, StackBytes, false, 0, 6, false},
- {VoteFirst, StackUint64, false, 0, 6, false},
- {VoteLast, StackUint64, false, 0, 6, false},
- {VoteKeyDilution, StackUint64, false, 0, 6, false},
- {Type, StackBytes, false, 0, 5, false},
- {TypeEnum, StackUint64, false, 0, 5, false},
- {XferAsset, StackUint64, false, 0, 5, false},
- {AssetAmount, StackUint64, false, 0, 5, false},
- {AssetSender, StackBytes, false, 0, 5, false},
- {AssetReceiver, StackBytes, false, 0, 5, false},
- {AssetCloseTo, StackBytes, false, 0, 5, false},
- {GroupIndex, StackUint64, false, 0, 0, false},
- {TxID, StackBytes, false, 0, 0, false},
- {ApplicationID, StackUint64, false, 2, 6, false},
- {OnCompletion, StackUint64, false, 2, 6, false},
- {ApplicationArgs, StackBytes, true, 2, 6, false},
- {NumAppArgs, StackUint64, false, 2, 0, false},
- {Accounts, StackBytes, true, 2, 6, false},
- {NumAccounts, StackUint64, false, 2, 0, false},
- {ApprovalProgram, StackBytes, false, 2, 6, false},
- {ClearStateProgram, StackBytes, false, 2, 6, false},
- {RekeyTo, StackBytes, false, 2, 6, false},
- {ConfigAsset, StackUint64, false, 2, 5, false},
- {ConfigAssetTotal, StackUint64, false, 2, 5, false},
- {ConfigAssetDecimals, StackUint64, false, 2, 5, false},
- {ConfigAssetDefaultFrozen, StackUint64, false, 2, 5, false},
- {ConfigAssetUnitName, StackBytes, false, 2, 5, false},
- {ConfigAssetName, StackBytes, false, 2, 5, false},
- {ConfigAssetURL, StackBytes, false, 2, 5, false},
- {ConfigAssetMetadataHash, StackBytes, false, 2, 5, false},
- {ConfigAssetManager, StackBytes, false, 2, 5, false},
- {ConfigAssetReserve, StackBytes, false, 2, 5, false},
- {ConfigAssetFreeze, StackBytes, false, 2, 5, false},
- {ConfigAssetClawback, StackBytes, false, 2, 5, false},
- {FreezeAsset, StackUint64, false, 2, 5, false},
- {FreezeAssetAccount, StackBytes, false, 2, 5, false},
- {FreezeAssetFrozen, StackUint64, false, 2, 5, false},
- {Assets, StackUint64, true, 3, 6, false},
- {NumAssets, StackUint64, false, 3, 0, false},
- {Applications, StackUint64, true, 3, 6, false},
- {NumApplications, StackUint64, false, 3, 0, false},
- {GlobalNumUint, StackUint64, false, 3, 6, false},
- {GlobalNumByteSlice, StackUint64, false, 3, 6, false},
- {LocalNumUint, StackUint64, false, 3, 6, false},
- {LocalNumByteSlice, StackUint64, false, 3, 6, false},
- {ExtraProgramPages, StackUint64, false, 4, 6, false},
- {Nonparticipation, StackUint64, false, 5, 6, false},
+var txnFieldSpecs = [...]txnFieldSpec{
+ {Sender, StackBytes, false, 0, 5, false, "32 byte address"},
+ {Fee, StackUint64, false, 0, 5, false, "microalgos"},
+ {FirstValid, StackUint64, false, 0, 0, false, "round number"},
+ {FirstValidTime, StackUint64, false, 0, 0, false, "Causes program to fail; reserved for future use"},
+ {LastValid, StackUint64, false, 0, 0, false, "round number"},
+ {Note, StackBytes, false, 0, 6, false, "Any data up to 1024 bytes"},
+ {Lease, StackBytes, false, 0, 0, false, "32 byte lease value"},
+ {Receiver, StackBytes, false, 0, 5, false, "32 byte address"},
+ {Amount, StackUint64, false, 0, 5, false, "microalgos"},
+ {CloseRemainderTo, StackBytes, false, 0, 5, false, "32 byte address"},
+ {VotePK, StackBytes, false, 0, 6, false, "32 byte address"},
+ {SelectionPK, StackBytes, false, 0, 6, false, "32 byte address"},
+ {VoteFirst, StackUint64, false, 0, 6, false, "The first round that the participation key is valid."},
+ {VoteLast, StackUint64, false, 0, 6, false, "The last round that the participation key is valid."},
+ {VoteKeyDilution, StackUint64, false, 0, 6, false, "Dilution for the 2-level participation key"},
+ {Type, StackBytes, false, 0, 5, false, "Transaction type as bytes"},
+ {TypeEnum, StackUint64, false, 0, 5, false, "See table below"},
+ {XferAsset, StackUint64, false, 0, 5, false, "Asset ID"},
+ {AssetAmount, StackUint64, false, 0, 5, false, "value in Asset's units"},
+ {AssetSender, StackBytes, false, 0, 5, false,
+ "32 byte address. Moves asset from AssetSender if Sender is the Clawback address of the asset."},
+ {AssetReceiver, StackBytes, false, 0, 5, false, "32 byte address"},
+ {AssetCloseTo, StackBytes, false, 0, 5, false, "32 byte address"},
+ {GroupIndex, StackUint64, false, 0, 0, false,
+ "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1"},
+ {TxID, StackBytes, false, 0, 0, false, "The computed ID for this transaction. 32 bytes."},
+ {ApplicationID, StackUint64, false, 2, 6, false, "ApplicationID from ApplicationCall transaction"},
+ {OnCompletion, StackUint64, false, 2, 6, false, "ApplicationCall transaction on completion action"},
+ {ApplicationArgs, StackBytes, true, 2, 6, false,
+ "Arguments passed to the application in the ApplicationCall transaction"},
+ {NumAppArgs, StackUint64, false, 2, 0, false, "Number of ApplicationArgs"},
+ {Accounts, StackBytes, true, 2, 6, false, "Accounts listed in the ApplicationCall transaction"},
+ {NumAccounts, StackUint64, false, 2, 0, false, "Number of Accounts"},
+ {ApprovalProgram, StackBytes, false, 2, 6, false, "Approval program"},
+ {ClearStateProgram, StackBytes, false, 2, 6, false, "Clear state program"},
+ {RekeyTo, StackBytes, false, 2, 6, false, "32 byte Sender's new AuthAddr"},
+ {ConfigAsset, StackUint64, false, 2, 5, false, "Asset ID in asset config transaction"},
+ {ConfigAssetTotal, StackUint64, false, 2, 5, false, "Total number of units of this asset created"},
+ {ConfigAssetDecimals, StackUint64, false, 2, 5, false,
+ "Number of digits to display after the decimal place when displaying the asset"},
+ {ConfigAssetDefaultFrozen, StackUint64, false, 2, 5, false,
+ "Whether the asset's slots are frozen by default or not, 0 or 1"},
+ {ConfigAssetUnitName, StackBytes, false, 2, 5, false, "Unit name of the asset"},
+ {ConfigAssetName, StackBytes, false, 2, 5, false, "The asset name"},
+ {ConfigAssetURL, StackBytes, false, 2, 5, false, "URL"},
+ {ConfigAssetMetadataHash, StackBytes, false, 2, 5, false,
+ "32 byte commitment to unspecified asset metadata"},
+ {ConfigAssetManager, StackBytes, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetReserve, StackBytes, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetFreeze, StackBytes, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetClawback, StackBytes, false, 2, 5, false, "32 byte address"},
+ {FreezeAsset, StackUint64, false, 2, 5, false, "Asset ID being frozen or un-frozen"},
+ {FreezeAssetAccount, StackBytes, false, 2, 5, false,
+ "32 byte address of the account whose asset slot is being frozen or un-frozen"},
+ {FreezeAssetFrozen, StackUint64, false, 2, 5, false, "The new frozen value, 0 or 1"},
+ {Assets, StackUint64, true, 3, 6, false, "Foreign Assets listed in the ApplicationCall transaction"},
+ {NumAssets, StackUint64, false, 3, 0, false, "Number of Assets"},
+ {Applications, StackUint64, true, 3, 6, false, "Foreign Apps listed in the ApplicationCall transaction"},
+ {NumApplications, StackUint64, false, 3, 0, false, "Number of Applications"},
+ {GlobalNumUint, StackUint64, false, 3, 6, false, "Number of global state integers in ApplicationCall"},
+ {GlobalNumByteSlice, StackUint64, false, 3, 6, false, "Number of global state byteslices in ApplicationCall"},
+ {LocalNumUint, StackUint64, false, 3, 6, false, "Number of local state integers in ApplicationCall"},
+ {LocalNumByteSlice, StackUint64, false, 3, 6, false, "Number of local state byteslices in ApplicationCall"},
+ {ExtraProgramPages, StackUint64, false, 4, 6, false,
+ "Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program."},
+ {Nonparticipation, StackUint64, false, 5, 6, false, "Marks an account nonparticipating for rewards"},
// "Effects" Last two things are always going to: 0, true
- {Logs, StackBytes, true, 5, 0, true},
- {NumLogs, StackUint64, false, 5, 0, true},
- {CreatedAssetID, StackUint64, false, 5, 0, true},
- {CreatedApplicationID, StackUint64, false, 5, 0, true},
- {LastLog, StackBytes, false, 6, 0, true},
- {StateProofPK, StackBytes, false, 6, 6, false},
-}
-
-// TxnaFieldNames are arguments to the 'txna' opcode
-// It need not be fast, as it's only used for doc generation.
-func TxnaFieldNames() []string {
- var names []string
- for _, fs := range txnFieldSpecs {
+ {Logs, StackBytes, true, 5, 0, true, "Log messages emitted by an application call (only with `itxn` in v5)"},
+ {NumLogs, StackUint64, false, 5, 0, true, "Number of Logs (only with `itxn` in v5)"},
+ {CreatedAssetID, StackUint64, false, 5, 0, true,
+ "Asset ID allocated by the creation of an ASA (only with `itxn` in v5)"},
+ {CreatedApplicationID, StackUint64, false, 5, 0, true,
+ "ApplicationID allocated by the creation of an application (only with `itxn` in v5)"},
+ {LastLog, StackBytes, false, 6, 0, true, "The last message emitted. Empty bytes if none were emitted"},
+
+ // Not an effect. Just added after the effects fields.
+ {StateProofPK, StackBytes, false, 6, 6, false, "64 byte state proof public key commitment"},
+}
+
+// TxnFields contains info on the arguments to the txn* family of opcodes
+var TxnFields = FieldGroup{
+ "txn", "Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/))",
+ TxnFieldNames[:],
+ txnFieldSpecByName,
+}
+
+// TxnScalarFields narows TxnFields to only have the names of scalar fetching opcodes
+var TxnScalarFields = FieldGroup{
+ "txn", "",
+ txnScalarFieldNames(),
+ txnFieldSpecByName,
+}
+
+// txnScalarFieldNames are txn field names that return scalars. Return value is
+// a "sparse" slice, the names appear at their usual index, array slots are set
+// to "". They are laid out this way so that it is possible to get the name
+// from the index value.
+func txnScalarFieldNames() []string {
+ names := make([]string, len(txnFieldSpecs))
+ for i, fs := range txnFieldSpecs {
+ if fs.array {
+ names[i] = ""
+ } else {
+ names[i] = fs.field.String()
+ }
+ }
+ return names
+}
+
+// TxnArrayFields narows TxnFields to only have the names of array fetching opcodes
+var TxnArrayFields = FieldGroup{
+ "txna", "",
+ txnaFieldNames(),
+ txnFieldSpecByName,
+}
+
+// txnaFieldNames are txn field names that return arrays. Return value is a
+// "sparse" slice, the names appear at their usual index, non-array slots are
+// set to "". They are laid out this way so that it is possible to get the name
+// from the index value.
+func txnaFieldNames() []string {
+ names := make([]string, len(txnFieldSpecs))
+ for i, fs := range txnFieldSpecs {
if fs.array {
- names = append(names, fs.field.String())
+ names[i] = fs.field.String()
+ } else {
+ names[i] = ""
+ }
+ }
+ return names
+}
+
+// ItxnSettableFields collects info for itxn_field opcode
+var ItxnSettableFields = FieldGroup{
+ "itxn_field", "",
+ itxnSettableFieldNames(),
+ txnFieldSpecByName,
+}
+
+// itxnSettableFieldNames are txn field names that can be set by
+// itxn_field. Return value is a "sparse" slice, the names appear at their usual
+// index, unsettable slots are set to "". They are laid out this way so that it is
+// possible to get the name from the index value.
+func itxnSettableFieldNames() []string {
+ names := make([]string, len(txnFieldSpecs))
+ for i, fs := range txnFieldSpecs {
+ if fs.itxVersion == 0 {
+ names[i] = ""
+ } else {
+ names[i] = fs.field.String()
}
}
return names
@@ -312,7 +421,7 @@ var innerTxnTypes = map[string]uint64{
}
// TxnTypeNames is the values of Txn.Type in enum order
-var TxnTypeNames = []string{
+var TxnTypeNames = [...]string{
string(protocol.UnknownTx),
string(protocol.PaymentTx),
string(protocol.KeyRegistrationTx),
@@ -322,37 +431,34 @@ var TxnTypeNames = []string{
string(protocol.ApplicationCallTx),
}
-// map TxnTypeName to its enum index, for `txn TypeEnum`
-var txnTypeIndexes map[string]uint64
-
-// map symbolic name to uint64 for assembleInt
-var txnTypeConstToUint64 map[string]uint64
+// map txn type names (long and short) to index/enum value
+var txnTypeMap = make(map[string]uint64)
// OnCompletionConstType is the same as transactions.OnCompletion
type OnCompletionConstType transactions.OnCompletion
const (
// NoOp = transactions.NoOpOC
- NoOp OnCompletionConstType = OnCompletionConstType(transactions.NoOpOC)
+ NoOp = OnCompletionConstType(transactions.NoOpOC)
// OptIn = transactions.OptInOC
- OptIn OnCompletionConstType = OnCompletionConstType(transactions.OptInOC)
+ OptIn = OnCompletionConstType(transactions.OptInOC)
// CloseOut = transactions.CloseOutOC
- CloseOut OnCompletionConstType = OnCompletionConstType(transactions.CloseOutOC)
+ CloseOut = OnCompletionConstType(transactions.CloseOutOC)
// ClearState = transactions.ClearStateOC
- ClearState OnCompletionConstType = OnCompletionConstType(transactions.ClearStateOC)
+ ClearState = OnCompletionConstType(transactions.ClearStateOC)
// UpdateApplication = transactions.UpdateApplicationOC
- UpdateApplication OnCompletionConstType = OnCompletionConstType(transactions.UpdateApplicationOC)
+ UpdateApplication = OnCompletionConstType(transactions.UpdateApplicationOC)
// DeleteApplication = transactions.DeleteApplicationOC
- DeleteApplication OnCompletionConstType = OnCompletionConstType(transactions.DeleteApplicationOC)
+ DeleteApplication = OnCompletionConstType(transactions.DeleteApplicationOC)
// end of constants
- invalidOnCompletionConst OnCompletionConstType = DeleteApplication + 1
+ invalidOnCompletionConst = DeleteApplication + 1
)
// OnCompletionNames is the string names of Txn.OnCompletion, array index is the const value
-var OnCompletionNames []string
+var OnCompletionNames [invalidOnCompletionConst]string
-// onCompletionConstToUint64 map symbolic name to uint64 for assembleInt
-var onCompletionConstToUint64 map[string]uint64
+// onCompletionMap maps symbolic name to uint64 for assembleInt
+var onCompletionMap map[string]uint64
// GlobalField is an enum for `global` opcode
type GlobalField uint64
@@ -403,32 +509,34 @@ const (
// CallerApplicationAddress The Address of the caller app, else ZeroAddress
CallerApplicationAddress
- invalidGlobalField
+ invalidGlobalField // compile-time constant for number of fields
)
// GlobalFieldNames are arguments to the 'global' opcode
-var GlobalFieldNames []string
+var GlobalFieldNames [invalidGlobalField]string
type globalFieldSpec struct {
field GlobalField
ftype StackType
mode runMode
version uint64
+ doc string
}
-func (fs *globalFieldSpec) Type() StackType {
+func (fs globalFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs globalFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *globalFieldSpec) OpVersion() uint64 {
+func (fs globalFieldSpec) OpVersion() uint64 {
return 0
}
-
-func (fs *globalFieldSpec) Version() uint64 {
+func (fs globalFieldSpec) Version() uint64 {
return fs.version
}
-func (fs *globalFieldSpec) Note() string {
- note := globalFieldDocs[fs.field.String()]
+func (fs globalFieldSpec) Note() string {
+ note := fs.doc
if fs.mode == runModeApplication {
note = addExtra(note, "Application mode only.")
}
@@ -436,34 +544,54 @@ func (fs *globalFieldSpec) Note() string {
return note
}
-var globalFieldSpecs = []globalFieldSpec{
- {MinTxnFee, StackUint64, modeAny, 0}, // version 0 is the same as TEAL v1 (initial TEAL release)
- {MinBalance, StackUint64, modeAny, 0},
- {MaxTxnLife, StackUint64, modeAny, 0},
- {ZeroAddress, StackBytes, modeAny, 0},
- {GroupSize, StackUint64, modeAny, 0},
- {LogicSigVersion, StackUint64, modeAny, 2},
- {Round, StackUint64, runModeApplication, 2},
- {LatestTimestamp, StackUint64, runModeApplication, 2},
- {CurrentApplicationID, StackUint64, runModeApplication, 2},
- {CreatorAddress, StackBytes, runModeApplication, 3},
- {CurrentApplicationAddress, StackBytes, runModeApplication, 5},
- {GroupID, StackBytes, modeAny, 5},
- {OpcodeBudget, StackUint64, modeAny, 6},
- {CallerApplicationID, StackUint64, runModeApplication, 6},
- {CallerApplicationAddress, StackBytes, runModeApplication, 6},
+var globalFieldSpecs = [...]globalFieldSpec{
+ // version 0 is the same as TEAL v1 (initial TEAL release)
+ {MinTxnFee, StackUint64, modeAny, 0, "microalgos"},
+ {MinBalance, StackUint64, modeAny, 0, "microalgos"},
+ {MaxTxnLife, StackUint64, modeAny, 0, "rounds"},
+ {ZeroAddress, StackBytes, modeAny, 0, "32 byte address of all zero bytes"},
+ {GroupSize, StackUint64, modeAny, 0,
+ "Number of transactions in this atomic transaction group. At least 1"},
+ {LogicSigVersion, StackUint64, modeAny, 2, "Maximum supported version"},
+ {Round, StackUint64, runModeApplication, 2, "Current round number"},
+ {LatestTimestamp, StackUint64, runModeApplication, 2,
+ "Last confirmed block UNIX timestamp. Fails if negative"},
+ {CurrentApplicationID, StackUint64, runModeApplication, 2, "ID of current application executing"},
+ {CreatorAddress, StackBytes, runModeApplication, 3,
+ "Address of the creator of the current application"},
+ {CurrentApplicationAddress, StackBytes, runModeApplication, 5,
+ "Address that the current application controls"},
+ {GroupID, StackBytes, modeAny, 5,
+ "ID of the transaction group. 32 zero bytes if the transaction is not part of a group."},
+ {OpcodeBudget, StackUint64, modeAny, 6,
+ "The remaining cost that can be spent by opcodes in this program."},
+ {CallerApplicationID, StackUint64, runModeApplication, 6,
+ "The application ID of the application that called this application. 0 if this application is at the top-level."},
+ {CallerApplicationAddress, StackBytes, runModeApplication, 6,
+ "The application address of the application that called this application. ZeroAddress if this application is at the top-level."},
}
-var globalFieldSpecByField map[GlobalField]globalFieldSpec
+func globalFieldSpecByField(f GlobalField) (globalFieldSpec, bool) {
+ if int(f) >= len(globalFieldSpecs) {
+ return globalFieldSpec{}, false
+ }
+ return globalFieldSpecs[f], true
+}
-// GlobalFieldSpecByName gives access to the field specs by field name
-var GlobalFieldSpecByName gfNameSpecMap
+var globalFieldSpecByName = make(gfNameSpecMap, len(GlobalFieldNames))
type gfNameSpecMap map[string]globalFieldSpec
-func (s gfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s gfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// GlobalFields has info on the global opcode's immediate
+var GlobalFields = FieldGroup{
+ "global", "Fields",
+ GlobalFieldNames[:],
+ globalFieldSpecByName,
}
// EcdsaCurve is an enum for `ecdsa_` opcodes
@@ -474,50 +602,59 @@ const (
Secp256k1 EcdsaCurve = iota
// Secp256r1 curve
Secp256r1
- invalidEcdsaCurve
+ invalidEcdsaCurve // compile-time constant for number of fields
)
-// EcdsaCurveNames are arguments to the 'ecdsa_' opcode
-var EcdsaCurveNames []string
+var ecdsaCurveNames [invalidEcdsaCurve]string
type ecdsaCurveSpec struct {
field EcdsaCurve
version uint64
+ doc string
}
-func (fs *ecdsaCurveSpec) Type() StackType {
- return StackNone // Will not show, since all are the same
+func (fs ecdsaCurveSpec) Field() byte {
+ return byte(fs.field)
}
-
-func (fs *ecdsaCurveSpec) OpVersion() uint64 {
+func (fs ecdsaCurveSpec) Type() StackType {
+ return StackNone // Will not show, since all are untyped
+}
+func (fs ecdsaCurveSpec) OpVersion() uint64 {
return 5
}
-
-func (fs *ecdsaCurveSpec) Version() uint64 {
+func (fs ecdsaCurveSpec) Version() uint64 {
return fs.version
}
+func (fs ecdsaCurveSpec) Note() string {
+ return fs.doc
+}
-func (fs *ecdsaCurveSpec) Note() string {
- note := EcdsaCurveDocs[fs.field.String()]
- return note
+var ecdsaCurveSpecs = [...]ecdsaCurveSpec{
+ {Secp256k1, 5, "secp256k1 curve, used in Bitcoin"},
+ {Secp256r1, fidoVersion, "secp256r1 curve, NIST standard"},
}
-var ecdsaCurveSpecs = []ecdsaCurveSpec{
- {Secp256k1, 5},
- {Secp256r1, fidoVersion},
+func ecdsaCurveSpecByField(c EcdsaCurve) (ecdsaCurveSpec, bool) {
+ if int(c) >= len(ecdsaCurveSpecs) {
+ return ecdsaCurveSpec{}, false
+ }
+ return ecdsaCurveSpecs[c], true
}
-var ecdsaCurveSpecByField map[EcdsaCurve]ecdsaCurveSpec
+var ecdsaCurveSpecByName = make(ecdsaCurveNameSpecMap, len(ecdsaCurveNames))
-// EcdsaCurveSpecByName gives access to the field specs by field name
-var EcdsaCurveSpecByName ecDsaCurveNameSpecMap
+type ecdsaCurveNameSpecMap map[string]ecdsaCurveSpec
-// simple interface used by doc generator for fields versioning
-type ecDsaCurveNameSpecMap map[string]ecdsaCurveSpec
+func (s ecdsaCurveNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
-func (s ecDsaCurveNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+// EcdsaCurves collects details about the constants used to describe EcdsaCurves
+var EcdsaCurves = FieldGroup{
+ "ECDSA", "Curves",
+ ecdsaCurveNames[:],
+ ecdsaCurveSpecByName,
}
// Base64Encoding is an enum for the `base64decode` opcode
@@ -528,45 +665,61 @@ const (
URLEncoding Base64Encoding = iota
// StdEncoding represents the standard encoding of the RFC
StdEncoding
- invalidBase64Alphabet
+ invalidBase64Encoding // compile-time constant for number of fields
)
-// After running `go generate` these strings will be available:
-var base64EncodingNames [2]string = [...]string{URLEncoding.String(), StdEncoding.String()}
+var base64EncodingNames [invalidBase64Encoding]string
type base64EncodingSpec struct {
field Base64Encoding
- ftype StackType
version uint64
}
-var base64EncodingSpecs = []base64EncodingSpec{
- {URLEncoding, StackBytes, 6},
- {StdEncoding, StackBytes, 6},
+var base64EncodingSpecs = [...]base64EncodingSpec{
+ {URLEncoding, 6},
+ {StdEncoding, 6},
}
-var base64EncodingSpecByField map[Base64Encoding]base64EncodingSpec
-var base64EncodingSpecByName base64EncodingSpecMap
+func base64EncodingSpecByField(e Base64Encoding) (base64EncodingSpec, bool) {
+ if int(e) >= len(base64EncodingSpecs) {
+ return base64EncodingSpec{}, false
+ }
+ return base64EncodingSpecs[e], true
+}
+
+var base64EncodingSpecByName = make(base64EncodingSpecMap, len(base64EncodingNames))
type base64EncodingSpecMap map[string]base64EncodingSpec
-func (fs *base64EncodingSpec) Type() StackType {
- return fs.ftype
+func (fs base64EncodingSpec) Field() byte {
+ return byte(fs.field)
}
-
-func (fs *base64EncodingSpec) OpVersion() uint64 {
+func (fs base64EncodingSpec) Type() StackType {
+ return StackAny // Will not show in docs, since all are untyped
+}
+func (fs base64EncodingSpec) OpVersion() uint64 {
return 6
}
-
-func (fs *base64EncodingSpec) Version() uint64 {
+func (fs base64EncodingSpec) Version() uint64 {
return fs.version
}
-
-func (fs *base64EncodingSpec) Note() string {
+func (fs base64EncodingSpec) Note() string {
note := "" // no doc list?
return note
}
+func (s base64EncodingSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// Base64Encodings describes the base64_encode immediate
+var Base64Encodings = FieldGroup{
+ "base64", "Encodings",
+ base64EncodingNames[:],
+ base64EncodingSpecByName,
+}
+
// JSONRefType is an enum for the `json_ref` opcode
type JSONRefType int
@@ -577,11 +730,10 @@ const (
JSONUint64
// JSONObject represents json object
JSONObject
- invalidJSONRefType
+ invalidJSONRefType // compile-time constant for number of fields
)
-// After running `go generate` these strings will be available:
-var jsonRefTypeNames [3]string = [...]string{JSONString.String(), JSONUint64.String(), JSONObject.String()}
+var jsonRefTypeNames [invalidJSONRefType]string
type jsonRefSpec struct {
field JSONRefType
@@ -589,17 +741,52 @@ type jsonRefSpec struct {
version uint64
}
-var jsonRefSpecs = []jsonRefSpec{
+var jsonRefSpecs = [...]jsonRefSpec{
{JSONString, StackBytes, fidoVersion},
{JSONUint64, StackUint64, fidoVersion},
{JSONObject, StackBytes, fidoVersion},
}
-var jsonRefSpecByField map[JSONRefType]jsonRefSpec
-var jsonRefSpecByName jsonRefSpecMap
+func jsonRefSpecByField(r JSONRefType) (jsonRefSpec, bool) {
+ if int(r) >= len(jsonRefSpecs) {
+ return jsonRefSpec{}, false
+ }
+ return jsonRefSpecs[r], true
+}
+
+var jsonRefSpecByName = make(jsonRefSpecMap, len(jsonRefTypeNames))
type jsonRefSpecMap map[string]jsonRefSpec
+func (fs jsonRefSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs jsonRefSpec) Type() StackType {
+ return fs.ftype
+}
+func (fs jsonRefSpec) OpVersion() uint64 {
+ return fidoVersion
+}
+func (fs jsonRefSpec) Version() uint64 {
+ return fs.version
+}
+func (fs jsonRefSpec) Note() string {
+ note := "" // no doc list?
+ return note
+}
+
+func (s jsonRefSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// JSONRefTypes describes the json_ref immediate
+var JSONRefTypes = FieldGroup{
+ "json_ref", "Types",
+ jsonRefTypeNames[:],
+ jsonRefSpecByName,
+}
+
// AssetHoldingField is an enum for `asset_holding_get` opcode
type AssetHoldingField int
@@ -608,50 +795,60 @@ const (
AssetBalance AssetHoldingField = iota
// AssetFrozen AssetHolding.Frozen
AssetFrozen
- invalidAssetHoldingField
+ invalidAssetHoldingField // compile-time constant for number of fields
)
-// AssetHoldingFieldNames are arguments to the 'asset_holding_get' opcode
-var AssetHoldingFieldNames []string
+var assetHoldingFieldNames [invalidAssetHoldingField]string
type assetHoldingFieldSpec struct {
field AssetHoldingField
ftype StackType
version uint64
+ doc string
}
-func (fs *assetHoldingFieldSpec) Type() StackType {
+func (fs assetHoldingFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs assetHoldingFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *assetHoldingFieldSpec) OpVersion() uint64 {
+func (fs assetHoldingFieldSpec) OpVersion() uint64 {
return 2
}
-
-func (fs *assetHoldingFieldSpec) Version() uint64 {
+func (fs assetHoldingFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *assetHoldingFieldSpec) Note() string {
- note := assetHoldingFieldDocs[fs.field.String()]
- return note
+func (fs assetHoldingFieldSpec) Note() string {
+ return fs.doc
}
-var assetHoldingFieldSpecs = []assetHoldingFieldSpec{
- {AssetBalance, StackUint64, 2},
- {AssetFrozen, StackUint64, 2},
+var assetHoldingFieldSpecs = [...]assetHoldingFieldSpec{
+ {AssetBalance, StackUint64, 2, "Amount of the asset unit held by this account"},
+ {AssetFrozen, StackUint64, 2, "Is the asset frozen or not"},
}
-var assetHoldingFieldSpecByField map[AssetHoldingField]assetHoldingFieldSpec
+func assetHoldingFieldSpecByField(f AssetHoldingField) (assetHoldingFieldSpec, bool) {
+ if int(f) >= len(assetHoldingFieldSpecs) {
+ return assetHoldingFieldSpec{}, false
+ }
+ return assetHoldingFieldSpecs[f], true
+}
-// AssetHoldingFieldSpecByName gives access to the field specs by field name
-var AssetHoldingFieldSpecByName ahfNameSpecMap
+var assetHoldingFieldSpecByName = make(ahfNameSpecMap, len(assetHoldingFieldNames))
type ahfNameSpecMap map[string]assetHoldingFieldSpec
-func (s ahfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s ahfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// AssetHoldingFields describes asset_holding_get's immediates
+var AssetHoldingFields = FieldGroup{
+ "asset_holding", "Fields",
+ assetHoldingFieldNames[:],
+ assetHoldingFieldSpecByName,
}
// AssetParamsField is an enum for `asset_params_get` opcode
@@ -684,60 +881,70 @@ const (
// AssetCreator is not *in* the Params, but it is uniquely determined.
AssetCreator
- invalidAssetParamsField
+ invalidAssetParamsField // compile-time constant for number of fields
)
-// AssetParamsFieldNames are arguments to the 'asset_params_get' opcode
-var AssetParamsFieldNames []string
+var assetParamsFieldNames [invalidAssetParamsField]string
type assetParamsFieldSpec struct {
field AssetParamsField
ftype StackType
version uint64
+ doc string
}
-func (fs *assetParamsFieldSpec) Type() StackType {
+func (fs assetParamsFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs assetParamsFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *assetParamsFieldSpec) OpVersion() uint64 {
+func (fs assetParamsFieldSpec) OpVersion() uint64 {
return 2
}
-
-func (fs *assetParamsFieldSpec) Version() uint64 {
+func (fs assetParamsFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *assetParamsFieldSpec) Note() string {
- note := assetParamsFieldDocs[fs.field.String()]
- return note
+func (fs assetParamsFieldSpec) Note() string {
+ return fs.doc
}
-var assetParamsFieldSpecs = []assetParamsFieldSpec{
- {AssetTotal, StackUint64, 2},
- {AssetDecimals, StackUint64, 2},
- {AssetDefaultFrozen, StackUint64, 2},
- {AssetUnitName, StackBytes, 2},
- {AssetName, StackBytes, 2},
- {AssetURL, StackBytes, 2},
- {AssetMetadataHash, StackBytes, 2},
- {AssetManager, StackBytes, 2},
- {AssetReserve, StackBytes, 2},
- {AssetFreeze, StackBytes, 2},
- {AssetClawback, StackBytes, 2},
- {AssetCreator, StackBytes, 5},
+var assetParamsFieldSpecs = [...]assetParamsFieldSpec{
+ {AssetTotal, StackUint64, 2, "Total number of units of this asset"},
+ {AssetDecimals, StackUint64, 2, "See AssetParams.Decimals"},
+ {AssetDefaultFrozen, StackUint64, 2, "Frozen by default or not"},
+ {AssetUnitName, StackBytes, 2, "Asset unit name"},
+ {AssetName, StackBytes, 2, "Asset name"},
+ {AssetURL, StackBytes, 2, "URL with additional info about the asset"},
+ {AssetMetadataHash, StackBytes, 2, "Arbitrary commitment"},
+ {AssetManager, StackBytes, 2, "Manager address"},
+ {AssetReserve, StackBytes, 2, "Reserve address"},
+ {AssetFreeze, StackBytes, 2, "Freeze address"},
+ {AssetClawback, StackBytes, 2, "Clawback address"},
+ {AssetCreator, StackBytes, 5, "Creator address"},
}
-var assetParamsFieldSpecByField map[AssetParamsField]assetParamsFieldSpec
+func assetParamsFieldSpecByField(f AssetParamsField) (assetParamsFieldSpec, bool) {
+ if int(f) >= len(assetParamsFieldSpecs) {
+ return assetParamsFieldSpec{}, false
+ }
+ return assetParamsFieldSpecs[f], true
+}
-// AssetParamsFieldSpecByName gives access to the field specs by field name
-var AssetParamsFieldSpecByName apfNameSpecMap
+var assetParamsFieldSpecByName = make(apfNameSpecMap, len(assetParamsFieldNames))
type apfNameSpecMap map[string]assetParamsFieldSpec
-func (s apfNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s apfNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// AssetParamsFields describes asset_params_get's immediates
+var AssetParamsFields = FieldGroup{
+ "asset_params", "Fields",
+ assetParamsFieldNames[:],
+ assetParamsFieldSpecByName,
}
// AppParamsField is an enum for `app_params_get` opcode
@@ -765,58 +972,68 @@ const (
// AppAddress is also not *in* the Params, but can be derived
AppAddress
- invalidAppParamsField
+ invalidAppParamsField // compile-time constant for number of fields
)
-// AppParamsFieldNames are arguments to the 'app_params_get' opcode
-var AppParamsFieldNames []string
+var appParamsFieldNames [invalidAppParamsField]string
type appParamsFieldSpec struct {
field AppParamsField
ftype StackType
version uint64
+ doc string
}
-func (fs *appParamsFieldSpec) Type() StackType {
+func (fs appParamsFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs appParamsFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *appParamsFieldSpec) OpVersion() uint64 {
+func (fs appParamsFieldSpec) OpVersion() uint64 {
return 5
}
-
-func (fs *appParamsFieldSpec) Version() uint64 {
+func (fs appParamsFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *appParamsFieldSpec) Note() string {
- note := appParamsFieldDocs[fs.field.String()]
- return note
+func (fs appParamsFieldSpec) Note() string {
+ return fs.doc
}
-var appParamsFieldSpecs = []appParamsFieldSpec{
- {AppApprovalProgram, StackBytes, 5},
- {AppClearStateProgram, StackBytes, 5},
- {AppGlobalNumUint, StackUint64, 5},
- {AppGlobalNumByteSlice, StackUint64, 5},
- {AppLocalNumUint, StackUint64, 5},
- {AppLocalNumByteSlice, StackUint64, 5},
- {AppExtraProgramPages, StackUint64, 5},
- {AppCreator, StackBytes, 5},
- {AppAddress, StackBytes, 5},
+var appParamsFieldSpecs = [...]appParamsFieldSpec{
+ {AppApprovalProgram, StackBytes, 5, "Bytecode of Approval Program"},
+ {AppClearStateProgram, StackBytes, 5, "Bytecode of Clear State Program"},
+ {AppGlobalNumUint, StackUint64, 5, "Number of uint64 values allowed in Global State"},
+ {AppGlobalNumByteSlice, StackUint64, 5, "Number of byte array values allowed in Global State"},
+ {AppLocalNumUint, StackUint64, 5, "Number of uint64 values allowed in Local State"},
+ {AppLocalNumByteSlice, StackUint64, 5, "Number of byte array values allowed in Local State"},
+ {AppExtraProgramPages, StackUint64, 5, "Number of Extra Program Pages of code space"},
+ {AppCreator, StackBytes, 5, "Creator address"},
+ {AppAddress, StackBytes, 5, "Address for which this application has authority"},
}
-var appParamsFieldSpecByField map[AppParamsField]appParamsFieldSpec
+func appParamsFieldSpecByField(f AppParamsField) (appParamsFieldSpec, bool) {
+ if int(f) >= len(appParamsFieldSpecs) {
+ return appParamsFieldSpec{}, false
+ }
+ return appParamsFieldSpecs[f], true
+}
-// AppParamsFieldSpecByName gives access to the field specs by field name
-var AppParamsFieldSpecByName appNameSpecMap
+var appParamsFieldSpecByName = make(appNameSpecMap, len(appParamsFieldNames))
// simple interface used by doc generator for fields versioning
type appNameSpecMap map[string]appParamsFieldSpec
-func (s appNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s appNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
+}
+
+// AppParamsFields describes app_params_get's immediates
+var AppParamsFields = FieldGroup{
+ "app_params", "Fields",
+ appParamsFieldNames[:],
+ appParamsFieldSpecByName,
}
// AcctParamsField is an enum for `acct_params_get` opcode
@@ -830,199 +1047,146 @@ const (
//AcctAuthAddr is the rekeyed address if any, else ZeroAddress
AcctAuthAddr
- invalidAcctParamsField
+ invalidAcctParamsField // compile-time constant for number of fields
)
-// AcctParamsFieldNames are arguments to the 'acct_params_get' opcode
-var AcctParamsFieldNames []string
+var acctParamsFieldNames [invalidAcctParamsField]string
type acctParamsFieldSpec struct {
field AcctParamsField
ftype StackType
version uint64
+ doc string
}
-func (fs *acctParamsFieldSpec) Type() StackType {
+func (fs acctParamsFieldSpec) Field() byte {
+ return byte(fs.field)
+}
+func (fs acctParamsFieldSpec) Type() StackType {
return fs.ftype
}
-
-func (fs *acctParamsFieldSpec) OpVersion() uint64 {
+func (fs acctParamsFieldSpec) OpVersion() uint64 {
return 6
}
-
-func (fs *acctParamsFieldSpec) Version() uint64 {
+func (fs acctParamsFieldSpec) Version() uint64 {
return fs.version
}
-
-func (fs *acctParamsFieldSpec) Note() string {
- note := acctParamsFieldDocs[fs.field.String()]
- return note
+func (fs acctParamsFieldSpec) Note() string {
+ return fs.doc
}
-var acctParamsFieldSpecs = []acctParamsFieldSpec{
- {AcctBalance, StackUint64, 6},
- {AcctMinBalance, StackUint64, 6},
- {AcctAuthAddr, StackBytes, 6},
+var acctParamsFieldSpecs = [...]acctParamsFieldSpec{
+ {AcctBalance, StackUint64, 6, "Account balance in microalgos"},
+	{AcctMinBalance, StackUint64, 6, "Minimum required balance for account, in microalgos"},
+	{AcctAuthAddr, StackBytes, 6, "Address the account is rekeyed to"},
}
-var acctParamsFieldSpecByField map[AcctParamsField]acctParamsFieldSpec
+func acctParamsFieldSpecByField(f AcctParamsField) (acctParamsFieldSpec, bool) {
+ if int(f) >= len(acctParamsFieldSpecs) {
+ return acctParamsFieldSpec{}, false
+ }
+ return acctParamsFieldSpecs[f], true
+}
-// AcctParamsFieldSpecByName gives access to the field specs by field name
-var AcctParamsFieldSpecByName acctNameSpecMap
+var acctParamsFieldSpecByName = make(acctNameSpecMap, len(acctParamsFieldNames))
-// simple interface used by doc generator for fields versioning
type acctNameSpecMap map[string]acctParamsFieldSpec
-func (s acctNameSpecMap) SpecByName(name string) FieldSpec {
- fs := s[name]
- return &fs
+func (s acctNameSpecMap) get(name string) (FieldSpec, bool) {
+ fs, ok := s[name]
+ return fs, ok
}
-func init() {
- TxnFieldNames = make([]string, int(invalidTxnField))
- for fi := Sender; fi < invalidTxnField; fi++ {
- TxnFieldNames[fi] = fi.String()
- }
- txnFieldSpecByField = make(map[TxnField]txnFieldSpec, len(TxnFieldNames))
- for i, s := range txnFieldSpecs {
- if int(s.field) != i {
- panic("txnFieldSpecs disjoint with TxnField enum")
- }
- txnFieldSpecByField[s.field] = s
- }
- TxnFieldSpecByName = make(map[string]txnFieldSpec, len(TxnFieldNames))
- for i, tfn := range TxnFieldNames {
- TxnFieldSpecByName[tfn] = txnFieldSpecByField[TxnField(i)]
- }
+// AcctParamsFields describes acct_params_get's immediates
+var AcctParamsFields = FieldGroup{
+ "acct_params", "Fields",
+ acctParamsFieldNames[:],
+ acctParamsFieldSpecByName,
+}
- GlobalFieldNames = make([]string, int(invalidGlobalField))
- for i := MinTxnFee; i < invalidGlobalField; i++ {
- GlobalFieldNames[i] = i.String()
- }
- globalFieldSpecByField = make(map[GlobalField]globalFieldSpec, len(GlobalFieldNames))
- for i, s := range globalFieldSpecs {
- if int(s.field) != i {
- panic("globalFieldSpecs disjoint with GlobalField enum")
+func init() {
+ equal := func(x int, y int) {
+ if x != y {
+ panic(fmt.Sprintf("%d != %d", x, y))
}
- globalFieldSpecByField[s.field] = s
- }
- GlobalFieldSpecByName = make(gfNameSpecMap, len(GlobalFieldNames))
- for i, gfn := range GlobalFieldNames {
- GlobalFieldSpecByName[gfn] = globalFieldSpecByField[GlobalField(i)]
}
- EcdsaCurveNames = make([]string, int(invalidEcdsaCurve))
- for i := Secp256k1; i < invalidEcdsaCurve; i++ {
- EcdsaCurveNames[i] = i.String()
- }
- ecdsaCurveSpecByField = make(map[EcdsaCurve]ecdsaCurveSpec, len(EcdsaCurveNames))
- for _, s := range ecdsaCurveSpecs {
- ecdsaCurveSpecByField[s.field] = s
- }
-
- EcdsaCurveSpecByName = make(ecDsaCurveNameSpecMap, len(EcdsaCurveNames))
- for i, ahfn := range EcdsaCurveNames {
- EcdsaCurveSpecByName[ahfn] = ecdsaCurveSpecByField[EcdsaCurve(i)]
+ equal(len(txnFieldSpecs), len(TxnFieldNames))
+ for i, s := range txnFieldSpecs {
+ equal(int(s.field), i)
+ TxnFieldNames[s.field] = s.field.String()
+ txnFieldSpecByName[s.field.String()] = s
}
- base64EncodingSpecByField = make(map[Base64Encoding]base64EncodingSpec, len(base64EncodingNames))
- for _, s := range base64EncodingSpecs {
- base64EncodingSpecByField[s.field] = s
+ equal(len(globalFieldSpecs), len(GlobalFieldNames))
+ for i, s := range globalFieldSpecs {
+ equal(int(s.field), i)
+ GlobalFieldNames[s.field] = s.field.String()
+ globalFieldSpecByName[s.field.String()] = s
}
- base64EncodingSpecByName = make(base64EncodingSpecMap, len(base64EncodingNames))
- for i, encoding := range base64EncodingNames {
- base64EncodingSpecByName[encoding] = base64EncodingSpecByField[Base64Encoding(i)]
+ equal(len(ecdsaCurveSpecs), len(ecdsaCurveNames))
+ for i, s := range ecdsaCurveSpecs {
+ equal(int(s.field), i)
+ ecdsaCurveNames[s.field] = s.field.String()
+ ecdsaCurveSpecByName[s.field.String()] = s
}
- base64EncodingSpecByField = make(map[Base64Encoding]base64EncodingSpec, len(base64EncodingNames))
- for _, s := range base64EncodingSpecs {
- base64EncodingSpecByField[s.field] = s
+ equal(len(base64EncodingSpecs), len(base64EncodingNames))
+ for i, s := range base64EncodingSpecs {
+ equal(int(s.field), i)
+ base64EncodingNames[i] = s.field.String()
+ base64EncodingSpecByName[s.field.String()] = s
}
- base64EncodingSpecByName = make(base64EncodingSpecMap, len(base64EncodingNames))
- for i, encoding := range base64EncodingNames {
- base64EncodingSpecByName[encoding] = base64EncodingSpecByField[Base64Encoding(i)]
+ equal(len(jsonRefSpecs), len(jsonRefTypeNames))
+ for i, s := range jsonRefSpecs {
+ equal(int(s.field), i)
+ jsonRefTypeNames[i] = s.field.String()
+ jsonRefSpecByName[s.field.String()] = s
}
- jsonRefSpecByField = make(map[JSONRefType]jsonRefSpec, len(jsonRefTypeNames))
- for _, s := range jsonRefSpecs {
- jsonRefSpecByField[s.field] = s
+ equal(len(assetHoldingFieldSpecs), len(assetHoldingFieldNames))
+ for i, s := range assetHoldingFieldSpecs {
+ equal(int(s.field), i)
+ assetHoldingFieldNames[i] = s.field.String()
+ assetHoldingFieldSpecByName[s.field.String()] = s
}
- jsonRefSpecByName = make(jsonRefSpecMap, len(jsonRefTypeNames))
- for i, typename := range jsonRefTypeNames {
- jsonRefSpecByName[typename] = jsonRefSpecByField[JSONRefType(i)]
+ equal(len(assetParamsFieldSpecs), len(assetParamsFieldNames))
+ for i, s := range assetParamsFieldSpecs {
+ equal(int(s.field), i)
+ assetParamsFieldNames[i] = s.field.String()
+ assetParamsFieldSpecByName[s.field.String()] = s
}
- AssetHoldingFieldNames = make([]string, int(invalidAssetHoldingField))
- for i := AssetBalance; i < invalidAssetHoldingField; i++ {
- AssetHoldingFieldNames[i] = i.String()
- }
- assetHoldingFieldSpecByField = make(map[AssetHoldingField]assetHoldingFieldSpec, len(AssetHoldingFieldNames))
- for _, s := range assetHoldingFieldSpecs {
- assetHoldingFieldSpecByField[s.field] = s
- }
- AssetHoldingFieldSpecByName = make(ahfNameSpecMap, len(AssetHoldingFieldNames))
- for i, ahfn := range AssetHoldingFieldNames {
- AssetHoldingFieldSpecByName[ahfn] = assetHoldingFieldSpecByField[AssetHoldingField(i)]
+ equal(len(appParamsFieldSpecs), len(appParamsFieldNames))
+ for i, s := range appParamsFieldSpecs {
+ equal(int(s.field), i)
+ appParamsFieldNames[i] = s.field.String()
+ appParamsFieldSpecByName[s.field.String()] = s
}
- AssetParamsFieldNames = make([]string, int(invalidAssetParamsField))
- for i := AssetTotal; i < invalidAssetParamsField; i++ {
- AssetParamsFieldNames[i] = i.String()
- }
- assetParamsFieldSpecByField = make(map[AssetParamsField]assetParamsFieldSpec, len(AssetParamsFieldNames))
- for _, s := range assetParamsFieldSpecs {
- assetParamsFieldSpecByField[s.field] = s
- }
- AssetParamsFieldSpecByName = make(apfNameSpecMap, len(AssetParamsFieldNames))
- for i, apfn := range AssetParamsFieldNames {
- AssetParamsFieldSpecByName[apfn] = assetParamsFieldSpecByField[AssetParamsField(i)]
+ equal(len(acctParamsFieldSpecs), len(acctParamsFieldNames))
+ for i, s := range acctParamsFieldSpecs {
+ equal(int(s.field), i)
+ acctParamsFieldNames[i] = s.field.String()
+ acctParamsFieldSpecByName[s.field.String()] = s
}
- AppParamsFieldNames = make([]string, int(invalidAppParamsField))
- for i := AppApprovalProgram; i < invalidAppParamsField; i++ {
- AppParamsFieldNames[i] = i.String()
- }
- appParamsFieldSpecByField = make(map[AppParamsField]appParamsFieldSpec, len(AppParamsFieldNames))
- for _, s := range appParamsFieldSpecs {
- appParamsFieldSpecByField[s.field] = s
- }
- AppParamsFieldSpecByName = make(appNameSpecMap, len(AppParamsFieldNames))
- for i, apfn := range AppParamsFieldNames {
- AppParamsFieldSpecByName[apfn] = appParamsFieldSpecByField[AppParamsField(i)]
- }
-
- AcctParamsFieldNames = make([]string, int(invalidAcctParamsField))
- for i := AcctBalance; i < invalidAcctParamsField; i++ {
- AcctParamsFieldNames[i] = i.String()
- }
- acctParamsFieldSpecByField = make(map[AcctParamsField]acctParamsFieldSpec, len(AcctParamsFieldNames))
- for _, s := range acctParamsFieldSpecs {
- acctParamsFieldSpecByField[s.field] = s
- }
- AcctParamsFieldSpecByName = make(acctNameSpecMap, len(AcctParamsFieldNames))
- for i, apfn := range AcctParamsFieldNames {
- AcctParamsFieldSpecByName[apfn] = acctParamsFieldSpecByField[AcctParamsField(i)]
- }
-
- txnTypeIndexes = make(map[string]uint64, len(TxnTypeNames))
+ txnTypeMap = make(map[string]uint64)
for i, tt := range TxnTypeNames {
- txnTypeIndexes[tt] = uint64(i)
+ txnTypeMap[tt] = uint64(i)
}
-
- txnTypeConstToUint64 = make(map[string]uint64, len(TxnTypeNames))
- for tt, v := range txnTypeIndexes {
- symbol := TypeNameDescriptions[tt]
- txnTypeConstToUint64[symbol] = v
+ for k, v := range TypeNameDescriptions {
+ txnTypeMap[v] = txnTypeMap[k]
}
- OnCompletionNames = make([]string, int(invalidOnCompletionConst))
- onCompletionConstToUint64 = make(map[string]uint64, len(OnCompletionNames))
+ onCompletionMap = make(map[string]uint64, len(OnCompletionNames))
for oc := NoOp; oc < invalidOnCompletionConst; oc++ {
symbol := oc.String()
OnCompletionNames[oc] = symbol
- onCompletionConstToUint64[symbol] = uint64(oc)
+ onCompletionMap[symbol] = uint64(oc)
}
+
}
diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go
index 4e3cdbc88..cd2298463 100644
--- a/data/transactions/logic/fields_string.go
+++ b/data/transactions/logic/fields_string.go
@@ -221,12 +221,11 @@ func _() {
_ = x[ClearState-3]
_ = x[UpdateApplication-4]
_ = x[DeleteApplication-5]
- _ = x[invalidOnCompletionConst-6]
}
-const _OnCompletionConstType_name = "NoOpOptInCloseOutClearStateUpdateApplicationDeleteApplicationinvalidOnCompletionConst"
+const _OnCompletionConstType_name = "NoOpOptInCloseOutClearStateUpdateApplicationDeleteApplication"
-var _OnCompletionConstType_index = [...]uint8{0, 4, 9, 17, 27, 44, 61, 85}
+var _OnCompletionConstType_index = [...]uint8{0, 4, 9, 17, 27, 44, 61}
func (i OnCompletionConstType) String() string {
if i >= OnCompletionConstType(len(_OnCompletionConstType_index)-1) {
@@ -259,10 +258,10 @@ func _() {
var x [1]struct{}
_ = x[URLEncoding-0]
_ = x[StdEncoding-1]
- _ = x[invalidBase64Alphabet-2]
+ _ = x[invalidBase64Encoding-2]
}
-const _Base64Encoding_name = "URLEncodingStdEncodinginvalidBase64Alphabet"
+const _Base64Encoding_name = "URLEncodingStdEncodinginvalidBase64Encoding"
var _Base64Encoding_index = [...]uint8{0, 11, 22, 43}
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index b6d1a4989..4a8128c87 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -45,9 +45,9 @@ func TestGlobalFieldsVersions(t *testing.T) {
for _, field := range fields {
text := fmt.Sprintf("global %s", field.field.String())
// check assembler fails if version before introduction
- testLine(t, text, assemblerNoVersion, "...available in version...")
+ testLine(t, text, assemblerNoVersion, "...was introduced in...")
for v := uint64(0); v < field.version; v++ {
- testLine(t, text, v, "...available in version...")
+ testLine(t, text, v, "...was introduced in...")
}
testLine(t, text, field.version, "")
@@ -108,7 +108,7 @@ func TestTxnFieldVersions(t *testing.T) {
// TEAL version
txn.Txn.RekeyTo = basics.Address{}
txgroup := makeSampleTxnGroup(txn)
- asmDefaultError := "...available in version ..."
+ asmDefaultError := "...was introduced in ..."
for _, fs := range fields {
field := fs.field.String()
for _, command := range tests {
@@ -176,7 +176,7 @@ func TestTxnEffectsAvailable(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- for _, fs := range txnFieldSpecByField {
+ for _, fs := range txnFieldSpecs {
if !fs.effects {
continue
}
@@ -225,7 +225,7 @@ func TestAssetParamsFieldsVersions(t *testing.T) {
ep, _, _ := makeSampleEnv()
ep.Proto.LogicSigVersion = v
if field.version > v {
- testProg(t, text, v, Expect{3, "...available in version..."})
+ testProg(t, text, v, Expect{3, "...was introduced in..."})
ops := testProg(t, text, field.version) // assemble in the future
ops.Program[0] = byte(v)
testAppBytes(t, ops.Program, ep, "invalid asset_params_get field")
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
new file mode 100644
index 000000000..7a6cef2d9
--- /dev/null
+++ b/data/transactions/logic/langspec.json
@@ -0,0 +1,2222 @@
+{
+ "EvalMaxVersion": 7,
+ "LogicSigVersion": 6,
+ "Ops": [
+ {
+ "Opcode": 0,
+ "Name": "err",
+ "Size": 1,
+ "Doc": "Fail immediately.",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 1,
+ "Name": "sha256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "SHA256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 2,
+ "Name": "keccak256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "Keccak256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 3,
+ "Name": "sha512_256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "SHA512_256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 4,
+ "Name": "ed25519verify",
+ "Args": "BBB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey =\u003e {0 or 1}",
+ "DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 5,
+ "Name": "ecdsa_verify",
+ "Args": "BBBBB",
+ "Returns": "U",
+ "Size": 2,
+ "Doc": "for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey =\u003e {0 or 1}",
+ "DocExtra": "The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.",
+ "ImmediateNote": "{uint8 curve index}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 6,
+ "Name": "ecdsa_pk_decompress",
+ "Args": "B",
+ "Returns": "BB",
+ "Size": 2,
+ "Doc": "decompress pubkey A into components X, Y",
+ "DocExtra": "The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.",
+ "ImmediateNote": "{uint8 curve index}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 7,
+ "Name": "ecdsa_pk_recover",
+ "Args": "BUBB",
+ "Returns": "BB",
+ "Size": 2,
+ "Doc": "for (data A, recovery id B, signature C, D) recover a public key",
+    "DocExtra": "S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to derive a public key. All values are big-endian encoded. The signed data must be 32 bytes long.",
+ "ImmediateNote": "{uint8 curve index}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 8,
+ "Name": "+",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A plus B. Fail on overflow.",
+ "DocExtra": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 9,
+ "Name": "-",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A minus B. Fail if B \u003e A.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 10,
+ "Name": "/",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A divided by B (truncated division). Fail if B == 0.",
+ "DocExtra": "`divmodw` is available to divide the two-element values produced by `mulw` and `addw`.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 11,
+ "Name": "*",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A times B. Fail on overflow.",
+ "DocExtra": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 12,
+ "Name": "\u003c",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A less than B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 13,
+ "Name": "\u003e",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A greater than B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 14,
+ "Name": "\u003c=",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A less than or equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 15,
+ "Name": "\u003e=",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A greater than or equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 16,
+ "Name": "\u0026\u0026",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is not zero and B is not zero =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 17,
+ "Name": "||",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is not zero or B is not zero =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 18,
+ "Name": "==",
+ "Args": "..",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 19,
+ "Name": "!=",
+ "Args": "..",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A is not equal to B =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 20,
+ "Name": "!",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A == 0 yields 1; else 0",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 21,
+ "Name": "len",
+ "Args": "B",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "yields length of byte value A",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 22,
+ "Name": "itob",
+ "Args": "U",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "converts uint64 A to big-endian byte array, always of length 8",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 23,
+ "Name": "btoi",
+ "Args": "B",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "converts big-endian byte array A to uint64. Fails if len(A) \u003e 8. Padded by leading 0s if len(A) \u003c 8.",
+ "DocExtra": "`btoi` fails if the input is longer than 8 bytes.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 24,
+ "Name": "%",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A modulo B. Fail if B == 0.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 25,
+ "Name": "|",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A bitwise-or B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 26,
+ "Name": "\u0026",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A bitwise-and B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 27,
+ "Name": "^",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A bitwise-xor B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 28,
+ "Name": "~",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "bitwise invert value A",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 29,
+ "Name": "mulw",
+ "Args": "UU",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "A times B as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 30,
+ "Name": "addw",
+ "Args": "UU",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "A plus B as a 128-bit result. X is the carry-bit, Y is the low-order 64 bits.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 31,
+ "Name": "divmodw",
+ "Args": "UUUU",
+ "Returns": "UUUU",
+ "Size": 1,
+ "Doc": "W,X = (A,B / C,D); Y,Z = (A,B modulo C,D)",
+ "DocExtra": "The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 32,
+ "Name": "intcblock",
+ "Size": 0,
+ "Doc": "prepare block of uint64 constants for use by intc",
+ "DocExtra": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
+ "ImmediateNote": "{varuint length} [{varuint value}, ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 33,
+ "Name": "intc",
+ "Returns": "U",
+ "Size": 2,
+ "Doc": "Ith constant from intcblock",
+ "ImmediateNote": "{uint8 int constant index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 34,
+ "Name": "intc_0",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 0 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 35,
+ "Name": "intc_1",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 1 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 36,
+ "Name": "intc_2",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 2 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 37,
+ "Name": "intc_3",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "constant 3 from intcblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 38,
+ "Name": "bytecblock",
+ "Size": 0,
+ "Doc": "prepare block of byte-array constants for use by bytec",
+ "DocExtra": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
+ "ImmediateNote": "{varuint length} [({varuint value length} bytes), ...]",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 39,
+ "Name": "bytec",
+ "Returns": "B",
+ "Size": 2,
+ "Doc": "Ith constant from bytecblock",
+ "ImmediateNote": "{uint8 byte constant index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 40,
+ "Name": "bytec_0",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 0 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 41,
+ "Name": "bytec_1",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 1 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 42,
+ "Name": "bytec_2",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 2 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 43,
+ "Name": "bytec_3",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "constant 3 from bytecblock",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 44,
+ "Name": "arg",
+ "Returns": "B",
+ "Size": 2,
+ "Doc": "Nth LogicSig argument",
+ "ImmediateNote": "{uint8 arg index N}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 45,
+ "Name": "arg_0",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 0",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 46,
+ "Name": "arg_1",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 1",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 47,
+ "Name": "arg_2",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 2",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 48,
+ "Name": "arg_3",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "LogicSig argument 3",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 49,
+ "Name": "txn",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of current transaction",
+ "DocExtra": "FirstValidTime causes the program to fail. The field is reserved for future use.",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 50,
+ "Name": "global",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "MinTxnFee",
+ "MinBalance",
+ "MaxTxnLife",
+ "ZeroAddress",
+ "GroupSize",
+ "LogicSigVersion",
+ "Round",
+ "LatestTimestamp",
+ "CurrentApplicationID",
+ "CreatorAddress",
+ "CurrentApplicationAddress",
+ "GroupID",
+ "OpcodeBudget",
+ "CallerApplicationID",
+ "CallerApplicationAddress"
+ ],
+ "ArgEnumTypes": "UUUBUUUUUBBBUUB",
+ "Doc": "global field F",
+ "ImmediateNote": "{uint8 global field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 51,
+ "Name": "gtxn",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the Tth transaction in the current group",
+ "DocExtra": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 52,
+ "Name": "load",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "Ith scratch space value. All scratch spaces are 0 at program start.",
+ "ImmediateNote": "{uint8 position in scratch space to load from}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 53,
+ "Name": "store",
+ "Args": ".",
+ "Size": 2,
+ "Doc": "store A to the Ith scratch space",
+ "ImmediateNote": "{uint8 position in scratch space to store to}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 54,
+ "Name": "txna",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F of the current transaction",
+ "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 55,
+ "Name": "gtxna",
+ "Returns": ".",
+ "Size": 4,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F from the Tth transaction in the current group",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 56,
+ "Name": "gtxns",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the Ath transaction in the current group",
+ "DocExtra": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 57,
+ "Name": "gtxnsa",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F from the Ath transaction in the current group",
+ "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 58,
+ "Name": "gload",
+ "Returns": ".",
+ "Size": 3,
+ "Doc": "Ith scratch space value of the Tth transaction in the current group",
+ "DocExtra": "`gload` fails unless the requested transaction is an ApplicationCall and T \u003c GroupIndex.",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 position in scratch space to load from}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 59,
+ "Name": "gloads",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "Ith scratch space value of the Ath transaction in the current group",
+ "DocExtra": "`gloads` fails unless the requested transaction is an ApplicationCall and A \u003c GroupIndex.",
+ "ImmediateNote": "{uint8 position in scratch space to load from}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 60,
+ "Name": "gaid",
+ "Returns": "U",
+ "Size": 2,
+ "Doc": "ID of the asset or application created in the Tth transaction of the current group",
+ "DocExtra": "`gaid` fails unless the requested transaction created an asset or application and T \u003c GroupIndex.",
+ "ImmediateNote": "{uint8 transaction group index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 61,
+ "Name": "gaids",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "ID of the asset or application created in the Ath transaction of the current group",
+ "DocExtra": "`gaids` fails unless the requested transaction created an asset or application and A \u003c GroupIndex.",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 62,
+ "Name": "loads",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "Ath scratch space value. All scratch spaces are 0 at program start.",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 63,
+ "Name": "stores",
+ "Args": "U.",
+ "Size": 1,
+ "Doc": "store B to the Ath scratch space",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 64,
+ "Name": "bnz",
+ "Args": "U",
+ "Size": 3,
+ "Doc": "branch to TARGET if value A is not zero",
+ "DocExtra": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 65,
+ "Name": "bz",
+ "Args": "U",
+ "Size": 3,
+ "Doc": "branch to TARGET if value A is zero",
+ "DocExtra": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 66,
+ "Name": "b",
+ "Size": 3,
+ "Doc": "branch unconditionally to TARGET",
+ "DocExtra": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 67,
+ "Name": "return",
+ "Args": "U",
+ "Size": 1,
+ "Doc": "use A as success value; end",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 68,
+ "Name": "assert",
+ "Args": "U",
+ "Size": 1,
+ "Doc": "immediately fail unless A is a non-zero number",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 72,
+ "Name": "pop",
+ "Args": ".",
+ "Size": 1,
+ "Doc": "discard A",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 73,
+ "Name": "dup",
+ "Args": ".",
+ "Returns": "..",
+ "Size": 1,
+ "Doc": "duplicate A",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 74,
+ "Name": "dup2",
+ "Args": "..",
+ "Returns": "....",
+ "Size": 1,
+ "Doc": "duplicate A and B",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 75,
+ "Name": "dig",
+ "Args": ".",
+ "Returns": "..",
+ "Size": 2,
+ "Doc": "Nth value from the top of the stack. dig 0 is equivalent to dup",
+ "ImmediateNote": "{uint8 depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 76,
+ "Name": "swap",
+ "Args": "..",
+ "Returns": "..",
+ "Size": 1,
+ "Doc": "swaps A and B on stack",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 77,
+ "Name": "select",
+ "Args": "..U",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "selects one of two values based on top-of-stack: B if C != 0, else A",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 78,
+ "Name": "cover",
+ "Args": ".",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth \u003c= N.",
+ "ImmediateNote": "{uint8 depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 79,
+ "Name": "uncover",
+ "Args": ".",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth \u003c= N.",
+ "ImmediateNote": "{uint8 depth}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 80,
+ "Name": "concat",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "join A and B",
+ "DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 81,
+ "Name": "substring",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 3,
+ "Doc": "A range of bytes from A starting at S up to but not including E. If E \u003c S, or either is larger than the array length, the program fails",
+ "ImmediateNote": "{uint8 start position} {uint8 end position}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 82,
+ "Name": "substring3",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A range of bytes from A starting at B up to but not including C. If C \u003c B, or either is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 83,
+ "Name": "getbit",
+ "Args": ".U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "DocExtra": "see explanation of bit ordering in setbit",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 84,
+ "Name": "setbit",
+ "Args": ".UU",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
+ "DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 85,
+ "Name": "getbyte",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 86,
+ "Name": "setbyte",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 87,
+ "Name": "extract",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 3,
+ "Doc": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
+ "ImmediateNote": "{uint8 start position} {uint8 length}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 88,
+ "Name": "extract3",
+ "Args": "BUU",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 89,
+ "Name": "extract_uint16",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 90,
+ "Name": "extract_uint32",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 91,
+ "Name": "extract_uint64",
+ "Args": "BU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 92,
+ "Name": "base64_decode",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 2,
+ "Doc": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
+ "DocExtra": "Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See \u003ca href=\"https://rfc-editor.org/rfc/rfc4648.html#section-4\"\u003eRFC 4648\u003c/a\u003e (sections 4 and 5). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
+ "ImmediateNote": "{uint8 encoding index}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 93,
+ "Name": "json_ref",
+ "Args": "BB",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A",
+ "DocExtra": "specify the return type with an immediate arg either as JSONUint64 or JSONString or JSONObject.",
+ "ImmediateNote": "{string return type}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 96,
+ "Name": "balance",
+ "Args": ".",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 97,
+ "Name": "app_opted_in",
+ "Args": ".U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if account A is opted in to application B, else 0",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 98,
+ "Name": "app_local_get",
+ "Args": ".B",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "local state of the key B in the current application in account A",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 99,
+ "Name": "app_local_get_ex",
+ "Args": ".UB",
+ "Returns": ".U",
+ "Size": 1,
+ "Doc": "X is the local state of application B, key C in account A. Y is 1 if key existed, else 0",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 100,
+ "Name": "app_global_get",
+ "Args": "B",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "global state of the key A in the current application",
+ "DocExtra": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 101,
+ "Name": "app_global_get_ex",
+ "Args": "UB",
+ "Returns": ".U",
+ "Size": 1,
+ "Doc": "X is the global state of application A, key B. Y is 1 if key existed, else 0",
+ "DocExtra": "params: Txn.ForeignApps offset (or, since v4, an _available_ application id), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 102,
+ "Name": "app_local_put",
+ "Args": ".B.",
+ "Size": 1,
+ "Doc": "write C to key B in account A's local state of the current application",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key, value.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 103,
+ "Name": "app_global_put",
+ "Args": "B.",
+ "Size": 1,
+ "Doc": "write B to key A in the global state of the current application",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 104,
+ "Name": "app_local_del",
+ "Args": ".B",
+ "Size": 1,
+ "Doc": "delete key B from account A's local state of the current application",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 105,
+ "Name": "app_global_del",
+ "Args": "B",
+ "Size": 1,
+ "Doc": "delete key A from the global state of the current application",
+ "DocExtra": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 112,
+ "Name": "asset_holding_get",
+ "Args": ".U",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AssetBalance",
+ "AssetFrozen"
+ ],
+ "ArgEnumTypes": "UU",
+ "Doc": "X is field F from account A's holding of asset B. Y is 1 if A is opted into B, else 0",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.",
+ "ImmediateNote": "{uint8 asset holding field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 113,
+ "Name": "asset_params_get",
+ "Args": "U",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AssetTotal",
+ "AssetDecimals",
+ "AssetDefaultFrozen",
+ "AssetUnitName",
+ "AssetName",
+ "AssetURL",
+ "AssetMetadataHash",
+ "AssetManager",
+ "AssetReserve",
+ "AssetFreeze",
+ "AssetClawback",
+ "AssetCreator"
+ ],
+ "ArgEnumTypes": "UUUBBBBBBBBB",
+ "Doc": "X is field F from asset A. Y is 1 if A exists, else 0",
+ "DocExtra": "params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id. Return: did_exist flag (1 if the asset existed and 0 otherwise), value.",
+ "ImmediateNote": "{uint8 asset params field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 114,
+ "Name": "app_params_get",
+ "Args": "U",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AppApprovalProgram",
+ "AppClearStateProgram",
+ "AppGlobalNumUint",
+ "AppGlobalNumByteSlice",
+ "AppLocalNumUint",
+ "AppLocalNumByteSlice",
+ "AppExtraProgramPages",
+ "AppCreator",
+ "AppAddress"
+ ],
+ "ArgEnumTypes": "BBUUUUUBB",
+ "Doc": "X is field F from app A. Y is 1 if A exists, else 0",
+ "DocExtra": "params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag (1 if the application existed and 0 otherwise), value.",
+ "ImmediateNote": "{uint8 app params field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 115,
+ "Name": "acct_params_get",
+ "Args": ".",
+ "Returns": ".U",
+ "Size": 2,
+ "ArgEnum": [
+ "AcctBalance",
+ "AcctMinBalance",
+ "AcctAuthAddr"
+ ],
+ "ArgEnumTypes": "UUB",
+ "Doc": "X is field F from account A. Y is 1 if A owns positive algos, else 0",
+ "ImmediateNote": "{uint8 account params field index}",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 120,
+ "Name": "min_balance",
+ "Args": ".",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "get minimum required balance for account A, in microalgos. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 128,
+ "Name": "pushbytes",
+ "Returns": "B",
+ "Size": 0,
+ "Doc": "immediate BYTES",
+ "DocExtra": "pushbytes args are not added to the bytecblock during assembly processes",
+ "ImmediateNote": "{varuint length} {bytes}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 129,
+ "Name": "pushint",
+ "Returns": "U",
+ "Size": 0,
+ "Doc": "immediate UINT",
+ "DocExtra": "pushint args are not added to the intcblock during assembly processes",
+ "ImmediateNote": "{varuint int}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 132,
+ "Name": "ed25519verify_bare",
+ "Args": "BBB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 136,
+ "Name": "callsub",
+ "Size": 3,
+ "Doc": "branch unconditionally to TARGET, saving the next instruction on the call stack",
+ "DocExtra": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "ImmediateNote": "{int16 branch offset, big-endian}",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 137,
+ "Name": "retsub",
+ "Size": 1,
+ "Doc": "pop the top instruction from the call stack and branch to it",
+ "DocExtra": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
+ {
+ "Opcode": 144,
+ "Name": "shl",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A times 2^B, modulo 2^64",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 145,
+ "Name": "shr",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A divided by 2^B",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 146,
+ "Name": "sqrt",
+ "Args": "U",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "The largest integer I such that I^2 \u003c= A",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 147,
+ "Name": "bitlen",
+ "Args": ".",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "The highest set bit in A. If A is a byte-array, it is interpreted as a big-endian unsigned integer. bitlen of 0 is 0, bitlen of 8 is 4",
+ "DocExtra": "bitlen interprets arrays as big-endian integers, unlike setbit/getbit",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 148,
+ "Name": "exp",
+ "Args": "UU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A raised to the Bth power. Fail if A == B == 0 and on overflow",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 149,
+ "Name": "expw",
+ "Args": "UU",
+ "Returns": "UU",
+ "Size": 1,
+ "Doc": "A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. Fail if A == B == 0 or if the results exceeds 2^128-1",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 150,
+ "Name": "bsqrt",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "The largest integer I such that I^2 \u003c= A. A and I are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 151,
+ "Name": "divw",
+ "Args": "UUU",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "A,B / C. Fail if C == 0 or if result overflows.",
+ "DocExtra": "The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 152,
+ "Name": "sha3_256",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "SHA3_256 hash of value A, yields [32]byte",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 160,
+ "Name": "b+",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A plus B. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 161,
+ "Name": "b-",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A minus B. A and B are interpreted as big-endian unsigned integers. Fail on underflow.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 162,
+ "Name": "b/",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A divided by B (truncated division). A and B are interpreted as big-endian unsigned integers. Fail if B is zero.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 163,
+ "Name": "b*",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A times B. A and B are interpreted as big-endian unsigned integers.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 164,
+ "Name": "b\u003c",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is less than B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 165,
+ "Name": "b\u003e",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is greater than B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 166,
+ "Name": "b\u003c=",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is less than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 167,
+ "Name": "b\u003e=",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is greater than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 168,
+ "Name": "b==",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "1 if A is equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 169,
+ "Name": "b!=",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "0 if A is equal to B, else 1. A and B are interpreted as big-endian unsigned integers",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 170,
+ "Name": "b%",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A modulo B. A and B are interpreted as big-endian unsigned integers. Fail if B is zero.",
+ "Groups": [
+ "Byte Array Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 171,
+ "Name": "b|",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A bitwise-or B. A and B are zero-left extended to the greater of their lengths",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 172,
+ "Name": "b\u0026",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A bitwise-and B. A and B are zero-left extended to the greater of their lengths",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 173,
+ "Name": "b^",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A bitwise-xor B. A and B are zero-left extended to the greater of their lengths",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 174,
+ "Name": "b~",
+ "Args": "B",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "A with all bits inverted",
+ "Groups": [
+ "Byte Array Logic"
+ ]
+ },
+ {
+ "Opcode": 175,
+ "Name": "bzero",
+ "Args": "U",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "zero filled byte-array of length A",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 176,
+ "Name": "log",
+ "Args": "B",
+ "Size": 1,
+ "Doc": "write A to log state of the current application",
+ "DocExtra": "`log` fails if called more than MaxLogCalls times in a program, or if the sum of logged bytes exceeds 1024 bytes.",
+ "Groups": [
+ "State Access"
+ ]
+ },
+ {
+ "Opcode": 177,
+ "Name": "itxn_begin",
+ "Size": 1,
+ "Doc": "begin preparation of a new inner transaction in a new transaction group",
+ "DocExtra": "`itxn_begin` initializes Sender to the application address; Fee to the minimum allowable, taking into account MinTxnFee and credit from overpaying in earlier transactions; FirstValid/LastValid to the values in the invoking transaction, and all other fields to zero or empty values.",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 178,
+ "Name": "itxn_field",
+ "Args": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "Note",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "Accounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "Applications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUBBUBBBUUUBUUUBBBUUBBBBBUUUUBBBBBBBBUBUUUUUUUUUB",
+ "Doc": "set field F of the current inner transaction to A",
+ "DocExtra": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 179,
+ "Name": "itxn_submit",
+ "Size": 1,
+ "Doc": "execute the current inner transaction group. Fail if executing this group would exceed the inner transaction limit, or if any transaction in the group fails.",
+ "DocExtra": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 180,
+ "Name": "itxn",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the last inner transaction",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 181,
+ "Name": "itxna",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F of the last inner transaction",
+ "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 182,
+ "Name": "itxn_next",
+ "Size": 1,
+ "Doc": "begin preparation of a new inner transaction in the same transaction group",
+ "DocExtra": "`itxn_next` initializes the transaction exactly as `itxn_begin` does",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 183,
+ "Name": "gitxn",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "Sender",
+ "Fee",
+ "FirstValid",
+ "FirstValidTime",
+ "LastValid",
+ "Note",
+ "Lease",
+ "Receiver",
+ "Amount",
+ "CloseRemainderTo",
+ "VotePK",
+ "SelectionPK",
+ "VoteFirst",
+ "VoteLast",
+ "VoteKeyDilution",
+ "Type",
+ "TypeEnum",
+ "XferAsset",
+ "AssetAmount",
+ "AssetSender",
+ "AssetReceiver",
+ "AssetCloseTo",
+ "GroupIndex",
+ "TxID",
+ "ApplicationID",
+ "OnCompletion",
+ "ApplicationArgs",
+ "NumAppArgs",
+ "Accounts",
+ "NumAccounts",
+ "ApprovalProgram",
+ "ClearStateProgram",
+ "RekeyTo",
+ "ConfigAsset",
+ "ConfigAssetTotal",
+ "ConfigAssetDecimals",
+ "ConfigAssetDefaultFrozen",
+ "ConfigAssetUnitName",
+ "ConfigAssetName",
+ "ConfigAssetURL",
+ "ConfigAssetMetadataHash",
+ "ConfigAssetManager",
+ "ConfigAssetReserve",
+ "ConfigAssetFreeze",
+ "ConfigAssetClawback",
+ "FreezeAsset",
+ "FreezeAssetAccount",
+ "FreezeAssetFrozen",
+ "Assets",
+ "NumAssets",
+ "Applications",
+ "NumApplications",
+ "GlobalNumUint",
+ "GlobalNumByteSlice",
+ "LocalNumUint",
+ "LocalNumByteSlice",
+ "ExtraProgramPages",
+ "Nonparticipation",
+ "Logs",
+ "NumLogs",
+ "CreatedAssetID",
+ "CreatedApplicationID",
+ "LastLog",
+ "StateProofPK"
+ ],
+ "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBB",
+ "Doc": "field F of the Tth transaction in the last inner group submitted",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 184,
+ "Name": "gitxna",
+ "Returns": ".",
+ "Size": 4,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ith value of the array field F from the Tth transaction in the last inner group submitted",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 192,
+ "Name": "txnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ath value of the array field F of the current transaction",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 193,
+ "Name": "gtxnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 3,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Ath value of the array field F from the Tth transaction in the current group",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 194,
+ "Name": "gtxnsas",
+ "Args": "UU",
+ "Returns": ".",
+ "Size": 2,
+ "ArgEnum": [
+ "ApplicationArgs",
+ "Accounts",
+ "Assets",
+ "Applications",
+ "Logs"
+ ],
+ "ArgEnumTypes": "BBUUB",
+ "Doc": "Bth value of the array field F from the Ath transaction in the current group",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 195,
+ "Name": "args",
+ "Args": "U",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "Ath LogicSig argument",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 196,
+ "Name": "gloadss",
+ "Args": "UU",
+ "Returns": ".",
+ "Size": 1,
+ "Doc": "Bth scratch space value of the Ath transaction in the current group",
+ "Groups": [
+ "Loading Values"
+ ]
+ },
+ {
+ "Opcode": 197,
+ "Name": "itxnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 2,
+ "Doc": "Ath value of the array field F of the last inner transaction",
+ "ImmediateNote": "{uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ },
+ {
+ "Opcode": 198,
+ "Name": "gitxnas",
+ "Args": "U",
+ "Returns": ".",
+ "Size": 3,
+ "Doc": "Ath value of the array field F from the Tth transaction in the last inner group submitted",
+ "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "Groups": [
+ "Inner Transactions"
+ ]
+ }
+ ]
+}
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index ab0402e8e..4c8ead551 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -17,7 +17,9 @@
package logic
import (
+ "fmt"
"sort"
+ "strconv"
"github.com/algorand/go-algorand/data/transactions"
)
@@ -61,29 +63,119 @@ const createdResourcesVersion = 6
// experimental-
const fidoVersion = 7 // base64, json, secp256r1
+type linearCost struct {
+ baseCost int
+ chunkCost int
+ chunkSize int
+}
+
+// divideCeilUnsafely provides `math.Ceil` semantics using integer division. The technique avoids slower floating point operations as suggested in https://stackoverflow.com/a/2745086.
+// The method does _not_ check for divide-by-zero.
+func divideCeilUnsafely(numerator int, denominator int) int {
+ return (numerator + denominator - 1) / denominator
+}
+
+func (lc *linearCost) compute(stack []stackValue) int {
+ cost := lc.baseCost
+ if lc.chunkCost != 0 && lc.chunkSize != 0 {
+ // Uses divideCeilUnsafely rather than (len/size) to match how Ethereum discretizes hashing costs.
+ cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1].Bytes), lc.chunkSize)
+ }
+ return cost
+}
+
+func (lc *linearCost) docCost() string {
+ if *lc == (linearCost{}) {
+ return ""
+ }
+ if lc.chunkCost == 0 {
+ return strconv.Itoa(lc.baseCost)
+ }
+ if lc.chunkSize == 1 {
+ return fmt.Sprintf("%d + %d per byte", lc.baseCost, lc.chunkCost)
+ }
+ return fmt.Sprintf("%d + %d per %d bytes", lc.baseCost, lc.chunkCost, lc.chunkSize)
+}
+
// opDetails records details such as non-standard costs, immediate
// arguments, or dynamic layout controlled by a check function.
type opDetails struct {
- Cost int
+ FullCost linearCost // if non-zero, the cost of the opcode, no fields matter
Size int
checkFunc opCheckFunc
Immediates []immediate
typeFunc opTypeFunc
}
-var opDefault = opDetails{1, 1, nil, nil, nil}
-var opBranch = opDetails{1, 3, checkBranch, []immediate{{"target", immLabel}}, nil}
+func (d *opDetails) docCost() string {
+ cost := d.FullCost.docCost()
+ if cost != "" {
+ return cost
+ }
+ found := false
+ for _, imm := range d.Immediates {
+ if imm.fieldCosts != nil {
+ if found {
+ panic("two cost dependent fields")
+ }
+ found = true
+ group := imm.Group
+ for _, name := range group.Names {
+ fs, ok := group.SpecByName(name)
+ if !ok {
+ continue
+ }
+ cost += fmt.Sprintf(" %s=%d", name, imm.fieldCosts[fs.Field()])
+ }
+ }
+ }
+ return cost
+}
+
+// Cost computes the cost of the opcode, given details about how it is used,
+// both static (the program, which can be used to find the immediate values
+// supplied), and dynamic (the stack, which can be used to find the run-time
+// arguments supplied). Cost is used at run-time. docCost returns similar
+// information in human-readable form.
+func (d *opDetails) Cost(program []byte, pc int, stack []stackValue) int {
+ cost := d.FullCost.compute(stack)
+ if cost != 0 {
+ return cost
+ }
+ for i := range d.Immediates {
+ if d.Immediates[i].fieldCosts != nil {
+ cost += d.Immediates[i].fieldCosts[program[pc+1+i]]
+ }
+ }
+ return cost
+}
+
+var opDefault = opDetails{linearCost{baseCost: 1}, 1, nil, nil, nil}
+var opBranch = opDetails{linearCost{baseCost: 1}, 3, checkBranch,
+ []immediate{imm("target", immLabel)}, nil}
func costly(cost int) opDetails {
- return opDetails{cost, 1, nil, nil, nil}
+ return opDetails{linearCost{baseCost: cost}, 1, nil, nil, nil}
+}
+
+func (d opDetails) costs(cost int) opDetails {
+ clone := d
+ clone.FullCost = linearCost{baseCost: cost}
+ return clone
+}
+
+func (d opDetails) costByLength(initial, perChunk, chunkSize int) opDetails {
+ clone := d
+ clone.FullCost = costByLength(initial, perChunk, chunkSize).FullCost
+ return clone
}
func immediates(names ...string) opDetails {
immediates := make([]immediate, len(names))
for i, name := range names {
- immediates[i] = immediate{name, immByte}
+ immediates[i] = imm(name, immByte)
}
- return opDetails{1, 1 + len(immediates), nil, immediates, nil}
+ return opDetails{linearCost{baseCost: 1}, 1 + len(immediates), nil, immediates, nil}
}
func stacky(typer opTypeFunc, imms ...string) opDetails {
@@ -92,16 +184,44 @@ func stacky(typer opTypeFunc, imms ...string) opDetails {
return d
}
-func varies(checker opCheckFunc, name string, kind immKind) opDetails {
- return opDetails{1, 0, checker, []immediate{{name, kind}}, nil}
+func sizeVaries(checker opCheckFunc, name string, kind immKind) opDetails {
+ return opDetails{linearCost{baseCost: 1}, 0, checker, []immediate{imm(name, kind)}, nil}
+}
+
+// field is used to create an opDetails for an opcode with a single field
+func field(immediate string, group *FieldGroup) opDetails {
+ opd := immediates(immediate)
+ opd.Immediates[0].Group = group
+ return opd
+}
+
+// field is used to annotate an existing immediate with group info
+func (d opDetails) field(name string, group *FieldGroup) opDetails {
+ for i := range d.Immediates {
+ if d.Immediates[i].Name == name {
+ d.Immediates[i].Group = group
+ return d
+ }
+ }
+ panic(name)
}
-func costlyImm(cost int, names ...string) opDetails {
- opd := immediates(names...)
- opd.Cost = cost
+func costByField(immediate string, group *FieldGroup, costs []int) opDetails {
+ opd := immediates(immediate).costs(0)
+ opd.Immediates[0].Group = group
+ fieldCosts := make([]int, 256)
+ copy(fieldCosts, costs)
+ opd.Immediates[0].fieldCosts = fieldCosts
return opd
}
+func costByLength(initial int, perChunk int, chunkSize int) opDetails {
+ if initial < 1 || perChunk <= 0 || chunkSize < 1 || chunkSize > maxStringSize {
+ panic("bad cost configuration")
+ }
+ return opDetails{linearCost{initial, perChunk, chunkSize}, 1, nil, nil, nil}
+}
+
// immType describes the immediate arguments to an opcode
type immKind byte
@@ -115,15 +235,23 @@ const (
)
type immediate struct {
- Name string
- kind immKind
+ Name string
+ kind immKind
+ Group *FieldGroup
+
+ // If non-nil, always 256 long, so cost can be checked before eval
+ fieldCosts []int
+}
+
+func imm(name string, kind immKind) immediate {
+ return immediate{name, kind, nil, nil}
}
// OpSpec defines an opcode
type OpSpec struct {
Opcode byte
Name string
- op opEvalFunc // evaluate the op
+ op evalFunc // evaluate the op
asm asmFunc // assemble the op
dis disFunc // disassemble the op
Args StackTypes // what gets popped from the stack
@@ -165,12 +293,22 @@ var OpSpecs = []OpSpec{
{0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(130)},
{0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(45)},
+ /*
+ Tabling these changes until we offer unlimited global storage as there
+ is currently a useful pattern that requires hashes on long slices to
+	create logicsigs in apps.
+
+ {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, unlimitedStorage, modeAny, costByLength(12, 6, 8)},
+ {0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, unlimitedStorage, modeAny, costByLength(58, 4, 8)},
+ {0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 7, unlimitedStorage, costByLength(17, 5, 8)},
+ */
+
{0x04, "ed25519verify", opEd25519Verify, asmDefault, disDefault, threeBytes, oneInt, 1, runModeSignature, costly(1900)},
{0x04, "ed25519verify", opEd25519Verify, asmDefault, disDefault, threeBytes, oneInt, 5, modeAny, costly(1900)},
- {0x05, "ecdsa_verify", opEcdsaVerify, asmEcdsa, disEcdsa, threeBytes.plus(twoBytes), oneInt, 5, modeAny, costlyImm(1700, "v")},
- {0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, asmEcdsa, disEcdsa, oneBytes, twoBytes, 5, modeAny, costlyImm(650, "v")},
- {0x07, "ecdsa_pk_recover", opEcdsaPkRecover, asmEcdsa, disEcdsa, oneBytes.plus(oneInt).plus(twoBytes), twoBytes, 5, modeAny, costlyImm(2000, "v")},
+ {0x05, "ecdsa_verify", opEcdsaVerify, asmDefault, disDefault, threeBytes.plus(twoBytes), oneInt, 5, modeAny, costByField("v", &EcdsaCurves, ecdsaVerifyCosts)},
+ {0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, asmDefault, disDefault, oneBytes, twoBytes, 5, modeAny, costByField("v", &EcdsaCurves, ecdsaDecompressCosts)},
+ {0x07, "ecdsa_pk_recover", opEcdsaPkRecover, asmDefault, disDefault, oneBytes.plus(oneInt).plus(twoBytes), twoBytes, 5, modeAny, field("v", &EcdsaCurves).costs(2000)},
{0x08, "+", opPlus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
{0x09, "-", opMinus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
@@ -197,38 +335,45 @@ var OpSpecs = []OpSpec{
{0x1e, "addw", opAddw, asmDefault, disDefault, twoInts, twoInts, 2, modeAny, opDefault},
{0x1f, "divmodw", opDivModw, asmDefault, disDefault, twoInts.plus(twoInts), twoInts.plus(twoInts), 4, modeAny, costly(20)},
- {0x20, "intcblock", opIntConstBlock, asmIntCBlock, disIntcblock, nil, nil, 1, modeAny, varies(checkIntConstBlock, "uint ...", immInts)},
- {0x21, "intc", opIntConstLoad, asmIntC, disIntc, nil, oneInt, 1, modeAny, immediates("i")},
- {0x22, "intc_0", opIntConst0, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x23, "intc_1", opIntConst1, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x24, "intc_2", opIntConst2, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x25, "intc_3", opIntConst3, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault},
- {0x26, "bytecblock", opByteConstBlock, asmByteCBlock, disBytecblock, nil, nil, 1, modeAny, varies(checkByteConstBlock, "bytes ...", immBytess)},
- {0x27, "bytec", opByteConstLoad, asmByteC, disBytec, nil, oneBytes, 1, modeAny, immediates("i")},
- {0x28, "bytec_0", opByteConst0, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
- {0x29, "bytec_1", opByteConst1, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
- {0x2a, "bytec_2", opByteConst2, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
- {0x2b, "bytec_3", opByteConst3, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault},
+ {0x20, "intcblock", opIntConstBlock, asmIntCBlock, disDefault, nil, nil, 1, modeAny, sizeVaries(checkIntConstBlock, "uint ...", immInts)},
+ {0x21, "intc", opIntConstLoad, asmIntC, disDefault, nil, oneInt, 1, modeAny, immediates("i")},
+ {0x22, "intc_0", opIntConst0, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x23, "intc_1", opIntConst1, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x24, "intc_2", opIntConst2, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x25, "intc_3", opIntConst3, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x26, "bytecblock", opByteConstBlock, asmByteCBlock, disDefault, nil, nil, 1, modeAny, sizeVaries(checkByteConstBlock, "bytes ...", immBytess)},
+ {0x27, "bytec", opByteConstLoad, asmByteC, disDefault, nil, oneBytes, 1, modeAny, immediates("i")},
+ {0x28, "bytec_0", opByteConst0, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x29, "bytec_1", opByteConst1, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x2a, "bytec_2", opByteConst2, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x2b, "bytec_3", opByteConst3, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
{0x2c, "arg", opArg, asmArg, disDefault, nil, oneBytes, 1, runModeSignature, immediates("n")},
{0x2d, "arg_0", opArg0, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
{0x2e, "arg_1", opArg1, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
{0x2f, "arg_2", opArg2, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
{0x30, "arg_3", opArg3, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
- {0x31, "txn", opTxn, asmTxn, disTxn, nil, oneAny, 1, modeAny, immediates("f")},
+ {0x31, "txn", opTxn, asmDefault, disDefault, nil, oneAny, 1, modeAny, field("f", &TxnScalarFields)},
// It is ok to have the same opcode for different TEAL versions.
// This 'txn' asm command supports additional argument in version 2 and
// generates 'txna' opcode in that particular case
- {0x31, "txn", opTxn, asmTxn2, disTxn, nil, oneAny, 2, modeAny, immediates("f")},
- {0x32, "global", opGlobal, asmGlobal, disGlobal, nil, oneAny, 1, modeAny, immediates("f")},
- {0x33, "gtxn", opGtxn, asmGtxn, disGtxn, nil, oneAny, 1, modeAny, immediates("t", "f")},
- {0x33, "gtxn", opGtxn, asmGtxn2, disGtxn, nil, oneAny, 2, modeAny, immediates("t", "f")},
+ {0x31, "txn", opTxn, asmTxn2, disDefault, nil, oneAny, 2, modeAny, field("f", &TxnFields)},
+ {0x32, "global", opGlobal, asmDefault, disDefault, nil, oneAny, 1, modeAny,
+ field("f", &GlobalFields)},
+ {0x33, "gtxn", opGtxn, asmDefault, disDefault, nil, oneAny, 1, modeAny,
+ immediates("t", "f").field("f", &TxnScalarFields)},
+ {0x33, "gtxn", opGtxn, asmGtxn2, disDefault, nil, oneAny, 2, modeAny,
+ immediates("t", "f").field("f", &TxnFields)},
{0x34, "load", opLoad, asmDefault, disDefault, nil, oneAny, 1, modeAny, immediates("i")},
{0x35, "store", opStore, asmDefault, disDefault, oneAny, nil, 1, modeAny, immediates("i")},
- {0x36, "txna", opTxna, asmTxna, disTxna, nil, oneAny, 2, modeAny, immediates("f", "i")},
- {0x37, "gtxna", opGtxna, asmGtxna, disGtxna, nil, oneAny, 2, modeAny, immediates("t", "f", "i")},
+ {0x36, "txna", opTxna, asmDefault, disDefault, nil, oneAny, 2, modeAny,
+ immediates("f", "i").field("f", &TxnArrayFields)},
+ {0x37, "gtxna", opGtxna, asmDefault, disDefault, nil, oneAny, 2, modeAny,
+ immediates("t", "f", "i").field("f", &TxnArrayFields)},
// Like gtxn, but gets txn index from stack, rather than immediate arg
- {0x38, "gtxns", opGtxns, asmGtxns, disTxn, oneInt, oneAny, 3, modeAny, immediates("f")},
- {0x39, "gtxnsa", opGtxnsa, asmGtxns, disTxna, oneInt, oneAny, 3, modeAny, immediates("f", "i")},
+ {0x38, "gtxns", opGtxns, asmGtxns, disDefault, oneInt, oneAny, 3, modeAny,
+ immediates("f").field("f", &TxnFields)},
+ {0x39, "gtxnsa", opGtxnsa, asmDefault, disDefault, oneInt, oneAny, 3, modeAny,
+ immediates("f", "i").field("f", &TxnArrayFields)},
// Group scratch space access
{0x3a, "gload", opGload, asmDefault, disDefault, nil, oneAny, 4, runModeApplication, immediates("t", "i")},
{0x3b, "gloads", opGloads, asmDefault, disDefault, oneInt, oneAny, 4, runModeApplication, immediates("i")},
@@ -240,9 +385,9 @@ var OpSpecs = []OpSpec{
{0x3e, "loads", opLoads, asmDefault, disDefault, oneInt, oneAny, 5, modeAny, opDefault},
{0x3f, "stores", opStores, asmDefault, disDefault, oneInt.plus(oneAny), nil, 5, modeAny, opDefault},
- {0x40, "bnz", opBnz, asmBranch, disBranch, oneInt, nil, 1, modeAny, opBranch},
- {0x41, "bz", opBz, asmBranch, disBranch, oneInt, nil, 2, modeAny, opBranch},
- {0x42, "b", opB, asmBranch, disBranch, nil, nil, 2, modeAny, opBranch},
+ {0x40, "bnz", opBnz, asmBranch, disDefault, oneInt, nil, 1, modeAny, opBranch},
+ {0x41, "bz", opBz, asmBranch, disDefault, oneInt, nil, 2, modeAny, opBranch},
+ {0x42, "b", opB, asmBranch, disDefault, nil, nil, 2, modeAny, opBranch},
{0x43, "return", opReturn, asmDefault, disDefault, oneInt, nil, 2, modeAny, opDefault},
{0x44, "assert", opAssert, asmDefault, disDefault, oneInt, nil, 3, modeAny, opDefault},
{0x48, "pop", opPop, asmDefault, disDefault, oneAny, nil, 1, modeAny, opDefault},
@@ -269,8 +414,8 @@ var OpSpecs = []OpSpec{
{0x59, "extract_uint16", opExtract16Bits, asmDefault, disDefault, byteInt, oneInt, 5, modeAny, opDefault},
{0x5a, "extract_uint32", opExtract32Bits, asmDefault, disDefault, byteInt, oneInt, 5, modeAny, opDefault},
{0x5b, "extract_uint64", opExtract64Bits, asmDefault, disDefault, byteInt, oneInt, 5, modeAny, opDefault},
- {0x5c, "base64_decode", opBase64Decode, asmBase64Decode, disBase64Decode, oneBytes, oneBytes, fidoVersion, modeAny, costlyImm(25, "e")},
- {0x5d, "json_ref", opJSONRef, asmJSONRef, disJSONRef, twoBytes, oneAny, fidoVersion, modeAny, immediates("r")},
+ {0x5c, "base64_decode", opBase64Decode, asmDefault, disDefault, oneBytes, oneBytes, fidoVersion, modeAny, field("e", &Base64Encodings).costByLength(1, 1, 16)},
+ {0x5d, "json_ref", opJSONRef, asmDefault, disDefault, twoBytes, oneAny, fidoVersion, modeAny, field("r", &JSONRefTypes)},
{0x60, "balance", opBalance, asmDefault, disDefault, oneInt, oneInt, 2, runModeApplication, opDefault},
{0x60, "balance", opBalance, asmDefault, disDefault, oneAny, oneInt, directRefEnabledVersion, runModeApplication, opDefault},
@@ -289,23 +434,23 @@ var OpSpecs = []OpSpec{
{0x68, "app_local_del", opAppLocalDel, asmDefault, disDefault, oneAny.plus(oneBytes), nil, directRefEnabledVersion, runModeApplication, opDefault},
{0x69, "app_global_del", opAppGlobalDel, asmDefault, disDefault, oneBytes, nil, 2, runModeApplication, opDefault},
- {0x70, "asset_holding_get", opAssetHoldingGet, asmAssetHolding, disAssetHolding, twoInts, oneAny.plus(oneInt), 2, runModeApplication, immediates("f")},
- {0x70, "asset_holding_get", opAssetHoldingGet, asmAssetHolding, disAssetHolding, oneAny.plus(oneInt), oneAny.plus(oneInt), directRefEnabledVersion, runModeApplication, immediates("f")},
- {0x71, "asset_params_get", opAssetParamsGet, asmAssetParams, disAssetParams, oneInt, oneAny.plus(oneInt), 2, runModeApplication, immediates("f")},
- {0x72, "app_params_get", opAppParamsGet, asmAppParams, disAppParams, oneInt, oneAny.plus(oneInt), 5, runModeApplication, immediates("f")},
- {0x73, "acct_params_get", opAcctParamsGet, asmAcctParams, disAcctParams, oneAny, oneAny.plus(oneInt), 6, runModeApplication, immediates("f")},
+ {0x70, "asset_holding_get", opAssetHoldingGet, asmDefault, disDefault, twoInts, oneAny.plus(oneInt), 2, runModeApplication, field("f", &AssetHoldingFields)},
+ {0x70, "asset_holding_get", opAssetHoldingGet, asmDefault, disDefault, oneAny.plus(oneInt), oneAny.plus(oneInt), directRefEnabledVersion, runModeApplication, field("f", &AssetHoldingFields)},
+ {0x71, "asset_params_get", opAssetParamsGet, asmDefault, disDefault, oneInt, oneAny.plus(oneInt), 2, runModeApplication, field("f", &AssetParamsFields)},
+ {0x72, "app_params_get", opAppParamsGet, asmDefault, disDefault, oneInt, oneAny.plus(oneInt), 5, runModeApplication, field("f", &AppParamsFields)},
+ {0x73, "acct_params_get", opAcctParamsGet, asmDefault, disDefault, oneAny, oneAny.plus(oneInt), 6, runModeApplication, field("f", &AcctParamsFields)},
{0x78, "min_balance", opMinBalance, asmDefault, disDefault, oneInt, oneInt, 3, runModeApplication, opDefault},
{0x78, "min_balance", opMinBalance, asmDefault, disDefault, oneAny, oneInt, directRefEnabledVersion, runModeApplication, opDefault},
// Immediate bytes and ints. Smaller code size for single use of constant.
- {0x80, "pushbytes", opPushBytes, asmPushBytes, disPushBytes, nil, oneBytes, 3, modeAny, varies(opPushBytes, "bytes", immBytes)},
- {0x81, "pushint", opPushInt, asmPushInt, disPushInt, nil, oneInt, 3, modeAny, varies(opPushInt, "uint", immInt)},
+ {0x80, "pushbytes", opPushBytes, asmPushBytes, disDefault, nil, oneBytes, 3, modeAny, sizeVaries(opPushBytes, "bytes", immBytes)},
+ {0x81, "pushint", opPushInt, asmPushInt, disDefault, nil, oneInt, 3, modeAny, sizeVaries(opPushInt, "uint", immInt)},
{0x84, "ed25519verify_bare", opEd25519VerifyBare, asmDefault, disDefault, threeBytes, oneInt, 7, modeAny, costly(1900)},
// "Function oriented"
- {0x88, "callsub", opCallSub, asmBranch, disBranch, nil, nil, 4, modeAny, opBranch},
+ {0x88, "callsub", opCallSub, asmBranch, disDefault, nil, nil, 4, modeAny, opBranch},
{0x89, "retsub", opRetSub, asmDefault, disDefault, nil, nil, 4, modeAny, opDefault},
// Leave a little room for indirect function calls, or similar
@@ -320,6 +465,10 @@ var OpSpecs = []OpSpec{
{0x97, "divw", opDivw, asmDefault, disDefault, twoInts.plus(oneInt), oneInt, 6, modeAny, opDefault},
{0x98, "sha3_256", opSHA3_256, asmDefault, disDefault, oneBytes, oneBytes, 7, modeAny, costly(130)},
+ /* Will end up following keccak256 -
+ {0x98, "sha3_256", opSHA3_256, asmDefault, disDefault, oneBytes, oneBytes, unlimitedStorage, modeAny, costByLength(58, 4, 8)},},
+ */
+
// Byteslice math.
{0xa0, "b+", opBytesPlus, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(10)},
{0xa1, "b-", opBytesMinus, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(10)},
@@ -341,22 +490,32 @@ var OpSpecs = []OpSpec{
// AVM "effects"
{0xb0, "log", opLog, asmDefault, disDefault, oneBytes, nil, 5, runModeApplication, opDefault},
{0xb1, "itxn_begin", opTxBegin, asmDefault, disDefault, nil, nil, 5, runModeApplication, opDefault},
- {0xb2, "itxn_field", opTxField, asmTxField, disTxField, oneAny, nil, 5, runModeApplication, stacky(typeTxField, "f")},
- {0xb3, "itxn_submit", opTxSubmit, asmDefault, disDefault, nil, nil, 5, runModeApplication, opDefault},
- {0xb4, "itxn", opItxn, asmItxn, disTxn, nil, oneAny, 5, runModeApplication, immediates("f")},
- {0xb5, "itxna", opItxna, asmTxna, disTxna, nil, oneAny, 5, runModeApplication, immediates("f", "i")},
- {0xb6, "itxn_next", opTxNext, asmDefault, disDefault, nil, nil, 6, runModeApplication, opDefault},
- {0xb7, "gitxn", opGitxn, asmGitxn, disGtxn, nil, oneAny, 6, runModeApplication, immediates("t", "f")},
- {0xb8, "gitxna", opGitxna, asmGtxna, disGtxna, nil, oneAny, 6, runModeApplication, immediates("t", "f", "i")},
+ {0xb2, "itxn_field", opItxnField, asmItxnField, disDefault, oneAny, nil, 5, runModeApplication,
+ stacky(typeTxField, "f").field("f", &TxnFields)},
+ {0xb3, "itxn_submit", opItxnSubmit, asmDefault, disDefault, nil, nil, 5, runModeApplication, opDefault},
+ {0xb4, "itxn", opItxn, asmItxn, disDefault, nil, oneAny, 5, runModeApplication,
+ field("f", &TxnScalarFields)},
+ {0xb5, "itxna", opItxna, asmDefault, disDefault, nil, oneAny, 5, runModeApplication,
+ immediates("f", "i").field("f", &TxnArrayFields)},
+ {0xb6, "itxn_next", opItxnNext, asmDefault, disDefault, nil, nil, 6, runModeApplication, opDefault},
+ {0xb7, "gitxn", opGitxn, asmGitxn, disDefault, nil, oneAny, 6, runModeApplication,
+ immediates("t", "f").field("f", &TxnFields)},
+ {0xb8, "gitxna", opGitxna, asmDefault, disDefault, nil, oneAny, 6, runModeApplication,
+ immediates("t", "f", "i").field("f", &TxnArrayFields)},
// Dynamic indexing
- {0xc0, "txnas", opTxnas, asmTxnas, disTxn, oneInt, oneAny, 5, modeAny, immediates("f")},
- {0xc1, "gtxnas", opGtxnas, asmGtxnas, disGtxn, oneInt, oneAny, 5, modeAny, immediates("t", "f")},
- {0xc2, "gtxnsas", opGtxnsas, asmGtxnsas, disTxn, twoInts, oneAny, 5, modeAny, immediates("f")},
+ {0xc0, "txnas", opTxnas, asmDefault, disDefault, oneInt, oneAny, 5, modeAny,
+ field("f", &TxnArrayFields)},
+ {0xc1, "gtxnas", opGtxnas, asmDefault, disDefault, oneInt, oneAny, 5, modeAny,
+ immediates("t", "f").field("f", &TxnArrayFields)},
+ {0xc2, "gtxnsas", opGtxnsas, asmDefault, disDefault, twoInts, oneAny, 5, modeAny,
+ field("f", &TxnArrayFields)},
{0xc3, "args", opArgs, asmDefault, disDefault, oneInt, oneBytes, 5, runModeSignature, opDefault},
{0xc4, "gloadss", opGloadss, asmDefault, disDefault, twoInts, oneAny, 6, runModeApplication, opDefault},
- {0xc5, "itxnas", opItxnas, asmTxnas, disTxn, oneInt, oneAny, 6, runModeApplication, immediates("f")},
- {0xc6, "gitxnas", opGitxnas, asmGtxnas, disGtxn, oneInt, oneAny, 6, runModeApplication, immediates("t", "f")},
+ {0xc5, "itxnas", opItxnas, asmDefault, disDefault, oneInt, oneAny, 6, runModeApplication,
+ field("f", &TxnArrayFields)},
+ {0xc6, "gitxnas", opGitxnas, asmDefault, disDefault, oneInt, oneAny, 6, runModeApplication,
+ immediates("t", "f").field("f", &TxnArrayFields)},
}
type sortByOpcode []OpSpec
@@ -414,7 +573,7 @@ func OpcodesByVersion(version uint64) []OpSpec {
// direct opcode bytes
var opsByOpcode [LogicVersion + 1][256]OpSpec
-// OpsByName map for each each version, mapping opcode name to OpSpec
+// OpsByName map for each version, mapping opcode name to OpSpec
var OpsByName [LogicVersion + 1]map[string]OpSpec
// Migration from TEAL v1 to TEAL v2.
@@ -441,7 +600,7 @@ func init() {
}
// Start from v2 TEAL and higher,
// copy lower version opcodes and overwrite matching version
- for v := uint64(2); v <= EvalMaxVersion; v++ {
+ for v := uint64(2); v <= evalMaxVersion; v++ {
OpsByName[v] = make(map[string]OpSpec, 256)
// Copy opcodes from lower version
diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go
index 955662ced..2640070ee 100644
--- a/data/transactions/logic/opcodes_test.go
+++ b/data/transactions/logic/opcodes_test.go
@@ -77,13 +77,15 @@ func TestOpcodesByVersionReordered(t *testing.T) {
OpSpecs[1] = OpSpecs[4]
OpSpecs[4] = tmp
- t.Run("TestOpcodesByVersion", TestOpcodesByVersion)
+ t.Run("TestOpcodesByVersion", testOpcodesByVersion)
}
func TestOpcodesByVersion(t *testing.T) {
- // partitiontest.PartitionTest(t)
- // has partitioning in the TestOpcodesByVersionReordered()
+ partitiontest.PartitionTest(t)
+ testOpcodesByVersion(t)
+}
+func testOpcodesByVersion(t *testing.T) {
// Make a copy of the OpSpecs to check if OpcodesByVersion will change it
OpSpecs2 := make([]OpSpec, len(OpSpecs))
for idx, opspec := range OpSpecs {
@@ -168,7 +170,7 @@ func TestOpcodesVersioningV2(t *testing.T) {
reflect.ValueOf(a.dis).Pointer() == reflect.ValueOf(b.dis).Pointer() &&
reflect.DeepEqual(a.Args, b.Args) && reflect.DeepEqual(a.Returns, b.Returns) &&
a.Modes == b.Modes &&
- a.Details.Cost == b.Details.Cost && a.Details.Size == b.Details.Size &&
+ a.Details.FullCost == b.Details.FullCost && a.Details.Size == b.Details.Size &&
reflect.ValueOf(a.Details.checkFunc).Pointer() == reflect.ValueOf(b.Details.checkFunc).Pointer()
return
}
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
new file mode 100644
index 000000000..efc093602
--- /dev/null
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -0,0 +1,136 @@
+{
+ "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
+ "name": "Algorand TEAL",
+ "patterns": [
+ {
+ "include": "#invalid"
+ },
+ {
+ "include": "#comments"
+ },
+ {
+ "include": "#strings"
+ },
+ {
+ "include": "#literals"
+ },
+ {
+ "include": "#labels"
+ },
+ {
+ "include": "#keywords"
+ },
+ {
+ "include": "#pragmas"
+ }
+ ],
+ "repository": {
+ "comments": {
+ "name": "comment.line.double-slash.teal",
+ "begin": "//",
+ "end": "$"
+ },
+ "invalid": {
+ "patterns": [
+ {
+ "name": "invalid.illegal.teal",
+ "match": "^\\s+.*$"
+ }
+ ]
+ },
+ "keywords": {
+ "patterns": [
+ {
+ "match": "\\b(base64|b64|base32|b32)(?:\\(|\\s+)([a-zA-Z0-9\\+\\/\\=]+)(?:\\)|\\s?|$)",
+ "captures": {
+ "1": {
+ "name": "support.class.teal"
+ },
+ "2": {
+ "name": "string.quoted.triple.teal"
+ }
+ }
+ },
+ {
+ "match": "^(addr)\\s+([A-Z2-7\\=]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.teal"
+ },
+ "2": {
+ "name": "string.unquoted.teal"
+ }
+ }
+ },
+ {
+ "name": "keyword.control.teal",
+ "match": "^(assert|b|bnz|bz|callsub|cover|dig|dup|dup2|err|pop|retsub|return|select|swap|uncover)\\b"
+ },
+ {
+ "name": "keyword.other.teal",
+ "match": "^(int|byte|addr|arg|arg_0|arg_1|arg_2|arg_3|args|bytec|bytec_0|bytec_1|bytec_2|bytec_3|bytecblock|bzero|gaid|gaids|gload|gloads|gloadss|global|gtxn|gtxna|gtxnas|gtxns|gtxnsa|gtxnsas|intc|intc_0|intc_1|intc_2|intc_3|intcblock|load|loads|pushbytes|pushint|store|stores|txn|txna|txnas)\\b"
+ },
+ {
+ "name": "keyword.other.unit.teal",
+ "match": "^(acct_params_get|app_global_del|app_global_get|app_global_get_ex|app_global_put|app_local_del|app_local_get|app_local_get_ex|app_local_put|app_opted_in|app_params_get|asset_holding_get|asset_params_get|balance|log|min_balance)\\b"
+ },
+ {
+ "name": "keyword.operator.teal",
+ "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
+ }
+ ]
+ },
+ "labels": {
+ "patterns": [
+ {
+ "name": "support.variable.teal",
+ "match": "^\\w+:.*$"
+ },
+ {
+ "match": "\\b(?\u003c=b|bz|bnz)\\s+(\\w+)\\b",
+ "captures": {
+ "1": {
+ "name": "support.variable.teal"
+ }
+ }
+ }
+ ]
+ },
+ "literals": {
+ "patterns": [
+ {
+ "name": "constant.numeric.teal",
+ "match": "\\b([0-9]+)\\b"
+ },
+ {
+ "name": "constant.numeric.teal",
+ "match": "\\b(?\u003c=int\\s+)(0x[0-9]+)\\b"
+ },
+ {
+ "name": "string.quoted.double.teal",
+ "match": "\\b(?\u003c=byte\\s+)(0x[0-9]+)\\b"
+ },
+ {
+ "name": "variable.parameter.teal",
+ "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|ApplicationArgs|NumAppArgs|Accounts|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|Assets|NumAssets|Applications|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|Logs|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr)\\b"
+ }
+ ]
+ },
+ "pragmas": {
+ "name": "support.function.teal",
+ "match": "^#pragma\\b.*$"
+ },
+ "strings": {
+ "name": "string.quoted.double.teal",
+ "begin": "\"",
+ "end": "\"",
+ "patterns": [
+ {
+ "name": "constant.character.escape.teal",
+ "match": "\\\\(x[0-9A-Fa-f]{2}|.|$)"
+ }
+ ]
+ }
+ },
+ "scopeName": "source.teal"
+}
diff --git a/gen/generate.go b/gen/generate.go
index 05c2ecb97..dc76a485d 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -27,6 +27,7 @@ import (
"sort"
"sync"
"sync/atomic"
+ "time"
"github.com/algorand/go-deadlock"
@@ -301,6 +302,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
}()
}
+ createStart := time.Now()
creatingWalletsWaitGroup.Add(concurrentWalletGenerators)
for routinesCounter := 0; routinesCounter < concurrentWalletGenerators; routinesCounter++ {
go createWallet()
@@ -375,7 +377,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
err = ioutil.WriteFile(filepath.Join(outDir, config.GenesisJSONFile), append(jsonData, '\n'), 0666)
if (verbose) && (rootKeyCreated > 0 || partKeyCreated > 0) {
- fmt.Printf("Created %d new rootkeys and %d new partkeys.\n", rootKeyCreated, partKeyCreated)
+ fmt.Printf("Created %d new rootkeys and %d new partkeys in %s.\n", rootKeyCreated, partKeyCreated, time.Since(createStart))
fmt.Printf("NOTICE: Participation keys are valid for a period of %d rounds. After this many rounds the network will stall unless new keys are registered.\n", lastWalletValid-firstWalletValid)
}
diff --git a/go.mod b/go.mod
index 1a1521c8a..91e901133 100644
--- a/go.mod
+++ b/go.mod
@@ -1,16 +1,16 @@
module github.com/algorand/go-algorand
-go 1.14
+go 1.16
require (
github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123
- github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d
- github.com/algorand/go-deadlock v0.2.1
+ github.com/algorand/go-codec/codec v1.1.8
+ github.com/algorand/go-deadlock v0.2.2
github.com/algorand/go-sumhash v0.1.0
- github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64
- github.com/algorand/msgp v1.1.49
- github.com/algorand/oapi-codegen v1.3.5-algorand5
- github.com/algorand/websocket v1.4.4
+ github.com/algorand/graphtrace v0.1.0
+ github.com/algorand/msgp v1.1.50
+ github.com/algorand/oapi-codegen v1.3.7
+ github.com/algorand/websocket v1.4.5
github.com/algorand/xorfilter v0.2.0
github.com/aws/aws-sdk-go v1.16.5
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e
@@ -28,10 +28,9 @@ require (
github.com/gopherjs/gopherwasm v1.0.1 // indirect
github.com/gorilla/context v1.1.1 // indirect
github.com/gorilla/mux v1.6.2
- github.com/gorilla/websocket v1.4.2 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmoiron/sqlx v1.2.0
- github.com/karalabe/hid v1.0.0
+ github.com/karalabe/usb v0.0.2
github.com/labstack/echo/v4 v4.1.17
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-sqlite3 v1.10.0
@@ -40,14 +39,13 @@ require (
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/olivere/elastic v6.2.14+incompatible
github.com/russross/blackfriday v1.5.2 // indirect
- github.com/sirupsen/logrus v1.4.2
+ github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.5 // indirect
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.7.1
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
- golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
- golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
- golang.org/x/text v0.3.3
+ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
+ golang.org/x/text v0.3.7
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
diff --git a/go.sum b/go.sum
index f6d72443a..d43f840d2 100644
--- a/go.sum
+++ b/go.sum
@@ -1,21 +1,21 @@
github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123 h1:cnUjJ/iqUjJNbhUzgmxbfwHMVFnz+DLnNQx8uJcGaks=
github.com/algorand/falcon v0.0.0-20220130164023-c9e1d466f123/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
-github.com/algorand/go-codec v1.1.2 h1:QWS9YC3EEWBpJq5AqFPELcCJ2QPpTIg9aqR2K/sRDq4=
-github.com/algorand/go-codec v1.1.2/go.mod h1:A3YI4V24jUUnU1eNekNmx2fLi60FvlNssqOiUsyfNM8=
-github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d h1:W9MgGUodEl4Y4+CxeEr+T3fZ26kOcWA4yfqhjbFxxmI=
-github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d/go.mod h1:qm6LyXvDa1+uZJxaVg8X+OEjBqt/zDinDa2EohtTDxU=
-github.com/algorand/go-deadlock v0.2.1 h1:TQPQwWAB133bS5uwHpmrgH5hCMyZK5hnUW26aqWMvq4=
-github.com/algorand/go-deadlock v0.2.1/go.mod h1:HgdF2cwtBIBCL7qmUaozuG/UIZFR6PLpSMR58pvWiXE=
+github.com/algorand/go-codec v1.1.8 h1:XDSreeeZY8gMst6Edz4RBkl08/DGMJOeHYkoXL2B7wI=
+github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4=
+github.com/algorand/go-codec/codec v1.1.8 h1:lsFuhcOH2LiEhpBH3BVUUkdevVmwCRyvb7FCAAPeY6U=
+github.com/algorand/go-codec/codec v1.1.8/go.mod h1:tQ3zAJ6ijTps6V+wp8KsGDnPC2uhHVC7ANyrtkIY0bA=
+github.com/algorand/go-deadlock v0.2.2 h1:L7AKATSUCzoeVuOgpTipfCEjdUu5ECmlje8R7lP9DOY=
+github.com/algorand/go-deadlock v0.2.2/go.mod h1:Hat1OXKqKNUcN/iv74FjGhF4hsOE2l7gOgQ9ZVIq6Fk=
github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg=
github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc=
-github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64 h1:yvKeJdS/mvLRiyIxu8j5BQDXIzs1XbC9/22KycJnt3A=
-github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64/go.mod h1:qFtQmC+kmsfnLfS9j3xgKtzsWyozemL5ek1R4dWZa5c=
-github.com/algorand/msgp v1.1.49 h1:YBFRcYZNsD2WgzXONvzFrjv1/887pWzJSx874VL4P6g=
-github.com/algorand/msgp v1.1.49/go.mod h1:oyDY2SIeM1bytVYJTL88nt9kVeEBC00Avyqcnyrq/ec=
-github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI=
-github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY=
-github.com/algorand/websocket v1.4.4 h1:BL9atWs/7tkV73NCwiLZ5YqDENMBsSxozc5gDtPdsQ4=
-github.com/algorand/websocket v1.4.4/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8=
+github.com/algorand/graphtrace v0.1.0 h1:QemP1iT0W56SExD0NfiU6rsG34/v0Je6bg5UZnptEUM=
+github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc=
+github.com/algorand/msgp v1.1.50 h1:Mvsjs5LCE6HsXXbwJXD8ol1Y+c+QMoFNM4j0CY+mFGo=
+github.com/algorand/msgp v1.1.50/go.mod h1:R5sJrW9krk4YwNo+rs82Kq6V55q/zNgACwWqt3sQBM4=
+github.com/algorand/oapi-codegen v1.3.7 h1:TdXeGljgrnLXSCGPdeY6g6+i/G0Rr5CkjBgUJY6ht48=
+github.com/algorand/oapi-codegen v1.3.7/go.mod h1:UvOtAiP3hc0M2GUKBnZVTjLe3HKGDKh6y9rs3e3JyOg=
+github.com/algorand/websocket v1.4.5 h1:Cs6UTaCReAl02evYxmN8k57cNHmBILRcspfSxYg4AJE=
+github.com/algorand/websocket v1.4.5/go.mod h1:79n6FSZY08yQagHzE/YWZqTPBYfY5wc3IS+UTZe1W5c=
github.com/algorand/xorfilter v0.2.0 h1:YC31ANxdZ2jmtbwqv1+USskVSqjkeiRZcQGc6//ro9Q=
github.com/algorand/xorfilter v0.2.0/go.mod h1:f5cJsYrFbJhXkbjnV4odJB44np05/PvwvdBnABnQoUs=
github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc=
@@ -24,7 +24,6 @@ github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz7
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
-github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4 h1:Fphwr1XDjkTR/KFbrrkLfY6D2CEOlHqFGomQQrxcHFs=
github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -46,7 +45,6 @@ github.com/getkin/kin-openapi v0.22.0 h1:J5IFyKd/5yuB6AZAgwK0CMBKnabWcmkowtsl6bR
github.com/getkin/kin-openapi v0.22.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-chi/chi v4.1.1+incompatible h1:MmTgB0R8Bt/jccxp+t6S/1VGIKdJw5J74CK/c9tTfA4=
github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -54,7 +52,6 @@ github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKo
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
@@ -68,8 +65,6 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
@@ -78,15 +73,12 @@ github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/karalabe/hid v1.0.0 h1:+/CIMNXhSU/zIJgnIvBD2nKHxS/bnRHhhs9xBryLpPo=
-github.com/karalabe/hid v1.0.0/go.mod h1:Vr51f8rUOLYrfrWDFlV12GGQgM5AT8sVh+2fY4MPeu8=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
+github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o=
github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI=
github.com/labstack/echo/v4 v4.1.17 h1:PQIBaRplyRy3OjwILGkPg89JRtH2x5bssi59G2EL3fo=
github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ=
@@ -96,10 +88,8 @@ github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54 h1:p8zN0Xu28xyEkPpqLbFXAnjdgBVvTJCpfOtoDf/+/RQ=
github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -126,31 +116,28 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4=
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -160,8 +147,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -169,43 +156,46 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200423205358-59e73619c742 h1:9OGWpORUXvk8AsaBJlpzzDx7Srv/rSK6rvjcsJq4rJo=
golang.org/x/tools v0.0.0-20200423205358-59e73619c742/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index c6ff9cc84..35f11682d 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -620,11 +620,11 @@ func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta ledgercore.State
au.accountsReadCond.Broadcast()
}
-// Totals returns the totals for a given round
-func (au *accountUpdates) Totals(rnd basics.Round) (totals ledgercore.AccountTotals, err error) {
+// OnlineTotals returns the online totals of all accounts at the end of round rnd.
+func (au *accountUpdates) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
- return au.totalsImpl(rnd)
+ return au.onlineTotalsImpl(rnd)
}
// LatestTotals returns the totals of all accounts for the most recent round, as well as the round number
@@ -728,15 +728,15 @@ func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, c
return aul.au.getCreatorForRound(rnd, cidx, ctype, false /* don't sync */)
}
-// totalsImpl returns the totals for a given round
-func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals ledgercore.AccountTotals, err error) {
+// onlineTotalsImpl returns the online totals of all accounts at the end of round rnd.
+func (au *accountUpdates) onlineTotalsImpl(rnd basics.Round) (basics.MicroAlgos, error) {
offset, err := au.roundOffset(rnd)
if err != nil {
- return
+ return basics.MicroAlgos{}, err
}
- totals = au.roundTotals[offset]
- return
+ totals := au.roundTotals[offset]
+ return totals.Online.Money, nil
}
// latestTotalsImpl returns the totals of all accounts for the most recent round, as well as the round number
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index e2c577ee2..7037adffb 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -274,7 +274,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
latest := au.latest()
require.Equal(t, latestRnd, latest)
- _, err := au.Totals(latest + 1)
+ _, err := au.OnlineTotals(latest + 1)
require.Error(t, err)
var validThrough basics.Round
@@ -283,7 +283,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
require.Equal(t, basics.Round(0), validThrough)
if base > 0 {
- _, err := au.Totals(base - 1)
+ _, err := au.OnlineTotals(base - 1)
require.Error(t, err)
_, validThrough, err = au.LookupWithoutRewards(base-1, ledgertesting.RandomAddress())
@@ -338,13 +338,9 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates
bll := accts[rnd]
require.Equal(t, all, bll)
- totals, err := au.Totals(rnd)
+ totals, err := au.OnlineTotals(rnd)
require.NoError(t, err)
- require.Equal(t, totals.Online.Money.Raw, totalOnline)
- require.Equal(t, totals.Offline.Money.Raw, totalOffline)
- require.Equal(t, totals.NotParticipating.Money.Raw, totalNotPart)
- require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline)
- require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart)
+ require.Equal(t, totals.Raw, totalOnline)
d, validThrough, err := au.LookupWithoutRewards(rnd, ledgertesting.RandomAddress())
require.NoError(t, err)
@@ -457,7 +453,8 @@ func TestAcctUpdates(t *testing.T) {
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -565,7 +562,8 @@ func TestAcctUpdatesFastUpdates(t *testing.T) {
rewardLevelDelta := crypto.RandUint64() % 5
rewardLevel += rewardLevelDelta
updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -653,7 +651,8 @@ func BenchmarkBalancesChanges(b *testing.B) {
}
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(b, i-1, prevRound)
require.NoError(b, err)
newPool := totals[testPoolAddr]
@@ -786,7 +785,8 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) {
rewardLevel += rewardLevelDelta
updates, totals := ledgertesting.RandomDeltasBalanced(1, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1595,7 +1595,8 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1698,7 +1699,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1734,7 +1736,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1817,7 +1820,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1852,7 +1856,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -1889,7 +1894,8 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundry(t *testing.T) {
accountChanges := 2
updates, totals := ledgertesting.RandomDeltasBalanced(accountChanges, accts[i-1], rewardLevel)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -2034,7 +2040,8 @@ func TestAcctUpdatesResources(t *testing.T) {
updates.UpsertAssetResource(addr1, aidx4, creatorParams, ledgercore.AssetHoldingDelta{Holding: &basics.AssetHolding{Amount: 0}})
}
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
base := accts[i-1]
@@ -2224,7 +2231,8 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
@@ -2440,7 +2448,8 @@ func TestAcctUpdatesLookupLatestCacheRetry(t *testing.T) {
newBlock := func(au *accountUpdates, rnd basics.Round, base map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas) {
rewardLevel := uint64(0)
- prevTotals, err := au.Totals(basics.Round(rnd - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, rnd-1, prevRound)
require.NoError(t, err)
newTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardLevel, protoParams, base, prevTotals)
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 4b7bcb545..eb812937f 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -445,7 +445,8 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
- prevTotals, err := au.Totals(basics.Round(i - 1))
+ prevRound, prevTotals, err := au.LatestTotals()
+ require.Equal(t, i-1, prevRound)
require.NoError(t, err)
newPool := totals[testPoolAddr]
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
index 33a04a691..887d49c86 100644
--- a/ledger/internal/eval_blackbox_test.go
+++ b/ledger/internal/eval_blackbox_test.go
@@ -422,11 +422,11 @@ ok:
return eval, addrs[0], err
}
-// TestEvalAppStateCountsWithTxnGroup ensures txns in a group can't violate app state schema limits
+// TestEvalAppStateCountsWithTxnGroupBlackbox ensures txns in a group can't violate app state schema limits
// the test ensures that
// commitToParent -> applyChild copies child's cow state usage counts into parent
// and the usage counts correctly propagated from parent cow to child cow and back
-func TestEvalAppStateCountsWithTxnGroup(t *testing.T) {
+func TestEvalAppStateCountsWithTxnGroupBlackbox(t *testing.T) {
partitiontest.PartitionTest(t)
_, _, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 1})
@@ -434,9 +434,9 @@ func TestEvalAppStateCountsWithTxnGroup(t *testing.T) {
require.Contains(t, err.Error(), "store bytes count 2 exceeds schema bytes count 1")
}
-// TestEvalAppAllocStateWithTxnGroup ensures roundCowState.deltas and applyStorageDelta
+// TestEvalAppAllocStateWithTxnGroupBlackbox ensures roundCowState.deltas and applyStorageDelta
// produce correct results when a txn group has storage allocate and storage update actions
-func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
+func TestEvalAppAllocStateWithTxnGroupBlackbox(t *testing.T) {
partitiontest.PartitionTest(t)
eval, addr, err := testEvalAppGroup(t, basics.StateSchema{NumByteSlice: 2})
diff --git a/ledger/internal/prefetcher/error.go b/ledger/internal/prefetcher/error.go
new file mode 100644
index 000000000..58b52f891
--- /dev/null
+++ b/ledger/internal/prefetcher/error.go
@@ -0,0 +1,43 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package prefetcher
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+// GroupTaskError indicates the group index of the unfulfilled resource
+type GroupTaskError struct {
+ err error
+ GroupIdx int64
+ Address *basics.Address
+ CreatableIndex basics.CreatableIndex
+ CreatableType basics.CreatableType
+}
+
+// Error satisfies builtin interface `error`
+func (err *GroupTaskError) Error() string {
+ return fmt.Sprintf("prefetch failed for groupIdx %d, address: %s, creatableIndex %d, creatableType %d, cause: %v",
+ err.GroupIdx, err.Address, err.CreatableIndex, err.CreatableType, err.err)
+}
+
+// Unwrap provides access to the underlying error
+func (err *GroupTaskError) Unwrap() error {
+ return err.err
+}
diff --git a/ledger/internal/prefetcher/prefetcher.go b/ledger/internal/prefetcher/prefetcher.go
index 24803ea2d..aa08d850e 100644
--- a/ledger/internal/prefetcher/prefetcher.go
+++ b/ledger/internal/prefetcher/prefetcher.go
@@ -69,7 +69,7 @@ type LoadedTransactionGroup struct {
// Err indicates whether any of the balances in this structure have failed to load. In case of an error, at least
// one of the entries in the balances would be uninitialized.
- Err error
+ Err *GroupTaskError
}
// accountPrefetcher used to prefetch accounts balances and resources before the evaluator is being called.
@@ -146,6 +146,7 @@ type preloaderTaskQueue struct {
type groupTaskDone struct {
groupIdx int64
err error
+ task *preloaderTask
}
func allocPreloaderQueue(count int, maxTxnGroupEntries int) preloaderTaskQueue {
@@ -408,7 +409,13 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
if done.err != nil {
// if there is an error, report the error to the output channel.
p.outChan <- LoadedTransactionGroup{
- Err: done.err,
+ Err: &GroupTaskError{
+ err: done.err,
+ GroupIdx: done.groupIdx,
+ Address: done.task.address,
+ CreatableIndex: done.task.creatableIndex,
+ CreatableType: done.task.creatableType,
+ },
}
return
}
@@ -463,14 +470,18 @@ func (gt *groupTask) markCompletionResource(idx int, res LoadedResourcesEntry, g
}
}
-func (gt *groupTask) markCompletionAcctError(err error, groupDoneCh chan groupTaskDone) {
+func (gt *groupTask) markCompletionAcctError(err error, task *preloaderTask, groupDoneCh chan groupTaskDone) {
for {
curVal := atomic.LoadInt64(&gt.incompleteCount)
if curVal <= 0 {
return
}
if atomic.CompareAndSwapInt64(&gt.incompleteCount, curVal, 0) {
- groupDoneCh <- groupTaskDone{groupIdx: gt.groupTaskIndex, err: err}
+ groupDoneCh <- groupTaskDone{
+ groupIdx: gt.groupTaskIndex,
+ err: err,
+ task: task,
+ }
return
}
}
@@ -558,6 +569,6 @@ func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, task
// in every case we get here, the task is gurenteed to be a non-nil.
for _, wt := range task.groups {
// notify the channel of the error.
- wt.markCompletionAcctError(err, groupDoneCh)
+ wt.markCompletionAcctError(err, task, groupDoneCh)
}
}
diff --git a/ledger/internal/prefetcher/prefetcher_alignment_test.go b/ledger/internal/prefetcher/prefetcher_alignment_test.go
index 015283508..ba14fbe03 100644
--- a/ledger/internal/prefetcher/prefetcher_alignment_test.go
+++ b/ledger/internal/prefetcher/prefetcher_alignment_test.go
@@ -259,7 +259,7 @@ func prefetch(t *testing.T, l prefetcher.Ledger, txn transactions.Transaction) l
loaded, ok := <-ch
require.True(t, ok)
- require.NoError(t, loaded.Err)
+ require.Nil(t, loaded.Err)
require.Equal(t, group, loaded.TxnGroup)
_, ok = <-ch
diff --git a/ledger/internal/prefetcher/prefetcher_test.go b/ledger/internal/prefetcher/prefetcher_test.go
index 2aa59cf78..87a2e9d63 100644
--- a/ledger/internal/prefetcher/prefetcher_test.go
+++ b/ledger/internal/prefetcher/prefetcher_test.go
@@ -18,6 +18,7 @@ package prefetcher_test
import (
"context"
+ "errors"
"testing"
"github.com/stretchr/testify/require"
@@ -49,19 +50,44 @@ func makeAddress(addressSeed int) (o basics.Address) {
const proto = protocol.ConsensusCurrentVersion
+type lookupError struct{}
+
+func (le lookupError) Error() string {
+ return "lookup error"
+}
+
+type assetLookupError struct{}
+
+func (ale assetLookupError) Error() string {
+ return "asset lookup error"
+}
+
+type getCreatorError struct{}
+
+func (gce getCreatorError) Error() string {
+ return "get creator error"
+}
+
type prefetcherTestLedger struct {
- round basics.Round
- balances map[basics.Address]ledgercore.AccountData
- creators map[basics.CreatableIndex]basics.Address
+ round basics.Round
+ balances map[basics.Address]ledgercore.AccountData
+ creators map[basics.CreatableIndex]basics.Address
+ errorTriggerAddress map[basics.Address]bool
}
+const errorTriggerCreatableIndex = 1000001
+const errorTriggerAssetIndex = 1000002
+
func (l *prefetcherTestLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) {
return bookkeeping.BlockHeader{}, nil
}
func (l *prefetcherTestLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
-func (l *prefetcherTestLedger) LookupWithoutRewards(_ basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
+func (l *prefetcherTestLedger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
+ if _, has := l.errorTriggerAddress[addr]; has {
+ return ledgercore.AccountData{}, l.round, lookupError{}
+ }
if data, has := l.balances[addr]; has {
return data, l.round, nil
}
@@ -71,9 +97,15 @@ func (l *prefetcherTestLedger) LookupApplication(rnd basics.Round, addr basics.A
return ledgercore.AppResource{}, nil
}
func (l *prefetcherTestLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx basics.AssetIndex) (ledgercore.AssetResource, error) {
+ if aidx == errorTriggerAssetIndex {
+ return ledgercore.AssetResource{}, assetLookupError{}
+ }
return ledgercore.AssetResource{}, nil
}
func (l *prefetcherTestLedger) GetCreatorForRound(_ basics.Round, cidx basics.CreatableIndex, _ basics.CreatableType) (basics.Address, bool, error) {
+ if cidx == errorTriggerCreatableIndex {
+ return basics.Address{}, false, getCreatorError{}
+ }
if addr, has := l.creators[cidx]; has {
return addr, true, nil
}
@@ -151,16 +183,13 @@ func compareLoadedResourcesEntries(t *testing.T, expected []prefetcher.LoadedRes
require.Equal(t, expectedForTest, actualForTest)
}
-func TestEvaluatorPrefetcher(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- rnd := basics.Round(5)
- var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+func getPrefetcherTestLedger(rnd basics.Round) *prefetcherTestLedger {
var ledger = &prefetcherTestLedger{
- round: rnd,
- balances: make(map[basics.Address]ledgercore.AccountData),
- creators: make(map[basics.CreatableIndex]basics.Address),
+ round: rnd,
+ balances: make(map[basics.Address]ledgercore.AccountData),
+ creators: make(map[basics.CreatableIndex]basics.Address),
+ errorTriggerAddress: make(map[basics.Address]bool),
}
ledger.balances[makeAddress(1)] = ledgercore.AccountData{
AccountBaseData: ledgercore.AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 100000000}},
@@ -168,6 +197,16 @@ func TestEvaluatorPrefetcher(t *testing.T) {
ledger.creators[1001] = makeAddress(2)
ledger.creators[2001] = makeAddress(15)
+ return ledger
+}
+
+func TestEvaluatorPrefetcher(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ ledger := getPrefetcherTestLedger(rnd)
type testCase struct {
name string
signedTxn transactions.SignedTxn
@@ -485,7 +524,7 @@ func TestEvaluatorPrefetcher(t *testing.T) {
loadedTxnGroup, ok := <-preloadedTxnGroupsCh
require.True(t, ok)
- require.NoError(t, loadedTxnGroup.Err)
+ require.Nil(t, loadedTxnGroup.Err)
compareLoadedAccountDataEntries(t, testCase.accounts, loadedTxnGroup.Accounts)
compareLoadedResourcesEntries(t, testCase.resources, loadedTxnGroup.Resources)
@@ -495,6 +534,144 @@ func TestEvaluatorPrefetcher(t *testing.T) {
}
}
+// Test for error from LookupAsset
+func TestAssetLookupError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ ledger := getPrefetcherTestLedger(rnd)
+ assetTransferTxn :=
+ transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: 1001,
+ AssetSender: makeAddress(2),
+ AssetReceiver: makeAddress(2),
+ AssetCloseTo: makeAddress(2),
+ },
+ },
+ }
+
+ errorReceived := false
+ groups := make([][]transactions.SignedTxnWithAD, 5)
+ for i := 0; i < 5; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, 2)
+ for j := 0; j < 2; j++ {
+ groups[i][j].SignedTxn = assetTransferTxn
+ if i == 2 {
+ // force error in asset lookup in the second txn group only
+ groups[i][j].SignedTxn.Txn.AssetTransferTxnFields.XferAsset = errorTriggerAssetIndex
+ }
+ }
+ }
+ preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+ for loadedTxnGroup := range preloadedTxnGroupsCh {
+ if loadedTxnGroup.Err != nil {
+ errorReceived = true
+ require.Equal(t, int64(2), loadedTxnGroup.Err.GroupIdx)
+ require.True(t, errors.Is(loadedTxnGroup.Err, assetLookupError{}))
+ require.Equal(t, makeAddress(2), *loadedTxnGroup.Err.Address)
+ require.Equal(t, errorTriggerAssetIndex, int(loadedTxnGroup.Err.CreatableIndex))
+ require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
+ }
+ }
+ require.True(t, errorReceived)
+}
+
+// Test for error from GetCreatorForRound
+func TestGetCreatorForRoundError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ ledger := getPrefetcherTestLedger(rnd)
+
+ createAssetTxn :=
+ transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ ConfigAsset: errorTriggerCreatableIndex,
+ },
+ },
+ }
+
+ errorReceived := false
+
+ groups := make([][]transactions.SignedTxnWithAD, 5)
+ for i := 0; i < 5; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, 10)
+ for j := 0; j < 10; j++ {
+ groups[i][j].SignedTxn = createAssetTxn
+ }
+ }
+ preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+
+ for loadedTxnGroup := range preloadedTxnGroupsCh {
+ if loadedTxnGroup.Err != nil {
+ errorReceived = true
+ require.True(t, errors.Is(loadedTxnGroup.Err, getCreatorError{}))
+ require.Nil(t, loadedTxnGroup.Err.Address)
+ require.Equal(t, errorTriggerCreatableIndex, int(loadedTxnGroup.Err.CreatableIndex))
+ require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
+ }
+ }
+ require.True(t, errorReceived)
+}
+
+// Test for error from LookupWithoutRewards
+func TestLookupWithoutRewards(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rnd := basics.Round(5)
+ var feeSinkAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+ ledger := getPrefetcherTestLedger(rnd)
+
+ createAssetTxn :=
+ transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: transactions.Header{
+ Sender: makeAddress(1),
+ },
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ ConfigAsset: 1001,
+ },
+ },
+ }
+
+ errorReceived := false
+
+ groups := make([][]transactions.SignedTxnWithAD, 5)
+ for i := 0; i < 5; i++ {
+ groups[i] = make([]transactions.SignedTxnWithAD, 10)
+ for j := 0; j < 10; j++ {
+ groups[i][j].SignedTxn = createAssetTxn
+ }
+ }
+ ledger.errorTriggerAddress[createAssetTxn.Txn.Sender] = true
+ preloadedTxnGroupsCh := prefetcher.PrefetchAccounts(context.Background(), ledger, rnd+100, groups, feeSinkAddr, config.Consensus[proto])
+
+ for loadedTxnGroup := range preloadedTxnGroupsCh {
+ if loadedTxnGroup.Err != nil {
+ errorReceived = true
+ require.True(t, errors.Is(loadedTxnGroup.Err, lookupError{}))
+ require.Equal(t, makeAddress(1), *loadedTxnGroup.Err.Address)
+ require.Equal(t, 0, int(loadedTxnGroup.Err.CreatableIndex))
+ require.Equal(t, basics.AssetCreatable, loadedTxnGroup.Err.CreatableType)
+ }
+ }
+ require.True(t, errorReceived)
+}
+
func TestEvaluatorPrefetcherQueueExpansion(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 550cf388d..a48385ff0 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -560,11 +560,7 @@ func (l *Ledger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
func (l *Ledger) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- totals, err := l.accts.Totals(rnd)
- if err != nil {
- return basics.MicroAlgos{}, err
- }
- return totals.Online.Money, nil
+ return l.accts.OnlineTotals(rnd)
}
// CheckDup return whether a transaction is a duplicate one.
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 3b3956449..b48be3676 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -1689,6 +1689,52 @@ func TestLedgerMemoryLeak(t *testing.T) {
}
}
+// TestLookupAgreement ensures LookupAgreement returns empty data for offline accounts
+func TestLookupAgreement(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ var addrOnline, addrOffline basics.Address
+ for addr, ad := range genesisInitState.Accounts {
+ if addrOffline.IsZero() {
+ addrOffline = addr
+ ad.Status = basics.Offline
+ crypto.RandBytes(ad.VoteID[:]) // this is invalid but we set VoteID to ensure the account gets cleared
+ genesisInitState.Accounts[addr] = ad
+ } else if ad.Status == basics.Online {
+ addrOnline = addr
+ crypto.RandBytes(ad.VoteID[:])
+ genesisInitState.Accounts[addr] = ad
+ break
+ }
+ }
+
+ const inMem = true
+ log := logging.TestingLog(t)
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
+ require.NoError(t, err, "could not open ledger")
+ defer ledger.Close()
+
+ oad, err := ledger.LookupAgreement(0, addrOnline)
+ require.NoError(t, err)
+ require.NotEmpty(t, oad)
+ ad, _, _, err := ledger.LookupLatest(addrOnline)
+ require.NoError(t, err)
+ require.NotEmpty(t, ad)
+ require.Equal(t, oad, ad.OnlineAccountData())
+
+ require.NoError(t, err)
+ oad, err = ledger.LookupAgreement(0, addrOffline)
+ require.NoError(t, err)
+ require.Empty(t, oad)
+ ad, _, _, err = ledger.LookupLatest(addrOffline)
+ require.NoError(t, err)
+ require.NotEmpty(t, ad)
+ require.Equal(t, oad, ad.OnlineAccountData())
+}
+
func BenchmarkLedgerStartup(b *testing.B) {
log := logging.TestingLog(b)
tmpDir, err := ioutil.TempDir(os.TempDir(), "BenchmarkLedgerStartup")
diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go
index b452b9548..5f141cdbb 100644
--- a/ledger/ledgercore/accountdata.go
+++ b/ledger/ledgercore/accountdata.go
@@ -152,6 +152,11 @@ func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (m
// OnlineAccountData calculates the online account data given an AccountData, by adding the rewards.
func (u *AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLevel uint64) basics.OnlineAccountData {
+ if u.Status != basics.Online {
+ // if the account is not Online and agreement requests it for some reason, clear it out
+ return basics.OnlineAccountData{}
+ }
+
microAlgos, _, _ := basics.WithUpdatedRewards(
proto, u.Status, u.MicroAlgos, u.RewardedMicroAlgos, u.RewardsBase, rewardsLevel,
)
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index a0107ba77..2fa488ec9 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -687,7 +687,7 @@ func (c *Client) RawAccountApplicationInformation(accountAddress string, applica
return
}
-// AccountAssetInformation gets account information about a given app.
+// AccountAssetInformation gets account information about a given asset.
func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64) (resp generatedV2.AccountAssetResponse, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
@@ -696,7 +696,7 @@ func (c *Client) AccountAssetInformation(accountAddress string, assetID uint64)
return
}
-// RawAccountAssetInformation gets account information about a given app.
+// RawAccountAssetInformation gets account information about a given asset.
func (c *Client) RawAccountAssetInformation(accountAddress string, assetID uint64) (accountResource modelV2.AccountAssetModel, err error) {
algod, err := c.ensureAlgodClient()
if err == nil {
diff --git a/logging/telemetry_test.go b/logging/telemetry_test.go
index cd40cc512..a8a913831 100644
--- a/logging/telemetry_test.go
+++ b/logging/telemetry_test.go
@@ -17,6 +17,7 @@
package logging
import (
+ "encoding/json"
"fmt"
"os"
"testing"
@@ -213,6 +214,44 @@ func TestDetails(t *testing.T) {
a.Equal(details, data[0]["details"])
}
+func TestHeartbeatDetails(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ f := makeTelemetryTestFixture(logrus.InfoLevel)
+
+ var hb telemetryspec.HeartbeatEventDetails
+ hb.Info.Version = "v2"
+ hb.Info.VersionNum = "1234"
+ hb.Info.Channel = "alpha"
+ hb.Info.Branch = "br0"
+ hb.Info.CommitHash = "abcd"
+ hb.Metrics = map[string]float64{
+ "Hello": 38.8,
+ }
+ f.telem.logEvent(f.l, telemetryspec.ApplicationState, telemetryspec.HeartbeatEvent, hb)
+
+ data := f.hookData()
+ a.NotNil(data)
+ a.Len(data, 1)
+ a.Equal(hb, data[0]["details"])
+
+ // assert JSON serialization is backwards compatible
+ js, err := json.Marshal(data[0])
+ a.NoError(err)
+ var unjs map[string]interface{}
+ a.NoError(json.Unmarshal(js, &unjs))
+ a.Contains(unjs, "details")
+ ev := unjs["details"].(map[string]interface{})
+ Metrics := ev["Metrics"].(map[string]interface{})
+ m := ev["m"].(map[string]interface{})
+ a.Equal("v2", Metrics["version"].(string))
+ a.Equal("1234", Metrics["version-num"].(string))
+ a.Equal("alpha", Metrics["channel"].(string))
+ a.Equal("br0", Metrics["branch"].(string))
+ a.Equal("abcd", Metrics["commit-hash"].(string))
+ a.InDelta(38.8, m["Hello"].(float64), 0.01)
+}
+
type testMetrics struct {
val string
}
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index f721fd7bd..3e11d1afd 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -43,7 +43,14 @@ const HeartbeatEvent Event = "Heartbeat"
// HeartbeatEventDetails contains details for the StartupEvent
type HeartbeatEventDetails struct {
- Metrics map[string]string
+ Info struct {
+ Version string `json:"version"`
+ VersionNum string `json:"version-num"`
+ Channel string `json:"channel"`
+ Branch string `json:"branch"`
+ CommitHash string `json:"commit-hash"`
+ } `json:"Metrics"` // backwards compatible name
+ Metrics map[string]float64 `json:"m"`
}
// CatchupStartEvent event
diff --git a/network/dialer.go b/network/dialer.go
index b674d6e53..8d7c18aaa 100644
--- a/network/dialer.go
+++ b/network/dialer.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/algorand/go-algorand/tools/network/dnssec"
+ "github.com/algorand/go-algorand/util"
)
type netDialer interface {
@@ -79,7 +80,7 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.
select {
case <-ctx.Done():
return nil, ctx.Err()
- case <-time.After(waitTime):
+ case <-util.NanoAfter(waitTime):
}
}
conn, err := d.innerDialContext(ctx, network, address)
diff --git a/network/rateLimitingTransport.go b/network/rateLimitingTransport.go
index 2ec611865..88c6fec6b 100644
--- a/network/rateLimitingTransport.go
+++ b/network/rateLimitingTransport.go
@@ -20,6 +20,8 @@ import (
"errors"
"net/http"
"time"
+
+ "github.com/algorand/go-algorand/util"
)
// rateLimitingTransport is the transport for execute a single HTTP transaction, obtaining the Response for a given Request.
@@ -57,17 +59,18 @@ func makeRateLimitingTransport(phonebook Phonebook, queueingTimeout time.Duratio
func (r *rateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response, err error) {
var waitTime time.Duration
var provisionalTime time.Time
- queueingTimedOut := time.After(r.queueingTimeout)
+ queueingDeadline := time.Now().Add(r.queueingTimeout)
for {
_, waitTime, provisionalTime = r.phonebook.GetConnectionWaitTime(req.Host)
if waitTime == 0 {
break // break out of the loop and proceed to the connection
}
- select {
- case <-time.After(waitTime):
- case <-queueingTimedOut:
- return nil, ErrConnectionQueueingTimeout
+ waitDeadline := time.Now().Add(waitTime)
+ if waitDeadline.Before(queueingDeadline) {
+ util.NanoSleep(waitTime)
+ continue
}
+ return nil, ErrConnectionQueueingTimeout
}
res, err = r.innerTransport.RoundTrip(req)
r.phonebook.UpdateConnectionTime(req.Host, provisionalTime)
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 422424587..be8cd5ee4 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -48,6 +48,7 @@ import (
"github.com/algorand/go-algorand/protocol"
tools_network "github.com/algorand/go-algorand/tools/network"
"github.com/algorand/go-algorand/tools/network/dnssec"
+ "github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/metrics"
)
@@ -1295,7 +1296,7 @@ func (wn *WebsocketNetwork) broadcastThread() {
}
}
select {
- case <-time.After(sleepDuration):
+ case <-util.NanoAfter(sleepDuration):
if (request != nil) && time.Now().After(requestDeadline) {
// message time have elapsed.
return true
diff --git a/scripts/buildtools/go.mod b/scripts/buildtools/go.mod
index 054b4ca8a..cf29e9304 100644
--- a/scripts/buildtools/go.mod
+++ b/scripts/buildtools/go.mod
@@ -1,10 +1,10 @@
module github.com/algorand/go-algorand/scripts/buildtools
-go 1.14
+go 1.16
require (
- github.com/algorand/msgp v1.1.49
- github.com/algorand/oapi-codegen v1.3.5-algorand5
+ github.com/algorand/msgp v1.1.50
+ github.com/algorand/oapi-codegen v1.3.7
github.com/go-swagger/go-swagger v0.25.0
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/tools v0.1.1 // indirect
diff --git a/scripts/buildtools/go.sum b/scripts/buildtools/go.sum
index f7f042dd7..bf37828f3 100644
--- a/scripts/buildtools/go.sum
+++ b/scripts/buildtools/go.sum
@@ -21,10 +21,10 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/algorand/msgp v1.1.49 h1:YBFRcYZNsD2WgzXONvzFrjv1/887pWzJSx874VL4P6g=
-github.com/algorand/msgp v1.1.49/go.mod h1:oyDY2SIeM1bytVYJTL88nt9kVeEBC00Avyqcnyrq/ec=
-github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI=
-github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY=
+github.com/algorand/msgp v1.1.50 h1:Mvsjs5LCE6HsXXbwJXD8ol1Y+c+QMoFNM4j0CY+mFGo=
+github.com/algorand/msgp v1.1.50/go.mod h1:R5sJrW9krk4YwNo+rs82Kq6V55q/zNgACwWqt3sQBM4=
+github.com/algorand/oapi-codegen v1.3.7 h1:TdXeGljgrnLXSCGPdeY6g6+i/G0Rr5CkjBgUJY6ht48=
+github.com/algorand/oapi-codegen v1.3.7/go.mod h1:UvOtAiP3hc0M2GUKBnZVTjLe3HKGDKh6y9rs3e3JyOg=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
diff --git a/scripts/buildtools/install_buildtools.sh b/scripts/buildtools/install_buildtools.sh
index 630b99310..edd346b38 100755
--- a/scripts/buildtools/install_buildtools.sh
+++ b/scripts/buildtools/install_buildtools.sh
@@ -70,16 +70,14 @@ function install_go_module {
# Check for version to go.mod version
VERSION=$(get_go_version "$1")
- # TODO: When we switch to 1.16 this should be changed to use 'go install'
- # instead of 'go get': https://tip.golang.org/doc/go1.16#modules
if [ -z "$VERSION" ]; then
echo "Unable to install requested package '$1' (${MODULE}): no version listed in ${SCRIPTPATH}/go.mod"
exit 1
else
- OUTPUT=$(GO111MODULE=on go get "${MODULE}@${VERSION}" 2>&1)
+ OUTPUT=$(go install "${MODULE}@${VERSION}" 2>&1)
fi
if [ $? != 0 ]; then
- echo "error: executing \"go get ${MODULE}\" failed : ${OUTPUT}"
+ echo "error: executing \"go install ${MODULE}\" failed : ${OUTPUT}"
exit 1
fi
}
diff --git a/scripts/create_and_deploy_recipe.sh b/scripts/create_and_deploy_recipe.sh
index bca4cc808..3e1121277 100755
--- a/scripts/create_and_deploy_recipe.sh
+++ b/scripts/create_and_deploy_recipe.sh
@@ -20,6 +20,7 @@
# directory as the recipe file path.
set -e
+set -x
if [[ "${AWS_ACCESS_KEY_ID}" = "" || "${AWS_SECRET_ACCESS_KEY}" = "" ]]; then
echo "You need to export your AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY for this to work"
diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh
index a42b30709..1dd22eda4 100755
--- a/scripts/get_golang_version.sh
+++ b/scripts/get_golang_version.sh
@@ -11,9 +11,9 @@
# Our build task-runner `mule` will refer to this script and will automatically
# build a new image whenever the version number has been changed.
-BUILD=1.14.7
-MIN=1.14
-GO_MOD_SUPPORT=1.12
+BUILD=1.16.15
+MIN=1.16
+GO_MOD_SUPPORT=1.16
if [ "$1" = all ]
then
diff --git a/scripts/install_linux_deps.sh b/scripts/install_linux_deps.sh
index 78c42cd35..791cad080 100755
--- a/scripts/install_linux_deps.sh
+++ b/scripts/install_linux_deps.sh
@@ -5,7 +5,7 @@ set -e
DISTRIB=$ID
ARCH_DEPS="boost boost-libs expect jq autoconf shellcheck sqlite python-virtualenv"
-UBUNTU_DEPS="libtool libboost-math-dev expect jq autoconf shellcheck sqlite3 python3-venv"
+UBUNTU_DEPS="libtool libboost-math-dev expect jq autoconf shellcheck sqlite3 python3-venv build-essential"
FEDORA_DEPS="boost-devel expect jq autoconf ShellCheck sqlite python-virtualenv"
if [ "${DISTRIB}" = "arch" ]; then
diff --git a/scripts/travis/before_build.sh b/scripts/travis/before_build.sh
index b07efe33f..3dbb7339d 100755
--- a/scripts/travis/before_build.sh
+++ b/scripts/travis/before_build.sh
@@ -12,7 +12,6 @@ set -e
GOPATH=$(go env GOPATH)
export GOPATH
-export GO111MODULE=on
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
OS=$("${SCRIPTPATH}"/../ostype.sh)
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index b5d4c35f9..7cf8de405 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -36,6 +36,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/db"
)
@@ -138,7 +139,7 @@ func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64
if currentTps > float64(cfg.TxnPerSec) {
sleepSec := float64(totalSent)/float64(cfg.TxnPerSec) - localTimeDelta.Seconds()
sleepTime := time.Duration(int64(math.Round(sleepSec*1000))) * time.Millisecond
- time.Sleep(sleepTime)
+ util.NanoSleep(sleepTime)
}
}
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index c26981e61..71d787f79 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -36,6 +36,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util"
)
// CreatablesInfo has information about created assets, apps and opting in
@@ -882,7 +883,7 @@ func (pps *WorkerState) sendFromTo(
timeCredit -= took
if timeCredit > 0 {
time.Sleep(timeCredit)
- timeCredit = time.Duration(0)
+ timeCredit -= time.Since(now)
} else if timeCredit < -1000*time.Millisecond {
// cap the "time debt" to 1000 ms.
timeCredit = -1000 * time.Millisecond
@@ -1232,7 +1233,7 @@ func (t *throttler) maybeSleep(count int) {
desiredSeconds := float64(countsum) / t.xps
extraSeconds := desiredSeconds - dt.Seconds()
t.iterm += 0.1 * extraSeconds / float64(len(t.times))
- time.Sleep(time.Duration(int64(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times)))))
+ util.NanoSleep(time.Duration(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times))))
} else {
t.iterm *= 0.95
diff --git a/test/README.md b/test/README.md
index 15d82e07a..1b9f859d1 100644
--- a/test/README.md
+++ b/test/README.md
@@ -51,10 +51,4 @@ To run a specific test, run e2e.sh with -i interactive flag, and follow the inst
test/scripts/e2e.sh -i
```
-Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary.
-
-### Updating Indexer E2E test input
-
-Indexer `make e2e` runs tests using the output of go-algorand `e2e_subs` tests as input. The process for making new inputs available via S3 is manual.
-
-Test modifiers _must_ make a best-effort attempt to remember to upload new artifacts to S3 when modifying tests. Here's a step-by-step process overview: [https://github.com/algorand/indexer/blob/develop/misc/README.md](https://github.com/algorand/indexer/blob/develop/misc/README.md). \ No newline at end of file
+Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary. \ No newline at end of file
diff --git a/test/e2e-go/cli/goal/account_test.go b/test/e2e-go/cli/goal/account_test.go
index 67165a6d0..b01eb6fda 100644
--- a/test/e2e-go/cli/goal/account_test.go
+++ b/test/e2e-go/cli/goal/account_test.go
@@ -22,12 +22,15 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
const statusOffline = "[offline]"
const statusOnline = "[online]"
func TestAccountNew(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
@@ -54,6 +57,8 @@ func TestAccountNew(t *testing.T) {
}
func TestAccountNewDuplicateFails(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
@@ -70,6 +75,8 @@ func TestAccountNewDuplicateFails(t *testing.T) {
}
func TestAccountRename(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
@@ -101,6 +108,8 @@ func TestAccountRename(t *testing.T) {
// Importing an account multiple times should not be considered an error by goal
func TestAccountMultipleImportRootKey(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
diff --git a/test/e2e-go/cli/goal/clerk_test.go b/test/e2e-go/cli/goal/clerk_test.go
index 948eb1a61..851eb1913 100644
--- a/test/e2e-go/cli/goal/clerk_test.go
+++ b/test/e2e-go/cli/goal/clerk_test.go
@@ -24,9 +24,12 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
func TestClerkSendNoteEncoding(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
a := require.New(fixtures.SynchronizedTest(t))
diff --git a/test/e2e-go/cli/goal/node_cleanup_test.go b/test/e2e-go/cli/goal/node_cleanup_test.go
index 8365adbe5..7ad3eaab0 100644
--- a/test/e2e-go/cli/goal/node_cleanup_test.go
+++ b/test/e2e-go/cli/goal/node_cleanup_test.go
@@ -23,9 +23,12 @@ import (
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
func TestGoalNodeCleanup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
defer fixtures.ShutdownSynchronizedTest(t)
defer fixture.SetTestContext(t)()
diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go
index 9b4143d43..a7dd6452e 100644
--- a/test/e2e-go/features/participation/participationRewards_test.go
+++ b/test/e2e-go/features/participation/participationRewards_test.go
@@ -358,7 +358,7 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1))
balanceOfRewardsPool, roundQueried := fixture.GetBalanceAndRound(rewardsAccount)
if roundQueried != rewardRecalcRound-1 {
- r.FailNow("got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
+ r.FailNow("", "got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
}
lastRoundBeforeRewardRecals, err := client.Block(rewardRecalcRound - 1)
r.NoError(err)
@@ -381,7 +381,7 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1))
balanceOfRewardsPool, roundQueried = fixture.GetBalanceAndRound(rewardsAccount)
if roundQueried != rewardRecalcRound-1 {
- r.FailNow("got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
+ r.FailNow("", "got rewards pool balance on round %d but wanted the balance on round %d, failing out", rewardRecalcRound-1, roundQueried)
}
lastRoundBeforeRewardRecals, err = client.Block(rewardRecalcRound - 1)
r.NoError(err)
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index dd7378ef7..742e914c4 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -57,6 +57,7 @@ func TestAccountsCanSendMoney(t *testing.T) {
// this test checks that two accounts' balances stay up to date
// as they send each other money many times
func TestDevModeAccountsCanSendMoney(t *testing.T) {
+ partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
numberOfSends := 25
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index a587bc201..0d4a63d7e 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -158,7 +158,30 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then
./timeout 200 ./e2e_basic_start_stop.sh
duration "e2e_basic_start_stop.sh"
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py}
+ echo "Current platform: ${E2E_PLATFORM}"
+
+ KEEP_TEMPS_CMD_STR=""
+
+ # If the platform is arm64, we want to pass "--keep-temps" into e2e_client_runner.py
+ # so that we can keep the temporary test artifact for use in the indexer e2e tests.
+ # The file is located at ${TEMPDIR}/net_done.tar.bz2
+ if [ $E2E_PLATFORM == "arm64" ]; then
+ KEEP_TEMPS_CMD_STR="--keep-temps"
+ fi
+
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${KEEP_TEMPS_CMD_STR} ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py}
+
+ # If the temporary artifact directory exists, then the test artifact needs to be created
+ if [ -d "${TEMPDIR}/net" ]; then
+ pushd "${TEMPDIR}" || exit 1
+ tar -j -c -f net_done.tar.bz2 --exclude node.log --exclude agreement.cdv net
+ rm -rf "${TEMPDIR}/net"
+ RSTAMP=$(TZ=UTC python -c 'import time; print("{:08x}".format(0xffffffff - int(time.time() - time.mktime((2020,1,1,0,0,0,-1,-1,-1)))))')
+ echo aws s3 cp --acl public-read "${TEMPDIR}/net_done.tar.bz2" s3://algorand-testdata/indexer/e2e4/"${RSTAMP}"/net_done.tar.bz2
+ aws s3 cp --acl public-read "${TEMPDIR}/net_done.tar.bz2" s3://algorand-testdata/indexer/e2e4/"${RSTAMP}"/net_done.tar.bz2
+ popd
+ fi
+
duration "parallel client runner"
for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
diff --git a/test/scripts/e2e_go_tests.sh b/test/scripts/e2e_go_tests.sh
index 560808a7c..b1ce3a355 100755
--- a/test/scripts/e2e_go_tests.sh
+++ b/test/scripts/e2e_go_tests.sh
@@ -6,7 +6,6 @@ set -e
set -o pipefail
export GOPATH=$(go env GOPATH)
-export GO111MODULE=on
# Needed for now because circleci doesn't use makefile yet
if [ -z "$(which gotestsum)" ]; then
diff --git a/test/scripts/e2e_subs/assets-app-b.sh b/test/scripts/e2e_subs/assets-app-b.sh
index 63c1431a8..7aac4615d 100755
--- a/test/scripts/e2e_subs/assets-app-b.sh
+++ b/test/scripts/e2e_subs/assets-app-b.sh
@@ -3,7 +3,9 @@
#
# assets-app.sh and assets-app-b.sh both test the same TEAL app script, but in two separate parallelizeable chunks
-date '+assets-app-b start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
set -ex
set -o pipefail
@@ -30,7 +32,6 @@ wait $WA
wait $WB
wait $WC
-ZERO='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ'
SUPPLY=10000000
XFER1=1000
XFER2=42
@@ -44,12 +45,12 @@ ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
ERR_APP_OI_STR3='is not currently opted in'
ERR_APP_REJ_STR1='transaction rejected by ApprovalProgram'
-ERR_APP_REJ_STR2='TEAL runtime encountered err opcode'
+ERR_APP_REJ_STR2='err opcode executed'
ERR_APP_REJ_STR3='- would result negative'
### Reconfiguration, default-frozen, and clawback
-date '+assets-app wat4 %Y%m%d_%H%M%S'
+date "+$scriptname wat4 %Y%m%d_%H%M%S"
# create frozen
APP_ID=$(${gcmd} app interact execute --header ${DIR}/asa.json --from $CREATOR --approval-prog ${DIR}/asa_approve.teal --clear-prog ${DIR}/asa_clear.teal create --manager $MANAGER --reserve $CREATOR --freezer $MANAGER --clawback $MANAGER --supply $SUPPLY --default-frozen 1 | grep "$APP_CREATED_STR" | cut -d ' ' -f 6)
@@ -57,29 +58,28 @@ APP_ID=$(${gcmd} app interact execute --header ${DIR}/asa.json --from $CREATOR -
qcmd="${gcmd} app interact query --header ${DIR}/asa.json --app-id $APP_ID"
xcmd="${gcmd} app interact execute --header ${DIR}/asa.json --app-id $APP_ID"
+function assertContains {
+ if [[ $1 != *"$2"* ]]; then
+ echo "$1" does not contain "$2"
+ date "+$scriptname FAIL $3 %Y%m%d_%H%M%S"
+ false
+ fi
+}
+
# destroy bad manager F
RES=$(${xcmd} --from $CREATOR destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL non-manager should not be able to delete asset %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-manager should not be able to delete asset"
# optin alice
${xcmd} --from $ALICE opt-in
# xfer1 F
RES=$(${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to receive %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to receive"
# bad unfreeze F
RES=$(${xcmd} --from $ALICE freeze --frozen 0 --target $ALICE 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL non-freezer should not be able to unfreeze account %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-freezer should not be able to unfreeze account"
# set freezer alice
${xcmd} --from $MANAGER reconfigure --manager $MANAGER --reserve $CREATOR --freezer $ALICE --clawback $MANAGER
@@ -95,19 +95,13 @@ ${xcmd} --from $ALICE freeze --frozen 1 --target $ALICE
# xfer1 F
RES=$(${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL re-frozen account should not be able to receive %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "re-frozen account should not be able to receive"
-date '+assets-app wat6 %Y%m%d_%H%M%S'
+date "+$scriptname wat6 %Y%m%d_%H%M%S"
# closeout F
RES=$(${xcmd} --from $ALICE close-out --close-to $CREATOR 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to closeout w/o clear %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to closeout w/o clear"
# clear alice
${xcmd} --from $ALICE clear
@@ -120,10 +114,7 @@ ${xcmd} --from $MANAGER clawback --sender $CREATOR --receiver $BOB --amount $XFE
# destroy F
RES=$(${xcmd} --from $MANAGER destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL should not be able to delete asset while outstanding holdings exist %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "should not be able to delete asset while outstanding holdings exist"
# clawback
${xcmd} --from $MANAGER clawback --sender $BOB --receiver $CREATOR --amount $XFER1
@@ -134,4 +125,4 @@ ${xcmd} --from $MANAGER destroy
# clear bob
${xcmd} --from $BOB clear
-date '+assets-app-b done %Y%m%d_%H%M%S'
+date "+$scriptname done %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/assets-app.sh b/test/scripts/e2e_subs/assets-app.sh
index a295de681..1fa2d8f5c 100755
--- a/test/scripts/e2e_subs/assets-app.sh
+++ b/test/scripts/e2e_subs/assets-app.sh
@@ -3,7 +3,9 @@
#
# assets-app.sh and assets-app-b.sh both test the same TEAL app script, but in two separate parallelizeable chunks
-date '+assets-app start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
set -ex
set -o pipefail
@@ -30,7 +32,6 @@ wait $WA
wait $WB
wait $WC
-ZERO='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ'
SUPPLY=10000000
XFER1=1000
XFER2=42
@@ -44,7 +45,7 @@ ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
ERR_APP_OI_STR3='is not currently opted in'
ERR_APP_REJ_STR1='transaction rejected by ApprovalProgram'
-ERR_APP_REJ_STR2='TEAL runtime encountered err opcode'
+ERR_APP_REJ_STR2='err opcode executed'
ERR_APP_REJ_STR3='- would result negative'
### Basic reading, creation, deletion, transfers, and freezing
@@ -55,27 +56,32 @@ APP_ID=$(${gcmd} app interact execute --header ${DIR}/asa.json --from $CREATOR -
qcmd="${gcmd} app interact query --header ${DIR}/asa.json --app-id $APP_ID"
xcmd="${gcmd} app interact execute --header ${DIR}/asa.json --app-id $APP_ID"
-date '+assets-app created %Y%m%d_%H%M%S'
+date "+$scriptname created %Y%m%d_%H%M%S"
+
+function assertContains {
+ if [[ $1 != *"$2"* ]]; then
+ echo "$1" does not contain "$2"
+ date "+$scriptname FAIL $3 %Y%m%d_%H%M%S"
+ false
+ fi
+}
# read global
RES=$(${qcmd} total-supply)
if [[ $RES != $SUPPLY ]]; then
- date "+assets-app FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} creator-balance)
if [[ $RES != $SUPPLY ]]; then
- date "+assets-app FAIL expected creator to begin with $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected creator to begin with $SUPPLY %Y%m%d_%H%M%S"
false
fi
# read alice F
RES=$(${qcmd} --from $ALICE balance 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR1"* ]]; then
- date '+assets-app FAIL expected read of non-opted in account to fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR1" "expected read of non-opted in account to fail"
# optin alice
${xcmd} --from $ALICE opt-in
@@ -83,24 +89,21 @@ ${xcmd} --from $ALICE opt-in
# read alice
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != '0' ]]; then
- date '+assets-app FAIL expected opted-in account to start with no balance %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opted-in account to start with no balance %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} --from $ALICE frozen)
if [[ $RES != '0' ]]; then
- date '+assets-app FAIL expected opted-in account to be non-frozen %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opted-in account to be non-frozen %Y%m%d_%H%M%S"
false
fi
-date '+assets-app wat1 %Y%m%d_%H%M%S'
+date "+$scriptname wat1 %Y%m%d_%H%M%S"
# xfer0 creator -> bob F
RES=$(${xcmd} --from $CREATOR transfer --receiver $BOB --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR2"* ]]; then
- date '+assets-app FAIL transfer succeeded on account which has not opted in %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR2" "transfer succeeded on account which has not opted in"
# xfer1 (2) creator -> alice
${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 &
@@ -113,35 +116,26 @@ wait $WB
# read alice
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != $(( $XFER1 + $XFER1 )) ]]; then
- date "+assets-app FAIL transfer recipient does not have $XFER1 %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL transfer recipient does not have $XFER1 %Y%m%d_%H%M%S"
false
fi
# destroy F
RES=$(${xcmd} --from $CREATOR destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL should not be able to destroy asset while outstanding holdings exist %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "should not be able to destroy asset while outstanding holdings exist"
# freeze
${xcmd} --from $CREATOR freeze --frozen 1 --target $ALICE
# xfer2 alice -> creator F
RES=$(${xcmd} --from $ALICE transfer --receiver $CREATOR --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to send %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to send"
-date '+assets-app wat2 %Y%m%d_%H%M%S'
+date "+$scriptname wat2 %Y%m%d_%H%M%S"
# xfer1 creator -> alice F
RES=$(${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date "+assets-app FAIL frozen account should not be able to receive %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "frozen account should not be able to receive"
# unfreeze
${xcmd} --from $CREATOR freeze --frozen 0 --target $ALICE
@@ -151,30 +145,21 @@ ${xcmd} --from $CREATOR transfer --receiver $ALICE --amount $XFER1
# xfer5 alice |-> alice F
RES=$(${xcmd} --from $ALICE close-out --close-to $ALICE 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date "+assets-app FAIL closing to self not permitted %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "closing to self not permitted"
# optin bob
${xcmd} --from $BOB opt-in
# xfer3 alice -> bob overdraw F
RES=$(${xcmd} --from $ALICE transfer --receiver $BOB --amount $XFER3 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date "+assets-app FAIL overdraws are not permitted %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "overdraws are not permitted"
# xfer4 alice -> creator |-> bob
${xcmd} --from $ALICE close-out --receiver $CREATOR --amount $XFER4 --close-to $BOB
# xfer5 bob |-> alice F
RES=$(${xcmd} --from $BOB close-out --close-to $ALICE 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR2"* ]]; then
- date "+assets-app FAIL transfer succeeded on account which has closed out %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR2" "transfer succeeded on account which has closed out"
# optin alice
${xcmd} --from $ALICE opt-in
@@ -187,35 +172,23 @@ ${xcmd} --from $ALICE clear
# clear alice F
RES=$(${xcmd} --from $ALICE clear 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR3"* ]]; then
- date "+assets-app FAIL should not be able to clear asset holding twice %Y%m%d_%H%M%S"
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR3" "should not be able to clear asset holding twice"
# destroy
${xcmd} --from $CREATOR destroy
-date '+assets-app wat3 %Y%m%d_%H%M%S'
+date "+$scriptname wat3 %Y%m%d_%H%M%S"
# destroy F
RES=$(${xcmd} --from $CREATOR destroy 2>&1 || true)
-if [[ $RES != *"$ERR_APP_CL_STR"* ]]; then
- date '+assets-app FAIL second deletion of application should fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_CL_STR" "second deletion of application should fail"
# optin alice F
RES=$(${xcmd} --from $ALICE opt-in 2>&1 || true)
-if [[ $RES != *"$ERR_APP_CL_STR"* ]]; then
- date '+assets-app FAIL optin of deleted application should fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_CL_STR" "optin of deleted application should fail"
# read global F
RES=$(${qcmd} total-supply 2>&1 || true)
-if [[ $RES != *"$ERR_APP_NE_STR"* ]]; then
- date '+assets-app FAIL read global of deleted application should fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_NE_STR" "read global of deleted application should fail"
-date '+assets-app done %Y%m%d_%H%M%S'
+date "+$scriptname done %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
index 7978de676..b87b8ae1c 100755
--- a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
+++ b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
@@ -15,7 +15,6 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTD=$(${gcmd} account new|awk '{ print $6 }')
-ZERO_ADDRESS=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ
LEASE=uImiLf+mqOqs0BFsqIUHBh436N/z964X50e3P9Ii4ac=
# Fund ACCOUNTB
diff --git a/test/scripts/e2e_subs/sectok-app.sh b/test/scripts/e2e_subs/sectok-app.sh
index 2216266ce..46154723f 100755
--- a/test/scripts/e2e_subs/sectok-app.sh
+++ b/test/scripts/e2e_subs/sectok-app.sh
@@ -1,7 +1,9 @@
#!/usr/bin/env bash
# TIMEOUT=380
-date '+sectok-app start %Y%m%d_%H%M%S'
+filename=$(basename "$0")
+scriptname="${filename%.*}"
+date "+${scriptname} start %Y%m%d_%H%M%S"
set -ex
set -o pipefail
@@ -30,7 +32,6 @@ wait $WB
wait $WC
# ${gcmd} clerk send -a 100000000 -f ${CREATOR} -t ${MANAGER}
-ZERO='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ'
SUPPLY=10000000
XFER1=1000
XFER2=42
@@ -45,7 +46,7 @@ ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
ERR_APP_OI_STR3='is not currently opted in'
ERR_APP_REJ_STR1='transaction rejected by ApprovalProgram'
-ERR_APP_REJ_STR2='TEAL runtime encountered err opcode'
+ERR_APP_REJ_STR2='err opcode executed'
ERR_APP_REJ_STR3='- would result negative'
# create
@@ -57,22 +58,27 @@ qcmd="${gcmd} app interact query --header ${DIR}/sectok.json --app-id ${APP_ID}"
# read global
RES=$(${qcmd} total-supply)
if [[ $RES != $SUPPLY ]]; then
- date "+sectok-app FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected supply to be set to $SUPPLY %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} reserve-supply)
if [[ $RES != $SUPPLY ]]; then
- date "+sectok-app FAIL expected reserve to begin with $SUPPLY %Y%m%d_%H%M%S"
+ date "+$scriptname FAIL expected reserve to begin with $SUPPLY %Y%m%d_%H%M%S"
false
fi
+function assertContains {
+ if [[ $1 != *"$2"* ]]; then
+ echo "$1" does not contain "'$2'"
+ date "+$scriptname FAIL $3 %Y%m%d_%H%M%S"
+ false
+ fi
+}
+
# read alice F
RES=$(${qcmd} --from $ALICE balance 2>&1 || true)
-if [[ $RES != *"$ERR_APP_OI_STR1"* ]]; then
- date '+sectok-app FAIL expected read of non-opted in account to fail %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_OI_STR1" "expected read of non-opted in account to fail"
# optin alice, bob, carol
${xcmd} --from $ALICE opt-in &
@@ -87,65 +93,41 @@ wait $WC
RES=$(${qcmd} --from $ALICE transfer-group)
if [[ $RES != '0' ]]; then
- date '+sectok-app FAIL expected opt-in account to start with transfer group 0 %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opt-in account to start with transfer group 0 %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != '0' ]]; then
- date '+sectok-app FAIL expected opt-in account to start with 0 balance %Y%m%d_%H%M%S'
+ date "+$scriptname FAIL expected opt-in account to start with 0 balance %Y%m%d_%H%M%S"
false
fi
# assorted transfer-admin restrictions
RES=$(${xcmd} --from $CREATOR set-transfer-group --target $ALICE --transfer-group 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL contract-admins cannot set transfer groups %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "contract-admins cannot set transfer groups"
RES=$(${xcmd} --from $CREATOR set-lock-until --target $ALICE --lock-until 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL contract-admins cannot set lock-until %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "contract-admins cannot set lock-until"
RES=$(${xcmd} --from $CREATOR set-max-balance --target $ALICE --max-balance $SUPPLY 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL contract-admins cannot set max balance %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "contract-admins cannot set max balance"
RES=$(${xcmd} --from $ALICE set-transfer-group --target $ALICE --transfer-group 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot set transfer groups %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot set transfer groups"
RES=$(${xcmd} --from $ALICE set-lock-until --target $ALICE --lock-until 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot set lock-until %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot set lock-until"
RES=$(${xcmd} --from $ALICE set-max-balance --target $ALICE --max-balance $SUPPLY 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot set max balance %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot set max balance"
# setting transfer-admin
RES=$(${xcmd} --from $ALICE freeze --target $ALICE --frozen 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins cannot freeze accounts %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins cannot freeze accounts"
RES=$(${xcmd} --from $ALICE set-transfer-admin --target $ALICE --status 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date '+sectok-app FAIL non-admins cannot set transfer admin status %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "non-admins cannot set transfer admin status"
${xcmd} --from $CREATOR set-transfer-admin --target $ALICE --status 1
${xcmd} --from $ALICE freeze --target $ALICE --frozen 1
@@ -153,10 +135,7 @@ ${xcmd} --from $ALICE set-max-balance --target $ALICE --max-balance $SUPPLY
${xcmd} --from $CREATOR set-transfer-admin --target $ALICE --status 0
RES=$(${xcmd} --from $ALICE freeze --target $ALICE --frozen 0 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR1"* ]]; then
- date '+sectok-app FAIL non-admins (revoked) cannot freeze accounts %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR1" "non-admins (revoked) cannot freeze accounts"
# setting contract-admin
${xcmd} --from $CREATOR set-contract-admin --target $BOB --status 1
@@ -164,16 +143,10 @@ ${xcmd} --from $BOB set-transfer-admin --target $ALICE --status 1
${xcmd} --from $CREATOR set-contract-admin --target $BOB --status 0
RES=$(${xcmd} --from $BOB set-transfer-admin --target $ALICE --status 0 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date '+sectok-app FAIL non-admins cannot set transfer admin status %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "non-admins cannot set transfer admin status"
RES=$(${xcmd} --from $BOB set-contract-admin --target $BOB --status 1 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR2"* ]]; then
- date '+sectok-app FAIL non-admins cannot set own contract admin status %Y%m%d_%H%M%S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR2" "non-admins cannot set own contract admin status"
# minting/burning
${xcmd} --from $CREATOR mint --target $ALICE --amount $XFER1 &
@@ -185,13 +158,13 @@ wait $WB
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != $(( $XFER1 + $XFER1 )) ]]; then
- date '+sectok-app FAIL minting twice did not produce the correct balance %Y%m%d_%H%M% S'
+ date "+$scriptname FAIL minting twice did not produce the correct balance %Y%m%d_%H%M%S"
false
fi
RES=$(${qcmd} reserve-supply)
if [[ $RES != $(( $SUPPLY - $XFER1 - $XFER1 )) ]]; then
- date '+sectok-app FAIL minting twice did not produce the correct reserve balance %Y%m%d_%H%M% S'
+ date "+$scriptname FAIL minting twice did not produce the correct reserve balance %Y%m%d_%H%M%S"
false
fi
@@ -199,7 +172,7 @@ ${xcmd} --from $CREATOR burn --target $ALICE --amount $XFER1
RES=$(${qcmd} --from $ALICE balance)
if [[ $RES != $XFER1 ]]; then
- date '+sectok-app FAIL minting and then burning did not produce the correct balance %Y%m%d_%H%M% S'
+ date "+$scriptname FAIL minting and then burning did not produce the correct balance %Y%m%d_%H%M%S"
false
fi
@@ -209,10 +182,7 @@ ${xcmd} --from $CREATOR burn --target $ALICE --amount $XFER1
${xcmd} --from $CREATOR mint --target $CAROL --amount $XFER1
RES=$(${xcmd} --from $CAROL transfer --receiver $BOB --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date '+sectok-app FAIL new account should not be able to spend %Y%m%d_%H%M% S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "new account should not be able to spend"
${xcmd} --from $ALICE set-max-balance --target $CAROL --max-balance $SUPPLY &
WA=$!
@@ -236,18 +206,12 @@ wait $WA
wait $WB
RES=$(${xcmd} --from $CAROL transfer --receiver $BOB --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date '+sectok-app FAIL no transfers allowed without transfer rules %Y%m%d_%H%M% S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "no transfers allowed without transfer rules"
${xcmd} --from $ALICE set-transfer-rule --send-group 1 --receive-group 2 --lock-until 1
${xcmd} --from $CAROL transfer --receiver $BOB --amount $XFER2
RES=$(${xcmd} --from $BOB transfer --receiver $CAROL --amount $XFER2 2>&1 || true)
-if [[ $RES != *"$ERR_APP_REJ_STR3"* ]]; then
- date '+sectok-app FAIL reverse transfer (by group) should fail %Y%m%d_%H%M% S'
- false
-fi
+assertContains "$RES" "$ERR_APP_REJ_STR3" "reverse transfer (by group) should fail"
-date '+sectok-app done %Y%m%d_%H%M%S'
+date "+$scriptname done %Y%m%d_%H%M%S"
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
index 9e37558fa..8200ee38c 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json
@@ -3,7 +3,7 @@
"VersionModifier": "",
"ConsensusProtocol": "",
"FirstPartKeyRound": 0,
- "LastPartKeyRound": 3000000,
+ "LastPartKeyRound": 50000,
"PartKeyDilution": 0,
"Wallets": [
{
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/Makefile b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
new file mode 100644
index 000000000..f7fd6a43e
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/Makefile
@@ -0,0 +1,12 @@
+PARAMS=-w 10 -R 4 -N 5 -n 10 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: net.json genesis.json
+
+net.json: node.json nonPartNode.json
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json:
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py b/test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py
new file mode 100644
index 000000000..6a571983b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/gen_topology.py
@@ -0,0 +1,30 @@
+node_types = {"R":4, "N":5, "NPN":1}
+node_size = {"R":"-Large", "N":"-Large", "NPN":"-Large"}
+regions = [
+ "AWS-US-EAST-2",
+ "AWS-US-WEST-1",
+ "AWS-EU-WEST-2",
+ "AWS-EU-CENTRAL-1"
+]
+
+f = open("topology.json", "w")
+f.write("{ \"Hosts\":\n [")
+
+region_count = len(regions)
+first = True
+for x in node_types:
+ node_type = x
+ node_count = node_types[x]
+ region_size = node_size[x]
+ for i in range(node_count):
+ node_name = node_type + str(i+1)
+ region = regions[i%region_count]
+ if (first ):
+ first = False
+ else:
+ f.write(",")
+ f.write ("\n {\n \"Name\": \"" + node_name + "\",\n \"Template\": \"" + region + region_size + "\"\n }" )
+
+f.write("\n ]\n}\n")
+f.close()
+
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/genesis.json b/test/testdata/deployednettemplates/recipes/txnsync/genesis.json
new file mode 100644
index 000000000..8f33dee69
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/genesis.json
@@ -0,0 +1,69 @@
+{
+ "NetworkName": "",
+ "VersionModifier": "",
+ "ConsensusProtocol": "",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 3000000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 50,
+ "Online": false
+ }
+ ],
+ "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/net.json b/test/testdata/deployednettemplates/recipes/txnsync/net.json
new file mode 100644
index 000000000..1be022b14
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/net.json
@@ -0,0 +1,311 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N2",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N3",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N4",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "N5",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ },
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/node.json b/test/testdata/deployednettemplates/recipes/txnsync/node.json
new file mode 100644
index 000000000..b408e2ff7
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/node.json
@@ -0,0 +1,11 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }",
+ "FractionApply": 1.0
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json b/test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json
new file mode 100644
index 000000000..8ab3b8bdd
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/recipe.json b/test/testdata/deployednettemplates/recipes/txnsync/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/relay.json b/test/testdata/deployednettemplates/recipes/txnsync/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/txnsync/topology.json b/test/testdata/deployednettemplates/recipes/txnsync/topology.json
new file mode 100644
index 000000000..f72f27a08
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/txnsync/topology.json
@@ -0,0 +1,44 @@
+{ "Hosts":
+ [
+ {
+ "Name": "R1",
+ "Template": "AWS-US-EAST-2-Large"
+ },
+ {
+ "Name": "R2",
+ "Template": "AWS-US-WEST-1-Large"
+ },
+ {
+ "Name": "R3",
+ "Template": "AWS-EU-WEST-2-Large"
+ },
+ {
+ "Name": "R4",
+ "Template": "AWS-EU-CENTRAL-1-Large"
+ },
+ {
+ "Name": "N1",
+ "Template": "AWS-US-EAST-2-Large"
+ },
+ {
+ "Name": "N2",
+ "Template": "AWS-US-WEST-1-Large"
+ },
+ {
+ "Name": "N3",
+ "Template": "AWS-EU-WEST-2-Large"
+ },
+ {
+ "Name": "N4",
+ "Template": "AWS-EU-CENTRAL-1-Large"
+ },
+ {
+ "Name": "N5",
+ "Template": "AWS-US-EAST-2-Large"
+ },
+ {
+ "Name": "NPN1",
+ "Template": "AWS-US-EAST-2-Large"
+ }
+ ]
+}
diff --git a/util/condvar/timedwait.go b/util/condvar/timedwait.go
index 9ed142f1b..a4605d099 100644
--- a/util/condvar/timedwait.go
+++ b/util/condvar/timedwait.go
@@ -20,6 +20,8 @@ import (
"sync"
"sync/atomic"
"time"
+
+ "github.com/algorand/go-algorand/util"
)
// TimedWait waits for sync.Cond c to be signaled, with a timeout.
@@ -33,7 +35,7 @@ func TimedWait(c *sync.Cond, timeout time.Duration) {
var done int32
go func() {
- <-time.After(timeout)
+ util.NanoSleep(timeout)
for atomic.LoadInt32(&done) == 0 {
c.Broadcast()
@@ -42,7 +44,7 @@ func TimedWait(c *sync.Cond, timeout time.Duration) {
// thread hasn't gotten around to calling c.Wait()
// yet, so the c.Broadcast() did not wake it up.
// Sleep for a second and check again.
- <-time.After(time.Second)
+ time.Sleep(time.Second)
}
}()
diff --git a/util/metrics/counter.go b/util/metrics/counter.go
index 1fb33f67e..73debb3e9 100644
--- a/util/metrics/counter.go
+++ b/util/metrics/counter.go
@@ -186,7 +186,7 @@ func (counter *Counter) WriteMetric(buf *strings.Builder, parentLabels string) {
}
// AddMetric adds the metric into the map
-func (counter *Counter) AddMetric(values map[string]string) {
+func (counter *Counter) AddMetric(values map[string]float64) {
counter.Lock()
defer counter.Unlock()
@@ -199,7 +199,10 @@ func (counter *Counter) AddMetric(values map[string]string) {
if len(l.labels) == 0 {
sum += float64(atomic.LoadUint64(&counter.intValue))
}
-
- values[counter.name] = strconv.FormatFloat(sum, 'f', -1, 32)
+ var suffix string
+ if len(l.formattedLabels) > 0 {
+ suffix = ":" + l.formattedLabels
+ }
+ values[sanitizeTelemetryName(counter.name+suffix)] = sum
}
}
diff --git a/util/metrics/gauge.go b/util/metrics/gauge.go
index f4e27b957..0cf60932d 100644
--- a/util/metrics/gauge.go
+++ b/util/metrics/gauge.go
@@ -174,7 +174,7 @@ func (gauge *Gauge) WriteMetric(buf *strings.Builder, parentLabels string) {
}
// AddMetric adds the metric into the map
-func (gauge *Gauge) AddMetric(values map[string]string) {
+func (gauge *Gauge) AddMetric(values map[string]float64) {
gauge.Lock()
defer gauge.Unlock()
@@ -183,6 +183,10 @@ func (gauge *Gauge) AddMetric(values map[string]string) {
}
for _, l := range gauge.valuesIndices {
- values[gauge.name] = strconv.FormatFloat(l.gauge, 'f', -1, 32)
+ var suffix string
+ if len(l.formattedLabels) > 0 {
+ suffix = ":" + l.formattedLabels
+ }
+ values[sanitizeTelemetryName(gauge.name+suffix)] = l.gauge
}
}
diff --git a/util/metrics/metrics_test.go b/util/metrics/metrics_test.go
index 8d526906e..03369c7a1 100644
--- a/util/metrics/metrics_test.go
+++ b/util/metrics/metrics_test.go
@@ -22,9 +22,11 @@ import (
"net"
"net/http"
"strings"
+ "testing"
"time"
"github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/require"
)
type MetricTest struct {
@@ -91,3 +93,22 @@ func (p *MetricTest) testMetricsHandler(w http.ResponseWriter, r *http.Request)
}
w.Write([]byte(""))
}
+
+func TestSanitizeTelemetryName(t *testing.T) {
+ for _, tc := range []struct{ in, out string }{
+ {in: "algod_counter_x", out: "algod_counter_x"},
+ {in: "algod_counter_x{a=b}", out: "algod_counter_x_a_b_"},
+ {in: "this_is1-a-name0", out: "this_is1-a-name0"},
+ {in: "myMetricName1:a=yes", out: "myMetricName1_a_yes"},
+ {in: "myMetricName1:a=yes,b=no", out: "myMetricName1_a_yes_b_no"},
+ {in: "0myMetricName1", out: "_myMetricName1"},
+ {in: "myMetricName1{hello=x}", out: "myMetricName1_hello_x_"},
+ {in: "myMetricName1.moreNames-n.3", out: "myMetricName1_moreNames-n_3"},
+ {in: "-my-metric-name", out: "_my-metric-name"},
+ {in: `label-counter:label="a label value"`, out: "label-counter_label__a_label_value_"},
+ } {
+ t.Run(tc.in, func(t *testing.T) {
+ require.Equal(t, tc.out, sanitizeTelemetryName(tc.in))
+ })
+ }
+}
diff --git a/util/metrics/registry.go b/util/metrics/registry.go
index 33b902da3..53ada420a 100644
--- a/util/metrics/registry.go
+++ b/util/metrics/registry.go
@@ -70,7 +70,7 @@ func (r *Registry) WriteMetrics(buf *strings.Builder, parentLabels string) {
}
// AddMetrics will add all the metrics that were registered to this registry
-func (r *Registry) AddMetrics(values map[string]string) {
+func (r *Registry) AddMetrics(values map[string]float64) {
r.metricsMu.Lock()
defer r.metricsMu.Unlock()
for _, m := range r.metrics {
diff --git a/util/metrics/registryCommon.go b/util/metrics/registryCommon.go
index e5046d806..2eb8d6c53 100644
--- a/util/metrics/registryCommon.go
+++ b/util/metrics/registryCommon.go
@@ -17,6 +17,7 @@
package metrics
import (
+ "regexp"
"strings"
"github.com/algorand/go-deadlock"
@@ -25,7 +26,7 @@ import (
// Metric represent any collectable metric
type Metric interface {
WriteMetric(buf *strings.Builder, parentLabels string)
- AddMetric(values map[string]string)
+ AddMetric(values map[string]float64)
}
// Registry represents a single set of metrics registry
@@ -33,3 +34,11 @@ type Registry struct {
metrics []Metric
metricsMu deadlock.Mutex
}
+
+var sanitizeTelemetryCharactersRegexp = regexp.MustCompile("(^[^a-zA-Z_]|[^a-zA-Z0-9_-])")
+
+// sanitizeTelemetryName ensures a metric name reported to telemetry doesn't contain any
+// non-alphanumeric characters (apart from - or _) and doesn't start with a number or a hyphen.
+func sanitizeTelemetryName(name string) string {
+ return sanitizeTelemetryCharactersRegexp.ReplaceAllString(name, "_")
+}
diff --git a/util/metrics/registry_test.go b/util/metrics/registry_test.go
index 3c60f09d8..aa4851630 100644
--- a/util/metrics/registry_test.go
+++ b/util/metrics/registry_test.go
@@ -14,8 +14,6 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-// +build telemetry
-
package metrics
import (
@@ -33,33 +31,32 @@ func TestWriteAdd(t *testing.T) {
counter := MakeCounter(MetricName{Name: "gauge-name", Description: "gauge description"})
counter.Add(12.34, nil)
- results := make(map[string]string)
+ labelCounter := MakeCounter(MetricName{Name: "label-counter", Description: "counter with labels"})
+ labelCounter.Add(5, map[string]string{"label": "a label value"})
+
+ results := make(map[string]float64)
DefaultRegistry().AddMetrics(results)
- require.Equal(t, 1, len(results))
- require.True(t, hasKey(results, "gauge-name"))
- require.Equal(t, "12.34", results["gauge-name"])
+ require.Equal(t, 2, len(results))
+ require.Contains(t, results, "gauge-name")
+ require.InDelta(t, 12.34, results["gauge-name"], 0.01)
+ require.Contains(t, results, "label-counter_label__a_label_value_")
+ require.InDelta(t, 5, results["label-counter_label__a_label_value_"], 0.01)
bufBefore := strings.Builder{}
DefaultRegistry().WriteMetrics(&bufBefore, "label")
require.True(t, bufBefore.Len() > 0)
- // Test that WriteMetrics does not change after adding a StringGauge
- stringGauge := MakeStringGauge()
- stringGauge.Set("string-key", "value")
-
DefaultRegistry().AddMetrics(results)
- require.True(t, hasKey(results, "string-key"))
- require.Equal(t, "value", results["string-key"])
- require.True(t, hasKey(results, "gauge-name"))
- require.Equal(t, "12.34", results["gauge-name"])
+ require.Contains(t, results, "gauge-name")
+ require.InDelta(t, 12.34, results["gauge-name"], 0.01)
// not included in string builder
bufAfter := strings.Builder{}
DefaultRegistry().WriteMetrics(&bufAfter, "label")
require.Equal(t, bufBefore.String(), bufAfter.String())
- stringGauge.Deregister(nil)
counter.Deregister(nil)
+ labelCounter.Deregister(nil)
}
diff --git a/util/metrics/stringGauge.go b/util/metrics/stringGauge.go
deleted file mode 100644
index c398533e4..000000000
--- a/util/metrics/stringGauge.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package metrics
-
-import (
- "strings"
-)
-
-// MakeStringGauge create a new StringGauge.
-func MakeStringGauge() *StringGauge {
- c := &StringGauge{
- values: make(map[string]string),
- }
- c.Register(nil)
- return c
-}
-
-// Register registers the StringGauge with the default/specific registry
-func (stringGauge *StringGauge) Register(reg *Registry) {
- if reg == nil {
- DefaultRegistry().Register(stringGauge)
- } else {
- reg.Register(stringGauge)
- }
-}
-
-// Deregister deregisters the StringGauge with the default/specific registry
-func (stringGauge *StringGauge) Deregister(reg *Registry) {
- if reg == nil {
- DefaultRegistry().Deregister(stringGauge)
- } else {
- reg.Deregister(stringGauge)
- }
-}
-
-// Set updates a key with a value.
-func (stringGauge *StringGauge) Set(key string, value string) {
- stringGauge.values[key] = value
-}
-
-// WriteMetric omit string gauges from the metrics report, not sure how they act with prometheus
-func (stringGauge *StringGauge) WriteMetric(buf *strings.Builder, parentLabels string) {
-}
-
-// AddMetric sets all the key value pairs in the provided map.
-func (stringGauge *StringGauge) AddMetric(values map[string]string) {
- for k, v := range stringGauge.values {
- values[k] = v
- }
-}
diff --git a/util/metrics/stringGauge_test.go b/util/metrics/stringGauge_test.go
deleted file mode 100644
index 97296ba5c..000000000
--- a/util/metrics/stringGauge_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package metrics
-
-import (
- "strings"
- "testing"
-
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-func hasKey(data map[string]string, key string) bool {
- _, ok := data[key]
- return ok
-}
-
-func TestMetricStringGauge(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- stringGauge := MakeStringGauge()
- stringGauge.Set("number-key", "1")
- stringGauge.Set("string-key", "value")
-
- results := make(map[string]string)
- DefaultRegistry().AddMetrics(results)
-
- // values are populated
- require.Equal(t, 2, len(results))
- require.True(t, hasKey(results, "number-key"))
- require.Equal(t, "1", results["number-key"])
- require.True(t, hasKey(results, "string-key"))
- require.Equal(t, "value", results["string-key"])
-
- // not included in string builder
- buf := strings.Builder{}
- DefaultRegistry().WriteMetrics(&buf, "not used")
- require.Equal(t, "", buf.String())
-
- stringGauge.Deregister(nil)
-}
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go
index 8dc73ea3b..53cce7ba6 100644
--- a/util/metrics/tagcounter.go
+++ b/util/metrics/tagcounter.go
@@ -129,7 +129,7 @@ func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) {
// AddMetric is part of the Metric interface
// Copy the values in this TagCounter out into the string-string map.
-func (tc *TagCounter) AddMetric(values map[string]string) {
+func (tc *TagCounter) AddMetric(values map[string]float64) {
tagp := tc.tagptr.Load()
if tagp == nil {
return
@@ -146,6 +146,6 @@ func (tc *TagCounter) AddMetric(values map[string]string) {
} else {
name = tc.Name + "_" + tag
}
- values[name] = strconv.FormatUint(*tagcount, 10)
+ values[sanitizeTelemetryName(name)] = float64(*tagcount)
}
}
diff --git a/util/metrics/tagcounter_test.go b/util/metrics/tagcounter_test.go
index a2f8a87a0..b76202c53 100644
--- a/util/metrics/tagcounter_test.go
+++ b/util/metrics/tagcounter_test.go
@@ -46,7 +46,7 @@ func TestTagCounter(t *testing.T) {
tc.WriteMetric(&sb, "")
require.Equal(t, "", sb.String())
- result := make(map[string]string)
+ result := make(map[string]float64)
tc.AddMetric(result)
require.Equal(t, 0, len(result))
diff --git a/util/metrics/stringGaugeCommon.go b/util/sleep.go
index 40358b22d..6f8c150ab 100644
--- a/util/metrics/stringGaugeCommon.go
+++ b/util/sleep.go
@@ -14,14 +14,20 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package metrics
+// +build !linux
+
+package util
import (
- "github.com/algorand/go-deadlock"
+ "time"
)
-// StringGauge represents a map of key value pairs available to be written with the AddMetric
-type StringGauge struct {
- deadlock.Mutex
- values map[string]string
+// NanoSleep sleeps for the given d duration.
+func NanoSleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// NanoAfter waits for the duration to elapse and then sends the current time on the returned channel.
+func NanoAfter(d time.Duration) <-chan time.Time {
+ return time.After(d)
}
diff --git a/util/sleep_linux.go b/util/sleep_linux.go
new file mode 100644
index 000000000..b540303df
--- /dev/null
+++ b/util/sleep_linux.go
@@ -0,0 +1,49 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package util
+
+import (
+ "syscall"
+ "time"
+)
+
+// NanoSleep sleeps for the given d duration.
+func NanoSleep(d time.Duration) {
+ timeSpec := &syscall.Timespec{
+ Nsec: d.Nanoseconds() % time.Second.Nanoseconds(),
+ Sec: d.Nanoseconds() / time.Second.Nanoseconds(),
+ }
+ syscall.Nanosleep(timeSpec, nil) // nolint:errcheck
+}
+
+// NanoAfter waits for the duration to elapse and then sends the current time on the returned channel.
+func NanoAfter(d time.Duration) <-chan time.Time {
+ // The following is a workaround for the go 1.16 bug, where timers are rounded up to the next millisecond resolution.
+	// The Go standard library's "time.After" avoids creating a goroutine until one is needed to write
+	// the time to the channel, which makes it much more resource-efficient than the goroutine-based
+	// implementation below. For that reason, we keep calling the standard implementation when timing
+	// is not critical (i.e. > 10ms).
+ if d > 10*time.Millisecond {
+ return time.After(d)
+ }
+ c := make(chan time.Time, 1)
+ go func() {
+ NanoSleep(d)
+ c <- time.Now()
+ }()
+ return c
+}