summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <john.lee@algorand.com>2021-08-13 13:44:55 -0400
committerJohn Lee <john.lee@algorand.com>2021-08-13 13:44:55 -0400
commit0c3ce934ec98d61ec7ac9f0ac8193e1c8d1e3964 (patch)
tree0812c02715b79e5f05febdef43d4c03348c49f13
parentde84e90e84febadb8224591f8d4ad1551044b0b1 (diff)
parent852a8a94741b8e25690b6d44b621fdf88c4a1e27 (diff)
Merge branch 'master' into relbeta2.10.0relbeta2.10.0
-rw-r--r--.circleci/config.yml135
-rw-r--r--.github/workflows/reviewdog.yml90
-rw-r--r--.golangci-warnings.yml76
-rw-r--r--.golangci.yml59
-rw-r--r--.travis.yml4
-rw-r--r--Makefile5
-rw-r--r--catchup/fetcher_test.go2
-rw-r--r--catchup/pref_test.go5
-rw-r--r--cmd/partitiontest_linter/go.mod3
-rw-r--r--cmd/partitiontest_linter/plugin/plugin.go2
-rw-r--r--components/mocks/mockNodeContext.go45
-rw-r--r--config/consensus.go27
-rw-r--r--config/version.go2
-rw-r--r--crypto/batchverifier.go98
-rw-r--r--crypto/batchverifier_test.go126
-rw-r--r--crypto/cryptoerror.go18
-rw-r--r--crypto/multisig.go69
-rw-r--r--crypto/multisig_test.go336
-rw-r--r--daemon/algod/api/Makefile2
-rw-r--r--daemon/algod/api/algod.oas2.json31
-rw-r--r--daemon/algod/api/algod.oas3.yml45
-rw-r--r--daemon/algod/api/client/restClient.go8
-rw-r--r--daemon/algod/api/server/v2/dryrun.go23
-rw-r--r--daemon/algod/api/server/v2/dryrun_test.go112
-rw-r--r--daemon/algod/api/server/v2/generated/private/routes.go261
-rw-r--r--daemon/algod/api/server/v2/generated/private/types.go14
-rw-r--r--daemon/algod/api/server/v2/generated/routes.go269
-rw-r--r--daemon/algod/api/server/v2/generated/types.go14
-rw-r--r--daemon/algod/api/server/v2/handlers.go9
-rw-r--r--daemon/algod/api/server/v2/test/helpers.go2
-rw-r--r--daemon/algod/api/server/v2/utils.go29
-rw-r--r--data/basics/msgp_gen.go521
-rw-r--r--data/basics/msgp_gen_test.go60
-rw-r--r--data/basics/overflow.go32
-rw-r--r--data/basics/teal.go29
-rw-r--r--data/basics/teal_test.go32
-rw-r--r--data/basics/units_test.go22
-rw-r--r--data/bookkeeping/block.go12
-rw-r--r--data/bookkeeping/block_test.go1
-rw-r--r--data/bookkeeping/genesis.go73
-rw-r--r--data/common_test.go5
-rw-r--r--data/datatest/fabricateLedger.go3
-rw-r--r--data/genesisBalances.go41
-rw-r--r--data/ledger.go65
-rw-r--r--data/transactions/application.go42
-rw-r--r--data/transactions/application_test.go27
-rw-r--r--data/transactions/logic/README.md1
-rw-r--r--data/transactions/logic/TEAL_opcodes.md12
-rw-r--r--data/transactions/logic/assembler.go50
-rw-r--r--data/transactions/logic/assembler_test.go37
-rw-r--r--data/transactions/logic/backwardCompat_test.go147
-rw-r--r--data/transactions/logic/doc.go5
-rw-r--r--data/transactions/logic/eval.go48
-rw-r--r--data/transactions/logic/evalStateful_test.go111
-rw-r--r--data/transactions/logic/eval_test.go133
-rw-r--r--data/transactions/logic/fields_test.go152
-rw-r--r--data/transactions/logic/opcodes.go16
-rw-r--r--data/transactions/verify/txn.go102
-rw-r--r--data/txHandler_test.go3
-rw-r--r--data/txntest/txn.go183
-rw-r--r--ledger/acctupdates.go8
-rw-r--r--ledger/appcow.go25
-rw-r--r--ledger/appcow_test.go46
-rw-r--r--ledger/applications.go13
-rw-r--r--ledger/applications_test.go90
-rw-r--r--ledger/apply/application.go33
-rw-r--r--ledger/apply/application_test.go152
-rw-r--r--ledger/apply/asset.go20
-rw-r--r--ledger/assetcow.go12
-rw-r--r--ledger/cow.go10
-rw-r--r--ledger/eval.go19
-rw-r--r--ledger/eval_test.go611
-rw-r--r--ledger/ledgercore/statedelta.go21
-rw-r--r--libgoal/libgoal.go12
-rw-r--r--node/assemble_test.go5
-rw-r--r--node/indexer/indexer_test.go24
-rw-r--r--node/node.go16
-rw-r--r--node/nodeContext.go58
-rw-r--r--node/node_test.go2
-rw-r--r--node/topAccountListener_test.go8
-rw-r--r--protocol/consensus.go7
-rw-r--r--rpcs/blockService_test.go3
-rw-r--r--scripts/buildtools/go.mod1
-rw-r--r--scripts/buildtools/go.sum32
-rwxr-xr-xscripts/buildtools/install_buildtools.sh9
-rwxr-xr-xscripts/travis/build.sh2
-rwxr-xr-xscripts/travis/codegen_verification.sh4
-rwxr-xr-xscripts/travis/run_tests.sh4
-rwxr-xr-xscripts/travis/test.sh2
-rw-r--r--shared/pingpong/accounts.go153
-rw-r--r--shared/pingpong/pingpong.go208
-rw-r--r--test/e2e-go/features/transactions/app_pages_test.go186
-rw-r--r--test/e2e-go/features/transactions/application_test.go130
-rw-r--r--test/e2e-go/restAPI/restClient_test.go131
-rw-r--r--test/e2e-go/upgrades/rekey_support_test.go41
-rw-r--r--test/linttest/lintissues.go (renamed from components/nodeContext.go)29
-rwxr-xr-xtest/scripts/e2e.sh75
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-extra-pages.sh2
-rwxr-xr-xtest/scripts/e2e_subs/teal-app-params.sh11
-rw-r--r--test/scripts/e2e_subs/tealprogs/quine.teal22
100 files changed, 4901 insertions, 1222 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 0d75f568a..c0d1c0181 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,6 +3,7 @@ version: 2.1
orbs:
win: circleci/windows@2.3.0
go: circleci/go@1.7.0
+ slack: circleci/slack@4.4.2
workflows:
version: 2
@@ -15,111 +16,156 @@ workflows:
- amd64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- amd64_test_nightly:
requires:
- amd64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- amd64_integration:
requires:
- amd64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- amd64_integration_nightly:
requires:
- amd64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- amd64_e2e_subs:
requires:
- amd64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- amd64_e2e_subs_nightly:
requires:
- amd64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- arm64_build
- arm64_test:
requires:
- arm64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- arm64_test_nightly:
requires:
- arm64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- arm64_integration:
requires:
- arm64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- arm64_integration_nightly:
requires:
- arm64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- arm64_e2e_subs:
requires:
- arm64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- arm64_e2e_subs_nightly:
requires:
- arm64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- mac_amd64_build
- mac_amd64_test:
requires:
- mac_amd64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- mac_amd64_test_nightly:
requires:
- mac_amd64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- mac_amd64_integration:
requires:
- mac_amd64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- mac_amd64_integration_nightly:
requires:
- mac_amd64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
- mac_amd64_e2e_subs:
requires:
- mac_amd64_build
filters:
branches:
- ignore: "rel/nightly"
+ ignore:
+ - /rel\/.*/
+ - /hotfix\/.*/
- mac_amd64_e2e_subs_nightly:
requires:
- mac_amd64_build
filters:
branches:
- only: "rel/nightly"
+ only:
+ - /rel\/.*/
+ - /hotfix\/.*/
+ context: slack-secrets
#- windows_x64_build
commands:
@@ -367,6 +413,9 @@ jobs:
result_subdir: amd64-nightly
no_output_timeout: 45m
- upload_coverage
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
amd64_integration:
machine:
@@ -374,7 +423,7 @@ jobs:
resource_class: medium
parallelism: 4
environment:
- SKIP_E2E_SUBS: "true"
+ E2E_TEST_FILTER: "GO"
steps:
- prepare_go
- generic_integration:
@@ -387,19 +436,22 @@ jobs:
resource_class: medium
parallelism: 4
environment:
- SKIP_E2E_SUBS: "true"
+ E2E_TEST_FILTER: "GO"
steps:
- prepare_go
- generic_integration:
result_subdir: amd64-integrationnightly
no_output_timeout: 45m
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
amd64_e2e_subs:
machine:
image: ubuntu-2004:202104-01
resource_class: large
environment:
- E2E_SUBS_ONLY: "true"
+ E2E_TEST_FILTER: "SCRIPTS"
steps:
- prepare_go
- generic_integration:
@@ -411,12 +463,15 @@ jobs:
image: ubuntu-2004:202104-01
resource_class: large
environment:
- E2E_SUBS_ONLY: "true"
+ E2E_TEST_FILTER: "SCRIPTS"
steps:
- prepare_go
- generic_integration:
result_subdir: amd64-e2e_subs_nightly
no_output_timeout: 45m
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
arm64_build:
machine:
@@ -452,6 +507,9 @@ jobs:
result_subdir: arm64-nightly
no_output_timeout: 45m
- upload_coverage
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
arm64_integration:
machine:
@@ -459,7 +517,7 @@ jobs:
resource_class: arm.medium
parallelism: 4
environment:
- SKIP_E2E_SUBS: "true"
+ E2E_TEST_FILTER: "GO"
steps:
- checkout
- prepare_go
@@ -473,20 +531,23 @@ jobs:
resource_class: arm.medium
parallelism: 4
environment:
- SKIP_E2E_SUBS: "true"
+ E2E_TEST_FILTER: "GO"
steps:
- checkout
- prepare_go
- generic_integration:
result_subdir: arm64-integration-nightly
no_output_timeout: 45m
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
arm64_e2e_subs:
machine:
image: ubuntu-2004:202101-01
resource_class: arm.large
environment:
- E2E_SUBS_ONLY: "true"
+ E2E_TEST_FILTER: "SCRIPTS"
steps:
- checkout
- prepare_go
@@ -499,13 +560,16 @@ jobs:
image: ubuntu-2004:202101-01
resource_class: arm.large
environment:
- E2E_SUBS_ONLY: "true"
+ E2E_TEST_FILTER: "SCRIPTS"
steps:
- checkout
- prepare_go
- generic_integration:
result_subdir: arm64-e2e_subs-nightly
no_output_timeout: 45m
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
mac_amd64_build:
macos:
@@ -551,6 +615,9 @@ jobs:
circleci_home: /Users/distiller
no_output_timeout: 45m
- upload_coverage
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
mac_amd64_integration:
macos:
@@ -558,7 +625,7 @@ jobs:
resource_class: medium
parallelism: 4
environment:
- SKIP_E2E_SUBS: "true"
+ E2E_TEST_FILTER: "GO"
HOMEBREW_NO_AUTO_UPDATE: "true"
steps:
#- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
@@ -574,7 +641,7 @@ jobs:
resource_class: medium
parallelism: 4
environment:
- SKIP_E2E_SUBS: "true"
+ E2E_TEST_FILTER: "GO"
HOMEBREW_NO_AUTO_UPDATE: "true"
steps:
#- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
@@ -583,13 +650,16 @@ jobs:
result_subdir: mac-amd64-integration-nightly
circleci_home: /Users/distiller
no_output_timeout: 45m
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
mac_amd64_e2e_subs:
macos:
xcode: 12.0.1
resource_class: large
environment:
- E2E_SUBS_ONLY: "true"
+ E2E_TEST_FILTER: "SCRIPTS"
HOMEBREW_NO_AUTO_UPDATE: "true"
steps:
#- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
@@ -604,7 +674,7 @@ jobs:
xcode: 12.0.1
resource_class: large
environment:
- E2E_SUBS_ONLY: "true"
+ E2E_TEST_FILTER: "SCRIPTS"
HOMEBREW_NO_AUTO_UPDATE: "true"
steps:
#- run: git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow
@@ -613,6 +683,9 @@ jobs:
result_subdir: mac-amd64-e2e_subs-nightly
circleci_home: /Users/distiller
no_output_timeout: 45m
+ - slack/notify:
+ event: fail
+ template: basic_fail_1
windows_x64_build:
executor:
@@ -627,7 +700,7 @@ jobs:
#export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
export GOPATH="/home/circleci/go"
export ALGORAND_DEADLOCK=enable
- export NO_GIMME=True
+ export SKIP_GO_INSTALLATION=True
export PATH=/mingw64/bin:/C/tools/msys64/mingw64/bin:/usr/bin:$PATH
export MAKE=mingw32-make
$msys2 scripts/travis/build_test.sh
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
new file mode 100644
index 000000000..30b76064b
--- /dev/null
+++ b/.github/workflows/reviewdog.yml
@@ -0,0 +1,90 @@
+name: "ReviewDog workflow"
+on:
+ pull_request:
+jobs:
+ # Blocking Errors Section
+ reviewdog-errors:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0 # required for new-from-rev option in .golangci.yml
+ - name: Install libraries
+ run: sudo apt-get -y -q install libboost-math-dev
+ # move go out of the way temporarily to avoid "go list ./..." from installing modules
+ - name: Make libsodium.a
+ run: sudo mv /usr/bin/go /usr/bin/go.bak && make crypto/libs/linux/amd64/lib/libsodium.a && sudo mv /usr/bin/go.bak /usr/bin/go
+ - name: reviewdog-golangci-lint
+ uses: reviewdog/action-golangci-lint@v2
+ with:
+ golangci_lint_version: "v1.41.1"
+ golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners"
+ reporter: "github-pr-review"
+ tool_name: "Lint Errors"
+ level: "error"
+ fail_on_error: true
+ # Non-Blocking Warnings Section
+ reviewdog-warnings:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0 # required for new-from-rev option in .golangci.yml
+ - name: Install libraries
+ run: sudo apt-get -y -q install libboost-math-dev
+ # move go out of the way temporarily to avoid "go list ./..." from installing modules
+ - name: Make libsodium.a
+ run: sudo mv /usr/bin/go /usr/bin/go.bak && make crypto/libs/linux/amd64/lib/libsodium.a && sudo mv /usr/bin/go.bak /usr/bin/go
+ - name: Add bin to PATH
+ run: |
+ echo "$GITHUB_WORKSPACE/bin" >> $GITHUB_PATH
+ echo "$RUNNER_WORKSPACE/$(basename $GITHUB_REPOSITORY)/bin" >> $GITHUB_PATH
+ - name: Install specific golang
+ uses: actions/setup-go@v2
+ with:
+ go-version: '1.16.6'
+ - name: Create folders for golangci-lint
+ run: mkdir -p cicdtmp/golangci-lint
+ - name: Check if custom golangci-lint is already built
+ id: cache-golangci-lint
+ uses: actions/cache@v2
+ with:
+ path: cicdtmp/golangci-lint/golangci-lint-cgo
+ key: cicd-golangci-lint-cgo-v0.0.1
+
+ - name: Build custom golangci-lint with CGO_ENABLED
+ if: steps.cache-golangci-lint.outputs.cache-hit != 'true'
+ run: |
+ cd cicdtmp/golangci-lint
+ git clone https://github.com/golangci/golangci-lint.git .
+ git checkout tags/v1.41.1
+ CGO_ENABLED=true go build -trimpath -o golangci-lint-cgo ./cmd/golangci-lint
+ ./golangci-lint-cgo --version
+ cd ../../
+ - name: Install reviewdog
+ run: |
+ curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.13.0/install.sh | sh -s
+ reviewdog --version
+ - name: Build custom linters
+ run: |
+ cd cmd/partitiontest_linter/
+ CGO_ENABLED=true go build -buildmode=plugin -trimpath plugin/plugin.go
+ cd ../../
+ ls -la cmd/partitiontest_linter/
+ - name: Run golangci-lint with reviewdog
+ env:
+ REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: >
+ ./cicdtmp/golangci-lint/golangci-lint-cgo run
+ --out-format line-number
+ -c .golangci-warnings.yml
+ --allow-parallel-runners
+ | reviewdog
+ -f=golangci-lint
+ -name="Lint Warnings"
+ -reporter=github-check
+ -filter-mode=added
+ -fail-on-error=false
+ -level=warning
diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml
new file mode 100644
index 000000000..2ee916a65
--- /dev/null
+++ b/.golangci-warnings.yml
@@ -0,0 +1,76 @@
+run:
+ timeout: 5m
+ tests: true
+
+linters:
+ disable-all: true
+ enable:
+ - staticcheck
+ - structcheck
+ - typecheck
+ - varcheck
+ - deadcode
+ - gosimple
+ - unused
+ - partitiontest
+
+
+linters-settings:
+ custom:
+ partitiontest:
+ path: cmd/partitiontest_linter/plugin.so
+ description: This custom linter checks files that end in '_test.go', specifically functions that start with 'Test' and have testing argument, for a line 'partitiontest.ParitionTest(<testing arg>)'
+ original-url: github.com/algorand/go-algorand/cmd/partitiontest_linter
+
+severity:
+ default-severity: warning
+
+issues:
+ # use these new lint checks on code since #2574
+ new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57
+
+ # Disable default exclude rules listed in `golangci-lint run --help` (selectively re-enable some below)
+ exclude-use-default: false
+
+ # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+ max-issues-per-linter: 0
+
+ # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+ max-same-issues: 0
+
+ exclude:
+ # ignore govet false positive fixed in https://github.com/golang/go/issues/45043
+ - "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify"
+ # ignore golint false positive fixed in https://github.com/golang/lint/pull/487
+ - "exported method (.*).Unwrap` should have comment or be unexported"
+ # ignore issues about the way we use _struct fields to define encoding settings
+ - "`_struct` is unused"
+
+ # Enable some golangci-lint default exception rules:
+ # "EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok"
+ - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+ # "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore"
+ - ineffective break statement. Did you mean to break out of the outer loop
+
+ exclude-rules:
+ # be more lenient with test code
+ - path: _test\.go
+ linters:
+ - staticcheck
+ - structcheck
+ - typecheck
+ - varcheck
+ - deadcode
+ - gosimple
+ - unused
+ # Add all linters here -- Comment this block out for testing linters
+ - path: test/linttest/lintissues\.go
+ linters:
+ - staticcheck
+ - structcheck
+ - typecheck
+ - varcheck
+ - deadcode
+ - gosimple
+ - unused
+ - partitiontest
diff --git a/.golangci.yml b/.golangci.yml
index 2e2fbe1a0..9cf49999f 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,35 +1,25 @@
run:
timeout: 5m
- tests: true
+ tests: false
linters:
+ disable-all: true
enable:
+ - errcheck
+ - gofmt
- golint
- - misspell
- govet
- ineffassign
- - partitiontest
+ - misspell
- disable:
- - deadcode
- - errcheck
- - gosimple
- - staticcheck
- - structcheck
- - unused
- - varcheck
-
-linters-settings:
- custom:
- partitiontest:
- path: cmd/partitiontest_linter/plugin.so
- description: This custom linter checks files that end in '_test.go', specifically functions that start with 'Test' and have testing argument, for a line 'partitiontest.ParitionTest(<testing arg>)'
- original-url: github.com/algorand/go-algorand/cmd/partitiontest_linter
-# govet:
-# check-shadowing: true
+severity:
+ default-severity: error
issues:
- # don't use default exclude rules listed in `golangci-lint run --help`
+ # use these new lint checks on code since #2574
+ new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57
+
+ # Disable default exclude rules listed in `golangci-lint run --help` (selectively re-enable some below)
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
@@ -38,12 +28,27 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
- exclude-rules:
+ exclude:
# ignore govet false positive fixed in https://github.com/golang/go/issues/45043
- - linters:
- - govet
- text: "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify"
+ - "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify"
# ignore golint false positive fixed in https://github.com/golang/lint/pull/487
- - linters:
+ - "exported method (.*).Unwrap` should have comment or be unexported"
+ # ignore issues about the way we use _struct fields to define encoding settings
+ - "`_struct` is unused"
+
+ # Enable some golangci-lint default exception rules:
+ # "EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok"
+ - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+ # "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore"
+ - ineffective break statement. Did you mean to break out of the outer loop
+
+ exclude-rules:
+ # Add all linters here -- Comment this block out for testing linters
+ - path: test/linttest/lintissues\.go
+ linters:
+ - errcheck
+ - gofmt
- golint
- text: "exported method (.*).Unwrap` should have comment or be unexported"
+ - govet
+ - ineffassign
+ - misspell
diff --git a/.travis.yml b/.travis.yml
index 83a9d5717..f36615714 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,11 +7,11 @@ if: tag IS blank
stages:
- name: build_commit
- if: NOT (branch =~ /^rel\//) AND type != pull_request
+ if: NOT (branch =~ /^hotfix\//) AND NOT (branch =~ /^rel\//) AND type != pull_request
- name: build_pr
if: type = pull_request
- name: build_release
- if: branch =~ /^rel\// AND type != pull_request
+ if: (branch =~ /^hotfix\// OR branch =~ /^rel\//) AND type != pull_request
- name: deploy
if: branch =~ /^rel\// AND type != pull_request
- name: post_deploy
diff --git a/Makefile b/Makefile
index 5ba6defbd..50613c6da 100644
--- a/Makefile
+++ b/Makefile
@@ -324,6 +324,5 @@ archive:
aws s3 cp tmp/node_pkgs s3://algorand-internal/channel/$(CHANNEL)/$(FULLBUILDNUMBER) --recursive --exclude "*" --include "*$(FULLBUILDNUMBER)*"
build_custom_linters:
- cd cmd/partitiontest_linter/
- go build -buildmode=plugin -trimpath plugin/plugin.go
- cd -
+ cd $(SRCPATH)/cmd/partitiontest_linter/ && go build -buildmode=plugin -trimpath plugin/plugin.go && ls plugin.so
+ cd $(SRCPATH)
diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go
index cd204d2d6..65ace1345 100644
--- a/catchup/fetcher_test.go
+++ b/catchup/fetcher_test.go
@@ -60,7 +60,7 @@ func buildTestLedger(t *testing.T, blk bookkeeping.Block) (ledger *data.Ledger,
}
log := logging.TestingLog(t)
- genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
genHash := crypto.Digest{0x42}
const inMem = true
cfg := config.GetDefaultLocal()
diff --git a/catchup/pref_test.go b/catchup/pref_test.go
index 2bbb32d10..8cf08c6bd 100644
--- a/catchup/pref_test.go
+++ b/catchup/pref_test.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/datatest"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -81,7 +82,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) {
}
// one service
-func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *data.Ledger, release func(), genesisBalances data.GenesisBalances) {
+func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *data.Ledger, release func(), genesisBalances bookkeeping.GenesisBalances) {
P := numAccounts // n accounts
maxMoneyAtStart := uint64(10 * defaultRewardUnit) // max money start
minMoneyAtStart := uint64(defaultRewardUnit) // min money start
@@ -143,7 +144,7 @@ func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *da
}
var err error
- genesisBalances = data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genesisBalances = bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
diff --git a/cmd/partitiontest_linter/go.mod b/cmd/partitiontest_linter/go.mod
index d9e2120c6..17a694f6d 100644
--- a/cmd/partitiontest_linter/go.mod
+++ b/cmd/partitiontest_linter/go.mod
@@ -2,4 +2,7 @@ module github.com/algorand/go-algorand/cmd/partitiontest_linter
go 1.16
+require golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
+require golang.org/x/sys v0.0.0-20210510120138-977fb7262007
+require golang.org/x/mod v0.4.2
require golang.org/x/tools v0.1.3
diff --git a/cmd/partitiontest_linter/plugin/plugin.go b/cmd/partitiontest_linter/plugin/plugin.go
index 16b9ee08c..eefcfffca 100644
--- a/cmd/partitiontest_linter/plugin/plugin.go
+++ b/cmd/partitiontest_linter/plugin/plugin.go
@@ -31,7 +31,7 @@ func (*analyzerPlugin) GetAnalyzers() []*analysis.Analyzer {
}
}
-// This must be defined and named 'AnalyzerPlugin'
+// AnalyzerPlugin must be defined and named 'AnalyzerPlugin'
var AnalyzerPlugin analyzerPlugin
func main() {
diff --git a/components/mocks/mockNodeContext.go b/components/mocks/mockNodeContext.go
deleted file mode 100644
index 101239d90..000000000
--- a/components/mocks/mockNodeContext.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package mocks
-
-import "github.com/algorand/go-algorand/data/basics"
-
-// MockNodeContext implements NodeContext for testing
-type MockNodeContext struct {
- CatchingUp bool
- InitialCatchupNotComplete bool
- NotCaughtUp bool
-}
-
-// IsCatchingUp (implements NodeContext) returns true if our sync routine is currently running
-func (ctx *MockNodeContext) IsCatchingUp() bool {
- return ctx.CatchingUp
-}
-
-// IsInitialCatchupComplete (implements NodeContext) returns true if the initial sync has completed (doesn't mean it succeeded)
-func (ctx *MockNodeContext) IsInitialCatchupComplete() bool {
- return !ctx.InitialCatchupNotComplete
-}
-
-// HasCaughtUp (implements NodeContext) returns true if we have completely caught up at least once
-func (ctx *MockNodeContext) HasCaughtUp() bool {
- return ctx.NotCaughtUp
-}
-
-// SetLastLiveRound is called to record observation of a round completion
-func (ctx *MockNodeContext) SetLastLiveRound(round basics.Round) {
-}
diff --git a/config/consensus.go b/config/consensus.go
index 9c7903c0f..7c485c36d 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -108,6 +108,11 @@ type ConsensusParams struct {
// each Txn has a MinFee.
EnableFeePooling bool
+ // EnableAppCostPooling specifies that the sum of fees for application calls
+ // in a group is checked against the sum of the budget for application calls,
+ // rather than check each individual app call is within the budget.
+ EnableAppCostPooling bool
+
// RewardUnit specifies the number of MicroAlgos corresponding to one reward
// unit.
//
@@ -422,6 +427,10 @@ var MaxEvalDeltaAccounts int
// in a StateDelta, used for decoding purposes.
var MaxStateDeltaKeys int
+// MaxLogCalls is the highest allowable log messages that may appear in
+// any version, used for decoding purposes. Never decrease this value.
+const MaxLogCalls = 32
+
// MaxLogicSigMaxSize is the largest logical signature appear in any of the supported
// protocols, used for decoding purposes.
var MaxLogicSigMaxSize int
@@ -971,9 +980,21 @@ func initConsensusProtocols() {
// v27 can be upgraded to v28, with an update delay of 7 days ( see calculation above )
v27.ApprovedUpgrades[protocol.ConsensusV28] = 140000
+ // v29 fixes application update by using ExtraProgramPages in size calculations
+ v29 := v28
+ v29.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Enable ExtraProgramPages for application update
+ v29.EnableExtraPagesOnAppUpdate = true
+
+ Consensus[protocol.ConsensusV29] = v29
+
+ // v28 can be upgraded to v29, with an update delay of 3 days ( see calculation above )
+ v28.ApprovedUpgrades[protocol.ConsensusV29] = 60000
+
// ConsensusFuture is used to test features that are implemented
// but not yet released in a production protocol version.
- vFuture := v28
+ vFuture := v29
vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
// FilterTimeout for period 0 should take a new optimized, configured value, need to revisit this later
@@ -989,8 +1010,8 @@ func initConsensusProtocols() {
// Enable TEAL 5 / AVM 1.0
vFuture.LogicSigVersion = 5
- // Enable ExtraProgramPages for application update
- vFuture.EnableExtraPagesOnAppUpdate = true
+ // Enable App calls to pool budget in grouped transactions
+ vFuture.EnableAppCostPooling = true
Consensus[protocol.ConsensusFuture] = vFuture
}
diff --git a/config/version.go b/config/version.go
index b2d9ca833..39a80b6b8 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 2
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 9
+const VersionMinor = 10
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
new file mode 100644
index 000000000..71df901d7
--- /dev/null
+++ b/crypto/batchverifier.go
@@ -0,0 +1,98 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package crypto
+
+import "errors"
+
+// BatchVerifier enqueues signatures to be validated in batch.
+type BatchVerifier struct {
+ messages []Hashable // contains a slice of messages to be hashed. Each message is varible length
+ publicKeys []SignatureVerifier // contains a slice of public keys. Each individual public key is 32 bytes.
+ signatures []Signature // contains a slice of signatures keys. Each individual signature is 64 bytes.
+}
+
+const minBatchVerifierAlloc = 16
+
+// Batch verifications errors
+var (
+ ErrBatchVerificationFailed = errors.New("At least on signature didn't pass verification")
+ ErrZeroTranscationsInBatch = errors.New("Could not validate empty signature set")
+)
+
+// MakeBatchVerifierDefaultSize create a BatchVerifier instance. This function pre-allocates
+// amount of free space to enqueue signatures without exapneding
+func MakeBatchVerifierDefaultSize() *BatchVerifier {
+ return MakeBatchVerifier(minBatchVerifierAlloc)
+}
+
+// MakeBatchVerifier create a BatchVerifier instance. This function pre-allocates
+// a given space so it will not expaned the storage
+func MakeBatchVerifier(hint int) *BatchVerifier {
+ // preallocate enough storage for the expected usage. We will reallocate as needed.
+ if hint < minBatchVerifierAlloc {
+ hint = minBatchVerifierAlloc
+ }
+ return &BatchVerifier{
+ messages: make([]Hashable, 0, hint),
+ publicKeys: make([]SignatureVerifier, 0, hint),
+ signatures: make([]Signature, 0, hint),
+ }
+}
+
+// EnqueueSignature enqueues a signature to be enqueued
+func (b *BatchVerifier) EnqueueSignature(sigVerifier SignatureVerifier, message Hashable, sig Signature) {
+ // do we need to reallocate ?
+ if len(b.messages) == cap(b.messages) {
+ b.expand()
+ }
+ b.messages = append(b.messages, message)
+ b.publicKeys = append(b.publicKeys, sigVerifier)
+ b.signatures = append(b.signatures, sig)
+}
+
+func (b *BatchVerifier) expand() {
+ messages := make([]Hashable, len(b.messages), len(b.messages)*2)
+ publicKeys := make([]SignatureVerifier, len(b.publicKeys), len(b.publicKeys)*2)
+ signatures := make([]Signature, len(b.signatures), len(b.signatures)*2)
+ copy(messages, b.messages)
+ copy(publicKeys, b.publicKeys)
+ copy(signatures, b.signatures)
+ b.messages = messages
+ b.publicKeys = publicKeys
+ b.signatures = signatures
+}
+
+// GetNumberOfEnqueuedSignatures returns the number of signatures current enqueue onto the bacth verifier object
+func (b *BatchVerifier) GetNumberOfEnqueuedSignatures() int {
+ return len(b.messages)
+}
+
+// Verify verifies that all the signatures are valid. in that case nil is returned
+// if the batch is zero an appropriate error is return.
+func (b *BatchVerifier) Verify() error {
+ if b.GetNumberOfEnqueuedSignatures() == 0 {
+ return ErrZeroTranscationsInBatch
+ }
+
+ for i := range b.messages {
+ verifier := SignatureVerifier(b.publicKeys[i])
+ if !verifier.Verify(b.messages[i], b.signatures[i]) {
+ return ErrBatchVerificationFailed
+ }
+ }
+ return nil
+}
diff --git a/crypto/batchverifier_test.go b/crypto/batchverifier_test.go
new file mode 100644
index 000000000..18e2e22e4
--- /dev/null
+++ b/crypto/batchverifier_test.go
@@ -0,0 +1,126 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package crypto
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestBatchVerifierSingle(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // test expected success
+ bv := MakeBatchVerifier(1)
+ msg := randString()
+ var s Seed
+ RandBytes(s[:])
+ sigSecrets := GenerateSignatureSecrets(s)
+ sig := sigSecrets.Sign(msg)
+ bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
+ require.NoError(t, bv.Verify())
+
+ // test expected failure
+ bv = MakeBatchVerifier(1)
+ msg = randString()
+ RandBytes(s[:])
+ sigSecrets = GenerateSignatureSecrets(s)
+ sig = sigSecrets.Sign(msg)
+ // break the signature:
+ sig[0] = sig[0] + 1
+ bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
+ require.Error(t, bv.Verify())
+}
+
+func TestBatchVerifierBulk(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ for i := 1; i < 64*2+3; i++ {
+ n := i
+ bv := MakeBatchVerifier(n)
+ var s Seed
+
+ for i := 0; i < n; i++ {
+ msg := randString()
+ RandBytes(s[:])
+ sigSecrets := GenerateSignatureSecrets(s)
+ sig := sigSecrets.Sign(msg)
+ bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
+ }
+ require.Equal(t, n, bv.GetNumberOfEnqueuedSignatures())
+ require.NoError(t, bv.Verify())
+ }
+
+}
+
+func TestBatchVerifierBulkWithExpand(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ n := 64
+ bv := MakeBatchVerifier(1)
+ var s Seed
+ RandBytes(s[:])
+
+ for i := 0; i < n; i++ {
+ msg := randString()
+ sigSecrets := GenerateSignatureSecrets(s)
+ sig := sigSecrets.Sign(msg)
+ bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
+ }
+ require.NoError(t, bv.Verify())
+}
+
+func TestBatchVerifierWithInvalidSiganture(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ n := 64
+ bv := MakeBatchVerifier(1)
+ var s Seed
+ RandBytes(s[:])
+
+ for i := 0; i < n-1; i++ {
+ msg := randString()
+ sigSecrets := GenerateSignatureSecrets(s)
+ sig := sigSecrets.Sign(msg)
+ bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
+ }
+
+ msg := randString()
+ sigSecrets := GenerateSignatureSecrets(s)
+ sig := sigSecrets.Sign(msg)
+ sig[0] = sig[0] + 1
+ bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
+
+ require.Error(t, bv.Verify())
+}
+
+func BenchmarkBatchVerifier(b *testing.B) {
+ c := makeCurve25519Secret()
+ bv := MakeBatchVerifier(1)
+ for i := 0; i < b.N; i++ {
+ str := randString()
+ bv.EnqueueSignature(c.SignatureVerifier, str, c.Sign(str))
+ }
+
+ b.ResetTimer()
+ require.NoError(b, bv.Verify())
+}
+
+func TestEmpty(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ bv := MakeBatchVerifierDefaultSize()
+ require.Error(t, bv.Verify())
+}
diff --git a/crypto/cryptoerror.go b/crypto/cryptoerror.go
index 606750c45..fb8e45698 100644
--- a/crypto/cryptoerror.go
+++ b/crypto/cryptoerror.go
@@ -18,13 +18,15 @@ package crypto
import "errors"
-const errorinvalidversion = "Invalid version"
-const errorinvalidaddress = "Invalid address"
-const errorinvalidthreshold = "Invalid threshold"
-const errorinvalidnumberofsignature = "Invalid number of signatures"
-const errorkeynotexist = "Key does not exist"
-const errorsubsigverification = "Verification failure: subsignature"
-const errorkeysnotmatch = "Public key lists do not match"
-const errorinvalidduplicates = "Invalid duplicates"
+var (
+ errInvalidVersion = errors.New("Invalid version")
+ errInvalidAddress = errors.New("Invalid address")
+ errInvalidThreshold = errors.New("Invalid threshold")
+ errInvalidNumberOfSignature = errors.New("Invalid number of signatures")
+ errKeyNotExist = errors.New("Key does not exist")
+ errKeysNotMatch = errors.New("Public key lists do not match")
+ errInvalidDuplicates = errors.New("Invalid duplicates")
+ errInvalidNumberOfSig = errors.New("invalid number of signatures to add")
+)
var errUnknownVersion = errors.New("unknown version")
diff --git a/crypto/multisig.go b/crypto/multisig.go
index 6dcb2589a..6c9a1ca66 100644
--- a/crypto/multisig.go
+++ b/crypto/multisig.go
@@ -88,7 +88,7 @@ func MultisigAddrGen(version, threshold uint8, pk []PublicKey) (addr Digest, err
}
if threshold == 0 || len(pk) == 0 || int(threshold) > len(pk) {
- err = errors.New(errorinvalidthreshold)
+ err = errInvalidThreshold
return
}
@@ -110,7 +110,7 @@ func MultisigAddrGenWithSubsigs(version uint8, threshold uint8,
}
if threshold == 0 || len(subsigs) == 0 || int(threshold) > len(subsigs) {
- err = errors.New(errorinvalidthreshold)
+ err = errInvalidThreshold
return
}
@@ -135,7 +135,7 @@ func MultisigSign(msg Hashable, addr Digest, version, threshold uint8, pk []Publ
}
if addr != addrnew {
- err = errors.New(errorinvalidaddress)
+ err = errInvalidAddress
return
}
@@ -152,7 +152,7 @@ func MultisigSign(msg Hashable, addr Digest, version, threshold uint8, pk []Publ
}
}
if keyexist == len(pk) {
- err = errors.New(errorkeynotexist)
+ err = errKeyNotExist
return
}
@@ -177,20 +177,20 @@ func MultisigAssemble(unisig []MultisigSig) (msig MultisigSig, err error) {
// check if all unisig match
for i := 1; i < len(unisig); i++ {
if unisig[0].Threshold != unisig[i].Threshold {
- err = errors.New(errorinvalidthreshold)
+ err = errInvalidThreshold
return
}
if unisig[0].Version != unisig[i].Version {
- err = errors.New(errorinvalidversion)
+ err = errInvalidVersion
return
}
if len(unisig[0].Subsigs) != len(unisig[i].Subsigs) {
- err = errors.New(errorinvalidnumberofsignature)
+ err = errInvalidNumberOfSignature
return
}
for j := 0; j < len(unisig[0].Subsigs); j++ {
if unisig[0].Subsigs[j].Key != unisig[i].Subsigs[j].Key {
- err = errors.New(errorkeysnotmatch)
+ err = errKeysNotMatch
return
}
}
@@ -217,11 +217,31 @@ func MultisigAssemble(unisig []MultisigSig) (msig MultisigSig, err error) {
// MultisigVerify verifies an assembled MultisigSig
func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool, err error) {
+ batchVerifier := MakeBatchVerifierDefaultSize()
+ if verified, err = MultisigBatchVerify(msg, addr, sig, batchVerifier); err != nil {
+ return
+ }
+ if !verified {
+ return
+ }
+ if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
+ return true, nil
+ }
+ if err = batchVerifier.Verify(); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// MultisigBatchVerify verifies an assembled MultisigSig.
+// it is the caller's responsibility to call batchVerifier.Verify()
+func MultisigBatchVerify(msg Hashable, addr Digest, sig MultisigSig, batchVerifier *BatchVerifier) (verified bool, err error) {
verified = false
// short circuit: if msig doesn't have subsigs or if Subsigs are empty
// then terminate (the upper layer should now verify the unisig)
if (len(sig.Subsigs) == 0 || sig.Subsigs[0] == MultisigSubsig{}) {
+ err = errInvalidNumberOfSignature
return
}
@@ -231,19 +251,19 @@ func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool,
return
}
if addr != addrnew {
- err = errors.New(errorinvalidaddress)
+ err = errInvalidAddress
return
}
// check that we don't have too many multisig subsigs
if len(sig.Subsigs) > maxMultisig {
- err = errors.New(errorinvalidnumberofsignature)
+ err = errInvalidNumberOfSignature
return
}
// check that we don't have too few multisig subsigs
if len(sig.Subsigs) < int(sig.Threshold) {
- err = errors.New(errorinvalidnumberofsignature)
+ err = errInvalidNumberOfSignature
return
}
@@ -255,7 +275,7 @@ func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool,
}
}
if counter < sig.Threshold {
- err = errors.New(errorinvalidnumberofsignature)
+ err = errInvalidNumberOfSignature
return
}
@@ -263,10 +283,7 @@ func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool,
var verifiedCount int
for _, subsigi := range sig.Subsigs {
if (subsigi.Sig != Signature{}) {
- if !subsigi.Key.Verify(msg, subsigi.Sig) {
- err = errors.New(errorsubsigverification)
- return
- }
+ batchVerifier.EnqueueSignature(subsigi.Key, msg, subsigi.Sig)
verifiedCount++
}
}
@@ -274,7 +291,7 @@ func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool,
// sanity check. if we get here then every non-blank subsig should have
// been verified successfully, and we should have had enough of them
if verifiedCount < int(sig.Threshold) {
- err = errors.New(errorinvalidnumberofsignature)
+ err = errInvalidNumberOfSignature
return
}
@@ -285,29 +302,29 @@ func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (verified bool,
// MultisigAdd adds unisig to an existing msig
func MultisigAdd(unisig []MultisigSig, msig *MultisigSig) (err error) {
if len(unisig) < 1 || msig == nil {
- err = errors.New("invalid number of signatures to add")
+ err = errInvalidNumberOfSig
return
}
// check if all unisig match
for i := 0; i < len(unisig); i++ {
if msig.Threshold != unisig[i].Threshold {
- err = errors.New(errorinvalidthreshold)
+ err = errInvalidThreshold
return
}
if msig.Version != unisig[i].Version {
- err = errors.New(errorinvalidversion)
+ err = errInvalidVersion
return
}
if len(msig.Subsigs) != len(unisig[i].Subsigs) {
- err = errors.New(errorkeysnotmatch)
+ err = errKeysNotMatch
return
}
for j := 0; j < len(unisig[0].Subsigs); j++ {
if msig.Subsigs[j].Key != unisig[i].Subsigs[j].Key {
- err = errors.New(errorkeysnotmatch)
+ err = errKeysNotMatch
return
}
}
@@ -321,7 +338,7 @@ func MultisigAdd(unisig []MultisigSig, msig *MultisigSig) (err error) {
msig.Subsigs[j].Sig = unisig[i].Subsigs[j].Sig
} else if msig.Subsigs[j].Sig != unisig[i].Subsigs[j].Sig {
// invalid duplicates
- err = errors.New(errorinvalidduplicates)
+ err = errInvalidDuplicates
return
} else {
// valid duplicates
@@ -339,12 +356,12 @@ func MultisigMerge(msig1 MultisigSig, msig2 MultisigSig) (msigt MultisigSig, err
if msig1.Threshold != msig2.Threshold ||
msig1.Version != msig2.Version ||
len(msig1.Subsigs) != len(msig2.Subsigs) {
- err = errors.New(errorinvalidthreshold)
+ err = errInvalidThreshold
return
}
for i := 0; i < len(msig1.Subsigs); i++ {
if msig1.Subsigs[i].Key != msig2.Subsigs[i].Key {
- err = errors.New(errorkeysnotmatch)
+ err = errKeysNotMatch
return
}
}
@@ -365,7 +382,7 @@ func MultisigMerge(msig1 MultisigSig, msig2 MultisigSig) (msigt MultisigSig, err
msigt.Subsigs[i].Sig = msig1.Subsigs[i].Sig
} else {
// invalid duplicates
- err = errors.New(errorinvalidduplicates)
+ err = errInvalidDuplicates
msigt = MultisigSig{}
return
}
diff --git a/crypto/multisig_test.go b/crypto/multisig_test.go
index 9073a8782..8636331bf 100644
--- a/crypto/multisig_test.go
+++ b/crypto/multisig_test.go
@@ -21,6 +21,8 @@ import (
"testing"
"github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
)
func MultisigSigPrint(sig MultisigSig) {
@@ -39,49 +41,51 @@ func MultisigSigPrint(sig MultisigSig) {
// detect invalid threshold and versions
//
func TestMultisigAddr(t *testing.T) {
+ partitiontest.PartitionTest(t)
var s Seed
- var userkeypair []*SecretKey
- var pk []PublicKey
+ var secrets []*SecretKey
+ var pks []PublicKey
var err error
version := uint8(1)
threshold := uint8(3)
- userkeypair = make([]*SecretKey, 4)
+ secrets = make([]*SecretKey, 4)
for i := 0; i < 4; i++ {
RandBytes(s[:])
- userkeypair[i] = GenerateSignatureSecrets(s)
+ secrets[i] = GenerateSignatureSecrets(s)
}
- pk = make([]PublicKey, 2)
- pk[0] = userkeypair[0].SignatureVerifier
- pk[1] = userkeypair[1].SignatureVerifier
+ pks = make([]PublicKey, 2)
+ pks[0] = secrets[0].SignatureVerifier
+ pks[1] = secrets[1].SignatureVerifier
// test if invalid threshold can be detected
// #keys= 2 < threshold = 3
- _, err = MultisigAddrGen(version, threshold, pk)
- require.Error(t, err, "MultisigAddr: unable to detect invalid threshold (keys == %d, threshold == %d)", len(pk), threshold)
+ _, err = MultisigAddrGen(version, threshold, pks)
+ require.Error(t, err, "MultisigAddr: unable to detect invalid threshold (keys == %d, threshold == %d)", len(pks), threshold)
// #keys = 3 == threshold = 3
- pk = append(pk, userkeypair[2].SignatureVerifier)
- _, err = MultisigAddrGen(version, threshold, pk)
- require.NoError(t, err, "MultisigAddr: unexpected failure generating message digest with %d keys and a threshold of %d", len(pk), threshold)
+ pks = append(pks, secrets[2].SignatureVerifier)
+ _, err = MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "MultisigAddr: unexpected failure generating message digest with %d keys and a threshold of %d", len(pks), threshold)
// #keys = 4 > threshold = 3
- pk = append(pk, userkeypair[3].SignatureVerifier)
- _, err = MultisigAddrGen(version, threshold, pk)
- require.NoError(t, err, "MultisigAddr: unexpected failure generating message digest with %d keys and a threshold of %d", len(pk), threshold)
+ pks = append(pks, secrets[3].SignatureVerifier)
+ _, err = MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "MultisigAddr: unexpected failure generating message digest with %d keys and a threshold of %d", len(pks), threshold)
}
// this test generates a set of 4 public keys for a threshold of 3
// signs with 3 keys to get 3 signatures
// assembles 3 signatures, verify the msig
func TestMultisig(t *testing.T) {
+ partitiontest.PartitionTest(t)
var msig MultisigSig
var sigs []MultisigSig
var s Seed
- var userkeypair []*SecretKey
- var pk []PublicKey
+ var secrets []*SecretKey
+ var pks []PublicKey
var err error
var addr Digest
@@ -90,33 +94,33 @@ func TestMultisig(t *testing.T) {
threshold := uint8(3)
txid := TestingHashable{[]byte("test: txid 1000")}
- userkeypair = make([]*SecretKey, 5)
+ secrets = make([]*SecretKey, 5)
for i := 0; i < 5; i++ {
RandBytes(s[:])
- userkeypair[i] = GenerateSignatureSecrets(s)
+ secrets[i] = GenerateSignatureSecrets(s)
}
// addr = hash (... |pk0|pk1|pk2|pk3), pk4 is not included
- pk = make([]PublicKey, 4)
- pk[0] = userkeypair[0].SignatureVerifier
- pk[1] = userkeypair[1].SignatureVerifier
- pk[2] = userkeypair[2].SignatureVerifier
- pk[3] = userkeypair[3].SignatureVerifier
- addr, err = MultisigAddrGen(version, threshold, pk)
+ pks = make([]PublicKey, 4)
+ pks[0] = secrets[0].SignatureVerifier
+ pks[1] = secrets[1].SignatureVerifier
+ pks[2] = secrets[2].SignatureVerifier
+ pks[3] = secrets[3].SignatureVerifier
+ addr, err = MultisigAddrGen(version, threshold, pks)
require.NoError(t, err, "Multisig: unexpected failure generating message digest")
// now testing signing functions
// check if invalid version can be detected
- _, err = MultisigSign(txid, addr, version+1, threshold, pk, *userkeypair[0])
+ _, err = MultisigSign(txid, addr, version+1, threshold, pks, *secrets[0])
require.Error(t, err, "should be able to detect invalid version number")
// check if invalid secret key can be detected
- _, err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[4])
+ _, err = MultisigSign(txid, addr, version, threshold, pks, *secrets[4])
require.Error(t, err, "should be able to detect invalid secret key used")
// test assembling
// test1: assemble a single signature -- should return failure
sigs = make([]MultisigSig, 1)
- sigs[0], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[3])
+ sigs[0], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[3])
require.NoError(t, err, "Multisig: unexpected failure in multisig signing")
_, err = MultisigAssemble(sigs)
require.Error(t, err, "should be able to detect insufficient signatures for assembling")
@@ -124,17 +128,25 @@ func TestMultisig(t *testing.T) {
// test2: assemble 3 signatures
// signing three signatures with pk0, pk1 and pk2
sigs = make([]MultisigSig, 3)
- sigs[0], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[0])
+ sigs[0], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[0])
require.NoError(t, err, "Multisig: unexpected failure in generating sig from pk 0")
- sigs[1], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[1])
+ sigs[1], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[1])
require.NoError(t, err, "Multisig: unexpected failure in generating sig from pk 1")
- sigs[2], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[2])
+ sigs[2], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[2])
require.NoError(t, err, "Multisig: unexpected failure in generating sig from pk 2")
msig, err = MultisigAssemble(sigs)
require.NoError(t, err, "Multisig: unexpected failure when assembling multisig")
verify, err := MultisigVerify(txid, addr, msig)
+ require.NoError(t, err, "Multisig: unexpected verification failure with err")
require.True(t, verify, "Multisig: verification failed, verify flag was false")
+
+ //test3: use the batch verification
+ br := MakeBatchVerifier(1)
+ verify, err = MultisigBatchVerify(txid, addr, msig, br)
require.NoError(t, err, "Multisig: unexpected verification failure with err")
+ require.True(t, verify, "Multisig: verification failed, verify flag was false")
+ res := br.Verify()
+ require.NoError(t, res, "Multisig: batch verification failed")
}
// test multisig merge functions
@@ -144,13 +156,14 @@ func TestMultisig(t *testing.T) {
// 4. merge msig1 and msig2
// 5. verify the merged one
func TestMultisigAddAndMerge(t *testing.T) {
+ partitiontest.PartitionTest(t)
var msig1 MultisigSig
var msig2 MultisigSig
var sigs []MultisigSig
var s Seed
- var userkeypair []*SecretKey
- var pk []PublicKey
+ var secrets []*SecretKey
+ var pks []PublicKey
var err error
var addr Digest
@@ -159,31 +172,31 @@ func TestMultisigAddAndMerge(t *testing.T) {
threshold := uint8(3)
txid := TestingHashable{[]byte("test: txid 1000")}
- userkeypair = make([]*SecretKey, 5)
+ secrets = make([]*SecretKey, 5)
RandBytes(s[:])
- pk = make([]PublicKey, 5)
+ pks = make([]PublicKey, 5)
for i := 0; i < 5; i++ {
- userkeypair[i] = GenerateSignatureSecrets(s)
- pk[i] = userkeypair[i].SignatureVerifier
+ secrets[i] = GenerateSignatureSecrets(s)
+ pks[i] = secrets[i].SignatureVerifier
}
// addr = hash (... |pk0|pk1|pk2|pk3|pk4)
- addr, err = MultisigAddrGen(version, threshold, pk)
+ addr, err = MultisigAddrGen(version, threshold, pks)
require.NoError(t, err, "Multisig: unexpected failure generating message digest")
// msig1 = {sig0,sig1}
sigs = make([]MultisigSig, 2)
- sigs[0], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[0])
+ sigs[0], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[0])
require.NoError(t, err, "Multisig: unexpected failure signing with pk 0")
- sigs[1], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[1])
+ sigs[1], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[1])
require.NoError(t, err, "Multisig: unexpected failure signing with pk 1")
msig1, err = MultisigAssemble(sigs)
require.NoError(t, err, "Multisig: unexpected failure assembling message from signatures 0 and 1")
// add sig3 to msig and then verify
sigs = make([]MultisigSig, 1)
- sigs[0], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[2])
+ sigs[0], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[2])
require.NoError(t, err, "Multisig: unexpected failure signing with pk 2")
err = MultisigAdd(sigs, &msig1)
require.NoError(t, err, "Multisig: unexpected err adding pk 2 signature to that of pk 0 and 1")
@@ -193,9 +206,9 @@ func TestMultisigAddAndMerge(t *testing.T) {
// msig2 = {sig3, sig4}
sigs = make([]MultisigSig, 2)
- sigs[0], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[3])
+ sigs[0], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[3])
require.NoError(t, err, "Multisig: unexpected failure signing with pk 3")
- sigs[1], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[4])
+ sigs[1], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[4])
require.NoError(t, err, "Multisig: unexpected failure signing with pk 4")
msig2, err = MultisigAssemble(sigs)
require.NoError(t, err, "Multisig: unexpected failure assembling message from signatures 3 and 4")
@@ -203,23 +216,254 @@ func TestMultisigAddAndMerge(t *testing.T) {
msigt, err := MultisigMerge(msig1, msig2)
require.NoError(t, err, "Multisig: unexpected failure merging multisig messages {0, 1, 2} and {3, 4}")
verify, err = MultisigVerify(txid, addr, msigt)
- require.True(t, verify, "Multisig: verification failed, verify flag was false")
require.NoError(t, err, "Multisig: unexpected verification failure with err")
+ require.True(t, verify, "Multisig: verification failed, verify flag was false")
// create a valid duplicate on purpose
// msig1 = {sig0, sig1, sig2}
// msig2 = {sig2, sig3, sig4}
// then verify the merged signature
sigs = make([]MultisigSig, 1)
- sigs[0], err = MultisigSign(txid, addr, version, threshold, pk, *userkeypair[2])
+ sigs[0], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[2])
require.NoError(t, err, "Multisig: unexpected failure signing with pk 2")
err = MultisigAdd(sigs, &msig2)
require.NoError(t, err, "Multisig: unexpected failure adding pk 2 signature to that of pk 3 and 4")
msigt, err = MultisigMerge(msig1, msig2)
require.NoError(t, err, "Multisig: unexpected failure merging multisig messages {0, 1, 2} and {2, 3, 4}")
verify, err = MultisigVerify(txid, addr, msigt)
- require.True(t, verify, "Multisig: verification failed, verify flag was false")
require.NoError(t, err, "Multisig: unexpected verification failure with err")
+ require.True(t, verify, "Multisig: verification failed, verify flag was false")
return
}
+
+func TestEmptyMultisig(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var s Seed
+ var secrets *SecretKey
+ var pks []PublicKey
+
+ txid := TestingHashable{[]byte("test: txid 1000")}
+ version := uint8(1)
+ threshold := uint8(1)
+ RandBytes(s[:])
+ secrets = GenerateSignatureSecrets(s)
+ pks = make([]PublicKey, 1)
+ pks[0] = secrets.SignatureVerifier
+
+ addr, err := MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "Multisig: unexpected failure generating message digest")
+ emptyMutliSig := MultisigSig{Version: version, Threshold: threshold, Subsigs: make([]MultisigSubsig, 0)}
+ verify, err := MultisigVerify(txid, addr, emptyMutliSig)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+ br := MakeBatchVerifier(1)
+ verify, err = MultisigBatchVerify(txid, addr, emptyMutliSig, br)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+}
+
+func TestIncorrectAddrresInMultisig(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var s Seed
+ var secrets *SecretKey
+ var pks []PublicKey
+
+ txid := TestingHashable{[]byte("test: txid 1000")}
+ version := uint8(1)
+ threshold := uint8(1)
+ RandBytes(s[:])
+ secrets = GenerateSignatureSecrets(s)
+ pks = make([]PublicKey, 1)
+ pks[0] = secrets.SignatureVerifier
+
+ addr, err := MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "Multisig: unexpected failure generating message digest")
+ MutliSig, err := MultisigSign(txid, addr, version, threshold, pks, *secrets)
+ require.NoError(t, err, "Multisig: could not create mutlisig")
+ addr[0] = addr[0] + 1
+ verify, err := MultisigVerify(txid, addr, MutliSig)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+ br := MakeBatchVerifier(1)
+ verify, err = MultisigBatchVerify(txid, addr, MutliSig, br)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+
+}
+
+func TestMoreThanMaxSigsInMultisig(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var s Seed
+ var secrets []*SecretKey
+ var pks []PublicKey
+ multiSigLen := maxMultisig + 1
+ txid := TestingHashable{[]byte("test: txid 1000")}
+ version := uint8(1)
+ threshold := uint8(1)
+ pks = make([]PublicKey, multiSigLen)
+ secrets = make([]*SecretKey, multiSigLen)
+ for i := 0; i < multiSigLen; i++ {
+ RandBytes(s[:])
+ secrets[i] = GenerateSignatureSecrets(s)
+ pks[i] = secrets[i].SignatureVerifier
+ }
+
+ addr, err := MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "Multisig: unexpected failure generating message digest")
+
+ sigs := make([]MultisigSig, multiSigLen)
+
+ for i := 0; i < len(sigs); i++ {
+ sigs[i], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[i])
+ require.NoError(t, err, "Multisig: unexpected failure in generating sig from pk %v", i)
+ }
+
+ msig, err := MultisigAssemble(sigs)
+ require.NoError(t, err, "Multisig: error assmeble multisig")
+ verify, err := MultisigVerify(txid, addr, msig)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+ br := MakeBatchVerifier(1)
+ verify, err = MultisigBatchVerify(txid, addr, msig, br)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+}
+
+func TestOneSignatureIsEmpty(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var s Seed
+ var secrets []*SecretKey
+ var pks []PublicKey
+ multiSigLen := 6
+ txid := TestingHashable{[]byte("test: txid 1000")}
+ version := uint8(1)
+ threshold := uint8(multiSigLen)
+ pks = make([]PublicKey, multiSigLen)
+ secrets = make([]*SecretKey, multiSigLen)
+ for i := 0; i < multiSigLen; i++ {
+ RandBytes(s[:])
+ secrets[i] = GenerateSignatureSecrets(s)
+ pks[i] = secrets[i].SignatureVerifier
+ }
+
+ addr, err := MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "Multisig: unexpected failure generating message digest")
+
+ sigs := make([]MultisigSig, multiSigLen)
+
+ for i := 0; i < multiSigLen; i++ {
+ sigs[i], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[i])
+ require.NoError(t, err, "Multisig: unexpected failure in generating sig from pk %v", i)
+ }
+
+ msig, err := MultisigAssemble(sigs)
+ require.NoError(t, err, "Multisig: error assmeble multisig")
+ msig.Subsigs[0].Sig = Signature{}
+ verify, err := MultisigVerify(txid, addr, msig)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+ br := MakeBatchVerifier(1)
+ verify, err = MultisigBatchVerify(txid, addr, msig, br)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+}
+
+// in this test we want to check what happens if one of the signatures is not valid.
+// we create a case where there are enough valid signatures (passing the threshold), but since one is broken, everything fails.
+func TestOneSignatureIsInvalid(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var s Seed
+ var userkeypair []*SecretKey
+ var pks []PublicKey
+ multiSigLen := 6
+ txid := TestingHashable{[]byte("test: txid 1000")}
+ version := uint8(1)
+ threshold := uint8(3)
+ pks = make([]PublicKey, multiSigLen)
+ userkeypair = make([]*SecretKey, multiSigLen)
+ for i := 0; i < multiSigLen; i++ {
+ RandBytes(s[:])
+ userkeypair[i] = GenerateSignatureSecrets(s)
+ pks[i] = userkeypair[i].SignatureVerifier
+ }
+
+ addr, err := MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "Multisig: unexpected failure generating message digest")
+
+ sigs := make([]MultisigSig, multiSigLen)
+
+ for i := 0; i < multiSigLen; i++ {
+ sigs[i], err = MultisigSign(txid, addr, version, threshold, pks, *userkeypair[i])
+ require.NoError(t, err, "Multisig: unexpected failure in generating sig from pk %v", i)
+ }
+
+ sigs[1].Subsigs[1].Sig[5] = sigs[1].Subsigs[1].Sig[5] + 1
+ msig, err := MultisigAssemble(sigs)
+ require.NoError(t, err, "Multisig: error assmeble multisig")
+ verify, err := MultisigVerify(txid, addr, msig)
+ require.False(t, verify, "Multisig: verification succeeded, it should failed")
+ require.Error(t, err, "Multisig: did not return error as expected")
+ br := MakeBatchVerifier(1)
+ verify, err = MultisigBatchVerify(txid, addr, msig, br)
+ require.NoError(t, err, "Multisig: did not return error as expected")
+ require.True(t, verify, "Multisig: verification succeeded, it should failed")
+ res := br.Verify()
+ require.Error(t, res, "Multisig: batch verification passed on broken signature")
+
+}
+
+func TestMultisigLessThanTrashold(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var msig MultisigSig
+ var sigs []MultisigSig
+
+ var s Seed
+ var secrets []*SecretKey
+ var pks []PublicKey
+
+ var err error
+ var addr Digest
+
+ version := uint8(1)
+ threshold := uint8(3)
+ txid := TestingHashable{[]byte("test: txid 1000")}
+
+ secrets = make([]*SecretKey, 4)
+ for i := 0; i < 4; i++ {
+ RandBytes(s[:])
+ secrets[i] = GenerateSignatureSecrets(s)
+ }
+
+ // addr = hash (... |pk0|pk1|pk2|pk3)
+ pks = make([]PublicKey, 4)
+ pks[0] = secrets[0].SignatureVerifier
+ pks[1] = secrets[1].SignatureVerifier
+ pks[2] = secrets[2].SignatureVerifier
+ pks[3] = secrets[3].SignatureVerifier
+ addr, err = MultisigAddrGen(version, threshold, pks)
+ require.NoError(t, err, "Multisig: unexpected failure generating message digest")
+
+ sigs = make([]MultisigSig, 3)
+ sigs[0], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[0])
+ require.NoError(t, err, "Multisig: unexpected failure in multisig signing")
+ sigs[1], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[1])
+ require.NoError(t, err, "Multisig: unexpected failure in multisig signing")
+ sigs[2], err = MultisigSign(txid, addr, version, threshold, pks, *secrets[2])
+ require.NoError(t, err, "Multisig: unexpected failure in multisig signing")
+
+ msig, err = MultisigAssemble(sigs)
+ require.NoError(t, err, "should be able to detect insufficient signatures for assembling")
+ msig.Subsigs[1].Sig = BlankSignature
+ verify, err := MultisigVerify(txid, addr, msig)
+ require.False(t, verify, "Multisig: verification passed, should have failed")
+ require.Error(t, err, "Multisig: expected verification failure with err")
+
+ msig, err = MultisigAssemble(sigs)
+ require.NoError(t, err, "should be able to detect insufficient signatures for assembling")
+ msig.Subsigs = msig.Subsigs[:len(msig.Subsigs)-1]
+ verify, err = MultisigVerify(txid, addr, msig)
+ require.False(t, verify, "Multisig: verification passed, should have failed")
+ require.Error(t, err, "Multisig: expected verification failure with err")
+
+}
diff --git a/daemon/algod/api/Makefile b/daemon/algod/api/Makefile
index d2f8c9cc3..cfebb428f 100644
--- a/daemon/algod/api/Makefile
+++ b/daemon/algod/api/Makefile
@@ -25,7 +25,7 @@ algod.oas3.yml: algod.oas2.json
rm -f .3tmp.json
oapi-codegen: .PHONY
- GO111MODULE=on go get -u "github.com/algorand/oapi-codegen/...@v1.3.5-algorand5"
+ ../../../scripts/buildtools/install_buildtools.sh -o github.com/algorand/oapi-codegen -c github.com/algorand/oapi-codegen/cmd/oapi-codegen
clean:
rm -rf server/v2/generated/types.go server/v2/generated/routes.go server/v2/generated/private/types.go server/v2/generated/private/routes.go algod.oas3.yml
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index b71ed0231..fa39bf77a 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -1744,6 +1744,12 @@
"items": {
"$ref": "#/definitions/AccountStateDelta"
}
+ },
+ "logs": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/LogItem"
+ }
}
}
},
@@ -1910,6 +1916,24 @@
"format": "int64"
}
}
+ },
+ "LogItem": {
+ "description": "Application Log",
+ "type": "object",
+ "required": [
+ "id",
+ "value"
+ ],
+ "properties": {
+ "id": {
+ "description": "unique application identifier",
+ "type": "integer"
+ },
+ "value": {
+ "description": " base64 encoded log message",
+ "type": "string"
+ }
+ }
}
},
"parameters": {
@@ -2307,6 +2331,13 @@
"description": "\\[gd\\] Global state key/value changes for the application being executed by this transaction.",
"$ref": "#/definitions/StateDelta"
},
+ "logs": {
+ "description": "\\[lg\\] Logs for the application being executed by this transaction.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/LogItem"
+ }
+ },
"txn": {
"description": "The raw signed transaction.",
"type": "object",
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 08c6289b1..46e157c13 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -493,6 +493,13 @@
},
"type": "array"
},
+ "logs": {
+ "description": "\\[lg\\] Logs for the application being executed by this transaction.",
+ "items": {
+ "$ref": "#/components/schemas/LogItem"
+ },
+ "type": "array"
+ },
"pool-error": {
"description": "Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.\n",
"type": "string"
@@ -1267,6 +1274,12 @@
"$ref": "#/components/schemas/DryrunState"
},
"type": "array"
+ },
+ "logs": {
+ "items": {
+ "$ref": "#/components/schemas/LogItem"
+ },
+ "type": "array"
}
},
"required": [
@@ -1327,6 +1340,24 @@
],
"type": "object"
},
+ "LogItem": {
+ "description": "Application Log",
+ "properties": {
+ "id": {
+ "description": "unique application identifier",
+ "type": "integer"
+ },
+ "value": {
+ "description": " base64 encoded log message",
+ "type": "string"
+ }
+ },
+ "required": [
+ "id",
+ "value"
+ ],
+ "type": "object"
+ },
"StateDelta": {
"description": "Application state delta.",
"items": {
@@ -3340,6 +3371,13 @@
},
"type": "array"
},
+ "logs": {
+ "description": "\\[lg\\] Logs for the application being executed by this transaction.",
+ "items": {
+ "$ref": "#/components/schemas/LogItem"
+ },
+ "type": "array"
+ },
"pool-error": {
"description": "Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.\n",
"type": "string"
@@ -3404,6 +3442,13 @@
},
"type": "array"
},
+ "logs": {
+ "description": "\\[lg\\] Logs for the application being executed by this transaction.",
+ "items": {
+ "$ref": "#/components/schemas/LogItem"
+ },
+ "type": "array"
+ },
"pool-error": {
"description": "Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.\n",
"type": "string"
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index b231e80a7..beab935e7 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -434,6 +434,14 @@ func (client RestClient) PendingTransactionInformation(transactionID string) (re
return
}
+// PendingTransactionInformationV2 gets information about a recently issued transaction.
+// See PendingTransactionInformation for more details.
+func (client RestClient) PendingTransactionInformationV2(transactionID string) (response generatedV2.PendingTransactionResponse, err error) {
+ transactionID = stripTransaction(transactionID)
+ err = client.get(&response, fmt.Sprintf("/v2/transactions/pending/%s", transactionID), nil)
+ return
+}
+
// SuggestedFee gets the recommended transaction fee from the node
func (client RestClient) SuggestedFee() (response v1.TransactionFee, err error) {
err = client.get(&response, "/v1/transactions/fee", nil)
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index 4e0b21581..5a76d6fd2 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -521,6 +521,13 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
}
result.LocalDeltas = &localDeltas
}
+
+ var err3 error
+ result.Logs, err3 = DeltaLogToLog(delta.Logs, appIdx)
+ if err3 != nil {
+ messages = append(messages, err3.Error())
+ }
+
if pass {
messages = append(messages, "PASS")
} else {
@@ -563,6 +570,22 @@ func StateDeltaToStateDelta(sd basics.StateDelta) *generated.StateDelta {
return &gsd
}
+// DeltaLogToLog EvalDelta.Logs to generated.LogItem
+func DeltaLogToLog(logs []basics.LogItem, appIdx basics.AppIndex) (*[]generated.LogItem, error) {
+ if len(logs) == 0 {
+ return nil, nil
+ }
+ encodedLogs := make([]generated.LogItem, 0, len(logs))
+ for _, log := range logs {
+ if log.ID != 0 {
+ return nil, fmt.Errorf("logging for a foreign app is not supported")
+ }
+ msg := base64.StdEncoding.EncodeToString([]byte(log.Message))
+ encodedLogs = append(encodedLogs, generated.LogItem{Id: uint64(appIdx), Value: msg})
+ }
+ return &encodedLogs, nil
+}
+
// MergeAppParams merges values, existing in "base" take priority over new in "update"
func MergeAppParams(base *basics.AppParams, update *basics.AppParams) {
if len(base.ApprovalProgram) == 0 && len(update.ApprovalProgram) > 0 {
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index 421025dab..c926934dd 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -346,7 +346,7 @@ func init() {
// legder requires proto string and proto params set
var proto config.ConsensusParams
- proto.LogicSigVersion = 4
+ proto.LogicSigVersion = 5
proto.LogicSigMaxCost = 20000
proto.MaxAppProgramCost = 700
proto.MaxAppKeyLen = 64
@@ -1091,3 +1091,113 @@ int 1`)
logResponse(t, &response)
}
}
+
+func TestDryrunLogs(t *testing.T) {
+ t.Parallel()
+
+ ops, err := logic.AssembleString(`
+#pragma version 5
+byte "A"
+loop:
+int 0
+dup2
+getbyte
+int 1
++
+dup
+int 97 //ascii code of last char
+<=
+bz end
+setbyte
+dup
+log
+b loop
+end:
+int 1
+return
+`)
+
+ require.NoError(t, err)
+ approval := ops.Program
+ ops, err = logic.AssembleString("int 1")
+ clst := ops.Program
+ ops, err = logic.AssembleString("#pragma version 5 \nint 1")
+ approv := ops.Program
+ require.NoError(t, err)
+
+ var appIdx basics.AppIndex = 1
+ creator := randomAddress()
+ sender := randomAddress()
+ dr := DryrunRequest{
+ Txns: []transactions.SignedTxn{
+ {
+ Txn: transactions.Transaction{
+ Header: transactions.Header{Sender: sender},
+ Type: protocol.ApplicationCallTx,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: appIdx,
+ OnCompletion: transactions.OptInOC,
+ },
+ },
+ },
+ {
+ Txn: transactions.Transaction{
+ Header: transactions.Header{Sender: sender},
+ Type: protocol.ApplicationCallTx,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: appIdx + 1,
+ OnCompletion: transactions.OptInOC,
+ },
+ },
+ },
+ },
+ Apps: []generated.Application{
+ {
+ Id: uint64(appIdx),
+ Params: generated.ApplicationParams{
+ Creator: creator.String(),
+ ApprovalProgram: approval,
+ ClearStateProgram: clst,
+ LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ },
+ },
+ {
+ Id: uint64(appIdx + 1),
+ Params: generated.ApplicationParams{
+ Creator: creator.String(),
+ ApprovalProgram: approv,
+ ClearStateProgram: clst,
+ LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ },
+ },
+ },
+ Accounts: []generated.Account{
+ {
+ Address: sender.String(),
+ Status: "Online",
+ Amount: 10000000,
+ },
+ },
+ }
+ dr.ProtocolVersion = string(dryrunProtoVersion)
+
+ var response generated.DryrunResponse
+ doDryrunRequest(&dr, &response)
+ require.NoError(t, err)
+ checkAppCallPass(t, &response)
+ if t.Failed() {
+ logResponse(t, &response)
+ }
+ logs := *response.Txns[0].Logs
+ assert.Equal(t, 32, len(logs))
+ for i, m := range logs {
+ assert.Equal(t, base64.StdEncoding.EncodeToString([]byte(string(rune('B'+i)))), m.Value)
+ }
+ encoded := string(protocol.EncodeJSON(response.Txns[0]))
+ assert.Contains(t, encoded, "logs")
+
+ assert.Empty(t, response.Txns[1].Logs)
+ encoded = string(protocol.EncodeJSON(response.Txns[1]))
+ assert.NotContains(t, encoded, "logs")
+
+}
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index 73c3e0faa..6cebc1644 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -236,136 +236,137 @@ func RegisterHandlers(router interface {
var swaggerSpec = []string{
"H4sIAAAAAAAC/+x9/ZPbOK7gv8Lze1X5OMvufMzspqum3vUmmdm+yWRS6d69u5fOzdASbHNbIrUi1W1P",
- "rv/3K4CkREmU7f542Tf19qekTRIEARAEARD6MklVUSoJ0ujJ8ZdJyStegIGK/uJpqmppEpHhXxnotBKl",
- "EUpOjn0b06YScjWZTgT+WnKznkwnkhfQ9sHx00kFf69FBdnk2FQ1TCc6XUPBEbDZlti7gbRJVipxIE4s",
- "iNM3k5sdDTzLKtB6iOXPMt8yIdO8zoCZikvNU2zS7FqYNTNroZkbzIRkSgJTS2bWnc5sKSDP9Mwv8u81",
- "VNtglW7y8SXdtCgmlcphiOdrVSyEBI8VNEg1DGFGsQyW1GnNDcMZEFff0SimgVfpmi1VtQdVi0SIL8i6",
- "mBx/mmiQGVTErRTEFf13WQH8Bonh1QrM5PM0trilgSoxoogs7dRRvwJd50Yz6ktrXIkrkAxHzdhPtTZs",
- "AYxL9vH71+zFixevcCEFNwYyJ2Sjq2pnD9dkh0+OJxk34JuHssbzlaq4zJKm/8fvX9P8Z26Bh/biWkN8",
- "s5xgCzt9M7YAPzAiQkIaWBEfOtKPIyKbov15AUtVwYE8sZ0flCnh/P9QrqTcpOtSCWkifGHUymxzVIcF",
- "w3fpsAaBTv8SKVUh0E9HyavPX55Nnx3d/Munk+Tf3Z/fvLg5cPmvG7h7KBDtmNZVBTLdJqsKOO2WNZdD",
- "enx08qDXqs4ztuZXxHxekKp3YxmOtarziuc1yolIK3WSr5Rm3IlRBkte54b5iVktc1RTCM1JOxOalZW6",
- "EhlkU9S+12uRrlnKtQVB/di1yHOUwVpDNiZr8dXt2Ew3IUkQrzvRgxb0n5cY7br2UAI2pA2SNFcaEqP2",
- "HE/+xOEyY+GB0p5V+naHFTtfA6PJscEetkQ7iTKd51tmiK8Z45px5o+mKRNLtlU1uybm5OKSxrvVINUK",
- "hkQj5nTOUdy8Y+QbECNCvIVSOXBJxPP7bkgyuRSrugLNrtdg1u7Mq0CXSmpgavE3SA2y/X+e/fyeqYr9",
- "BFrzFXzg6SUDmapsnMdu0tgJ/jetkOGFXpU8vYwf17koRATln/hGFHXBZF0soEJ++fPBKFaBqSs5hpCF",
- "uEfOCr4ZTnpe1TIl5rbTdgw1FCWhy5xvZ+x0yQq++e5o6tDRjOc5K0FmQq6Y2chRIw3n3o9eUqlaZgfY",
- "MAYZFpyauoRULAVkrIGyAxM3zT58hLwdPq1lFaDjgYyi08yyBx0Jm4jM4NbFFlbyFQQiM2N/cZqLWo26",
- "BNkoOLbYUlNZwZVQtW4GjeBIU+82r6UykJQVLEVExs4cOVB72D5OvRbOwEmVNFxIyFDzEtLKgNVEozgF",
- "E+6+zAyP6AXX8O3LsQO8bT2Q+0vV5/pOjh/EbeqU2C0ZORex1W3YuNnUGX/A5S+cW4tVYn8eMFKszvEo",
- "WYqcjpm/If88GWpNSqBDCH/waLGS3NQVHF/Ip/gXS9iZ4TLjVYa/FPann+rciDOxwp9y+9M7tRLpmViN",
- "ELPBNXqbomGF/QfhxdWx2UQvDe+UuqzLcEFp51a62LLTN2NMtjBvK5gnzVU2vFWcb/xN47YjzKZh5AiS",
- "o7QrOXa8hG0FiC1Pl/TPZknyxJfVb/hPWeYxmqIAu4OWnALOWfDR/YY/4ZYHeydAKCLlSNQ5HZ/HXwKE",
- "/rWC5eR48i/z1lMyt6167uDijDfTyUkL5+Fnakfa9fUuMm0zE9Jyh7pO7Z3w4fFBqFFMyFDt4fCnXKWX",
- "d8KhrFQJlRGWjwuEM9wpBJ6tgWdQsYwbPmsvVdbOGpF3GvhnGke3JKgiR9zP9B+eM2zGXciNN9/QdBUa",
- "jTgVOJoytPjsOWJnwg5kiSpWWCOPoXF2Kyxft5NbBd1o1E+OLJ/70CLceWvtSkYj/CJw6e2t8WShqrvJ",
- "S08QJGvvwowj1Mb6xZV3OUtd6zJx9InY07ZDD1Drfhyq1ZBCffAxWnWocGb4fwAVNEJ9CCp0AT00FVRR",
- "ihweYL+uuV4PF4EGzovn7OzPJ988e/7L82++xRO6rNSq4gVbbA1o9tidK0ybbQ5PhisjBV/nJg7925f+",
- "BtWFu5dChHAD+5AddQ6oGSzFmPUXIHZvqm1VywcgIVSVqiI2L4mOUanKkyuotFAR98UH14O5HqiHrN3d",
- "+91iy665Zjg3XcdqmUE1i1Ee71l0pBso9L6DwoI+38iWNg4gryq+HXDArjeyOjfvITzpEt9b95qVUCVm",
- "I1kGi3oVnlFsWamCcZbRQFKI71UGZ4abWj+AFmiBtcggI0IU+ELVhnEmVYYbGjvH9cOIL5OcKOT7MaHK",
- "MWt7/iwAreOU16u1YWhWqhhr24EJTy1TEjor9MjVr7mz2152Ousnyyvg2ZYtACRTC3e/cjc/WiQnt4zx",
- "ERennVq0mjtBB6+yUiloDVniwkt7UfP9LJfNDjoR4oRwMwvTii15dUdkjTI834Mo9Ymh25gT7lI6xPqw",
- "6XcxsD95yEZe4R3TSgHaLri7czAwRsIDaXIFFV3O/kP55ye5K/vqciR04k7gc1Hg9mWSS6UhVTLTUWA5",
- "1ybZt22xU8dMwBUEOyW2UwnwiIPgHdfGXtGFzMhktOqG5qExNMU4wqMnCkL+qz9MhrBT1JNS17o5WXRd",
- "lqoykMXWIGGzY673sGnmUssAdnN8GcVqDfsgj1EpgO+IZVdiCcSN8xE1Pqzh4sgdj+fANkrKDhItIXYh",
- "cuZ7BdQN3ccjiOD9ohlJgiN0T3Ian/V0oo0qS9x/JqllM26MTGe294n5S9t3KFzctHo9U4CzG4+Tw/za",
- "UtYGDtYcbTuCzAp+iWcTWWrWlzDEGTdjooVMIdkl+bgtz7BXuAX2bNIRI9mFJoPZepujJ79RoRsVgj1c",
- "GFvwiMX+wXrAz1vv0AMYLW/AcJHrxjBp3OztLOSR72dLoBVZQQrS5FuU1aWoChvUouNM+9+s2ZO5WWz4",
- "pt1+MmMVXPMq8z2Gt6VgMYmQGWzi2pV3fCMZbJiII71sZhaGpT7kJEMAs+hGt0G8NFdayFVio4P7DrUm",
- "qPdIs1oKd4BdQ+XwWkLljl3jo2OJUT6CtguPXaRwzpm7EAGHxqe1yFlu6VgQlRpwIxYirRS3sVEkam+B",
- "rIKCI3YUpXPH/vicu4j92rb7UK13kYeyG4fr5XVUwzQier0mZqGq7RMxlHq82oKGsYWscrXgeYIGPyQZ",
- "5Gav6w0vEvCGeuJ5rdLh8C7KFxef8uzi4jN7h33pbgHsErZzilizdM3lCtowQrhf7K0BNpDW4dHSI+NB",
- "F0HnK+1i370KTielUnnSXHn7YY/BcdOn+6VILyFjqK9oi7lT8FGXQzgJe4wirpvA0PV6603IsgQJ2ZMZ",
- "YyeSQVGarfOv9Cye3uTykdk1/4ZmzWqKUXPJaJGzCxl3bdgI9z33lAezeyfZlK97TmWB7J7IbOTIduLX",
- "FKBBcNH9udM7ekYjg6NvcKIHQmWxOMSH8APlQfEOl0VG15H2dNP1ohCUDBV0m6Lm9PHp4Q1fmBlj56Q7",
- "8IKl4QoqnlOmh/aOY6FZIfCirus0BciOL2TSwSRVhZv4cftfq5Yu6qOjF8COnvTHaIPmqrtL2j3QH/sd",
- "O5raJiIX+45dTC4mA0gVFOoKMnsfC+XajtoL9r81cC/kzwPFzAq+tTc5vxeZrpdLkQpL9FyhXl+pntUp",
- "FbVAhegBHrOaCTOlo4woSta65Uu7ASdR6+khfD4RqGin41GK2s5HJbuyoxlseIqr5KRkttYiaORsaAQZ",
- "VSYhgKgLeseMLgigO3r8jvtuqM+tA2I3fuc9F0SHHIG4zvbb7gNiRDE4ZPufsFIh14XLP/JJKrnQZoCk",
- "c0dQBKgRyMihM2P/R9Us5bR/y9pAc7dTFV2Y6CKNM9AZ6+d0llpLIcihAOshopanT/sLf/rU8VxotoRr",
- "n7SHHfvkePrUbgKlzb13QE80N6cRA4oc83iaRhKt11yvZ3ud9AT3IN98APr0jZ+QNpPWdMTgwiullg+w",
- "WpFtojYLbGIrdZwjd9sjzUq+HTWvS0Qwkq0F1WVOvny17Ekkc/pvLUoE2WaWbA10slL/7+N/O/50kvw7",
- "T347Sl799/nnLy9vnjwd/Pj85rvv/l/3pxc33z35t3+NGS/aiEU87vNnrteIqdMcG3kqbeQWLU9y2G2d",
- "H0AtvzbePRFDZnrKB0s6ROg+xBgi0JQgZpPMndVlmW8f4JCxgFgF7o6hO+5RbVvVMkxKdZKnt9pAMYww",
- "2KG/jNx+PnrvxEBKlcyFhKRQErbRdxhCwk/UGLUNSS2NDKYDYmxs33vTwb+HVneeQ5h5X/oStwM19KFJ",
- "kX0A5vfh9oJLYTou3WwgLxlnaS7Ida6kNlWdmgvJyTnXM717YuFdjuPu2te+S9w/HHHfOlAXkmukYeOy",
- "iwYdlxBxxn8P4L22ul6tQPdMcbYEuJCul5DkaKG56CaTWIaVUFF0eGZ7ovW55Dl5l3+DSrFFbbrHPWUN",
- "WmvaRrpwGqaWF5IblgPXhv0k5PmGwPlbtZcZCeZaVZcNFUa8AiBBC53EFekPtpX0qVv+2ulWesJhm72+",
- "+doHgMc9ltPmMD9940zh0zdk77QxrgHuXy3wUQiZRIUMr6iFkJQa3ZMt9hitNi9AT9pomeP6hTQbiYJ0",
- "xXORcXM3ceiruMFetLujJzUdRvT82H6tn2NX7JVKSp5eUv7JZCXMul7MUlXM/RVgvlLNdWCecSiUpLZs",
- "zksx1yWk86tne8yxe+grFlFXN9OJ0zr6wTPdHODYgvpzNhEk/7dR7NEPb8/Z3HFKP7IJrhZ0kJkYubW5",
- "95UdBwIu3j7Qshm+eIF+A0shBbYfX8iMGz5fcC1SPa81VH/iOZcpzFaKHTMH8g03nPxOPW/62BtK8gk6",
- "bMp6kYuUXYZHcbs1x5yxFxefUEAuLj4P4s3Dg9NNFXdw0wTJtTBrVZvERSTGfVetf48gW1/wrlmnzMG2",
- "EukiHg7+iNO9LHUSeGHjyy/LHJcfiKFmNIjyFZk2qvJKEDWj86Mhf98rF3Gv+LV/NVJr0OzXgpefhDSf",
- "WeJ8PidlSS5e8rH+6nQNyuS2hMP9tC2KLbDY3Z4Wbg0q2JiKJyVfgY4u3wAvift0UBfkRctzRsM6/maf",
- "rUWg2gXs9CsGeNw6l5YWd2ZH+QBKfAnURCykPqidWn/4XfmFoP6schSyO7MrgBHlUm3WCe7t6Ko0irjn",
- "TPOwa4U62ce/tVhJ3ATuDdwCWLqG9BIyCv6Rf3zaGe5TLNwJ51WH0PbZmk2ZpbcV5ApZAKvLjDsbgMtt",
- "P8ldgzE+s/8jXML2XLVPM26T1X4znbiAVoIyM7ZRSVKDwwiFNdy2PijWY76Lb1LQqSyZjevYbGQvFseN",
- "XPgx4xvZnpAPsIljQtGQYYe8l7yKEMIK/wgJ7rBQhHcv0Y9GkXhlRCpKu/7D4lIfOmMQyL7DJXqcqGX/",
- "1Bgo9agSs52TBdfxAwSwBfmBe6ifzeRnsl5FG6hmVPrACe4ihyCiqt3O5hUZXX7Z9i33GGpxKYFKtqe6",
- "R6NLkdB8WLvUAHHVJgSQy+eQg3ZvQBalyOfsiG7oReC8OVzx0SjY6Juj0yARJ3jK2rwo8oqtvxmmzesy",
- "W1XCvzzyz438G6PJ9FbvhaYTlxsaY4eSZGVkkMOKu6APZZ36hAOL2iMdMAjx+Hm5zIUElsRyerjWKhU2",
- "D6DV5W4OQCP0KWPWwcMOhhAT4wBt8pYTYPZehXtTrm6DpARB7nXuYZOfPfgb9nub2/Iezrzda4YOdUe7",
- "iabt8zvLxqEXajqJqqSxG0KnF7NdFjC4UsVEFFXT0C8z9P5oyIGO46SjWZPLmLcOrQogMTzzw4JrA3ss",
- "lnjIPwmCJhWshDbQ3ptxt3pH0Nf1XVwpA8lSVNokdGWPLg87fa/JGPweu8bVT4dUzNYHEFlc+9C0l7BN",
- "MpHXcW67eX98g9O+b+5Pul5cwpYOGeDpmi2ongWeQp3psc+OqW1e284Fv7MLfscfbL2HyRJ2xYkrpUxv",
- "jt+JVPX0ya7NFBHAmHAMuTZK0h3qJcjEGeqW4E5m84Uot2i2y2sw2Ey3zmYa1bwWUnQtgaG7cxU26c3m",
- "tQXlIIZvLEb2AC9LkW16d3gLdSRsRwb8LQx1a/FHQlGTBtgeCgT39VgabwXe52BZGpyZtrDHINVxP2X6",
- "CZaBQginEtqXpRoSCkWbMtH20eoceP4jbP+KfWk5k5vp5H5X/hitHcQ9tP7QsDdKZ/Jl2ytgx4N3S5Lz",
- "sqzUFc8T5xgZE81KXTnRpO7ej/KVVV38+n3+9uTdB4c+ZW4Cr1zC4q5VUb/yd7MqvBHHshbPA88IWav+",
- "7mwNsYD5zVvi0Jnik0w7thxqMSdcdnu1jrJgKzrnyjIeUtvrKnE+PbvEHb49KBvXXnsjtp69rjePX3GR",
- "+6uox3Z/UuydtEInq/a+XsEwxfZB1c1gd8d3Rytde3RSONeOIiiFrfOjmZL9xCI0IemGS6Ja8C1KkHVO",
- "D5WTrIsEt1+ic5HG3RZyoVE4pPX5YmdGnUeMUYRYi5EQgqxFAAu76QOiZT0kgzmixCSX0g7aLZQr0FhL",
- "8fcamMhAGmyqXKJhZ6PivvS588PjNJ6n7wC7VP0G/H1sDAQ1Zl0QErsNjNDDHHkl4i+cfqGNaxx/CByD",
- "twhUhTMOjsQdQSYnH06abbR/3fUUh/UUh/oPBcPW3tlfzNG7LdYW0ZE5osUZR0+Lk/GTgt5fHH5GtEcC",
- "oRseBjYnludaRcDU8ppLW2sNx1kautEarM8AR12rih4taohG6YVOlpX6DeI32SUyKpL76EhJ5iKNnkUe",
- "g/WVaOOVaatoevqGeIyK9pglFzSybiBxZIeTlAeuc0rm9g4uLq1Y27pwnfB1fHOEKSdzC7/dHA7nQZpO",
- "zq8XPFYiBQ0qxOmkDdJ0XHFGMT/Yc0E3bxic7AXxnqavsC/9SqjaBOXhq/I7Gke/L5HPIBUFz+NWUkbU",
- "7z4By8RK2OJ6tYagepsDZKuSWilyFfBsGKwlzemSHU2D+pCOG5m4EloscqAez2yPBddgX5qFr89cYpQB",
- "adaauj8/oPu6llkFmVlrS1itWGPA2kdF3ve9AHMNINkR9Xv2ij0mr78WV/AEqehskcnxs1eUlmL/OIod",
- "dq6K5i69kpFi+V9OscTlmMIeFgYeUg7qLPrq1JY+HldhO3aTHXrIXqKeTuvt30sFl3wF8WhusQcnO5a4",
- "SU7DHl1kZut2alOpLRMmPj8YjvppJDUN1Z9Fw71RKXADGcW0KlCe2tJsdlIPzhYBdeWSPF6+kUIspX9r",
- "1Lswf10HsT3LY6umQNh7XkCXrFPG7eNsei7lHvU7hThjp77EA9WPaspGWdrgXLh0MumQhVQmR0hDl6ja",
- "LJM/snTNK56i+puNoZssvn0ZqZnVLZMjb4f4V6d7BRqqqzjpqxGx99aEG8seSyWTAjVK9qRNBQ12ZbTY",
- "jTI8jye1eI3ez2naDfpQAxShJKPiVnfEjQea+l6CJ3cAvKcoNuu5lTzeemVfXTLrKi4evEYO/eXjO2dl",
- "FKqKFfxpt7uzOCowlYAryq+JMwlh3pMXVX4QF+6D/T82ytLeABqzzO/l2EXgT7XIs7+2qe29soMVl+k6",
- "GuNY4MBf2jqpzZLtPo4+MV9zKSGPgrNn5i/+bI2c/n9Th85TCHlg3345Qbvc3uJaxLtoeqT8hEheYXKc",
- "IKRqN9e3SQ7LVypjNE9bzKSVsuEb4KC02t9r0Cb2XpkabF4l+bLwXmArezGQGVnVM2bf9yIunReaZM2K",
- "os7taz/IVlA5J2td5opnU4Zwzt+evGN2Vu1qVdC7UqostrJvxTur6PkwgspHt3k8P5aGeTic3XlhuGpt",
- "qPSJNrwoYxn22OPcd6A0/tCvS2ZeSJ0Ze2MtbO3tNztJWyOBNdM5HU8ygf8xhqdrMl072mRc5A8vieel",
- "UgeloZsqu03xIvvs3yhfFc8WxZsyhfeLa6FteXu4gm5Sf/PCxV2dfJJ/d3lVLaWVlKiO3vUC6y5k98jZ",
- "4L13/UYx6xH+loaLVnWVwm0rBJ7RqOgb4n65wUFNaPuasKnJ6j9bknKppEjpBW9QUL9B2ZXKPyQucsBj",
- "575bym9xt0Mjmyta5LBJD3JUHC176BWhI9zQMRu0IlOtdNg/DdVkX3PDVmC002yQTX0hS+cvEVKDK0ZF",
- "X00I9KSqOrEm0pDR8GVbjuaWYkQpviMG8PfY9t5djygt71JIMoQc2VwGoPVoUCVvg9aTMGylQLv1dJ/k",
- "6k84ZkbPUjPYfJ75yt8Ew4ZqcNk2LjkEdeKjlC4qiH1fY19GYZn25046sZ30pCzdpNEXtQ2HY6U4Rwkc",
- "iTYl3t0fELeBH0LbIW470wvoPEVBgysKTkJJ5/BAMEZKvLy94nltJcpWirBpPdFnYEJG0HgnJLR16SMH",
- "RBo9EogxtF9Hxum04saagAfptHPgOUUkYwpNG+eivS+oHoOJJLRGP8c4G9uCrCOKo+nQGm5cbpty+Cjd",
- "gTHxmr7D4Qg5LK9KVpUzojJK3OwVXI0pDlTcvlRx9wAYboOhTWSHm4rbnXObk2jswUsmNN51ikUeSVV7",
- "0zQGRYcpJ3axpX9jBTbGV+AC2HcuCEUDb21f7i7OlCPvEy1Wd+RKO/4B2dLbAyGPYtL/FtVK+EZwUCvF",
- "Kp7mCR+l6ShfAp4uFc3jk67MkqKLXtraat67L63jdbmnpBpHkvU+tq/TudW+1gc/lrKXjmaYcuPSxw1n",
- "u6qk2WLaMQg23m+LeNsPYkX9D2Mxfhvix+bB6MPshoEVRrB3EtQnjwwR+tFnprGSCxdgarfIkLIuh3WY",
- "VXxIdlvL4P4iXGYoAYmt5I6JnAftvSGVIhs7TMHZI56XHZLaF189S1JV8MCkDY7QW5J2mFx06PJoHSQx",
- "tYbhOg9mQIe2I7Q/hPCtXhgSd3w7m8Uh2zn+cAaHkz6xBPFPu4ba5Ktpg843ANy8Ma7/dcx7YG/II46q",
- "Hk1rkWf7mNtxO7alE8ix9otz0P5Dijf8YhNfhtvNvWO/zcHfZwIRJrLWzuTBVIFD8QBfohsW8RxSrcO0",
- "roTZUo6ctzTFL9G3Bz+AdF9CcB+WaTINXKDbftPM+b1XTe/2M1Q/KPtpiALNXzIFDRUBe7vhRZmD2xff",
- "PVr8AV788WV29OLZHxZ/PPrmKIWX37w6OuKvXvJnr148g+d//OblETxbfvtq8Tx7/vL54uXzl99+8yp9",
- "8fLZ4uW3r/7wyH8DyiLafl/pf1OFk+Tkw2lyjsi2NOGl+BG2tqYBirGvlsBT2olQcJFPjv1P/8PvsFmq",
- "iuCzte7XiQuCTNbGlPp4Pr++vp6FQ+YrKkubGFWn67mfZ1hz7cNp46C1iTXEUet7Q1EgpjpROKG2j2/P",
- "ztnJh9NZKzCT48nR7Gj2jIoSlSB5KSbHkxf0E+2eNfF97oRtcvzlZjqZr4HnZu3+KMBUIvVN+pqvVlDN",
- "XNkI/Onq+dz7d+ZfXDLJza62bjaPex4WDAjeF8+/dOoaZyFcen07/+IznYImW7d//oXcR6O/d9H4YjYi",
- "u5n7+mJuhKt/Pf/SFqS/sbsjh9jN3xfKbLtTAUz6To+2v+KG8PF7obvfL2i4e5ohV3HU66Y4f/g58k//",
- "RT/e+7n3LbPnR0f/xb7K9PKWK95pz3buf5GaLn/iGfOxJZr72deb+1TSay5UaMwq7Jvp5JuvufpTiSLP",
- "c0Y9g6yrIev/Ii+lupa+J56udVHwauu3se4oBf/JDdLhfKWpMG8lrvBS/5kqP8eCeSPKhT5/dWvlQt/0",
- "+qdy+VrK5ffxsbPnt9zgv/8V/1Od/t7U6ZlVd4erU2fK2fSFuS1T2Vp4/mX08Llw15od08nuqsMek59U",
- "wvUTlwJhwUaenjfhZpVZn4gvY+ZT9YLPWnR19kcHtFPl4EfY6n0K/HwN7FcHPhHZr5RmTcGHKVMV+5Xn",
- "efAblaPyZvssru/b58h7v2bcbtAYWksAn/RNOV2uujceZJfgH65bGnQClMOYflv0cgmjX7S3tQFDDeZE",
- "8NnR0VEsGaiPs/PfWIwpyf5aJTlcQT5k9RgSvffru77/PPqFrGHZgfDeHZE6qvi+gLYSwejnsLtv6W+D",
- "3RslHxl2zYX7yEhQu8p+Mq0Qxn8p3iYJuZTU5oyIf108QZAxXNp3MPc9vH9/1bpvdig7va5Npq7luOKi",
- "V3w8d2nwlJjeuBuMYh5Ao6lmzH/6N9/6b9czTulKqjatPwgH+5I0vY8SNEXTVkLSBLTLaRb73oMHecPu",
- "E1VDJXjmMHtvv+jV03vRL2tbHOP7Prbp7ytLQ0NjJ698CaPO33MUeTRX7RcLE6LQ0KVhgOdzl6jS+9WG",
- "k4Mfux8eiPw6b55QRhv7jppYq/Oj+E6thzT0OBKnGl/jp89IcMpKd0xsHWjH8zmFcNdKm/kEFU7XuRY2",
- "fm5o/MVz3tP65vPN/w8AAP//J+D4wN2NAAA=",
+ "rv/3K4CkREmU7f7Y7Jt676ekLRIEARAEARD8MklVUSoJ0ujJ8ZdJyStegIGK/uJpqmppEpHhXxnotBKl",
+ "EUpOjv03pk0l5GoynQj8teRmPZlOJC+gbYP9p5MK/l6LCrLJsalqmE50uoaCI2CzLbF1A2mTrFTiQJxY",
+ "EKdvJjc7PvAsq0DrIZY/y3zLhEzzOgNmKi41T/GTZtfCrJlZC81cZyYkUxKYWjKz7jRmSwF5pmd+kn+v",
+ "odoGs3SDj0/ppkUxqVQOQzxfq2IhJHisoEGqYQgzimWwpEZrbhiOgLj6hkYxDbxK12ypqj2oWiRCfEHW",
+ "xeT400SDzKAibqUgrui/ywrgN0gMr1ZgJp+nscktDVSJEUVkaqeO+hXoOjeaUVua40pcgWTYa8Z+qrVh",
+ "C2Bcso/fv2YvXrx4hRMpuDGQOSEbnVU7ejgn231yPMm4Af95KGs8X6mKyyxp2n/8/jWNf+YmeGgrrjXE",
+ "F8sJfmGnb8Ym4DtGREhIAyviQ0f6sUdkUbQ/L2CpKjiQJ7bxgzIlHP+fypWUm3RdKiFNhC+MvjL7OarD",
+ "gu67dFiDQKd9iZSqEOino+TV5y/Pps+Obv7l00ny7+7Pb17cHDj91w3cPRSINkzrqgKZbpNVBZxWy5rL",
+ "IT0+OnnQa1XnGVvzK2I+L0jVu74M+1rVecXzGuVEpJU6yVdKM+7EKIMlr3PD/MCsljmqKYTmpJ0JzcpK",
+ "XYkMsilq3+u1SNcs5dqCoHbsWuQ5ymCtIRuTtfjsdiymm5AkiNed6EET+o9LjHZeeygBG9IGSZorDYlR",
+ "e7Ynv+NwmbFwQ2n3Kn27zYqdr4HR4PjBbrZEO4kynedbZoivGeOacea3pikTS7ZVNbsm5uTikvq72SDV",
+ "CoZEI+Z09lFcvGPkGxAjQryFUjlwScTz625IMrkUq7oCza7XYNZuz6tAl0pqYGrxN0gNsv1/nv38nqmK",
+ "/QRa8xV84OklA5mqbJzHbtDYDv43rZDhhV6VPL2Mb9e5KEQE5Z/4RhR1wWRdLKBCfvn9wShWgakrOYaQ",
+ "hbhHzgq+GQ56XtUyJea2w3YMNRQlocucb2fsdMkKvvnuaOrQ0YznOStBZkKumNnIUSMNx96PXlKpWmYH",
+ "2DAGGRbsmrqEVCwFZKyBsgMTN8w+fIS8HT6tZRWg44GMotOMsgcdCZuIzODSxS+s5CsIRGbG/uI0F301",
+ "6hJko+DYYkufygquhKp102kERxp6t3ktlYGkrGApIjJ25siB2sO2ceq1cAZOqqThQkKGmpeQVgasJhrF",
+ "KRhw92FmuEUvuIZvX45t4O3XA7m/VH2u7+T4QdymRoldkpF9Eb+6BRs3mzr9Dzj8hWNrsUrszwNGitU5",
+ "biVLkdM28zfknydDrUkJdAjhNx4tVpKbuoLjC/kU/2IJOzNcZrzK8JfC/vRTnRtxJlb4U25/eqdWIj0T",
+ "qxFiNrhGT1PUrbD/ILy4Ojab6KHhnVKXdRlOKO2cShdbdvpmjMkW5m0F86Q5yoanivONP2nctofZNIwc",
+ "QXKUdiXHhpewrQCx5emS/tksSZ74svoN/ynLPEZTFGC30ZJTwDkLPrrf8Cdc8mDPBAhFpByJOqft8/hL",
+ "gNC/VrCcHE/+Zd56Sub2q547uDjizXRy0sJ5+JHannZ+vYNM+5kJablDTaf2TPjw+CDUKCZkqPZw+FOu",
+ "0ss74VBWqoTKCMvHBcIZrhQCz9bAM6hYxg2ftYcqa2eNyDt1/DP1o1MSVJEt7mf6D88ZfsZVyI0339B0",
+ "FRqNOBU4mjK0+Ow+YkfCBmSJKlZYI4+hcXYrLF+3g1sF3WjUT44sn/vQItx5a+1KRj38JHDq7anxZKGq",
+ "u8lLTxAka8/CjCPUxvrFmXc5S03rMnH0idjTtkEPUOt+HKrVkEJ98DFadahwZvg/gAoaoT4EFbqAHpoK",
+ "qihFDg+wXtdcr4eTQAPnxXN29ueTb549/+X5N9/iDl1WalXxgi22BjR77PYVps02hyfDmZGCr3MTh/7t",
+ "S3+C6sLdSyFCuIF9yIo6B9QMlmLM+gsQuzfVtqrlA5AQqkpVEZuXRMeoVOXJFVRaqIj74oNrwVwL1EPW",
+ "7u79brFl11wzHJuOY7XMoJrFKI/nLNrSDRR630ZhQZ9vZEsbB5BXFd8OOGDnG5mdG/cQnnSJ7617zUqo",
+ "ErORLINFvQr3KLasVME4y6gjKcT3KoMzw02tH0ALtMBaZJARIQp8oWrDOJMqwwWNjeP6YcSXSU4U8v2Y",
+ "UOWYtd1/FoDWccrr1dowNCtVjLVtx4SnlikJ7RV65OjXnNltKzuc9ZPlFfBsyxYAkqmFO1+5kx9NkpNb",
+ "xviIi9NOLVrNmaCDV1mpFLSGLHHhpb2o+XaWy2YHnQhxQrgZhWnFlry6I7JGGZ7vQZTaxNBtzAl3KB1i",
+ "fdjwuxjYHzxkI6/wjGmlAG0XXN05GBgj4YE0uYKKDmf/UP75Qe7KvrocCZ24HfhcFLh8meRSaUiVzHQU",
+ "WM61SfYtW2zUMRNwBsFKia1UAjziIHjHtbFHdCEzMhmtuqFxqA8NMY7w6I6CkP/qN5Mh7BT1pNS1bnYW",
+ "XZelqgxksTlI2OwY6z1smrHUMoDdbF9GsVrDPshjVArgO2LZmVgCceN8RI0Pazg5csfjPrCNkrKDREuI",
+ "XYic+VYBdUP38QgieL5oepLgCN2TnMZnPZ1oo8oS159Jatn0GyPTmW19Yv7Sth0KFzetXs8U4OjG4+Qw",
+ "v7aUtYGDNUfbjiCzgl/i3kSWmvUlDHHGxZhoIVNIdkk+LsszbBUugT2LdMRIdqHJYLTe4ujJb1ToRoVg",
+ "DxfGJjxisX+wHvDz1jv0AEbLGzBc5LoxTBo3ezsKeeT72RJoRVaQgjT5FmV1KarCBrVoO9P+N2v2ZG4U",
+ "G75pl5/MWAXXvMp8i+FpKZhMImQGm7h25R3fSAYbJuJIL5uRhWGpDznJEMAsutBtEC/NlRZyldjo4L5N",
+ "rQnqPdKslsJtYNdQObyWULlt1/joWGKUj6DtwmMXKZxz5i5EwK7xYS1ylls6FkSlD7gQC5FWitvYKBK1",
+ "N0FWQcERO4rSuW1/fMxdxH5tv/tQrXeRh7Ibh+vldVTDNCJ6vSZmoartEzGUejzagoaxiaxyteB5ggY/",
+ "JBnkZq/rDQ8S8IZa4n6t0mH3LsoXF5/y7OLiM3uHbelsAewStnOKWLN0zeUK2jBCuF7sqQE2kNbh1tIj",
+ "40EHQecr7WLfPQribFY6PoGVncDqH47nO7U6NVDEsCuVypPmQN4Pygw2w75UXIr0EjKG2pQUgNujH3Xl",
+ "Bwdhj3EB6iZsdb3eegO3LEFC9mTG2IlkUJRm67w/PXusN7h8ZHaNv6FRs5oi6FwymuTsQsYdLzb+fs8V",
+ "78HsXuc2Ie2eQ1kguwcyGzmy2Pk1hY8QXFR77PTdnlHPYGMe2BuBUFksDvFw/EBZWrzDZZHRYande3W9",
+ "KASlagXNpqjXffR86H8QZsbYOWk2PP5puIKK55SHor1bW2hWiNUa7bs0BciOL2TSwSRVhRv4cftfqzQv",
+ "6qOjF8COnvT7aIPGtDvp2jXQ7/sdO5raT0Qu9h27mFxMBpAqKNQVZPa0GMq17bUX7H9r4F7InwfbBiv4",
+ "1p4z/Vpkul4uRSos0XOFu85K9WxiqegLVIgeoBGgmTBT2miJonSWsHxpF+Akats9hEcqAhVPEbjRo7bz",
+ "MdOu7GgGG57iLDkpma21Vxo5G5poRpVJCCDqIN8xogtR6I72vuO6G+pz6x7Zjd95z0HSIUcgrrP9J4sB",
+ "MaIYHLL8T1ipkOvCZUf5FJpcaDNA0jlLKD7VCGRk05mx/6NqlnJav2VtoDl5qoqOc3TMxxFoZ/VjOjuy",
+ "pRDkUID1X9GXp0/7E3/61PFcaLaEa59SiA375Hj61C4Cpc29V0BPNDenEfOOwga4m0bSwNdcr2d7QwgE",
+ "96DIQQD69I0fkBaT1rTF4MQrpZYPMFuRbaI2C2xiM3WcI2fgI81Kvh01/ktEMJJLBtVlTpEGtexJJHP6",
+ "by1KBNnmvWwNdHJm/+/jfzv+dJL8O09+O0pe/ff55y8vb548Hfz4/Oa77/5f96cXN989+bd/jRkv2ohF",
+ "PCr1Z67XiKnTHBt5Km1cGe1NcidunZdCLb823j0RQ2Z6ygdTOkToPsQYItCUIGaTzJ3VZZlvH2CTsYBY",
+ "Be4EpDvOW22/qmWYMuskT2812uCD+Ift+svI2eyj950MpFTJXEhICiVhG70lIiT8RB+jtiGppZHOtEGM",
+ "9e37ljr499DqjnMIM+9LX+J2oIY+NAm8D8D8Ptxe6CtMFqaTDeQl4yzNBTn2ldSmqlNzITm5Dnumd08s",
+ "vEN03Jn82jeJe68jzmUH6kJyjTRsHIrRkOgSIqGC7wG8T1nXqxXoninOlgAX0rUSktxANBadZBLLsBIq",
+ "il3PbEu0Ppc8J9/3b1AptqhNd7unnEZrTds4HA7D1PJCcsNy4Nqwn4Q83xA4f5b2MiPBXKvqsqHCiM8C",
+ "JGihk7gi/cF+JX3qpr92upUumNjPXt987Q3A4x7LuHOYn75xpvDpG7J32gjcAPevFpYphEyiQoZH1EJI",
+ "StzuyRZ7jFabF6AnbSzPcf1Cmo1EQbriuci4uZs49FXcYC3a1dGTmg4jel52P9fPsSP2SiUlTy8pO2ay",
+ "EmZdL2apKub+CDBfqeY4MM84FErSt2zOSzHXJaTzq2d7zLF76CsWUVc304nTOvrB8/Ac4NiE+mM28S3/",
+ "t1Hs0Q9vz9nccUo/sum3FnSQNxk5tbnbnx0HAk7eXh+z+cd4gH4DSyEFfj++kBk3fL7gWqR6Xmuo/sRz",
+ "LlOYrRQ7Zg7kG244+Z16vv6xG57kCXTYlPUiFym7DLfidmmOuYovLj6hgFxcfB5Ew4cbpxsq7n6nAZJr",
+ "YdaqNomLl4z7rlr/HkG2nupdo06Zg20l0sVjHPyRkEBZ6iTwEcenX5Y5Tj8QQ82oE2VTMm1U5ZUgakbn",
+ "R0P+vlcuH6Di1/5OS61Bs18LXn4S0nxmifP5nJQlOaDJA/yr0zUok9sSDvcityi2wGJne5q4NahgYyqe",
+ "lHwFcd+yAV4S92mjLsiLlueMunW8zD6XjEC1E9jpVwzwuHWmL03uzPby4Z34FOgTsZDaoHZqveB35ReC",
+ "+rPKUcjuzK4ARpRLtVknuLajs9Io4p4zzbWzFepkH53XYiVxEbgbegtg6RrSS8goNEn+8Wmnu08AcTuc",
+ "Vx1C20t1NqGXbn6QK2QBrC4z7mwALrf9FHwNxvh7Bx/hErbnqr04cpuc+5vpxIXbEpSZsYVKkhpsRiis",
+ "4bL1Ibse8130lUJiZcls1MnmSnuxOG7kwvcZX8h2h3yARRwTioYMO+S95FWEEFb4R0hwh4kivHuJfjSK",
+ "xCsjUlHa+R8WNfvQ6YNA9m0u0e1ELfu7xkCpR5WYbZwsuI5vIIBfkB+4hvq5Vn4k61W0YXRGhRmc4C5y",
+ "COK92q1sXpHR5adtb5qPoRaXEqhku6t7NLoUCc2HtUtcEFdtugK5fA7ZaPeGi1GKfEaR6IZeBI6bwxUf",
+ "jYKN3og6DdKEgou2zX0nr9j6i2Ha3H2zNS/8vSh/GcrfgJpMb3WbaTpxmasxdihJVkYGOay4C/pQTqxP",
+ "h7CoPdIBgxCPn5fLXEhgSSzjiGutUmGzFFpd7sYANEKfMmYdPOxgCDExDtAmbzkBZu9VuDbl6jZIShDk",
+ "XuceNvnZg79hv7e5LT7izNu9ZuhQd7SLaNpeDrRsHHqhppOoSho7IXRaMdtkAYMjVUxEUTUN/TJD74+G",
+ "HGg7TjqaNbmMeevQqgASwzPfLTg2sMdiiZv8kyBoUsFKaAPtuRlXq3cEfV3fxZUykCxFpU1CR/bo9LDR",
+ "95qMwe+xaVz9dEjFbPUCkcW1Dw17CdskE3kd57Yb98c3OOz75vyk68UlbGmTAZ6u2YKqbeAu1Bke2+wY",
+ "2mbd7ZzwOzvhd/zB5nuYLGFTHLhSyvTG+J1IVU+f7FpMEQGMCceQa6Mk3aFegjyhoW4JzmQ2m4kyn2a7",
+ "vAaDxXTrXKtRzWshRecSGLo7Z2FT8mzWXVCsYngDZGQN8LIU2aZ3hrdQR8J2ZMDfwlC3Fn8kFDVpgO2h",
+ "QHBejyUZV+B9DpalwZ5py44MEjH3U6af/hkohHAooX3RrCGhULQpT24frc6B5z/C9q/YlqYzuZlO7nfk",
+ "j9HaQdxD6w8Ne6N0Jl+2PQJ2PHi3JDkvy0pd8TxxjpEx0azUlRNNau79KF9Z1cWP3+dvT959cOhTXinw",
+ "yqVT7poVtSt/N7PCE3Esa/E88IyQterPztYQC5jf3HQOnSk+BbZjy6EWc8Jll1frKAuWonOuLOMhtb2u",
+ "EufTs1Pc4duDsnHttSdi69nrevP4FRe5P4p6bPen7N5JK3Ryfu/rFQwTgB9U3QxWd3x1tNK1RyeFY+0o",
+ "0VLYKkSaKdlPLEITkk64JKoF36IEWef0UDnJukhw+SU6F2ncbSEXGoVDWp8vNmbUeMQYRYi1GAkhyFoE",
+ "sLCZPiBa1kMyGCNKTHIp7aDdQrnykbUUf6+BiQykwU+VSzTsLFRclz6zf7idxm8ROMDuIkED/j42BoIa",
+ "sy4Iid0GRuhhjtxh8QdOP9HGNY4/BI7BWwSqwhEHW+KOIJOTDyfNNtq/7nqKw2qPQ/2HgmErA+0vNend",
+ "FmuL6MgY0dKRo7vFyfhOQbdDDt8j2i2B0A03A5sTy3OtImBqec2lrQSH/SwNXW8N1meAva5VRVcqNUSj",
+ "9EIny0r9BvGT7BIZFcl9dKQkc5F6zyJX1fpKtPHKtDU+PX1DPEZFe8ySCz6ybiBxZIWTlAeuc0rm9g4u",
+ "Lq1Y26p1nfB1fHGEKSdzC79dHA7nQZpOzq8XPFbABQ0qxOmkDdJ0XHFGMd/Zc0E3dxic7AXxnqatsPcQ",
+ "S6jaBOXhnfc7Gke/L5HPIBUFz+NWUkbU715Qy8RK2NJ/tYagtpwDZGumWily9flsGKwlzemSHU2D6pWO",
+ "G5m4EloscqAWz2yLBddg78GFd+NcYpQBadaamj8/oPm6llkFmVlrS1itWGPA2itP3ve9AHMNINkRtXv2",
+ "ij0mr78WV/AEqehskcnxs1eUlmL/OIptdq7G5y69kpFi+V9OscTlmMIeFgZuUg7qLHon1hZmHldhO1aT",
+ "7XrIWqKWTuvtX0sFl3wF8WhusQcn25e4SU7DHl1kZquKalOpLRMmPj4YjvppJDUN1Z9Fw91RKXABGcW0",
+ "KlCe2sJxdlAPzpYodcWcPF7+I4VYSn/XqHdg/roOYruXx2ZNgbD3vIAuWaeM26vjdF3KlRxwCnHGTn0B",
+ "Cqpu1RS1srTBsXDqZNIhC6mIj5CGDlG1WSZ/ZOmaVzxF9TcbQzdZfPsyUtGrW8RH3g7xr073CjRUV3HS",
+ "VyNi760J15c9lkomBWqU7EmbChqsymgpHmV4Hk9q8Rq9n9O0G/ShBihCSUbFre6IGw809b0ET+4AeE9R",
+ "bOZzK3m89cy+umTWVVw8eI0c+svHd87KKFQVK0fULndncVRgKgFXlF8TZxLCvCcvqvwgLtwH+39ulKU9",
+ "ATRmmV/LsYPAn2qRZ39tU9t7RRErLtN1NMaxwI6/tFVcmynbdRy9AL/mUkIeBWf3zF/83hrZ/f+mDh2n",
+ "EPLAtv1ih3a6vcm1iHfR9Ej5AZG8wuQ4QEjVbq5vkxyWr1TGaJy21EorZcM7wEHht7/XoE3svjJ9sHmV",
+ "5MvCc4GtO8ZAZmRVz5i934u4dG5okjUrijq3t/0gW0HlnKx1mSueTRnCOX978o7ZUbWrpEH3Sqnu2cre",
+ "Fe/MoufDCOoy3eZq/1ga5uFwdueF4ay1ocIs2vCijGXYY4tz34DS+EO/Lpl5IXVm7I21sLW33+wgbQUH",
+ "1gzndDzJBP7HGJ6uyXTtaJNxkT+8YJ+XSh0Urm5qADelley1f6N8zT5bsm/KFJ4vroW2xffhCrpJ/c0N",
+ "F3d08kn+3elVtZRWUqI6etcNrLuQ3SNng/fe9RvFrEf4WxouWtVVCretX3hGvaJ3iPvFEAcVq+1twqZi",
+ "rH9UJeVSSZHSDd6g3H+Dsivkf0hc5IDLzn23lF/iboVGFle0BGOTHuSoOFqU0StCR7ihYzb4iky10mH/",
+ "NFQxfs0NW4HRTrNBNvVlNp2/REgNrlQWvekQ6ElVdWJNpCGj4cu2WM4txYhSfEcM4O/x23t3PKK0vEsh",
+ "yRByZHMZgNajQXXGDVpPwrCVAu3m072Sqz9hnxldS81g83nm65ITDBuqwWnbuOQQ1ImPUrqoILZ9jW0Z",
+ "hWXanzvpxHbQk7J0g0Zv1DYcjhUKHSVwJNqUeHd/QNwGfghth7jtTC+g/RQFDa4oOAkl7cMDwRgp8fL2",
+ "iue1lShbKcKm9USvgQkZQeOdkNBWzY9sEGl0SyDG0Hod6afTihtrAh6k086B5xSRjCk0bZyL9r6gegwm",
+ "ktAc/RjjbGzLxY4ojqZBa7hxuW2K9aN0B8bEa3olxBFyWPyVrCpnRGWUuNkrBxtTHKi4fSHl7gYwXAZD",
+ "m8h2NxW3K+c2O9HYhZdMaDzrFIs8kqr2pvkYlESmnNjFlv6NFdgYn4ELYN+5XBV1vLV9ubd0lEgTLVZ3",
+ "5Erb/0HZ4ita3a/2VG8thbyOraK3qJ7Cu4aDmitWgTVXASndR/lC93Q4aS6xdGWfFGb08NfWLN99+B2v",
+ "Pj4lFTuS9PexveXOrRa3vvyx1L90NFOVG5eGbjjbVQvOlgyPQbB5A7ZUuX32K+rHGMsVsKkC+HnQ+zD7",
+ "Y2DNEeydBPVJKEOEfvQZbqzkwgWq2qU2pKzLhR1mJx+SJdcyuD8Jl2FKQGIz8WtjZx7lO7U6KDHQpzGE",
+ "yZK7kxmu4sRjvXLzuVr5Nx4OKOOxc8J3zIA9SNEMxSKiusLcpT3r8bIjQ/aqXM8EVxU8sCwFtsctZWmY",
+ "lXXo9GgetERqDcN5HsyADm1HaH8I4VtFOCTuuP4yi0P0V/zGEXYnBWoJ4u/EDVfMV1N/nacd3Lgxrv91",
+ "zO1iXQsjHr4eTWuRZ/uY2/HXtjUnyCP5i/Ns/1OqXvxideFwubkCALexmPpMIMJE5toZPBgq8MQe4IR1",
+ "3SIuVyoSmdaVMFtKLvQmuvglemnjB5DugQv3XlCTouEyBOxTdS5gsGpat6+L/aDsix8FnhvIhjZUPe3t",
+ "hhdlDm5dfPdo8Qd48ceX2dGLZ39Y/PHom6MUXn7z6uiIv3rJn7168Qye//Gbl0fwbPntq8Xz7PnL54uX",
+ "z19++82r9MXLZ4uX3776wyP/tJdFtH02639TaZjk5MNpco7ItjThpfgRtrYYBIqxLzPBU1qJUHCRT479",
+ "T//Dr7BZqorgNWL368RFjyZrY0p9PJ9fX1/Pwi7zFVUbToyq0/XcjzMsVvfhtPFs24wk4qh1WqIoEFOd",
+ "KJzQt49vz87ZyYfTWSswk+PJ0exo9oyqOZUgeSkmx5MX9BOtnjXxfe6EbXL85WY6ma+B52bt/ijAVCL1",
+ "n/Q1X62gmrl6G/jT1fO5d4zNv7gsnJtd37ppUO5eXdAhuJg9/9IpV52FcOna8vyLTxELPtnnGOZfyO82",
+ "+nsXjS9mI7KbuS/M5nq4subzL+07Azd2deQQc5n4CqNtc6ocSs8vafsrLgif+CB091mKhrunGXIVe71u",
+ "3lwIX5n/9J/0TebPvSfqnh8d/Sd7bOvlLWe8057tHHgjxXD+xDPmg3I09rOvN/appGtwqNCYVdg308k3",
+ "X3P2pxJFnueMWgbpakPW/0VeSnUtfUvcXeui4NXWL2PdUQr+JRXS4XylqaJxJa64gclnKpkdi4KOKBd6",
+ "1ezWyoWeavsv5fK1lMvv4w2757dc4L//Gf+XOv29qdMzq+4OV6fOlLN5H3Nb37O18PyV8uE96641O6aT",
+ "3VGHPSbHsITrJy53xIKN3Nlv4vQqsz4RX//N5zgGr5V0dfZHB7RTHuJH2Op9Cvx8DexXBz4R2a+Un05R",
+ "mylTFfuV53nwG9Xx8mb7LK7v23vcex+pbhdoDK0lgM+Wp2Q4VxYdN7JL8Df+LQ06kd1hMkRbLXQJMPbC",
+ "sy2qGGowJ4LPjo6OYllUfZyd/8ZiTLcTrlWSwxXkQ1aPIdG7+L/rWe/Rh8+G9RrCc3dE6qhU/gLaEg6j",
+ "r5x3ixDcBrs3Sj4y7JoL93ZMUPTLvoRXCMMWsFT0Rp6pK+lyeZs9Iv5ofIIgY7i0F4juu3n//sqc3+xQ",
+ "dnpdm0xdy3HFRdcfee7uD1BGf+NuMIp5AI2mmjH/onO+ZWWlrkQGjFOel6pN6w/Czr6WT+81h6ba3EpI",
+ "GoBWOY1iL8rwIOHavTw2VIJnDrP39qG2nt6LPphucYyv+9iiv68sDQ2NnbzytZ86f89R5NFctQ9RJkSh",
+ "oUvDAM/nLsOn96uNwwc/dl9siPw6b+6eRj/2HTWxr86P4hu1HtLQ40icanyNnz4jwSmd3zGxdaAdz+cU",
+ "+14rbeYTVDhd51r48XND4y+e857WN59v/n8AAAD//wcUiJu0jwAA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index ac0f2e670..b436e9c90 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -307,6 +307,7 @@ type DryrunTxnResult struct {
LocalDeltas *[]AccountStateDelta `json:"local-deltas,omitempty"`
LogicSigMessages *[]string `json:"logic-sig-messages,omitempty"`
LogicSigTrace *[]DryrunState `json:"logic-sig-trace,omitempty"`
+ Logs *[]LogItem `json:"logs,omitempty"`
}
// ErrorResponse defines model for ErrorResponse.
@@ -336,6 +337,16 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// LogItem defines model for LogItem.
+type LogItem struct {
+
+ // unique application identifier
+ Id uint64 `json:"id"`
+
+ // base64 encoded log message
+ Value string `json:"value"`
+}
+
// StateDelta defines model for StateDelta.
type StateDelta []EvalDeltaKeyValue
@@ -565,6 +576,9 @@ type PendingTransactionResponse struct {
// \[ld\] Local state key/value changes for the application being executed by this transaction.
LocalStateDelta *[]AccountStateDelta `json:"local-state-delta,omitempty"`
+ // \[lg\] Logs for the application being executed by this transaction.
+ Logs *[]LogItem `json:"logs,omitempty"`
+
// Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.
PoolError string `json:"pool-error"`
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index 952a49eb4..3b870f435 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -657,140 +657,141 @@ var swaggerSpec = []string{
"xHoHtwm12qn3QJGKMyfArkA6uOYgndjV3juWaOE9aJvg2IQKZ5y5DRJM1/i0Fji7WyrmRMUP5iAWLJWC",
"Wt+oQWpngURCQQ106KVzYn94zk3Ifmm/e1etN5GHtBsf19PrIIepSfRqiZtlWG0XiSHVm6stKBhayCIX",
"M5onRuGHJINcbzW9mYsEHGNLI69F2u/eBvn8/F2enZ+/J69NW7xbALmA9RQ91iRdUr6Axo0Qnhd7a4AV",
- "pFUoWjpo3Oki6GylbejbV8HxqBQiT+orb9ft0RM3XbxfsPQCMmL4FR4xJwUftHfITEIeGhJXtWPoarn2",
- "KmRZAofs0YSQI06gKPXa2Vc6Gk9ncv5Ab5p/hbNmFfqoKSe4yMk5j5s2rIf7jmfKD7P5JNmQrztOZQfZ",
- "PJFe8YHjRK/QQWOGi57PjdbRU+wZiL6eRA+IykKxiw3hB4yDoq1dZhleRxrppqpZwTAYKmg2NpzT+6f7",
- "N3ymJ4ScIe8wFywFlyBpjpEeyhuOmSIFMxd1VaUpQHZ4zpMWJKko3MQPm/9atnReHRw8BXLwqNtHaaOu",
- "urukPQPdvt+Sg7H9hOgi35Lz0fmoN5KEQlxCZu9jIV3bXluH/Zd63HP+c48xk4Ku7U3On0Wiqvmcpcwi",
- "PReGry9ER+vkAr+ANOCBEbOKMD1GUYYYRW3d7ktzAEdR7ek+bD6RUY2ebkSp4XbeK9mmHUVgRVOzSopM",
- "Zm01gprO+kqQFmUSDhA1QW+Y0TkBVIuP3/Lc9fm5NUBshu+sY4JooSMg18l23b2HjCgEuxz/I1IKs+vM",
- "xR/5IJWcKd0D0pkj0ANUE2RE6EzI/xEVSSme37LSUN/thMQLE16kzQwoY/2cTlNrMAQ5FGAtRPjlq6+6",
- "C//qK7fnTJE5XPmgPdOwi46vvrKHQCh95xPQIc3VSUSBQsO8kaaRQOslVcvJViM9jruTbT4Y+uTYT4iH",
- "SSkUMWbhUoj5PayWZauozgKr2ErdzqG57YEiJV0PqtelATASrQXyIkdbvph3KJI4/rdkpRmyiSxZa2hF",
- "pf7fh/9x+O4o+S+a/H6QvPj36fsPz64ffdX78cn1t9/+v/ZPT6+/ffQf/xZTXpRms7jf50eqlgZSxzlW",
- "/IRbz63RPNFgt3Z2ADH/1HB3SMxspsd8sKRdiO5tbEOYUSVws5HmTquyzNf3IGTsQESCu2OolnlU2a9i",
- "HgalOspTa6Wh6HsYbNffBm4/v3jrRI9KBc8Zh6QQHNbRdxiMw0/4MaobIlsa6IwCYqhv13rTgr8DVnue",
- "XTbzrvjF3Q7Y0Ns6RPYeNr87bse5FIbj4s0G8pJQkuYMTeeCKy2rVJ9zisa5jurdIQtvchw21770TeL2",
- "4Yj51g11zqkyOKxNdlGn4xwixvjvAbzVVlWLBaiOKk7mAOfctWIcDS04F95kErthJUj0Dk9sS6N9zmmO",
- "1uXfQQoyq3Rb3GPUoNWmrafLTEPE/JxTTXKgSpOfGD9b4XD+Vu1phoO+EvKixsKAVQA4KKaSOCP9wX5F",
- "fuqWv3S8FZ9w2M+e33xqAeBhj8W0OchPjp0qfHKM+k7j4+rB/skcHwXjSZTIzBW1YBxDozu0RR4arc0T",
- "0KPGW+Z2/ZzrFTeEdElzllF9O3LosrjeWbSno0M1rY3o2LH9Wt/HrtgLkZQ0vcD4k9GC6WU1m6SimPor",
- "wHQh6uvANKNQCI7fsikt2VSVkE4vH29Rx+7Ar0iEXV2PR47rqHuPdHMDxxbUnbP2IPm/tSAPfnh1RqZu",
- "p9QDG+Bqhw4iEyO3Nve+smVAMIu3D7RshK+5QB/DnHFmvh+e84xqOp1RxVI1rRTI72hOeQqThSCHxA15",
- "TDVFu1PHmj70hhJtgg6asprlLCUXoShujuaQMfb8/J0hkPPz9z1/c19wuqniBm6cILlieikqnTiPxLDt",
- "qrHv4cjWFrxp1jFxY1uKdB4PN/6A0b0sVRJYYePLL8vcLD8gQ0WwE8YrEqWF9EzQcEZnRzP7+0Y4j7uk",
- "V/7VSKVAkf8uaPmOcf2eJM7mc1SWaOJFG+t/O15jaHJdwu522gbEZrDY3R4XbhUqWGlJk5IuQEWXr4GW",
- "uPsoqAu0ouU5wW4te7OP1sKhmgVstCsGcNw4lhYXd2p7eQdKfAn4CbcQ2xju1NjDb7tfZqgfRW6I7Nbb",
- "FYwR3aVKLxNztqOrUobE/c7UD7sWhid7/7diC24OgXsDNwOSLiG9gAydf2gfH7e6+xALJ+E862DKPluz",
- "IbP4tgJNITMgVZlRpwNQvu4GuSvQ2kf2/wIXsD4TzdOMm0S1X49HzqGVGJoZOqhIqYEwMsQaHlvvFOts",
- "vvNvotOpLIn169hoZE8WhzVd+D7DB9lKyHs4xDGiqNGwgd5LKiOIsMQ/gIJbLNSMdyfSj3qRqNQsZaVd",
- "/25+qbetPmaQbcIlKk7EvCs1ekw9ysRs42RGVVyAgPli9sOcoW40k5/JWhWto5pg6gNHuLMcAo+qcieb",
- "SlS6/LLtW+4h0OJUApI3Ut2D0cZIqD4sXWgAu2wCAtDks4ug3eqQNVTkY3ZY2/XCzLw5XNJBL9jgm6OT",
- "IBAneMpavyjyjK17GMb16zKbVcK/PPLPjfwbo9H4Ru+FxiMXGxrbDsFRy8gghwV1Th+MOvUBBxa0ByrY",
- "IAPHz/N5zjiQJBbTQ5USKbNxAA0vd3OAUUK/IsQaeMjOI8TIOAAbreU4MHkjwrPJFzcBkgND8zr1Y6Od",
- "Pfgbtlubm/QeTr3dqob2eUdziMbN8zu7jX0r1HgUZUlDN4RWK2KbzKB3pYqRqGFNfbtM3/qjIAcUx0mL",
- "syYXMWud0SoAyfDUdwuuDeQhmxsh/yhwmkhYMKWhuTeb0+oNQZ/WdnEpNCRzJpVO8MoeXZ5p9L1CZfB7",
- "0zTOflqoIjY/AMvi3AenvYB1krG8iu+2m/evx2baN/X9SVWzC1ijkAGaLskM81kYKdSa3rTZMLWNa9u4",
- "4Nd2wa/pva13N1oyTc3EUgjdmeMLoaoOP9l0mCIEGCOO/q4NonQDewkicfq8JbiT2XghjC2abLIa9A7T",
- "jaOZBjmvHSm6lkDR3bgKG/Rm49qCdBD9NxYDZ4CWJctWnTu8HXXAbYcK/A0UdavxR1xRo3qwLRgI7uux",
- "MF4J3uZgtzSQmTaxRy/UcTtmugGWAUMIp2LKp6XqI8qQNkaibcPVGdD8r7D+m2mLyxldj0d3u/LHcO1G",
- "3ILrt/X2RvGMtmx7BWxZ8G6IclqWUlzSPHGGkSHSlOLSkSY293aUT8zq4tfvs1dHr9868DFyE6h0AYub",
- "VoXtyi9mVeZGHItaPAssI6it+ruzVcSCza/fEofGFB9k2tLlDBdzxGWPV2MoC46iM67M4y61raYSZ9Oz",
- "S9xg24OyNu01N2Jr2Wtb8+glZbm/inpotwfF3oortKJq72oVDENs75Xd9E53/HQ01LWFJ4VzbUiCUtg8",
- "P4oI3g0sMiok3nCRVAu6NhRkjdN95sSrIjHHL1E5S+NmCz5Thji4tfmaxgQbDyijZsSKDbgQeMWCsUwz",
- "tYO3rANkMEcUmWhS2oC7mXAJGivO/lkBYRlwbT5JF2jYOqjmXPrY+b44jcfpu4FdqH49/F10DDPUkHaB",
- "QGxWMEILc+SViL9w+oXWpnHzQ2AYvIGjKpyxJxI3OJkcfThqtt7+ZdtSHOZT7PM/Qxg29872ZI7ebLG0",
- "gA7MEU3OOCgtjoYlBb6/2F1GNCIBwQ2FgY2JpbkSkWEqfkW5zbVm+lkcut4KrM3A9LoSEh8tKoh66ZlK",
- "5lL8DvGb7NxsVCT20aES1UXsPYk8Busy0doq02TR9PgN4Rgk7SFNLvhI2o7EgROOVB6YzjGY2xu4KLdk",
- "bfPCtdzX8cMRhpxM7fjN4XAw98J0cno1o7EUKUahMjAdNU6alilOC+I7+11Q9RsGR3uBv6duy+xLvxJk",
- "E6Dcf1V+S+XoyyL5DFJW0DyuJWWI/fYTsIwtmE2uVykIsre5gWxWUktFLgOedYM1qDmZk4NxkB/S7UbG",
- "LplisxywxWPbYkYV2Jdm4eszFxilgeulwuZPdmi+rHgmIdNLZRGrBKkVWPuoyNu+Z6CvADg5wHaPX5CH",
- "aPVX7BIeGSw6XWR0+PgFhqXYPw5iws5l0dzEVzJkLP/pGEucjtHtYccwQsqNOom+OrWpj4dZ2IbTZLvu",
- "cpawpeN6289SQTldQNybW2yByfbF3USjYQcvPLN5O5WWYk2Yjs8Pmhr+NBCaZtifBcO9USnMAdKCKFEY",
- "empSs9lJ/XA2CahLl+Th8h/RxVL6t0adC/OnNRBbWR5bNTrC3tAC2mgdE2ofZ+NzKfeo3zHECTnxKR4w",
- "f1SdNsrixsxllo4qndlCTJPDuMZLVKXnyTckXVJJU8P+JkPgJrOvn0VyZrXT5PCbAf7J8S5BgbyMo14O",
- "kL3XJlxf8pALnhSGo2SPmlDQ4FRGk90ITfN4UIvn6N2Yps1D76qAmlGSQXKrWuRGA059J8LjGwa8IynW",
- "67kRPd54ZZ+cMisZJw9amR369ZfXTssohIwl/GmOu9M4JGjJ4BLja+KbZMa8417IfKdduAv0n9fL0twA",
- "arXMn+XYReC7iuXZ35rQ9k7aQUl5uoz6OGam429NntR6yfYcR5+YLynnkEeHszLzNy9bI9L/H2LXeQrG",
- "d2zbTSdol9tZXAN4G0wPlJ/QoJfp3EwQYrUd61sHh+ULkRGcp0lm0lBZ/w1wkFrtnxUoHXuvjB9sXCXa",
- "ssy9wGb2IsAz1KonxL7vNbC0XmiiNsuKKrev/SBbgHRG1qrMBc3GxIxz9uroNbGzKperAt+VYmaxhX0r",
- "3lpFx4YRZD66yeP5oTDM3cfZHBdmVq00pj5RmhZlLMLetDjzDTCMP7TropoXYmdCjq2Grbz+ZidpciSQ",
- "ejrH45EmzH+0pukSVdcWNxkm+d1T4nmqVEFq6DrLbp28yD7718JnxbNJ8cZEmPvFFVM2vT1cQjuov37h",
- "4q5OPsi/vTxZcW4pJcqjN73Aug3aPXDWee9Nv1HIOoi/oeKiRCVTuGmGwFPsFX1D3E032MsJbV8T1jlZ",
- "fdmSlHLBWYoveIOE+jXILlX+Ln6RHR47d81S/oi7Exo5XNEkh3V4kMPiYNpDzwgd4vqG2eCr2VRLHfZP",
- "jTnZl1STBWjlOBtkY5/I0tlLGFfgklFh1YSATwrZ8jUhh4y6L5t0NDckIwzxHVCAvzff3rjrEYblXTCO",
- "ipBDm4sAtBYNzOStjfbENFkIUG497Se56p3pM8FnqRms3k985m8cw7pqzLKtX7I/1JH3UjqvoGn70rQl",
- "6JZpfm6FE9tJj8rSTRp9UVvvcCwV5yCCI96mxJv7A+TW44ejbSC3jeEFKE8NocElOiehRDncI4yBFC+v",
- "LmleWYqymSJsWE/0GRjjETBeMw5NXvqIgEijIgE3Bs/rQD+VSqqtCrgTTzsDmqNHMsbQlHYm2rsO1dlg",
- "RAmu0c8xvI1NQtYBxlE3aBQ3ytd1OnxD3YEy8RLrcDhE9tOrolbllKgMAzc7CVdjjMMwbp+quC0A+seg",
- "rxPZ7lpSe3JuIomGHrxkTJm7TjHLI6Fqx/XHIOkwxsTO1vhvLMHG8AqcA/vWCaGw4431y83JmXKz94li",
- "i1vuStP/HrelcwbCPYpR/yvDVsI3gr1cKZbx1E/4MExH+BTweKmoH5+0aRYZXfTS1mTz3nxpHc7LPUbW",
- "OBCs90vzOp1a7mtt8EMhe+lghCnVLnxcU7IpS5pNph0bwfr7bRJvWxAran8Y8vFbF7/53Ou9m97Q08Jw",
- "7I0I9cEjfYD+6iPTSEmZczA1R6SPWRfD2o8q3iW6rdng7iJcZCgOElvJLQM5dzp7fSxFDnYYgrOFPC9a",
- "KLUvvjqapJBwz6gNROgNUdsPLtp1ebgOpJhKQX+dO29AC7cDuN8F8Q1f6CN3+Djr2S7HOf5wxnRHfmIR",
- "4p929bnJJ+MGrRoAbt7Yrv9tyHpgb8gDhqoOTiuWZ9s2t2V2bFInoGHtN2eg/SzJG36zgS/94+besd9E",
- "8Hc3ARETWWtr8mCqwKC4gy3RdYtYDjHXYVpJptcYI+c1TfZb9O3BD8BdJQRXWKaONHCOblvTzNm9F3Xr",
- "pgzVD8KWhiiM+ouqoMYkYK9WtChzcOfi2wezv8DTb55lB08f/2X2zcHzgxSePX9xcEBfPKOPXzx9DE++",
- "ef7sAB7Pv34xe5I9efZk9uzJs6+fv0ifPns8e/b1i7888DWgLKBNfaW/Y4aT5OjtSXJmgG1wQkv2V1jb",
- "nAaGjH22BJriSYSCsnx06H/6X/6ETVJRBGVr3a8j5wQZLbUu1eF0enV1NQm7TBeYljbRokqXUz9PP+fa",
- "25PaQGsDa3BHre3NkAJuqiOFI/z2y6vTM3L09mTSEMzocHQwOZg8xqREJXBastHh6Cn+hKdnifs+dcQ2",
- "OvxwPR5Nl0BzvXR/FKAlS/0ndUUXC5ATlzbC/HT5ZOrtO9MPLpjk2oy6iEUP+lSStX2xn01hbA0W5s5S",
- "p44MHuwp945vTGY2To647KU8QwugjYEyrK1G1kkWFMkOqjGNWzW+331BZStjeQ1jaSlihcjrlyTDheiC",
- "Wr2+Pu/zb64jjqb3neJiTw4OPkJBsXFrFI+XW1Yme3aPILZvUHcGtDtcjyv8RHNDN1AXmx3hgh5/sQs6",
- "4fhmy7AtYtny9Xj0/AveoRNuDg7NCbYMQrX6rPBXfsHFFfctjUiuioLKNQrcIFlEqFpdD7LcdpCke3U7",
- "zIchyLAZPNRvGbZna09nY6LqggqlZMIoDliaOYNUAkUxLyT6g5pcne45MtgKEj8d/R2txz8d/d0mwY2W",
- "rQ2mtwmh20z8B9CRXLLfrZvSixs5+udik+M/bKXfL0fm3VXU7DMSf7EZiXdg2vvd3eeb/mLzTX/ZKumq",
- "DnCnhAuecExccgkkMGvtddQ/tI76/ODpF7uaU5CXLAVyBkUpJJUsX5NfeR0RdDcVvOY5FQ9itDbyn151",
- "lUaLDtT3IIna9EOreFO23XjSyrqQtWps0Hjx6yC/lIsGHTdPySnPbCSH99WqsX9SjdY6m7vA7se49+B6",
- "ElPSA1fLd+uT41308taagpeeMd28ha+bldT/qBaLWxcm/5gSoAfHdzQjPmT0I/Pm3Zjps4Nnnw6CcBfe",
- "CE2+xyCzj8zSP6qdIE5WAbPBRIXTD/5R6A4Mxj24brOWbjX7GFMxJ3TsXoG4lPB1cSnDTywjtG/e+1zD",
- "zLArv+i/CY9xiuYd7B+FR9hEjRG67KJ3zxf2fOFOfKFLUA1HsJWNpx8wwDZkB70jiUVJ/kSOkiBDphSF",
- "T9EkyBx0urSZ9ru+7Ahb8YHJwzxl0/PdO/OXjncdt6j/fAnX4vy1+Kx0xyJy2PFH6z69Ho9SkBHi+9lH",
- "gZnPbI4pKOugc/9KHZ9q1ZW76zdb7mUrU8QQqBbExXoRs4s3gvJlM3nft45ouZ01aY/guyC4x9ReuSd0",
- "9ni5RXzpho9AWpKEvEF1CA+4j7n+M5o9PqZE/tgLeiM4EFgxhZlzLS3u3Y21ulCXYKvrsoTVNQZUh7bT",
- "8YNesex6WhdpG1Iq3rpaYhuVikZSMx6Unw/NK7QsgUp1ayG93R121pnx5DhM9SrqUCdCm1JtEVAMXm7o",
- "Sfz3XdyIf15v3b6e4L6e4O3qCX7SK3MTkGNZlfcTyQ7X+Kz3af1Z7tNvBE9Q2gLXXvNroeXz3a3xWUur",
- "5oJ/pMyFrWQoJCoJIR9Qk53EKwy6ElpMBUM6h8nYCduU6nRZldMP+B8MBr1uwi7ti/ypNbNtkre2cuPo",
- "XgMo9tU2v4Bqm5/fhHcndbSzWgllHYSG3nqk/+a0+Cz3/dTv7chk11wtK52JqyCOuakmMniSbIt7PUlv",
- "RAZ23HYsfz/DDLXV3ZUHonOAah4RTy7osdm0s8/emSIzQCM+rRZLbbOLRVMX1h0TmlrCT+x1ID5hEzRh",
- "W7lShlgmNJdAszWZAXAiZmbRzb7iIjv1UBwnjCeRaeAqpUhBKciSMK3IJtDqqHK0B+oNeELAEeB6FqIE",
- "mVN5S2AtS9gMaDefVg1ubfVxp74P9W7Tb9rA7uThNlIJTYlPLTCqJgdX7i2Cwh1xgqoq+8j75ye57fZV",
- "JWauiNT9tV/PWIHP3DjlQkEqeKaig2HRim3HFuu1BmtRYJM1+pPyKevC2iobQy/CzMjxgsd2DXV1nTqn",
- "jNW0IIum64PVhrnewKqeS8xjFZVtKtFtIw9hKRi/zjKja4sE1YFFwgwXWdwVy3P0zcb1jhYQDSI2AXLq",
- "WwXYDa/9A4Aw1SC6rnrUppwgzafSoizN+dNJxet+Q2g6ta2P9K9N2z5xuUBw5OuZABWq2Q7yK4tZm0Bq",
- "SRVxcJCCXjgNfeHisfswm8OYKMZTVwdmqDoaK+DUtAqPwJZD2lXywuPfKSTcOhwd+o0S3SARbNmFoQXH",
- "1Mo/hBJ401te137wEc2ebbU6UK8atdL+Pb2iTCdzIa3ETDBFccSD2p79PynTLjG2uwNr4cyWLsmxZShu",
- "nCB9mgqDWV3lOneOzO734yfMVN8LuZPDtrGtakHMwkjFNfPP7bDCqdcx/3jez732vNee99rzXnvea897",
- "7XmvPe+154+tPX+eCEySJJ5P++c1scc1ZPRFavhf0PuVT/ngpFH6a5UfLwlGRTfneGNkhgaaT13SUnSh",
- "CzUY4h0mQE3NdIyTMqdY/WSl/UNjLHwSpED3qfxsDiTDa0yDp0/I6Y9Hzx8/+e3J868N97E1d1ttH/qS",
- "BEqvc3jkItjqBCc+lA04xZyBGMlG/e0n9VEOVpufsxwIlt5/hc2P4RJyo8pbXycxl5H+9egMaP7SIcdy",
- "JVD6O5GtO4Rj1j9FVLRJpnGYM05lJA1nn1B6SNYCU/G6vLK9G9T1vcZMxOME+hu2ba8GKlBEyXsTvWyN",
- "C3AZ1N3Yu/jIzJ56dBKXwvOzsmyCEDkya9jTHyaSvluuzh0cbGu0Cnf+vtSod4/46MHDYzs2NJlVKWDl",
- "Y0dxq8Q0WgBPHFtIZiJb+1J1LiNwi8vaVK3DTPbVCtLKnCWExB2Dh+qRKzKPKadDU080VX5QVgJwvKYw",
- "6qdmnDbr6Ea+eXvqaNcwuHPMZHe4PtcIgi4eCkkWUlTlI1sUja/xSlyUlK+9GczoilgEwXSwcd73y6nr",
- "BNA9Prt7Dv/wvoKP9ru/W7SQK6p8Av/MZvCPZzHs5pnfjvEmi/K2rHd2vdGM7wP53fub6HfZBTrWpr8S",
- "ZKJXPJJ3uZNlef+46n+ESHgrxSUzF+coh+1HYTUMYbJVMsiAZaFo6KTa8LKhzU9/oVdh4o5deeoqcYrn",
- "nbXSJdjiw15Li+QlMfJSCpqlVOH7EVca4yNrrHp1ErE7IJiYX6of6WsE+GSrYonj7qRPtiO93YSYAEbZ",
- "RJqfV7tsok2P3HOdFjb2poA/iyngO3/4FKFE0qvu4QzK1ezApuiVXvEol5o2RbOjEW/Bgair7N6j7643",
- "fNuFF5SztS4IyEtCSZozdFAIrrSsUn3OKZpAwzLCffeeN+wOq1IvfZO4FT5iJHdDnXOKJQ5rw2hUpZpD",
- "rHgLgNfYVLVYgNIdTjwHOOeuFeNNOcWCpVIkNu7TiGvD0Se2ZUHXZE5ztOH/DlKQmblFhDlL0KCoNMtz",
- "50800xAxP+dUkxwM0/+JGYXODOdtTrWP3JVJ8liIP6xwGWUHKqP+YL/iowW3fG83QvOW/eyjocefJ+9z",
- "tOC5g/zk2OUTOznGFDGNJ7EH+ydzLxWMJ1EiMxLfeeS7tEUeuiq7SECPGp+k2/VzbpRpLQgyeqpvRw5d",
- "N0DvLNrT0aGa1kZ0vAV+re9jb1kXIjFXRiwzMVowvaxmmHnZv3GdLkT93nWaUSgEx2/ZlJZsqkpIp5eP",
- "t+gHd+BXJMKu9pL7z2PE75ZhrzfeKLG9vR+Qy/eQvvWPnbN1a4jSPkPqPkPqPofmPkPqfnf3GVL3+UP3",
- "+UP/p+YPnWzUEF3Oja0Z/VovjbE6LCUSUjtzzcDDZq3cf323JNMTQs6wHj41MgAuQdKcpFRZxYjbSLmC",
- "LZaaqCpNAbLDc560ILG1083ED5v/2mvueXVw8BTIwaNuH2u3CDhvvy+qqvjJFjD8lpyPzke9kSQU4hJc",
- "JjBsnlXoK7a9tg77L/W4P8ve1hV0bY0rS1qWYMSaquZzljKL8lyYy8BCdOL7uMAvIA1wNtEEYdomXUV8",
- "Ylyki86h7rV5TOnuy/cbFL456pDLPqnJx1Cwj0FTlqv6dULkPoU3my5lXVHVHN2aq/h0BqD8b85h7WbJ",
- "2QWEMbgYfXBFZeZbRAvPNml2fWHlvmmpnX80g5VXCbpAz+uZmbYZQ82Fs1cKsG/Zslk801yYO2tiCzxt",
- "i2zHilGm3wOFVlN70FBfRbjmIF3sPVqzcqEg0aLJ1DwMxyZUuJSLt0GCGkxSY4Gzu6VipQ3xg2GJaBWm",
- "aBRGpHYWaJgKNdBJfIZkY/+H59yE7Jf2u6u2VVsFOzb4yLieXgfDjGsSvULhglyvi8SQ6ufEZUgYMETb",
- "4sE2kOPWJYQ73XvVGfPs/Pw9eW0zZWNp0QtYT21Ru3RJ+QJUjaPwvNinQza8J4gv76Dx/soWG+mVDBQc",
- "P+nHnHfxfsHSC8iI4Vd4xFwofOQyQR7WaX/nDDn52r8jseLw0YSQI06gKPWaWA7bsXl3JucP9Kb5V6EA",
- "b0vGSPhiCuwS5B3PlB9m80lSYA7cHaeyg2yeSK/4wHGiV5Gr9a55ICM36c69NiAqC8V9GCj20nEvHffS",
- "cS8d99JxLx3/9NKxZ5Tam20+hdnmsxtu/kQ5sPfprv9gCwqDWVv1LO5gza6rdse0cWenbqrih1Xm0cpY",
- "15d/9/76vfkmL70BsimafjidolaxFEpPR9fjD52C6uFHw0rpwo7gDHylZJeYrf799f8PAAD//6SdqevR",
- "9wAA",
+ "pFUoWjpo3Oki6GylbejbV0GzmoWKL2BhF7D46HC+FosTDUUMulKIPKkv5F2nTE8YdqnigqUXkBHDTZEB",
+ "OBn9oE0/ZhLy0BxAVbutrpZrr+CWJXDIHk0IOeIEilKvnfWno491JucP9Kb5VzhrVqEHnXKCi5yc87jh",
+ "xfrf73ji/TCbz7kNSLvjVHaQzRPpFR847PQK3UdmuCj32Gi7PcWegWDu6RsBUVkodrFw/IBRWrS1yyzD",
+ "y1Ije1U1KxiGagXNxoave+953/7A9ISQM+Rs5vqn4BIkzTEORXmzNlOkYIul0e/SFCA7POdJC5JUFG7i",
+ "h81/LdM8rw4OngI5eNTto7RRpt1N156Bbt9vycHYfkJ0kW/J+eh81BtJQiEuIbO3xZCuba+tw/5LPe45",
+ "/7knNkhB1/ae6c8iUdV8zlJmkZ4LI3UWoqMTc4FfQBrwwCgBijA9RkGLGMW7hN2X5gCOorrdfVikIqOa",
+ "W4QR9IbbeZ9pm3YUgRVNzSopMpm11VdqOuuraFqUSThA1EC+YUbnolAt7n3Lc9fn59Y8shm+s46BpIWO",
+ "gFwn228WPWREIdjl+B+RUphdZy46yofQ5EzpHpDOWIL+qZogI0JnQv6PqEhK8fyWlYb65ikkXufwmm9m",
+ "QMnq53R6ZIMhyKEAa7/CL1991V34V1+5PWeKzOHKhxSahl10fPWVPQRC6TufgA5prk4i6h26DYw0jYSB",
+ "L6laTra6EHDcnTwHwdAnx35CPExKoYgxC5dCzO9htSxbRXUWWMVW6nYOjYEPFCnpelD5Lw2AkVgykBc5",
+ "ehrEvEORxPG/JSvNkE3cy1pDK2b2/z78j8N3R8l/0eT3g+TFv0/ff3h2/eir3o9Prr/99v+1f3p6/e2j",
+ "//i3mPKiNJvFvVI/UrU0kDrOseIn3PqVjb6J5sS1s1KI+aeGu0NiZjM95oMl7UJ0b2MbwowqgZuNNHda",
+ "lWW+vgchYwciEtwNSLWMt8p+FfMwZNZRnloro4P3/B+2628Dd7NfvO2kR6WC54xDUggO6+grEcbhJ/wY",
+ "1Q2RLQ10RgEx1LdrW2rB3wGrPc8um3lX/OJuB2zobR3Aew+b3x234/oKg4XxZgN5SShJc4aGfcGVllWq",
+ "zzlF02FH9e6QhTeIDhuTX/omcet1xLjshjrnVBkc1gbFqEt0DhFXwfcA3qasqsUCVEcVJ3OAc+5aMY5m",
+ "IJwLbzKJ3bASJPquJ7al0T7nNEfb9+8gBZlVui3uMabRatPWD2emIWJ+zqkmOVClyU+Mn61wOH+X9jTD",
+ "QV8JeVFjYcBmARwUU0mckf5gvyI/dctfOt6KD0zsZ89vPrUA8LDHIu4c5CfHThU+OUZ9p/HA9WD/ZG6Z",
+ "gvEkSmTmilowjoHbHdoiD43W5gnoUePLc7t+zvWKG0K6pDnLqL4dOXRZXO8s2tPRoZrWRnSs7H6t72NX",
+ "7IVISppeYHTMaMH0sppNUlFM/RVguhD1dWCaUSgEx2/ZlJZsqkpIp5ePt6hjd+BXJMKurscjx3XUvcfh",
+ "uYFjC+rOWfu3/N9akAc/vDojU7dT6oENv7VDB3GTkVube/3ZMiCYxdvnYzb+2Fygj2HOODPfD895RjWd",
+ "zqhiqZpWCuR3NKc8hclCkEPihjymmqLdqWPrH3rhiZZAB01ZzXKWkotQFDdHc8hUfH7+zhDI+fn7nje8",
+ "LzjdVHHzO06QXDG9FJVOnL9k2HbV2PdwZGup3jTrmLixLUU6f4wbf8AlUJYqCWzE8eWXZW6WH5ChItgJ",
+ "oymJ0kJ6Jmg4o7Ojmf19I1w8gKRX/k1LpUCR/y5o+Y5x/Z4kzuZzVJZogEYL8H87XmNocl3C7lbkBsRm",
+ "sNjdHhduFSpYaUmTki4gblvWQEvcfRTUBVrR8pxgt5aV2ceS4VDNAjbaFQM4bhzpi4s7tb28eye+BPyE",
+ "W4htDHdqrOC33S8z1I8iN0R26+0KxojuUqWXiTnb0VUpQ+J+Z+pnZwvDk713XrEFN4fAvdCbAUmXkF5A",
+ "hq5JtI+PW919AIiTcJ51MGUf1dmAXnz5gaaQGZCqzKjTAShfd0PwFWjt3x38AhewPhPNw5GbxNxfj0fO",
+ "3ZYYmhk6qEipgTAyxBoeW++y62y+876iS6wsifU62VhpTxaHNV34PsMH2UrIezjEMaKo0bCB3ksqI4iw",
+ "xD+Aglss1Ix3J9KPepGo1CxlpV3/bl6zt60+ZpBtwiUqTsS8KzV6TD3KxGzjZEZVXICA+WL2w5yhbqyV",
+ "n8laFa0bnWBiBke4sxwCf69yJ5tKVLr8su1L8yHQ4lQCkjdS3YPRxkioPixd4AK7bMIV0OSzi6Dd6i42",
+ "VOQjiljb9cLMvDlc0kEv2OCLqJMgTCh4aFu/d/KMrXsYxvXbN5vzwr+L8o+h/Auo0fhGr5nGIxe5GtsO",
+ "wVHLyCCHBXVOH4yJ9eEQFrQHKtggA8fP83nOOJAkFnFElRIps1EKDS93c4BRQr8ixBp4yM4jxMg4ABut",
+ "5TgweSPCs8kXNwGSA0PzOvVjo509+Bu2W5ub5CNOvd2qhvZ5R3OIxs3jQLuNfSvUeBRlSUM3hFYrYpvM",
+ "oHelipGoYU19u0zf+qMgBxTHSYuzJhcxa53RKgDJ8NR3C64N5CGbGyH/KHCaSFgwpaG5N5vT6g1Bn9Z2",
+ "cSk0JHMmlU7wyh5dnmn0vUJl8HvTNM5+WqgiNnsBy+LcB6e9gHWSsbyK77ab96/HZto39f1JVbMLWKOQ",
+ "AZouyQyzbRgp1JretNkwtY2627jg13bBr+m9rXc3WjJNzcRSCN2Z4wuhqg4/2XSYIgQYI47+rg2idAN7",
+ "CeKE+rwluJPZaCaMfJpsshr0DtONY60GOa8dKbqWQNHduAobkmej7oJkFf0XIANngJYly1adO7wddcBt",
+ "hwr8DRR1q/FHXFGjerAtGAju67EgYwne5mC3NJCZNu1ILxBzO2a64Z8BQwinYsonzeojypA2xsltw9UZ",
+ "0PyvsP6baYvLGV2PR3e78sdw7Ubcguu39fZG8Yy2bHsFbFnwbohyWpZSXNI8cYaRIdKU4tKRJjb3dpRP",
+ "zOri1++zV0ev3zrwMa4UqHThlJtWhe3KL2ZV5kYci1o8CywjqK36u7NVxILNr186h8YUHwLb0uUMF3PE",
+ "ZY9XYygLjqIzrszjLrWtphJn07NL3GDbg7I27TU3YmvZa1vz6CVlub+Kemi3h+zeiiu0Yn7vahUMA4Dv",
+ "ld30Tnf8dDTUtYUnhXNtSNFS2CxEigjeDSwyKiTecJFUC7o2FGSN033mxKsiMccvUTlL42YLPlOGOLi1",
+ "+ZrGBBsPKKNmxIoNuBB4xYKxTDO1g7esA2QwRxSZaFLagLuZcOkjK87+WQFhGXBtPkkXaNg6qOZc+sj+",
+ "vjiNvyJwA7uHBPXwd9ExzFBD2gUCsVnBCC3MkTcs/sLpF1qbxs0PgWHwBo6qcMaeSNzgZHL04ajZevuX",
+ "bUtxmO2xz/8MYdjMQNtTTXqzxdICOjBHNHXkoLQ4GpYU+DpkdxnRiAQENxQGNiaW5kpEhqn4FeU2E5zp",
+ "Z3HoeiuwNgPT60pIfFKpIOqlZyqZS/E7xG+yc7NRkdhHh0pUF7H3JPJUrctEa6tMk+PT4zeEY5C0hzS5",
+ "4CNpOxIHTjhSeWA6x2Bub+Ci3JK1zVrXcl/HD0cYcjK14zeHw8HcC9PJ6dWMxhK4GIXKwHTUOGlapjgt",
+ "iO/sd0HVbxgc7QX+nrots+8QS5BNgHL/zfstlaMvi+QzSFlB87iWlCH22w/UMrZgNvVfpSDILecGsjlT",
+ "LRW5/HzWDdag5mRODsZB9kq3Gxm7ZIrNcsAWj22LGVVg38GFb+NcYJQGrpcKmz/Zofmy4pmETC+VRawS",
+ "pFZg7ZMnb/uegb4C4OQA2z1+QR6i1V+xS3hksOh0kdHh4xcYlmL/OIgJO5fjcxNfyZCx/KdjLHE6RreH",
+ "HcMIKTfqJPom1iZmHmZhG06T7brLWcKWjuttP0sF5XQBcW9usQUm2xd3E42GHbzwzGYVVVqKNWE6Pj9o",
+ "avjTQGiaYX8WDPdGpTAHSAuiRGHoqUkcZyf1w9kUpS6Zk4fLf0QXS+nfGnUuzJ/WQGxleWzV6Ah7Qwto",
+ "o3VMqH06js+lXMoBxxAn5MQnoMDsVnVSK4sbM5dZOqp0ZgsxiQ/jGi9RlZ4n35B0SSVNDfubDIGbzL5+",
+ "Fsno1U7iw28G+CfHuwQF8jKOejlA9l6bcH3JQy54UhiOkj1qQkGDUxlNxSM0zeNBLZ6jd2OaNg+9qwJq",
+ "RkkGya1qkRsNOPWdCI9vGPCOpFiv50b0eOOVfXLKrGScPGhldujXX147LaMQMpaOqDnuTuOQoCWDS4yv",
+ "iW+SGfOOeyHznXbhLtB/Xi9LcwOo1TJ/lmMXge8qlmd/a0LbO0kRJeXpMurjmJmOvzVZXOsl23McfQC/",
+ "pJxDHh3OyszfvGyNSP9/iF3nKRjfsW032aFdbmdxDeBtMD1QfkKDXqZzM0GI1Xasbx0cli9ERnCeJtVK",
+ "Q2X9N8BB4rd/VqB07L0yfrBxlWjLMvcCm3eMAM9Qq54Q+77XwNJ6oYnaLCuq3L72g2wB0hlZqzIXNBsT",
+ "M87Zq6PXxM6qXCYNfFeKec8W9q14axUdG0aQl+kmT/uHwjB3H2dzXJhZtdKYmEVpWpSxCHvT4sw3wDD+",
+ "0K6Lal6InQk5thq28vqbnaTJ4EDq6RyPR5ow/9GapktUXVvcZJjkd0/Y56lSBYmr6xzAdWol++xfC5+z",
+ "z6bsGxNh7hdXTNnk+3AJ7aD++oWLuzr5IP/28mTFuaWUKI/e9ALrNmj3wFnnvTf9RiHrIP6GiosSlUzh",
+ "pvkLT7FX9A1xNxliL2O1fU1YZ4z1RVVSygVnKb7gDdL91yC7RP67+EV2eOzcNUv5I+5OaORwRVMw1uFB",
+ "DouDSRk9I3SI6xtmg69mUy112D81ZoxfUk0WoJXjbJCNfZpNZy9hXIFLlYU1HQI+KWTL14QcMuq+bJLl",
+ "3JCMMMR3QAH+3nx7465HGJZ3wTgqQg5tLgLQWjQwz7g22hPTZCFAufW0n+Sqd6bPBJ+lZrB6P/F5yXEM",
+ "66oxy7Z+yf5QR95L6byCpu1L05agW6b5uRVObCc9Kks3afRFbb3DsUShgwiOeJsSb+4PkFuPH462gdw2",
+ "hhegPDWEBpfonIQS5XCPMAZSvLy6pHllKcpmirBhPdFnYIxHwHjNODRZ8yMCIo2KBNwYPK8D/VQqqbYq",
+ "4E487Qxojh7JGENT2plo7zpUZ4MRJbhGP8fwNjbpYgcYR92gUdwoX9fJ+g11B8rES6wS4hDZT/6KWpVT",
+ "ojIM3Oykg40xDsO4fSLltgDoH4O+TmS7a0ntybmJJBp68JIxZe46xSyPhKod1x+DlMgYEztb47+xBBvD",
+ "K3AO7Funq8KON9Yvt6aOYmmi2OKWu9L0v9dt8Rmt7pZ7qnOWwr2OnaJXhj2Fbw17OVcsA6ufAmK4j/CJ",
+ "7vFyUj9iadM+Mszo5a/JWb758jucfXyMLHYg6O+X5pU7tVzc2vKHQv/SwUhVql0YuqZkUy44mzI8NoKN",
+ "G7Cpym3Zr6gdYyhWwIYKmM+93rvpHz1tDsfeiFAfhNIH6K8+wo2UlDlHVXPU+ph1sbD96ORdouSaDe4u",
+ "wkWY4iCxlfizsTGO8rVY7BQY6MMYwmDJzcEMl3HkkU66+VwsfI2HHdJ4bFzwLSNgd2I0fbKIsK4wdmnL",
+ "ebxo0ZB9KtdRwYWEe6alQPe4IS31o7J2XR6uA49IpaC/zp03oIXbAdzvgviGEfaRO8y/9GwX/hV/cWS6",
+ "IwO1CPFv4von5pOxv1ZpBzdvbNf/NmR2saaFAQtfB6cVy7Ntm9uy1zY5J9Ai+ZuzbH+WrBe/WV7YP24u",
+ "AcBNNKbuJiBiImttTR5MFVhidzDCum4RkysmiUwryfQagwu9is5+iz7a+AG4K3Dh6gXVIRouQsCWqnMO",
+ "g0Xduqku9oOwFT8Kc29AHVpj9rRXK1qUObhz8e2D2V/g6TfPsoOnj/8y++bg+UEKz56/ODigL57Rxy+e",
+ "PoYn3zx/dgCP51+/mD3Jnjx7Mnv25NnXz1+kT589nj37+sVfHvjSXhbQpmzW3zE1THL09iQ5M8A2OKEl",
+ "+yusbTIIQ8Y+zQRN8SRCQVk+OvQ//S9/wiapKIJqxO7XkfMejZZal+pwOr26upqEXaYLzDacaFGly6mf",
+ "p5+s7u1Jbdm2EUm4o9ZoaUgBN9WRwhF+++XV6Rk5ensyaQhmdDg6mBxMHmM2pxI4LdnocPQUf8LTs8R9",
+ "nzpiGx1+uB6PpkuguV66PwrQkqX+k7qiiwXIicu3YX66fDL1hrHpBxeFc21GXcTCLn0Oztow209DMbaW",
+ "HnPZq3NuBi8dlXsAOSYzG2BIXNpXnqHp1AaPGdZWI+skC2qfB0W2xq3S7e++oGqksYSQsXwesfry9ROc",
+ "4fqCQQlmX3b5+TfXEd3sfadm3JODg49QJ27cGsXj5ZYF557dI4jtK+OdAe0O1+MKP9Hc0A3UNYRHuKDH",
+ "X+yCTjg+djNsi1i2fD0ePf+Cd+iEm4NDc4Itgxi3Piv8lV9wccV9SyOSq6Kgco0CN8iyEapW14Mstx1d",
+ "6p4rD/NhCFKTBhkOWh6B2drT2Ziouk5GKZkwigNW3M4glUBRzAuJjrQmyal7xw22MMhPR39Hs/tPR3+3",
+ "2YOj1YiD6W0m7TYT/wF0JAnvd+umouZGjv652OT4D1vA+cuReXcVNftUzl9sKucdmPZ+d/eJur/YRN1f",
+ "tkq6ql8GUMIFTzhmfLkEEpi19jrqH1pHfX7w9ItdzSnIS5YCOYOiFJJKlq/Jr7wOpbqbCl7znIoHwW0b",
+ "+U+vLE2jRQfqe5B9bvqhVZMr2248aXtgWsVJaLymeZCYy4XRjps3+JRnNgTGO7nV2L9FR2udTfpg92Pc",
+ "e6k+iSnpgavlu/XJ8S56+ZBXKaabt/C1UUXvCa2ParG4db35jykBenB8RzPiY20/Mm/ejZk+O3j26SAI",
+ "d+GN0OR7jM77yCz9o9oJ4mQVMBvM8Dj94F/T7sBg3Ev1NmtxdfA2MhVzQsfu+YzLpV9X5TL8xDJCmyyg",
+ "zzXMDLvyi/5j+hinaB4Q/1F4hM1wGaHLLnr3fGHPF+7EF7oE1XAEW7B6+gEjk0N20DuSWM3lT+QoCVKL",
+ "SlH43FaCzEGnS1uioOvLjrAVH9E9zFM2vXu+M3/peNdxi/rvvnAtzl+L73F3rL6HHX+07tPr8SgFGSG+",
+ "n33Ym/nM5pi7s47W98/78Y1bXZC9fuzmngQzRQyBauFjf4jZxRtB+bKZvO9bR7Tczpq0R/BdENxjaq9c",
+ "nJc9Xm4RX7rhI5CWJCFvUB3CA+6D1f+MZo+PKZE/9oLeCA4EVkxhymFLi3t3Y60u1LXr6oI2YVmSAdWh",
+ "7XT8oFcsu57W1e2GlIq3rgjbRqWikdSsyUHZNq/QsgQq1a2F9HZ32FlnxpPjMEeuqEOdCG1q3EVAMXi5",
+ "oSfx33dxI/55vXX7Qoz7Qoy3K8T4Sa/MTUCOZVXeTyQ7XOOz3qf1Z7lPvxE8QWkLXHvNr4WWz3e3xnc8",
+ "rWIV/nU3F7YEpJCoJIR8QE12Eq8w6EpoMRUM6RwmYydsU6rTZVVOP+B/MBj0ugm7tKkMptbMtkne2pKX",
+ "o3sNoNiXKf0CypR+fhPendTRzmollHUQGnrrkf6b0+LLA/Rz5rcjk11ztax0Jq6COOamDMvgSbIt7vUk",
+ "vREZ2HHbsfz91DzUlsVXHojOAap5RDwro8dm087mC2CKzACN+LRaLLVNyxbN+Vh3TGhqCT+x14H4hE3Q",
+ "hG3lakBifdVcAs3WZAbAiZiZRTf7iovsFJJxnDCefaeBq5QiBaUgS8J8LJtAq6PK0R6oN+AJAUeA61mI",
+ "EmRO5S2BtSxhM6DdRGQ1uLXVx536PtS7Tb9pA7uTh9tIJTS1UbXAqJocXJ28CAp3xAmqquwj75+f5Lbb",
+ "V5WY8iNSMNl+PWMFPnPjlAsFqeCZig6G1T62HVssdBusRYHNculPyqcsqGvLkwy9CDMjxytF2zXUZYnq",
+ "ZDxW04IsmucQVhvmegOrei4xj5WitjlYt408hKVg/Do9j64tElQHFgkzXGRxVyzP0Tcb1ztaQDSI2ATI",
+ "qW8VYDe89g8AwlSD6LpcVJtygvyoSouyNOdPJxWv+w2h6dS2PtK/Nm37xOUCwZGvZwJUqGY7yK8sZm3m",
+ "rSVVxMFBCnrhNPSFi8fuw2wOY6IYT10BnaGycqyAU9MqPAJbDmlXyQuPf6cCc+twdOg3SnSDRLBlF4YW",
+ "HFMr/xBK4E1veV37wUc0e7bV6kC9atRK+/f0ijKdzIW0EjPB3M4RD2p79v+kTLuM4u4OrIUzW7rs0Jah",
+ "uHGCvHMqDGZ1Jf/cOTK734+fMFN9L+RODtvGtqoFMQsjFdfMP7fD0rBex/zjeT/32vNee95rz3vtea89",
+ "77Xnvfa8154/tvb8eSIwSZJ4Pu2f18Qe15DRF6nhf0HvVz7lg5NG6a9VfrwkGBXdnOONkRkaaD512V7R",
+ "hS7UYIh3mDk2NdMxTsqcYtmYlfYPjbt5unwORJsDyfAa0+DpE3L649Hzx09+e/L8a8N9bLHiVtuHvpaD",
+ "0uscHrkItjrBiQ9lA04x2SJGslF/+0l9lIPV5ucsB6IMsl5h82O4hNyo8tbXScxlpH89OgOav3TIsVwJ",
+ "lP5OZOsO4Zj1TxEVbZJpHOaMUxnJX9onlB6StcAcxi4hb+8GdX2vMRPxOIH+hm3bq4HSHVHy3kQvW+MC",
+ "XOp5N/YuPjKzpx6dxOU+/awsmyBEjswa9vSHiaTv1vlzBwfbGq3Cnb8vNerdIz568PDYjg1NZlUKWDLa",
+ "UdwqMY0WwBPHFpKZyNa+xp9LpdzisjbH7TCTfbWCtDJnCSFxx+CheuSq82Ou7tDUE60xENTjAByvqSj7",
+ "qRmnTde6kW/enjraxR/uHDPZHa7PNYKgi4dCkoUUVfnIVpPja7wSFyXla28GM7oiVo8wHWyc9/1y6jpz",
+ "do/P7l78ILyv4KP97u8WLeSKKl/5ILOlD+JZDLsJ+rdjvEk/vS3rnV1vNFX+QGL8/ib6XXaBjrXprwSZ",
+ "6BWPJKzupKfeP676HyES3kpxyczFOcph+1FYDUOYbJUMMmBZKBo6qTa8bGjz01/oVZi4Y1eeukqc4nln",
+ "rXQJtmqz19IieUmMvJSCZilV+H7E1RT5yBqrXp1E7A4IJuaX6kf6GgE+2apY4rg76ZPtSG83ISaAUTaR",
+ "5ufVLpto0yP3XKeFjb0p4M9iCvjOHz5FKJH0qns4gzo/O7ApeqVXPMqlpk218WjEW3Ag6vLE9+i76w3f",
+ "duEFdYCtCwLyklCS5gwdFIIrLatUn3OKJtCw/nLfvecNu8Oq1EvfJG6FjxjJ3VDnnGJtyNowGlWp5hCr",
+ "egPgNTZVLRagdIcTzwHOuWvFeFOHsmCpFImN+zTi2nD0iW1Z0DWZ0xxt+L+DFGRmbhFhzhI0KCrN8tz5",
+ "E800RMzPOdUkB8P0f2JGoTPDeZtT7SN39aU8FuIPK1xG2YGSsj/Yr/howS3f243QvGU/+2jo8efJ+xyt",
+ "FO8gPzl2+cROjjFFTONJ7MH+ydxLBeNJlMiMxHce+S5tkYeuPDES0KPGJ+l2/ZwbZVoLgoye6tuRQ9cN",
+ "0DuL9nR0qKa1ER1vgV/r+9hb1oVIzJUR62qMFkwvqxlmXvZvXKcLUb93nWYUCsHxWzalJZuqEtLp5eMt",
+ "+sEd+BWJsKu95P7zGPG79evrjTdKbG/vB+TyPaRv/WPnbN0aorTPkLrPkLrPobnPkLrf3X2G1H3+0H3+",
+ "0P+p+UMnGzVEl3Nja0a/1ktjLKtLiYTUzlwz8LBZK/df3y3J9ISQs6Xh/9TIALgESXOSUmUVI24j5Qq2",
+ "WGqiqjQFyA7PedKCxBadNxM/bP5rr7nn1cHBUyAHj7p9rN0i4Lz9vqiq4idbsfFbcj46H/VGklCIS3CZ",
+ "wLB5VqGv2PbaOuy/1OP+LHtbV9C1Na4saVmCEWuqms9ZyizKc2EuAwvRie/jAr+ANMDZRBOEaZt0FfGJ",
+ "cZEuOoe61+Yxpbsv329Q+OaoQy77pCYfQ8E+Bk1ZrurXCZH7FN5supR1RVVzdGuu4tMZgPK/OYe1myVn",
+ "FxDG4GL0wRWVmW8RrdjbpNn1Fan7pqV2/tEMVl4l6AI9r2dm2mYMNRfOXinAvmXLZvFMc2HurIkt8LQt",
+ "sh0rRpl+DxRaTe1BQ30V4ZqDdLH3aM3KhYJEiyZT8zAcm1DhUi7eBglqMEmNBc7uloqVNsQPhiWiVZii",
+ "URiR2lmgYSrUQCfxGZKN/R+ecxOyX9rvrtpWbRXs2OAj43p6HQwzrkn0CoULcr0uEkOqnxOXIWHAEG2r",
+ "LttAjlvXXu5071VnzLPz8/fktc2UjaVFL2A9tUXt0iXlC1A1jsLzYp8O2fCeIL68g8Z7rfccL26ZL+wC",
+ "Fh8dzsFyzeORka3JQB35k35EfJcqLlh6ARkx3BQZgAvUj1x1yMM6KfGcoZxZ+1cuVlg/mhByxAkUpV4T",
+ "y/87FvnO5PyB3jT/KlQv2nI7ElyZArsEeccT74fZfM4VGHZwx6nsIJsn0is+cNjpVeTiv2uWysg9v3Pr",
+ "DojKQnEf5pO97N7L7r3s3svuvezey+697P6ssrtn0NubvD6FyeuzG73+RPnD96nC/2ALCgOBW7VA7uAJ",
+ "qCuex+4Kzsbvy/wffggr9KOFtq7N/+799XvzTV56421TcP5wOkWdZymUno6uxx86xejDj4aV0oUdwRlH",
+ "S8kuMdP/++v/HwAA//96ww9J5PoAAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index f26919b69..08c8d0f2b 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -307,6 +307,7 @@ type DryrunTxnResult struct {
LocalDeltas *[]AccountStateDelta `json:"local-deltas,omitempty"`
LogicSigMessages *[]string `json:"logic-sig-messages,omitempty"`
LogicSigTrace *[]DryrunState `json:"logic-sig-trace,omitempty"`
+ Logs *[]LogItem `json:"logs,omitempty"`
}
// ErrorResponse defines model for ErrorResponse.
@@ -336,6 +337,16 @@ type EvalDeltaKeyValue struct {
Value EvalDelta `json:"value"`
}
+// LogItem defines model for LogItem.
+type LogItem struct {
+
+ // unique application identifier
+ Id uint64 `json:"id"`
+
+ // base64 encoded log message
+ Value string `json:"value"`
+}
+
// StateDelta defines model for StateDelta.
type StateDelta []EvalDeltaKeyValue
@@ -565,6 +576,9 @@ type PendingTransactionResponse struct {
// \[ld\] Local state key/value changes for the application being executed by this transaction.
LocalStateDelta *[]AccountStateDelta `json:"local-state-delta,omitempty"`
+ // \[lg\] Logs for the application being executed by this transaction.
+ Logs *[]LogItem `json:"logs,omitempty"`
+
// Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.
PoolError string `json:"pool-error"`
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 070baff87..7a6042544 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -513,6 +513,7 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string,
ReceiverRewards *uint64 `codec:"receiver-rewards,omitempty"`
SenderRewards *uint64 `codec:"sender-rewards,omitempty"`
Txn transactions.SignedTxn `codec:"txn"`
+ Logs *[]generated.LogItem `codec:"logs,omitempty"`
}{
Txn: txn.Txn,
}
@@ -531,13 +532,15 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string,
response.SenderRewards = &txn.ApplyData.SenderRewards.Raw
response.ReceiverRewards = &txn.ApplyData.ReceiverRewards.Raw
response.CloseRewards = &txn.ApplyData.CloseRewards.Raw
-
response.AssetIndex = computeAssetIndexFromTxn(txn, v2.Node.Ledger())
response.ApplicationIndex = computeAppIndexFromTxn(txn, v2.Node.Ledger())
-
response.LocalStateDelta, response.GlobalStateDelta = convertToDeltas(txn)
- }
+ response.Logs, err = convertToLogItems(txn, response.ApplicationIndex)
+ if err != nil {
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
+ }
data, err := encode(handle, response)
if err != nil {
return internalError(ctx, err, errFailedToEncodeResponse, v2.Log)
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index 28d35c857..b248e40d9 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -266,7 +266,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*d
ad.AppLocalStates = map[basics.AppIndex]basics.AppLocalState{1: {}}
genesis[addr] = ad
- bootstrap := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ bootstrap := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
// generate test transactions
const inMem = true
diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go
index e4b2dd24c..a3bfa1251 100644
--- a/daemon/algod/api/server/v2/utils.go
+++ b/daemon/algod/api/server/v2/utils.go
@@ -266,6 +266,35 @@ func convertToDeltas(txn node.TxnWithStatus) (*[]generated.AccountStateDelta, *g
return localStateDelta, stateDeltaToStateDelta(txn.ApplyData.EvalDelta.GlobalDelta)
}
+func convertToLogItems(txn node.TxnWithStatus, aidx *uint64) (*[]generated.LogItem, error) {
+ var logItems *[]generated.LogItem
+ if len(txn.ApplyData.EvalDelta.Logs) > 0 {
+ l := make([]generated.LogItem, 0, len(txn.ApplyData.EvalDelta.Logs))
+
+ for _, v := range txn.ApplyData.EvalDelta.Logs {
+ // Resolve appid from index
+ var appid uint64
+ if v.ID != 0 {
+ return nil, fmt.Errorf("logging for a foreign app is not supported")
+ } else if txn.Txn.Txn.ApplicationID == 0 {
+ if aidx == nil {
+ return nil, fmt.Errorf("app index cannot be nil")
+ }
+ appid = *aidx
+ } else {
+ appid = uint64(txn.Txn.Txn.ApplicationID)
+ }
+ l = append(l, generated.LogItem{
+ Id: appid,
+ Value: base64.StdEncoding.EncodeToString([]byte(v.Message)),
+ })
+ }
+
+ logItems = &l
+ }
+ return logItems, nil
+}
+
// printableUTF8OrEmpty checks to see if the entire string is a UTF8 printable string.
// If this is the case, the string is returned as is. Otherwise, the empty string is returned.
func printableUTF8OrEmpty(in string) string {
diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go
index 463f0bf14..6127920e8 100644
--- a/data/basics/msgp_gen.go
+++ b/data/basics/msgp_gen.go
@@ -115,6 +115,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// LogItem
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// Round
// |-----> MarshalMsg
// |-----> CanMarshalMsg
@@ -4129,20 +4137,24 @@ func (z DeltaAction) MsgIsZero() bool {
func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0007Len := uint32(2)
- var zb0007Mask uint8 /* 3 bits */
+ zb0008Len := uint32(3)
+ var zb0008Mask uint8 /* 4 bits */
if len((*z).GlobalDelta) == 0 {
- zb0007Len--
- zb0007Mask |= 0x2
+ zb0008Len--
+ zb0008Mask |= 0x2
}
if len((*z).LocalDeltas) == 0 {
- zb0007Len--
- zb0007Mask |= 0x4
+ zb0008Len--
+ zb0008Mask |= 0x4
+ }
+ if len((*z).Logs) == 0 {
+ zb0008Len--
+ zb0008Mask |= 0x8
}
- // variable map header, size zb0007Len
- o = append(o, 0x80|uint8(zb0007Len))
- if zb0007Len != 0 {
- if (zb0007Mask & 0x2) == 0 { // if not empty
+ // variable map header, size zb0008Len
+ o = append(o, 0x80|uint8(zb0008Len))
+ if zb0008Len != 0 {
+ if (zb0008Mask & 0x2) == 0 { // if not empty
// string "gd"
o = append(o, 0xa2, 0x67, 0x64)
if (*z).GlobalDelta == nil {
@@ -4162,7 +4174,7 @@ func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0007Mask & 0x4) == 0 { // if not empty
+ if (zb0008Mask & 0x4) == 0 { // if not empty
// string "ld"
o = append(o, 0xa2, 0x6c, 0x64)
if (*z).LocalDeltas == nil {
@@ -4197,6 +4209,40 @@ func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
}
}
}
+ if (zb0008Mask & 0x8) == 0 { // if not empty
+ // string "lg"
+ o = append(o, 0xa2, 0x6c, 0x67)
+ if (*z).Logs == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Logs)))
+ }
+ for zb0007 := range (*z).Logs {
+ // omitempty: check for empty values
+ zb0009Len := uint32(2)
+ var zb0009Mask uint8 /* 3 bits */
+ if (*z).Logs[zb0007].ID == 0 {
+ zb0009Len--
+ zb0009Mask |= 0x2
+ }
+ if (*z).Logs[zb0007].Message == "" {
+ zb0009Len--
+ zb0009Mask |= 0x4
+ }
+ // variable map header, size zb0009Len
+ o = append(o, 0x80|uint8(zb0009Len))
+ if (zb0009Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).Logs[zb0007].ID)
+ }
+ if (zb0009Mask & 0x4) == 0 { // if not empty
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = msgp.AppendString(o, (*z).Logs[zb0007].Message)
+ }
+ }
+ }
}
return
}
@@ -4210,38 +4256,38 @@ func (_ *EvalDelta) CanMarshalMsg(z interface{}) bool {
func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0007 > 0 {
- zb0007--
- var zb0009 int
- var zb0010 bool
- zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0008 > 0 {
+ zb0008--
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GlobalDelta")
return
}
- if zb0009 > config.MaxStateDeltaKeys {
- err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxStateDeltaKeys))
+ if zb0010 > config.MaxStateDeltaKeys {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxStateDeltaKeys))
err = msgp.WrapError(err, "struct-from-array", "GlobalDelta")
return
}
- if zb0010 {
+ if zb0011 {
(*z).GlobalDelta = nil
} else if (*z).GlobalDelta == nil {
- (*z).GlobalDelta = make(StateDelta, zb0009)
+ (*z).GlobalDelta = make(StateDelta, zb0010)
}
- for zb0009 > 0 {
+ for zb0010 > 0 {
var zb0001 string
var zb0002 ValueDelta
- zb0009--
+ zb0010--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GlobalDelta")
@@ -4255,55 +4301,55 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).GlobalDelta[zb0001] = zb0002
}
}
- if zb0007 > 0 {
- zb0007--
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0008 > 0 {
+ zb0008--
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas")
return
}
- if zb0011 > config.MaxEvalDeltaAccounts {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(config.MaxEvalDeltaAccounts))
+ if zb0012 > config.MaxEvalDeltaAccounts {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxEvalDeltaAccounts))
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas")
return
}
- if zb0012 {
+ if zb0013 {
(*z).LocalDeltas = nil
} else if (*z).LocalDeltas == nil {
- (*z).LocalDeltas = make(map[uint64]StateDelta, zb0011)
+ (*z).LocalDeltas = make(map[uint64]StateDelta, zb0012)
}
- for zb0011 > 0 {
+ for zb0012 > 0 {
var zb0003 uint64
var zb0004 StateDelta
- zb0011--
+ zb0012--
zb0003, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas")
return
}
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0014 int
+ var zb0015 bool
+ zb0014, zb0015, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas", zb0003)
return
}
- if zb0013 > config.MaxStateDeltaKeys {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxStateDeltaKeys))
+ if zb0014 > config.MaxStateDeltaKeys {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxStateDeltaKeys))
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas", zb0003)
return
}
- if zb0014 {
+ if zb0015 {
zb0004 = nil
} else if zb0004 == nil {
- zb0004 = make(StateDelta, zb0013)
+ zb0004 = make(StateDelta, zb0014)
}
- for zb0013 > 0 {
+ for zb0014 > 0 {
var zb0005 string
var zb0006 ValueDelta
- zb0013--
+ zb0014--
zb0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas", zb0003)
@@ -4319,8 +4365,101 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).LocalDeltas[zb0003] = zb0004
}
}
- if zb0007 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0007)
+ if zb0008 > 0 {
+ zb0008--
+ var zb0016 int
+ var zb0017 bool
+ zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs")
+ return
+ }
+ if zb0016 > config.MaxLogCalls {
+ err = msgp.ErrOverflow(uint64(zb0016), uint64(config.MaxLogCalls))
+ err = msgp.WrapError(err, "struct-from-array", "Logs")
+ return
+ }
+ if zb0017 {
+ (*z).Logs = nil
+ } else if (*z).Logs != nil && cap((*z).Logs) >= zb0016 {
+ (*z).Logs = ((*z).Logs)[:zb0016]
+ } else {
+ (*z).Logs = make([]LogItem, zb0016)
+ }
+ for zb0007 := range (*z).Logs {
+ var zb0018 int
+ var zb0019 bool
+ zb0018, zb0019, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007)
+ return
+ }
+ if zb0018 > 0 {
+ zb0018--
+ (*z).Logs[zb0007].ID, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007, "struct-from-array", "ID")
+ return
+ }
+ }
+ if zb0018 > 0 {
+ zb0018--
+ (*z).Logs[zb0007].Message, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007, "struct-from-array", "Message")
+ return
+ }
+ }
+ if zb0018 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0018)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007)
+ return
+ }
+ if zb0019 {
+ (*z).Logs[zb0007] = LogItem{}
+ }
+ for zb0018 > 0 {
+ zb0018--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Logs[zb0007].ID, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007, "ID")
+ return
+ }
+ case "m":
+ (*z).Logs[zb0007].Message, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007, "Message")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0007)
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ if zb0008 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0008)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -4331,11 +4470,11 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0008 {
+ if zb0009 {
(*z) = EvalDelta{}
}
- for zb0007 > 0 {
- zb0007--
+ for zb0008 > 0 {
+ zb0008--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -4343,27 +4482,27 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch string(field) {
case "gd":
- var zb0015 int
- var zb0016 bool
- zb0015, zb0016, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0020 int
+ var zb0021 bool
+ zb0020, zb0021, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GlobalDelta")
return
}
- if zb0015 > config.MaxStateDeltaKeys {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxStateDeltaKeys))
+ if zb0020 > config.MaxStateDeltaKeys {
+ err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxStateDeltaKeys))
err = msgp.WrapError(err, "GlobalDelta")
return
}
- if zb0016 {
+ if zb0021 {
(*z).GlobalDelta = nil
} else if (*z).GlobalDelta == nil {
- (*z).GlobalDelta = make(StateDelta, zb0015)
+ (*z).GlobalDelta = make(StateDelta, zb0020)
}
- for zb0015 > 0 {
+ for zb0020 > 0 {
var zb0001 string
var zb0002 ValueDelta
- zb0015--
+ zb0020--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GlobalDelta")
@@ -4377,53 +4516,53 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).GlobalDelta[zb0001] = zb0002
}
case "ld":
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0022 int
+ var zb0023 bool
+ zb0022, zb0023, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LocalDeltas")
return
}
- if zb0017 > config.MaxEvalDeltaAccounts {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(config.MaxEvalDeltaAccounts))
+ if zb0022 > config.MaxEvalDeltaAccounts {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxEvalDeltaAccounts))
err = msgp.WrapError(err, "LocalDeltas")
return
}
- if zb0018 {
+ if zb0023 {
(*z).LocalDeltas = nil
} else if (*z).LocalDeltas == nil {
- (*z).LocalDeltas = make(map[uint64]StateDelta, zb0017)
+ (*z).LocalDeltas = make(map[uint64]StateDelta, zb0022)
}
- for zb0017 > 0 {
+ for zb0022 > 0 {
var zb0003 uint64
var zb0004 StateDelta
- zb0017--
+ zb0022--
zb0003, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LocalDeltas")
return
}
- var zb0019 int
- var zb0020 bool
- zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0024 int
+ var zb0025 bool
+ zb0024, zb0025, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LocalDeltas", zb0003)
return
}
- if zb0019 > config.MaxStateDeltaKeys {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(config.MaxStateDeltaKeys))
+ if zb0024 > config.MaxStateDeltaKeys {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(config.MaxStateDeltaKeys))
err = msgp.WrapError(err, "LocalDeltas", zb0003)
return
}
- if zb0020 {
+ if zb0025 {
zb0004 = nil
} else if zb0004 == nil {
- zb0004 = make(StateDelta, zb0019)
+ zb0004 = make(StateDelta, zb0024)
}
- for zb0019 > 0 {
+ for zb0024 > 0 {
var zb0005 string
var zb0006 ValueDelta
- zb0019--
+ zb0024--
zb0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LocalDeltas", zb0003)
@@ -4438,6 +4577,97 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).LocalDeltas[zb0003] = zb0004
}
+ case "lg":
+ var zb0026 int
+ var zb0027 bool
+ zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs")
+ return
+ }
+ if zb0026 > config.MaxLogCalls {
+ err = msgp.ErrOverflow(uint64(zb0026), uint64(config.MaxLogCalls))
+ err = msgp.WrapError(err, "Logs")
+ return
+ }
+ if zb0027 {
+ (*z).Logs = nil
+ } else if (*z).Logs != nil && cap((*z).Logs) >= zb0026 {
+ (*z).Logs = ((*z).Logs)[:zb0026]
+ } else {
+ (*z).Logs = make([]LogItem, zb0026)
+ }
+ for zb0007 := range (*z).Logs {
+ var zb0028 int
+ var zb0029 bool
+ zb0028, zb0029, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007)
+ return
+ }
+ if zb0028 > 0 {
+ zb0028--
+ (*z).Logs[zb0007].ID, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007, "struct-from-array", "ID")
+ return
+ }
+ }
+ if zb0028 > 0 {
+ zb0028--
+ (*z).Logs[zb0007].Message, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007, "struct-from-array", "Message")
+ return
+ }
+ }
+ if zb0028 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0028)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007)
+ return
+ }
+ if zb0029 {
+ (*z).Logs[zb0007] = LogItem{}
+ }
+ for zb0028 > 0 {
+ zb0028--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).Logs[zb0007].ID, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007, "ID")
+ return
+ }
+ case "m":
+ (*z).Logs[zb0007].Message, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007, "Message")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Logs", zb0007)
+ return
+ }
+ }
+ }
+ }
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -4481,12 +4711,145 @@ func (z *EvalDelta) Msgsize() (s int) {
}
}
}
+ s += 3 + msgp.ArrayHeaderSize
+ for zb0007 := range (*z).Logs {
+ s += 1 + 2 + msgp.Uint64Size + 2 + msgp.StringPrefixSize + len((*z).Logs[zb0007].Message)
+ }
return
}
// MsgIsZero returns whether this is a zero value
func (z *EvalDelta) MsgIsZero() bool {
- return (len((*z).GlobalDelta) == 0) && (len((*z).LocalDeltas) == 0)
+ return (len((*z).GlobalDelta) == 0) && (len((*z).LocalDeltas) == 0) && (len((*z).Logs) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *LogItem) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(2)
+ var zb0001Mask uint8 /* 3 bits */
+ if (*z).ID == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).Message == "" {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendUint64(o, (*z).ID)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = msgp.AppendString(o, (*z).Message)
+ }
+ }
+ return
+}
+
+func (_ *LogItem) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*LogItem)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *LogItem) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).ID, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Message, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Message")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = LogItem{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "i":
+ (*z).ID, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ID")
+ return
+ }
+ case "m":
+ (*z).Message, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Message")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *LogItem) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*LogItem)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *LogItem) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint64Size + 2 + msgp.StringPrefixSize + len((*z).Message)
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *LogItem) MsgIsZero() bool {
+ return ((*z).ID == 0) && ((*z).Message == "")
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/basics/msgp_gen_test.go b/data/basics/msgp_gen_test.go
index 558e1c091..c0e43a4fd 100644
--- a/data/basics/msgp_gen_test.go
+++ b/data/basics/msgp_gen_test.go
@@ -432,6 +432,66 @@ func BenchmarkUnmarshalEvalDelta(b *testing.B) {
}
}
+func TestMarshalUnmarshalLogItem(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := LogItem{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingLogItem(t *testing.T) {
+ protocol.RunEncodingTest(t, &LogItem{})
+}
+
+func BenchmarkMarshalMsgLogItem(b *testing.B) {
+ v := LogItem{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgLogItem(b *testing.B) {
+ v := LogItem{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalLogItem(b *testing.B) {
+ v := LogItem{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalStateDelta(t *testing.T) {
partitiontest.PartitionTest(t)
v := StateDelta{}
diff --git a/data/basics/overflow.go b/data/basics/overflow.go
index 7e6ac9511..7a2a3df8b 100644
--- a/data/basics/overflow.go
+++ b/data/basics/overflow.go
@@ -33,6 +33,13 @@ func OAdd16(a uint16, b uint16) (res uint16, overflowed bool) {
return
}
+// OAdd32 adds 2 uint32 values with overflow detection
+func OAdd32(a uint32, b uint32) (res uint32, overflowed bool) {
+ res = a + b
+ overflowed = res < a
+ return
+}
+
// OAdd adds 2 values with overflow detection
func OAdd(a uint64, b uint64) (res uint64, overflowed bool) {
res = a + b
@@ -47,6 +54,13 @@ func OSub(a uint64, b uint64) (res uint64, overflowed bool) {
return
}
+// OSub32 subtracts b from a with overflow detection
+func OSub32(a uint32, b uint32) (res uint32, overflowed bool) {
+ res = a - b
+ overflowed = res > a
+ return
+}
+
// OMul multiplies 2 values with overflow detection
func OMul(a uint64, b uint64) (res uint64, overflowed bool) {
if b == 0 {
@@ -78,6 +92,15 @@ func AddSaturate(a uint64, b uint64) uint64 {
return res
}
+// AddSaturate32 adds 2 uint32 values with saturation on overflow
+func AddSaturate32(a uint32, b uint32) uint32 {
+ res, overflowed := OAdd32(a, b)
+ if overflowed {
+ return math.MaxUint32
+ }
+ return res
+}
+
// SubSaturate subtracts 2 values with saturation on underflow
func SubSaturate(a uint64, b uint64) uint64 {
res, overflowed := OSub(a, b)
@@ -87,6 +110,15 @@ func SubSaturate(a uint64, b uint64) uint64 {
return res
}
+// SubSaturate32 subtracts 2 uint32 values with saturation on underflow
+func SubSaturate32(a uint32, b uint32) uint32 {
+ res, overflowed := OSub32(a, b)
+ if overflowed {
+ return 0
+ }
+ return res
+}
+
// Add16 adds 2 uint16 values with overflow detection
func (t *OverflowTracker) Add16(a uint16, b uint16) uint16 {
res, overflowed := OAdd16(a, b)
diff --git a/data/basics/teal.go b/data/basics/teal.go
index e95aaf2db..7dab26b83 100644
--- a/data/basics/teal.go
+++ b/data/basics/teal.go
@@ -123,6 +123,22 @@ func (sd StateDelta) Valid(proto *config.ConsensusParams) error {
return nil
}
+// LogItem contains logs for an application
+// ID is the offset into Txn.ForeignApps
+type LogItem struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ ID uint64 `codec:"i"`
+ Message string `codec:"m"`
+}
+
+// Equal checks whether two LogItems are equal.
+func (l LogItem) Equal(o LogItem) bool {
+
+ return l.ID == o.ID && l.Message == o.Message
+
+}
+
// EvalDelta stores StateDeltas for an application's global key/value store, as
// well as StateDeltas for some number of accounts holding local state for that
// application
@@ -134,6 +150,8 @@ type EvalDelta struct {
// When decoding EvalDeltas, the integer key represents an offset into
// [txn.Sender, txn.Accounts[0], txn.Accounts[1], ...]
LocalDeltas map[uint64]StateDelta `codec:"ld,allocbound=config.MaxEvalDeltaAccounts"`
+
+ Logs []LogItem `codec:"lg,allocbound=config.MaxLogCalls"`
}
// Equal compares two EvalDeltas and returns whether or not they are
@@ -165,6 +183,17 @@ func (ed EvalDelta) Equal(o EvalDelta) bool {
return false
}
+ // Logs must be equal
+ if len(ed.Logs) != len(o.Logs) {
+ return false
+ }
+ for i, l := range ed.Logs {
+ ok := l.Equal(o.Logs[i])
+ if !ok {
+ return false
+ }
+ }
+
return true
}
diff --git a/data/basics/teal_test.go b/data/basics/teal_test.go
index 35b4331e8..526573aaf 100644
--- a/data/basics/teal_test.go
+++ b/data/basics/teal_test.go
@@ -156,12 +156,14 @@ func TestEvalDeltaEqual(t *testing.T) {
d2 = EvalDelta{
GlobalDelta: nil,
LocalDeltas: nil,
+ Logs: nil,
}
a.True(d1.Equal(d2))
d2 = EvalDelta{
GlobalDelta: StateDelta{},
LocalDeltas: map[uint64]StateDelta{},
+ Logs: []LogItem{},
}
a.True(d1.Equal(d2))
@@ -221,4 +223,34 @@ func TestEvalDeltaEqual(t *testing.T) {
},
}
a.False(d1.Equal(d2))
+
+ d2 = EvalDelta{
+ Logs: []LogItem{{ID: 0, Message: "val"}},
+ }
+ a.False(d1.Equal(d2))
+
+ d1 = EvalDelta{
+ Logs: []LogItem{{ID: 0, Message: "val2"}},
+ }
+ a.False(d1.Equal(d2))
+
+ d1 = EvalDelta{
+ Logs: []LogItem{{ID: 1, Message: "val"}},
+ }
+ a.False(d1.Equal(d2))
+
+ d1 = EvalDelta{
+ Logs: []LogItem{{ID: 1, Message: "val2"}},
+ }
+ a.False(d1.Equal(d2))
+
+ d1 = EvalDelta{
+ Logs: []LogItem{{ID: 0, Message: "val"}, {ID: 0, Message: "val2"}},
+ }
+ a.False(d1.Equal(d2))
+
+ d1 = EvalDelta{
+ Logs: []LogItem{{ID: 0, Message: "val"}},
+ }
+ a.True(d1.Equal(d2))
}
diff --git a/data/basics/units_test.go b/data/basics/units_test.go
index 6ef8ba43e..07455d13b 100644
--- a/data/basics/units_test.go
+++ b/data/basics/units_test.go
@@ -17,6 +17,7 @@
package basics
import (
+ "math"
"testing"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -33,6 +34,27 @@ func TestSubSaturate(t *testing.T) {
require.Equal(t, b.SubSaturate(a), Round(1))
}
+func TestSubSaturate32(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ require.Equal(t, uint32(0), SubSaturate32(0, 1))
+ require.Equal(t, uint32(0), SubSaturate32(1, 2))
+ require.Equal(t, uint32(0), SubSaturate32(1, 1))
+ require.Equal(t, uint32(0), SubSaturate32(1, math.MaxUint32))
+ require.Equal(t, uint32(1), SubSaturate32(2, 1))
+ require.Equal(t, uint32(math.MaxUint32-1), SubSaturate32(math.MaxUint32, 1))
+}
+
+func TestAddSaturate32(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ require.Equal(t, uint32(1), AddSaturate32(0, 1))
+ require.Equal(t, uint32(math.MaxUint32-1), AddSaturate32(math.MaxUint32-2, 1))
+ require.Equal(t, uint32(math.MaxUint32), AddSaturate32(math.MaxUint32, 0))
+ require.Equal(t, uint32(math.MaxUint32), AddSaturate32(math.MaxUint32-1, 1))
+ require.Equal(t, uint32(math.MaxUint32), AddSaturate32(math.MaxUint32, 2))
+}
+
func TestRoundUpToMultipleOf(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index eaf788534..3998ad854 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -672,7 +672,11 @@ func (bh BlockHeader) DecodeSignedTxn(stb transactions.SignedTxnInBlock) (transa
st := stb.SignedTxn
ad := stb.ApplyData
- proto := config.Consensus[bh.CurrentProtocol]
+ proto, ok := config.Consensus[bh.CurrentProtocol]
+ if !ok {
+ return transactions.SignedTxn{}, transactions.ApplyData{},
+ fmt.Errorf("consensus protocol %s not found", bh.CurrentProtocol)
+ }
if !proto.SupportSignedTxnInBlock {
return st, transactions.ApplyData{}, nil
}
@@ -708,7 +712,11 @@ func (bh BlockHeader) DecodeSignedTxn(stb transactions.SignedTxnInBlock) (transa
func (bh BlockHeader) EncodeSignedTxn(st transactions.SignedTxn, ad transactions.ApplyData) (transactions.SignedTxnInBlock, error) {
var stb transactions.SignedTxnInBlock
- proto := config.Consensus[bh.CurrentProtocol]
+ proto, ok := config.Consensus[bh.CurrentProtocol]
+ if !ok {
+ return transactions.SignedTxnInBlock{},
+ fmt.Errorf("consensus protocol %s not found", bh.CurrentProtocol)
+ }
if !proto.SupportSignedTxnInBlock {
stb.SignedTxn = st
return stb, nil
diff --git a/data/bookkeeping/block_test.go b/data/bookkeeping/block_test.go
index 671905594..14d75e437 100644
--- a/data/bookkeeping/block_test.go
+++ b/data/bookkeeping/block_test.go
@@ -348,6 +348,7 @@ func TestEncodeDecodeSignedTxn(t *testing.T) {
var b Block
b.BlockHeader.GenesisID = "foo"
crypto.RandBytes(b.BlockHeader.GenesisHash[:])
+ b.CurrentProtocol = protocol.ConsensusFuture
var tx transactions.SignedTxn
tx.Txn.GenesisID = b.BlockHeader.GenesisID
diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go
index f8ab7ff97..026eceb01 100644
--- a/data/bookkeeping/genesis.go
+++ b/data/bookkeeping/genesis.go
@@ -17,9 +17,15 @@
package bookkeeping
import (
+ "fmt"
"io/ioutil"
+ "time"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/committee"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
)
@@ -115,3 +121,70 @@ type GenesisAllocation struct {
func (genesis Genesis) ToBeHashed() (protocol.HashID, []byte) {
return protocol.Genesis, protocol.Encode(&genesis)
}
+
+// GenesisBalances contains the information needed to generate a new ledger
+type GenesisBalances struct {
+ Balances map[basics.Address]basics.AccountData
+ FeeSink basics.Address
+ RewardsPool basics.Address
+ Timestamp int64
+}
+
+// MakeGenesisBalances returns the information needed to bootstrap the ledger based on the current time
+func MakeGenesisBalances(balances map[basics.Address]basics.AccountData, feeSink, rewardsPool basics.Address) GenesisBalances {
+ return MakeTimestampedGenesisBalances(balances, feeSink, rewardsPool, time.Now().Unix())
+}
+
+// MakeTimestampedGenesisBalances returns the information needed to bootstrap the ledger based on a given time
+func MakeTimestampedGenesisBalances(balances map[basics.Address]basics.AccountData, feeSink, rewardsPool basics.Address, timestamp int64) GenesisBalances {
+ return GenesisBalances{Balances: balances, FeeSink: feeSink, RewardsPool: rewardsPool, Timestamp: timestamp}
+}
+
+// MakeGenesisBlock creates a genesis block, including setup of RewardsState.
+func MakeGenesisBlock(proto protocol.ConsensusVersion, genesisBal GenesisBalances, genesisID string, genesisHash crypto.Digest) (Block, error) {
+ params, ok := config.Consensus[proto]
+ if !ok {
+ return Block{}, fmt.Errorf("unsupported protocol %s", proto)
+ }
+
+ poolAddr := basics.Address(genesisBal.RewardsPool)
+ incentivePoolBalanceAtGenesis := genesisBal.Balances[poolAddr].MicroAlgos
+
+ genesisRewardsState := RewardsState{
+ FeeSink: genesisBal.FeeSink,
+ RewardsPool: genesisBal.RewardsPool,
+ RewardsLevel: 0,
+ RewardsResidue: 0,
+ RewardsRecalculationRound: basics.Round(params.RewardsRateRefreshInterval),
+ }
+
+ if params.InitialRewardsRateCalculation {
+ genesisRewardsState.RewardsRate = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
+ } else {
+ genesisRewardsState.RewardsRate = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ }
+
+ genesisProtoState := UpgradeState{
+ CurrentProtocol: proto,
+ }
+
+ blk := Block{
+ BlockHeader: BlockHeader{
+ Round: 0,
+ Branch: BlockHash{},
+ Seed: committee.Seed(genesisHash),
+ TxnRoot: transactions.Payset{}.CommitGenesis(),
+ TimeStamp: genesisBal.Timestamp,
+ GenesisID: genesisID,
+ RewardsState: genesisRewardsState,
+ UpgradeState: genesisProtoState,
+ UpgradeVote: UpgradeVote{},
+ },
+ }
+
+ if params.SupportGenesisHash {
+ blk.BlockHeader.GenesisHash = genesisHash
+ }
+
+ return blk, nil
+}
diff --git a/data/common_test.go b/data/common_test.go
index 5390c1b5a..b02103d4d 100644
--- a/data/common_test.go
+++ b/data/common_test.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -113,7 +114,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*L
genesis[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 100000 * uint64(proto.RewardsRateRefreshInterval)})
- bootstrap := MakeGenesisBalances(genesis, poolAddr, sinkAddr)
+ bootstrap := bookkeeping.MakeGenesisBalances(genesis, poolAddr, sinkAddr)
// generate test transactions
const inMem = true
@@ -129,7 +130,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*L
if latest != 0 {
panic(fmt.Errorf("newly created ledger doesn't start on round 0"))
}
- bal := bootstrap.balances
+ bal := bootstrap.Balances
for i := 0; i < TXs; i++ {
send := gen.Int() % P
diff --git a/data/datatest/fabricateLedger.go b/data/datatest/fabricateLedger.go
index 6aa6889c2..55d992823 100644
--- a/data/datatest/fabricateLedger.go
+++ b/data/datatest/fabricateLedger.go
@@ -25,6 +25,7 @@ import (
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -33,7 +34,7 @@ import (
var roundDeadline = 0 * time.Second
// FabricateLedger is a test-only helper to create a new in-memory Ledger and run the protocol through the specified Round with the given accounts
-func FabricateLedger(log logging.Logger, ledgerName string, accounts []account.Participation, genesis data.GenesisBalances, lastRound basics.Round) (*data.Ledger, error) {
+func FabricateLedger(log logging.Logger, ledgerName string, accounts []account.Participation, genesis bookkeeping.GenesisBalances, lastRound basics.Round) (*data.Ledger, error) {
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
diff --git a/data/genesisBalances.go b/data/genesisBalances.go
deleted file mode 100644
index b57660b4e..000000000
--- a/data/genesisBalances.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package data
-
-import (
- "time"
-
- "github.com/algorand/go-algorand/data/basics"
-)
-
-// GenesisBalances contains the information needed to generate a new ledger
-type GenesisBalances struct {
- balances map[basics.Address]basics.AccountData
- feeSink basics.Address
- rewardsPool basics.Address
- timestamp int64
-}
-
-// MakeGenesisBalances returns the information needed to bootstrap the ledger based on the current time
-func MakeGenesisBalances(balances map[basics.Address]basics.AccountData, feeSink, rewardsPool basics.Address) GenesisBalances {
- return MakeTimestampedGenesisBalances(balances, feeSink, rewardsPool, time.Now().Unix())
-}
-
-// MakeTimestampedGenesisBalances returns the information needed to bootstrap the ledger based on a given time
-func MakeTimestampedGenesisBalances(balances map[basics.Address]basics.AccountData, feeSink, rewardsPool basics.Address, timestamp int64) GenesisBalances {
- return GenesisBalances{balances: balances, feeSink: feeSink, rewardsPool: rewardsPool, timestamp: timestamp}
-}
diff --git a/data/ledger.go b/data/ledger.go
index 02de26a2e..3cc526889 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -17,7 +17,6 @@
package data
import (
- "fmt"
"sync/atomic"
"time"
@@ -76,75 +75,27 @@ type roundSeed struct {
elements [2]roundSeedPair
}
-func makeGenesisBlock(proto protocol.ConsensusVersion, genesisBal GenesisBalances, genesisID string, genesisHash crypto.Digest) (bookkeeping.Block, error) {
- params, ok := config.Consensus[proto]
- if !ok {
- return bookkeeping.Block{}, fmt.Errorf("unsupported protocol %s", proto)
- }
-
- poolAddr := basics.Address(genesisBal.rewardsPool)
- incentivePoolBalanceAtGenesis := genesisBal.balances[poolAddr].MicroAlgos
-
- genesisRewardsState := bookkeeping.RewardsState{
- FeeSink: genesisBal.feeSink,
- RewardsPool: genesisBal.rewardsPool,
- RewardsLevel: 0,
- RewardsResidue: 0,
- RewardsRecalculationRound: basics.Round(params.RewardsRateRefreshInterval),
- }
-
- if params.InitialRewardsRateCalculation {
- genesisRewardsState.RewardsRate = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
- } else {
- genesisRewardsState.RewardsRate = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
- }
-
- genesisProtoState := bookkeeping.UpgradeState{
- CurrentProtocol: proto,
- }
-
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: 0,
- Branch: bookkeeping.BlockHash{},
- Seed: committee.Seed(genesisHash),
- TxnRoot: transactions.Payset{}.CommitGenesis(),
- TimeStamp: genesisBal.timestamp,
- GenesisID: genesisID,
- RewardsState: genesisRewardsState,
- UpgradeState: genesisProtoState,
- UpgradeVote: bookkeeping.UpgradeVote{},
- },
- }
-
- if params.SupportGenesisHash {
- blk.BlockHeader.GenesisHash = genesisHash
- }
-
- return blk, nil
-}
-
// LoadLedger creates a Ledger object to represent the ledger with the
// specified database file prefix, initializing it if necessary.
func LoadLedger(
log logging.Logger, dbFilenamePrefix string, memory bool,
- genesisProto protocol.ConsensusVersion, genesisBal GenesisBalances, genesisID string, genesisHash crypto.Digest,
+ genesisProto protocol.ConsensusVersion, genesisBal bookkeeping.GenesisBalances, genesisID string, genesisHash crypto.Digest,
blockListeners []ledger.BlockListener, cfg config.Local,
) (*Ledger, error) {
- if genesisBal.balances == nil {
- genesisBal.balances = make(map[basics.Address]basics.AccountData)
+ if genesisBal.Balances == nil {
+ genesisBal.Balances = make(map[basics.Address]basics.AccountData)
}
- genBlock, err := makeGenesisBlock(genesisProto, genesisBal, genesisID, genesisHash)
+ genBlock, err := bookkeeping.MakeGenesisBlock(genesisProto, genesisBal, genesisID, genesisHash)
if err != nil {
return nil, err
}
params := config.Consensus[genesisProto]
if params.ForceNonParticipatingFeeSink {
- sinkAddr := genesisBal.feeSink
- sinkData := genesisBal.balances[sinkAddr]
+ sinkAddr := genesisBal.FeeSink
+ sinkData := genesisBal.Balances[sinkAddr]
sinkData.Status = basics.NotParticipating
- genesisBal.balances[sinkAddr] = sinkData
+ genesisBal.Balances[sinkAddr] = sinkData
}
l := &Ledger{
@@ -152,7 +103,7 @@ func LoadLedger(
}
genesisInitState := ledger.InitState{
Block: genBlock,
- Accounts: genesisBal.balances,
+ Accounts: genesisBal.Balances,
GenesisHash: genesisHash,
}
l.log.Debugf("Initializing Ledger(%s)", dbFilenamePrefix)
diff --git a/data/transactions/application.go b/data/transactions/application.go
index 90dcc81d7..4588ed22f 100644
--- a/data/transactions/application.go
+++ b/data/transactions/application.go
@@ -233,3 +233,45 @@ func (ac *ApplicationCallTxnFields) IndexByAddress(target basics.Address, sender
return 0, fmt.Errorf("invalid Account reference %s", target)
}
+
+// AppIDByIndex converts an integer index into an application id associated with the
+// transaction. Index 0 corresponds to the current app, and an index > 0
+// corresponds to an offset into txn.ForeignApps. Returns an error if the index is
+// not valid.
+func (ac *ApplicationCallTxnFields) AppIDByIndex(i uint64) (basics.AppIndex, error) {
+
+ // Index 0 always corresponds to the current app
+ if i == 0 {
+ return ac.ApplicationID, nil
+ }
+
+ // An index > 0 corresponds to an offset into txn.ForeignApps. Check to
+ // make sure the index is valid.
+ if i > uint64(len(ac.ForeignApps)) {
+ err := fmt.Errorf("invalid Foreign App reference %d", i)
+ return basics.AppIndex(0), err
+ }
+
+ // aidx must be in [1, len(ac.ForeignApps)]
+ return ac.ForeignApps[i-1], nil
+}
+
+// IndexByAppID converts an application id into an integer offset into [current app,
+// txn.ForeignApps[0], ...], returning the index at the first match. It returns
+// an error if there is no such match.
+func (ac *ApplicationCallTxnFields) IndexByAppID(appID basics.AppIndex) (uint64, error) {
+
+ // Index 0 always corresponds to the current app
+ if appID == ac.ApplicationID {
+ return 0, nil
+ }
+
+ // Otherwise we index into ac.ForeignApps
+ for i, id := range ac.ForeignApps {
+ if appID == id {
+ return uint64(i) + 1, nil
+ }
+ }
+
+ return 0, fmt.Errorf("invalid Foreign App reference %d", appID)
+}
diff --git a/data/transactions/application_test.go b/data/transactions/application_test.go
index 78670bf18..5d50d7126 100644
--- a/data/transactions/application_test.go
+++ b/data/transactions/application_test.go
@@ -117,3 +117,30 @@ func TestEncodedAppTxnAllocationBounds(t *testing.T) {
}
}
}
+
+func TestIDByIndex(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+ ac := ApplicationCallTxnFields{}
+ ac.ApplicationID = 1
+ appID, err := ac.AppIDByIndex(0)
+ a.NoError(err)
+ a.Equal(basics.AppIndex(1), appID)
+ appID, err = ac.AppIDByIndex(1)
+ a.Contains(err.Error(), "invalid Foreign App reference")
+
+}
+
+func TestIndexByID(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+ ac := ApplicationCallTxnFields{}
+ ac.ApplicationID = 1
+ aidx, err := ac.IndexByAppID(1)
+ a.NoError(err)
+ a.Equal(uint64(0), aidx)
+ aidx, err = ac.IndexByAppID(2)
+ a.Contains(err.Error(), "invalid Foreign App reference")
+}
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index f6a219f1c..c2344409c 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -386,6 +386,7 @@ App fields used in the `app_params_get` opcode.
| `asset_holding_get i` | read from account A and asset B holding field X (imm arg) => {0 or 1 (top), value} |
| `asset_params_get i` | read from asset A params field X (imm arg) => {0 or 1 (top), value} |
| `app_params_get i` | read from app A params field X (imm arg) => {0 or 1 (top), value} |
+| `log` | write bytes to log state of the current application |
# Assembler Syntax
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index b262a8042..127716322 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -47,7 +47,6 @@ Ops have a 'cost' of 1 unless otherwise specified.
- Pushes: uint64
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
-- Mode: Signature
The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.
@@ -1202,3 +1201,14 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
- Pushes: []byte
- push a byte-array of length X, containing all zero bytes
- LogicSigVersion >= 4
+
+## log
+
+- Opcode: 0xb0
+- Pops: *... stack*, []byte
+- Pushes: _None_
+- write bytes to log state of the current application
+- LogicSigVersion >= 5
+- Mode: Application
+
+`log` can be called up to MaxLogCalls times in a program, and log up to a total of 1k bytes.
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 694c156c0..6b8f5793b 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -1101,6 +1101,56 @@ func typeDig(ops *OpStream, args []string) (StackTypes, StackTypes) {
return anys, returns
}
+func typeEquals(ops *OpStream, args []string) (StackTypes, StackTypes) {
+ top := len(ops.typeStack) - 1
+ if top >= 0 {
+ //Require arg0 and arg1 to have same type
+ return StackTypes{ops.typeStack[top], ops.typeStack[top]}, oneInt
+ }
+ return oneAny.plus(oneAny), oneInt
+}
+
+func typeDup(ops *OpStream, args []string) (StackTypes, StackTypes) {
+ top := len(ops.typeStack) - 1
+ if top >= 0 {
+ return StackTypes{ops.typeStack[top]}, StackTypes{ops.typeStack[top], ops.typeStack[top]}
+ }
+ return StackTypes{StackAny}, oneAny.plus(oneAny)
+}
+
+func typeDupTwo(ops *OpStream, args []string) (StackTypes, StackTypes) {
+ topTwo := oneAny.plus(oneAny)
+ top := len(ops.typeStack) - 1
+ if top >= 0 {
+ topTwo[1] = ops.typeStack[top]
+ if top >= 1 {
+ topTwo[0] = ops.typeStack[top-1]
+ }
+ }
+ result := topTwo.plus(topTwo)
+ return topTwo, result
+}
+
+func typeSelect(ops *OpStream, args []string) (StackTypes, StackTypes) {
+ selectArgs := twoAny.plus(oneInt)
+ top := len(ops.typeStack) - 1
+ if top >= 2 {
+ if ops.typeStack[top-1] == ops.typeStack[top-2] {
+ return selectArgs, StackTypes{ops.typeStack[top-1]}
+ }
+ }
+ return selectArgs, StackTypes{StackAny}
+}
+
+func typeSetBit(ops *OpStream, args []string) (StackTypes, StackTypes) {
+ setBitArgs := oneAny.plus(twoInts)
+ top := len(ops.typeStack) - 1
+ if top >= 2 {
+ return setBitArgs, StackTypes{ops.typeStack[top-2]}
+ }
+ return setBitArgs, StackTypes{StackAny}
+}
+
func typeCover(ops *OpStream, args []string) (StackTypes, StackTypes) {
if len(args) == 0 {
return oneAny, oneAny
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index c7d18c150..a8cd151f7 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -309,6 +309,7 @@ int 0
extract32bits
int 0
extract16bits
+log
`
var nonsense = map[uint64]string{
@@ -324,7 +325,7 @@ var compiled = map[uint64]string{
2: "022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f",
3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
4: "042004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d8164",
- 5: "052004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d816472064e014f0180070123456789abcd57000824810858245b245a2459",
+ 5: "052004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d816472064e014f0180070123456789abcd57000824810858245b245a2459b0",
}
func pseudoOp(opcode string) bool {
@@ -2165,6 +2166,40 @@ func TestDigAsm(t *testing.T) {
}
+func TestEqualsTypeCheck(t *testing.T) {
+ t.Parallel()
+ testProg(t, "int 1; byte 0x1234; ==", AssemblerMaxVersion, expect{3, "== arg 0..."})
+ testProg(t, "int 1; byte 0x1234; !=", AssemblerMaxVersion, expect{3, "!= arg 0..."})
+ testProg(t, "byte 0x1234; int 1; ==", AssemblerMaxVersion, expect{3, "== arg 0..."})
+ testProg(t, "byte 0x1234; int 1; !=", AssemblerMaxVersion, expect{3, "!= arg 0..."})
+}
+
+func TestDupTypeCheck(t *testing.T) {
+ t.Parallel()
+ testProg(t, "byte 0x1234; dup; int 1; +", AssemblerMaxVersion, expect{4, "+ arg 0..."})
+ testProg(t, "byte 0x1234; int 1; dup; +", AssemblerMaxVersion)
+ testProg(t, "byte 0x1234; int 1; dup2; +", AssemblerMaxVersion, expect{4, "+ arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup2; +", AssemblerMaxVersion, expect{4, "+ arg 1..."})
+
+ testProg(t, "byte 0x1234; int 1; dup; dig 1; len", AssemblerMaxVersion, expect{5, "len arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup; dig 1; !", AssemblerMaxVersion, expect{5, "! arg 0..."})
+
+ testProg(t, "byte 0x1234; int 1; dup2; dig 2; len", AssemblerMaxVersion, expect{5, "len arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup2; dig 2; !", AssemblerMaxVersion, expect{5, "! arg 0..."})
+}
+
+func TestSelectTypeCheck(t *testing.T) {
+ t.Parallel()
+ testProg(t, "int 1; int 2; int 3; select; len", AssemblerMaxVersion, expect{5, "len arg 0..."})
+ testProg(t, "byte 0x1234; byte 0x5678; int 3; select; !", AssemblerMaxVersion, expect{5, "! arg 0..."})
+}
+
+func TestSetBitTypeCheck(t *testing.T) {
+ t.Parallel()
+ testProg(t, "int 1; int 2; int 3; setbit; len", AssemblerMaxVersion, expect{5, "len arg 0..."})
+ testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, expect{5, "! arg 0..."})
+}
+
func TestCoverAsm(t *testing.T) {
t.Parallel()
testProg(t, `int 4; byte "john"; int 5; cover 2; pop; +`, AssemblerMaxVersion)
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index 68dcf73bc..d78275644 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -22,10 +22,8 @@ import (
"strings"
"testing"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -373,151 +371,6 @@ func TestBackwardCompatTEALv1(t *testing.T) {
}
-// ensure v2 fields error on pre TEAL v2 logicsig version
-// ensure v2 fields error in v1 program
-func TestBackwardCompatGlobalFields(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- t.Parallel()
- var fields []globalFieldSpec
- for _, fs := range globalFieldSpecs {
- if fs.version > 1 {
- fields = append(fields, fs)
- }
- }
- require.Greater(t, len(fields), 1)
-
- ledger := makeTestLedger(nil)
- for _, field := range fields {
- text := fmt.Sprintf("global %s", field.gfield.String())
- // check assembler fails if version before introduction
- testLine(t, text, assemblerNoVersion, "...available in version...")
- for v := uint64(0); v < field.version; v++ {
- testLine(t, text, v, "...available in version...")
- }
-
- ops := testProg(t, text, AssemblerMaxVersion)
-
- proto := config.Consensus[protocol.ConsensusV23]
- require.False(t, proto.Application)
- ep := defaultEvalParams(nil, nil)
- ep.Proto = &proto
- ep.Ledger = ledger
-
- // check failure with version check
- _, err := Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "greater than protocol supported version")
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "greater than protocol supported version")
-
- // check opcodes failures
- ops.Program[0] = 1 // set version to 1
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid global[")
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid global[")
-
- // check opcodes failures
- ops.Program[0] = 0 // set version to 0
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid global[")
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid global[")
- }
-}
-
-// ensure v2 fields error in v1 program
-func TestBackwardCompatTxnFields(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- t.Parallel()
- var fields []txnFieldSpec
- for _, fs := range txnFieldSpecs {
- if fs.version > 1 {
- fields = append(fields, fs)
- }
- }
- require.Greater(t, len(fields), 1)
-
- tests := []string{
- "txn %s",
- "gtxn 0 %s",
- }
-
- ledger := makeTestLedger(nil)
- txn := makeSampleTxn()
- // We'll reject too early if we have a nonzero RekeyTo, because that
- // field must be zero for every txn in the group if this is an old
- // TEAL version
- txn.Txn.RekeyTo = basics.Address{}
- txgroup := makeSampleTxnGroup(txn)
- for _, fs := range fields {
- field := fs.field.String()
- for _, command := range tests {
- text := fmt.Sprintf(command, field)
- asmError := "...available in version ..."
- if _, ok := txnaFieldSpecByField[fs.field]; ok {
- parts := strings.Split(text, " ")
- op := parts[0]
- asmError = fmt.Sprintf("found array field %#v in %s op", field, op)
- }
- // check assembler fails if version before introduction
- testLine(t, text, assemblerNoVersion, asmError)
- for v := uint64(0); v < fs.version; v++ {
- testLine(t, text, v, asmError)
- }
-
- ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
- if _, ok := txnaFieldSpecByField[fs.field]; ok {
- // "txn Accounts" is invalid, so skip evaluation
- require.Error(t, err, asmError)
- continue
- } else {
- require.NoError(t, err)
- }
-
- proto := config.Consensus[protocol.ConsensusV23]
- require.False(t, proto.Application)
- ep := defaultEvalParams(nil, nil)
- ep.Proto = &proto
- ep.Ledger = ledger
- ep.TxnGroup = txgroup
-
- // check failure with version check
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "greater than protocol supported version")
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "greater than protocol supported version")
-
- // check opcodes failures
- ops.Program[0] = 1 // set version to 1
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn field")
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn field")
-
- // check opcodes failures
- ops.Program[0] = 0 // set version to 0
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn field")
- _, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid txn field")
- }
- }
-}
-
func TestBackwardCompatAssemble(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index e8e784eb3..cb1c323f3 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -150,6 +150,8 @@ var opDocByName = map[string]string{
"b&": "A bitwise-and B, where A and B are byte-arrays, zero-left extended to the greater of their lengths",
"b^": "A bitwise-xor B, where A and B are byte-arrays, zero-left extended to the greater of their lengths",
"b~": "X with all bits inverted",
+
+ "log": "write bytes to log state of the current application",
}
// OpDoc returns a description of the op
@@ -236,6 +238,7 @@ var opDocExtras = map[string]string{
"asset_holding_get": "params: Txn.Accounts offset (or, since v4, an account address that appears in Txn.Accounts or is Txn.Sender), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if exist and 0 otherwise), value.",
"asset_params_get": "params: Before v4, Txn.ForeignAssets offset. Since v4, Txn.ForeignAssets offset or an asset id that appears in Txn.ForeignAssets. Return: did_exist flag (1 if exist and 0 otherwise), value.",
"app_params_get": "params: Txn.ForeignApps offset or an app id that appears in Txn.ForeignApps. Return: did_exist flag (1 if exist and 0 otherwise), value.",
+ "log": "`log` can be called up to MaxLogCalls times in a program, and log up to a total of 1k bytes.",
}
// OpDocExtra returns extra documentation text about an op
@@ -251,7 +254,7 @@ var OpGroups = map[string][]string{
"Byteslice Logic": {"b|", "b&", "b^", "b~"},
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "txn", "gtxn", "txna", "gtxna", "gtxns", "gtxnsa", "global", "load", "store", "gload", "gloads", "gaid", "gaids"},
"Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "cover", "uncover", "swap", "select", "assert", "callsub", "retsub"},
- "State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get"},
+ "State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "log"},
}
// OpCost indicates the cost of an operation over the range of
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index f5937bde1..ced3baed3 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -53,6 +53,12 @@ const MaxStringSize = 4096
// MaxByteMathSize is the limit of byte strings supplied as input to byte math opcodes
const MaxByteMathSize = 64
+// MaxLogSize is the limit of total log size from n log calls in a program
+const MaxLogSize = 1024
+
+// MaxLogCalls is the limit of total log calls during a program execution
+const MaxLogCalls = config.MaxLogCalls
+
// stackValue is the type for the operand stack.
// Each stackValue is either a valid []byte value or a uint64 value.
// If (.Bytes != nil) the stackValue is a []byte value, otherwise uint64 value.
@@ -159,6 +165,8 @@ type LedgerForLogic interface {
DelGlobal(key string) error
GetDelta(txn *transactions.Transaction) (evalDelta basics.EvalDelta, err error)
+
+ AppendLog(txn *transactions.Transaction, value string) error
}
// EvalSideEffects contains data returned from evaluation
@@ -216,6 +224,9 @@ type EvalParams struct {
// determines eval mode: runModeSignature or runModeApplication
runModeFlags runMode
+
+ // Total pool of app call budget in a group transaction
+ PooledApplicationBudget *uint64
}
type opEvalFunc func(cx *evalContext)
@@ -255,6 +266,9 @@ func (ep EvalParams) budget() int {
if ep.runModeFlags == runModeSignature {
return int(ep.Proto.LogicSigMaxCost)
}
+ if ep.Proto.EnableAppCostPooling && ep.PooledApplicationBudget != nil {
+ return int(*ep.PooledApplicationBudget)
+ }
return ep.Proto.MaxAppProgramCost
}
@@ -281,7 +295,9 @@ type evalContext struct {
version uint64
scratch scratchSpace
- cost int // cost incurred so far
+ cost int // cost incurred so far
+ logCalls int // number of log calls so far
+ logSize int // log size of the program so far
// Set of PC values that branches we've seen so far might
// go. So, if checkStep() skips one, that branch is trying to
@@ -355,6 +371,10 @@ func EvalStateful(program []byte, params EvalParams) (pass bool, err error) {
cx.EvalParams = params
cx.runModeFlags = runModeApplication
pass, err = eval(program, &cx)
+ if cx.EvalParams.Proto.EnableAppCostPooling && cx.EvalParams.PooledApplicationBudget != nil {
+ // if eval passes, then budget is always greater than cost, so should not have underflow
+ *cx.EvalParams.PooledApplicationBudget = basics.SubSaturate(*cx.EvalParams.PooledApplicationBudget, uint64(cx.cost))
+ }
// set side effects
cx.PastSideEffects[cx.GroupIndex].setScratchSpace(cx.scratch)
@@ -630,7 +650,8 @@ func (cx *evalContext) step() {
}
cx.cost += deets.Cost
if cx.cost > cx.budget() {
- cx.err = fmt.Errorf("pc=%3d dynamic cost budget of %d exceeded, executing %s", cx.pc, cx.budget(), spec.Name)
+ cx.err = fmt.Errorf("pc=%3d dynamic cost budget exceeded, executing %s: remaining budget is %d but program cost was %d",
+ cx.pc, spec.Name, cx.budget(), cx.cost)
return
}
@@ -3154,3 +3175,26 @@ func opAppParamsGet(cx *evalContext) {
cx.stack[last] = value
cx.stack = append(cx.stack, stackValue{Uint: exist})
}
+
+func opLog(cx *evalContext) {
+ last := len(cx.stack) - 1
+
+ if cx.logCalls == MaxLogCalls {
+ cx.err = fmt.Errorf("too many log calls in program. up to %d is allowed", MaxLogCalls)
+ return
+ }
+ cx.logCalls++
+ log := cx.stack[last]
+ cx.logSize += len(log.Bytes)
+ if cx.logSize > MaxLogSize {
+ cx.err = fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, MaxLogSize)
+ return
+ }
+ // write log to applyData
+ err := cx.Ledger.AppendLog(&cx.Txn.Txn, string(log.Bytes))
+ if err != nil {
+ cx.err = err
+ return
+ }
+ cx.stack = cx.stack[:last]
+}
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 2cceb2a4f..d1c53a34a 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -60,6 +60,7 @@ type testLedger struct {
appID basics.AppIndex
creatorAddr basics.Address
mods map[basics.AppIndex]map[string]basics.ValueDelta
+ logs []basics.LogItem
}
func makeApp(li uint64, lb uint64, gi uint64, gb uint64) basics.AppParams {
@@ -464,9 +465,25 @@ func (l *testLedger) GetDelta(txn *transactions.Transaction) (evalDelta basics.E
}
}
}
+ evalDelta.Logs = l.logs
return
}
+func (l *testLedger) AppendLog(txn *transactions.Transaction, value string) error {
+
+ appIdx, err := txn.IndexByAppID(l.appID)
+ if err != nil {
+ return err
+ }
+ _, ok := l.applications[l.appID]
+ if !ok {
+ return fmt.Errorf("no such app")
+ }
+
+ l.logs = append(l.logs, basics.LogItem{ID: appIdx, Message: value})
+ return nil
+}
+
func TestEvalModes(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -590,6 +607,8 @@ asset_params_get AssetTotal
pop
&&
!=
+bytec_0
+log
`
type desc struct {
source string
@@ -678,8 +697,8 @@ pop
})
}
- // check ed25519verify and arg are not allowed in statefull mode
- disallowed := []string{
+ // check that ed25519verify and arg is not allowed in stateful mode between v2-v4
+ disallowedV4 := []string{
"byte 0x01\nbyte 0x01\nbyte 0x01\ned25519verify",
"arg 0",
"arg_0",
@@ -687,6 +706,24 @@ pop
"arg_2",
"arg_3",
}
+ for _, source := range disallowedV4 {
+ ops := testProg(t, source, 4)
+ ep := defaultEvalParams(nil, nil)
+ err := CheckStateful(ops.Program, ep)
+ require.Error(t, err)
+ _, err = EvalStateful(ops.Program, ep)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "not allowed in current mode")
+ }
+
+ // check that arg is not allowed in stateful mode beyond v5
+ disallowed := []string{
+ "arg 0",
+ "arg_0",
+ "arg_1",
+ "arg_2",
+ "arg_3",
+ }
for _, source := range disallowed {
ops := testProg(t, source, AssemblerMaxVersion)
ep := defaultEvalParams(nil, nil)
@@ -712,6 +749,7 @@ pop
"int 0\nint 0\nasset_holding_get AssetFrozen",
"int 0\nint 0\nasset_params_get AssetManager",
"int 0\nint 0\napp_params_get AppApprovalProgram",
+ "byte 0x01\nlog",
}
for _, source := range statefulOpcodeCalls {
@@ -792,6 +830,7 @@ func testApp(t *testing.T, program string, ep EvalParams, problems ...string) ba
require.NoError(t, err)
require.Empty(t, delta.GlobalDelta)
require.Empty(t, delta.LocalDeltas)
+ require.Empty(t, delta.Logs)
return delta
}
return basics.EvalDelta{}
@@ -2866,3 +2905,71 @@ func TestAppLoop(t *testing.T) {
// Infinite loop because multiply by one instead of two
testApp(t, stateful+"int 1; loop:; int 1; *; dup; int 10; <; bnz loop; int 16; ==", ep, "dynamic cost")
}
+
+func TestWriteLogs(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+
+ ep := defaultEvalParams(nil, nil)
+ txn := makeSampleTxn()
+ txn.Txn.ApplicationID = 100
+ ep.Txn = &txn
+ ledger := makeTestLedger(
+ map[basics.Address]uint64{
+ txn.Txn.Sender: 1,
+ },
+ )
+ ep.Ledger = ledger
+ ledger.newApp(txn.Txn.Sender, 100, basics.AppParams{})
+
+ // write int and bytes values
+ source := `int 1
+loop: byte "a"
+log
+int 1
++
+dup
+int 30
+<
+bnz loop
+`
+ ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
+ require.NoError(t, err)
+ err = CheckStateful(ops.Program, ep)
+ require.NoError(t, err)
+ pass, err := EvalStateful(ops.Program, ep)
+ require.NoError(t, err)
+ require.True(t, pass)
+ delta, err := ledger.GetDelta(&ep.Txn.Txn)
+ require.NoError(t, err)
+ require.Empty(t, 0, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
+ require.Len(t, delta.Logs, 29)
+}
+
+func TestPooledAppCallsVerifyOp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `#pragma version 5
+ global CurrentApplicationID
+ pop
+ byte 0x01
+ byte "ZC9KNzlnWTlKZ1pwSkNzQXVzYjNBcG1xTU9YbkRNWUtIQXNKYVk2RzRBdExPakQx"
+ addr DROUIZXGT3WFJR3QYVZWTR5OJJXJCMOLS7G4FUGZDSJM5PNOVOREH6HIZE
+ ed25519verify
+ pop
+ int 1`
+
+ ep, _ := makeSampleEnv()
+ ep.Proto.EnableAppCostPooling = true
+ ep.PooledApplicationBudget = new(uint64)
+ // Simulate test with 2 grouped txn
+ *ep.PooledApplicationBudget = uint64(ep.Proto.MaxAppProgramCost * 2)
+ testApp(t, source, ep, "pc=107 dynamic cost budget exceeded, executing ed25519verify: remaining budget is 1400 but program cost was 1905")
+
+ // Simulate test with 3 grouped txn
+ *ep.PooledApplicationBudget = uint64(ep.Proto.MaxAppProgramCost * 3)
+ testApp(t, source, ep)
+}
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index df1da45f8..9a16c0016 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -52,6 +52,7 @@ func defaultEvalProtoWithVersion(version uint64) config.ConsensusParams {
return config.ConsensusParams{
LogicSigVersion: version,
LogicSigMaxCost: 20000,
+ Application: version >= appsEnabledVersion,
MaxAppProgramCost: 700,
MaxAppKeyLen: 64,
MaxAppBytesValueLen: 64,
@@ -1026,6 +1027,8 @@ func TestGlobal(t *testing.T) {
EvalStateful, CheckStateful,
},
}
+ // tests keys are versions so they must be in a range 1..AssemblerMaxVersion plus zero version
+ require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1)
ledger := makeTestLedger(nil)
ledger.appID = 42
addr, err := basics.UnmarshalChecksumAddress(testAddr)
@@ -4061,14 +4064,14 @@ func TestAllowedOpcodesV2(t *testing.T) {
"dup2": "int 1; int 2; dup2",
"concat": "byte 0x41; dup; concat",
"substring": "byte 0x41; substring 0 1",
- "substring3": "byte 0x41; dup; dup; substring3",
+ "substring3": "byte 0x41; int 0; int 1; substring3",
"balance": "int 1; balance",
"app_opted_in": "int 0; dup; app_opted_in",
"app_local_get": "int 0; byte 0x41; app_local_get",
"app_local_get_ex": "int 0; dup; byte 0x41; app_local_get_ex",
"app_global_get": "int 0; byte 0x41; app_global_get",
"app_global_get_ex": "int 0; byte 0x41; app_global_get_ex",
- "app_local_put": "int 0; dup; byte 0x41; app_local_put",
+ "app_local_put": "int 0; byte 0x41; dup; app_local_put",
"app_global_put": "byte 0x41; dup; app_global_put",
"app_local_del": "int 0; byte 0x41; app_local_del",
"app_global_del": "byte 0x41; app_global_del",
@@ -4731,3 +4734,129 @@ func TestBytesConversions(t *testing.T) {
testAccepts(t, "byte 0x11; byte 0x10; b+; btoi; int 0x21; ==", 4)
testAccepts(t, "byte 0x0011; byte 0x10; b+; btoi; int 0x21; ==", 4)
}
+
+func TestLog(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ proto := defaultEvalProtoWithVersion(LogicVersion)
+ txn := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ },
+ }
+ ledger := makeTestLedger(nil)
+ ledger.newApp(txn.Txn.Receiver, 0, basics.AppParams{})
+ sb := strings.Builder{}
+ ep := defaultEvalParams(&sb, &txn)
+ ep.Proto = &proto
+ ep.Ledger = ledger
+ testCases := []struct {
+ source string
+ loglen int
+ }{
+ {
+ source: `byte "a logging message"; log; int 1`,
+ loglen: 1,
+ },
+ {
+ source: `byte "a logging message"; log; byte "a logging message"; log; int 1`,
+ loglen: 2,
+ },
+ {
+ source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log;`, config.MaxLogCalls)),
+ loglen: MaxLogCalls,
+ },
+ {
+ source: `int 1; loop: byte "a logging message"; log; int 1; +; dup; int 30; <=; bnz loop;`,
+ loglen: 30,
+ },
+ {
+ source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", MaxLogSize)),
+ loglen: 1,
+ },
+ }
+
+	// track expected number of logs in ep.Ledger
+ count := 0
+ for i, s := range testCases {
+ ops := testProg(t, s.source, AssemblerMaxVersion)
+
+ err := CheckStateful(ops.Program, ep)
+ require.NoError(t, err, s)
+
+ pass, err := EvalStateful(ops.Program, ep)
+ require.NoError(t, err)
+ require.True(t, pass)
+ count += s.loglen
+ require.Equal(t, len(ledger.logs), count)
+ if i == len(testCases)-1 {
+ require.Equal(t, strings.Repeat("a", MaxLogSize), ledger.logs[count-1].Message)
+ } else {
+ for _, l := range ledger.logs[count-s.loglen:] {
+ require.Equal(t, "a logging message", l.Message)
+ }
+ }
+ }
+
+ msg := strings.Repeat("a", 400)
+ failCases := []struct {
+ source string
+ runMode runMode
+ errContains string
+ }{
+ {
+ source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", MaxLogSize+1)),
+ errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
+ runMode: runModeApplication,
+ },
+ {
+ source: fmt.Sprintf(`byte "%s"; log; byte "%s"; log; byte "%s"; log; int 1`, msg, msg, msg),
+ errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
+ runMode: runModeApplication,
+ },
+ {
+ source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log;`, config.MaxLogCalls+1)),
+ errContains: "too many log calls",
+ runMode: runModeApplication,
+ },
+ {
+ source: `int 1; loop: byte "a"; log; int 1; +; dup; int 35; <; bnz loop;`,
+ errContains: "too many log calls",
+ runMode: runModeApplication,
+ },
+ {
+ source: fmt.Sprintf(`int 1; loop: byte "%s"; log; int 1; +; dup; int 6; <; bnz loop;`, strings.Repeat(`a`, 400)),
+ errContains: fmt.Sprintf("> %d bytes limit", MaxLogSize),
+ runMode: runModeApplication,
+ },
+ {
+ source: `load 0; log`,
+ errContains: "log arg 0 wanted []byte but got uint64",
+ runMode: runModeApplication,
+ },
+ {
+ source: `byte "a logging message"; log; int 1`,
+ errContains: "log not allowed in current mode",
+ runMode: runModeSignature,
+ },
+ }
+
+ for _, c := range failCases {
+ ops := testProg(t, c.source, AssemblerMaxVersion)
+
+ err := CheckStateful(ops.Program, ep)
+ require.NoError(t, err, c)
+
+ var pass bool
+ switch c.runMode {
+ case runModeApplication:
+ pass, err = EvalStateful(ops.Program, ep)
+ default:
+ pass, err = Eval(ops.Program, ep)
+
+ }
+ require.Contains(t, err.Error(), c.errContains)
+ require.False(t, pass)
+ }
+}
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index bab2418a6..b36e715e3 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -17,12 +17,164 @@
package logic
import (
+ "fmt"
"testing"
"github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
)
func TestArrayFields(t *testing.T) {
require.Equal(t, len(TxnaFieldNames), len(TxnaFieldTypes))
require.Equal(t, len(txnaFieldSpecByField), len(TxnaFieldTypes))
}
+
+// ensure v2+ fields fail in TEAL assembler and evaluator on a version before they were introduced
+// ensure v2+ fields error in v1 program
+func TestGlobalFieldsVersions(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var fields []globalFieldSpec
+ for _, fs := range globalFieldSpecs {
+ if fs.version > 1 {
+ fields = append(fields, fs)
+ }
+ }
+ require.Greater(t, len(fields), 1)
+
+ ledger := makeTestLedger(nil)
+ for _, field := range fields {
+ text := fmt.Sprintf("global %s", field.gfield.String())
+ // check assembler fails if version before introduction
+ testLine(t, text, assemblerNoVersion, "...available in version...")
+ for v := uint64(0); v < field.version; v++ {
+ testLine(t, text, v, "...available in version...")
+ }
+ testLine(t, text, field.version, "")
+
+ ops := testProg(t, text, AssemblerMaxVersion)
+
+ // check on a version before the field version
+ preLogicVersion := field.version - 1
+ proto := defaultEvalProtoWithVersion(preLogicVersion)
+ if preLogicVersion < appsEnabledVersion {
+ require.False(t, proto.Application)
+ }
+ ep := defaultEvalParams(nil, nil)
+ ep.Proto = &proto
+ ep.Ledger = ledger
+
+ // check failure with version check
+ _, err := Eval(ops.Program, ep)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "greater than protocol supported version")
+
+ // check opcodes failures
+ ops.Program[0] = byte(preLogicVersion) // set version
+ _, err = Eval(ops.Program, ep)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "invalid global[")
+
+ // check opcodes failures on 0 version
+ ops.Program[0] = 0 // set version to 0
+ _, err = Eval(ops.Program, ep)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "invalid global[")
+ }
+}
+
+// ensure v2+ fields error in programs of previous TEAL version, similarly to global fields test
+func TestTxnFieldVersions(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ var fields []txnFieldSpec
+ for _, fs := range txnFieldSpecs {
+ if fs.version > 1 {
+ fields = append(fields, fs)
+ }
+ }
+ require.Greater(t, len(fields), 1)
+
+ tests := []string{
+ "txn %s",
+ "gtxn 0 %s",
+ }
+ subs := map[string]string{
+ tests[0]: "txna %s 0",
+ tests[1]: "gtxna 0 %s 0",
+ }
+ txnaVersion := uint64(appsEnabledVersion)
+
+ ledger := makeTestLedger(nil)
+ txn := makeSampleTxn()
+ // We'll reject too early if we have a nonzero RekeyTo, because that
+ // field must be zero for every txn in the group if this is an old
+ // TEAL version
+ txn.Txn.RekeyTo = basics.Address{}
+ txgroup := makeSampleTxnGroup(txn)
+ asmDefaultError := "...available in version ..."
+ for _, fs := range fields {
+ field := fs.field.String()
+ for _, command := range tests {
+ text := fmt.Sprintf(command, field)
+ asmError := asmDefaultError
+ txnaMode := false
+ if _, ok := txnaFieldSpecByField[fs.field]; ok {
+ text = fmt.Sprintf(subs[command], field)
+ asmError = "...txna opcode was introduced in ..."
+ txnaMode = true
+ }
+ // check assembler fails if version before introduction
+ testLine(t, text, assemblerNoVersion, asmError)
+ for v := uint64(0); v < fs.version; v++ {
+ if txnaMode && v >= txnaVersion {
+ asmError = asmDefaultError
+ }
+ testLine(t, text, v, asmError)
+ }
+ testLine(t, text, fs.version, "")
+
+ ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
+ require.NoError(t, err)
+
+ preLogicVersion := fs.version - 1
+ proto := defaultEvalProtoWithVersion(preLogicVersion)
+ if preLogicVersion < appsEnabledVersion {
+ require.False(t, proto.Application)
+ }
+ ep := defaultEvalParams(nil, nil)
+ ep.Proto = &proto
+ ep.Ledger = ledger
+ ep.TxnGroup = txgroup
+
+ // check failure with version check
+ _, err = Eval(ops.Program, ep)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "greater than protocol supported version")
+
+ // check opcodes failures
+ ops.Program[0] = byte(preLogicVersion) // set version
+ _, err = Eval(ops.Program, ep)
+ require.Error(t, err)
+ if txnaMode && preLogicVersion < txnaVersion {
+ require.Contains(t, err.Error(), "illegal opcode")
+ } else {
+ require.Contains(t, err.Error(), "invalid txn field")
+ }
+
+ // check opcodes failures on 0 version
+ ops.Program[0] = 0 // set version to 0
+ _, err = Eval(ops.Program, ep)
+ require.Error(t, err)
+ if txnaMode {
+ require.Contains(t, err.Error(), "illegal opcode")
+ } else {
+ require.Contains(t, err.Error(), "invalid txn field")
+ }
+ }
+ }
+}
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 6350856c7..2ab59ed9c 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -142,6 +142,7 @@ var OpSpecs = []OpSpec{
{0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(45)},
{0x04, "ed25519verify", opEd25519verify, asmDefault, disDefault, threeBytes, oneInt, 1, runModeSignature, costly(1900)},
+ {0x04, "ed25519verify", opEd25519verify, asmDefault, disDefault, threeBytes, oneInt, 5, modeAny, costly(1900)},
{0x08, "+", opPlus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
{0x09, "-", opMinus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
{0x0a, "/", opDiv, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
@@ -152,8 +153,8 @@ var OpSpecs = []OpSpec{
{0x0f, ">=", opGe, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
{0x10, "&&", opAnd, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
{0x11, "||", opOr, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
- {0x12, "==", opEq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, opDefault},
- {0x13, "!=", opNeq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, opDefault},
+ {0x12, "==", opEq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, stacky(typeEquals)},
+ {0x13, "!=", opNeq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, stacky(typeEquals)},
{0x14, "!", opNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opDefault},
{0x15, "len", opLen, asmDefault, disDefault, oneBytes, oneInt, 1, modeAny, opDefault},
{0x16, "itob", opItob, asmDefault, disDefault, oneInt, oneBytes, 1, modeAny, opDefault},
@@ -212,13 +213,13 @@ var OpSpecs = []OpSpec{
{0x43, "return", opReturn, asmDefault, disDefault, oneInt, nil, 2, modeAny, opDefault},
{0x44, "assert", opAssert, asmDefault, disDefault, oneInt, nil, 3, modeAny, opDefault},
{0x48, "pop", opPop, asmDefault, disDefault, oneAny, nil, 1, modeAny, opDefault},
- {0x49, "dup", opDup, asmDefault, disDefault, oneAny, twoAny, 1, modeAny, opDefault},
- {0x4a, "dup2", opDup2, asmDefault, disDefault, twoAny, twoAny.plus(twoAny), 2, modeAny, opDefault},
+ {0x49, "dup", opDup, asmDefault, disDefault, oneAny, twoAny, 1, modeAny, stacky(typeDup)},
+ {0x4a, "dup2", opDup2, asmDefault, disDefault, twoAny, twoAny.plus(twoAny), 2, modeAny, stacky(typeDupTwo)},
// There must be at least one thing on the stack for dig, but
// it would be nice if we did better checking than that.
{0x4b, "dig", opDig, asmDefault, disDefault, oneAny, twoAny, 3, modeAny, stacky(typeDig, "n")},
{0x4c, "swap", opSwap, asmDefault, disDefault, twoAny, twoAny, 3, modeAny, stacky(typeSwap)},
- {0x4d, "select", opSelect, asmDefault, disDefault, twoAny.plus(oneInt), oneAny, 3, modeAny, opDefault},
+ {0x4d, "select", opSelect, asmDefault, disDefault, twoAny.plus(oneInt), oneAny, 3, modeAny, stacky(typeSelect)},
{0x4e, "cover", opCover, asmDefault, disDefault, oneAny, oneAny, 5, modeAny, stacky(typeCover, "n")},
{0x4f, "uncover", opUncover, asmDefault, disDefault, oneAny, oneAny, 5, modeAny, stacky(typeUncover, "n")},
@@ -226,7 +227,7 @@ var OpSpecs = []OpSpec{
{0x51, "substring", opSubstring, assembleSubstring, disDefault, oneBytes, oneBytes, 2, modeAny, immediates("s", "e")},
{0x52, "substring3", opSubstring3, asmDefault, disDefault, byteIntInt, oneBytes, 2, modeAny, opDefault},
{0x53, "getbit", opGetBit, asmDefault, disDefault, anyInt, oneInt, 3, modeAny, opDefault},
- {0x54, "setbit", opSetBit, asmDefault, disDefault, anyIntInt, oneAny, 3, modeAny, opDefault},
+ {0x54, "setbit", opSetBit, asmDefault, disDefault, anyIntInt, oneAny, 3, modeAny, stacky(typeSetBit)},
{0x55, "getbyte", opGetByte, asmDefault, disDefault, byteInt, oneInt, 3, modeAny, opDefault},
{0x56, "setbyte", opSetByte, asmDefault, disDefault, byteIntInt, oneBytes, 3, modeAny, opDefault},
{0x57, "extract", opExtract, asmDefault, disDefault, oneBytes, oneBytes, 5, modeAny, immediates("s", "l")},
@@ -294,6 +295,9 @@ var OpSpecs = []OpSpec{
{0xad, "b^", opBytesBitXor, asmDefault, disDefault, twoBytes, oneBytes, 4, modeAny, costly(6)},
{0xae, "b~", opBytesBitNot, asmDefault, disDefault, oneBytes, oneBytes, 4, modeAny, costly(4)},
{0xaf, "bzero", opBytesZero, asmDefault, disDefault, oneInt, oneBytes, 4, modeAny, opDefault},
+
+ // ABI support opcodes.
+ {0xb0, "log", opLog, asmDefault, disDefault, oneBytes, nil, 5, runModeApplication, opDefault},
}
type sortByOpcode []OpSpec
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index cab9a98e3..cb2d6d875 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -100,6 +100,26 @@ func (g *GroupContext) Equal(other *GroupContext) bool {
// Txn verifies a SignedTxn as being signed and having no obviously inconsistent data.
// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
func Txn(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
+ batchVerifier := crypto.MakeBatchVerifierDefaultSize()
+
+ if err := TxnBatchVerify(s, txnIdx, groupCtx, batchVerifier); err != nil {
+ return err
+ }
+
+	// this case is used for compact certificates, where no signature is supplied
+ if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
+ return nil
+ }
+ if err := batchVerifier.Verify(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// TxnBatchVerify verifies a SignedTxn having no obviously inconsistent data.
+// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
+// it is the caller's responsibility to call batchVerifier.Verify()
+func TxnBatchVerify(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, verifier *crypto.BatchVerifier) error {
if !groupCtx.consensusParams.SupportRekeying && (s.AuthAddr != basics.Address{}) {
return errors.New("nonempty AuthAddr but rekeying not supported")
}
@@ -108,11 +128,31 @@ func Txn(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
return err
}
- return stxnVerifyCore(s, txnIdx, groupCtx)
+ return stxnVerifyCore(s, txnIdx, groupCtx, verifier)
}
// TxnGroup verifies a []SignedTxn as being signed and having no obviously inconsistent data.
func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache) (groupCtx *GroupContext, err error) {
+ batchVerifier := crypto.MakeBatchVerifierDefaultSize()
+
+ if groupCtx, err = TxnGroupBatchVerify(stxs, contextHdr, cache, batchVerifier); err != nil {
+ return nil, err
+ }
+
+ if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
+ return groupCtx, nil
+ }
+
+ if err := batchVerifier.Verify(); err != nil {
+ return nil, err
+ }
+
+ return
+}
+
+// TxnGroupBatchVerify verifies a []SignedTxn having no obviously inconsistent data.
+// it is the caller's responsibility to call batchVerifier.Verify()
+func TxnGroupBatchVerify(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader, cache VerifiedTransactionCache, verifier *crypto.BatchVerifier) (groupCtx *GroupContext, err error) {
groupCtx, err = PrepareGroupContext(stxs, contextHdr)
if err != nil {
return nil, err
@@ -121,7 +161,7 @@ func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader,
minFeeCount := uint64(0)
feesPaid := uint64(0)
for i, stxn := range stxs {
- err = Txn(&stxn, i, groupCtx)
+ err = TxnBatchVerify(&stxn, i, groupCtx, verifier)
if err != nil {
err = fmt.Errorf("transaction %+v invalid : %w", stxn, err)
return
@@ -151,7 +191,7 @@ func TxnGroup(stxs []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader,
return
}
-func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
+func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
numSigs := 0
hasSig := false
hasMsig := false
@@ -185,19 +225,20 @@ func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
}
if hasSig {
- if crypto.SignatureVerifier(s.Authorizer()).Verify(s.Txn, s.Sig) {
- return nil
- }
- return errors.New("signature validation failed")
+ batchVerifier.EnqueueSignature(crypto.SignatureVerifier(s.Authorizer()), s.Txn, s.Sig)
+ return nil
}
if hasMsig {
- if ok, _ := crypto.MultisigVerify(s.Txn, crypto.Digest(s.Authorizer()), s.Msig); ok {
+ if ok, _ := crypto.MultisigBatchVerify(s.Txn,
+ crypto.Digest(s.Authorizer()),
+ s.Msig,
+ batchVerifier); ok {
return nil
}
return errors.New("multisig validation failed")
}
if hasLogicSig {
- return logicSig(s, txnIdx, groupCtx)
+ return logicSigBatchVerify(s, txnIdx, groupCtx, batchVerifier)
}
return errors.New("has one mystery sig. WAT?")
}
@@ -205,6 +246,27 @@ func stxnVerifyCore(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
// LogicSigSanityCheck checks that the signature is valid and that the program is basically well formed.
// It does not evaluate the logic.
func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
+ batchVerifier := crypto.MakeBatchVerifierDefaultSize()
+
+ if err := LogicSigSanityCheckBatchVerify(txn, groupIndex, groupCtx, batchVerifier); err != nil {
+ return err
+ }
+
+	// in the case of a contract account, the signature length might be 0; that's ok
+ if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
+ return nil
+ }
+
+ if err := batchVerifier.Verify(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// LogicSigSanityCheckBatchVerify checks that the signature is valid and that the program is basically well formed.
+// It does not evaluate the logic.
+// it is the caller's responsibility to call batchVerifier.Verify()
+func LogicSigSanityCheckBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
lsig := txn.Lsig
if groupCtx.consensusParams.LogicSigVersion == 0 {
@@ -260,20 +322,19 @@ func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *
if !hasMsig {
program := logic.Program(lsig.Logic)
- if !crypto.SignatureVerifier(txn.Authorizer()).Verify(&program, lsig.Sig) {
- return errors.New("logic signature validation failed")
- }
+ batchVerifier.EnqueueSignature(crypto.PublicKey(txn.Authorizer()), &program, lsig.Sig)
} else {
program := logic.Program(lsig.Logic)
- if ok, _ := crypto.MultisigVerify(&program, crypto.Digest(txn.Authorizer()), lsig.Msig); !ok {
+ if ok, _ := crypto.MultisigBatchVerify(&program, crypto.Digest(txn.Authorizer()), lsig.Msig, batchVerifier); !ok {
return errors.New("logic multisig validation failed")
}
}
return nil
}
-// logicSig checks that the signature is valid, executing the program.
-func logicSig(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
+// logicSigBatchVerify checks that the signature is valid, executing the program.
+// it is the caller's responsibility to call batchVerifier.Verify()
+func logicSigBatchVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchverifier *crypto.BatchVerifier) error {
err := LogicSigSanityCheck(txn, groupIndex, groupCtx)
if err != nil {
return err
@@ -336,15 +397,24 @@ func PaysetGroups(ctx context.Context, payset [][]transactions.SignedTxn, blkHea
if tasksCtx.Err() != nil {
return tasksCtx.Err()
}
+
txnGroups := arg.([][]transactions.SignedTxn)
groupCtxs := make([]*GroupContext, len(txnGroups))
+
+ batchVerifier := crypto.MakeBatchVerifier(len(payset))
for i, signTxnsGrp := range txnGroups {
- groupCtxs[i], grpErr = TxnGroup(signTxnsGrp, blkHeader, nil)
+ groupCtxs[i], grpErr = TxnGroupBatchVerify(signTxnsGrp, blkHeader, nil, batchVerifier)
// abort only if it's a non-cache error.
if grpErr != nil {
return grpErr
}
}
+ if batchVerifier.GetNumberOfEnqueuedSignatures() != 0 {
+ verifyErr := batchVerifier.Verify()
+ if verifyErr != nil {
+ return verifyErr
+ }
+ }
cache.AddPayset(txnGroups, groupCtxs)
return nil
}, nextWorkset, worksDoneCh)
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 67374ad02..a53d83163 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
@@ -62,7 +63,7 @@ func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
}
require.Equal(b, len(genesis), numUsers+1)
- genBal := MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
ledgerName := fmt.Sprintf("%s-mem-%d", b.Name(), b.N)
const inMem = true
cfg := config.GetDefaultLocal()
diff --git a/data/txntest/txn.go b/data/txntest/txn.go
new file mode 100644
index 000000000..98357f957
--- /dev/null
+++ b/data/txntest/txn.go
@@ -0,0 +1,183 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// Copyright (C) 2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package txntest
+
+import (
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/compactcert"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// Txn exists purely to make it easier to write a
+// transaction.Transaction in Go source.
+type Txn struct {
+ Type protocol.TxType
+
+ Sender basics.Address
+ Fee basics.MicroAlgos
+ FirstValid basics.Round
+ LastValid basics.Round
+ Note []byte
+ GenesisID string
+ GenesisHash crypto.Digest
+ Group crypto.Digest
+ Lease [32]byte
+ RekeyTo basics.Address
+
+ VotePK crypto.OneTimeSignatureVerifier
+ SelectionPK crypto.VRFVerifier
+ VoteFirst basics.Round
+ VoteLast basics.Round
+ VoteKeyDilution uint64
+ Nonparticipation bool
+
+ Receiver basics.Address
+ Amount basics.MicroAlgos
+ CloseRemainderTo basics.Address
+
+ ConfigAsset basics.AssetIndex
+ AssetParams basics.AssetParams
+
+ XferAsset basics.AssetIndex
+ AssetAmount uint64
+ AssetSender basics.Address
+ AssetReceiver basics.Address
+ AssetCloseTo basics.Address
+
+ FreezeAccount basics.Address
+ FreezeAsset basics.AssetIndex
+ AssetFrozen bool
+
+ ApplicationID basics.AppIndex
+ OnCompletion transactions.OnCompletion
+ ApplicationArgs [][]byte
+ Accounts []basics.Address
+ ForeignApps []basics.AppIndex
+ ForeignAssets []basics.AssetIndex
+ LocalStateSchema basics.StateSchema
+ GlobalStateSchema basics.StateSchema
+ ApprovalProgram []byte
+ ClearStateProgram []byte
+ ExtraProgramPages uint32
+
+ CertRound basics.Round
+ CertType protocol.CompactCertType
+ Cert compactcert.Cert
+}
+
+// FillDefaults populates some obvious defaults from config params,
+// unless they have already been set.
+func (tx *Txn) FillDefaults(params config.ConsensusParams) {
+ if tx.Fee.IsZero() {
+ tx.Fee = basics.MicroAlgos{Raw: params.MinTxnFee}
+ }
+ if tx.LastValid == 0 {
+ tx.LastValid = tx.FirstValid + basics.Round(params.MaxTxnLife)
+ }
+}
+
+// Txn produces a transactions.Transaction from the fields in this Txn
+func (tx Txn) Txn() transactions.Transaction {
+ return transactions.Transaction{
+ Type: tx.Type,
+ Header: transactions.Header{
+ Sender: tx.Sender,
+ Fee: tx.Fee,
+ FirstValid: tx.FirstValid,
+ LastValid: tx.LastValid,
+ Note: tx.Note,
+ GenesisID: tx.GenesisID,
+ GenesisHash: tx.GenesisHash,
+ Group: tx.Group,
+ Lease: tx.Lease,
+ RekeyTo: tx.RekeyTo,
+ },
+ KeyregTxnFields: transactions.KeyregTxnFields{
+ VotePK: tx.VotePK,
+ SelectionPK: tx.SelectionPK,
+ VoteFirst: tx.VoteFirst,
+ VoteLast: tx.VoteLast,
+ VoteKeyDilution: tx.VoteKeyDilution,
+ Nonparticipation: tx.Nonparticipation,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: tx.Receiver,
+ Amount: tx.Amount,
+ CloseRemainderTo: tx.CloseRemainderTo,
+ },
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ ConfigAsset: tx.ConfigAsset,
+ AssetParams: tx.AssetParams,
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: tx.XferAsset,
+ AssetAmount: tx.AssetAmount,
+ AssetSender: tx.AssetSender,
+ AssetReceiver: tx.AssetReceiver,
+ AssetCloseTo: tx.AssetCloseTo,
+ },
+ AssetFreezeTxnFields: transactions.AssetFreezeTxnFields{
+ FreezeAccount: tx.FreezeAccount,
+ FreezeAsset: tx.FreezeAsset,
+ AssetFrozen: tx.AssetFrozen,
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: tx.ApplicationID,
+ OnCompletion: tx.OnCompletion,
+ ApplicationArgs: tx.ApplicationArgs,
+ Accounts: tx.Accounts,
+ ForeignApps: tx.ForeignApps,
+ ForeignAssets: tx.ForeignAssets,
+ LocalStateSchema: tx.LocalStateSchema,
+ GlobalStateSchema: tx.GlobalStateSchema,
+ ApprovalProgram: tx.ApprovalProgram,
+ ClearStateProgram: tx.ClearStateProgram,
+ ExtraProgramPages: tx.ExtraProgramPages,
+ },
+ CompactCertTxnFields: transactions.CompactCertTxnFields{
+ CertRound: tx.CertRound,
+ CertType: tx.CertType,
+ Cert: tx.Cert,
+ },
+ }
+}
+
+// SignedTxn produces a unsigned, transactions.SignedTransaction from
+// the fields in this Txn. This seemingly pointless operation exists,
+// again, for convenience when driving tests.
+func (tx Txn) SignedTxn() transactions.SignedTxn {
+ return transactions.SignedTxn{Txn: tx.Txn()}
+}
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index fd72cc75b..fe2719620 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -2033,13 +2033,13 @@ func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err erro
return off, nil
}
-// commitSyncer is the syncer go-routine function which perform the database updates. Internally, it dequeues deferedCommits and
+// commitSyncer is the syncer go-routine function which perform the database updates. Internally, it dequeues deferredCommits and
// send the tasks to commitRound for completing the operation.
-func (au *accountUpdates) commitSyncer(deferedCommits chan deferredCommit) {
+func (au *accountUpdates) commitSyncer(deferredCommits chan deferredCommit) {
defer close(au.commitSyncerClosed)
for {
select {
- case committedOffset, ok := <-deferedCommits:
+ case committedOffset, ok := <-deferredCommits:
if !ok {
return
}
@@ -2049,7 +2049,7 @@ func (au *accountUpdates) commitSyncer(deferedCommits chan deferredCommit) {
drained := false
for !drained {
select {
- case <-deferedCommits:
+ case <-deferredCommits:
au.accountsWriting.Done()
default:
drained = true
diff --git a/ledger/appcow.go b/ledger/appcow.go
index 4b41c92a5..5cf3b568c 100644
--- a/ledger/appcow.go
+++ b/ledger/appcow.go
@@ -247,6 +247,12 @@ func (cb *roundCowState) AllocateApp(addr basics.Address, aidx basics.AppIndex,
Creator: addr,
Created: true,
}
+ } else {
+ aa := ledgercore.AccountApp{
+ Address: addr,
+ App: aidx,
+ }
+ cb.mods.ModifiedAppLocalStates[aa] = true
}
cb.trackCreatable(basics.CreatableIndex(aidx))
@@ -282,6 +288,12 @@ func (cb *roundCowState) DeallocateApp(addr basics.Address, aidx basics.AppIndex
Creator: addr,
Created: false,
}
+ } else {
+ aa := ledgercore.AccountApp{
+ Address: addr,
+ App: aidx,
+ }
+ cb.mods.ModifiedAppLocalStates[aa] = false
}
return nil
@@ -444,6 +456,12 @@ func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, globa
return nil // note: deletion cannot cause us to violate maxCount
}
+// AppendLog adds message in logs. idx is expected to be an index in txn.ForeignApps
+func (cb *roundCowState) AppendLog(idx uint64, value string) error {
+ cb.logs = append(cb.logs, basics.LogItem{ID: idx, Message: value})
+ return nil
+}
+
// MakeDebugBalances creates a ledger suitable for dryrun and debugger
func MakeDebugBalances(l ledgerForCowBase, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
base := &roundCowBase{
@@ -490,8 +508,9 @@ func (cb *roundCowState) StatefulEval(params logic.EvalParams, aidx basics.AppIn
return pass, evalDelta, nil
}
-// BuildEvalDelta converts internal sdeltas into basics.EvalDelta
+// BuildEvalDelta converts internal sdeltas and logs into basics.EvalDelta
func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (evalDelta basics.EvalDelta, err error) {
+ // sdeltas
foundGlobal := false
for addr, smod := range cb.sdeltas {
for aapp, sdelta := range smod {
@@ -538,6 +557,10 @@ func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.
}
}
}
+
+ // logs
+ evalDelta.Logs = cb.logs
+
return
}
diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go
index 7eb467dcc..92c226d5a 100644
--- a/ledger/appcow_test.go
+++ b/ledger/appcow_test.go
@@ -599,6 +599,36 @@ func TestCowBuildDelta(t *testing.T) {
},
ed,
)
+
+ // check logDelta is added
+ cow.logs = []basics.LogItem{{ID: 0, Message: "hello,world"}}
+ cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{
+ action: remainAllocAction,
+ kvCow: stateDelta{
+ "key1": valueDelta{
+ old: basics.TealValue{Type: basics.TealUintType, Uint: 1},
+ new: basics.TealValue{Type: basics.TealUintType, Uint: 2},
+ oldExists: true,
+ newExists: true,
+ },
+ },
+ accountIdx: 1,
+ }
+ ed, err = cow.BuildEvalDelta(aidx, &txn)
+ a.NoError(err)
+ a.Equal(
+ basics.EvalDelta{
+ GlobalDelta: basics.StateDelta(nil),
+ LocalDeltas: map[uint64]basics.StateDelta{
+ 0: {
+ "key1": basics.ValueDelta{Action: basics.SetUintAction, Uint: 2},
+ },
+ },
+ Logs: []basics.LogItem{{ID: 0, Message: "hello,world"}},
+ },
+ ed,
+ )
+
}
func TestCowDeltaSerialize(t *testing.T) {
@@ -1328,3 +1358,19 @@ func TestCowDelKey(t *testing.T) {
a.Panics(func() { c.DelKey(getRandomAddress(a), aidx, false, key, 0) })
a.Panics(func() { c.DelKey(addr, aidx+1, false, key, 0) })
}
+func TestCowAppendLog(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := getRandomAddress(a)
+ aidx := basics.AppIndex(0)
+ c := getCow([]modsData{
+ {addr, basics.CreatableIndex(aidx), basics.AppCreatable},
+ })
+
+ c.logs = []basics.LogItem{}
+ err := c.AppendLog(uint64(aidx), "val")
+ a.NoError(err)
+ a.Equal(len(c.logs), 1)
+}
diff --git a/ledger/applications.go b/ledger/applications.go
index e03604a59..241b9c87b 100644
--- a/ledger/applications.go
+++ b/ledger/applications.go
@@ -40,6 +40,8 @@ type cowForLogicLedger interface {
SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error
DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error
+ AppendLog(idx uint64, value string) error
+
round() basics.Round
prevTimestamp() int64
allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
@@ -235,3 +237,14 @@ func (al *logicLedger) DelGlobal(key string) error {
func (al *logicLedger) GetDelta(txn *transactions.Transaction) (evalDelta basics.EvalDelta, err error) {
return al.cow.BuildEvalDelta(al.aidx, txn)
}
+
+func (al *logicLedger) AppendLog(txn *transactions.Transaction, value string) error {
+ idx, err := txn.IndexByAppID(txn.ApplicationID)
+ if idx != 0 {
+ return fmt.Errorf("index offset is not 0. logging is allowed for current app only")
+ }
+ if err != nil {
+ return err
+ }
+ return al.cow.AppendLog(idx, value)
+}
diff --git a/ledger/applications_test.go b/ledger/applications_test.go
index 14bdec0c7..f64eb5881 100644
--- a/ledger/applications_test.go
+++ b/ledger/applications_test.go
@@ -61,6 +61,7 @@ type mockCowForLogicLedger struct {
brs map[basics.Address]basics.AccountData
stores map[storeLocator]basics.TealKeyValue
tcs map[int]basics.CreatableIndex
+ logs []basics.LogItem
}
func (c *mockCowForLogicLedger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
@@ -126,6 +127,11 @@ func (c *mockCowForLogicLedger) allocated(addr basics.Address, aidx basics.AppIn
return found, nil
}
+func (c *mockCowForLogicLedger) AppendLog(aidx uint64, value string) error {
+ c.logs = append(c.logs, basics.LogItem{ID: aidx, Message: value})
+ return nil
+}
+
func newCowMock(creatables []modsData) *mockCowForLogicLedger {
var m mockCowForLogicLedger
m.cr = make(map[creatableLocator]basics.Address, len(creatables))
@@ -450,6 +456,14 @@ return`
l, err := OpenLedger(logging.Base(), "TestAppAccountData", true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
+ l.accts.ctxCancel() // force commitSyncer to exit
+
+ // wait commitSyncer to exit
+ // the test calls commitRound directly and does not need commitSyncer/committedUpTo
+ select {
+ case <-l.accts.commitSyncerClosed:
+ break
+ }
txHeader := transactions.Header{
Sender: creator,
@@ -506,7 +520,7 @@ return`
// save data into DB and write into local state
l.accts.accountsWriting.Add(1)
l.accts.commitRound(3, 0, 0)
- l.reloadLedger()
+ l.accts.accountsWriting.Wait()
appCallFields = transactions.ApplicationCallTxnFields{
OnCompletion: 0,
@@ -527,7 +541,7 @@ return`
// save data into DB
l.accts.accountsWriting.Add(1)
l.accts.commitRound(1, 3, 0)
- l.reloadLedger()
+ l.accts.accountsWriting.Wait()
// dump accounts
var rowid int64
@@ -660,6 +674,14 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
+ l.accts.ctxCancel() // force commitSyncer to exit
+
+ // wait commitSyncer to exit
+ // the test calls commitRound directly and does not need commitSyncer/committedUpTo
+ select {
+ case <-l.accts.commitSyncerClosed:
+ break
+ }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -727,7 +749,7 @@ return`
// save data into DB and write into local state
l.accts.accountsWriting.Add(1)
l.accts.commitRound(3, 0, 0)
- l.reloadLedger()
+ l.accts.accountsWriting.Wait()
// check first write
blk, err := l.Block(2)
@@ -783,7 +805,7 @@ return`
// save data into DB
l.accts.accountsWriting.Add(1)
l.accts.commitRound(2, 3, 0)
- l.reloadLedger()
+ l.accts.accountsWriting.Wait()
// check first write
blk, err = l.Block(4)
@@ -907,6 +929,14 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
+ l.accts.ctxCancel() // force commitSyncer to exit
+
+ // wait commitSyncer to exit
+ // the test calls commitRound directly and does not need commitSyncer/committedUpTo
+ select {
+ case <-l.accts.commitSyncerClosed:
+ break
+ }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1002,7 +1032,7 @@ return`
// save data into DB and write into local state
l.accts.accountsWriting.Add(1)
l.accts.commitRound(3, 0, 0)
- l.reloadLedger()
+ l.accts.accountsWriting.Wait()
// check first write
blk, err = l.Block(2)
@@ -1058,6 +1088,14 @@ return`
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
+ l.accts.ctxCancel() // force commitSyncer to exit
+
+ // wait commitSyncer to exit
+ // the test calls commitRound directly and does not need commitSyncer/committedUpTo
+ select {
+ case <-l.accts.commitSyncerClosed:
+ break
+ }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1134,7 +1172,7 @@ return`
// save data into DB and write into local state
l.accts.accountsWriting.Add(1)
l.accts.commitRound(2, 0, 0)
- l.reloadLedger()
+ l.accts.accountsWriting.Wait()
// check first write
blk, err = l.Block(1)
@@ -1251,6 +1289,14 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
+ l.accts.ctxCancel() // force commitSyncer to exit
+
+ // wait commitSyncer to exit
+ // the test calls commitRound directly and does not need commitSyncer/committedUpTo
+ select {
+ case <-l.accts.commitSyncerClosed:
+ break
+ }
genesisID := t.Name()
txHeader := transactions.Header{
@@ -1313,7 +1359,7 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
// save data into DB and write into local state
l.accts.accountsWriting.Add(1)
l.accts.commitRound(2, 0, 0)
- l.reloadLedger()
+ l.accts.accountsWriting.Wait()
// check first write
blk, err := l.Block(2)
@@ -1324,3 +1370,33 @@ func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accoun
a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[accountIdx], "lk1")
a.Equal(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[accountIdx]["lk1"].Bytes, "local1")
}
+
+func TestLogicLedgerAppendLog(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(t)
+
+ addr := getRandomAddress(a)
+ aidx := basics.AppIndex(1)
+ c := newCowMock([]modsData{
+ {addr, basics.CreatableIndex(1), basics.AppCreatable},
+ })
+ l, err := newLogicLedger(c, aidx)
+ a.NoError(err)
+ a.NotNil(l)
+
+ appCallFields := transactions.ApplicationCallTxnFields{
+ OnCompletion: transactions.NoOpOC,
+ ApplicationID: 0,
+ Accounts: []basics.Address{},
+ }
+ appCall := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ ApplicationCallTxnFields: appCallFields,
+ }
+
+ err = l.AppendLog(&appCall, "a")
+ a.NoError(err)
+ a.Equal(len(c.logs), 1)
+ a.Equal(c.logs[0].Message, "a")
+}
diff --git a/ledger/apply/application.go b/ledger/apply/application.go
index 99e4fc3d6..644c8e396 100644
--- a/ledger/apply/application.go
+++ b/ledger/apply/application.go
@@ -118,7 +118,7 @@ func createApplication(ac *transactions.ApplicationCallTxnFields, balances Balan
// Update the cached TotalExtraAppPages for this account, used
// when computing MinBalance
totalExtraPages := record.TotalExtraAppPages
- totalExtraPages += ac.ExtraProgramPages
+ totalExtraPages = basics.AddSaturate32(totalExtraPages, ac.ExtraProgramPages)
record.TotalExtraAppPages = totalExtraPages
// Write back to the creator's balance record
@@ -143,6 +143,8 @@ func deleteApplication(balances Balances, creator basics.Address, appIdx basics.
return err
}
+ record.AppParams = cloneAppParams(record.AppParams)
+
// Update the TotalAppSchema used for MinBalance calculation,
// since the creator no longer has to store the GlobalState
totalSchema := record.TotalAppSchema
@@ -150,18 +152,20 @@ func deleteApplication(balances Balances, creator basics.Address, appIdx basics.
totalSchema = totalSchema.SubSchema(globalSchema)
record.TotalAppSchema = totalSchema
- // Delete the AppParams
- record.AppParams = cloneAppParams(record.AppParams)
- delete(record.AppParams, appIdx)
-
// Delete app's extra program pages
totalExtraPages := record.TotalExtraAppPages
if totalExtraPages > 0 {
- extraPages := record.AppParams[appIdx].ExtraProgramPages
- totalExtraPages -= extraPages
+ proto := balances.ConsensusParams()
+ if proto.EnableExtraPagesOnAppUpdate {
+ extraPages := record.AppParams[appIdx].ExtraProgramPages
+ totalExtraPages = basics.SubSaturate32(totalExtraPages, extraPages)
+ }
record.TotalExtraAppPages = totalExtraPages
}
+ // Delete the AppParams
+ delete(record.AppParams, appIdx)
+
err = balances.Put(creator, record)
if err != nil {
return err
@@ -189,10 +193,17 @@ func updateApplication(ac *transactions.ApplicationCallTxnFields, balances Balan
proto := balances.ConsensusParams()
// when proto.EnableExtraPageOnAppUpdate is false, WellFormed rejects all updates with a multiple-page program
if proto.EnableExtraPagesOnAppUpdate {
- allowed := int(1+params.ExtraProgramPages) * proto.MaxAppProgramLen
- actual := len(ac.ApprovalProgram) + len(ac.ClearStateProgram)
- if actual > allowed {
- return fmt.Errorf("updateApplication app programs too long, %d. max total len %d bytes", actual, allowed)
+ lap := len(ac.ApprovalProgram)
+ lcs := len(ac.ClearStateProgram)
+ pages := int(1 + params.ExtraProgramPages)
+ if lap > pages*proto.MaxAppProgramLen {
+ return fmt.Errorf("updateApplication approval program too long. max len %d bytes", pages*proto.MaxAppProgramLen)
+ }
+ if lcs > pages*proto.MaxAppProgramLen {
+ return fmt.Errorf("updateApplication clear state program too long. max len %d bytes", pages*proto.MaxAppProgramLen)
+ }
+ if lap+lcs > pages*proto.MaxAppTotalProgramLen {
+ return fmt.Errorf("updateApplication app programs too long, %d. max total len %d bytes", lap+lcs, pages*proto.MaxAppTotalProgramLen)
}
}
diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go
index e47e82484..3251f0470 100644
--- a/ledger/apply/application_test.go
+++ b/ledger/apply/application_test.go
@@ -242,6 +242,7 @@ func (b *testBalancesPass) Put(addr basics.Address, ad basics.AccountData) error
func (b *testBalancesPass) ConsensusParams() config.ConsensusParams {
return b.proto
}
+
func (b *testBalancesPass) Allocate(addr basics.Address, aidx basics.AppIndex, global bool, space basics.StateSchema) error {
b.allocatedAppIdx = aidx
return nil
@@ -268,6 +269,10 @@ func (b *testBalances) SetProto(name protocol.ConsensusVersion) {
b.proto = config.Consensus[name]
}
+func (b *testBalances) SetParams(params config.ConsensusParams) {
+ b.proto = params
+}
+
type testEvaluator struct {
pass bool
delta basics.EvalDelta
@@ -835,6 +840,15 @@ func TestAppCallClearState(t *testing.T) {
a.Equal(0, len(br.AppLocalStates))
a.Equal(basics.StateSchema{}, br.TotalAppSchema)
a.Equal(basics.EvalDelta{GlobalDelta: gd}, ad.EvalDelta)
+
+ b.ResetWrites()
+ b.pass = true
+ b.err = nil
+ logs := []basics.LogItem{{ID: 0, Message: "a"}}
+ b.delta = basics.EvalDelta{Logs: []basics.LogItem{{ID: 0, Message: "a"}}}
+ err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
+ a.NoError(err)
+ a.Equal(basics.EvalDelta{Logs: logs}, ad.EvalDelta)
}
func TestAppCallApplyCloseOut(t *testing.T) {
@@ -917,6 +931,11 @@ func TestAppCallApplyCloseOut(t *testing.T) {
a.Equal(basics.EvalDelta{GlobalDelta: gd}, ad.EvalDelta)
a.Equal(basics.StateSchema{NumUint: 0}, br.TotalAppSchema)
+ logs := []basics.LogItem{{ID: 0, Message: "a"}}
+ b.delta = basics.EvalDelta{Logs: []basics.LogItem{{ID: 0, Message: "a"}}}
+ err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
+ a.NoError(err)
+ a.Equal(basics.EvalDelta{Logs: logs}, ad.EvalDelta)
}
func TestAppCallApplyUpdate(t *testing.T) {
@@ -984,19 +1003,12 @@ func TestAppCallApplyUpdate(t *testing.T) {
a.Equal([]byte{2}, br.AppParams[appIdx].ClearStateProgram)
a.Equal(basics.EvalDelta{}, ad.EvalDelta)
- // check app program len
- appr := make([]byte, 6050)
+ //check program len check happens in future consensus proto version
+ b.SetProto(protocol.ConsensusFuture)
+ proto = b.ConsensusParams()
+ ep.Proto = &proto
- for i := range appr {
- appr[i] = 2
- }
- appr[0] = 4
- ac = transactions.ApplicationCallTxnFields{
- ApplicationID: appIdx,
- OnCompletion: transactions.UpdateApplicationOC,
- ApprovalProgram: appr,
- ClearStateProgram: []byte{2},
- }
+ // check app program len
params = basics.AppParams{
ApprovalProgram: []byte{1},
StateSchemas: basics.StateSchemas{
@@ -1018,21 +1030,40 @@ func TestAppCallApplyUpdate(t *testing.T) {
b.balances[creator] = cp
b.appCreators = map[basics.AppIndex]basics.Address{appIdx: creator}
- //check program len check happens in future consensus proto version
- b.SetProto(protocol.ConsensusFuture)
- proto = b.ConsensusParams()
- ep.Proto = &proto
-
- b.pass = true
+ logs := []basics.LogItem{{ID: 0, Message: "a"}}
+ b.delta = basics.EvalDelta{Logs: []basics.LogItem{{ID: 0, Message: "a"}}}
err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
- a.Contains(err.Error(), "updateApplication app programs too long")
+ a.NoError(err)
+ a.Equal(basics.EvalDelta{Logs: logs}, ad.EvalDelta)
// check extraProgramPages is used
- appr = make([]byte, 3072)
+ appr := make([]byte, 2*proto.MaxAppProgramLen+1)
+ appr[0] = 4 // version 4
+
+ var tests = []struct {
+ name string
+ approval []byte
+ clear []byte
+ }{
+ {"approval", appr, []byte{2}},
+ {"clear state", []byte{2}, appr},
+ }
+ for _, test := range tests {
+ ac = transactions.ApplicationCallTxnFields{
+ ApplicationID: appIdx,
+ OnCompletion: transactions.UpdateApplicationOC,
+ ApprovalProgram: test.approval,
+ ClearStateProgram: test.clear,
+ }
- for i := range appr {
- appr[i] = 2
+ b.pass = true
+ err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("updateApplication %s program too long", test.name))
}
+
+ // check extraProgramPages allows length of proto.MaxAppProgramLen + 1
+ appr = make([]byte, proto.MaxAppProgramLen+1)
appr[0] = 4
ac = transactions.ApplicationCallTxnFields{
ApplicationID: appIdx,
@@ -1044,6 +1075,18 @@ func TestAppCallApplyUpdate(t *testing.T) {
err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
a.NoError(err)
+ // check extraProgramPages is used and long sum rejected
+ ac = transactions.ApplicationCallTxnFields{
+ ApplicationID: appIdx,
+ OnCompletion: transactions.UpdateApplicationOC,
+ ApprovalProgram: appr,
+ ClearStateProgram: appr,
+ }
+ b.pass = true
+ err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
+ a.Error(err)
+ a.Contains(err.Error(), "updateApplication app programs too long")
+
}
func TestAppCallApplyDelete(t *testing.T) {
@@ -1065,6 +1108,7 @@ func TestAppCallApplyDelete(t *testing.T) {
StateSchemas: basics.StateSchemas{
GlobalStateSchema: basics.StateSchema{NumUint: 1},
},
+ ExtraProgramPages: 1,
}
h := transactions.Header{
Sender: sender,
@@ -1074,15 +1118,19 @@ func TestAppCallApplyDelete(t *testing.T) {
var b testBalances
b.balances = make(map[basics.Address]basics.AccountData)
+ // cbr is to ensure the original balance record is not modified but copied when updated in apply
cbr := basics.AccountData{
- AppParams: map[basics.AppIndex]basics.AppParams{appIdx: params},
+ AppParams: map[basics.AppIndex]basics.AppParams{appIdx: params},
+ TotalExtraAppPages: 1,
}
cp := basics.AccountData{
- AppParams: map[basics.AppIndex]basics.AppParams{appIdx: params},
+ AppParams: map[basics.AppIndex]basics.AppParams{appIdx: params},
+ TotalExtraAppPages: 1,
}
b.balances[creator] = cp
b.appCreators = map[basics.AppIndex]basics.Address{appIdx: creator}
+ // check if it fails nothing changes
b.SetProto(protocol.ConsensusFuture)
proto := b.ConsensusParams()
ep.Proto = &proto
@@ -1096,7 +1144,11 @@ func TestAppCallApplyDelete(t *testing.T) {
a.Equal(cbr, br)
a.Equal(basics.EvalDelta{}, ad.EvalDelta)
- // check deletion on empty balance record - happy case
+ // check calculation on ConsensusV28. TotalExtraAppPages does not change
+ b.SetProto(protocol.ConsensusV28)
+ proto = b.ConsensusParams()
+ ep.Proto = &proto
+
b.pass = true
b.balances[sender] = basics.AccountData{}
err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
@@ -1109,7 +1161,48 @@ func TestAppCallApplyDelete(t *testing.T) {
a.Equal(basics.AppParams{}, br.AppParams[appIdx])
a.Equal(basics.StateSchema{}, br.TotalAppSchema)
a.Equal(basics.EvalDelta{}, ad.EvalDelta)
- a.Equal(uint32(0), br.TotalExtraAppPages)
+ a.Equal(uint32(1), br.TotalExtraAppPages)
+ b.ResetWrites()
+
+ b.SetProto(protocol.ConsensusFuture)
+ proto = b.ConsensusParams()
+ ep.Proto = &proto
+
+ // check deletion
+ for initTotalExtraPages := uint32(0); initTotalExtraPages < 3; initTotalExtraPages++ {
+ cbr = basics.AccountData{
+ AppParams: map[basics.AppIndex]basics.AppParams{appIdx: params},
+ TotalExtraAppPages: initTotalExtraPages,
+ }
+ cp := basics.AccountData{
+ AppParams: map[basics.AppIndex]basics.AppParams{appIdx: params},
+ TotalExtraAppPages: initTotalExtraPages,
+ }
+ b.balances[creator] = cp
+ b.pass = true
+ b.balances[sender] = basics.AccountData{}
+ err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
+ a.NoError(err)
+ a.Equal(appIdx, b.deAllocatedAppIdx)
+ a.Equal(1, b.put)
+ br = b.balances[creator]
+ a.Equal(cbr, br)
+ br = b.putBalances[creator]
+ a.Equal(basics.AppParams{}, br.AppParams[appIdx])
+ a.Equal(basics.StateSchema{}, br.TotalAppSchema)
+ a.Equal(basics.EvalDelta{}, ad.EvalDelta)
+ if initTotalExtraPages <= params.ExtraProgramPages {
+ a.Equal(uint32(0), br.TotalExtraAppPages)
+ } else {
+ a.Equal(initTotalExtraPages-1, br.TotalExtraAppPages)
+ }
+ b.ResetWrites()
+ }
+ logs := []basics.LogItem{{ID: 0, Message: "a"}}
+ b.delta = basics.EvalDelta{Logs: []basics.LogItem{{ID: 0, Message: "a"}}}
+ err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
+ a.NoError(err)
+ a.Equal(basics.EvalDelta{Logs: logs}, ad.EvalDelta)
}
func TestAppCallApplyCreateClearState(t *testing.T) {
@@ -1204,4 +1297,11 @@ func TestAppCallApplyCreateDelete(t *testing.T) {
a.Equal(basics.EvalDelta{GlobalDelta: gd}, ad.EvalDelta)
br := b.balances[creator]
a.Equal(basics.AppParams{}, br.AppParams[appIdx])
+
+ logs := []basics.LogItem{{ID: 0, Message: "a"}}
+ b.delta = basics.EvalDelta{Logs: []basics.LogItem{{ID: 0, Message: "a"}}}
+ err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter)
+ a.NoError(err)
+ a.Equal(basics.EvalDelta{Logs: logs}, ad.EvalDelta)
+
}
diff --git a/ledger/apply/asset.go b/ledger/apply/asset.go
index 4c74649b7..1fe3c6fac 100644
--- a/ledger/apply/asset.go
+++ b/ledger/apply/asset.go
@@ -101,7 +101,11 @@ func AssetConfig(cc transactions.AssetConfigTxnFields, header transactions.Heade
}
// Tell the cow what asset we created
- return balances.AllocateAsset(header.Sender, newidx, true)
+ err = balances.AllocateAsset(header.Sender, newidx, true)
+ if err != nil {
+ return err
+ }
+ return balances.AllocateAsset(header.Sender, newidx, false)
}
// Re-configuration and destroying must be done by the manager key.
@@ -134,6 +138,10 @@ func AssetConfig(cc transactions.AssetConfigTxnFields, header transactions.Heade
if err != nil {
return err
}
+ err = balances.DeallocateAsset(creator, cc.ConfigAsset, false)
+ if err != nil {
+ return err
+ }
delete(record.Assets, cc.ConfigAsset)
delete(record.AssetParams, cc.ConfigAsset)
@@ -269,6 +277,11 @@ func AssetTransfer(ct transactions.AssetTransferTxnFields, header transactions.H
if err != nil {
return err
}
+
+ err = balances.AllocateAsset(source, ct.XferAsset, false)
+ if err != nil {
+ return err
+ }
}
}
@@ -363,6 +376,11 @@ func AssetTransfer(ct transactions.AssetTransferTxnFields, header transactions.H
if err != nil {
return err
}
+
+ err = balances.DeallocateAsset(source, ct.XferAsset, false)
+ if err != nil {
+ return err
+ }
}
return nil
diff --git a/ledger/assetcow.go b/ledger/assetcow.go
index d84ae3221..ca35788dd 100644
--- a/ledger/assetcow.go
+++ b/ledger/assetcow.go
@@ -30,6 +30,12 @@ func (cs *roundCowState) AllocateAsset(addr basics.Address, index basics.AssetIn
}
cs.trackCreatable(basics.CreatableIndex(index))
+ } else {
+ aa := ledgercore.AccountAsset{
+ Address: addr,
+ Asset: index,
+ }
+ cs.mods.ModifiedAssetHoldings[aa] = true
}
return nil
@@ -42,6 +48,12 @@ func (cs *roundCowState) DeallocateAsset(addr basics.Address, index basics.Asset
Creator: addr,
Created: false,
}
+ } else {
+ aa := ledgercore.AccountAsset{
+ Address: addr,
+ Asset: index,
+ }
+ cs.mods.ModifiedAssetHoldings[aa] = false
}
return nil
diff --git a/ledger/cow.go b/ledger/cow.go
index 56ff9e33b..c5595169b 100644
--- a/ledger/cow.go
+++ b/ledger/cow.go
@@ -63,6 +63,9 @@ type roundCowState struct {
// must be incorporated into mods.accts before passing deltas forward
sdeltas map[basics.Address]map[storagePtr]*storageDelta
+ // logs populated in AppCall transaction
+ logs []basics.LogItem
+
// either or not maintain compatibility with original app refactoring behavior
// this is needed for generating old eval delta in new code
compatibilityMode bool
@@ -83,6 +86,7 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, prevTimest
mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp, hint, 0),
sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta),
trackedCreatables: make(map[int]basics.CreatableIndex),
+ logs: make([]basics.LogItem, 0),
}
// compatibilityMode retains producing application' eval deltas under the following rule:
@@ -263,6 +267,12 @@ func (cb *roundCowState) commitToParent() {
}
}
cb.commitParent.mods.CompactCertNext = cb.mods.CompactCertNext
+ for index, created := range cb.mods.ModifiedAssetHoldings {
+ cb.commitParent.mods.ModifiedAssetHoldings[index] = created
+ }
+ for index, created := range cb.mods.ModifiedAppLocalStates {
+ cb.commitParent.mods.ModifiedAppLocalStates[index] = created
+ }
}
func (cb *roundCowState) modifiedAccounts() []basics.Address {
diff --git a/ledger/eval.go b/ledger/eval.go
index 54cdeedd7..fbd287c5c 100644
--- a/ledger/eval.go
+++ b/ledger/eval.go
@@ -672,12 +672,18 @@ func (eval *BlockEvaluator) prepareEvalParams(txgroup []transactions.SignedTxnWi
var groupNoAD []transactions.SignedTxn
var pastSideEffects []logic.EvalSideEffects
var minTealVersion uint64
+ pooledApplicationBudget := uint64(0)
res = make([]*logic.EvalParams, len(txgroup))
for i, txn := range txgroup {
// Ignore any non-ApplicationCall transactions
if txn.SignedTxn.Txn.Type != protocol.ApplicationCallTx {
continue
}
+ if eval.proto.EnableAppCostPooling {
+ pooledApplicationBudget += uint64(eval.proto.MaxAppProgramCost)
+ } else {
+ pooledApplicationBudget = uint64(eval.proto.MaxAppProgramCost)
+ }
// Initialize side effects and group without ApplyData lazily
if groupNoAD == nil {
@@ -690,12 +696,13 @@ func (eval *BlockEvaluator) prepareEvalParams(txgroup []transactions.SignedTxnWi
}
res[i] = &logic.EvalParams{
- Txn: &groupNoAD[i],
- Proto: &eval.proto,
- TxnGroup: groupNoAD,
- GroupIndex: i,
- PastSideEffects: pastSideEffects,
- MinTealVersion: &minTealVersion,
+ Txn: &groupNoAD[i],
+ Proto: &eval.proto,
+ TxnGroup: groupNoAD,
+ GroupIndex: i,
+ PastSideEffects: pastSideEffects,
+ MinTealVersion: &minTealVersion,
+ PooledApplicationBudget: &pooledApplicationBudget,
}
}
return
diff --git a/ledger/eval_test.go b/ledger/eval_test.go
index 9d3315a1b..0f2aea2b3 100644
--- a/ledger/eval_test.go
+++ b/ledger/eval_test.go
@@ -22,11 +22,14 @@ import (
"fmt"
"os"
"path/filepath"
+ "reflect"
"runtime/pprof"
+ "strings"
"testing"
"time"
"github.com/algorand/go-deadlock"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/agreement"
@@ -37,6 +40,8 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -339,9 +344,12 @@ func TestPrepareEvalParams(t *testing.T) {
TimeStamp: 1234,
Round: 2345,
},
- proto: config.ConsensusParams{
- Application: true,
- },
+ }
+
+ params := []config.ConsensusParams{
+ config.ConsensusParams{Application: true, MaxAppProgramCost: 700},
+ config.Consensus[protocol.ConsensusV29],
+ config.Consensus[protocol.ConsensusFuture],
}
// Create some sample transactions
@@ -383,48 +391,63 @@ func TestPrepareEvalParams(t *testing.T) {
// indicates if prepareAppEvaluators should return a non-nil
// appTealEvaluator for the txn at index i
expected []bool
+
+ numAppCalls int
+ // Used for checking transitive pointer equality in app calls
+ // If there are no app calls in the group, it is set to -1
+ firstAppCallIndex int
}
// Create some groups with these transactions
cases := []evalTestCase{
- {[]transactions.SignedTxnWithAD{payment}, []bool{false}},
- {[]transactions.SignedTxnWithAD{appcall1}, []bool{true}},
- {[]transactions.SignedTxnWithAD{payment, payment}, []bool{false, false}},
- {[]transactions.SignedTxnWithAD{appcall1, payment}, []bool{true, false}},
- {[]transactions.SignedTxnWithAD{payment, appcall1}, []bool{false, true}},
- {[]transactions.SignedTxnWithAD{appcall1, appcall2}, []bool{true, true}},
- {[]transactions.SignedTxnWithAD{appcall1, appcall2, appcall1}, []bool{true, true, true}},
- {[]transactions.SignedTxnWithAD{payment, appcall1, payment}, []bool{false, true, false}},
- {[]transactions.SignedTxnWithAD{appcall1, payment, appcall2}, []bool{true, false, true}},
- }
-
- for i, testCase := range cases {
- t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) {
- res := eval.prepareEvalParams(testCase.group)
- require.Equal(t, len(res), len(testCase.group))
-
- // Compute the expected transaction group without ApplyData for
- // the test case
- expGroupNoAD := make([]transactions.SignedTxn, len(testCase.group))
- for j := range testCase.group {
- expGroupNoAD[j] = testCase.group[j].SignedTxn
- }
+ {[]transactions.SignedTxnWithAD{payment}, []bool{false}, 0, -1},
+ {[]transactions.SignedTxnWithAD{appcall1}, []bool{true}, 1, 0},
+ {[]transactions.SignedTxnWithAD{payment, payment}, []bool{false, false}, 0, -1},
+ {[]transactions.SignedTxnWithAD{appcall1, payment}, []bool{true, false}, 1, 0},
+ {[]transactions.SignedTxnWithAD{payment, appcall1}, []bool{false, true}, 1, 1},
+ {[]transactions.SignedTxnWithAD{appcall1, appcall2}, []bool{true, true}, 2, 0},
+ {[]transactions.SignedTxnWithAD{appcall1, appcall2, appcall1}, []bool{true, true, true}, 3, 0},
+ {[]transactions.SignedTxnWithAD{payment, appcall1, payment}, []bool{false, true, false}, 1, 1},
+ {[]transactions.SignedTxnWithAD{appcall1, payment, appcall2}, []bool{true, false, true}, 2, 0},
+ }
- // Ensure non app calls have a nil evaluator, and that non-nil
- // evaluators point to the right transactions and values
- for j, present := range testCase.expected {
- if present {
- require.NotNil(t, res[j])
- require.NotNil(t, res[j].PastSideEffects)
- require.Equal(t, res[j].GroupIndex, j)
- require.Equal(t, res[j].TxnGroup, expGroupNoAD)
- require.Equal(t, *res[j].Proto, eval.proto)
- require.Equal(t, *res[j].Txn, testCase.group[j].SignedTxn)
- } else {
- require.Nil(t, res[j])
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ eval.proto = param
+ res := eval.prepareEvalParams(testCase.group)
+ require.Equal(t, len(res), len(testCase.group))
+
+ // Compute the expected transaction group without ApplyData for
+ // the test case
+ expGroupNoAD := make([]transactions.SignedTxn, len(testCase.group))
+ for k := range testCase.group {
+ expGroupNoAD[k] = testCase.group[k].SignedTxn
}
- }
- })
+
+ // Ensure non app calls have a nil evaluator, and that non-nil
+ // evaluators point to the right transactions and values
+ for k, present := range testCase.expected {
+ if present {
+ require.NotNil(t, res[k])
+ require.NotNil(t, res[k].PastSideEffects)
+ require.Equal(t, res[k].GroupIndex, k)
+ require.Equal(t, res[k].TxnGroup, expGroupNoAD)
+ require.Equal(t, *res[k].Proto, eval.proto)
+ require.Equal(t, *res[k].Txn, testCase.group[k].SignedTxn)
+ require.Equal(t, res[k].MinTealVersion, res[testCase.firstAppCallIndex].MinTealVersion)
+ require.Equal(t, res[k].PooledApplicationBudget, res[testCase.firstAppCallIndex].PooledApplicationBudget)
+ if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusV29]) {
+ require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost))
+ } else if reflect.DeepEqual(param, config.Consensus[protocol.ConsensusFuture]) {
+ require.Equal(t, *res[k].PooledApplicationBudget, uint64(eval.proto.MaxAppProgramCost*testCase.numAppCalls))
+ }
+ } else {
+ require.Nil(t, res[k])
+ }
+ }
+ })
+ }
}
}
@@ -560,6 +583,140 @@ func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
}
+func testEvalAppPoolingGroup(t *testing.T, schema basics.StateSchema, approvalProgram string, consensusVersion protocol.ConsensusVersion) (*BlockEvaluator, error) {
+ genesisInitState, addrs, keys := genesis(10)
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0)
+ require.NoError(t, err)
+ eval.validate = true
+ eval.generate = false
+ eval.proto = config.Consensus[consensusVersion]
+
+ ops, err := logic.AssembleString(approvalProgram)
+ require.NoError(t, err, ops.Errors)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 4\nint 1")
+ require.NoError(t, err)
+ clear := ops.Program
+
+ genHash := genesisInitState.Block.BlockHeader.GenesisHash
+ header := transactions.Header{
+ Sender: addrs[0],
+ Fee: minFee,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round(),
+ GenesisHash: genHash,
+ }
+ appcall1 := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: header,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ GlobalStateSchema: schema,
+ ApprovalProgram: approval,
+ ClearStateProgram: clear,
+ },
+ }
+
+ appcall2 := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: header,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: basics.AppIndex(1),
+ },
+ }
+
+ appcall3 := appcall2
+ appcall3.Header.Sender = addrs[1]
+
+ var group transactions.TxGroup
+ group.TxGroupHashes = []crypto.Digest{crypto.HashObj(appcall1), crypto.HashObj(appcall2), crypto.HashObj(appcall3)}
+ appcall1.Group = crypto.HashObj(group)
+ appcall2.Group = crypto.HashObj(group)
+ appcall3.Group = crypto.HashObj(group)
+ stxn1 := appcall1.Sign(keys[0])
+ stxn2 := appcall2.Sign(keys[0])
+ stxn3 := appcall3.Sign(keys[1])
+
+ g := []transactions.SignedTxnWithAD{
+ {
+ SignedTxn: stxn1,
+ },
+ {
+ SignedTxn: stxn2,
+ },
+ {
+ SignedTxn: stxn3,
+ },
+ }
+ txgroup := []transactions.SignedTxn{stxn1, stxn2, stxn3}
+ err = eval.TestTransactionGroup(txgroup)
+ if err != nil {
+ return eval, err
+ }
+ err = eval.transactionGroup(g)
+ return eval, err
+}
+
+// TestEvalAppPooledBudgetWithTxnGroup ensures 3 app call txns can successfully pool
+// budgets in a group txn and return an error if the budget is exceeded
+func TestEvalAppPooledBudgetWithTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ source := func(n int, m int) string {
+ return "#pragma version 4\nbyte 0x1337BEEF\n" + strings.Repeat("keccak256\n", n) +
+ strings.Repeat("substring 0 4\n", m) + "pop\nint 1\n"
+ }
+
+ params := []protocol.ConsensusVersion{
+ protocol.ConsensusV29,
+ protocol.ConsensusFuture,
+ }
+
+ cases := []struct {
+ prog string
+ isSuccessV29 bool
+ isSuccessVFuture bool
+ expectedErrorV29 string
+ expectedErrorVFuture string
+ }{
+ {source(5, 47), true, true,
+ "",
+ ""},
+ {source(5, 48), false, true,
+ "pc=157 dynamic cost budget exceeded, executing pushint: remaining budget is 700 but program cost was 701",
+ ""},
+ {source(16, 17), false, true,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
+ ""},
+ {source(16, 18), false, false,
+ "pc= 12 dynamic cost budget exceeded, executing keccak256: remaining budget is 700 but program cost was 781",
+ "pc= 78 dynamic cost budget exceeded, executing pushint: remaining budget is 2100 but program cost was 2101"},
+ }
+
+ for i, param := range params {
+ for j, testCase := range cases {
+ t.Run(fmt.Sprintf("i=%d,j=%d", i, j), func(t *testing.T) {
+ _, err := testEvalAppPoolingGroup(t, basics.StateSchema{NumByteSlice: 3}, testCase.prog, param)
+ if !testCase.isSuccessV29 && reflect.DeepEqual(param, protocol.ConsensusV29) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorV29)
+ } else if !testCase.isSuccessVFuture && reflect.DeepEqual(param, protocol.ConsensusFuture) {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), testCase.expectedErrorVFuture)
+ }
+ })
+ }
+ }
+}
+
func BenchmarkBlockEvaluatorRAMCrypto(b *testing.B) {
benchmarkBlockEvaluator(b, true, true)
}
@@ -869,3 +1026,379 @@ func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uin
require.Equal(t, nextPoolBalance, poolOld.MicroAlgos.Raw)
require.NoError(t, err)
}
+
+// Test that ModifiedAssetHoldings in StateDelta is set correctly.
+func TestModifiedAssetHoldings(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := newTestGenesis()
+ l := newTestLedger(t, genBalances)
+ defer l.Close()
+
+ const assetid basics.AssetIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ Fee: basics.MicroAlgos{Raw: 2000},
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Decimals: 0,
+ Manager: addrs[0],
+ Reserve: addrs[0],
+ Freeze: addrs[0],
+ Clawback: addrs[0],
+ },
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ Fee: basics.MicroAlgos{Raw: 2000},
+ XferAsset: assetid,
+ AssetAmount: 0,
+ AssetReceiver: addrs[1],
+ }
+
+ eval := l.nextBlock(t)
+ eval.txns(t, &createTxn, &optInTxn)
+ vb := l.endBlock(t, eval)
+
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[0],
+ Asset: assetid,
+ }
+ created, ok := vb.delta.ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[1],
+ Asset: assetid,
+ }
+ created, ok := vb.delta.ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ Fee: basics.MicroAlgos{Raw: 1000},
+ XferAsset: assetid,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ Fee: basics.MicroAlgos{Raw: 1000},
+ ConfigAsset: assetid,
+ }
+
+ eval = l.nextBlock(t)
+ eval.txns(t, &optOutTxn, &closeTxn)
+ vb = l.endBlock(t, eval)
+
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[0],
+ Asset: assetid,
+ }
+ created, ok := vb.delta.ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+ {
+ aa := ledgercore.AccountAsset{
+ Address: addrs[1],
+ Asset: assetid,
+ }
+ created, ok := vb.delta.ModifiedAssetHoldings[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+}
+
+// newTestGenesis creates a bunch of accounts, splits up 10B algos
+// between them and the rewardspool and feesink, and gives out the
+// addresses and secrets it creates to enable tests. For special
+// scenarios, manipulate these return values before using newTestLedger.
+func newTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
+ // irrelevant, but deterministic
+ sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
+ if err != nil {
+ panic(err)
+ }
+ rewards, err := basics.UnmarshalChecksumAddress("242H5OXHUEBYCGGWB3CQ6AZAMQB5TMCWJGHCGQOZPEIVQJKOO7NZXUXDQA")
+ if err != nil {
+ panic(err)
+ }
+
+ const count = 10
+ addrs := make([]basics.Address, count)
+ secrets := make([]*crypto.SignatureSecrets, count)
+ accts := make(map[basics.Address]basics.AccountData)
+
+ // 10 billion microalgos, across N accounts and pool and sink
+ amount := 10 * 1000000000 * 1000000 / uint64(count+2)
+
+ for i := 0; i < count; i++ {
+ // Create deterministic addresses, so that output stays the same, run to run.
+ var seed crypto.Seed
+ seed[0] = byte(i)
+ secrets[i] = crypto.GenerateSignatureSecrets(seed)
+ addrs[i] = basics.Address(secrets[i].SignatureVerifier)
+
+ adata := basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ }
+ accts[addrs[i]] = adata
+ }
+
+ accts[sink] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: amount},
+ Status: basics.NotParticipating,
+ }
+
+ accts[rewards] = basics.AccountData{MicroAlgos: basics.MicroAlgos{Raw: amount}}
+
+ genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
+
+ return genBalances, addrs, secrets
+}
+
+// newTestLedger creates a in memory Ledger that is as realistic as
+// possible. It has Rewards and FeeSink properly configured.
+func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *Ledger {
+ var genHash crypto.Digest
+ crypto.RandBytes(genHash[:])
+ genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
+ balances, "test", genHash)
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ const inMem = true
+ l, err := OpenLedger(logging.Base(), dbName, inMem, InitState{
+ Block: genBlock,
+ Accounts: balances.Balances,
+ GenesisHash: genHash,
+ }, cfg)
+ require.NoError(t, err)
+ return l
+}
+
+// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
+func (ledger *Ledger) nextBlock(t testing.TB) *BlockEvaluator {
+ rnd := ledger.Latest()
+ hdr, err := ledger.BlockHdr(rnd)
+ require.NoError(t, err)
+ eval, err := startEvaluator(ledger, bookkeeping.MakeBlock(hdr).BlockHeader,
+ config.Consensus[hdr.CurrentProtocol], 0, false, true)
+ require.NoError(t, err)
+ return eval
+}
+
+// endBlock completes the block being created, returns the ValidatedBlock for inspection
+func (ledger *Ledger) endBlock(t testing.TB, eval *BlockEvaluator) *ValidatedBlock {
+ validatedBlock, err := eval.GenerateBlock()
+ require.NoError(t, err)
+ err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
+ require.NoError(t, err)
+ return validatedBlock
+}
+
+// lookup gets the current accountdaa for an address
+func (ledger *Ledger) lookup(t testing.TB, addr basics.Address) basics.AccountData {
+ rnd := ledger.Latest()
+ ad, err := ledger.Lookup(rnd, addr)
+ require.NoError(t, err)
+ return ad
+}
+
+func (eval *BlockEvaluator) txn(t testing.TB, txn *txntest.Txn) {
+ if txn.GenesisHash.IsZero() {
+ txn.GenesisHash = eval.genesisHash
+ }
+ if txn.FirstValid == 0 {
+ txn.FirstValid = eval.Round()
+ }
+ txn.FillDefaults(eval.proto)
+ stxn := txn.SignedTxn()
+ err := eval.testTransaction(stxn, eval.state.child(1))
+ require.NoError(t, err)
+ eval.Transaction(stxn, transactions.ApplyData{})
+ require.NoError(t, err)
+}
+
+func (eval *BlockEvaluator) txns(t testing.TB, txns ...*txntest.Txn) {
+ for _, txn := range txns {
+ eval.txn(t, txn)
+ }
+}
+
+func TestRewardsInAD(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := newTestGenesis()
+ l := newTestLedger(t, genBalances)
+ defer l.Close()
+
+ payTxn := txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[1]}
+
+ // Build up Residue in RewardsState so it's ready to pay
+ for i := 1; i < 10; i++ {
+ eval := l.nextBlock(t)
+ l.endBlock(t, eval)
+ }
+
+ eval := l.nextBlock(t)
+ eval.txn(t, &payTxn)
+ payInBlock := eval.block.Payset[0]
+ require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
+ require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
+ require.Equal(t, payInBlock.ApplyData.SenderRewards, payInBlock.ApplyData.ReceiverRewards)
+ l.endBlock(t, eval)
+}
+
+func TestMinBalanceChanges(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := newTestGenesis()
+ l := newTestLedger(t, genBalances)
+ defer l.Close()
+
+ createTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 3,
+ Manager: addrs[1],
+ Reserve: addrs[2],
+ Freeze: addrs[3],
+ Clawback: addrs[4],
+ },
+ }
+
+ const expectedID basics.AssetIndex = 1
+ optInTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[5],
+ }
+
+ ad0init := l.lookup(t, addrs[0])
+ ad5init := l.lookup(t, addrs[5])
+
+ eval := l.nextBlock(t)
+ eval.txns(t, &createTxn, &optInTxn)
+ l.endBlock(t, eval)
+
+ ad0new := l.lookup(t, addrs[0])
+ ad5new := l.lookup(t, addrs[5])
+
+ proto := config.Consensus[eval.block.BlockHeader.CurrentProtocol]
+ // Check balance and min balance requirement changes
+ require.Equal(t, ad0init.MicroAlgos.Raw, ad0new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad0init.MinBalance(&proto).Raw, ad0new.MinBalance(&proto).Raw-100000) // create
+ require.Equal(t, ad5init.MicroAlgos.Raw, ad5new.MicroAlgos.Raw+1000) // fee
+ require.Equal(t, ad5init.MinBalance(&proto).Raw, ad5new.MinBalance(&proto).Raw-100000) // optin
+
+ optOutTxn := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[5],
+ XferAsset: expectedID,
+ AssetReceiver: addrs[0],
+ AssetCloseTo: addrs[0],
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[1], // The manager, not the creator
+ ConfigAsset: expectedID,
+ }
+
+ eval = l.nextBlock(t)
+ eval.txns(t, &optOutTxn, &closeTxn)
+ l.endBlock(t, eval)
+
+ ad0final := l.lookup(t, addrs[0])
+ ad5final := l.lookup(t, addrs[5])
+ // Check we got our balance "back"
+ require.Equal(t, ad0final.MinBalance(&proto), ad0init.MinBalance(&proto))
+ require.Equal(t, ad5final.MinBalance(&proto), ad5init.MinBalance(&proto))
+}
+
+// Test that ModifiedAppLocalStates in StateDelta is set correctly.
+func TestModifiedAppLocalStates(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := newTestGenesis()
+ l := newTestLedger(t, genBalances)
+ defer l.Close()
+
+ const appid basics.AppIndex = 1
+
+ createTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
+ ClearStateProgram: []byte{0x02, 0x20, 0x01, 0x01, 0x22},
+ }
+
+ optInTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appid,
+ OnCompletion: transactions.OptInOC,
+ }
+
+ eval := l.nextBlock(t)
+ eval.txns(t, &createTxn, &optInTxn)
+ vb := l.endBlock(t, eval)
+
+ assert.Len(t, vb.delta.ModifiedAppLocalStates, 1)
+ {
+ aa := ledgercore.AccountApp{
+ Address: addrs[1],
+ App: appid,
+ }
+ created, ok := vb.delta.ModifiedAppLocalStates[aa]
+ require.True(t, ok)
+ assert.True(t, created)
+ }
+
+ optOutTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appid,
+ OnCompletion: transactions.CloseOutOC,
+ }
+
+ closeTxn := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appid,
+ OnCompletion: transactions.DeleteApplicationOC,
+ }
+
+ eval = l.nextBlock(t)
+ eval.txns(t, &optOutTxn, &closeTxn)
+ vb = l.endBlock(t, eval)
+
+ assert.Len(t, vb.delta.ModifiedAppLocalStates, 1)
+ {
+ aa := ledgercore.AccountApp{
+ Address: addrs[1],
+ App: appid,
+ }
+ created, ok := vb.delta.ModifiedAppLocalStates[aa]
+ require.True(t, ok)
+ assert.False(t, created)
+ }
+}
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 80d6ad7fe..1edb4e8c0 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -47,6 +47,18 @@ type ModifiedCreatable struct {
Ndeltas int
}
+// AccountAsset is used as a map key.
+type AccountAsset struct {
+ Address basics.Address
+ Asset basics.AssetIndex
+}
+
+// AccountApp is used as a map key.
+type AccountApp struct {
+ Address basics.Address
+ App basics.AppIndex
+}
+
// A Txlease is a transaction (sender, lease) pair which uniquely specifies a
// transaction lease.
type Txlease struct {
@@ -78,6 +90,11 @@ type StateDelta struct {
// previous block timestamp
PrevTimestamp int64
+ // Modified local creatable states. The value is true if the creatable local state
+ // is created and false if deleted. Used by indexer.
+ ModifiedAssetHoldings map[AccountAsset]bool
+ ModifiedAppLocalStates map[AccountApp]bool
+
// initial hint for allocating data structures for StateDelta
initialTransactionsCount int
}
@@ -105,9 +122,11 @@ func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int,
// asset or application creation are considered as rare events so do not pre-allocate space for them
Creatables: make(map[basics.CreatableIndex]ModifiedCreatable),
Hdr: hdr,
+ CompactCertNext: compactCertNext,
PrevTimestamp: prevTimestamp,
+ ModifiedAssetHoldings: make(map[AccountAsset]bool),
+ ModifiedAppLocalStates: make(map[AccountApp]bool),
initialTransactionsCount: hint,
- CompactCertNext: compactCertNext,
}
}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index 87cfcbaa6..3193921dd 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -746,6 +746,16 @@ func (c *Client) PendingTransactionInformation(txid string) (resp v1.Transaction
return
}
+// PendingTransactionInformationV2 returns information about a recently issued
+// transaction based on its txid.
+func (c *Client) PendingTransactionInformationV2(txid string) (resp generatedV2.PendingTransactionResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ resp, err = algod.PendingTransactionInformationV2(txid)
+ }
+ return
+}
+
// Block takes a round and returns its block
func (c *Client) Block(round uint64) (resp v1.Block, err error) {
algod, err := c.ensureAlgodClient()
@@ -1025,7 +1035,7 @@ func MakeDryrunStateGenerated(client Client, txnOrStxn interface{}, other []tran
} else {
// otherwise need to fetch app state
var app generatedV2.Application
- if app, err = client.ApplicationInformation(uint64(tx.ApplicationID)); err != nil {
+ if app, err = client.ApplicationInformation(uint64(appIdx)); err != nil {
return
}
appParams = app.Params
diff --git a/node/assemble_test.go b/node/assemble_test.go
index 9a6274504..f094c8dd9 100644
--- a/node/assemble_test.go
+++ b/node/assemble_test.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
@@ -78,7 +79,7 @@ func BenchmarkAssembleBlock(b *testing.B) {
}
require.Equal(b, len(genesis), numUsers+1)
- genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
ledgerName := fmt.Sprintf("%s-mem-%d", b.Name(), b.N)
const inMem = true
cfg := config.GetDefaultLocal()
@@ -208,7 +209,7 @@ func TestAssembleBlockTransactionPoolBehind(t *testing.T) {
}
require.Equal(t, len(genesis), numUsers+1)
- genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
diff --git a/node/indexer/indexer_test.go b/node/indexer/indexer_test.go
index ce767c695..910ada11a 100644
--- a/node/indexer/indexer_test.go
+++ b/node/indexer/indexer_test.go
@@ -34,6 +34,10 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
+const testGenesisID string = "foo"
+
+var genesisHash = crypto.Digest{0x1, 0x2, 0x3}
+
type IndexSuite struct {
suite.Suite
idx *Indexer
@@ -62,14 +66,18 @@ func (s *IndexSuite) SetupSuite() {
var txnEnc []transactions.SignedTxnInBlock
b := bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(uint64(i + 2)),
- TimeStamp: time.Now().Unix(),
+ Round: basics.Round(uint64(i + 2)),
+ TimeStamp: time.Now().Unix(),
+ GenesisID: testGenesisID,
+ GenesisHash: genesisHash,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusFuture,
+ },
},
}
chunkSize := numOfTransactions / numOfBlocks
for t := i * chunkSize; t < (i+1)*chunkSize; t++ {
-
txid, err := b.EncodeSignedTxn(s.txns[t], transactions.ApplyData{})
require.NoError(s.T(), err)
txnEnc = append(txnEnc, txid)
@@ -230,10 +238,12 @@ func generateTestObjects(numTxs, numAccs int) ([]transactions.Transaction, []tra
txs[i] = transactions.Transaction{
Header: transactions.Header{
- Sender: addresses[s],
- Fee: basics.MicroAlgos{Raw: f},
- FirstValid: basics.Round(iss),
- LastValid: basics.Round(exp),
+ Sender: addresses[s],
+ Fee: basics.MicroAlgos{Raw: f},
+ FirstValid: basics.Round(iss),
+ LastValid: basics.Round(exp),
+ GenesisID: testGenesisID,
+ GenesisHash: genesisHash,
},
}
diff --git a/node/node.go b/node/node.go
index e91b2bc01..e0d2f437f 100644
--- a/node/node.go
+++ b/node/node.go
@@ -87,8 +87,6 @@ func (status StatusReport) TimeSinceLastRound() time.Duration {
// AlgorandFullNode specifies and implements a full Algorand node.
type AlgorandFullNode struct {
- nodeContextData
-
mu deadlock.Mutex
ctx context.Context
cancelCtx context.CancelFunc
@@ -193,7 +191,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
log.Errorf("Unable to create genesis directory: %v", err)
return nil, err
}
- var genalloc data.GenesisBalances
+ var genalloc bookkeeping.GenesisBalances
genalloc, err = bootstrapData(genesis, log)
if err != nil {
log.Errorf("Cannot load genesis allocation: %v", err)
@@ -304,20 +302,20 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
return node, err
}
-func bootstrapData(genesis bookkeeping.Genesis, log logging.Logger) (data.GenesisBalances, error) {
+func bootstrapData(genesis bookkeeping.Genesis, log logging.Logger) (bookkeeping.GenesisBalances, error) {
genalloc := make(map[basics.Address]basics.AccountData)
for _, entry := range genesis.Allocation {
addr, err := basics.UnmarshalChecksumAddress(entry.Address)
if err != nil {
log.Errorf("Cannot parse genesis addr %s: %v", entry.Address, err)
- return data.GenesisBalances{}, err
+ return bookkeeping.GenesisBalances{}, err
}
_, present := genalloc[addr]
if present {
err = fmt.Errorf("repeated allocation to %s", entry.Address)
log.Error(err)
- return data.GenesisBalances{}, err
+ return bookkeeping.GenesisBalances{}, err
}
genalloc[addr] = entry.State
@@ -326,16 +324,16 @@ func bootstrapData(genesis bookkeeping.Genesis, log logging.Logger) (data.Genesi
feeSink, err := basics.UnmarshalChecksumAddress(genesis.FeeSink)
if err != nil {
log.Errorf("Cannot parse fee sink addr %s: %v", genesis.FeeSink, err)
- return data.GenesisBalances{}, err
+ return bookkeeping.GenesisBalances{}, err
}
rewardsPool, err := basics.UnmarshalChecksumAddress(genesis.RewardsPool)
if err != nil {
log.Errorf("Cannot parse rewards pool addr %s: %v", genesis.RewardsPool, err)
- return data.GenesisBalances{}, err
+ return bookkeeping.GenesisBalances{}, err
}
- return data.MakeTimestampedGenesisBalances(genalloc, feeSink, rewardsPool, genesis.Timestamp), nil
+ return bookkeeping.MakeTimestampedGenesisBalances(genalloc, feeSink, rewardsPool, genesis.Timestamp), nil
}
// Config returns a copy of the node's Local configuration
diff --git a/node/nodeContext.go b/node/nodeContext.go
deleted file mode 100644
index 77bb6e660..000000000
--- a/node/nodeContext.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package node
-
-import (
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/data/basics"
-)
-
-type nodeContextData struct {
- muData deadlock.RWMutex
- lastRoundObserved basics.Round
-}
-
-// IsCatchingUp (implements NodeContext) returns true if our sync routine is currently running
-func (node *AlgorandFullNode) IsCatchingUp() bool {
- // Lock not required - catchupService doesn't change
- catchingUp, _ := node.catchupService.IsSynchronizing()
- return catchingUp
-}
-
-// IsInitialCatchupComplete (implements NodeContext) returns true if the initial sync has completed (doesn't mean it succeeded)
-func (node *AlgorandFullNode) IsInitialCatchupComplete() bool {
- // Lock not required - catchupService doesn't change
- _, initSyncComplete := node.catchupService.IsSynchronizing()
- return initSyncComplete
-}
-
-// HasCaughtUp (implements NodeContext) returns true if we have completely caught up at least once
-func (node *AlgorandFullNode) HasCaughtUp() bool {
- node.muData.RLock()
- defer node.muData.RUnlock()
-
- return node.lastRoundObserved != 0
-}
-
-// SetLastLiveRound is called to record observation of a round completion
-func (node *AlgorandFullNode) SetLastLiveRound(round basics.Round) {
- node.muData.Lock()
- defer node.muData.Unlock()
-
- node.lastRoundObserved = round
-}
diff --git a/node/node_test.go b/node/node_test.go
index 20a744ad6..411bb3360 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -144,7 +144,7 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
genesis[short] = data
}
- bootstrap := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ bootstrap := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
for i, rootDirectory := range rootDirs {
genesisDir := filepath.Join(rootDirectory, g.ID())
diff --git a/node/topAccountListener_test.go b/node/topAccountListener_test.go
index bcacd1ae9..6faaa5f9d 100644
--- a/node/topAccountListener_test.go
+++ b/node/topAccountListener_test.go
@@ -20,6 +20,7 @@ import (
"fmt"
"testing"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -284,6 +285,9 @@ func TestInit(t *testing.T) {
func makeBlockWithTxnFor(senders []byte, receivers []byte) bookkeeping.Block {
var blk bookkeeping.Block
+ blk.BlockHeader.GenesisID = "foo"
+ crypto.RandBytes(blk.BlockHeader.GenesisHash[:])
+ blk.CurrentProtocol = protocol.ConsensusFuture
paysets := make([]transactions.SignedTxnInBlock, 0, len(receivers))
for i, b := range receivers {
@@ -291,7 +295,9 @@ func makeBlockWithTxnFor(senders []byte, receivers []byte) bookkeeping.Block {
Txn: transactions.Transaction{
Type: protocol.PaymentTx,
Header: transactions.Header{
- Sender: basics.Address{senders[i]},
+ Sender: basics.Address{senders[i]},
+ GenesisID: blk.BlockHeader.GenesisID,
+ GenesisHash: blk.BlockHeader.GenesisHash,
},
PaymentTxnFields: transactions.PaymentTxnFields{
Receiver: basics.Address{b},
diff --git a/protocol/consensus.go b/protocol/consensus.go
index 52c38c002..47e6148eb 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -153,6 +153,11 @@ const ConsensusV28 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/65b4ab3266c52c56a0fa7d591754887d68faad0a",
)
+// ConsensusV29 fixes application update by using ExtraProgramPages in size calculations
+const ConsensusV29 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/abc54f79f9ad679d2d22f0fb9909fb005c16f8a1",
+)
+
// ConsensusFuture is a protocol that should not appear in any production
// network, but is used to test features before they are released.
const ConsensusFuture = ConsensusVersion(
@@ -165,7 +170,7 @@ const ConsensusFuture = ConsensusVersion(
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV28
+const ConsensusCurrentVersion = ConsensusV29
// Error is used to indicate that an unsupported protocol has been detected.
type Error ConsensusVersion
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index 3269f5c13..a48217b13 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -30,6 +30,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
@@ -284,7 +285,7 @@ func makeLedger(t *testing.T, namePostfix string) *data.Ledger {
}
log := logging.TestingLog(t)
- genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
genHash := crypto.Digest{0x42}
cfg := config.GetDefaultLocal()
const inMem = true
diff --git a/scripts/buildtools/go.mod b/scripts/buildtools/go.mod
index 785b0819d..18710c58b 100644
--- a/scripts/buildtools/go.mod
+++ b/scripts/buildtools/go.mod
@@ -4,6 +4,7 @@ go 1.14
require (
github.com/algorand/msgp v1.1.48
+ github.com/algorand/oapi-codegen v1.3.5-algorand5
github.com/go-swagger/go-swagger v0.25.0
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/tools v0.1.1 // indirect
diff --git a/scripts/buildtools/go.sum b/scripts/buildtools/go.sum
index e912f0710..198ef473f 100644
--- a/scripts/buildtools/go.sum
+++ b/scripts/buildtools/go.sum
@@ -23,6 +23,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/algorand/msgp v1.1.48 h1:5P+gVmTnk0m37r+rA3ZsFZW219ZqmCLulW5f8Z+3nx8=
github.com/algorand/msgp v1.1.48/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
+github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI=
+github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -45,6 +47,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -61,9 +64,13 @@ github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/getkin/kin-openapi v0.3.1 h1:9mLtayAmieqUnNACL0HqHbxkTc+z1+15sxXpLoJOGEQ=
+github.com/getkin/kin-openapi v0.3.1/go.mod h1:W8dhxZgpE84ciM+VIItFqkmZ4eHtuomrdIHtASQIqi0=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -182,6 +189,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -258,6 +266,10 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o=
+github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI=
+github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
+github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -268,10 +280,15 @@ github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8
github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -360,9 +377,15 @@ github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9r
github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4=
+github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -390,6 +413,8 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -414,6 +439,7 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -436,6 +462,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -452,6 +479,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
@@ -463,6 +491,7 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -475,10 +504,12 @@ golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -523,6 +554,7 @@ golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200423205358-59e73619c742/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs=
diff --git a/scripts/buildtools/install_buildtools.sh b/scripts/buildtools/install_buildtools.sh
index d6c0da388..630b99310 100755
--- a/scripts/buildtools/install_buildtools.sh
+++ b/scripts/buildtools/install_buildtools.sh
@@ -10,14 +10,18 @@ function usage {
echo "By default all packages are installed."
echo "usage: $0 [-o packagename]"
echo " -o packagename when used only packagename is installed."
+ echo " -c commandname if it is one command from a package provide this."
echo " -h print this usage information."
}
-while getopts ":o:h" opt; do
+while getopts ":o:c:h" opt; do
case $opt in
o)
BUILDTOOLS_INSTALL="$OPTARG"
;;
+ c)
+ BUILDTOOLS_COMMAND="$OPTARG"
+ ;;
h)
usage
exit 0
@@ -81,7 +85,7 @@ function install_go_module {
}
if [[ "${BUILDTOOLS_INSTALL}" != "ALL" ]]; then
- install_go_module "${BUILDTOOLS_INSTALL}"
+ install_go_module "${BUILDTOOLS_INSTALL}" "${BUILDTOOLS_COMMAND}"
exit 0
fi
@@ -90,3 +94,4 @@ install_go_module golang.org/x/tools golang.org/x/tools/cmd/stringer
install_go_module github.com/go-swagger/go-swagger github.com/go-swagger/go-swagger/cmd/swagger
install_go_module github.com/algorand/msgp
install_go_module gotest.tools/gotestsum
+install_go_module github.com/algorand/oapi-codegen github.com/algorand/oapi-codegen/cmd/oapi-codegen
diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh
index 991e25c5b..860341609 100755
--- a/scripts/travis/build.sh
+++ b/scripts/travis/build.sh
@@ -44,7 +44,7 @@ OS=$("${SCRIPTPATH}/../ostype.sh")
ARCH=$("${SCRIPTPATH}/../archtype.sh")
# Get the go build version.
-if [ -z "${NO_GIMME}" ]; then
+if [ -z "${SKIP_GO_INSTALLATION}" ]; then
GOLANG_VERSION=$(./scripts/get_golang_version.sh)
curl -sL -o ~/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x ~/gimme
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index 0f9f2b595..395c85e90 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -78,6 +78,10 @@ GOPATH=$(go env GOPATH)
echo "Updating TEAL Specs"
make -C data/transactions/logic
+echo "Regenerate REST server"
+touch daemon/algod/api/algod.oas2.json
+make -C daemon/algod/api generate
+
echo Checking Enlistment...
if [[ -n $(git status --porcelain) ]]; then
echo Enlistment is dirty - did you forget to run make?
diff --git a/scripts/travis/run_tests.sh b/scripts/travis/run_tests.sh
index 0c6f278f4..cf47b0b97 100755
--- a/scripts/travis/run_tests.sh
+++ b/scripts/travis/run_tests.sh
@@ -4,14 +4,14 @@ set -e
if [ "${BUILD_TYPE}" = "integration" ]; then
# Run short tests when doing pull requests; leave the long testing for nightly runs.
- if [[ "${TRAVIS_BRANCH}" =~ ^rel/nightly ]]; then
+ if [[ "${TRAVIS_BRANCH}" =~ ^rel/nightly ]] || [[ "${TRAVIS_BRANCH}" =~ ^hotfix/ ]]; then
SHORTTEST=
else
SHORTTEST=-short
fi
export SHORTTEST
make integration
-elif [ "${TRAVIS_EVENT_TYPE}" = "cron" ] || [[ "${TRAVIS_BRANCH}" =~ ^rel/ ]]; then
+elif [ "${TRAVIS_EVENT_TYPE}" = "cron" ] || [[ "${TRAVIS_BRANCH}" =~ ^rel/ ]] || [[ "${TRAVIS_BRANCH}" =~ ^hotfix/ ]]; then
make fulltest -j2
else
make shorttest -j2
diff --git a/scripts/travis/test.sh b/scripts/travis/test.sh
index 629fc683b..486316dad 100755
--- a/scripts/travis/test.sh
+++ b/scripts/travis/test.sh
@@ -8,7 +8,7 @@ OS=$("${SCRIPTPATH}/../ostype.sh")
ARCH=$("${SCRIPTPATH}/../archtype.sh")
# Get the go build version.
-if [ -z "${NO_GIMME}" ]; then
+if [ -z "${SKIP_GO_INSTALLATION}" ]; then
GOLANG_VERSION=$(./scripts/get_golang_version.sh)
curl -sL -o ~/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x ~/gimme
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 4cb68f0c2..39db221b3 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -18,9 +18,11 @@ package pingpong
import (
"fmt"
+ "io/ioutil"
"math"
"math/rand"
"os"
+ "path/filepath"
"sort"
"strings"
"time"
@@ -28,48 +30,83 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ algodAcct "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
)
-func ensureAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]uint64, cfg PpConfig, err error) {
- accounts = make(map[string]uint64)
+func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]*pingPongAccount, cfg PpConfig, err error) {
+ accounts = make(map[string]*pingPongAccount)
cfg = initCfg
- wallet, err := ac.GetUnencryptedWalletHandle()
+ genID, err2 := ac.GenesisID()
+ if err2 != nil {
+ err = err2
+ return
+ }
+ genesisDir := filepath.Join(ac.DataDir(), genID)
+ files, err2 := ioutil.ReadDir(genesisDir)
+ if err2 != nil {
+ err = err2
+ return
+ }
var srcAcctPresent bool
var richestAccount string
var richestBalance uint64
- addresses, err := ac.ListAddresses(wallet)
+ for _, info := range files {
+ var handle db.Accessor
- if err != nil {
- return nil, PpConfig{}, err
- }
+ // If it can't be a participation key database, skip it
+ if !config.IsRootKeyFilename(info.Name()) {
+ continue
+ }
- // find either srcAccount or the richest account
- for _, addr := range addresses {
- if addr == cfg.SrcAccount {
+ // Fetch a handle to this database
+ handle, err = db.MakeErasableAccessor(filepath.Join(genesisDir, info.Name()))
+ if err != nil {
+ // Couldn't open it, skip it
+ continue
+ }
+
+ // Fetch an account.Participation from the database
+ root, err := algodAcct.RestoreRoot(handle)
+ handle.Close()
+ if err != nil {
+ // Couldn't read it, skip it
+ continue
+ }
+
+ publicKey := root.Secrets().SignatureVerifier
+ accountAddress := basics.Address(publicKey)
+
+ if accountAddress.String() == cfg.SrcAccount {
srcAcctPresent = true
}
- amount, err := ac.GetBalance(addr)
+ amt, err := ac.GetBalance(accountAddress.String())
if err != nil {
return nil, PpConfig{}, err
}
- amt := amount
if !srcAcctPresent && amt > richestBalance {
- richestAccount = addr
+ richestAccount = accountAddress.String()
richestBalance = amt
}
- accounts[addr] = amt
+
if !initCfg.Quiet {
- fmt.Printf("Found local account: %s -> %v\n", addr, amt)
+ fmt.Printf("Found local account: %s -> %v\n", accountAddress.String(), amt)
+ }
+
+ accounts[accountAddress.String()] = &pingPongAccount{
+ balance: amt,
+ sk: root.Secrets(),
+ pk: accountAddress,
}
}
@@ -104,10 +141,7 @@ func ensureAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]ui
if len(accounts) != int(cfg.NumPartAccounts+1) {
fmt.Printf("Not enough accounts - creating %d more\n", int(cfg.NumPartAccounts+1)-len(accounts))
}
- accounts, err = generateAccounts(ac, accounts, cfg.NumPartAccounts, wallet)
- if err != nil {
- return
- }
+ accounts = generateAccounts(accounts, cfg.NumPartAccounts)
}
}
@@ -129,7 +163,7 @@ func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64
// Step 1) Create X assets for each of the participant accounts
// Step 2) For each participant account, opt-in to assets of all other participant accounts
// Step 3) Evenly distribute the assets across all participant accounts
-func (pps *WorkerState) prepareAssets(assetAccounts map[string]uint64, client libgoal.Client) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) {
+func (pps *WorkerState) prepareAssets(assetAccounts map[string]*pingPongAccount, client libgoal.Client) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) {
accounts := assetAccounts
cfg := pps.cfg
proto, err := getProto(client)
@@ -185,7 +219,7 @@ func (pps *WorkerState) prepareAssets(assetAccounts map[string]uint64, client li
fmt.Printf("Cannot fill asset creation txn\n")
return
}
-
+ tx.Note = pps.makeNextUniqueNoteField()
_, err = signAndBroadcastTransaction(accounts, addr, tx, client, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err)
@@ -277,6 +311,7 @@ func (pps *WorkerState) prepareAssets(assetAccounts map[string]uint64, client li
fmt.Printf("Cannot fill asset optin %v in account %v\n", k, addr)
return
}
+ tx.Note = pps.makeNextUniqueNoteField()
_, err = signAndBroadcastTransaction(accounts, addr, tx, client, cfg)
if err != nil {
@@ -354,14 +389,15 @@ func (pps *WorkerState) prepareAssets(assetAccounts map[string]uint64, client li
fmt.Printf("Distributing assets from %v to %v \n", creator, addr)
}
- tx, sendErr := pps.constructTxn(creator, addr, cfg.MaxFee, assetAmt, k, client)
+ tx, signer, sendErr := pps.constructTxn(creator, addr, cfg.MaxFee, assetAmt, k, client)
if sendErr != nil {
fmt.Printf("Cannot transfer asset %v from account %v\n", k, creator)
err = sendErr
return
}
- _, err = signAndBroadcastTransaction(accounts, creator, tx, client, cfg)
+ tx.Note = pps.makeNextUniqueNoteField()
+ _, err = signAndBroadcastTransaction(accounts, signer, tx, client, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset distribution failed with error %v\n", err)
return
@@ -408,9 +444,9 @@ func (pps *WorkerState) prepareAssets(assetAccounts map[string]uint64, client li
return
}
-func signAndBroadcastTransaction(accounts map[string]uint64, sender string, tx transactions.Transaction, client libgoal.Client, cfg PpConfig) (txID string, err error) {
+func signAndBroadcastTransaction(accounts map[string]*pingPongAccount, sender string, tx transactions.Transaction, client libgoal.Client, cfg PpConfig) (txID string, err error) {
var signedTx transactions.SignedTxn
- signedTx, err = signTxn(sender, tx, client, cfg)
+ signedTx, err = signTxn(sender, tx, accounts, cfg)
if err != nil {
fmt.Printf("Cannot sign trx %+v with account %v\nerror %v\n", tx, sender, err)
return
@@ -423,7 +459,7 @@ func signAndBroadcastTransaction(accounts map[string]uint64, sender string, tx t
if !cfg.Quiet {
fmt.Printf("Broadcast transaction %v\n", txID)
}
- accounts[sender] -= tx.Fee.Raw
+ accounts[sender].balance -= tx.Fee.Raw
return
}
@@ -585,7 +621,7 @@ func genAppProgram(numOps uint32, numHashes uint32, hashSize string, numGlobalKe
return ops.Program, progAsm
}
-func sendAsGroup(txgroup []transactions.Transaction, client libgoal.Client, h []byte) (err error) {
+func (pps *WorkerState) sendAsGroup(txgroup []transactions.Transaction, client libgoal.Client, senders []string) (err error) {
if len(txgroup) == 0 {
err = fmt.Errorf("sendAsGroup: empty group")
return
@@ -596,9 +632,14 @@ func sendAsGroup(txgroup []transactions.Transaction, client libgoal.Client, h []
return
}
var stxgroup []transactions.SignedTxn
- for _, txn := range txgroup {
+ for i, txn := range txgroup {
txn.Group = gid
- signedTxn, signErr := client.SignTransactionWithWallet(h, nil, txn)
+ signedTxn, signErr := signTxn(senders[i], txn, pps.accounts, pps.cfg)
+ if err != nil {
+ fmt.Printf("Cannot sign trx %+v with account %v\nerror %v\n", txn, senders[i], err)
+ return
+ }
+
if signErr != nil {
fmt.Printf("Cannot sign app creation txn\n")
err = signErr
@@ -635,7 +676,7 @@ func getProto(client libgoal.Client) (config.ConsensusParams, error) {
return *proto, nil
}
-func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig) (appParams map[uint64]v1.AppParams, optIns map[uint64][]string, err error) {
+func (pps *WorkerState) prepareApps(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) (appParams map[uint64]v1.AppParams, optIns map[uint64][]string, err error) {
proto, err := getProto(client)
if err != nil {
return
@@ -686,13 +727,6 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig
}
}
- // Get wallet handle token
- var h []byte
- h, err = client.GetUnencryptedWalletHandle()
- if err != nil {
- return
- }
-
// create apps
for idx, appAccount := range appAccounts {
begin := idx * appsPerAcct
@@ -701,6 +735,7 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig
end = toCreate
}
var txgroup []transactions.Transaction
+ var senders []string
for i := begin; i < end; i++ {
var tx transactions.Transaction
@@ -725,15 +760,14 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig
}
// Ensure different txids
- var note [8]byte
- crypto.RandBytes(note[:])
- tx.Note = note[:]
+ tx.Note = pps.makeNextUniqueNoteField()
txgroup = append(txgroup, tx)
- accounts[appAccount.Address] -= tx.Fee.Raw
+ accounts[appAccount.Address].balance -= tx.Fee.Raw
+ senders = append(senders, appAccount.Address)
}
- err = sendAsGroup(txgroup, client, h)
+ err = pps.sendAsGroup(txgroup, client, senders)
if err != nil {
return
}
@@ -776,6 +810,7 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig
optIns = make(map[uint64][]string)
for addr := range accounts {
var txgroup []transactions.Transaction
+ var senders []string
permAppIndices := rand.Perm(len(aidxs))
for i := uint32(0); i < cfg.NumAppOptIn; i++ {
j := permAppIndices[i]
@@ -794,15 +829,14 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig
}
// Ensure different txids
- var note [8]byte
- crypto.RandBytes(note[:])
- tx.Note = note[:]
+ tx.Note = pps.makeNextUniqueNoteField()
optIns[aidx] = append(optIns[aidx], addr)
txgroup = append(txgroup, tx)
+ senders = append(senders, addr)
if len(txgroup) == groupSize {
- err = sendAsGroup(txgroup, client, h)
+ err = pps.sendAsGroup(txgroup, client, senders)
if err != nil {
return
}
@@ -811,7 +845,7 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig
}
// broadcast leftovers
if len(txgroup) > 0 {
- err = sendAsGroup(txgroup, client, h)
+ err = pps.sendAsGroup(txgroup, client, senders)
if err != nil {
return
}
@@ -822,7 +856,7 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig
return
}
-func takeTopAccounts(allAccounts map[string]uint64, numAccounts uint32, srcAccount string) (accounts map[string]uint64) {
+func takeTopAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32, srcAccount string) (accounts map[string]*pingPongAccount) {
allAddrs := make([]string, len(allAccounts))
var i int
for addr := range allAccounts {
@@ -833,12 +867,12 @@ func takeTopAccounts(allAccounts map[string]uint64, numAccounts uint32, srcAccou
sort.SliceStable(allAddrs, func(i, j int) bool {
amt1 := allAccounts[allAddrs[i]]
amt2 := allAccounts[allAddrs[j]]
- return amt1 > amt2
+ return amt1.balance > amt2.balance
})
// Now populate a new map with just the accounts needed
accountsRequired := int(numAccounts + 1) // Participating and Src
- accounts = make(map[string]uint64)
+ accounts = make(map[string]*pingPongAccount)
accounts[srcAccount] = allAccounts[srcAccount]
for _, addr := range allAddrs {
accounts[addr] = allAccounts[addr]
@@ -849,19 +883,24 @@ func takeTopAccounts(allAccounts map[string]uint64, numAccounts uint32, srcAccou
return
}
-func generateAccounts(client libgoal.Client, allAccounts map[string]uint64, numAccounts uint32, wallet []byte) (map[string]uint64, error) {
+func generateAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32) map[string]*pingPongAccount {
// Compute the number of accounts to generate
accountsRequired := int(numAccounts+1) - len(allAccounts)
+ var seed crypto.Seed
+
for accountsRequired > 0 {
accountsRequired--
- addr, err := client.GenerateAddress(wallet)
- if err != nil {
- return allAccounts, err
- }
- allAccounts[addr] = 0
+ crypto.RandBytes(seed[:])
+ privateKey := crypto.GenerateSignatureSecrets(seed)
+ publicKey := basics.Address(privateKey.SignatureVerifier)
+
+ allAccounts[publicKey.String()] = &pingPongAccount{
+ sk: privateKey,
+ pk: publicKey,
+ }
}
- return allAccounts, nil
+ return allAccounts
}
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 45051fa9d..eb774b5d0 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -18,6 +18,7 @@ package pingpong
import (
"context"
+ "encoding/binary"
"fmt"
"math"
"math/rand"
@@ -29,6 +30,7 @@ import (
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
)
@@ -39,38 +41,41 @@ type CreatablesInfo struct {
OptIns map[uint64][]string
}
+// pingPongAccount represents the account state for each account in the pingpong application
+// This includes the current balance and public/private keys tied to the account
+type pingPongAccount struct {
+ balance uint64
+ sk *crypto.SignatureSecrets
+ pk basics.Address
+}
+
// WorkerState object holds a running pingpong worker
type WorkerState struct {
cfg PpConfig
- accounts map[string]uint64
+ accounts map[string]*pingPongAccount
cinfo CreatablesInfo
- nftStartTime int64
- localNftIndex uint64
- nftHolders map[string]int
+ nftStartTime int64
+ localNftIndex uint64
+ nftHolders map[string]int
+ incTransactionSalt uint64
}
// PrepareAccounts to set up accounts and asset accounts required for Ping Pong run
func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) {
- pps.accounts, pps.cfg, err = ensureAccounts(ac, pps.cfg)
+ pps.accounts, pps.cfg, err = pps.ensureAccounts(ac, pps.cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ensure accounts failed %v\n", err)
return
}
cfg := pps.cfg
- wallet, walletErr := ac.GetUnencryptedWalletHandle()
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "unable to access wallet %v\n", walletErr)
- err = walletErr
- return
- }
if cfg.NumAsset > 0 {
// zero out max amount for asset transactions
cfg.MaxAmt = 0
- var assetAccounts map[string]uint64
- assetAccounts, err = prepareNewAccounts(ac, cfg, wallet, pps.accounts)
+ var assetAccounts map[string]*pingPongAccount
+ assetAccounts, err = pps.prepareNewAccounts(ac, cfg, pps.accounts)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
return
@@ -84,28 +89,28 @@ func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) {
if !cfg.Quiet {
for addr := range pps.accounts {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr])
+ fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr].balance)
}
}
} else if cfg.NumApp > 0 {
- var appAccounts map[string]uint64
- appAccounts, err = prepareNewAccounts(ac, cfg, wallet, pps.accounts)
+ var appAccounts map[string]*pingPongAccount
+ appAccounts, err = pps.prepareNewAccounts(ac, cfg, pps.accounts)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
return
}
- pps.cinfo.AppParams, pps.cinfo.OptIns, err = prepareApps(appAccounts, ac, cfg)
+ pps.cinfo.AppParams, pps.cinfo.OptIns, err = pps.prepareApps(appAccounts, ac, cfg)
if err != nil {
return
}
if !cfg.Quiet {
for addr := range pps.accounts {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr])
+ fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr].balance)
}
}
} else {
- err = fundAccounts(pps.accounts, ac, cfg)
+ err = pps.fundAccounts(pps.accounts, ac, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
return
@@ -116,7 +121,7 @@ func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) {
return
}
-func prepareNewAccounts(client libgoal.Client, cfg PpConfig, wallet []byte, accounts map[string]uint64) (newAccounts map[string]uint64, err error) {
+func (pps *WorkerState) prepareNewAccounts(client libgoal.Client, cfg PpConfig, accounts map[string]*pingPongAccount) (newAccounts map[string]*pingPongAccount, err error) {
// remove existing accounts except for src account
for k := range accounts {
if k != cfg.SrcAccount {
@@ -124,13 +129,13 @@ func prepareNewAccounts(client libgoal.Client, cfg PpConfig, wallet []byte, acco
}
}
// create new accounts for testing
- newAccounts = make(map[string]uint64)
- newAccounts, err = generateAccounts(client, newAccounts, cfg.NumPartAccounts-1, wallet)
+ newAccounts = make(map[string]*pingPongAccount)
+ newAccounts = generateAccounts(newAccounts, cfg.NumPartAccounts-1)
for k := range newAccounts {
accounts[k] = newAccounts[k]
}
- err = fundAccounts(accounts, client, cfg)
+ err = pps.fundAccounts(accounts, client, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
return
@@ -193,8 +198,12 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (requiredBala
return
}
-func fundAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfig) error {
- srcFunds := accounts[cfg.SrcAccount]
+func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error {
+ srcFunds, err := client.GetBalance(cfg.SrcAccount)
+
+ if err != nil {
+ return err
+ }
startTime := time.Now()
var totalSent uint64
@@ -208,12 +217,17 @@ func fundAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfi
}
fmt.Printf("adjusting account balance to %d\n", minFund)
- for addr, balance := range accounts {
+ for addr, acct := range accounts {
+
+ if addr == pps.cfg.SrcAccount {
+ continue
+ }
+
if !cfg.Quiet {
fmt.Printf("adjusting balance of account %v\n", addr)
}
- if balance < minFund {
- toSend := minFund - balance
+ if acct.balance < minFund {
+ toSend := minFund - acct.balance
if srcFunds <= toSend {
return fmt.Errorf("source account %s has insufficient funds %d - needs %d", cfg.SrcAccount, srcFunds, toSend)
}
@@ -221,13 +235,13 @@ func fundAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfi
if !cfg.Quiet {
fmt.Printf("adjusting balance of account %v by %d\n ", addr, toSend)
}
- _, err := sendPaymentFromUnencryptedWallet(client, cfg.SrcAccount, addr, fee, toSend, nil)
+ _, err := pps.sendPaymentFromSourceAccount(client, addr, fee, toSend)
if err != nil {
return err
}
- accounts[addr] = minFund
+ accounts[addr].balance = minFund
if !cfg.Quiet {
- fmt.Printf("account balance for key %s is %d\n", addr, accounts[addr])
+ fmt.Printf("account balance for key %s is %d\n", addr, accounts[addr].balance)
}
totalSent++
@@ -237,19 +251,32 @@ func fundAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfi
return nil
}
-func sendPaymentFromUnencryptedWallet(client libgoal.Client, from, to string, fee, amount uint64, note []byte) (transactions.Transaction, error) {
- wh, err := client.GetUnencryptedWalletHandle()
+func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to string, fee, amount uint64) (transactions.Transaction, error) {
+ // generate a unique note to avoid duplicate transaction failures
+ note := pps.makeNextUniqueNoteField()
+
+ from := pps.cfg.SrcAccount
+ tx, err := client.ConstructPayment(from, to, fee, amount, note[:], "", [32]byte{}, 0, 0)
+
if err != nil {
return transactions.Transaction{}, err
}
- // generate a random lease to avoid duplicate transaction failures
- var lease [32]byte
- crypto.RandBytes(lease[:])
- return client.SendPaymentFromWalletWithLease(wh, nil, from, to, fee, amount, note, "", lease, 0, 0)
+ stxn, err := signTxn(from, tx, pps.accounts, pps.cfg)
+
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ _, err = client.BroadcastTransaction(stxn)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ return tx, nil
}
-func refreshAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfig) error {
+func (pps *WorkerState) refreshAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error {
for addr := range accounts {
amount, err := client.GetBalance(addr)
if err != nil {
@@ -257,20 +284,20 @@ func refreshAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpCo
return err
}
- accounts[addr] = amount
+ accounts[addr].balance = amount
}
- return fundAccounts(accounts, client, cfg)
+ return pps.fundAccounts(accounts, client, cfg)
}
// return a shuffled list of accounts with some minimum balance
-func listSufficientAccounts(accounts map[string]uint64, minimumAmount uint64, except string) []string {
+func listSufficientAccounts(accounts map[string]*pingPongAccount, minimumAmount uint64, except string) []string {
out := make([]string, 0, len(accounts))
for key, value := range accounts {
if key == except {
continue
}
- if value >= minimumAmount {
+ if value.balance >= minimumAmount {
out = append(out, key)
}
}
@@ -375,7 +402,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
}
if cfg.RefreshTime > 0 && time.Now().After(refreshTime) {
- err = refreshAccounts(pps.accounts, ac, cfg)
+ err = pps.refreshAccounts(pps.accounts, ac, cfg)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "error refreshing: %v\n", err)
}
@@ -438,15 +465,18 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
fee := pps.fee()
if (len(pps.nftHolders) == 0) || ((float64(int(pps.cfg.NftAsaAccountInFlight)-len(pps.nftHolders)) / float64(pps.cfg.NftAsaAccountInFlight)) >= rand.Float64()) {
var addr string
- var wallet []byte
- wallet, err = client.GetUnencryptedWalletHandle()
- if err != nil {
- return
- }
- addr, err = client.GenerateAddress(wallet)
- if err != nil {
- return
+
+ var seed [32]byte
+ crypto.RandBytes(seed[:])
+ privateKey := crypto.GenerateSignatureSecrets(seed)
+ publicKey := basics.Address(privateKey.SignatureVerifier)
+
+ pps.accounts[publicKey.String()] = &pingPongAccount{
+ sk: privateKey,
+ pk: publicKey,
}
+ addr = publicKey.String()
+
fmt.Printf("new NFT holder %s\n", addr)
var proto config.ConsensusParams
proto, err = getProto(client)
@@ -456,7 +486,7 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
// enough for the per-asa minbalance and more than enough for the txns to create them
toSend := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2
pps.nftHolders[addr] = 0
- _, err = sendPaymentFromUnencryptedWallet(client, pps.cfg.SrcAccount, addr, fee, toSend, nil)
+ _, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend)
if err != nil {
return
}
@@ -497,7 +527,7 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
} else {
pps.nftHolders[sender] = senderNftCount + 1
}
- stxn, err := signTxn(sender, txn, client, pps.cfg)
+ stxn, err := signTxn(sender, txn, pps.accounts, pps.cfg)
if err != nil {
return
}
@@ -546,8 +576,10 @@ func (pps *WorkerState) sendFromTo(
if cfg.GroupSize == 1 {
// generate random assetID or appId if we send asset/app txns
aidx := getCreatableID(cfg, cinfo)
+ var txn transactions.Transaction
+ var consErr error
// Construct single txn
- txn, consErr := pps.constructTxn(from, to, fee, amt, aidx, client)
+ txn, from, consErr = pps.constructTxn(from, to, fee, amt, aidx, client)
if consErr != nil {
err = consErr
_, _ = fmt.Fprintf(os.Stderr, "constructTxn failed: %v\n", err)
@@ -555,7 +587,7 @@ func (pps *WorkerState) sendFromTo(
}
// would we have enough money after taking into account the current updated fees ?
- if accounts[from] <= (txn.Fee.Raw + amt + cfg.MinAccountFunds) {
+ if accounts[from].balance <= (txn.Fee.Raw + amt + cfg.MinAccountFunds) {
_, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d : %s -> %s; Current cost too high.\n", amt, from, to)
continue
}
@@ -564,7 +596,7 @@ func (pps *WorkerState) sendFromTo(
toBalanceChange = int64(amt)
// Sign txn
- stxn, signErr := signTxn(from, txn, client, cfg)
+ stxn, signErr := signTxn(from, txn, pps.accounts, cfg)
if signErr != nil {
err = signErr
_, _ = fmt.Fprintf(os.Stderr, "signTxn failed: %v\n", err)
@@ -590,17 +622,16 @@ func (pps *WorkerState) sendFromTo(
var txn transactions.Transaction
var signer string
if j%2 == 0 {
- txn, err = pps.constructTxn(from, to, fee, amt, 0, client)
+ txn, signer, err = pps.constructTxn(from, to, fee, amt, 0, client)
fromBalanceChange -= int64(txn.Fee.Raw + amt)
toBalanceChange += int64(amt)
- signer = from
} else if cfg.GroupSize == 2 && cfg.Rekey {
- txn, err = pps.constructTxn(from, to, fee, amt, 0, client)
+ txn, _, err = pps.constructTxn(from, to, fee, amt, 0, client)
fromBalanceChange -= int64(txn.Fee.Raw + amt)
toBalanceChange += int64(amt)
signer = to
} else {
- txn, err = pps.constructTxn(to, from, fee, amt, 0, client)
+ txn, _, err = pps.constructTxn(to, from, fee, amt, 0, client)
toBalanceChange -= int64(txn.Fee.Raw + amt)
fromBalanceChange += int64(amt)
signer = to
@@ -630,11 +661,11 @@ func (pps *WorkerState) sendFromTo(
}
// would we have enough money after taking into account the current updated fees ?
- if int64(accounts[from])+fromBalanceChange <= int64(cfg.MinAccountFunds) {
+ if int64(accounts[from].balance)+fromBalanceChange <= int64(cfg.MinAccountFunds) {
_, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d : %s -> %s; Current cost too high.\n", amt, from, to)
continue
}
- if int64(accounts[to])+toBalanceChange <= int64(cfg.MinAccountFunds) {
+ if int64(accounts[to].balance)+toBalanceChange <= int64(cfg.MinAccountFunds) {
_, _ = fmt.Fprintf(os.Stdout, "Skipping sending back %d : %s -> %s; Current cost too high.\n", amt, to, from)
continue
}
@@ -654,7 +685,7 @@ func (pps *WorkerState) sendFromTo(
var stxGroup []transactions.SignedTxn
for j, txn := range txGroup {
txn.Group = gid
- stxn, signErr := signTxn(txSigners[j], txn, client, cfg)
+ stxn, signErr := signTxn(txSigners[j], txn, pps.accounts, cfg)
if signErr != nil {
err = signErr
return
@@ -674,8 +705,8 @@ func (pps *WorkerState) sendFromTo(
}
successCount++
- accounts[from] = uint64(fromBalanceChange + int64(accounts[from]))
- accounts[to] = uint64(toBalanceChange + int64(accounts[to]))
+ accounts[from].balance = uint64(fromBalanceChange + int64(accounts[from].balance))
+ accounts[to].balance = uint64(toBalanceChange + int64(accounts[to].balance))
if cfg.DelayBetweenTxn > 0 {
time.Sleep(cfg.DelayBetweenTxn)
}
@@ -690,23 +721,30 @@ func (pps *WorkerState) nftSpamAssetName() string {
pps.localNftIndex++
return fmt.Sprintf("nft%d_%d", pps.nftStartTime, pps.localNftIndex)
}
+func (pps *WorkerState) makeNextUniqueNoteField() []byte {
+ noteField := make([]byte, binary.MaxVarintLen64)
+ usedBytes := binary.PutUvarint(noteField[:], pps.incTransactionSalt)
+ pps.incTransactionSalt++
+ return noteField[:usedBytes]
+}
-func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, client libgoal.Client) (txn transactions.Transaction, err error) {
+func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, client libgoal.Client) (txn transactions.Transaction, sender string, err error) {
cfg := pps.cfg
cinfo := pps.cinfo
+ sender = from
var noteField []byte
const pingpongTag = "pingpong"
- const tagLen = uint32(len(pingpongTag))
- const randomBaseLen = uint32(8)
- const maxNoteFieldLen = uint32(1024)
- var noteLength = uint32(tagLen) + randomBaseLen
+ const tagLen = len(pingpongTag)
// if random note flag set, then append a random number of additional bytes
if cfg.RandomNote {
- noteLength = noteLength + rand.Uint32()%(maxNoteFieldLen-noteLength)
+ const maxNoteFieldLen = 1024
+ noteLength := tagLen + int(rand.Uint32())%(maxNoteFieldLen-tagLen)
+ noteField = make([]byte, noteLength)
+ copy(noteField, pingpongTag)
+ crypto.RandBytes(noteField[tagLen:])
+ } else {
+ noteField = pps.makeNextUniqueNoteField()
}
- noteField = make([]byte, noteLength, noteLength)
- copy(noteField, pingpongTag)
- crypto.RandBytes(noteField[tagLen:])
// if random lease flag set, fill the lease field with random bytes
var lease [32]byte
@@ -745,6 +783,7 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
indices := rand.Perm(len(cinfo.OptIns[aidx]))
from = cinfo.OptIns[aidx][indices[0]]
to = cinfo.OptIns[aidx][indices[1]]
+ sender = from
}
txn, err = client.MakeUnsignedAssetSendTx(aidx, amt, to, "", "")
if err != nil {
@@ -786,32 +825,27 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
return
}
-func signTxn(signer string, txn transactions.Transaction, client libgoal.Client, cfg PpConfig) (stxn transactions.SignedTxn, err error) {
- // Get wallet handle token
- var h []byte
- h, err = client.GetUnencryptedWalletHandle()
- if err != nil {
- return
- }
+func signTxn(signer string, txn transactions.Transaction, accounts map[string]*pingPongAccount, cfg PpConfig) (stxn transactions.SignedTxn, err error) {
var psig crypto.Signature
if cfg.Rekey {
- stxn, err = client.SignTransactionWithWalletAndSigner(h, nil, signer, txn)
+ stxn, err = txn.Sign(accounts[signer].sk), nil
+
} else if len(cfg.Program) > 0 {
// If there's a program, sign it and use that in a lsig
- psig, err = client.SignProgramWithWallet(h, nil, signer, cfg.Program)
- if err != nil {
- return
- }
+ progb := logic.Program(cfg.Program)
+ psig = accounts[signer].sk.Sign(&progb)
+
// Fill in signed transaction
stxn.Txn = txn
stxn.Lsig.Logic = cfg.Program
stxn.Lsig.Sig = psig
stxn.Lsig.Args = cfg.LogicArgs
} else {
+
// Otherwise, just sign the transaction like normal
- stxn, err = client.SignTransactionWithWallet(h, nil, txn)
+ stxn, err = txn.Sign(accounts[signer].sk), nil
}
return
}
diff --git a/test/e2e-go/features/transactions/app_pages_test.go b/test/e2e-go/features/transactions/app_pages_test.go
new file mode 100644
index 000000000..e76e88888
--- /dev/null
+++ b/test/e2e-go/features/transactions/app_pages_test.go
@@ -0,0 +1,186 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package transactions
+
+import (
+ "encoding/base64"
+ "fmt"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestExtraProgramPages(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer fixture.Shutdown()
+ client := fixture.LibGoalClient
+
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+ baseAcct := accountList[0].Address
+
+ walletHandle, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ accountInfo, err := client.AccountInformationV2(baseAcct)
+ a.NoError(err)
+ if accountInfo.AppsTotalExtraPages != nil {
+ a.Equal(*accountInfo.AppsTotalExtraPages, uint64(0))
+ }
+
+ var inconsequentialBytes [2048]byte
+ srcBigProgram := fmt.Sprintf(`#pragma version 4
+byte base64(%s)
+pop
+int 1
+return
+`, base64.StdEncoding.EncodeToString(inconsequentialBytes[:]))
+
+ srcSmallProgram := `#pragma version 4
+int 1
+return
+`
+
+ bigProgramOps, err := logic.AssembleString(srcBigProgram)
+ a.NoError(err)
+ bigProgram := bigProgramOps.Program
+
+ smallProgramOps, err := logic.AssembleString(srcSmallProgram)
+ a.NoError(err)
+ smallProgram := smallProgramOps.Program
+
+ globalSchema := basics.StateSchema{
+ NumByteSlice: 1,
+ }
+ localSchema := basics.StateSchema{
+ NumByteSlice: 1,
+ }
+
+ status, err := client.Status()
+ a.NoError(err)
+
+ // create app 1 with 1 extra page
+ app1ExtraPages := uint32(1)
+ tx, err := client.MakeUnsignedAppCreateTx(transactions.NoOpOC, smallProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, app1ExtraPages)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
+ a.NoError(err)
+ txid, err := client.SignAndBroadcastTransaction(walletHandle, nil, tx)
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(status.LastRound+5, baseAcct, txid)
+ a.NoError(err)
+
+ app1CreateTxn, err := client.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(app1CreateTxn.ConfirmedRound)
+ a.NotNil(app1CreateTxn.ApplicationIndex)
+ app1ID := *app1CreateTxn.ApplicationIndex
+
+ accountInfo, err = client.AccountInformationV2(baseAcct)
+ a.NoError(err)
+ a.NotNil(accountInfo.AppsTotalExtraPages)
+ a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages))
+
+ // update app 1 and ensure the extra page still works
+ tx, err = client.MakeUnsignedAppUpdateTx(app1ID, nil, nil, nil, nil, bigProgram, smallProgram)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
+ a.NoError(err)
+ txid, err = client.SignAndBroadcastTransaction(walletHandle, nil, tx)
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(*app1CreateTxn.ConfirmedRound+5, baseAcct, txid)
+ a.NoError(err)
+
+ app1UpdateTxn, err := client.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(app1CreateTxn.ConfirmedRound)
+
+ accountInfo, err = client.AccountInformationV2(baseAcct)
+ a.NoError(err)
+ a.NotNil(accountInfo.AppsTotalExtraPages)
+ a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages))
+
+ // create app 2 with 2 extra pages
+ app2ExtraPages := uint32(2)
+ tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, bigProgram, smallProgram, globalSchema, localSchema, nil, nil, nil, nil, app2ExtraPages)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
+ a.NoError(err)
+ txid, err = client.SignAndBroadcastTransaction(walletHandle, nil, tx)
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(*app1UpdateTxn.ConfirmedRound+5, baseAcct, txid)
+ a.NoError(err)
+
+ app2CreateTxn, err := client.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(app2CreateTxn.ConfirmedRound)
+ a.NotNil(app2CreateTxn.ApplicationIndex)
+ app2ID := *app2CreateTxn.ApplicationIndex
+
+ accountInfo, err = client.AccountInformationV2(baseAcct)
+ a.NoError(err)
+ a.NotNil(accountInfo.AppsTotalExtraPages)
+ a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app1ExtraPages+app2ExtraPages))
+
+ // delete app 1
+ tx, err = client.MakeUnsignedAppDeleteTx(app1ID, nil, nil, nil, nil)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
+ a.NoError(err)
+ txid, err = client.SignAndBroadcastTransaction(walletHandle, nil, tx)
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(*app2CreateTxn.ConfirmedRound+5, baseAcct, txid)
+ a.NoError(err)
+
+ app1DeleteTxn, err := client.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(app1DeleteTxn.ConfirmedRound)
+
+ accountInfo, err = client.AccountInformationV2(baseAcct)
+ a.NoError(err)
+ a.NotNil(accountInfo.AppsTotalExtraPages)
+ a.Equal(*accountInfo.AppsTotalExtraPages, uint64(app2ExtraPages))
+
+ // delete app 2
+ tx, err = client.MakeUnsignedAppDeleteTx(app2ID, nil, nil, nil, nil)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(baseAcct, 0, 0, 0, tx)
+ a.NoError(err)
+ txid, err = client.SignAndBroadcastTransaction(walletHandle, nil, tx)
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(*app1DeleteTxn.ConfirmedRound+5, baseAcct, txid)
+ a.NoError(err)
+
+ accountInfo, err = client.AccountInformationV2(baseAcct)
+ a.NoError(err)
+ if accountInfo.AppsTotalExtraPages != nil {
+ a.Equal(*accountInfo.AppsTotalExtraPages, uint64(0))
+ }
+}
diff --git a/test/e2e-go/features/transactions/application_test.go b/test/e2e-go/features/transactions/application_test.go
new file mode 100644
index 000000000..5ad255790
--- /dev/null
+++ b/test/e2e-go/features/transactions/application_test.go
@@ -0,0 +1,130 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package transactions
+
+import (
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func checkEqual(expected []basics.LogItem, actual []basics.LogItem) bool {
+ if len(expected) != len(actual) {
+ return false
+ }
+ for i, e := range expected {
+ if !e.Equal(actual[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func TestApplication(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ proto, ok := config.Consensus[protocol.ConsensusFuture]
+ a.True(ok)
+ proto.AgreementFilterTimeoutPeriod0 = 400 * time.Millisecond
+ proto.AgreementFilterTimeout = 400 * time.Millisecond
+ fixture.SetConsensus(config.ConsensusProtocols{protocol.ConsensusFuture: proto})
+
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer fixture.Shutdown()
+
+ client := fixture.LibGoalClient
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+
+ creator := accountList[0].Address
+ wh, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ fee := uint64(1000)
+
+ counter := `#pragma version 5
+int 1
+loop: byte "a"
+log
+int 1
++
+dup
+int 30
+<=
+bnz loop
+byte "b"
+log
+byte "c"
+log
+`
+
+ approvalOps, err := logic.AssembleString(counter)
+ a.NoError(err)
+ clearstateOps, err := logic.AssembleString("#pragma version 5\nint 1")
+ a.NoError(err)
+ schema := basics.StateSchema{
+ NumUint: 1,
+ }
+
+ // create the app
+ tx, err := client.MakeUnsignedAppCreateTx(
+ transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, 0)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
+ a.NoError(err)
+ wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
+ a.NoError(err)
+ round, err := client.CurrentRound()
+ a.NoError(err)
+ txid, err := client.BroadcastTransaction(signedTxn)
+ a.NoError(err)
+ confirmed := fixture.WaitForAllTxnsToConfirm(round+2, map[string]string{txid: signedTxn.Txn.Sender.String()})
+ a.True(confirmed)
+ round, err = client.CurrentRound()
+ a.NoError(err)
+
+ logs := make([]basics.LogItem, 32)
+ for i := range logs {
+ logs[i] = basics.LogItem{ID: 0, Message: "a"}
+ }
+ logs[30] = basics.LogItem{ID: 0, Message: "b"}
+ logs[31] = basics.LogItem{ID: 0, Message: "c"}
+
+ b, err := client.BookkeepingBlock(round)
+ for _, ps := range b.Payset {
+ ed := ps.ApplyData.EvalDelta
+ ok = checkEqual(logs, ed.Logs)
+ a.True(ok)
+ }
+
+}
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 3c3faa2d0..69db074c8 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -18,6 +18,7 @@ package restapi
import (
"context"
+ "encoding/base64"
"errors"
"flag"
"math"
@@ -29,10 +30,12 @@ import (
"time"
"unicode"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
kmdclient "github.com/algorand/go-algorand/daemon/kmd/client"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -851,7 +854,6 @@ func TestClientTruncatesPendingTransactions(t *testing.T) {
a.NoError(err)
txIDsSeen[tx2.ID().String()] = true
}
-
statusResponse, err := testClient.GetPendingTransactions(uint64(MaxTxns))
a.NoError(err)
a.NotEmpty(statusResponse)
@@ -905,3 +907,130 @@ func TestClientPrioritizesPendingTransactions(t *testing.T) {
a.True(len(statusResponse.TruncatedTxns.Transactions) == MaxTxns)
a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == txHigh.ID().String())
}
+
+func TestClientCanGetPendingTransactionInfo(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ testClient.WaitForRound(1)
+
+ testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, someAddress := getMaxBalAddr(t, testClient, addresses)
+ if someAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+ addr, err := basics.UnmarshalChecksumAddress(someAddress)
+
+ params, err := testClient.SuggestedParams()
+ a.NoError(err)
+
+ firstRound := basics.Round(params.LastRound + 1)
+ lastRound := basics.Round(params.LastRound + 1000)
+ var gh crypto.Digest
+ copy(gh[:], params.GenesisHash)
+
+ prog := `#pragma version 5
+byte "A"
+loop:
+int 0
+dup2
+getbyte
+int 1
++
+dup
+int 97 //ascii code of last char
+<=
+bz end
+setbyte
+dup
+log
+b loop
+end:
+int 1
+return
+`
+ ops, err := logic.AssembleString(prog)
+ approv := ops.Program
+ ops, err = logic.AssembleString("#pragma version 5 \nint 1")
+ clst := ops.Program
+
+ gl := basics.StateSchema{
+ NumByteSlice: 1,
+ }
+ lc := basics.StateSchema{
+ NumByteSlice: 1,
+ }
+ minTxnFee, _, err := localFixture.CurrentMinFeeAndBalance()
+
+ tx, err := testClient.MakeUnsignedApplicationCallTx(0, nil, addresses, nil, nil, transactions.NoOpOC, approv, clst, gl, lc, 0)
+ tx.Sender = addr
+ tx.Fee = basics.MicroAlgos{Raw: minTxnFee}
+ tx.FirstValid = firstRound
+ tx.LastValid = lastRound
+ tx.GenesisHash = gh
+
+ txid, err := testClient.SignAndBroadcastTransaction(wh, nil, tx)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, txid, 60*time.Second)
+ a.NoError(err)
+ txn, err := testClient.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(txn.Logs)
+ a.Equal(32, len(*txn.Logs))
+ for i, l := range *txn.Logs {
+ a.Equal(*txn.ApplicationIndex, l.Id)
+ assert.Equal(t, base64.StdEncoding.EncodeToString([]byte(string(rune('B'+i)))), l.Value)
+ }
+
+ //check non-create app call
+ expectedAppID := *txn.ApplicationIndex
+ wh, err = testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err = testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, someAddress = getMaxBalAddr(t, testClient, addresses)
+ if someAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+ addr, err = basics.UnmarshalChecksumAddress(someAddress)
+
+ params, err = testClient.SuggestedParams()
+ a.NoError(err)
+
+ firstRound = basics.Round(params.LastRound + 1)
+ lastRound = basics.Round(params.LastRound + 1000)
+
+ tx, err = testClient.MakeUnsignedAppNoOpTx(*txn.ApplicationIndex, nil, addresses, nil, nil)
+ tx.Sender = addr
+ tx.Fee = basics.MicroAlgos{Raw: minTxnFee}
+ tx.FirstValid = firstRound
+ tx.LastValid = lastRound
+ tx.GenesisHash = gh
+
+ txid, err = testClient.SignAndBroadcastTransaction(wh, nil, tx)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, someAddress, txid, 60*time.Second)
+ a.NoError(err)
+ txn, err = testClient.PendingTransactionInformationV2(txid)
+ a.NoError(err)
+ a.NotNil(txn.Logs)
+ a.Equal(32, len(*txn.Logs))
+ for i, l := range *txn.Logs {
+ a.Equal(expectedAppID, l.Id)
+ assert.Equal(t, base64.StdEncoding.EncodeToString([]byte(string(rune('B'+i)))), l.Value)
+ }
+
+}
diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go
index b7c11284f..22975ef0e 100644
--- a/test/e2e-go/upgrades/rekey_support_test.go
+++ b/test/e2e-go/upgrades/rekey_support_test.go
@@ -68,6 +68,9 @@ func TestRekeyUpgrade(t *testing.T) {
round := curStatus.LastRound
// no consensus upgrade took place (yet)
+ // in fact, on a slow environment, the upgrade might happen faster than the test advances.
+ // that's why errors from BroadcastTransaction are checked for the exact specific errors
+ // rather than simply expecting "rekeying not yet enable" or "nonempty AuthAddr"
// Ensure no rekeying happened
ad, err := client.AccountData(accountA)
@@ -79,27 +82,42 @@ func TestRekeyUpgrade(t *testing.T) {
a.NoError(err)
tx.RekeyTo = addrB
-
rekey, err := client.SignTransactionWithWalletAndSigner(wh, nil, "", tx)
a.NoError(err)
_, err = client.BroadcastTransaction(rekey)
- a.Error(err)
- // should be either "transaction has RekeyTo set but rekeying not yet enable" or "txn dead"
- if !strings.Contains(err.Error(), "transaction has RekeyTo set but rekeying not yet enable") &&
- !strings.Contains(err.Error(), "txn dead") {
- a.NoErrorf(err, "error message should be one of :\n%s\n%s", "transaction has RekeyTo set but rekeying not yet enable", "txn dead")
+ // a non-empty err means the upgrade has not happened yet (as expected); verify the error
+ if err != nil {
+ // should be either "transaction has RekeyTo set but rekeying not yet enable" or "txn dead"
+ if !strings.Contains(err.Error(), "transaction has RekeyTo set but rekeying not yet enable") &&
+ !strings.Contains(err.Error(), "txn dead") {
+ a.NoErrorf(err, "error message should be one of :\n%s\n%s", "transaction has RekeyTo set but rekeying not yet enable", "txn dead")
+ }
+ } else {
+ // if we had no error it must mean that we've upgraded already. Verify that.
+ curStatus, err := client.Status()
+ a.NoError(err)
+ a.NotEqual(consensusTestUnupgradedProtocol, protocol.ConsensusVersion(curStatus.LastVersion))
}
// use rekeyed key to authorize (AuthAddr check)
tx.RekeyTo = basics.Address{}
rekeyed, err := client.SignTransactionWithWalletAndSigner(wh, nil, accountB, tx)
a.NoError(err)
+
_, err = client.BroadcastTransaction(rekeyed)
- // should be either "nonempty AuthAddr but rekeying not supported" or "txn dead"
- if !strings.Contains(err.Error(), "nonempty AuthAddr but rekeying not supported") &&
- !strings.Contains(err.Error(), "txn dead") {
- a.NoErrorf(err, "error message should be one of :\n%s\n%s", "nonempty AuthAddr but rekeying not supported", "txn dead")
+ // a non-empty err means the upgrade has not happened yet (as expected); verify the error
+ if err != nil {
+ // should be either "nonempty AuthAddr but rekeying not supported" or "txn dead"
+ if !strings.Contains(err.Error(), "nonempty AuthAddr but rekeying not supported") &&
+ !strings.Contains(err.Error(), "txn dead") {
+ a.NoErrorf(err, "error message should be one of :\n%s\n%s", "nonempty AuthAddr but rekeying not supported", "txn dead")
+ }
+ } else {
+ // if we had no error it must mean that we've upgraded already. Verify that.
+ curStatus, err := client.Status()
+ a.NoError(err)
+ a.NotEqual(consensusTestUnupgradedProtocol, protocol.ConsensusVersion(curStatus.LastVersion))
}
// go to upgrade
@@ -107,7 +125,6 @@ func TestRekeyUpgrade(t *testing.T) {
a.NoError(err)
startLoopTime := time.Now()
-
// wait until the network upgrade : this can take a while.
for protocol.ConsensusVersion(curStatus.LastVersion) != consensusTestFastUpgrade(firstProtocolWithApplicationSupport) {
curStatus, err = client.Status()
@@ -118,7 +135,7 @@ func TestRekeyUpgrade(t *testing.T) {
round = curStatus.LastRound
}
- // now that the network alrady upgraded:
+ // now that the network already upgraded:
tx, err = client.ConstructPayment(accountA, accountB, fee, amount, nil, "", lease, basics.Round(round), basics.Round(round+1000))
a.NoError(err)
diff --git a/components/nodeContext.go b/test/linttest/lintissues.go
index 98dd8bb8c..1a6a43618 100644
--- a/components/nodeContext.go
+++ b/test/linttest/lintissues.go
@@ -14,22 +14,23 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package components
+package linttest
-import "github.com/algorand/go-algorand/data/basics"
+import (
+ "fmt"
+)
-// NodeContext is an interface representing various context information regarding
-// a specific node instance (per AlgorandFullNode)
-type NodeContext interface {
- // IsCatchingUp returns true if our sync routine is currently running
- IsCatchingUp() bool
-
- // IsInitialCatchupComplete returns true if the initial sync has completed (doesn't mean it succeeded)
- IsInitialCatchupComplete() bool
+type myStruct struct {
+ a int32
+ b float64
+ c bool
+}
- // HasCaughtUp returns true if we have completely caught up at least once
- HasCaughtUp() bool
+func (m *myStruct) couldError() error {
+ return fmt.Errorf("an error occurred")
+}
- // SetLastLiveRound is called to record observation of a round completion
- SetLastLiveRound(round basics.Round)
+func doSomething() {
+ m := myStruct{a: 2, b: 2.0}
+ m.couldError()
}
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index 8a0362149..626bb53df 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -100,53 +100,50 @@ export GOPATH=$(go env GOPATH)
# Change current directory to test/scripts so we can just use ./test.sh to exec.
cd "${SCRIPT_PATH}"
-if [ "${SKIP_E2E_SUBS}" = "" ]; then
+if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" = "SCRIPTS" ]; then
-./timeout 200 ./e2e_basic_start_stop.sh
-duration "e2e_basic_start_stop.sh"
+ ./timeout 200 ./e2e_basic_start_stop.sh
+ duration "e2e_basic_start_stop.sh"
-python3 -m venv "${TEMPDIR}/ve"
-. "${TEMPDIR}/ve/bin/activate"
-"${TEMPDIR}/ve/bin/pip3" install --upgrade pip
-"${TEMPDIR}/ve/bin/pip3" install --upgrade py-algorand-sdk cryptography
-duration "e2e client setup"
+ python3 -m venv "${TEMPDIR}/ve"
+ . "${TEMPDIR}/ve/bin/activate"
+ "${TEMPDIR}/ve/bin/pip3" install --upgrade pip
+ "${TEMPDIR}/ve/bin/pip3" install --upgrade py-algorand-sdk cryptography
+ duration "e2e client setup"
-"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.sh
-duration "parallel client runner"
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.sh
+ duration "parallel client runner"
-for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh
-done
-duration "vdir client runners"
-
-for script in "$SRCROOT"/test/scripts/e2e_subs/serial/*; do
- "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} $script
-done
-duration "serial client runners"
+ for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh
+ done
+ duration "vdir client runners"
-deactivate
+ for script in "$SRCROOT"/test/scripts/e2e_subs/serial/*; do
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} $script
+ done
+ duration "serial client runners"
-fi # if $SKIP_E2E_SUBS = ""
+ deactivate
+fi # if E2E_TEST_FILTER = "" or = "SCRIPTS"
-if [ "${E2E_SUBS_ONLY}" != "" ]; then
- exit 0
-fi
+if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" = "GO" ]; then
+ # Export our root temp folder as 'TESTDIR' for tests to use as their root test folder
+ # This allows us to clean up everything with our rm -rf trap.
+ export TESTDIR=${TEMPDIR}
+ export TESTDATADIR=${SRCROOT}/test/testdata
+ export SRCROOT=${SRCROOT}
-# Export our root temp folder as 'TESTDIR' for tests to use as their root test folder
-# This allows us to clean up everything with our rm -rf trap.
-export TESTDIR=${TEMPDIR}
-export TESTDATADIR=${SRCROOT}/test/testdata
-export SRCROOT=${SRCROOT}
+ ./e2e_go_tests.sh ${GO_TEST_ARGS}
+ duration "e2e_go_tests.sh"
-./e2e_go_tests.sh ${GO_TEST_ARGS}
-duration "e2e_go_tests.sh"
+ rm -rf "${TEMPDIR}"
-rm -rf "${TEMPDIR}"
-
-if ! ${NO_BUILD} ; then
- rm -rf ${PKG_ROOT}
-fi
+ if ! ${NO_BUILD} ; then
+ rm -rf ${PKG_ROOT}
+ fi
-echo "----------------------------------------------------------------------"
-echo " DONE: E2E"
-echo "----------------------------------------------------------------------"
+ echo "----------------------------------------------------------------------"
+ echo " DONE: E2E"
+ echo "----------------------------------------------------------------------"
+fi # if E2E_TEST_FILTER = "" or = "GO"
diff --git a/test/scripts/e2e_subs/e2e-app-extra-pages.sh b/test/scripts/e2e_subs/e2e-app-extra-pages.sh
index 9c32cef59..c20b0eddb 100755
--- a/test/scripts/e2e_subs/e2e-app-extra-pages.sh
+++ b/test/scripts/e2e_subs/e2e-app-extra-pages.sh
@@ -77,7 +77,7 @@ fi
# App create with extra pages, v4 teal
RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_V4_FILE}" --clear-prog "${BIG_TEAL_V4_FILE}" --extra-pages 3 --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 2>&1 || true)
-EXPERROR="pc=704 dynamic cost budget of 700 exceeded, executing intc_0"
+EXPERROR="pc=704 dynamic cost budget exceeded, executing intc_0: remaining budget is 700 but program cost was 701"
if [[ $RES != *"${EXPERROR}"* ]]; then
date '+app-extra-pages-test FAIL the application creation should fail %Y%m%d_%H%M%S'
false
diff --git a/test/scripts/e2e_subs/teal-app-params.sh b/test/scripts/e2e_subs/teal-app-params.sh
index 89b30d836..6fc83e89e 100755
--- a/test/scripts/e2e_subs/teal-app-params.sh
+++ b/test/scripts/e2e_subs/teal-app-params.sh
@@ -17,7 +17,7 @@ gcmd="goal -w ${WALLET}"
ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
-APPID=$(${gcmd} app create --creator "$ACCOUNT" --approval-prog=${TEAL}/app-params.teal --clear-prog=${TEAL}/approve-all.teal --global-byteslices 1 --global-ints 2 --local-byteslices 3 --local-ints 4 --extra-pages 2 --app-arg "addr:$ACCOUNT" | grep Created | awk '{ print $6 }')
+APPID=$(${gcmd} app create --creator "$ACCOUNT" --approval-prog=${TEAL}/app-params.teal --clear-prog=${TEAL}/approve-all.teal --global-byteslices 1 --global-ints 2 --local-byteslices 3 --local-ints 4 --extra-pages 2 --app-arg "addr:$ACCOUNT" | grep Created | awk '{ print $6 }')
ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
${gcmd} clerk send -f "$ACCOUNT" -t "$ACCOUNTB" -a 1000000
@@ -25,6 +25,15 @@ ${gcmd} clerk send -f "$ACCOUNT" -t "$ACCOUNTB" -a 1000000
# Now call from a different account
${gcmd} app call --app-id="$APPID" --from="$ACCOUNTB" --app-arg "addr:$ACCOUNT"
+# The below checks use quine.teal to test "app_params_get AppApprovalProgram"
+# Verify "app_params_get AppApprovalProgram" works on create
+APPID_2=$(${gcmd} app create --creator "$ACCOUNTB" --approval-prog=${TEAL}/quine.teal --clear-prog=${TEAL}/approve-all.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 --extra-pages 0 | grep Created | awk '{ print $6 }')
+
+# Verify "app_params_get AppApprovalProgram" works on regular app call
+${gcmd} app call --app-id="$APPID_2" --from="$ACCOUNTB"
+
+# Verify "app_params_get AppApprovalProgram" works on update
+${gcmd} app update --app-id="$APPID_2" --from="$ACCOUNTB" --approval-prog=${TEAL}/approve-all.teal --clear-prog=${TEAL}/approve-all.teal
date "+${scriptname} OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/e2e_subs/tealprogs/quine.teal b/test/scripts/e2e_subs/tealprogs/quine.teal
new file mode 100644
index 000000000..d951186a8
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/quine.teal
@@ -0,0 +1,22 @@
+#pragma version 5
+// To modify the program:
+// 1. Replace the first line with `pushbytes ""`.
+// 2. Compile the program.
+// 3. Replace the first line with `pushbytes <compiled bytecode>`
+// 4. Update the varuint length of the new bytecode (line 11)
+// 5. The quine is complete. Compile again.
+pushbytes 0x0580004957000280011a504f0149570300505081007200441243
+dup
+extract 0 2
+pushbytes 0x1a // the varuint length of 0x0580...
+concat
+uncover 1
+dup
+extract 3 0 // the range here must be updated if the varuint length is longer than 1 byte
+concat
+concat // after this line the whole program is on the stack
+pushint 0
+app_params_get AppApprovalProgram
+assert
+==
+return