author	Will Winder <wwinder.unh@gmail.com>	2022-06-14 09:01:18 -0400
committer	GitHub <noreply@github.com>	2022-06-14 09:01:18 -0400
commit	4279c6667b28c7790f0a7b4209401a41bfdb585e (patch)
tree	36f7d87fd5de7b8ba542d197fa8ebbb329e0fb37
parent	6e07cc5d4845cab1f10d00e955a64a9088602a1e (diff)
parent	bd0f9d6d75fe55db47b02e629bda09cd628472d6 (diff)
Merge branch 'master' into 3989-goal-network-catchup-automatically-fetch-catchpoint-if-not-provided
-rw-r--r--  .circleci/config.yml  22
-rw-r--r--  .github/workflows/README.md  26
-rw-r--r--  .github/workflows/benchmarks.yml  51
-rw-r--r--  agreement/fuzzer/bandwidthFilter_test.go  4
-rw-r--r--  agreement/gossip/network.go  6
-rw-r--r--  agreement/pseudonode.go  4
-rw-r--r--  cmd/algons/dnsCmd.go  35
-rw-r--r--  cmd/algorelay/relayCmd.go  24
-rw-r--r--  cmd/buildtools/genesis.go  21
-rw-r--r--  cmd/goal/account.go  293
-rw-r--r--  cmd/goal/clerk.go  6
-rw-r--r--  cmd/pingpong/runCmd.go  18
-rw-r--r--  cmd/tealdbg/main.go  12
-rwxr-xr-x  cmd/updater/update.sh  123
-rw-r--r--  components/mocks/mockParticipationRegistry.go  5
-rw-r--r--  config/consensus.go  7
-rw-r--r--  config/localTemplate.go  5
-rw-r--r--  config/local_defaults.go  3
-rw-r--r--  daemon/algod/api/algod.oas2.json  10
-rw-r--r--  daemon/algod/api/algod.oas3.yml  10
-rw-r--r--  daemon/algod/api/server/v2/dryrun.go  23
-rw-r--r--  daemon/algod/api/server/v2/dryrun_test.go  65
-rw-r--r--  daemon/algod/api/server/v2/generated/private/routes.go  296
-rw-r--r--  daemon/algod/api/server/v2/generated/private/types.go  8
-rw-r--r--  daemon/algod/api/server/v2/generated/routes.go  398
-rw-r--r--  daemon/algod/api/server/v2/generated/types.go  8
-rw-r--r--  daemon/algod/api/server/v2/handlers.go  1
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_test.go  1
-rw-r--r--  daemon/algod/server.go  12
-rw-r--r--  data/account/participationRegistry.go  15
-rw-r--r--  data/accountManager.go  7
-rw-r--r--  data/bookkeeping/genesis.go  45
-rw-r--r--  data/bookkeeping/genesis_test.go  157
-rw-r--r--  data/transactions/logic/README.md  6
-rw-r--r--  data/transactions/logic/README_in.md  3
-rw-r--r--  data/transactions/logic/TEAL_opcodes.md  37
-rw-r--r--  data/transactions/logic/assembler.go  120
-rw-r--r--  data/transactions/logic/assembler_test.go  109
-rw-r--r--  data/transactions/logic/doc.go  11
-rw-r--r--  data/transactions/logic/eval.go  129
-rw-r--r--  data/transactions/logic/evalAppTxn_test.go  27
-rw-r--r--  data/transactions/logic/evalCrypto_test.go  128
-rw-r--r--  data/transactions/logic/evalStateful_test.go  4
-rw-r--r--  data/transactions/logic/eval_test.go  229
-rw-r--r--  data/transactions/logic/jsonspec.md  6
-rw-r--r--  data/transactions/logic/jsonspec_test.go  4
-rw-r--r--  data/transactions/logic/langspec.json  40
-rw-r--r--  data/transactions/logic/opcodes.go  56
-rw-r--r--  data/transactions/logic/pairing.go  116
-rw-r--r--  data/transactions/logic/pairing_test.go  29
-rw-r--r--  data/transactions/logic/teal.tmLanguage.json  2
-rw-r--r--  data/transactions/transaction.go  6
-rw-r--r--  go.mod  8
-rw-r--r--  go.sum  17
-rw-r--r--  installer/config.json.example  3
-rw-r--r--  installer/external/node_exporter-stable-darwin-x86_64.tar.gz  bin  5359650 -> 6901398 bytes
-rw-r--r--  installer/external/node_exporter-stable-linux-x86_64.tar.gz  bin  4982242 -> 7142051 bytes
-rw-r--r--  ledger/.gitignore  1
-rw-r--r--  ledger/accountdb_test.go  4
-rw-r--r--  ledger/acctupdates_test.go  10
-rw-r--r--  ledger/catchpointtracker_test.go  4
-rw-r--r--  ledger/internal/apptxn_test.go  243
-rw-r--r--  ledger/internal/eval_blackbox_test.go  12
-rw-r--r--  ledger/testing/randomAccounts.go  96
-rw-r--r--  libgoal/libgoal.go  26
-rw-r--r--  libgoal/participation.go  208
-rw-r--r--  libgoal/transactions.go  109
-rw-r--r--  logging/log.go  6
-rw-r--r--  logging/telemetryConfig.go  10
-rw-r--r--  logging/telemetryhook.go  2
-rw-r--r--  logging/telemetryspec/event.go  22
-rw-r--r--  netdeploy/networkTemplate.go  1
-rw-r--r--  netdeploy/remote/deployedNetwork.go  2
-rw-r--r--  netdeploy/remote/nodecfg/nodeConfigurator.go  11
-rw-r--r--  network/requestTracker.go  10
-rw-r--r--  network/wsNetwork.go  179
-rw-r--r--  network/wsNetwork_test.go  540
-rw-r--r--  network/wsPeer.go  20
-rw-r--r--  network/wsPeer_test.go  4
-rw-r--r--  node/node.go  100
-rwxr-xr-x  scripts/compute_branch.sh  13
-rwxr-xr-x  scripts/install_linux_deps.sh  20
-rw-r--r--  shared/pingpong/accounts.go  40
-rw-r--r--  shared/pingpong/config.go  4
-rw-r--r--  shared/pingpong/pingpong.go  57
-rw-r--r--  test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp  2
-rw-r--r--  test/e2e-go/cli/goal/expect/pingpongTest.exp  44
-rw-r--r--  test/e2e-go/features/participation/accountParticipationTransitions_test.go  4
-rw-r--r--  test/e2e-go/features/participation/onlineOfflineParticipation_test.go  12
-rw-r--r--  test/e2e-go/features/participation/overlappingParticipationKeys_test.go  69
-rw-r--r--  test/e2e-go/features/participation/participationExpiration_test.go  8
-rw-r--r--  test/e2e-go/features/transactions/onlineStatusChange_test.go  19
-rw-r--r--  test/e2e-go/stress/transactions/createManyAndGoOnline_test.go  4
-rw-r--r--  test/heapwatch/bwstart.sh  4
-rwxr-xr-x  test/heapwatch/start.sh  4
-rwxr-xr-x  test/scripts/e2e_subs/goal-partkey-commands.sh  100
-rwxr-xr-x  test/scripts/e2e_subs/goal-partkey-information.sh  47
-rw-r--r--  test/testdata/configs/config-v22.json  105
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/Makefile  15
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py  32
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/genesis.json  64
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/net.json  232
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/node.json  10
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json  5
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/recipe.json  7
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/relay.json  11
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/topology.json  32
-rw-r--r--  test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json  216
-rw-r--r--  test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json  4
-rw-r--r--  test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json  2
-rw-r--r--  test/testdata/deployednettemplates/recipes/custom/configs/node.json  4
-rw-r--r--  test/testdata/deployednettemplates/recipes/custom/example/configs/node.json  4
-rw-r--r--  tools/network/cloudflare/cloudflare.go  55
-rw-r--r--  tools/network/cloudflare/createRecord.go  14
-rw-r--r--  tools/network/cloudflare/deleteRecord.go  4
-rw-r--r--  tools/network/cloudflare/helpers.go  5
-rw-r--r--  tools/network/cloudflare/listRecords.go  4
-rw-r--r--  tools/network/cloudflare/updateRecord.go  8
-rw-r--r--  tools/network/cloudflare/zones.go  10
-rw-r--r--  util/metrics/counter.go  14
-rw-r--r--  util/metrics/counter_test.go  33
-rw-r--r--  util/metrics/gauge_test.go  2
-rw-r--r--  util/metrics/metrics_test.go  29
-rw-r--r--  util/metrics/registryCommon.go  8
-rw-r--r--  util/metrics/registry_test.go  2
-rw-r--r--  util/metrics/runtime.go  137
-rw-r--r--  util/metrics/runtime_test.go  59
-rw-r--r--  util/metrics/segment.go  90
-rw-r--r--  util/metrics/segment_test.go  119
-rw-r--r--  util/metrics/tagcounter.go  42
-rw-r--r--  util/metrics/tagcounter_test.go  34
131 files changed, 4499 insertions, 1795 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index b911cafed..56b1c9d80 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -217,10 +217,12 @@ commands:
- restore_libsodium
- restore_cache:
keys:
- - 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ - 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
- restore_cache:
keys:
- - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
+ - 'go-cache-v3-{{ arch }}-'
- run:
name: scripts/travis/build.sh --make_debug
command: |
@@ -233,11 +235,11 @@ commands:
scripts/travis/build.sh --make_debug
- cache_libsodium
- save_cache:
- key: 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ key: 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
paths:
- << parameters.build_dir >>/go/pkg/mod
- save_cache:
- key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
+ key: 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
paths:
- tmp/go-cache
- persist_to_workspace:
@@ -257,7 +259,7 @@ commands:
mkdir -p tmp
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- save_cache:
- key: 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
+ key: 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
paths:
- crypto/libs
@@ -271,7 +273,7 @@ commands:
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- restore_cache:
keys:
- - 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
+ - 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
generic_test:
description: Run build tests from build workspace, for re-use by different architectures
@@ -301,7 +303,9 @@ commands:
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json
- restore_cache:
keys:
- - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
+ - 'go-cache-v3-{{ arch }}-'
- run:
name: Run build tests
no_output_timeout: << parameters.no_output_timeout >>
@@ -333,10 +337,6 @@ commands:
root: << parameters.result_path >>
paths:
- << parameters.result_subdir >>
- - save_cache:
- key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
- paths:
- - tmp/go-cache
upload_coverage:
description: Collect coverage reports and upload them
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 000000000..e1c3fa847
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,26 @@
+# GitHub Actions Workflows
+
+## Benchmarking Performance Tests
+`benchmarks.yml` contains a workflow to check for any performance regressions or
+improvements in benchmark tests.
+
+It uses
+[github-action-benchmark](https://github.com/benchmark-action/github-action-benchmark)
+to check performance diffs between a PR and the `master` branch, comments if
+there is a regression past a certain threshold (default: `200%`), and generates
+a performance diff JSON between consecutive commits in the `master` branch in
+the `gh-pages` branch (the JSON is visualized into a graph that can be seen at:
+https://algorand.github.io/go-algorand/dev/bench/).
+
+### Adding benchmark tests
+Add run steps or extend existing benchmark invocations in the `Run benchmark`
+step. Additional benchmarks can be run using the `-bench` flag. Since only a
+few benchmarks are run by the workflow, there are _no_ formal groupings or
+naming conventions.
+
+### CI Variance
+There may be some variance between runs because GitHub Actions might spin up a
+different machine each time (e.g. Intel Xeon 8370C vs 8171M; the latter might
+run benchmarks slightly slower). Empirically, the variance seems to be 10~30%
+for the most part. Due to this environment variance, the workflow is most
+suitable for finding _large_ performance degradations.
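As a rough illustration of the kind of test this workflow compares across commits, here is a minimal Go benchmark sketch; the name and body are illustrative, not taken from the repository:

```go
package logic_test

import "testing"

// A stand-in for benchmarks like BenchmarkUintMath: the operation under
// test runs b.N times and `go test -bench` reports ns/op for it.
func BenchmarkUintAdd(b *testing.B) {
	var acc uint64
	for i := 0; i < b.N; i++ {
		acc += uint64(i)
	}
	_ = acc // keep the accumulator live so the loop is not optimized away
}
```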
diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml
new file mode 100644
index 000000000..65c0e90bf
--- /dev/null
+++ b/.github/workflows/benchmarks.yml
@@ -0,0 +1,51 @@
+name: "Benchmark workflow"
+on:
+ # Push and update benchmarks when a commit is merged into master.
+ push:
+ branches:
+ - master
+ # Trigger benchmark test on this PR's commit against master.
+ pull_request:
+ branches:
+ - master
+permissions:
+ # Push benchmark performance graph to gh-pages branch.
+ contents: write
+ deployments: write
+jobs:
+ benchmark:
+ name: Performance regression check
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v3
+ with:
+ go-version-file: 'go.mod'
+ - run: go version
+ - name: Build go-algorand
+ run: scripts/travis/build.sh
+ # BenchmarkUintMath - Serves as a proxy for AVM `eval` performance.
+ # Performance degradations suggest either or both: (1) op code
+ # degradation, (2) `eval` degradation. (2) suggests a broader performance
+ # issue.
+ - name: Run benchmark
+ run: go test ./data/transactions/logic -bench 'BenchmarkUintMath' | tee benchmark_output.txt
+ - name: Push benchmark result to gh-pages branch
+ if: github.event_name == 'push'
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ name: Go Benchmark
+ tool: 'go'
+ output-file-path: benchmark_output.txt
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ auto-push: true
+ - name: Evaluate benchmark on PR branch
+ if: github.event.pull_request
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ name: Go Benchmark
+ tool: 'go'
+ output-file-path: benchmark_output.txt
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ alert-threshold: '200%'
+ comment-on-alert: true
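The `Run benchmark` step above can be reproduced locally with the same invocation, `go test ./data/transactions/logic -bench 'BenchmarkUintMath'`; comparing two such outputs by hand (or with a tool such as `benchstat` from `golang.org/x/perf`) approximates what github-action-benchmark automates in this workflow.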
diff --git a/agreement/fuzzer/bandwidthFilter_test.go b/agreement/fuzzer/bandwidthFilter_test.go
index e06ff7260..e5a31bf2e 100644
--- a/agreement/fuzzer/bandwidthFilter_test.go
+++ b/agreement/fuzzer/bandwidthFilter_test.go
@@ -167,7 +167,7 @@ func (n *BandwidthFilter) Tick(newClockTime int) bool {
n.downstreamMutex.Lock()
if n.downstreamDataSize < 0 && n.downstreamQueue.Len() == 0 {
if n.debugMessageLevel >= 1 {
- fmt.Printf("node: %d, tick: %d, reseting queued downstream capacity %d -> 0\n", n.nodeID, n.currentTick, n.upstreamDataSize)
+ fmt.Printf("node: %d, tick: %d, resetting queued downstream capacity %d -> 0\n", n.nodeID, n.currentTick, n.upstreamDataSize)
}
n.downstreamDataSize = 0
}
@@ -179,7 +179,7 @@ func (n *BandwidthFilter) Tick(newClockTime int) bool {
// adjust the upstream size.
if n.upstreamDataSize < 0 && n.upstreamQueue.Len() == 0 {
if n.debugMessageLevel >= 1 {
- fmt.Printf("node: %d, tick: %d, reseting queued upstream capacity %d -> 0\n", n.nodeID, n.currentTick, n.upstreamDataSize)
+ fmt.Printf("node: %d, tick: %d, resetting queued upstream capacity %d -> 0\n", n.nodeID, n.currentTick, n.upstreamDataSize)
}
n.upstreamDataSize = 0
}
diff --git a/agreement/gossip/network.go b/agreement/gossip/network.go
index e6b7d77aa..43eefd2db 100644
--- a/agreement/gossip/network.go
+++ b/agreement/gossip/network.go
@@ -32,9 +32,11 @@ import (
)
var messagesHandledTotal = metrics.MakeCounter(metrics.AgreementMessagesHandled)
-var messagesHandledByType = metrics.NewTagCounter("algod_agreement_handled_{TAG}", "Number of agreement messages handled per type")
+var messagesHandledByType = metrics.NewTagCounter("algod_agreement_handled_{TAG}", "Number of agreement {TAG} messages handled",
+ agreementVoteMessageType, agreementProposalMessageType, agreementBundleMessageType)
var messagesDroppedTotal = metrics.MakeCounter(metrics.AgreementMessagesDropped)
-var messagesDroppedByType = metrics.NewTagCounter("algod_agreement_dropped_{TAG}", "Number of agreement messages handled per type")
+var messagesDroppedByType = metrics.NewTagCounter("algod_agreement_dropped_{TAG}", "Number of agreement {TAG} messages dropped",
+ agreementVoteMessageType, agreementProposalMessageType, agreementBundleMessageType)
const (
agreementVoteMessageType = "vote"
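This hunk (and the pseudonode one below) now passes the known tags to `NewTagCounter` up front rather than letting them be created lazily on first use. A minimal usage sketch, assuming the `util/metrics` API behaves as these call sites suggest (`NewTagCounter(name, description, tags...)` plus an `Add(tag, delta)` method):

```go
package example

import "github.com/algorand/go-algorand/util/metrics"

var handledByType = metrics.NewTagCounter(
	"algod_agreement_handled_{TAG}",
	"Number of agreement {TAG} messages handled",
	"vote", "proposal", "bundle", // tags registered eagerly, not on first use
)

func recordHandled(tag string) {
	handledByType.Add(tag, 1) // {TAG} in the metric name is filled in per tag
}
```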
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index bdaa2f359..06a91b210 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -44,8 +44,8 @@ var errPseudonodeVerifierClosedChannel = errors.New("crypto verifier closed the
var errPseudonodeNoVotes = errors.New("no valid participation keys to generate votes for given round")
var errPseudonodeNoProposals = errors.New("no valid participation keys to generate proposals for given round")
-var pseudonodeBacklogFullByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_dropped_{TAG}", "Number of pseudonode tasks dropped per type")
-var pseudonodeResultTimeoutsByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_timeouts_{TAG}", "Number of pseudonode task result timeouts per type")
+var pseudonodeBacklogFullByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_dropped_{TAG}", "Number of pseudonode {TAG} tasks dropped", "proposal", "vote")
+var pseudonodeResultTimeoutsByType = metrics.NewTagCounter("algod_agreement_pseudonode_tasks_timeouts_{TAG}", "Number of pseudonode {TAG} task result timeouts", "vote", "pvote", "ppayload")
// A pseudonode creates proposals and votes with a KeyManager which holds participation keys.
//
diff --git a/cmd/algons/dnsCmd.go b/cmd/algons/dnsCmd.go
index b41b45f13..c6b6a54f8 100644
--- a/cmd/algons/dnsCmd.go
+++ b/cmd/algons/dnsCmd.go
@@ -183,12 +183,12 @@ var exportCmd = &cobra.Command{
}
func doAddDNS(from string, to string) (err error) {
- cfZoneID, cfEmail, cfKey, err := getClouldflareCredentials()
+ cfZoneID, cfToken, err := getClouldflareCredentials()
if err != nil {
return fmt.Errorf("error getting DNS credentials: %v", err)
}
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
const priority = 1
const proxied = false
@@ -208,17 +208,16 @@ func doAddDNS(from string, to string) (err error) {
return
}
-func getClouldflareAuthCredentials() (email string, authKey string, err error) {
- email = os.Getenv("CLOUDFLARE_EMAIL")
- authKey = os.Getenv("CLOUDFLARE_AUTH_KEY")
- if email == "" || authKey == "" {
- err = fmt.Errorf("one or more credentials missing from ENV")
+func getClouldflareAuthCredentials() (token string, err error) {
+ token = os.Getenv("CLOUDFLARE_API_TOKEN")
+ if token == "" {
+ err = fmt.Errorf("CLOUDFLARE_API_TOKEN credential missing from ENV")
}
return
}
-func getClouldflareCredentials() (zoneID string, email string, authKey string, err error) {
- email, authKey, err = getClouldflareAuthCredentials()
+func getClouldflareCredentials() (zoneID string, token string, err error) {
+ token, err = getClouldflareAuthCredentials()
if err != nil {
return
}
@@ -309,13 +308,13 @@ func doDeleteDNS(network string, noPrompt bool, excludePattern string, includePa
return false
}
- cfZoneID, cfEmail, cfKey, err := getClouldflareCredentials()
+ cfZoneID, cfToken, err := getClouldflareCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return false
}
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
idsToDelete := make(map[string]string) // Maps record ID to Name
services := []string{"_algobootstrap", "_metrics"}
@@ -420,13 +419,13 @@ func doDeleteDNS(network string, noPrompt bool, excludePattern string, includePa
}
func listEntries(listNetwork string, recordType string) {
- cfZoneID, cfEmail, cfKey, err := getClouldflareCredentials()
+ cfZoneID, cfToken, err := getClouldflareCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return
}
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
recordTypes := []string{"A", "CNAME", "SRV"}
if recordType != "" {
recordTypes = []string{recordType}
@@ -447,12 +446,12 @@ func listEntries(listNetwork string, recordType string) {
}
func doExportZone(network string, outputFilename string) bool {
- cfEmail, cfKey, err := getClouldflareAuthCredentials()
+ cfToken, err := getClouldflareAuthCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return false
}
- cloudflareCred := cloudflare.NewCred(cfEmail, cfKey)
+ cloudflareCred := cloudflare.NewCred(cfToken)
zones, err := cloudflareCred.GetZones(context.Background())
if err != nil {
fmt.Fprintf(os.Stderr, "Error retrieving zones entries: %v\n", err)
@@ -471,7 +470,7 @@ func doExportZone(network string, outputFilename string) bool {
fmt.Fprintf(os.Stderr, "No matching zoneID was found for %s\n", network)
return false
}
- cloudflareDNS := cloudflare.NewDNS(zoneID, cfEmail, cfKey)
+ cloudflareDNS := cloudflare.NewDNS(zoneID, cfToken)
exportedZone, err := cloudflareDNS.ExportZone(context.Background())
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to export zone : %v\n", err)
@@ -490,12 +489,12 @@ func doExportZone(network string, outputFilename string) bool {
}
func doListZones() bool {
- cfEmail, cfKey, err := getClouldflareAuthCredentials()
+ cfToken, err := getClouldflareAuthCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "error getting DNS credentials: %v", err)
return false
}
- cloudflareCred := cloudflare.NewCred(cfEmail, cfKey)
+ cloudflareCred := cloudflare.NewCred(cfToken)
zones, err := cloudflareCred.GetZones(context.Background())
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing zones entries: %v\n", err)
diff --git a/cmd/algorelay/relayCmd.go b/cmd/algorelay/relayCmd.go
index 647c438a6..ff281313b 100644
--- a/cmd/algorelay/relayCmd.go
+++ b/cmd/algorelay/relayCmd.go
@@ -40,8 +40,7 @@ var (
dnsBootstrapArg string // e.g. mainnet or testnet
recordIDArg int64
- cfEmail string
- cfAuthKey string
+ cfToken string
)
var nameRecordTypes = []string{"A", "CNAME", "SRV"}
@@ -50,10 +49,9 @@ var srvRecordTypes = []string{"SRV"}
const metricsPort = uint16(9100)
func init() {
- cfEmail = os.Getenv("CLOUDFLARE_EMAIL")
- cfAuthKey = os.Getenv("CLOUDFLARE_AUTH_KEY")
- if cfEmail == "" || cfAuthKey == "" {
- panic(makeExitError(1, "One or more credentials missing from ENV"))
+ cfToken = os.Getenv("CLOUDFLARE_API_TOKEN")
+ if cfToken == "" {
+ panic(makeExitError(1, "CLOUDFLARE_API_TOKEN credential missing from ENV"))
}
rootCmd.AddCommand(checkCmd)
@@ -125,7 +123,7 @@ type srvService struct {
}
func makeDNSContext() *dnsContext {
- cloudflareCred := cloudflare.NewCred(cfEmail, cfAuthKey)
+ cloudflareCred := cloudflare.NewCred(cfToken)
nameZoneID, err := cloudflareCred.GetZoneID(context.Background(), nameDomainArg)
if err != nil {
@@ -543,7 +541,7 @@ func getTargetDNSChain(nameEntries map[string]string, target string) (names []st
func getReverseMappedEntries(nameZoneID string, recordTypes []string) (reverseMap map[string]string, err error) {
reverseMap = make(map[string]string)
- cloudflareDNS := cloudflare.NewDNS(nameZoneID, cfEmail, cfAuthKey)
+ cloudflareDNS := cloudflare.NewDNS(nameZoneID, cfToken)
for _, recType := range recordTypes {
var records []cloudflare.DNSRecordResponseEntry
@@ -569,7 +567,7 @@ func getReverseMappedEntries(nameZoneID string, recordTypes []string) (reverseMa
func getSrvRecords(serviceName string, networkName, zoneID string) (service srvService, err error) {
service = makeService(serviceName, networkName)
- cloudflareDNS := cloudflare.NewDNS(zoneID, cfEmail, cfAuthKey)
+ cloudflareDNS := cloudflare.NewDNS(zoneID, cfToken)
var records []cloudflare.DNSRecordResponseEntry
records, err = cloudflareDNS.ListDNSRecord(context.Background(), "SRV", service.serviceName, "", "", "", "")
@@ -601,7 +599,7 @@ func getSrvRecords(serviceName string, networkName, zoneID string) (service srvS
}
func addDNSRecord(from string, to string, cfZoneID string) error {
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfAuthKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
const priority = 1
const proxied = false
@@ -620,7 +618,7 @@ func addDNSRecord(from string, to string, cfZoneID string) error {
}
func addSRVRecord(srvNetwork string, target string, port uint16, serviceShortName string, cfZoneID string) error {
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfAuthKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
const priority = 1
const weight = 1
@@ -629,7 +627,7 @@ func addSRVRecord(srvNetwork string, target string, port uint16, serviceShortNam
}
func clearSRVRecord(srvNetwork string, target string, serviceShortName string, cfZoneID string) error {
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfAuthKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
return cloudflareDNS.ClearSRVRecord(context.Background(), srvNetwork, target, serviceShortName, "_tcp")
}
@@ -642,7 +640,7 @@ func deleteDNSRecord(from string, to string, cfZoneID string) (err error) {
recordType = "CNAME"
}
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfAuthKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
var records []cloudflare.DNSRecordResponseEntry
records, err = cloudflareDNS.ListDNSRecord(context.Background(), recordType, "", "", "", "", "")
diff --git a/cmd/buildtools/genesis.go b/cmd/buildtools/genesis.go
index 079c76d61..98cb60ca6 100644
--- a/cmd/buildtools/genesis.go
+++ b/cmd/buildtools/genesis.go
@@ -152,8 +152,7 @@ var dumpGenesisHashCmd = &cobra.Command{
os.Exit(1)
}
- hash := crypto.HashObj(genesis)
- fmt.Print(hash.String())
+ fmt.Print(genesis.Hash().String())
},
}
@@ -237,7 +236,7 @@ func ensureReleaseGenesis(src bookkeeping.Genesis, releaseFile string) (err erro
return fmt.Errorf("error saving file: %v", err)
}
- hash := crypto.HashObj(releaseGenesis)
+ hash := releaseGenesis.Hash()
err = ioutil.WriteFile(releaseFileHash, []byte(hash.String()), 0666)
if err != nil {
return fmt.Errorf("error saving hash file '%s': %v", releaseFileHash, err)
@@ -261,8 +260,20 @@ func verifyReleaseGenesis(src bookkeeping.Genesis, releaseFile string) (updateGe
func verifyGenesisHashes(src, release bookkeeping.Genesis, hashFile string) (err error) {
src.Timestamp = release.Timestamp
- srcHash := crypto.HashObj(src)
- releaseHash := crypto.HashObj(release)
+ srcHash := src.Hash()
+ releaseHash := release.Hash()
+
+ srcHashCrypto := crypto.HashObj(src)
+ releaseHashCrypto := crypto.HashObj(release)
+
+ if srcHash != srcHashCrypto {
+ return fmt.Errorf("source hashes differ - our genesis.json hashing function isn't consistent")
+ }
+
+ if releaseHash != releaseHashCrypto {
+ return fmt.Errorf("release hashes differ - genesis.json our hashing function isn't consistent")
+ }
+
if srcHash != releaseHash {
return fmt.Errorf("source and release hashes differ - genesis.json may have diverge from released version")
}
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index 10cedda3b..68563e3d9 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -25,6 +25,7 @@ import (
"path/filepath"
"sort"
"strings"
+ "time"
"github.com/spf13/cobra"
@@ -825,7 +826,7 @@ var changeOnlineCmd = &cobra.Command{
reportErrorf(err.Error())
}
err = changeAccountOnlineStatus(
- accountAddress, part, online, statusChangeTxFile, walletName,
+ accountAddress, online, statusChangeTxFile, walletName,
firstTxRound, lastTxRound, transactionFee, scLeaseBytes(cmd), dataDir, client,
)
if err != nil {
@@ -834,12 +835,16 @@ var changeOnlineCmd = &cobra.Command{
},
}
-func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnline bool, txFile string, wallet string, firstTxRound, lastTxRound, fee uint64, leaseBytes [32]byte, dataDir string, client libgoal.Client) error {
+func changeAccountOnlineStatus(
+ acct string, goOnline bool, txFile string, wallet string,
+ firstTxRound, lastTxRound, fee uint64, leaseBytes [32]byte,
+ dataDir string, client libgoal.Client,
+) error {
// Generate an unsigned online/offline tx
var utx transactions.Transaction
var err error
if goOnline {
- utx, err = client.MakeUnsignedGoOnlineTx(acct, part, firstTxRound, lastTxRound, fee, leaseBytes)
+ utx, err = client.MakeUnsignedGoOnlineTx(acct, firstTxRound, lastTxRound, fee, leaseBytes)
} else {
utx, err = client.MakeUnsignedGoOfflineTx(acct, firstTxRound, lastTxRound, fee, leaseBytes)
}
@@ -870,8 +875,8 @@ func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnl
var addParticipationKeyCmd = &cobra.Command{
Use: "addpartkey",
- Short: "Generate a participation key for the specified account",
- Long: `Generate a participation key for the specified account. This participation key can then be used for going online and participating in consensus.`,
+ Short: "Generate and install participation key for the specified account",
+ Long: `Generate and install participation key for the specified account. This participation key can then be used for going online and participating in consensus.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
dataDir := ensureSingleDataDir()
@@ -886,8 +891,9 @@ var addParticipationKeyCmd = &cobra.Command{
reportInfof("Please stand by while generating keys. This might take a few minutes...")
var err error
+ var part algodAcct.Participation
participationGen := func() {
- _, _, err = client.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir)
+ part, _, err = client.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir)
}
util.RunFuncWithSpinningCursor(participationGen)
@@ -895,7 +901,7 @@ var addParticipationKeyCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- reportInfof("Participation key generation successful")
+ reportInfof("Participation key generation successful. Participation ID: %s\n", part.ID())
version := config.GetCurrentVersion()
fmt.Println("\nGenerated with goal v" + version.String())
@@ -925,11 +931,22 @@ No --delete-input flag specified, exiting without installing key.`)
dataDir := ensureSingleDataDir()
client := ensureAlgodClient(dataDir)
- _, _, err := client.InstallParticipationKeys(partKeyFile)
+ addResponse, err := client.AddParticipationKey(partKeyFile)
if err != nil {
reportErrorf(errorRequestFail, err)
}
- fmt.Println("Participation key installed successfully")
+ // In an abundance of caution, check for ourselves that the key has been installed.
+ if err := client.VerifyParticipationKey(time.Minute, addResponse.PartId); err != nil {
+ err = fmt.Errorf("unable to verify key installation. Verify key installation with 'goal account partkeyinfo' and delete '%s', or retry the command. Error: %w", partKeyFile, err)
+ reportErrorf(errorRequestFail, err)
+ }
+
+ reportInfof("Participation key installed successfully, Participation ID: %s\n", addResponse.PartId)
+
+ // Delete partKeyFile
+ if err := os.Remove(partKeyFile); err != nil {
+ reportErrorf("An error occurred while removing the partkey file, please delete it manually: %s", err)
+ }
},
}
@@ -960,14 +977,14 @@ var renewParticipationKeyCmd = &cobra.Command{
txRoundLastValid := currentRound + proto.MaxTxnLife
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
- parts, err := client.ListParticipationKeyFiles()
+ parts, err := client.ListParticipationKeys()
if err != nil {
reportErrorf(errorRequestFail, err)
}
for _, part := range parts {
- if part.Address().String() == accountAddress {
- if part.LastValid >= basics.Round(roundLastValid) {
- reportErrorf(errExistingPartKey, roundLastValid, part.LastValid)
+ if part.Address == accountAddress {
+ if part.Key.VoteLastValid >= roundLastValid {
+ reportErrorf(errExistingPartKey, roundLastValid, part.Key.VoteLastValid)
}
}
}
@@ -988,7 +1005,7 @@ func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound,
var keyPath string
var err error
genFunc := func() {
- part, keyPath, err = client.GenParticipationKeysTo(address, currentRound, keyLastValidRound, dilution, "")
+ part, keyPath, err = client.GenParticipationKeys(address, currentRound, keyLastValidRound, dilution)
if err != nil {
err = fmt.Errorf(errorRequestFail, err)
}
@@ -1003,12 +1020,13 @@ func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound,
// Now register it as our new online participation key
goOnline := true
txFile := ""
- err = changeAccountOnlineStatus(address, &part, goOnline, txFile, wallet, currentRound, txLastValidRound, fee, leaseBytes, dataDir, client)
+ err = changeAccountOnlineStatus(address, goOnline, txFile, wallet, currentRound, txLastValidRound, fee, leaseBytes, dataDir, client)
if err != nil {
os.Remove(keyPath)
fmt.Fprintf(os.Stderr, " Error registering keys - deleting newly-generated key file: %s\n", keyPath)
+ return err
}
- return err
+ fmt.Printf("Participation key installed successfully, Participation ID: %s\n", part.ID())
+ return nil
}
var renewAllParticipationKeyCmd = &cobra.Command{
@@ -1031,19 +1049,19 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
client := ensureAlgodClient(dataDir)
// Build list of accounts to renew from all accounts with part keys present
- parts, err := client.ListParticipationKeyFiles()
+ parts, err := client.ListParticipationKeys()
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
- renewAccounts := make(map[basics.Address]algodAcct.Participation)
+ renewAccounts := make(map[string]generatedV2.ParticipationKey)
for _, part := range parts {
- if existing, has := renewAccounts[part.Address()]; has {
- if existing.LastValid >= part.LastValid {
+ if existing, has := renewAccounts[part.Address]; has {
+ if existing.Key.VoteLastValid >= part.Key.VoteLastValid {
// We already saw a partkey that expires later
continue
}
}
- renewAccounts[part.Address()] = part
+ renewAccounts[part.Address] = part
}
currentRound, err := client.CurrentRound()
@@ -1068,18 +1086,18 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
// at least through lastValidRound, generate a new key and register it.
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
for _, renewPart := range renewAccounts {
- if renewPart.LastValid >= basics.Round(lastValidRound) {
- fmt.Printf(" Skipping account %s: Already has a part key valid beyond %d (currently %d)\n", renewPart.Address(), lastValidRound, renewPart.LastValid)
+ if renewPart.Key.VoteLastValid >= lastValidRound {
+ fmt.Printf(" Skipping account %s: Already has a part key valid beyond %d (currently %d)\n", renewPart.Address, lastValidRound, renewPart.Key.VoteLastValid)
continue
}
// If the account's latest partkey expired before the current round, don't automatically renew and instead instruct the user to explicitly renew it.
- if renewPart.LastValid < basics.Round(lastValidRound) {
- fmt.Printf(" Skipping account %s: This account has part keys that have expired. Please renew this account explicitly using 'renewpartkey'\n", renewPart.Address())
+ if renewPart.Key.VoteLastValid < lastValidRound {
+ fmt.Printf(" Skipping account %s: This account has part keys that have expired. Please renew this account explicitly using 'renewpartkey'\n", renewPart.Address)
continue
}
- address := renewPart.Address().String()
+ address := renewPart.Address
err = generateAndRegisterPartKey(address, currentRound, lastValidRound, txLastValidRound, fee, leaseBytes, dilution, wallet, dataDir, client)
if err != nil {
fmt.Fprintf(os.Stderr, " Error renewing part key for account %s: %v\n", address, err)
@@ -1103,53 +1121,6 @@ func uintToStr(number uint64) string {
return fmt.Sprintf("%d", number)
}
-// legacyListParticipationKeysCommand prints key information in the same
-// format as earlier versions of goal. Some users are using this information
-// in scripts and need some extra time to migrate to the REST API.
-// DEPRECATED
-func legacyListParticipationKeysCommand() {
- dataDir := ensureSingleDataDir()
-
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
- parts, err := client.ListParticipationKeyFiles()
- if err != nil {
- reportErrorf(errorRequestFail, err)
- }
-
- var filenames []string
- for fn := range parts {
- filenames = append(filenames, fn)
- }
- sort.Strings(filenames)
-
- rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
- fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
- for _, fn := range filenames {
- onlineInfoStr := "unknown"
- onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
- if err == nil {
- votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
- vrfBytes := parts[fn].VRF.PK
- if onlineAccountInfo.Participation != nil &&
- (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
- (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
- (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
- (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
- (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
- onlineInfoStr = "yes"
- } else {
- onlineInfoStr = "no"
- }
- }
- // it's okay to proceed without algod info
- first, last := parts[fn].ValidInterval()
- fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
- fmt.Sprintf("%d", first),
- fmt.Sprintf("%d", last),
- fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
- }
-}
-
var listParticipationKeysCmd = &cobra.Command{
Use: "listpartkeys",
Short: "List participation keys summary",
@@ -1402,47 +1373,6 @@ func strOrNA(value *uint64) string {
return uintToStr(*value)
}
-// legacyPartkeyInfoCommand prints key information in the same
-// format as earlier versions of goal. Some users are using this information
-// in scripts and need some extra time to migrate to alternatives.
-// DEPRECATED
-func legacyPartkeyInfoCommand() {
- type partkeyInfo struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Address string `codec:"acct"`
- FirstValid basics.Round `codec:"first"`
- LastValid basics.Round `codec:"last"`
- VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
- SelectionID crypto.VRFVerifier `codec:"sel"`
- VoteKeyDilution uint64 `codec:"voteKD"`
- }
-
- onDataDirs(func(dataDir string) {
- fmt.Printf("Dumping participation key info from %s...\n", dataDir)
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
-
- // Make sure we don't already have a partkey valid for (or after) specified roundLastValid
- parts, err := client.ListParticipationKeyFiles()
- if err != nil {
- reportErrorf(errorRequestFail, err)
- }
-
- for filename, part := range parts {
- fmt.Println("------------------------------------------------------------------")
- info := partkeyInfo{
- Address: part.Address().String(),
- FirstValid: part.FirstValid,
- LastValid: part.LastValid,
- VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
- SelectionID: part.VRFSecrets().PK,
- VoteKeyDilution: part.KeyDilution,
- }
- infoString := protocol.EncodeJSON(&info)
- fmt.Printf("File: %s\n%s\n", filename, string(infoString))
- }
- })
-}
-
var partkeyInfoCmd = &cobra.Command{
Use: "partkeyinfo",
Short: "Output details about all available part keys",
@@ -1534,3 +1464,138 @@ var markNonparticipatingCmd = &cobra.Command{
}
},
}
+
+// listParticipationKeyFiles returns the available participation keys,
+// as a map from database filename to Participation key object.
+// DEPRECATED
+func listParticipationKeyFiles(c *libgoal.Client) (partKeyFiles map[string]algodAcct.Participation, err error) {
+ genID, err := c.GenesisID()
+ if err != nil {
+ return
+ }
+
+ // Get a list of files in the participation keys directory
+ keyDir := filepath.Join(c.DataDir(), genID)
+ files, err := ioutil.ReadDir(keyDir)
+ if err != nil {
+ return
+ }
+
+ partKeyFiles = make(map[string]algodAcct.Participation)
+ for _, file := range files {
+ // If it can't be a participation key database, skip it
+ if !config.IsPartKeyFilename(file.Name()) {
+ continue
+ }
+
+ filename := file.Name()
+
+ // Fetch a handle to this database
+ handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename))
+ if err != nil {
+ // Couldn't open it, skip it
+ continue
+ }
+
+ // Fetch an account.Participation from the database
+ part, err := algodAcct.RestoreParticipation(handle)
+ if err != nil {
+ // Couldn't read it, skip it
+ handle.Close()
+ continue
+ }
+
+ partKeyFiles[filename] = part.Participation
+ part.Close()
+ }
+
+ return
+}
+
+// legacyListParticipationKeysCommand prints key information in the same
+// format as earlier versions of goal. Some users are using this information
+// in scripts and need some extra time to migrate to the REST API.
+// DEPRECATED
+func legacyListParticipationKeysCommand() {
+ dataDir := ensureSingleDataDir()
+
+ client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+ parts, err := listParticipationKeyFiles(&client)
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+
+ var filenames []string
+ for fn := range parts {
+ filenames = append(filenames, fn)
+ }
+ sort.Strings(filenames)
+
+ rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
+ fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
+ for _, fn := range filenames {
+ onlineInfoStr := "unknown"
+ onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
+ if err == nil {
+ votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
+ vrfBytes := parts[fn].VRF.PK
+ if onlineAccountInfo.Participation != nil &&
+ (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
+ (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
+ (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
+ (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
+ (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
+ onlineInfoStr = "yes"
+ } else {
+ onlineInfoStr = "no"
+ }
+ }
+ // it's okay to proceed without algod info
+ first, last := parts[fn].ValidInterval()
+ fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
+ fmt.Sprintf("%d", first),
+ fmt.Sprintf("%d", last),
+ fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
+ }
+}
+
+// legacyPartkeyInfoCommand prints key information in the same
+// format as earlier versions of goal. Some users are using this information
+// in scripts and need some extra time to migrate to alternatives.
+// DEPRECATED
+func legacyPartkeyInfoCommand() {
+ type partkeyInfo struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Address string `codec:"acct"`
+ FirstValid basics.Round `codec:"first"`
+ LastValid basics.Round `codec:"last"`
+ VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
+ SelectionID crypto.VRFVerifier `codec:"sel"`
+ VoteKeyDilution uint64 `codec:"voteKD"`
+ }
+
+ onDataDirs(func(dataDir string) {
+ fmt.Printf("Dumping participation key info from %s...\n", dataDir)
+ client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+
+ // Make sure we don't already have a partkey valid for (or after) specified roundLastValid
+ parts, err := listParticipationKeyFiles(&client)
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+
+ for filename, part := range parts {
+ fmt.Println(strings.Repeat("-", 40))
+ info := partkeyInfo{
+ Address: part.Address().String(),
+ FirstValid: part.FirstValid,
+ LastValid: part.LastValid,
+ VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
+ SelectionID: part.VRFSecrets().PK,
+ VoteKeyDilution: part.KeyDilution,
+ }
+ infoString := protocol.EncodeJSON(&info)
+ fmt.Printf("File: %s\n%s\n", filename, string(infoString))
+ }
+ })
+}
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index ac570ffb5..000304fa3 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -1214,6 +1214,12 @@ var dryrunRemoteCmd = &cobra.Command{
if txnResult.Cost != nil {
fmt.Fprintf(os.Stdout, "tx[%d] cost: %d\n", i, *txnResult.Cost)
}
+ if txnResult.BudgetConsumed != nil {
+ fmt.Fprintf(os.Stdout, "tx[%d] budget consumed: %d\n", i, *txnResult.BudgetConsumed)
+ }
+ if txnResult.BudgetAdded != nil {
+ fmt.Fprintf(os.Stdout, "tx[%d] budget added: %d\n", i, *txnResult.BudgetAdded)
+ }
fmt.Fprintf(os.Stdout, "tx[%d] messages:\n", i)
for _, msg := range msgs {
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index cb6284036..21bd8c1aa 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -43,9 +43,7 @@ var minFee uint64
var randomFee, noRandomFee bool
var randomAmount, noRandomAmount bool
var randomDst bool
-var delayBetween string
var runTime string
-var restTime string
var refreshTime string
var saveConfig bool
var useDefault bool
@@ -84,9 +82,7 @@ func init() {
runCmd.Flags().BoolVar(&randomFee, "rf", false, "Set to enable random fees (between minf and mf)")
runCmd.Flags().BoolVar(&noRandomFee, "nrf", false, "Set to disable random fees")
runCmd.Flags().BoolVar(&randomDst, "rd", false, "Send money to randomly-generated addresses")
- runCmd.Flags().StringVar(&delayBetween, "delay", "", "Delay (ms) between every transaction (0 means none)")
runCmd.Flags().StringVar(&runTime, "run", "", "Duration of time (seconds) to run transfers before resting (0 means non-stop)")
- runCmd.Flags().StringVar(&restTime, "rest", "", "Duration of time (seconds) to rest between transfer periods (0 means no rest)")
runCmd.Flags().StringVar(&refreshTime, "refresh", "", "Duration of time (seconds) between refilling accounts with money (0 means no refresh)")
runCmd.Flags().StringVar(&logicProg, "program", "", "File containing the compiled program to include as a logic sig")
runCmd.Flags().BoolVar(&saveConfig, "save", false, "Save the effective configuration to disk")
@@ -187,13 +183,6 @@ var runCmd = &cobra.Command{
}
cfg.RandomizeDst = randomDst
cfg.Quiet = quietish
- if delayBetween != "" {
- val, err := strconv.ParseUint(delayBetween, 10, 32)
- if err != nil {
- reportErrorf("Invalid value specified for --delay: %v\n", err)
- }
- cfg.DelayBetweenTxn = time.Duration(uint32(val)) * time.Millisecond
- }
if runTime != "" {
val, err := strconv.ParseUint(runTime, 10, 32)
if err != nil {
@@ -201,13 +190,6 @@ var runCmd = &cobra.Command{
}
cfg.RunTime = time.Duration(uint32(val)) * time.Second
}
- if restTime != "" {
- val, err := strconv.ParseUint(restTime, 10, 32)
- if err != nil {
- reportErrorf("Invalid value specified for --rest: %v\n", err)
- }
- cfg.RestTime = time.Duration(uint32(val)) * time.Second
- }
if refreshTime != "" {
val, err := strconv.ParseUint(refreshTime, 10, 32)
if err != nil {
diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go
index 97701b979..9ab3d3dec 100644
--- a/cmd/tealdbg/main.go
+++ b/cmd/tealdbg/main.go
@@ -23,11 +23,23 @@ import (
"github.com/gorilla/mux"
"github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
cmdutil "github.com/algorand/go-algorand/cmd/util"
)
func main() {
+ // Hidden command to generate docs in a given directory
+ // tealdbg generate-docs [path]
+ if len(os.Args) == 3 && os.Args[1] == "generate-docs" {
+ err := doc.GenMarkdownTree(rootCmd, os.Args[2])
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+ os.Exit(0)
+ }
+
if err := rootCmd.Execute(); err != nil {
log.Println(err)
os.Exit(1)
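A standalone sketch of the same cobra docs-generation call; the root command and output directory below are placeholders, not the repository's wiring:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "tealdbg", Short: "TEAL debugger"}
	// Writes one markdown page per (sub)command into ./docs.
	if err := doc.GenMarkdownTree(root, "./docs"); err != nil {
		log.Fatal(err)
	}
}
```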
diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh
index 60fbe0561..3ed4622ba 100755
--- a/cmd/updater/update.sh
+++ b/cmd/updater/update.sh
@@ -22,6 +22,7 @@ GENESIS_NETWORK_DIR_SPEC=""
SKIP_UPDATE=0
TOOLS_OUTPUT_DIR=""
DRYRUN=false
+VERIFY_UPDATER_ARCHIVE="0"
IS_ROOT=false
if [ $EUID -eq 0 ]; then
IS_ROOT=true
@@ -100,6 +101,10 @@ while [ "$1" != "" ]; do
shift
TOOLS_OUTPUT_DIR=$1
;;
+ -verify)
+ shift
+ VERIFY_UPDATER_ARCHIVE="1"
+ ;;
-z)
DRYRUN=true
;;
@@ -190,50 +195,134 @@ function get_updater_url() {
echo "This operation system ${UNAME} is not supported by updater."
exit 1
fi
- UPDATER_FILENAME="install_master_${OS}-${ARCH}.tar.gz"
- UPDATER_URL="https://github.com/algorand/go-algorand-doc/raw/master/downloads/installers/${OS}_${ARCH}/${UPDATER_FILENAME}"
+
+ # the updater will auto-update itself to the latest version, this means that the version of updater that is downloaded
+ # can be arbitrary as long as the self-updating functionality is working, hence the hard-coded version
+ UPDATER_URL="http://algorand-dev-deb-repo.s3-website-us-east-1.amazonaws.com/releases/stable/f9d842778_3.6.2/install_stable_${OS}-${ARCH}_3.6.2.tar.gz"
+ UPDATER_FILENAME="install_stable_${OS}-${ARCH}_3.6.2.tar.gz"
+
+ # if on linux, also set variables for signature and checksum validation
+ if [ "$OS" = "linux" ] && [ "$VERIFY_UPDATER_ARCHIVE" = "1" ]; then
+ UPDATER_PUBKEYURL="https://releases.algorand.com/key.pub"
+ UPDATER_SIGURL="http://algorand-dev-deb-repo.s3-website-us-east-1.amazonaws.com/releases/stable/f9d842778_3.6.2/install_stable_${OS}-${ARCH}_3.6.2.tar.gz.sig"
+ UPDATER_CHECKSUMURL="https://algorand-releases.s3.amazonaws.com/channel/stable/hashes_stable_${OS}_${ARCH}_3.6.2"
+ fi
}
# check to see if the binary updater exists. if not, it will automatically download the correct updater binary for the current platform
function check_for_updater() {
+ local UNAME
+ UNAME="$(uname)"
+
# check if the updater binary exist and is not empty.
if [[ -s "${SCRIPTPATH}/updater" && -f "${SCRIPTPATH}/updater" ]]; then
return 0
fi
+
+ # set UPDATER_URL and UPDATER_ARCHIVE as a global that can be referenced here
+ # if linux, UPDATER_PUBKEYURL, UPDATER_SIGURL, UPDATER_CHECKSUMURL will be set to try verification
get_updater_url
- # check the curl is available.
- CURL_VER=$(curl -V 2>/dev/null || true)
- if [ "${CURL_VER}" = "" ]; then
+ # check if curl is available
+ if ! type curl &>/dev/null; then
# no curl is installed.
echo "updater binary is missing and cannot be downloaded since curl is missing."
- if [[ "$(uname)" = "Linux" ]]; then
+ if [ "$UNAME" = "Linux" ]; then
echo "To install curl, run the following command:"
echo "apt-get update; apt-get install -y curl"
fi
exit 1
fi
- CURL_OUT=$(curl -LJO --silent ${UPDATER_URL})
- if [ "$?" != "0" ]; then
- echo "failed to download updater binary from ${UPDATER_URL} using curl."
- echo "${CURL_OUT}"
+ # create temporary directory for updater archive
+ local UPDATER_TEMPDIR="" UPDATER_ARCHIVE=""
+ UPDATER_TEMPDIR="$(mktemp -d 2>/dev/null || mktemp -d -t "tmp")"
+ UPDATER_ARCHIVE="${UPDATER_TEMPDIR}/${UPDATER_FILENAME}"
+
+ # download updater archive
+ if ! curl -sSL "$UPDATER_URL" -o "$UPDATER_ARCHIVE"; then
+ echo "failed to download updater archive from ${UPDATER_URL} using curl."
exit 1
fi
- if [ ! -f "${SCRIPTPATH}/${UPDATER_FILENAME}" ]; then
- echo "downloaded file ${SCRIPTPATH}/${UPDATER_FILENAME} is missing."
+ if [ ! -f "$UPDATER_ARCHIVE" ]; then
+ echo "downloaded file ${UPDATER_ARCHIVE} is missing."
exit
fi
- tar -zxvf "${SCRIPTPATH}/${UPDATER_FILENAME}" updater
- if [ "$?" != "0" ]; then
- echo "failed to extract updater binary from ${SCRIPTPATH}/${UPDATER_FILENAME}"
+ # if -verify command line flag is set, try verifying updater archive
+ if [ "$VERIFY_UPDATER_ARCHIVE" = "1" ]; then
+ # if linux, check for checksum and signature validation dependencies
+ local GPG_VERIFY="0" CHECKSUM_VERIFY="0"
+ if [ "$UNAME" = "Linux" ]; then
+ if type gpg &>/dev/null; then
+ GPG_VERIFY="1"
+ else
+ echo "gpg is not available to perform signature validation."
+ fi
+
+ if type sha256sum &>/dev/null; then
+ CHECKSUM_VERIFY="1"
+ else
+ echo "sha256sum is not available to perform checksum validation."
+ fi
+ fi
+
+ # try signature validation
+ if [ "$GPG_VERIFY" = "1" ]; then
+ local UPDATER_SIGFILE="$UPDATER_TEMPDIR/updater.sig" UPDATER_PUBKEYFILE="key.pub"
+ # try downloading public key
+ if curl -sSL "$UPDATER_PUBKEYURL" -o "$UPDATER_PUBKEYFILE"; then
+ GNUPGHOME="$(mktemp -d)"; export GNUPGHOME
+ if gpg --import "$UPDATER_PUBKEYFILE"; then
+ if curl -sSL "$UPDATER_SIGURL" -o "$UPDATER_SIGFILE"; then
+ if ! gpg --verify "$UPDATER_SIGFILE" "$UPDATER_ARCHIVE"; then
+ echo "failed to verify signature of updater archive."
+ exit 1
+ fi
+ else
+ echo "failed download signature file, cannot perform signature validation."
+ fi
+ else
+ echo "failed importing GPG public key, cannot perform signature validation."
+ fi
+ # clean up temporary directory used for signature validation
+ rm -rf "$GNUPGHOME"; unset GNUPGHOME
+ else
+ echo "failed downloading GPG public key, cannot perform signature validation."
+ fi
+ fi
+
+ # try checksum validation
+ if [ "$CHECKSUM_VERIFY" = "1" ]; then
+ local UPDATER_CHECKSUMFILE="$UPDATER_TEMPDIR/updater.checksum"
+ # try downloading checksum file
+ if curl -sSL "$UPDATER_CHECKSUMURL" -o "$UPDATER_CHECKSUMFILE"; then
+ # have to be in same directory as archive
+ pushd "$UPDATER_TEMPDIR"
+ if ! sha256sum --quiet --ignore-missing -c "$UPDATER_CHECKSUMFILE"; then
+ echo "failed to verify checksum of updater archive."
+ popd
+ exit 1
+ fi
+ popd
+ else
+ echo "failed downloading checksum file, cannot perform checksum validation."
+ fi
+ fi
+ fi
+
+ # extract and install updater
+ if ! tar -zxf "$UPDATER_ARCHIVE" -C "$UPDATER_TEMPDIR" updater; then
+ echo "failed to extract updater binary from ${UPDATER_ARCHIVE}"
exit 1
+ else
+ mv "${UPDATER_TEMPDIR}/updater" "$SCRIPTPATH"
fi
- rm -f "${SCRIPTPATH}/${UPDATER_FILENAME}"
- echo "updater binary was downloaded"
+ # clean up temp directory
+ rm -rf "$UPDATER_TEMPDIR"
+ echo "updater binary was installed at ${SCRIPTPATH}/updater"
}
function check_for_update() {
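For illustration, the checksum half of the verification flow above (what `sha256sum -c` does for the archive) corresponds to this Go sketch; the file path and expected digest are placeholders:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// verifyChecksum hashes the file at path and compares it to wantHex.
func verifyChecksum(path, wantHex string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, wantHex)
	}
	return nil
}

func main() {
	// Usage: verify <archive> <expected-sha256-hex>
	if err := verifyChecksum(os.Args[1], os.Args[2]); err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive checksum OK")
}
```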
diff --git a/components/mocks/mockParticipationRegistry.go b/components/mocks/mockParticipationRegistry.go
index 2ccce5ff1..d2aa53f26 100644
--- a/components/mocks/mockParticipationRegistry.go
+++ b/components/mocks/mockParticipationRegistry.go
@@ -69,6 +69,11 @@ func (m *MockParticipationRegistry) GetStateProofForRound(id account.Participati
return account.StateProofRecordForRound{}, nil
}
+// HasLiveKeys quickly tests to see if there is a valid participation key over some range of rounds
+func (m *MockParticipationRegistry) HasLiveKeys(from, to basics.Round) bool {
+ return false
+}
+
// Register updates the EffectiveFirst and EffectiveLast fields. If there are multiple records for the account
// then it is possible for multiple records to be updated.
func (m *MockParticipationRegistry) Register(id account.ParticipationID, on basics.Round) error {
diff --git a/config/consensus.go b/config/consensus.go
index 07aa9ba84..42e196b1b 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -910,6 +910,9 @@ func initConsensusProtocols() {
// Enable application support
v24.Application = true
+ // Although Inners were not allowed yet, this gates downgrade checks, which must be allowed
+ v24.MinInnerApplVersion = 6
+
// Enable rekeying
v24.SupportRekeying = true
@@ -1090,7 +1093,6 @@ func initConsensusProtocols() {
v31.LogicSigVersion = 6
v31.EnableInnerTransactionPooling = true
v31.IsolateClearState = true
- v31.MinInnerApplVersion = 6
// stat proof key registration
v31.EnableStateProofKeyregCheck = true
@@ -1144,7 +1146,7 @@ func initConsensusProtocols() {
vFuture.CompactCertWeightThreshold = (1 << 32) * 30 / 100
vFuture.CompactCertSecKQ = 128
- vFuture.LogicSigVersion = 7
+ vFuture.LogicSigVersion = 7 // When moving this to a release, put a new higher LogicSigVersion here
vFuture.MinInnerApplVersion = 4
vFuture.UnifyInnerTxIDs = true
@@ -1176,4 +1178,5 @@ func init() {
for _, p := range Consensus {
checkSetAllocBounds(p)
}
+
}
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 1e95d5f99..74c69c0cf 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -41,7 +41,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitely, otherwise, only the most recent blocks
@@ -251,6 +251,9 @@ type Local struct {
// the algod api will be exposed to untrusted individuals
EnableProfiler bool `version[0]:"false"`
+ // EnableRuntimeMetrics exposes Go runtime metrics in /metrics and via node_exporter.
+ EnableRuntimeMetrics bool `version[22]:"false"`
+
// TelemetryToLog records messages to node.log that are normally sent to remote event monitoring
TelemetryToLog bool `version[5]:"true"`
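A sketch of the versioned-defaults convention the `Version` comment above describes: every new field's struct tag records the defaults version that introduced it and its default value there (the field name below is hypothetical):

```go
type exampleLocal struct {
	// Introduced in defaults version 22 with a default of false, exactly
	// as EnableRuntimeMetrics is in this diff.
	EnableHypotheticalFeature bool `version[22]:"false"`
}
```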
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 55eba24d1..b8354926b 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 21,
+ Version: 22,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 7,
@@ -68,6 +68,7 @@ var defaultLocal = Local{
EnableProcessBlockStats: false,
EnableProfiler: false,
EnableRequestLogger: false,
+ EnableRuntimeMetrics: false,
EnableTopAccountsReporting: false,
EnableVerbosedTransactionSyncLogging: false,
EndpointAddress: "127.0.0.1:0",
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 736ce0a26..8e2c69e54 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -2295,8 +2295,16 @@
"format": "byte"
}
},
+ "budget-added": {
+ "description": "Budget added during execution of app call transaction.",
+ "type": "integer"
+ },
+ "budget-consumed": {
+ "description": "Budget consumed during execution of app call transaction.",
+ "type": "integer"
+ },
"cost": {
- "description": "Execution cost of app call transaction",
+ "description": "Net cost of app execution. Field is DEPRECATED and is subject for removal. Instead, use `budget-added` and `budget-consumed.",
"type": "integer"
}
}
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index d71a2dbd9..0fa09dd55 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -1328,8 +1328,16 @@
},
"type": "array"
},
+ "budget-added": {
+ "description": "Budget added during execution of app call transaction.",
+ "type": "integer"
+ },
+ "budget-consumed": {
+ "description": "Budget consumed during execution of app call transaction.",
+ "type": "integer"
+ },
"cost": {
- "description": "Execution cost of app call transaction",
+ "description": "Net cost of app execution. Field is DEPRECATED and is subject for removal. Instead, use `budget-added` and `budget-consumed.",
"type": "integer"
},
"disassembly": {
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index eea36a975..d3b85de39 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -541,8 +541,17 @@ func doDryrunRequest(dr *DryrunRequest, response *generated.DryrunResponse) {
err = fmt.Errorf("cost budget exceeded: budget is %d but program cost was %d", allowedBudget-cumulativeCost, cost)
}
}
- cost64 := uint64(cost)
- result.Cost = &cost64
+ // The cost is broken up into two fields: budgetAdded and budgetConsumed.
+ // This is necessary because the fields can only be represented as unsigned
+ // integers, so a negative cost would underflow. The two fields also provide
+ // more information, which can be useful for testing purposes.
+ // cost = budgetConsumed - budgetAdded
+ netCost := uint64(cost)
+ budgetAdded := uint64(proto.MaxAppProgramCost * numInnerTxns(delta))
+ budgetConsumed := uint64(cost) + budgetAdded
+ result.Cost = &netCost
+ result.BudgetAdded = &budgetAdded
+ result.BudgetConsumed = &budgetConsumed
maxCurrentBudget = pooledAppBudget
cumulativeCost += cost
@@ -624,3 +633,13 @@ func MergeAppParams(base *basics.AppParams, update *basics.AppParams) {
base.GlobalStateSchema = update.GlobalStateSchema
}
}
+
+// numInnerTxns counts all inner transactions contained within the eval delta, recursively
+func numInnerTxns(delta transactions.EvalDelta) (cnt int) {
+ cnt = len(delta.InnerTxns)
+ for _, itxn := range delta.InnerTxns {
+ cnt += numInnerTxns(itxn.EvalDelta)
+ }
+
+ return
+}
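
The arithmetic is easiest to see with the new test case below: app3 submits
one inner transaction, so budgetAdded = MaxAppProgramCost * 1 = 700, while the
whole execution consumes only 13 budget units, giving a net cost of
13 - 700 = -687, a negative value the unsigned Cost field could not carry on
its own. A sketch of the same computation in isolation (the figures are taken
from the test; 700 is the current MaxAppProgramCost):

	package sketch

	// exampleNetCost reproduces the app3 numbers from the dryrun test:
	// one inner transaction grants MaxAppProgramCost extra budget, and
	// the program consumes 13 units in total.
	func exampleNetCost() int64 {
		const maxAppProgramCost = 700 // proto.MaxAppProgramCost today
		budgetAdded := uint64(maxAppProgramCost * 1)      // one inner txn
		budgetConsumed := uint64(13)                      // measured by dryrun
		return int64(budgetConsumed) - int64(budgetAdded) // -687
	}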
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index c2722627b..cfa87cb98 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -1260,18 +1260,20 @@ func TestDryrunCost(t *testing.T) {
msg string
numHashes int
}{
- {"REJECT", 12},
- {"PASS", 5},
+ {"REJECT", 22},
+ {"PASS", 16},
}
for _, test := range tests {
t.Run(test.msg, func(t *testing.T) {
- costs := make([]uint64, 2)
+ expectedCosts := make([]int64, 3)
+ expectedBudgetAdded := make([]uint64, 3)
ops, err := logic.AssembleString("#pragma version 5\nbyte 0x41\n" + strings.Repeat("keccak256\n", test.numHashes) + "pop\nint 1\n")
require.NoError(t, err)
- approval := ops.Program
- costs[0] = 3 + uint64(test.numHashes)*130
+ app1 := ops.Program
+ expectedCosts[0] = 3 + int64(test.numHashes)*130
+ expectedBudgetAdded[0] = 0
ops, err = logic.AssembleString("int 1")
require.NoError(t, err)
@@ -1279,8 +1281,26 @@ func TestDryrunCost(t *testing.T) {
ops, err = logic.AssembleString("#pragma version 5 \nint 1 \nint 2 \npop")
require.NoError(t, err)
- approv := ops.Program
- costs[1] = 3
+ app2 := ops.Program
+ expectedCosts[1] = 3
+ expectedBudgetAdded[1] = 0
+
+ ops, err = logic.AssembleString(`#pragma version 6
+itxn_begin
+int appl
+itxn_field TypeEnum
+int DeleteApplication
+itxn_field OnCompletion
+byte 0x068101 // #pragma version 6; int 1;
+itxn_field ApprovalProgram
+byte 0x068101 // #pragma version 6; int 1;
+itxn_field ClearStateProgram
+itxn_submit
+int 1`)
+ require.NoError(t, err)
+ app3 := ops.Program
+ expectedCosts[2] = -687
+ expectedBudgetAdded[2] = 700
var appIdx basics.AppIndex = 1
creator := randomAddress()
@@ -1307,13 +1327,23 @@ func TestDryrunCost(t *testing.T) {
},
},
},
+ {
+ Txn: transactions.Transaction{
+ Header: transactions.Header{Sender: sender},
+ Type: protocol.ApplicationCallTx,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: appIdx + 2,
+ OnCompletion: transactions.OptInOC,
+ },
+ },
+ },
},
Apps: []generated.Application{
{
Id: uint64(appIdx),
Params: generated.ApplicationParams{
Creator: creator.String(),
- ApprovalProgram: approval,
+ ApprovalProgram: app1,
ClearStateProgram: clst,
LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
},
@@ -1322,7 +1352,16 @@ func TestDryrunCost(t *testing.T) {
Id: uint64(appIdx + 1),
Params: generated.ApplicationParams{
Creator: creator.String(),
- ApprovalProgram: approv,
+ ApprovalProgram: app2,
+ ClearStateProgram: clst,
+ LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
+ },
+ },
+ {
+ Id: uint64(appIdx + 2),
+ Params: generated.ApplicationParams{
+ Creator: creator.String(),
+ ApprovalProgram: app3,
ClearStateProgram: clst,
LocalStateSchema: &generated.ApplicationStateSchema{NumByteSlice: 1},
},
@@ -1340,13 +1379,15 @@ func TestDryrunCost(t *testing.T) {
var response generated.DryrunResponse
doDryrunRequest(&dr, &response)
require.Empty(t, response.Error)
- require.Equal(t, 2, len(response.Txns))
+ require.Equal(t, 3, len(response.Txns))
for i, txn := range response.Txns {
messages := *txn.AppCallMessages
require.GreaterOrEqual(t, len(messages), 1)
- require.NotNil(t, *txn.Cost)
- require.Equal(t, costs[i], *txn.Cost)
+ require.NotNil(t, txn.BudgetConsumed)
+ require.NotNil(t, txn.BudgetAdded)
+ cost := int64(*txn.BudgetConsumed) - int64(*txn.BudgetAdded)
+ require.Equal(t, expectedCosts[i], cost)
+ require.Equal(t, expectedBudgetAdded[i], *txn.BudgetAdded)
statusMatches := false
costExceedFound := false
for _, msg := range messages {
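
The bumped hash counts track the larger pooled budget. Each keccak256 costs
130 units under TEAL v5 and the surrounding opcodes (byte, pop, int 1) cost
one unit each, so app1 costs 3 + 130*n for n hashes. Assuming the pool grants
MaxAppProgramCost per app call in the group, three app calls allow
3 * 700 = 2100 units: n = 16 (cost 2083) now passes and n = 22 (cost 2863) is
rejected, whereas the previous two-transaction pool of 1400 made 5 and 12 the
boundary cases. A one-line sketch of that arithmetic:

	package sketch

	// app1Cost computes the opcode cost of the keccak256 test program,
	// using TEAL v5 costs: keccak256 = 130, byte/pop/int = 1 each.
	func app1Cost(numHashes int) int { return 3 + 130*numHashes }

	// app1Cost(16) == 2083 <= 2100 (PASS); app1Cost(22) == 2863 > 2100 (REJECT)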
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index abb34906f..72eb4a7b1 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -311,153 +311,155 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PcNrbgX0H13Co/ttkt+ZEZqyp1V7ETjzaO47KUubvX9iZo8nQ3RiTAAKDUHa/+",
- "+xYOABIkwW7qMcp1XX+y1cTj4ODg4LzxeZKKohQcuFaTo8+TkkpagAaJf9E0FRXXCcvMXxmoVLJSM8En",
- "R/4bUVoyvppMJ8z8WlK9nkwnnBbQtDH9pxMJv1dMQjY50rKC6USlayioGVhvS9O6HmmTrETihji2Q5y8",
- "mlzt+ECzTIJSfSh/5vmWMJ7mVQZES8oVTc0nRS6ZXhO9Zoq4zoRxIjgQsSR63WpMlgzyTM38In+vQG6D",
- "VbrJh5d01YCYSJFDH86XolgwDh4qqIGqN4RoQTJYYqM11cTMYGD1DbUgCqhM12Qp5B5QLRAhvMCrYnL0",
- "YaKAZyBxt1JgF/jfpQT4AxJN5Qr05NM0trilBploVkSWduKwL0FVuVYE2+IaV+wCODG9ZuSnSmmyAEI5",
- "ef/DS/L06dMXZiEF1RoyR2SDq2pmD9dku0+OJhnV4D/3aY3mKyEpz5K6/fsfXuL8p26BY1tRpSB+WI7N",
- "F3LyamgBvmOEhBjXsMJ9aFG/6RE5FM3PC1gKCSP3xDa+000J5/9TdyWlOl2XgnEd2ReCX4n9HOVhQfdd",
- "PKwGoNW+NJiSZtAPB8mLT58Pp4cHV3/5cJz8p/vz+dOrkct/WY+7BwPRhmklJfB0m6wkUDwta8r7+Hjv",
- "6EGtRZVnZE0vcPNpgaze9SWmr2WdFzSvDJ2wVIrjfCUUoY6MMljSKtfET0wqnhs2ZUZz1E6YIqUUFyyD",
- "bGq47+WapWuSUmWHwHbkkuW5ocFKQTZEa/HV7ThMVyFKDFw3wgcu6L8uMpp17cEEbJAbJGkuFCRa7Lme",
- "/I1DeUbCC6W5q9T1LitytgaCk5sP9rJF3HFD03m+JRr3NSNUEUr81TQlbEm2oiKXuDk5O8f+bjUGawUx",
- "SMPNad2j5vAOoa+HjAjyFkLkQDkiz5+7Psr4kq0qCYpcrkGv3Z0nQZWCKyBi8U9Itdn2/3X681siJPkJ",
- "lKIreEfTcwI8FdnwHrtJYzf4P5UwG16oVUnT8/h1nbOCRUD+iW5YURWEV8UCpNkvfz9oQSToSvIhgOyI",
- "e+isoJv+pGey4ilubjNtS1AzpMRUmdPtjJwsSUE33x5MHTiK0DwnJfCM8RXRGz4opJm594OXSFHxbIQM",
- "o82GBbemKiFlSwYZqUfZAYmbZh88jF8PnkayCsDxgwyCU8+yBxwOmwjNmKNrvpCSriAgmRn5xXEu/KrF",
- "OfCawZHFFj+VEi6YqFTdaQBGnHq3eM2FhqSUsGQRGjt16DDcw7Zx7LVwAk4quKaMQ2Y4LwItNFhONAhT",
- "MOFuZaZ/RS+ogm+eDV3gzdeRu78U3V3fueOjdhsbJfZIRu5F89Ud2LjY1Oo/QvkL51ZsldifexvJVmfm",
- "KlmyHK+Zf5r982ioFDKBFiL8xaPYilNdSTj6yB+bv0hCTjXlGZWZ+aWwP/1U5ZqdspX5Kbc/vRErlp6y",
- "1QAya1ij2hR2K+w/Zrw4O9abqNLwRojzqgwXlLa00sWWnLwa2mQ75nUJ87hWZUOt4mzjNY3r9tCbeiMH",
- "gBzEXUlNw3PYSjDQ0nSJ/2yWSE90Kf8w/5RlHsOpIWB30aJRwBkLjssyZyk12HvvPpuv5vSDVQ9o02KO",
- "N+nR5wC2UooSpGZ2UFqWSS5SmidKU40j/ZuE5eRo8pd5Y1WZ2+5qHkz+xvQ6xU5GELXCTULL8hpjvDMC",
- "jdrBJQxnxk/IHyy/Q1GIcbt7hoaY4b05XFCuZ40i0mIE9cn94GZq8G1lGIvvjmI1iHBiGy5AWbnWNnyg",
- "SIB6gmgliFYUM1e5WNQ/PDwuywaD+P24LC0+UCYEhuIWbJjS6hEunzZHKJzn5NWMvA7HRgFb8HxrbgUr",
- "Y5hLYemuK3d91RYjt4ZmxAeK4HYKOTNb49FghPe7oDhUFtYiN+LOXloxjf/u2oZkZn4f1fnLILEQt8PE",
- "heqTw5zVXPCXQGV52KGcPuE4I86MHHf73oxszChxgrkRrezcTzvuDjzWKLyUtLQAui/2EmUcVS/byMJ6",
- "S246ktFFYQ7OcEBrCNWNz9re8xCFBEmhA8N3uUjP7+C8L8w4/WOHw5M10Awkyaimwbly5yV+WWPHv2M/",
- "5AggIxL9z/gfmhPz2RC+4Yt2WKOpM6RfEdjVM6PgWrHZzmQaoOItSGF1WmJ00WtB+bKZvMcjLFrG8Ijv",
- "rRpNsIdfhFl6YyQ7Xgh5M3rpEAInjemPUDNqcFymnZ3FplWZOPxEzAe2QWegxtvSlyJDDHWHj+GqhYVT",
- "Tf8FWFBm1LvAQnugu8aCKEqWwx2c1zVV6/4ijD739Ak5/fvx88Mnvz55/o1RSEopVpIWZLHVoMhDJ0YT",
- "pbc5POqvDOXZKtfx0b955g1G7XFj4yhRyRQKWvaHsoYoe2nZZsS062OtjWZcdQ3gmGN5Boa9WLQTa2M1",
- "oL1iytyJxeJONmMIYVkzS0YcJBnsJabrLq+ZZhsuUW5ldRfKB0gpZMQUgkdMi1TkyQVIxUTEqv3OtSCu",
- "hRdIyu7vFlpySRUxc6OVruIZyFmMsvSGI2hMQ6H2Xah26LMNb3DjBqRS0m0P/Xa9kdW5ecfsSxv53uij",
- "SAky0RtOMlhUq5bsupSiIJRk2BEvjrciA6N3VOoOuGUzWAOM2YgQBLoQlSaUcJEBKimVivPRARcX2tbR",
- "JaBD1qzX9p5egBGIU1qt1ppUJUGDd29rm44JTe2mJHinqgGLYG3Kta3sdNZ9kkugmRGUgROxcGY3ZxDE",
- "RVK01mvPiRwXj6gOLbhKKVJQyig4VmzdC5pvZ3dZ78ATAo4A17MQJciSyhsCq4Wm+R5AsU0M3FrscrbK",
- "PtTjpt+1gd3Jw22k0ug4lgqMjGdOdw4ahlA4EicXINFm9y/dPz/JTbevKgc86k5SOWMFqkqccqEgFTxT",
- "0cFyqnSy79iaRi1xyqwgOCmxk4oDD6jrb6jS1nLLeIaitWU3OI/V480UwwAP3ihm5H/4y6Q/dmr4JFeV",
- "qm8WVZWlkBqy2Bo4bHbM9RY29VxiGYxdX19akErBvpGHsBSM75BlV2IRRHVt53Cujf7i0Bpg7oFtFJUt",
- "IBpE7ALk1LcKsBt6FQcAMXpY3RMJh6kO5dSuzOlEaVGW5vzppOJ1vyE0ndrWx/qXpm2fuKhu+HomwMyu",
- "PUwO8kuLWetPXlMjA+PIpKDn5m5CidaamPswm8OYKMZTSHZRvjmWp6ZVeAT2HNIBZcJFrASzdQ5Hh36j",
- "RDdIBHt2YWjBA5rNOyo1S1mJksSPsL1zs0h3gqiFhGSgKTPSdvABGTjy3ro/sT6D7pg3E7RGCaF98HtS",
- "aGQ5OVN4YbSBP4ctmkrfWWf0WeDCvgNJMTKqOd2UEwTUu7jMhRw2gQ1Ndb4115xew5ZcggSiqkXBtLbR",
- "BW1BUosyCQeIKvg7ZnQmFuvI9TswxuZzikMFy+tvxXRixZbd8J11BJcWOpzAVAqRjzBF95ARhWCUqZqU",
- "wuw6c8EsPuLBU1ILSCfEoH2tZp4PVAvNuALyf0RFUspRAKs01DeCkMhm8fo1M5gLrJ7TGaUbDEEOBVi5",
- "Er88ftxd+OPHbs+ZIku49BFgpmEXHY8fo5b0TijdOlx3oPGa43YS4e1o+TAXhZPhujxltle1dyOP2cl3",
- "ncFrc4k5U0o5wjXLvzUD6JzMzZi1hzSypmq9f+047iijRjB0bN1236UQyzsypMUjAFA5cU5904osK26B",
- "qpRTR9DP5Q0aYjmtozxsdPcRwRCANfXWOPfnk+ffTKaN677+bu5k+/VTRKJk2SYWoJHBJrYn7oihNvXA",
- "qB5bBVGvGDJmsYzEaIE8z93KOqyDFGDOtFqz0gzZxJNsNbRiUf/vw38/+nCc/CdN/jhIXvyP+afPz64e",
- "Pe79+OTq22//X/unp1ffPvr3f4uaFTVbxM2ffze7JJbEsfgNP+HWgbEU0upjWyfmieX9w60lQAalXseC",
- "P0sJClmjDeIs9brZVICODaWU4gL4lLAZzLosNluB8sakHOgSgxBRpxBjnKL1cbD05okjwHq4kFF8LEY/",
- "6OJD2sTDbJSOfHsHwosdiMg2Pr2yruxXsQwjZ91BUVuloejbu2zXXwek/fdeVu4dKsFzxiEpBIdtNFmE",
- "cfgJP8Z62+tuoDMKHkN9u7pEC/4OWO15xmzmbfGLux3w93e1Y/sONr87bsfUGcYMo6kG8pJQkuYMDTmC",
- "Ky2rVH/kFFXFgFwj7iSvAA8bD176JnFrRcSY4Ib6yKkyOKwVyKgJfAmRK+sHAG9DUNVqBUp3hOYlwEfu",
- "WjFOKs40zlWY/UrshpUg0aczsy0LuiVLmqOt4w+Qgiwq3RYj8dJTmuW5s7uaaYhYfuRUGx6kNPmJ8bMN",
- "DucjCD3NcNCXQp7XWIhfUSvgoJhK4nz/tf2K7N8tf+2uAswzsZ89v7lvvu9hjwXeOchPXjkV6+QVytGN",
- "xbUH+72Z4QrGkyiRGbmoYBzjtzu0RR4abcAT0KPGdut2/SPXG24I6YLmLDOy003IocviemfRno4O1bQ2",
- "omNV8Wv9FAsbWImkpOk5eo0nK6bX1WKWimLuVcv5StRq5jyjUAiO37I5LdlclZDOLw73yLm34Fckwq6u",
- "phPHddSdG2LcwLEFdees7Zn+by3Ig9ffn5G52yn1wEbh2qGD8MmINcBFCLUcVmbxNovMhiF/5B/5K1gy",
- "zsz3o488o5rOF1SxVM0rBfI7mlOewmwlyJEPOnpFNf3Ieyx+MNEzCPciZbXIWUrOw6u4OZo2eac/wseP",
- "HwyBfPz4qef96F+cbqroGbUTJJdMr0WlE5edkEi4pDKLgK7q6HQc2eYW7Zp1StzYliJd9oMbP86qaVmq",
- "brBqf/llmZvlB2SoXCim2TKitJCeCRrOaKHB/X0rnMol6aVPbakUKPJbQcsPjOtPJPlYHRw8BdKK3vzN",
- "8RpDk9sSWnajGwXTdm1GuHArUMFGS5qUdAUqunwNtMTdx4u6QAtlnhPs1ooa9TEWOFSzAI+P4Q2wcFw7",
- "Ag4Xd2p7+TTT+BLwE24htjHcqTH833S/gjjSG29XJxa1t0uVXifmbEdXpQyJ+52ps89Whid7b4xiK24O",
- "gUvUWwBJ15CeQ4Y5Q1CUejttdfcOP3fDedbBlM2ts4FumACCJrYFkKrMqJMBKN92I/EVaO3TD97DOWzP",
- "RJM/cp3Q+3ZAuBo6qEipwWVkiDU8tm6M7uY75zEGwZalj6vGGEJPFkc1Xfg+wwfZ3pB3cIhjRNEKWB5C",
- "BJURRFjiH0DBDRZqxrsV6ceWZ8Sbhb35ImYez/uJa9JIbc4BHK4G47Dt9wIwUVdcKrKgCjIiXI6pDXoO",
- "uFil6AoGbE+hlXNkaHHLMoqD7Lv3ojedWHYvtN59EwXZNk7MmqOUAuaLIRU0E3bc/n4ma0jHFcwIlo5w",
- "CFvkKCbVEQeW6VDZsjbbXPgh0OIEDJI3AocHo42RULJZU+XTXzFL2J/lUTLAvzCIf1fO1kngsQ5SgeuM",
- "LM9zu+e0Z7d1mVs+XcvnaIVG2xH5VtOJC6KKbYfgKABlkMPKLtw29oTSJBQ0G2Tg+Hm5zBkHksSc31Qp",
- "kTKbv9xcM24OMPLxY0Ks7YmMHiFGxgHY6CDCgclbEZ5NvroOkNwlRFA/NrqWgr8hHglow5uMyCNKw8IZ",
- "HwhM8xyAuoiJ+v7qxO3gMITxKTFs7oLmhs05I2ozSC+DCMXWTr6Qc1E+GhJnd5j+7MVyrTXZq+gmqwll",
- "Jg90XKDbAfFuUSK2BQrx5VTfGldDd+mYqQeu7yFcPQxyj24EQMcS0ZTncZrfXg2tfTf3b7KGpU+bZFof",
- "mRmj/SH6ie7SAP76huA6W+hd97qOKult12U7USqQn2Ks2JyRvmm0b4BVkANKxElLgkjOYwZzI9gDsttT",
- "3y3Q3DEdi/Lto8AfLmHFlIbGdGVuJW+LvW93F8X0byGWw6vTpVya9b0XoubRNs3Quu/CZd77Ci6EhmTJ",
- "pNIJ2v2iSzCNflCoUf5gmsYFhbbH3VZCYVmcN+C057BNMpZXcXp18/74ykz7tjbCqGpxDlsUB4Gma7LA",
- "yj3ROJwdU9tQrZ0LfmMX/Ibe2XrHnQbT1EwsDbm05/hCzkWH8+5iBxECjBFHf9cGUbqDQeLF/wpyHctY",
- "CoQGezgz03C2y/TYO0yZH3uXohRAMXxH2ZGiawm05Z2rYBh9YNQ9poPCN/20gYEzQMuSZZuOIdCOOqgu",
- "0mtp+z6xuIMF3F032B4MBEa/WGSqBNXOIW+kW1vCiIdrm43CzFk70ztkCOFUTPkCfH1EGdLGKlH7cHUG",
- "NP8Rtv8wbXE5k6vp5HZ2wxiu3Yh7cP2u3t4ontEhZu1ILTfANVFOy1KKC5onzro6RJpSXDjSxObeGHvP",
- "rC5uwzv7/vjNOwf+1XSS5kBlUosKg6vCduUXsyqbrj5wQHyBL6PweJndipLB5tdpxKFF9nINrphSII32",
- "ij801vbgKDoL7TLul99rb3WOAbvEHQ4CKGv/QGO7su6BtkuAXlCWe6ORh3bAh46LG1dBJMoVwgFu7VoI",
- "PETJnbKb3umOn46GuvbwpHCuHeWeClvRTBHBuyFZRoREWxSSakGxdIM1CfSZE6+KxBy/ROUsjRsY+UIZ",
- "4uDWcWQaE2w8IIyaESs24IfkFQvGMs3UCEW3A2QwRxSZvgzIEO4WwpWirTj7vQLCMuDafJJ4KjsHFWtl",
- "OFNz/zo1skN/LjewNU83w99GxgjLlnRvPARit4ARuql64L6qVWa/0NocY34I7PHX8HaHM/auxB2eakcf",
- "jpptyNC67W4KK8f2+Z8hDFtlbH/ZWq+8uvopA3NEy9AylSyl+APieh6qx5GwdV+ohWHU5B/AZ5Hsny6L",
- "qa07TTXdZvbB7R6SbkIrVNtDP0D1uPOBTwqLYnjzLOV2q21VyFZcSJxgwliuuR2/IRgHcy/+LaeXCxqr",
- "GGKEDAPTceP9bBmStSC+s8e9s3kzVztnRgJHat2W2YSuEmSTUdJPHr6hwGCnHS0qNJIBUm0oE0yt8ytX",
- "IjJMxS8pt8VFTT97lFxvBdb4ZXpdConpmCpu884gZQXN45JDhthvp69mbMVsac1KQVC70Q1kaxJbKnL1",
- "L61/uUHNyZIcTIPqsG43MnbBFFvkgC0ObYsFVcjJa0NU3cUsD7heK2z+ZETzdcUzCZleK4tYJUgt1KF6",
- "U3tuFqAvATg5wHaHL8hD9FkpdgGPDBbd/Tw5OnyBRlf7x0HsAnA1dHdxkwzZyX84dhKnY3Ta2TEM43aj",
- "zqLJhbbw+TDj2nGabNcxZwlbOl63/ywVlNMVxMMkij0w2b64m2hI6+CFZ7Zqr9JSbAnT8flBU8OfBmI+",
- "DfuzYJBUFAXThfNsKFEYemoKM9pJ/XC2BLCrHuTh8h/RQVh6/0hHibxfo6m932KrRjfuW1pAG61TQm0O",
- "bs4a170v+EVOfCY/llOqqyhZ3Ji5zNJRzEFP/pKUknGNikWll8nfSLqmkqaG/c2GwE0W3zyLlJBqV43h",
- "1wP83vEuQYG8iKNeDpC9lyFcX/KQC54UhqNkj5oY6+BUDnoy49FinqN3gwV3Dz1WKDOjJIPkVrXIjQac",
- "+laEx3cMeEtSrNdzLXq89srunTIrGScPWpkd+uX9GydlFELG6ro0x91JHBK0ZHCBgWvxTTJj3nIvZD5q",
- "F24D/Z/refAiZyCW+bMcUwS+q1ie/aPJGelU4ZOUp+uo3X9hOv7aVEmul2zPcbSMyJpyDnl0OHtn/urv",
- "1sjt/08xdp6C8ZFtu9X17HI7i2sAb4PpgfITGvQynZsJQqy2g+jrqMt8JTKC8zQ1Kxoq6xcMDCpo/V6B",
- "0rGkPfxgIz/QvmP0AlvAiQDPUKqekdf2lZM1kFZKPUqzrKhym54N2QqkMzxWZS5oNiVmnLPvj98QO6vt",
- "Y0t+2gJSKxTm2qvo6PVBgZtxMYS+emc8vnn8OLsDLs2qlcYKF0rTooylrpgWZ74B5seEtk4U80LszMgr",
- "K2ErL7/ZSQw9LJksjGRaj2Z5PNKE+Y/WNF2j6NriJsMkP77ymadKFRSGr+u81jVq8NwZuF3xM1v7bEqE",
- "0S8umbKPW8AFtLNl6tQxpzr57Jn28mTFuaWUKI/eldp4E7R74KxD25tDo5B1EH9NwcUWDrxuIbhT7BUt",
- "+tCtKterCG+ziusSpf7RopRywVmKJReC5zRqkN1DGWN8BSOqU3SNUf6IuxMaOVzRWnZ1OJHD4mB1O88I",
- "HeL6xsrgq9lUSx32T40vMqypJivQynE2yKa+JKOzlzCuwNUcwjdTAj4pZMv/ghwy6tJLatPvNckIY+cH",
- "BOAfzLe3Tj3CoNJzxlEQcmhz8avWooF1/LWRnpgmKwHKraedmq8+mD4zTE/PYPNp5uv+4xjWfWGWbX11",
- "/aGOvefOecpM25emLbFRh/XPrTBFO+lxWbpJhwt2RuUBveGDCI54YBJvAg+QW48fjraD3Ha63PE+NYQG",
- "F+iwgxLv4R5h1MUrO9V6L2heWYrCFsSGukTzKxmPgPGGcWhepYhcEGn0SsCNwfM60E+lkmorAo7iaWdA",
- "c/TSxRia0s5Ee9uhOhuMKME1+jmGt7GpuznAOOoGjeBG+bZ+DMNQdyBMvMRXeBwi+1U0UapyQlSGYced",
- "upoxxmEYt6/c274A+segLxPZ7lpSe3KucxMNZZKlIiZvfr+BtLJOaKF8FDJJMTU7uC+iFs2mQmxkG8Iq",
- "tR61GCK+2OK/sRJLwyhxXuJrxyl5lzB2vLbA2h6pJ24aYkoUWyXjMYHM/PboaKa+GYU1/e+UxHKxagNy",
- "z7VQdrGXcI9ijOV7w7HDvOZe3TDL0+u0Y4wKEr7YPOprdcJcmx3gHdIrJIbW6Lpu+G57wHAF8CneOgOx",
- "gUEFGGovNuveGIoQTAcDWql2eSWakqZ8RZ8n2LLdsRFseIEtF25fGoyadoZCCmxEgfnc6z1OJOsJuDj2",
- "ToT6WJU+QD/6QDhSUuZ8dw2z6GPWhcz2g5jHBNM1G9xdhAtExUFiK+nVB9xNIb1A5CCY3pZxm41PaD+u",
- "HaPorsEi3Cvgrgp3O8RwdKDTcgmpZhd7Ar//wwjLTVDx1IvT9omHIA6c1YEz/kHKa0r5DUC74rJ3whNU",
- "zbg1OENhn+ewfaBIixqideWmnlBvki+JGMCKIokhEaFijger/ztbMFM1ZSAWvKPPdoemmNNgQd8gjeGG",
- "c3mSJDRMbdgx5YWIKRCj5jJdr5XwgzEgQ7Hh/ZKaw7fXK6xgqupi7PWLk0Ech9ETu/XeLl2+Jobp1yYv",
- "n7kJyv/mc3LsLPYl06bkMBoYL6nMfIuoxOyF8WQg2qobv2zDxFkc6GU9M2vCMvohvJE6Bxh8k+ZCMb5K",
- "hiKY2pEQ4WNI6O9B2wTWKkW4liBdqXHtH4pNtPBhHLvg2IUK93DPTZCgBqv2WeAGM37fNynNWNyJ2meC",
- "nS8rXCCRUFADnQwSj4fn3IXsl/a7j1n1xX06pbQi43p6TfZmDvuAHKZ6SAypfkncbbk/FvYmqgrj3L7k",
- "oGJZyNygMjRilVJkVWov6PBggFfpRuf472AlUSk/7a+yJ7DlWPHiTZBZcA7buRWa0jXlTemR9rG2xQjt",
- "GoJMvs5u36kWFxdY85VdwOpO4PwzNaHppBQiTwasVif9ZOruGThn6TlkxNwd3pU9UNSXPERjSe2WuFxv",
- "ffJwWQKH7NGMEKNLFaXeeg9Fu4xYZ3L+QO+af4OzZpWtb+CUtNlHHo/CsA9v35K/+WF2czUFhvndcio7",
- "yJ5s5c1AIrekl5ES12NfMYv4DLplhxuislDEpJQbpq6NOt99RS1C+mHSwR7957yl1dlCOR0/gZBwx9pd",
- "YCC9pnbXT6cYuzxcB3K1SkF/naM3oIXbAdyPQXxjmugjd9iioBdjLArxoh6mO5o0LEKwIg5BUMlvh78R",
- "CUuskCfI48c4wePHU9f0tyftz0b7evw4ejLvzZjReizNzRujmH8M+ZWt73QghKGzHxXLs32E0QpIaapV",
- "YsjFry5050+pl/mrVZH7R9WVDryOGbW7CYiYyFpbkwdTBaEmI6JMXLdZ9Dk7BWklmd5iRpHXqNiv0Uzt",
- "17URxr3AWceguxBo+9a9i4hqTDbN8+SvhX3+rjB3PRqxNdbz/35DizIHd1C+fbD4Kzz927Ps4OnhXxd/",
- "O3h+kMKz5y8ODuiLZ/TwxdNDePK3588O4HD5zYvFk+zJsyeLZ0+effP8Rfr02eHi2Tcv/vrAvw1uAW3e",
- "3f7fWFQ2OX53kpwZYBuc0JLVz3gYMvYFKmmKJ9HoJPnkyP/0P/0Jm6WiaIb3v05ceNxkrXWpjubzy8vL",
- "WdhlvkIdLdGiStdzP0//+YR3J3Xojk25wB21URmGFHBTHSkc47f335+ekeN3J7OGYCZHk4PZwewQ60CX",
- "wGnJJkeTp/gTnp417vvcEdvk6PPVdDJfA82xOLj5owAtWeo/qUu6WoGcuUqd5qeLJ3Pv+Z9/dvrp1a5v",
- "87DozfxzS43P9vTEuiDzzz7dZXfrVj6JM18EHUZCMTylfSBs/hn1wcHf22B81huWXc29+cn1cA/tzD83",
- "L19d2VOYQ8x0ZEO5aPBQ1tTo6/hwqrK/moPnI8iZaj+UVlPRSWaox/R6Wb8CFiTPH33oiV92IOJHwqNm",
- "6Kg5Ca2ZGmanZQVhPnfNylvtG4b+4SB58enz4fTw4OovhmG7P58/vRppA24eeiWnNTce2fBT5y39JwcH",
- "/82eyX12zRXvlLlbbrJIud7vaEZ8dCPOfXh/c59wtMAbxknsxXA1nTy/z9WfcEPyNCfYMsj76W/9L/yc",
- "i0vuW5pbvCoKKrf+GKsWU/Bv++FdQVcKNTDJLqiGySdU8WPu/QHmgu8RX5u54CPLX5nLfTGXL+P16SfX",
- "POBf/oq/stMvjZ2eWnY3np06Uc4G0M/tCySNhNcrL7uCaCQ/xtTTXe/tdTnsa9C95wMnt2Qxf9pLgv+9",
- "z8mzg2f3B0G7NuKPsCVvhSY/oNvrCz2z447PLkmooxllWY/ILfsHpb8T2XYHhgq1Kl3Qa0QuWTBuQO7f",
- "Lv23OXrP+53DllhXsDf5u+dt2/LQ1S15wBf7EuFXHvKVh0g7/dP7m/4U5AVLgZxBUQpJJcu35Bdepyzd",
- "XK3LsmiYXfvo93ia0UZSkcEKeOIYVrIQ2daXq2kNeA7WNN0TVOaf2zUnrflr0Cz1Cn+vn8LpA73YkpNX",
- "PQnGduty2u+22LSjMUZ0wi6IOzXDLi8aUMZ2kblZyEpoYrGQuUV9ZTxfGc+thJfRhycmv0S1CW/I6d7J",
- "U5+7G8tup7o/9Rid4089rv9lH3b/yhK+soSbs4TXEDmMeGodk4gQ3U0svX0GgZFXWbdyO4Yv+OZVTiVR",
- "MNZMcYwjOuPEfXCJ+1bSoriyOhrlBDZM4UskkQ27W73tK4v7yuK+IK/VfkbTFkSuremcw7agZa3fqHWl",
- "M3Fpa95EuSKWg6W5qx2H1dzqSAwtiB+gSXAiP7uMvnyLL6KzzIhxmhVgRKqa15nOPmy1iZs1IzRP+K0Y",
- "xwmQVeAstkgiDVIHFKSC2wevOr42B9lbqxPGmOzvFSBHc7hxME6mLWeL28ZIScJby19938jVDlt6/WpV",
- "6+/5JWU6WQrpMocQQ/0oDA00n7vqDp1fm7zO3hdMVg1+DGI34r/O6yq90Y/dqJPYVxcU4hs1YWVhmBbu",
- "YR2g9eGT2Qos8ua2t4k6OprPMdx+LZSeT66mnzsRSeHHTzX2P9c3r9uFq09X/z8AAP//Jlc5MCqxAAA=",
+ "H4sIAAAAAAAC/+x9/XPcNrLgv4KafVX+uOGM/JVdqyr1TrGcrC6O47KUfXfP9iUYsmcGKxJgAFCaiU//",
+ "+xUaAAmS4Az1scpzPf9kawg0Go1Go7vR3fg8SUVRCg5cq8nh50lJJS1Ag8S/aJqKiuuEZeavDFQqWamZ",
+ "4JND/40oLRlfTaYTZn4tqV5PphNOC2jamP7TiYTfKyYhmxxqWcF0otI1FNQA1tvStK4hbZKVSByIIwvi",
+ "5HhyteMDzTIJSvWx/JnnW8J4mlcZEC0pVzQ1nxS5ZHpN9Jop4joTxongQMSS6HWrMVkyyDM185P8vQK5",
+ "DWbpBh+e0lWDYiJFDn08X4liwTh4rKBGql4QogXJYImN1lQTM4LB1TfUgiigMl2TpZB7ULVIhPgCr4rJ",
+ "4YeJAp6BxNVKgV3gf5cS4A9INJUr0JNP09jklhpkolkRmdqJo74EVeVaEWyLc1yxC+DE9JqRnyqlyQII",
+ "5eT996/Is2fPXpqJFFRryByTDc6qGT2ck+0+OZxkVIP/3Oc1mq+EpDxL6vbvv3+F45+6CY5tRZWC+GY5",
+ "Ml/IyfHQBHzHCAsxrmGF69DiftMjsimanxewFBJGroltfKeLEo7/p65KSnW6LgXjOrIuBL8S+zkqw4Lu",
+ "u2RYjUCrfWkoJQ3QDwfJy0+fn0yfHFz95cNR8p/uzxfPrkZO/1UNdw8Fog3TSkrg6TZZSaC4W9aU9+nx",
+ "3vGDWosqz8iaXuDi0wJFvetLTF8rOi9oXhk+YakUR/lKKEIdG2WwpFWuiR+YVDw3YspAc9xOmCKlFBcs",
+ "g2xqpO/lmqVrklJlQWA7csny3PBgpSAb4rX47HZspquQJAavG9EDJ/RflxjNvPZQAjYoDZI0FwoSLfYc",
+ "T/7EoTwj4YHSnFXqeocVOVsDwcHNB3vYIu244ek83xKN65oRqggl/miaErYkW1GRS1ycnJ1jfzcbQ7WC",
+ "GKLh4rTOUbN5h8jXI0aEeAshcqAcief3XZ9kfMlWlQRFLteg1+7Mk6BKwRUQsfgnpNos+/86/fktEZL8",
+ "BErRFbyj6TkBnopseI3doLET/J9KmAUv1Kqk6Xn8uM5ZwSIo/0Q3rKgKwqtiAdKslz8ftCASdCX5EEIW",
+ "4h4+K+imP+iZrHiKi9sM21LUDCsxVeZ0OyMnS1LQzbcHU4eOIjTPSQk8Y3xF9IYPKmlm7P3oJVJUPBuh",
+ "w2izYMGpqUpI2ZJBRmooOzBxw+zDh/Hr4dNoVgE6HsggOvUoe9DhsInwjNm65gsp6QoClpmRX5zkwq9a",
+ "nAOvBRxZbPFTKeGCiUrVnQZwxKF3q9dcaEhKCUsW4bFTRw4jPWwbJ14Lp+CkgmvKOGRG8iLSQoOVRIM4",
+ "BQPuNmb6R/SCKvjm+dAB3nwdufpL0V31nSs+arWxUWK3ZORcNF/dho2rTa3+I4y/cGzFVon9ubeQbHVm",
+ "jpIly/GY+adZP0+GSqEQaBHCHzyKrTjVlYTDj/yx+Ysk5FRTnlGZmV8K+9NPVa7ZKVuZn3L70xuxYukp",
+ "Ww0Qs8Y1ak1ht8L+Y+DFxbHeRI2GN0KcV2U4obRllS625OR4aJEtzOsy5lFtyoZWxdnGWxrX7aE39UIO",
+ "IDlIu5KahuewlWCwpekS/9kskZ/oUv5h/inLPEZTw8DuoEWngHMWHJVlzlJqqPfefTZfze4Hax7QpsUc",
+ "T9LDzwFupRQlSM0sUFqWSS5SmidKU42Q/k3CcnI4+cu88arMbXc1DwZ/Y3qdYiejiFrlJqFleQ0Y74xC",
+ "o3ZICSOZ8RPKByvvUBVi3K6e4SFmZG8OF5TrWWOItARBvXM/uJEaelsdxtK7Y1gNEpzYhgtQVq+1DR8o",
+ "EpCeIFkJkhXVzFUuFvUPD4/KsqEgfj8qS0sP1AmBoboFG6a0eoTTp80WCsc5OZ6RH0LYqGALnm/NqWB1",
+ "DHMoLN1x5Y6v2mPk5tBAfKAILqeQM7M0ngxGeb8LjkNjYS1yo+7s5RXT+O+ubchm5vdRnb8MFgtpO8xc",
+ "aD45ylnLBX8JTJaHHc7pM45z4szIUbfvzdjGQIkzzI14Zed6Wrg76FiT8FLS0iLovthDlHE0vWwji+st",
+ "pelIQRfFOdjDAa8hVjfea3v3QxQTZIUODt/lIj2/g/2+MHD62w7BkzXQDCTJqKbBvnL7JX5YY8e/Yz+U",
+ "CCAjGv3P+B+aE/PZML6RixassdQZ8q8I/OqZMXCt2mxHMg3Q8BaksDYtMbbotbB81QzekxGWLGNkxGtr",
+ "RhPs4Sdhpt44yY4WQt6MXzqMwEnj+iPUQA22y7Szsti0KhNHn4j7wDboAGpuW/paZEihLvgYrVpUONX0",
+ "X0AFZaDeBRXagO6aCqIoWQ53sF/XVK37kzD23LOn5PTvRy+ePP316YtvjEFSSrGStCCLrQZFHjo1mii9",
+ "zeFRf2aoz1a5jkP/5rl3GLXhxuAoUckUClr2QVlHlD20bDNi2vWp1iYzzrpGcMy2PAMjXizZifWxGtSO",
+ "mTJnYrG4k8UYIljWjJIRh0kGe5nputNrhtmGU5RbWd2F8QFSChlxheAW0yIVeXIBUjER8Wq/cy2Ia+EV",
+ "krL7u8WWXFJFzNjopat4BnIW4yy94Yga01CofQeqBX224Q1tHEAqJd32yG/nG5mdG3fMurSJ750+ipQg",
+ "E73hJINFtWrprkspCkJJhh3x4HgrMjB2R6XuQFo2wBpkzEKEKNCFqDShhIsM0EipVFyODlxxoW8drwR0",
+ "KJr12p7TCzAKcUqr1VqTqiTo8O4tbdMxoaldlATPVDXgEaxdubaVHc5en+QSaGYUZeBELJzbzTkEcZIU",
+ "vfXaSyInxSOmQwuvUooUlDIGjlVb96Lm29lV1jvohIgjwvUoRAmypPKGyGqhab4HUWwTQ7dWu5yvso/1",
+ "uOF3LWB38HAZqTQ2juUCo+OZ3Z2DhiESjqTJBUj02f1L188PctPlq8qBG3WnqZyxAk0lTrlQkAqeqSiw",
+ "nCqd7Nu2plFLnTIzCHZKbKci4AFz/Q1V2npuGc9QtbbiBsexdrwZYhjhwRPFQP6HP0z6sFMjJ7mqVH2y",
+ "qKoshdSQxebAYbNjrLewqccSywB2fXxpQSoF+yAPUSmA74hlZ2IJRHXt53BXG/3JoTfAnAPbKClbSDSE",
+ "2IXIqW8VUDe8VRxAxNhhdU9kHKY6nFNfZU4nSouyNPtPJxWv+w2R6dS2PtK/NG37zEV1I9czAWZ07XFy",
+ "mF9aytr75DU1OjBCJgU9N2cTarTWxdzH2WzGRDGeQrKL8822PDWtwi2wZ5MOGBMuYiUYrbM5OvwbZbpB",
+ "JtizCkMTHrBs3lGpWcpK1CR+hO2du0W6A0Q9JCQDTZnRtoMPKMBR9tb9ib0z6MK8maI1Sgnto9/TQiPT",
+ "yZnCA6ON/Dls0VX6zl5GnwVX2HegKUagmt1NOUFE/RWXOZDDJrChqc635pjTa9iSS5BAVLUomNY2uqCt",
+ "SGpRJiGAqIG/Y0TnYrEXuX4Fxvh8ThFUML3+UkwnVm3Zjd9ZR3FpkcMpTKUQ+QhXdI8YUQxGuapJKcyq",
+ "MxfM4iMePCe1kHRKDPrXauH5QLXIjDMg/0dUJKUcFbBKQ30iCIliFo9fM4I5wOoxnVO6oRDkUIDVK/HL",
+ "48fdiT9+7NacKbKESx8BZhp2yfH4MVpJ74TSrc11Bxav2W4nEdmOng9zUDgdritTZntNewd5zEq+6wCv",
+ "3SVmTynlGNdM/9YCoLMzN2PmHvLImqr1/rkj3FFOjQB0bN523aUQyztypMUjANA4cZf6phVZVtwiVSln",
+ "juA9l3doiOW0jvKw0d2HBEMA1tR749yfT198M5k2V/f1d3Mm26+fIholyzaxAI0MNrE1cVsMrakHxvTY",
+ "KojeiqFgFstIjBbI89zNrCM6SAFmT6s1Kw3IJp5kq6EVi/p/H/774Yej5D9p8sdB8vJ/zD99fn716HHv",
+ "x6dX3377/9o/Pbv69tG//1vUrajZIu7+/LtZJbEkTsRv+Am3FxhLIa09tnVqnljeP95aAmRQ6nUs+LOU",
+ "oFA02iDOUq+bRQXo+FBKKS6ATwmbwawrYrMVKO9MyoEuMQgRbQox5lK03g6W3zxzBFQPJzJKjsX4B6/4",
+ "kDdxMxujI9/egfJiARHZpqc31pX9KpZh5KzbKGqrNBR9f5ft+uuAtv/e68q9TSV4zjgkheCwjSaLMA4/",
+ "4cdYb3vcDXRGxWOob9eWaOHfQas9zpjFvC19cbUD+f6uvti+g8Xvwu24OsOYYXTVQF4SStKcoSNHcKVl",
+ "leqPnKKpGLBr5DrJG8DDzoNXvkncWxFxJjhQHzlVhoa1ARl1gS8hcmR9D+B9CKparUDpjtK8BPjIXSvG",
+ "ScWZxrEKs16JXbASJN7pzGzLgm7Jkubo6/gDpCCLSrfVSDz0lGZ57vyuZhgilh851UYGKU1+Yvxsg+B8",
+ "BKHnGQ76UsjzmgrxI2oFHBRTSVzu/2C/ovh301+7owDzTOxnL2/uW+573GOBdw7zk2NnYp0cox7deFx7",
+ "uN+bG65gPIkymdGLCsYxfrvDW+ShsQY8Az1qfLdu1T9yveGGkS5ozjKjO92EHboirrcX7e7ocE1rITpe",
+ "FT/XT7GwgZVISpqe463xZMX0ulrMUlHMvWk5X4nazJxnFArB8Vs2pyWbqxLS+cWTPXruLeQViYirq+nE",
+ "SR11544YBzg2oe6YtT/T/60FefDD6zMydyulHtgoXAs6CJ+MeANchFDrwspM3maR2TDkj/wjP4Yl48x8",
+ "P/zIM6rpfEEVS9W8UiC/oznlKcxWghz6oKNjqulH3hPxg4meQbgXKatFzlJyHh7Fzda0yTt9CB8/fjAM",
+ "8vHjp97tR//gdENF96gdILlkei0qnbjshETCJZVZBHVVR6cjZJtbtGvUKXGwLUe67AcHPy6qaVmqbrBq",
+ "f/plmZvpB2yoXCimWTKitJBeCBrJaLHB9X0rnMkl6aVPbakUKPJbQcsPjOtPJPlYHRw8A9KK3vzNyRrD",
+ "k9sSWn6jGwXTdn1GOHGrUMFGS5qUdAUqOn0NtMTVx4O6QA9lnhPs1ooa9TEWCKqZgKfH8AJYPK4dAYeT",
+ "O7W9fJppfAr4CZcQ2xjp1Dj+b7peQRzpjZerE4vaW6VKrxOzt6OzUobF/crU2WcrI5P9bYxiK242gUvU",
+ "WwBJ15CeQ4Y5Q1CUejttdfcXfu6E86KDKZtbZwPdMAEEXWwLIFWZUacDUL7tRuIr0NqnH7yHc9ieiSZ/",
+ "5Dqh9+2AcDW0UZFTg8PIMGu4bR2M7uK7y2MMgi1LH1eNMYSeLQ5rvvB9hjeyPSHvYBPHmKIVsDxECCoj",
+ "hLDMP0CCG0zUwLsV68emZ9SbhT35Im4eL/uJa9Jobe4COJwNxmHb7wVgoq64VGRBFWREuBxTG/QcSLFK",
+ "0RUM+J5CL+fI0OKWZxSB7Dv3oiedWHYPtN55E0XZNk7MnKOcAuaLYRV0E3au/f1I1pGOM5gRLB3hCLbI",
+ "UU2qIw6s0KGy5W22ufBDqMUZGCRvFA6PRpsioWazpsqnv2KWsN/Lo3SAf2EQ/66crZPgxjpIBa4zsrzM",
+ "7e7Tnt/WZW75dC2foxU6bUfkW00nLogqthyCowKUQQ4rO3Hb2DNKk1DQLJDB4+flMmccSBK7/KZKiZTZ",
+ "/OXmmHFjgNGPHxNifU9kNIQYGwdo4wURAiZvRbg3+eo6SHKXEEE9bLxaCv6GeCSgDW8yKo8ojQhnfCAw",
+ "zUsA6iIm6vOrE7eDYAjjU2LE3AXNjZhzTtQGSC+DCNXWTr6Qu6J8NKTO7nD92YPlWnOyR9FNZhPqTB7p",
+ "uEK3A+PdqkRsCRTSy5m+Na2GztIxQw8c30O0ehjkHt0IgY4noinP4yy/vRZa+2zun2SNSJ82ybQ+MjPG",
+ "+0P8E12lAfr1HcF1ttC77nEdNdLbV5ftRKlAf4qJYrNH+q7RvgNWQQ6oESctDSI5jznMjWIPKG5PfbfA",
+ "csd0LMq3j4L7cAkrpjQ0ritzKnlf7H1fd1FM/xZiOTw7Xcqlmd97IWoZbdMM7fVdOM17n8GF0JAsmVQ6",
+ "Qb9fdAqm0fcKLcrvTdO4otC+cbeVUFgWlw047Dlsk4zlVZxf3bg/Hpth39ZOGFUtzmGL6iDQdE0WWLkn",
+ "GoezY2gbqrVzwm/shN/QO5vvuN1gmpqBpWGX9hhfyL7oSN5d4iDCgDHm6K/aIEl3CEg8+I8h17GMpUBp",
+ "sJszMw1nu1yPvc2Uedi7DKUAi+EzykKKziWwlnfOgmH0gTH3mA4K3/TTBgb2AC1Llm06jkALddBcpNey",
+ "9n1icYcKuLoO2B4KBE6/WGSqBNXOIW+0W1vCiIdzm42izFk70zsUCOFQTPkCfH1CGdbGKlH7aHUGNP8R",
+ "tv8wbXE6k6vp5HZ+wxitHcQ9tH5XL2+UznghZv1IrWuAa5KclqUUFzRPnHd1iDWluHCsic29M/aeRV3c",
+ "h3f2+ujNO4f+1XSS5kBlUqsKg7PCduUXMyubrj6wQXyBL2PweJ3dqpLB4tdpxKFH9nINrphSoI32ij80",
+ "3vZgKzoP7TJ+L7/X3+ouBuwUd1wQQFnfDzS+K3s90L4SoBeU5d5p5LEduEPHyY2rIBKVCiGAW18tBDdE",
+ "yZ2Km97uju+Ohrv2yKRwrB3lngpb0UwRwbshWUaFRF8UsmpBsXSDdQn0hROvisRsv0TlLI07GPlCGebg",
+ "9uLINCbYeEAZNRArNnAPySsWwDLN1AhDt4NkMEaUmL4MyBDtFsKVoq04+70CwjLg2nySuCs7GxVrZThX",
+ "c/84NbpDfywH2LqnG/C30THCsiXdEw+R2K1ghNdUPXSPa5PZT7R2x5gfAn/8NW67wxF7R+KOm2rHH46b",
+ "bcjQun3dFFaO7cs/wxi2ytj+srXeeHX1UwbGiJahZSpZSvEHxO08NI8jYeu+UAvDqMk/gM8i2T9dEVN7",
+ "d5pqus3og8s9pN2EXqj2Df0A1+PKB3dSWBTDu2cpt0ttq0K24kLiDBPGcs0t/IZhHM69+LecXi5orGKI",
+ "UTIMTkfN7WfLkawF8Z097Z3Pm7naOTMSXKTWbZlN6CpBNhkl/eThGyoMdtjRqkKjGSDXhjrB1F5+5UpE",
+ "wFT8knJbXNT0s1vJ9VZgnV+m16WQmI6p4j7vDFJW0DyuOWRI/Xb6asZWzJbWrBQEtRsdIFuT2HKRq39p",
+ "75cb0pwsycE0qA7rViNjF0yxRQ7Y4oltsaAKJXntiKq7mOkB12uFzZ+OaL6ueCYh02tlCasEqZU6NG/q",
+ "m5sF6EsATg6w3ZOX5CHeWSl2AY8MFd35PDl88hKdrvaPg9gB4Gro7pImGYqT/3DiJM7HeGlnYRjB7aDO",
+ "osmFtvD5sODasZts1zF7CVs6Wbd/LxWU0xXEwySKPTjZvria6Ejr0IVntmqv0lJsCdPx8UFTI58GYj6N",
+ "+LNokFQUBdOFu9lQojD81BRmtIN6cLYEsKse5PHyH/GCsPT3Ix0j8n6dpvZ8i80ar3Hf0gLaZJ0SanNw",
+ "c9Zc3fuCX+TEZ/JjOaW6ipKljRnLTB3VHLzJX5JSMq7RsKj0MvkbSddU0tSIv9kQusnim+eRElLtqjH8",
+ "eojfO90lKJAXcdLLAbb3OoTrSx5ywZPCSJTsURNjHezKwZvMeLSYl+jdYMHdoMcqZQZKMshuVYvdaCCp",
+ "b8V4fAfAW7JiPZ9r8eO1Z3bvnFnJOHvQyqzQL+/fOC2jEDJW16XZ7k7jkKAlgwsMXIsvkoF5y7WQ+ahV",
+ "uA32f+7Ng1c5A7XM7+WYIfBdxfLsH03OSKcKn6Q8XUf9/gvT8demSnI9ZbuPo2VE1pRzyKPg7Jn5qz9b",
+ "I6f/P8XYcQrGR7btVtez0+1MrkG8jaZHyg9oyMt0bgYIqdoOoq+jLvOVyAiO09SsaLisXzAwqKD1ewVK",
+ "x5L28ION/ED/jrELbAEnAjxDrXpGfrCvnKyBtFLqUZtlRZXb9GzIViCd47Eqc0GzKTFwzl4fvSF2VNvH",
+ "lvy0BaRWqMy1Z9Gx64MCN+NiCH31znh883g4uwMuzayVxgoXStOijKWumBZnvgHmx4S+TlTzQurMyLHV",
+ "sJXX3+wghh+WTBZGM62hWRmPPGH+ozVN16i6tqTJMMuPr3zmuVIFheHrOq91jRrcdwZvV/zM1j6bEmHs",
+ "i0um7OMWcAHtbJk6dcyZTj57pj09WXFuOSUqo3elNt6E7B45e6Ht3aFRzDqEv6biYgsHXrcQ3Cn2ihZ9",
+ "6FaV61WEt1nFdYlS/2hRSrngLMWSC8FzGjXK7qGMMXcFI6pTdJ1Rfou7HRrZXNFadnU4kaPiYHU7Lwgd",
+ "4frOyuCrWVTLHfZPjS8yrKkmK9DKSTbIpr4ko/OXMK7A1RzCN1MCOSlk6/4FJWT0Si+pXb/XZCOMnR9Q",
+ "gL8339468wiDSs8ZR0XIkc3Fr1qPBtbx10Z7YpqsBCg3n3Zqvvpg+swwPT2DzaeZr/uPMOz1hZm2vavr",
+ "gzryN3fupsy0fWXaEht1WP/cClO0gx6VpRt0uGBnVB/QGz5I4MgNTOJd4AFxa/ghtB3stvPKHc9Tw2hw",
+ "gRd2UOI53GOMunhlp1rvBc0ry1HYgthQl2h+JeMRNN4wDs2rFJEDIo0eCbgwuF8H+qlUUm1VwFEy7Qxo",
+ "jrd0MYGmtHPR3hZUZ4GRJDhHP8bwMjZ1NwcER92gUdwo39aPYRjuDpSJV/gKjyNkv4omalVOicow7LhT",
+ "VzMmOIzg9pV72wdAfxv0dSLbXUtqd851TqKhTLJFla1AJzTLYsXavsOvBL+SrELNATaQVnWxq7IkKWZs",
+ "t1PY+9zmBkoFV1WxYyzf4JbDpSKmR7/FAZSPq26AzwiKXyN6j1+/e//61dHZ62N7Xhiz3KaSGZ1bQmEE",
+ "orFjlQajOlcKyG8hGX/Dfr91JhxHM6inG2HasKavZ0QMqF9s8d9YQaphBnJ36teO6vIX6Njx2up9G1JP",
+ "OTdbL1FslYynBB59tydHM/TN9mPT/043ZC5WbUTuuXLMLmEcrlFMDL8251uYBd6rsmZPwDpJG2OohC/N",
+ "j9ZtnV7YFp544vbKrqHvvq6yvtt7MlwvfYpn9EAkZVAvh1o1wF4GDcVTpoPhv1S7LBxNyU5JiUXOYxBs",
+ "MIYtrm7fZYw6woYCMGz8hfnc6z1Oge2ZAwh7J0F9ZE8foR992CApKXM3nY2w6FPWBRj3Q77HhB42C9yd",
+ "hAvbRSCxmfSqKe7mkF7YdpB6YIvezcan/x/V18h4uYUly1fAXc3ydkDm6LCw5RJSzS72hMn/hzEtmhDs",
+ "qTc+7IMYQdQ8q8OM/POd17SJGoR2RbHvxCeoMXJrdIaCZM9h+0CRFjdEq/BNPaPeJLsUKYD1VxLDIkLF",
+ "rmmst8R5zpmqOQOp4K9FbXdoSl8Nlj8Okj5uOJZnSULDRJAdQ16ImLk1aizT9VrpURgxMxRJ3y9AOnx6",
+ "HWO9V1WXrq/f5wxUUWNVd6vjXbrsVkxqqB2EPs8VlP/NZzDZUey7r02BZnTHXlKZ+RZR+8KbLslAbFo3",
+ "2tsG1bM40st6ZNYEsfQDniNVITBUKc2FYnyVDMV7teNGwqej8HYMPTlY2RXxWoJ0hdm1f1Y30cIHvezC",
+ "Yxcp3DNHNyGCGqxxaJEbzI9+3ySAYyksah9Vdjd/4QSNsUENdjJI0x4ecxexX9nvPsLXl0IaYUY5fk32",
+ "5ln78CWmekQMuX5J3Gm5P3L4JqYK49y+e6FiOdvckDJ0+ZVSZFVqD+hwYzSG4diKCDtESVTLT/uz7Cls",
+ "OdYHeRPkYZzDdm6VpnRNeVOopb2tbelGO4cg77Gz2ndqxcUV1nxlJ7C6Ezz/TEtoOimFyJMBH99JP/W8",
+ "uwfOWXoOGTFnh7/4HyiBTB6ia6m+xLlcb32qdVkCh+zRjBBjSxWl3vr7nHbRtc7g/IHeNf4GR80qWw3C",
+ "GWmzjzwes2KfKb+lfPNgdks1BUb43XIoC2RPbvdmIO1d0stIQfCxb75Fbli6RZobprJYxLSUGyb6jdrf",
+ "fUMtwvphisYe++e8ZdXZskKdWxUh4Y6tu8CdfE3rrp98MnZ6OA+UapWC/jxHL0CLtgO0H0P4xjXRJ+6w",
+ "R0EvxngU4iVQTHd0aViCYP0ggqiS3578RiQssZ6gII8f4wCPH09d09+etj8b6+vx4+jOvDdnRutpOTdu",
+ "jGP+MXQLb2+aBwI+OutRsTzbxxit8J2mticGqPzqAp3+lOqiv1oTub9VXaHF67hRu4uAhInMtTV4MFQQ",
+ "mDMiJsd1m0Uf/1OQVpLpLeZfeYuK/RrNa/+hdsK490rriH0XMK7FOdQZfI3LpnnM/QdhHwsszFmPTmyN",
+ "rx+83tCizMFtlG8fLP4Kz/72PDt49uSvi78dvDhI4fmLlwcH9OVz+uTlsyfw9G8vnh/Ak+U3LxdPs6fP",
+ "ny6eP33+zYuX6bPnTxbPv3n51wf+JXWLaPNK+f/GErzJ0buT5Mwg29CElqx+9MSwsS/nSVPcicYmySeH",
+ "/qf/6XfYLBVFA97/OnHBhJO11qU6nM8vLy9nYZf5Cm20RIsqXc/9OP3HJt6d1IFONkEFV9TGsBhWwEV1",
+ "rHCE396/Pj0jR+9OZg3DTA4nB7OD2ROsml0CpyWbHE6e4U+4e9a47nPHbJPDz1fTyXwNNMdS6uaPArRk",
+ "qf+kLulqBXLm6pqany6ezn2cxPyzs0+vdn2bhyWC5p9bZny2pydWUZl/9slBu1u3sm+c+yLoMBKL4SHt",
+ "c2rzz2gPDv7eRuOz3rDsau7dT66He5Zo/rl5J+zK7sIcYq4jG/hGg2fFpsZex2dmlf3VbDwfb89U+1m5",
+ "motOMsM9pter+s20oNTA4Yee+mUBEQ8Jt5rho2YntEZqhJ2WFYTZ77Uob7VvBPqHg+Tlp89Ppk8Orv5i",
+ "BLb788Wzq5E+4OZZXHJaS+ORDT9hsDpas7hBnh4c/Dd7VPj5NWe8U+duXZNFiht/RzPiY0Fx7Cf3N/YJ",
+ "Rw+8EZzEHgxX08mL+5z9CTcsT3OCLYMsqf7S/8LPubjkvqU5xauioHLrt7FqCQX/EiKeFXSl0AKT7IJq",
+ "mHxCEz8WNDAgXPD15msLF3yS+qtwuS/h8mW81f30mhv8y5/xV3H6pYnTUyvuxotTp8rZdIO5fa+l0fB6",
+ "xXhXEM17wAwEuut1wq6E/QF077HFyS1FzJ/27uJ/733y/OD5/WHQriT5I2zJW6HJ93jt9YXu2XHbZ5cm",
+ "1LGMsqzH5Fb8g9LfiWy7g0KFWpUuRDiilywYNyj3T5f+Sya9xxDPYUvsVbB3+bvHgNv60NUtZcAX+27j",
+ "VxnyVYZIO/yz+xv+FOQFS4GcQVEKSSXLt+QXXid43dysy7JomF176/dkmrFGUpHBCnjiBFayENnWF/dp",
+ "ATwH65ruKSrzz+0Kndb9NeiWOsbf64eD+kgvtuTkuKfB2G5dSfvdFpt2LMaITdhFcadl2JVFA8bYLjY3",
+ "E1kJTSwVMjepr4Lnq+C5lfIyevPE9JeoNeEdOd0zeeoznWO1AKjuDz3G5vhTt+t/2Wfwv4qEryLh5iLh",
+ "B4hsRty1TkhEmO4mnt6+gMDIq6xb5x7DF3zzKqeSKBjrpjhCiM45cR9S4r6NtCitrI1GOYENU/huS2TB",
+ "7tZu+yrivoq4L+jWar+gaSsi17Z0zmFb0LK2b9S60pm4tBWColIRi+fS3FXaw9p3dSSGFsQDaBKcyM8u",
+ "oy/f4vvxLDNqnGYFGJWqlnWmsw9bbeJmDYTmwcMV4zgAigocxZaUpEHqgIJUcPs8WOeuzWH21tqEMSH7",
+ "ewUo0RxtHI6TaeuyxS1jpIDjrfWv/t3I1Q5fev3GV+vv+SVlOlkK6TKHkEL9KAwNNJ+7WhidX5u8zt4X",
+ "TFYNfgxiN+K/zuuaxtGP3aiT2FcXFOIbNWFlYZgWrmEdoPXhk1kKLInnlreJOjqczzHcfi2Unk+upp87",
+ "EUnhx0819T/XJ69bhatPV/8/AAD//75BkK9YsgAA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index 2a0c193e0..17485641d 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -316,7 +316,13 @@ type DryrunTxnResult struct {
AppCallMessages *[]string `json:"app-call-messages,omitempty"`
AppCallTrace *[]DryrunState `json:"app-call-trace,omitempty"`
- // Execution cost of app call transaction
+ // Budget added during execution of app call transaction.
+ BudgetAdded *uint64 `json:"budget-added,omitempty"`
+
+ // Budget consumed during execution of app call transaction.
+ BudgetConsumed *uint64 `json:"budget-consumed,omitempty"`
+
+ // Net cost of app execution. Field is DEPRECATED and is subject to removal. Instead, use `budget-added` and `budget-consumed`.
Cost *uint64 `json:"cost,omitempty"`
// Disassembled program line by line.
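
Because both new fields are unsigned and optional, a client that still wants
the old signed semantics recomputes it. A minimal sketch against the generated
type above (netCost is an illustrative helper, not part of the generated
code):

	// netCost recovers the signed quantity the deprecated Cost field
	// approximated: budget consumed minus budget added. Both pointers
	// are optional in the API, so check them before dereferencing.
	func netCost(r DryrunTxnResult) (int64, bool) {
		if r.BudgetConsumed == nil || r.BudgetAdded == nil {
			return 0, false
		}
		return int64(*r.BudgetConsumed) - int64(*r.BudgetAdded), true
	}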
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index 065a90fff..fa62e543f 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -788,204 +788,206 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3fbOJLoX8HV7jlJvKLkPGfic/rsdSf98J0knRO7Z2e3ndsNkSUJYxLgAKAtda7/",
- "+z0oACRIgpL8SNJJ+1NiEY9CoVAo1PPDKBVFKThwrUYHH0YllbQADRL/omkqKq4Tlpm/MlCpZKVmgo8O",
- "/DeitGR8MRqPmPm1pHo5Go84LaBpY/qPRxL+VTEJ2ehAywrGI5UuoaBmYL0uTet6pFWyEIkb4tAOcfRy",
- "dLnhA80yCUr1ofyJ52vCeJpXGRAtKVc0NZ8UuWB6SfSSKeI6E8aJ4EDEnOhlqzGZM8gzNfGL/FcFch2s",
- "0k0+vKTLBsREihz6cL4QxYxx8FBBDVS9IUQLksEcGy2pJmYGA6tvqAVRQGW6JHMht4BqgQjhBV4Vo4Nf",
- "Rgp4BhJ3KwV2jv+dS4DfIdFULkCP3o9ji5trkIlmRWRpRw77ElSVa0WwLa5xwc6BE9NrQl5XSpMZEMrJ",
- "u+9fkMePHz83Cymo1pA5IhtcVTN7uCbbfXQwyqgG/7lPazRfCEl5ltTt333/Auc/dgvctRVVCuKH5dB8",
- "IUcvhxbgO0ZIiHENC9yHFvWbHpFD0fw8g7mQsOOe2Ma3uinh/J91V1Kq02UpGNeRfSH4ldjPUR4WdN/E",
- "w2oAWu1LgylpBv1lP3n+/sPD8cP9y3/75TD5H/fn08eXOy7/RT3uFgxEG6aVlMDTdbKQQPG0LCnv4+Od",
- "owe1FFWekSU9x82nBbJ615eYvpZ1ntO8MnTCUikO84VQhDoyymBOq1wTPzGpeG7YlBnNUTthipRSnLMM",
- "srHhvhdLli5JSpUdAtuRC5bnhgYrBdkQrcVXt+EwXYYoMXBdCx+4oD8uMpp1bcEErJAbJGkuFCRabLme",
- "/I1DeUbCC6W5q9TVLitysgSCk5sP9rJF3HFD03m+Jhr3NSNUEUr81TQmbE7WoiIXuDk5O8P+bjUGawUx",
- "SMPNad2j5vAOoa+HjAjyZkLkQDkiz5+7Psr4nC0qCYpcLEEv3Z0nQZWCKyBi9k9Itdn2/3P80xsiJHkN",
- "StEFvKXpGQGeimx4j92ksRv8n0qYDS/UoqTpWfy6zlnBIiC/pitWVAXhVTEDafbL3w9aEAm6knwIIDvi",
- "Fjor6Ko/6YmseIqb20zbEtQMKTFV5nQ9IUdzUtDVN/tjB44iNM9JCTxjfEH0ig8KaWbu7eAlUlQ820GG",
- "0WbDgltTlZCyOYOM1KNsgMRNsw0exq8GTyNZBeD4QQbBqWfZAg6HVYRmzNE1X0hJFxCQzIT87DgXftXi",
- "DHjN4MhsjZ9KCedMVKruNAAjTr1ZvOZCQ1JKmLMIjR07dBjuYds49lo4AScVXFPGITOcF4EWGiwnGoQp",
- "mHDzY6Z/Rc+ogmdPhi7w5uuOuz8X3V3fuOM77TY2SuyRjNyL5qs7sHGxqdV/h8dfOLdii8T+3NtItjgx",
- "V8mc5XjN/NPsn0dDpZAJtBDhLx7FFpzqSsLBKd8zf5GEHGvKMyoz80thf3pd5Zods4X5Kbc/vRILlh6z",
- "xQAya1ijrynsVth/zHhxdqxX0UfDKyHOqjJcUNp6lc7W5Ojl0CbbMa9KmIf1UzZ8VZys/Evjqj30qt7I",
- "ASAHcVdS0/AM1hIMtDSd4z+rOdITncvfzT9lmcdwagjYXbSoFHDKgsOyzFlKDfbeuc/mqzn9YJ8HtGkx",
- "xZv04EMAWylFCVIzOygtyyQXKc0TpanGkf5dwnx0MPq3aaNVmdruahpM/sr0OsZORhC1wk1Cy/IKY7w1",
- "Ao3awCUMZ8ZPyB8sv0NRiHG7e4aGmOG9OZxTrifNQ6TFCOqT+4ubqcG3lWEsvjsPq0GEE9twBsrKtbbh",
- "PUUC1BNEK0G0opi5yMWs/uH+YVk2GMTvh2Vp8YEyITAUt2DFlFYPcPm0OULhPEcvJ+SHcGwUsAXP1+ZW",
- "sDKGuRTm7rpy11etMXJraEa8pwhup5ATszUeDUZ4vw2Kw8fCUuRG3NlKK6bxj65tSGbm9506fxkkFuJ2",
- "mLjw+eQwZ18u+EvwZLnfoZw+4TglzoQcdvtej2zMKHGCuRatbNxPO+4GPNYovJC0tAC6L/YSZRyfXraR",
- "hfWG3HRHRheFOTjDAa0hVNc+a1vPQxQSJIUODN/mIj27hfM+M+P0jx0OT5ZAM5Ako5oG58qdl/hljR1/",
- "xH7IEUBGJPqf8D80J+azIXzDF+2w5qXOkH5FoFfPzAPXis12JtMAH96CFPZNS8xb9EpQvmgm7/EIi5Zd",
- "eMR39hlNsIdfhFl6oyQ7nAl5PXrpEAInjeqPUDNqcFzGnZ3FplWZOPxE1Ae2QWegxtrSlyJDDHWHj+Gq",
- "hYVjTT8CFpQZ9Taw0B7otrEgipLlcAvndUnVsr8I8557/Igc/3j49OGjXx89fWYeJKUUC0kLMltrUOS+",
- "E6OJ0uscHvRXhvJslev46M+eeIVRe9zYOEpUMoWClv2hrCLKXlq2GTHt+lhroxlXXQO4y7E8AcNeLNqJ",
- "1bEa0F4yZe7EYnYrmzGEsKyZJSMOkgy2EtNVl9dMsw6XKNeyuo3HB0gpZEQVgkdMi1TkyTlIxUREq/3W",
- "tSCuhRdIyu7vFlpyQRUxc6OWruIZyEmMsvSKI2hMQ6G2Xah26JMVb3DjBqRS0nUP/Xa9kdW5eXfZlzby",
- "vdJHkRJkolecZDCrFi3ZdS5FQSjJsCNeHG9EBubdUalb4JbNYA0wZiNCEOhMVJpQwkUG+EipVJyPDpi4",
- "ULeOJgEdsma9tPf0DIxAnNJqsdSkKgkqvHtb23RMaGo3JcE7VQ1oBGtVrm1lp7Pmk1wCzYygDJyImVO7",
- "OYUgLpKitl57TuS4eOTp0IKrlCIFpcwDx4qtW0Hz7ewu6w14QsAR4HoWogSZU3lNYLXQNN8CKLaJgVuL",
- "XU5X2Yd6t+k3bWB38nAbqTRvHEsFRsYzpzsHDUMo3BEn5yBRZ/dR989Pct3tq8oBi7qTVE5YgU8lTrlQ",
- "kAqeqehgOVU62XZsTaOWOGVWEJyU2EnFgQee66+o0lZzy3iGorVlNziPfcebKYYBHrxRzMh/95dJf+zU",
- "8EmuKlXfLKoqSyE1ZLE1cFhtmOsNrOq5xDwYu76+tCCVgm0jD2EpGN8hy67EIojqWs/hTBv9xaE2wNwD",
- "6ygqW0A0iNgEyLFvFWA3tCoOAGLeYXVPJBymOpRTmzLHI6VFWZrzp5OK1/2G0HRsWx/qn5u2feKiuuHr",
- "mQAzu/YwOcgvLGatPXlJjQyMI5OCnpm7CSVaq2Luw2wOY6IYTyHZRPnmWB6bVuER2HJIBx4TzmMlmK1z",
- "ODr0GyW6QSLYsgtDCx542bylUrOUlShJ/A3Wt64W6U4Q1ZCQDDRlRtoOPiADR95b9yfWZtAd83qC1k5C",
- "aB/8nhQaWU7OFF4YbeDPYI2q0rfWGH0SmLBvQVKMjGpON+UEAfUmLnMhh01gRVOdr801p5ewJhcggahq",
- "VjCtrXdBW5DUokzCAaIP/A0zOhWLNeT6HdhF53OMQwXL62/FeGTFls3wnXQElxY6nMBUCpHvoIruISMK",
- "wU6qalIKs+vMObN4jwdPSS0gnRCD+rWaed5TLTTjCsh/i4qklKMAVmmobwQhkc3i9WtmMBdYPadTSjcY",
- "ghwKsHIlftnb6y58b8/tOVNkDhfeA8w07KJjbw9fSW+F0q3DdQsvXnPcjiK8HTUf5qJwMlyXp0y2Pu3d",
- "yLvs5NvO4LW6xJwppRzhmuXfmAF0TuZql7WHNLKkarl97TjuTkqNYOjYuu2+SyHmt6RIi3sA4OPEGfVN",
- "KzKvuAWqUu45gnYur9AQ83Ht5WG9uw8IugAsqdfGuT8fPX02Gjem+/q7uZPt1/cRiZJlq5iDRgar2J64",
- "I4avqXvm6bFWELWKIWMW84iPFsiz3K2swzpIAeZMqyUrzZCNP8laQ8sX9f/e/8+DXw6T/6HJ7/vJ8/+Y",
- "vv/w5PLBXu/HR5fffPP/2j89vvzmwX/+e1StqNksrv780eySmBPH4lf8iFsDxlxI+x5bOzFPzD893FoC",
- "ZFDqZcz5s5SgkDVaJ85SL5tNBejoUEopzoGPCZvApMtiswUor0zKgc7RCRHfFGIXo2h9HCy9eeIIsB4u",
- "ZCc+FqMfNPEhbeJhNo+OfH0LwosdiMg2Pv1jXdmvYh56zrqDotZKQ9HXd9muvw5I+++8rNw7VILnjENS",
- "CA7raLAI4/AaP8Z62+tuoDMKHkN9u2+JFvwdsNrz7LKZN8Uv7nbA39/Whu1b2PzuuB1VZ+gzjKoayEtC",
- "SZozVOQIrrSsUn3KKT4VA3KNmJP8A3hYefDCN4lrKyLKBDfUKafK4LB+QEZV4HOIXFnfA3gdgqoWC1C6",
- "IzTPAU65a8U4qTjTOFdh9iuxG1aCRJvOxLYs6JrMaY66jt9BCjKrdFuMxEtPaZbnTu9qpiFifsqpNjxI",
- "afKa8ZMVDuc9CD3NcNAXQp7VWIhfUQvgoJhK4nz/B/sV2b9b/tJdBRhnYj97fvOp+b6HPeZ45yA/eume",
- "WEcvUY5uNK492D+ZGq5gPIkSmZGLCsbRf7tDW+S+eQ14AnrQ6G7drp9yveKGkM5pzjIjO12HHLosrncW",
- "7enoUE1rIzpaFb/W9zG3gYVISpqeodV4tGB6Wc0mqSim/mk5XYj6mTnNKBSC47dsSks2VSWk0/OHW+Tc",
- "G/ArEmFXl+OR4zrq1hUxbuDYgrpz1vpM/7cW5N4P352Qqdspdc964dqhA/fJiDbAeQi1DFZm8TaKzLoh",
- "n/JT/hLmjDPz/eCUZ1TT6YwqlqpppUB+S3PKU5gsBDnwTkcvqaanvMfiBwM9A3cvUlaznKXkLLyKm6Np",
- "g3f6I5ye/mII5PT0fc/60b843VTRM2onSC6YXopKJy46IZFwQWUWAV3V3uk4so0t2jTrmLixLUW66Ac3",
- "fpxV07JUXWfV/vLLMjfLD8hQOVdMs2VEaSE9EzSc0UKD+/tGuCeXpBc+tKVSoMhvBS1/YVy/J8lptb//",
- "GEjLe/M3x2sMTa5LaOmNruVM29UZ4cKtQAUrLWlS0gWo6PI10BJ3Hy/qAjWUeU6wW8tr1PtY4FDNAjw+",
- "hjfAwnFlDzhc3LHt5cNM40vAT7iF2MZwp0bxf939CvxIr71dHV/U3i5VepmYsx1dlTIk7nemjj5bGJ7s",
- "rTGKLbg5BC5QbwYkXUJ6BhnGDEFR6vW41d0b/NwN51kHUza2zjq6YQAIqthmQKoyo04GoHzd9cRXoLUP",
- "P3gHZ7A+EU38yFVc79sO4WrooCKlBpeRIdbw2LoxupvvjMfoBFuW3q8afQg9WRzUdOH7DB9ke0PewiGO",
- "EUXLYXkIEVRGEGGJfwAF11ioGe9GpB9bnhFvZvbmi6h5PO8nrkkjtTkDcLga9MO23wvAQF1xociMKsiI",
- "cDGm1uk54GKVogsY0D2FWs4dXYtbmlEcZNu9F73pxLx7ofXumyjItnFi1hylFDBfDKmgmrBj9vczWUU6",
- "rmBCMHWEQ9gsRzGp9jiwTIfKlrbZxsIPgRYnYJC8ETg8GG2MhJLNkiof/opRwv4s7yQDfEQn/k0xW0eB",
- "xToIBa4jsjzP7Z7Tnt7WRW75cC0foxUqbXeItxqPnBNVbDsERwEogxwWduG2sSeUJqCg2SADx0/zec44",
- "kCRm/KZKiZTZ+OXmmnFzgJGP9wixuiey8wgxMg7ARgMRDkzeiPBs8sVVgOQuIIL6sdG0FPwNcU9A695k",
- "RB5RGhbO+IBjmucA1HlM1PdXx28HhyGMj4lhc+c0N2zOKVGbQXoRRCi2duKFnInywZA4u0H1Zy+WK63J",
- "XkXXWU0oM3mg4wLdBog3ixKxLVCIL/f0rXE1dJfuMvXA9T2Eq/tB7NG1AOhoIpr0PO7lt/WF1r6b+zdZ",
- "w9LHTTCt98yM0f4Q/UR3aQB/fUVwHS30tntdRx/pbdNlO1AqkJ9irNickb5qtK+AVZADSsRJS4JIzmIK",
- "cyPYA7LbY98teLljOBbl6weBPVzCgikNjerK3EpeF/upzV0Uw7+FmA+vTpdybtb3ToiaR9swQ2u+C5f5",
- "yVdwLjQkcyaVTlDvF12CafS9whfl96ZpXFBoW9xtJhSWxXkDTnsG6yRjeRWnVzfv316aad/UShhVzc5g",
- "jeIg0HRJZpi5J+qHs2Fq66q1ccGv7IJf0Vtb726nwTQ1E0tDLu05vpBz0eG8m9hBhABjxNHftUGUbmCQ",
- "ePG/hFzHIpYCocEezsw0nGxSPfYOU+bH3vRQCqAYvqPsSNG1BK/ljatg6H1gnntMB4lv+mEDA2eAliXL",
- "Vh1FoB118LlIr/Ta94HFHSzg7rrBtmAgUPrFPFMlqHYMeSPd2hRGPFzbZCfMnLQjvUOGEE7FlE/A10eU",
- "IW3MErUNVydA87/B+u+mLS5ndDke3UxvGMO1G3ELrt/W2xvFMxrErB6pZQa4IsppWUpxTvPEaVeHSFOK",
- "c0ea2NwrYz8xq4vr8E6+O3z11oF/OR6lOVCZ1KLC4KqwXfnFrMqGqw8cEJ/gyzx4vMxuRclg8+sw4lAj",
- "e7EEl0wpkEZ7yR8abXtwFJ2Gdh63y2/VtzrDgF3iBgMBlLV9oNFdWfNA2yRAzynLvdLIQztgQ8fF7ZZB",
- "JMoVwgFubFoILETJrbKb3umOn46GurbwpHCuDemeCpvRTBHBuy5ZRoREXRSSakExdYNVCfSZE6+KxBy/",
- "ROUsjSsY+UwZ4uDWcGQaE2w8IIyaESs2YIfkFQvGMs3UDg/dDpDBHFFk+jQgQ7ibCZeKtuLsXxUQlgHX",
- "5pPEU9k5qJgrw6ma+9epkR36c7mBrXq6Gf4mMkaYtqR74yEQmwWM0EzVA/dl/WT2C63VMeaHQB9/BWt3",
- "OGPvStxgqXb04ajZugwt2+amMHNsn/8ZwrBZxranrfWPV5c/ZWCOaBpappK5FL9D/J2Hz+OI27pP1MLQ",
- "a/J34JNI9E+XxdTanSabbjP74HYPSTehFqptoR+getz5wCaFSTG8epZyu9U2K2TLLyROMKEv19SO3xCM",
- "g7nn/5bTixmNZQwxQoaB6bCxfrYUyVoQ39nj3um8mcudMyGBIbVuy2xAVwmyiSjpBw9fU2Cw0+4sKjSS",
- "AVJtKBOMrfErVyIyTMUvKLfJRU0/e5RcbwVW+WV6XQiJ4ZgqrvPOIGUFzeOSQ4bYb4evZmzBbGrNSkGQ",
- "u9ENZHMSWypy+S+tfblBzdGc7I+D7LBuNzJ2zhSb5YAtHtoWM6qQk9eKqLqLWR5wvVTY/NEOzZcVzyRk",
- "eqksYpUgtVCHz5vacjMDfQHAyT62e/ic3EeblWLn8MBg0d3Po4OHz1Hpav/Yj10ALofuJm6SITv5L8dO",
- "4nSMRjs7hmHcbtRJNLjQJj4fZlwbTpPtustZwpaO120/SwXldAFxN4liC0y2L+4mKtI6eOGZzdqrtBRr",
- "wnR8ftDU8KcBn0/D/iwYJBVFwXThLBtKFIaemsSMdlI/nE0B7LIHebj8RzQQlt4+0nlEflqlqb3fYqtG",
- "M+4bWkAbrWNCbQxuzhrTvU/4RY58JD+mU6qzKFncmLnM0lHMQUv+nJSScY0Pi0rPk7+SdEklTQ37mwyB",
- "m8yePYmkkGpnjeFXA/yT412CAnkeR70cIHsvQ7i+5D4XPCkMR8keND7WwakctGTGvcU8R+86C24eeleh",
- "zIySDJJb1SI3GnDqGxEe3zDgDUmxXs+V6PHKK/vklFnJOHnQyuzQz+9eOSmjEDKW16U57k7ikKAlg3N0",
- "XItvkhnzhnsh85124SbQf17Lgxc5A7HMn+XYQ+DbiuXZ35uYkU4WPkl5uozq/Wem469NluR6yfYcR9OI",
- "LCnnkEeHs3fmr/5ujdz+/xS7zlMwvmPbbnY9u9zO4hrA22B6oPyEBr1M52aCEKttJ/ra6zJfiIzgPE3O",
- "iobK+gkDgwxa/6pA6VjQHn6wnh+o3zHvApvAiQDPUKqekB9slZMlkFZIPUqzrKhyG54N2QKkUzxWZS5o",
- "NiZmnJPvDl8RO6vtY1N+2gRSCxTm2qvovOuDBDe7+RD67J1x/+bdx9nscGlWrTRmuFCaFmUsdMW0OPEN",
- "MD4m1HWimBdiZ0JeWglbefnNTmLoYc5kYSTTejTL45EmzH+0pukSRdcWNxkm+d0zn3mqVEFi+DrPa52j",
- "Bs+dgdslP7O5z8ZEmPfFBVO2uAWcQztapg4dc08nHz3TXp6sOLeUEuXRm0Ibr4N2D5w1aHt1aBSyDuKv",
- "KLjYxIFXTQR3jL2iSR+6WeV6GeFtVHGdotQXLUopF5ylmHIhKKdRg+wKZexiK9ghO0VXGeWPuDuhkcMV",
- "zWVXuxM5LA5mt/OM0CGur6wMvppNtdRh/9RYkWFJNVmAVo6zQTb2KRmdvoRxBS7nENZMCfikkC37C3LI",
- "qEkvqVW/VyQj9J0fEIC/N9/euOcROpWeMY6CkEOb81+1Gg3M46+N9MQ0WQhQbj3t0Hz1i+kzwfD0DFbv",
- "Jz7vP45hzRdm2dZW1x/q0FvunKXMtH1h2hLrdVj/3HJTtJMelqWbdDhhZ1Qe0Cs+iOCIBSbxKvAAufX4",
- "4WgbyG2jyR3vU0NocI4GOyjxHu4RRp28spOt95zmlaUobEGsq0s0vpLxCBivGIemKkXkgkijVwJuDJ7X",
- "gX4qlVRbEXAnnnYCNEcrXYyhKe1UtDcdqrPBiBJco59jeBubvJsDjKNu0AhulK/rYhiGugNh4gVW4XGI",
- "7GfRRKnKCVEZuh138mrGGIdh3D5zb/sC6B+Dvkxku2tJ7cm5yk00FEmWipi8+d0K0soaoYXyXsgkxdDs",
- "4L6IajSbDLGRbQiz1HrUoov4bI3/xlIsDaPEWYmv7KfkTcLY8coCa3uknrhpiClRbJHsjglk5jdHRzP1",
- "9Sis6X+rJJaLRRuQT5wLZRN7Cfcoxli+Mxw7jGvu5Q2zPL0OO0avIOGTzeN7rQ6Ya7MDvEN6icRQG13n",
- "Dd+sDxjOAD7GW2fANzDIAEPtxWbNG0MegumgQyvVLq5EU9Kkr+jzBJu2OzaCdS+w6cJtpcGoamfIpcB6",
- "FJjPvd67iWQ9ARfH3ohQ76vSB+hv3hGOlJQ5213DLPqYdS6zfSfmXZzpmg3uLsI5ouIgsZX08gNuppCe",
- "I3LgTG/TuE12D2g/rA2jaK7BJNwL4C4Ld9vFcGdHp/kcUs3Otzh+/5cRlhun4rEXp22Jh8APnNWOM74g",
- "5RWl/AagTX7ZG+EJsmbcGJwht88zWN9TpEUN0bxyY0+o14mXRAxgRpHEkIhQMcODff87XTBTNWUgFryh",
- "z3aHJpnTYELfIIzhmnN5kiQ0DG3YMOW5iD0gdprLdL1SwA/6gAz5hvdTag7fXi8xg6mqk7HXFScDPw7z",
- "Tuzme7tw8Zropl+rvHzkJij/m4/JsbPYSqZNymFUMF5QmfkWUYnZC+PJgLdV13/ZuomzONDzembWuGX0",
- "XXgjeQ7Q+SbNhWJ8kQx5MLU9IcJiSGjvQd0E5ipFuOYgXapx7QvFJlp4N45NcGxChSvccx0kqMGsfRa4",
- "wYjfd01IMyZ3orZMsLNlhQskEgpqoJNB4PHwnJuQ/cJ+9z6rPrlPJ5VWZFxPr8nWyGHvkMNUD4kh1c+J",
- "uy23+8Je56nCOLeVHFQsCpkbVIZKrFKKrErtBR0eDPBPup1j/DewkqiUn/ZX2RPYcsx48SqILDiD9dQK",
- "TemS8ib1SPtY22SEdg1BJF9nt2/1FRcXWPOFXcDiVuD8nC+h8agUIk8GtFZH/WDq7hk4Y+kZZMTcHd6U",
- "PZDUl9xHZUltlrhYrn3wcFkCh+zBhBDzlipKvfYWinYasc7k/J7eNP8KZ80qm9/APdImpzzuhWELb9+Q",
- "v/lhNnM1BYb53XAqO8iWaOXVQCC3pBeRFNe7VjGL2Ay6aYcborJQxKSUa4au7XS++w+1COmHQQdb3j9n",
- "rVedTZTTsRMICbf8ugsUpFd83fXDKXZdHq4DuVqloL/OnTeghdsB3O+C+EY10UfusEZBz3bRKMSTepju",
- "qNKwCMGMOARBJb89/I1ImGOGPEH29nCCvb2xa/rbo/Zn8/ra24uezE+mzGgVS3Pzxijm70N2ZWs7HXBh",
- "6OxHxfJsG2G0HFKabJXocvGrc935LPkyf7VP5P5RdakDr6JG7W4CIiay1tbkwVSBq8kOXiau2yRazk5B",
- "Wkmm1xhR5F9U7NdopPYPtRLGVeCsfdCdC7Stde88ohqVTVOe/Adhy98V5q5HJbbGfP7frWhR5uAOyjf3",
- "Zn+Bx399ku0/fviX2V/3n+6n8OTp8/19+vwJffj88UN49NenT/bh4fzZ89mj7NGTR7Mnj548e/o8ffzk",
- "4ezJs+d/uedrg1tAm7rb/8Ckssnh26PkxADb4ISWrC7jYcjYJ6ikKZ5E8ybJRwf+p//tT9gkFUUzvP91",
- "5NzjRkutS3UwnV5cXEzCLtMFvtESLap0OfXz9MsnvD2qXXdsyAXuqPXKMKSAm+pI4RC/vfvu+IQcvj2a",
- "NAQzOhjtT/YnDzEPdAmclmx0MHqMP+HpWeK+Tx2xjQ4+XI5H0yXQHJODmz8K0JKl/pO6oIsFyInL1Gl+",
- "On809Zb/6Qf3Pr00oy5isVbWCSmsMNxLYOl0XWhPsk5GrYRQyuUnGtdpwpz4yDP0DbFPPsPaamQdZU1K",
- "kKOgbK0LjLKR4ge/RBInz9mikp3CQ7U23+UQZIrYKpKSvLY697c0PQv9L2J14B0ri5WBd14ahVqUbZNm",
- "o+mPlSiJZQLFmc0+B5Raq4oaTqRlBSEkDV81vHI/ef7+w9O/Xo52AAT1lq4Q7m80z3+zlaFghcqfdk1q",
- "NR4qgD5uVA+dstNjtMnWX8MMlXWbtifQb1xw+G1oGxxg0X2geW4aCg6xPXjfqbf/aH//I5SuHrdG8STx",
- "WWtgP7nFhbYtaDdebne43qK/pRlmDQSl7VIefrFLOeJoOjAcn9gb7XI8evoF780RNzyH5gRbBvFP/Vvk",
- "Z37GxQX3LY00UxUFlWuUVYLUpqFUejl4W03DNGzTDy3Fcnaju6yXgfLo5Zbr7Z4aYor9xACdLG/me53H",
- "DFWPYW1/9WBCfgh7X6+AfwNbWMZ/4LINXut39+5HvXcP21qHVmR5DJgWiW+EqWd5uunF1/d76iTpvlYS",
- "7CCf3DWy8nzUTKHdqtpD5Qt3YLB3uBsq/Tgg3gTw1pJOOw/gx+e79v0WXBOt++AjcuUvXFh7TXNDJ8Fy",
- "Oz7YNt3CnRD3pxHiamcEW2sDMwxtEuswjej0g8+OcQuinMsOsoMQF750g75B9ob7HU7xYGJTXYRtrscO",
- "nGPBVvEMc5bcCWYfWzDrJ/uJgdGkcPl8whjCsGyyAV2lwEUree+VshZ9odLXnxhZg+KWgXS7oHUN3tgT",
- "ohwn/mg886sUnhzS7sSmP7XYZH35NghOrUxczvFzWHaCoO52UMuk5Xg2W3s6HBOF5f3NT6VkQjK9HhPG",
- "SQbm7KHFUEgMOm4qeDsnI+D439eH/0DX09eH/yDfkP1xLYJhTFZkeuvc05aBfgAdqTD/7fqwFgc2ykJ/",
- "GAHjpEbSQAV4LXwyLURaQVffDKFsZe2KMfGsoKvRRklk/OVIizcVmjrRlH0qckU0bQV5V/il7VKlCKxo",
- "qvM1oXj/rK3vL9YW95mwOtXQu6X5Y/FGG2b0dSViUWNX9eqKhKz3yuIPlAseolKXcQ6LuGwXTHrIiEJw",
- "PSnvbne/2N3ti6WkFOZMM0yJ0Nwn/q5qAdlUF3DgDjisTsh/iwqdXWzxLIil88QZ0LnXz+kE0CAfb46l",
- "y2rs7O11F7635/acKTKHC+SglGPDLjr29r4CkXVVZ1GkhAuecKztdA4k8JC7k1v/0HLr0/3HX+xqjkGe",
- "sxTICRSlkFSyfE1+5nXamZuJ5TXPqXiQCGgj/+l5yjdSdCC+38h23bVNM91Ihq3AqUCFUJfgc2/lcZPD",
- "37zlMV2ID1hXY286Qcc/a1Wx+zHuGVYmMSE9sOB8uz56uYtc/oUYQndOWxW51+J787FvgKg/zbtP40+z",
- "GzN9sv/k00EQ7sIbocn3qC77yCz9o+oO4mQVMJsrW1Qai0nIWlwg4kamYk7o2KUaxdyXa1IHChl+Yhmh",
- "LTbQ5xpmhl35xR9YP79Dmd0IXXbRe8cX7vjCjfhCl6AajoDh9mr6AU0FITvoHclvTcuvyMQY2FukKLzB",
- "RZA56HRp0xB0w2IibMVnvxvmKZtyxN+y/Q+BjuTIxbW40A/MXb5jQCB2/NFGYlyORynICPH95PPhmM9s",
- "jmGddWZDXwoBzTnMZweuEwO79OlMeZ9zl/WGmF28EpQvmsn7YTqIltuwGd4h+GoI7jG171yeZnu83CK+",
- "Bq90n8Q3IW9QHMID7hP7fY1qj495I3/sBb0RHKxd2kislhbvTJC1uIDVVBApPguCNTy6Aq1x0aFtdPyg",
- "Vyy7nNZpeoaEirfYYItQ0dzUrKlh2Vav0LIEKtW1L+nt5rCTzoxHL0M/jVZWoTqfUAQUg5crWhL/Y7Sj",
- "NIMBP2JOllQtybziFtC6WhK6rHgnCjEf18pacxrE/ICc8j2ilvTpw0e/Pnr6zP/56OmzAXnMzOPij/sS",
- "WTOQ+WyH2UUs+3rNjm1Rokbewafeyqvt0HjEslU0hQisfCak8Fw43Scyh3uKlHQ9mHloIInXa5BnuS84",
- "3jbykALMhaqWrPwcBdTZLF5D6EezS2JO6szeR/zbmn+eg2RzLIRV84VPnBlGAmRQ6uXGlAy2jlepl82m",
- "gqs0yZRLfVNKcQ58TNgEJl1jWLZokuTmQOd16hQhdnFVC3iJoTdPHAHWw4XsImq+jdEPhkO6FHOfWqnS",
- "uHTZy8wjT3bulc+qcdGfRePyRvAE5THg2r8NWmj5fNoXzHYzDhScda0ELjQqNoVEMTJkW2qykwAGg8am",
- "Fg+0rpODZOzEsZTqdFmV0w/4H8w8cNnE+NvCIFOriN0kkR3bFrfqYmPHJLLNbXyyC6ccFnPymqVSHGJW",
- "JHeNqLXSUPTLRtquv24qORG9cgTPGYekEDyWJ+Mn/PoaP0bzLqHZfqAzOlAM9e0W+2nB3wGrPc8urO6m",
- "+J38MZS8N3qwdFYroazdFNGfA+m/OS2txLfNMWn9PP3Q+tPZS1xLtax0Ji6CvjavxcazZVvc6tl6IzKw",
- "47ZTycT8R7nIwKXf6B+pmmvEJVKP36ZdRzhIabVYalv2MFpTte6Y0NQeBZs7Vm1Ltmlb+aRy50BoLoFm",
- "azID4ETMzKLbSYsJVXUdWyQOyxvjOSMbuEopUlAKsiSsd7QJtDqpCUo+egOeEHAEuJ6FKEHmVF4TWMsk",
- "NgPaLfRXg1trCh0f6EO92/SbNrA7ebiNVALxDBFfNKIoc3BvmggKd8QJytrsI++fn+S621eVWFInkvXU",
- "fj1hBebt4JQLBangmRrOTbzt2GI24mAtCmwVWX9SopVKzMADV+srqrSr6NRK4RjktDZTbEimPJSQzIz8",
- "9zodWW/s1PBLrirVFLuyshdk0TqisNow1xtY1XOJeTB2LdzZGsfbRh7CUjB+Xf4qyI6sAy2WGS6yOAyC",
- "oU4Ui9TiD4FoELEJkGPfKsBuqGEZAISpBtF1ytM25QT1h5UWZWnOn04qXvcbQtOxbX2of27a9onLBQ8g",
- "X88EqFDwdpBfWMzaynZLqoiDgxT0zMnsC+fD34fZHMZEMZ66lO5D8VmsgGPTKjwCWw5pV+wLj3/rnHUO",
- "R4d+o0Q3SARbdmFowTFB8w8hFl713dfV231EVXlb0A7Eq0bQtH9PLyjTyVxIly4fa6dHrO6dbFyUaVex",
- "372KtXCqbld93TIUN05Q11GFDtAWBB+EY3a/73NjpvpeyJ2M/I0+XgtiFkYqrpmPpDbnrZYx/3gW8zvp",
- "+U56vpOe76TnO+n5Tnq+k57vpOePLT1/Hq9dkiSeT3vTcCwgi4y+SAn/C4p5+pRBSo3QX4v8+EgwIro5",
- "xxu9eTTQfOqqKaO3QrR2qA0LCCszp2Y6xkmZUyMNwUr74HQyowqePfE+GXUNTJu+3/Aa0+DxI3L846F3",
- "VFg6S3q77X1fWU7pdQ4PnNdjnV/buz8Cp1hxE70fqX/9pM6hxArzc5YDUQZX32Hrl3AOuZHkrfGTmLdI",
- "/3V0AjR/4XCz5XHUyqBsRvtt3HqTObQVtPQij18rVYSiU0snAfKc5mo4A7Idr6BlLLy/5tP22YSs4VuR",
- "rTvkbnZtihvYJvTGT4FxKiNVjfvk3SMNLbCyuSvT3Xv3Xd66U02faPtkto3C4qVk4tWHN1H5cHFss2G9",
- "oaxH07xDJ9H0/13fiVEN4C4GQ0PPfk+IK6v8WW8rghC5I9Zw5j9M4Em3tp5jGtjWCFSO9XypQSIe8dHT",
- "i2d/7GuPEaYVcRS3SkyjBfDE8ZZkJrJ10uJM7QumKcm79ZIJWSMepvpeqWu2D15Bn+eGCMo+jzax25Ae",
- "VonjrQOM1zqI7cZ2a2zhiI7zBhj/2Nx3iEOGIBDHemJv5272sivys6Dc8x1Pu+NpwWnsXPaMO9/ELhOZ",
- "XI+nYYX0YXZm6+2DIuEhva8eGJaFGF3pluY+g1m1WNgqeV0tNGbRqos9fh4uZ5e7K4O7GnHYwevQ05tG",
- "TXSH6zOOwKnuvpBkIUVVPrA5DfkaFZxFSfnaGzXMy7+oclf8FiO9bpeH1hUbe3KjV64N6+XeevVboH1y",
- "t2j7d4sWrPNo9xcyUvEMZLyc2qpTJGs7xk9WvOHAG0to+WKCvdW5eXfh/n6XXYRAbcgpbWlVe6Bah8n5",
- "KduTO7kLr/5z3Ahvbe7QAQbb97JtGML2i0EGLAtvhk6yLX81tPnpO3oRpu66LaFx99f6EvBOrF+vkcxk",
- "RoyUgmYpVajU4KAvhDz7yLKkXh1FtMgIJmaY7AeemDfJZKtQiePuJFK2Y738q7yaFUzZqnyfV7hsogkO",
- "XcBuCxt3it2vRbH7rT98ilAs8Ns5nNaGg2dyBzZFL/SKR7nUtLQZqof8l4MD4XJZ36onRm/4tkNGkB/a",
- "GpQhLwklac7Q3Cy40rJK9SmnaNDqVD/uOGt4M92wKPXCN4nbVCMmTzfUKTdC1ZzUZq6oSDWHiAH7ewAv",
- "salqsQClO5x4DnDKXSvGScWZxrmwmHRi/frNdW04+sS2LOiazGmOFtnfQQoyM4+IMGsZmoeUZnnuvEPM",
- "NETMTznVJAfD9F8zI9CZ4bwFofZ4snRXY2GgSL4tT5nEtbM/2K8YQ+eW760AaKywn320y/jzFJFNWDYI",
- "+dFLl1H06CUmiWv8QnqwfzJngYLxJEpk5sZ3/lVd2iL3jYznCehB42Hidv2UG2FaC4KMnurrkUPXqNs7",
- "i/Z0dKimtREd269f6/tYNouFSMyTkS7M7wuml9UMy7j6LBfThagzXkwzCoXg+C2b0pJNVQnp9PzhFvng",
- "BvyKRNjV3c399ZhkQzowp6XeeKyc0N37gXv5FhK4/7Gztm91OL3LkX6XI/0ui/ZdjvS73b3LkX6XQfwu",
- "g/ifNYP4ZKOE6LJubc3pq3uqTUokpHbmmoGHzVrZf/tWSaYnhJwsDf+n5g6Ac5A0JylVVjDi1u+5YIul",
- "JqpKU4Ds4JQnLUhSUbiJ7zf/tc/c02p//zGQ/QfdPlZvEXDefl8UVfETmprIN+R0dDrqjSShEOfgcoFi",
- "86xC9xfba+uw/6se9yfZ27qCrq1yZUnLEsy1pqr5nKXMojwX5jGwEB1vbS7wC0gDnM17RJi2adcRn+jl",
- "7nxiqMsmEhO6+/f7FYpGHnaz03zStGZfr4C9iU/1N+z2eODGsXsM8Y5lfAqW8dmZxleUgfUu2eofbEGh",
- "IbWVTf0GklRdRjSid/IyklUnG96MI0BaSabXeMPRkv16Bub/7w0fVyDP/eVXyXx0MFpqXR5Mp1jvZCmU",
- "no7M1dR8U52P5n6gCzuCu1xKyc4xV/L7y/8fAAD//xxN9dmyGAEA",
+ "H4sIAAAAAAAC/+x9a3PbuLLgX8Hq3qokvqLkvOaeuGrqrhNnZrwnyaRiz7mPcXYCkS0JxyTAA4C2NNn8",
+ "9y00ABIkQUl+JJnM8afEIh6NRqPR6OfHUSqKUnDgWo0OPo5KKmkBGiT+RdNUVFwnLDN/ZaBSyUrNBB8d",
+ "+G9Eacn4YjQeMfNrSfVyNB5xWkDTxvQfjyT8o2ISstGBlhWMRypdQkHNwHpdmtb1SKtkIRI3xKEd4vho",
+ "9GnDB5plEpTqQ/kzz9eE8TSvMiBaUq5oaj4pcsn0kuglU8R1JowTwYGIOdHLVmMyZ5BnauIX+Y8K5DpY",
+ "pZt8eEmfGhATKXLow/lCFDPGwUMFNVD1hhAtSAZzbLSkmpgZDKy+oRZEAZXpksyF3AKqBSKEF3hVjA5+",
+ "HSngGUjcrRTYBf53LgF+h0RTuQA9ej+OLW6uQSaaFZGlHTvsS1BVrhXBtrjGBbsATkyvCXldKU1mQCgn",
+ "7354QR4/fvzMLKSgWkPmiGxwVc3s4Zps99HBKKMa/Oc+rdF8ISTlWVK3f/fDC5z/xC1w11ZUKYgflkPz",
+ "hRwfDS3Ad4yQEOMaFrgPLeo3PSKHovl5BnMhYcc9sY1vdVPC+b/qrqRUp8tSMK4j+0LwK7Gfozws6L6J",
+ "h9UAtNqXBlPSDPrrfvLs/ceH44f7n/7l18Pkf9yfTx9/2nH5L+pxt2Ag2jCtpASerpOFBIqnZUl5Hx/v",
+ "HD2opajyjCzpBW4+LZDVu77E9LWs84LmlaETlkpxmC+EItSRUQZzWuWa+IlJxXPDpsxojtoJU6SU4oJl",
+ "kI0N971csnRJUqrsENiOXLI8NzRYKciGaC2+ug2H6VOIEgPXtfCBC/rjIqNZ1xZMwAq5QZLmQkGixZbr",
+ "yd84lGckvFCau0pd7bIip0sgOLn5YC9bxB03NJ3na6JxXzNCFaHEX01jwuZkLSpyiZuTs3Ps71ZjsFYQ",
+ "gzTcnNY9ag7vEPp6yIggbyZEDpQj8vy566OMz9mikqDI5RL00t15ElQpuAIiZn+HVJtt/z8nP78hQpLX",
+ "oBRdwFuanhPgqciG99hNGrvB/66E2fBCLUqansev65wVLALya7piRVUQXhUzkGa//P2gBZGgK8mHALIj",
+ "bqGzgq76k57Kiqe4uc20LUHNkBJTZU7XE3I8JwVdfb8/duAoQvOclMAzxhdEr/igkGbm3g5eIkXFsx1k",
+ "GG02LLg1VQkpmzPISD3KBkjcNNvgYfxq8DSSVQCOH2QQnHqWLeBwWEVoxhxd84WUdAEByUzIL45z4Vct",
+ "zoHXDI7M1viplHDBRKXqTgMw4tSbxWsuNCSlhDmL0NiJQ4fhHraNY6+FE3BSwTVlHDLDeRFoocFyokGY",
+ "ggk3P2b6V/SMKvjuydAF3nzdcffnorvrG3d8p93GRok9kpF70Xx1BzYuNrX67/D4C+dWbJHYn3sbyRan",
+ "5iqZsxyvmb+b/fNoqBQygRYi/MWj2IJTXUk4OON75i+SkBNNeUZlZn4p7E+vq1yzE7YwP+X2p1diwdIT",
+ "thhAZg1r9DWF3Qr7jxkvzo71KvpoeCXEeVWGC0pbr9LZmhwfDW2yHfOqhHlYP2XDV8Xpyr80rtpDr+qN",
+ "HAByEHclNQ3PYS3BQEvTOf6zmiM90bn83fxTlnkMp4aA3UWLSgGnLDgsy5yl1GDvnftsvprTD/Z5QJsW",
+ "U7xJDz4GsJVSlCA1s4PSskxykdI8UZpqHOlfJcxHB6N/mTZalantrqbB5K9MrxPsZARRK9wktCyvMMZb",
+ "I9CoDVzCcGb8hPzB8jsUhRi3u2doiBnem8MF5XrSPERajKA+ub+6mRp8WxnG4rvzsBpEOLENZ6CsXGsb",
+ "3lMkQD1BtBJEK4qZi1zM6h/uH5Zlg0H8fliWFh8oEwJDcQtWTGn1AJdPmyMUznN8NCE/hmOjgC14vja3",
+ "gpUxzKUwd9eVu75qjZFbQzPiPUVwO4WcmK3xaDDC+21QHD4WliI34s5WWjGNf3JtQzIzv+/U+dsgsRC3",
+ "w8SFzyeHOftywV+CJ8v9DuX0CccpcSbksNv3emRjRokTzLVoZeN+2nE34LFG4aWkpQXQfbGXKOP49LKN",
+ "LKw35KY7MroozMEZDmgNobr2Wdt6HqKQICl0YHiei/T8Fs77zIzTP3Y4PFkCzUCSjGoanCt3XuKXNXb8",
+ "CfshRwAZkeh/xv/QnJjPhvANX7TDmpc6Q/oVgV49Mw9cKzbbmUwDfHgLUtg3LTFv0StB+aKZvMcjLFp2",
+ "4REv7TOaYA+/CLP0Rkl2OBPyevTSIQROGtUfoWbU4LiMOzuLTasycfiJqA9sg85AjbWlL0WGGOoOH8NV",
+ "Cwsnmn4GLCgz6m1goT3QbWNBFCXL4RbO65KqZX8R5j33+BE5+enw6cNHvz16+p15kJRSLCQtyGytQZH7",
+ "TowmSq9zeNBfGcqzVa7jo3/3xCuM2uPGxlGikikUtOwPZRVR9tKyzYhp18daG8246hrAXY7lKRj2YtFO",
+ "rI7VgHbElLkTi9mtbMYQwrJmlow4SDLYSkxXXV4zzTpcolzL6jYeHyClkBFVCB4xLVKRJxcgFRMRrfZb",
+ "14K4Fl4gKbu/W2jJJVXEzI1auopnICcxytIrjqAxDYXadqHaoU9XvMGNG5BKSdc99Nv1Rlbn5t1lX9rI",
+ "90ofRUqQiV5xksGsWrRk17kUBaEkw454cbwRGZh3R6VugVs2gzXAmI0IQaAzUWlCCRcZ4COlUnE+OmDi",
+ "Qt06mgR0yJr10t7TMzACcUqrxVKTqiSo8O5tbdMxoandlATvVDWgEaxVubaVnc6aT3IJNDOCMnAiZk7t",
+ "5hSCuEiK2nrtOZHj4pGnQwuuUooUlDIPHCu2bgXNt7O7rDfgCQFHgOtZiBJkTuU1gdVC03wLoNgmBm4t",
+ "djldZR/q3abftIHdycNtpNK8cSwVGBnPnO4cNAyhcEecXIBEnd1n3T8/yXW3ryoHLOpOUjllBT6VOOVC",
+ "QSp4pqKD5VTpZNuxNY1a4pRZQXBSYicVBx54rr+iSlvNLeMZitaW3eA89h1vphgGePBGMSP/zV8m/bFT",
+ "wye5qlR9s6iqLIXUkMXWwGG1Ya43sKrnEvNg7Pr60oJUCraNPISlYHyHLLsSiyCqaz2HM230F4faAHMP",
+ "rKOobAHRIGITICe+VYDd0Ko4AIh5h9U9kXCY6lBObcocj5QWZWnOn04qXvcbQtOJbX2of2na9omL6oav",
+ "ZwLM7NrD5CC/tJi19uQlNTIwjkwKem7uJpRorYq5D7M5jIliPIVkE+WbY3liWoVHYMshHXhMOI+VYLbO",
+ "4ejQb5ToBolgyy4MLXjgZfOWSs1SVqIk8VdY37papDtBVENCMtCUGWk7+IAMHHlv3Z9Ym0F3zOsJWjsJ",
+ "oX3we1JoZDk5U3hhtIE/hzWqSt9aY/RpYMK+BUkxMqo53ZQTBNSbuMyFHDaBFU11vjbXnF7CmlyCBKKq",
+ "WcG0tt4FbUFSizIJB4g+8DfM6FQs1pDrd2AXnc8JDhUsr78V45EVWzbDd9oRXFrocAJTKUS+gyq6h4wo",
+ "BDupqkkpzK4z58ziPR48JbWAdEIM6tdq5nlPtdCMKyD/LSqSUo4CWKWhvhGERDaL16+ZwVxg9ZxOKd1g",
+ "CHIowMqV+GVvr7vwvT2350yROVx6DzDTsIuOvT18Jb0VSrcO1y28eM1xO47wdtR8mIvCyXBdnjLZ+rR3",
+ "I++yk287g9fqEnOmlHKEa5Z/YwbQOZmrXdYe0siSquX2teO4Oyk1gqFj67b7LoWY35IiLe4BgI8TZ9Q3",
+ "rci84haoSrnnCNq5vEJDzMe1l4f17j4g6AKwpF4b5/589PS70bgx3dffzZ1sv76PSJQsW8UcNDJYxfbE",
+ "HTF8Td0zT4+1gqhVDBmzmEd8tECe525lHdZBCjBnWi1ZaYZs/EnWGlq+qP/3/n8c/HqY/A9Nft9Pnv3b",
+ "9P3HJ58e7PV+fPTp++//X/unx5++f/Af/xpVK2o2i6s/fzK7JObEsfgVP+bWgDEX0r7H1k7ME/MvD7eW",
+ "ABmUehlz/iwlKGSN1omz1MtmUwE6OpRSigvgY8ImMOmy2GwByiuTcqBzdELEN4XYxShaHwdLb544AqyH",
+ "C9mJj8XoB018SJt4mM2jI1/fgvBiByKyjU//WFf2q5iHnrPuoKi10lD09V22628D0v47Lyv3DpXgOeOQ",
+ "FILDOhoswji8xo+x3va6G+iMgsdQ3+5bogV/B6z2PLts5k3xi7sd8Pe3tWH7Fja/O25H1Rn6DKOqBvKS",
+ "UJLmDBU5gistq1SfcYpPxYBcI+Yk/wAeVh688E3i2oqIMsENdcapMjisH5BRFfgcIlfWDwBeh6CqxQKU",
+ "7gjNc4Az7loxTirONM5VmP1K7IaVINGmM7EtC7omc5qjruN3kILMKt0WI/HSU5rludO7mmmImJ9xqg0P",
+ "Upq8Zvx0hcN5D0JPMxz0pZDnNRbiV9QCOCimkjjf/9F+Rfbvlr90VwHGmdjPnt98ab7vYY853jnIj4/c",
+ "E+v4COXoRuPag/2LqeEKxpMokRm5qGAc/bc7tEXum9eAJ6AHje7W7foZ1ytuCOmC5iwzstN1yKHL4npn",
+ "0Z6ODtW0NqKjVfFrfR9zG1iIpKTpOVqNRwuml9Vskopi6p+W04Won5nTjEIhOH7LprRkU1VCOr14uEXO",
+ "vQG/IhF29Wk8clxH3boixg0cW1B3zlqf6f/Wgtz78eUpmbqdUvesF64dOnCfjGgDnIdQy2BlFm+jyKwb",
+ "8hk/40cwZ5yZ7wdnPKOaTmdUsVRNKwXyOc0pT2GyEOTAOx0dUU3PeI/FDwZ6Bu5epKxmOUvJeXgVN0fT",
+ "Bu/0Rzg7+9UQyNnZ+571o39xuqmiZ9ROkFwyvRSVTlx0QiLhksosArqqvdNxZBtbtGnWMXFjW4p00Q9u",
+ "/DirpmWpus6q/eWXZW6WH5Chcq6YZsuI0kJ6Jmg4o4UG9/eNcE8uSS99aEulQJEPBS1/ZVy/J8lZtb//",
+ "GEjLe/OD4zWGJtcltPRG13Km7eqMcOFWoIKVljQp6QJUdPkaaIm7jxd1gRrKPCfYreU16n0scKhmAR4f",
+ "wxtg4biyBxwu7sT28mGm8SXgJ9xCbGO4U6P4v+5+BX6k196uji9qb5cqvUzM2Y6uShkS9ztTR58tDE/2",
+ "1hjFFtwcAheoNwOSLiE9hwxjhqAo9Xrc6u4Nfu6G86yDKRtbZx3dMAAEVWwzIFWZUScDUL7ueuIr0NqH",
+ "H7yDc1ifiiZ+5Cqu922HcDV0UJFSg8vIEGt4bN0Y3c13xmN0gi1L71eNPoSeLA5quvB9hg+yvSFv4RDH",
+ "iKLlsDyECCojiLDEP4CCayzUjHcj0o8tz4g3M3vzRdQ8nvcT16SR2pwBOFwN+mHb7wVgoK64VGRGFWRE",
+ "uBhT6/QccLFK0QUM6J5CLeeOrsUtzSgOsu3ei950Yt690Hr3TRRk2zgxa45SCpgvhlRQTdgx+/uZrCId",
+ "VzAhmDrCIWyWo5hUexxYpkNlS9tsY+GHQIsTMEjeCBwejDZGQslmSZUPf8UoYX+Wd5IBPqMT/6aYrePA",
+ "Yh2EAtcRWZ7nds9pT2/rIrd8uJaP0QqVtjvEW41Hzokqth2CowCUQQ4Lu3Db2BNKE1DQbJCB4+f5PGcc",
+ "SBIzflOlRMps/HJzzbg5wMjHe4RY3RPZeYQYGQdgo4EIByZvRHg2+eIqQHIXEEH92GhaCv6GuCegdW8y",
+ "Io8oDQtnfMAxzXMA6jwm6vur47eDwxDGx8SwuQuaGzbnlKjNIL0IIhRbO/FCzkT5YEic3aD6sxfLldZk",
+ "r6LrrCaUmTzQcYFuA8SbRYnYFijEl3v61rgaukt3mXrg+h7C1f0g9uhaAHQ0EU16Hvfy2/pCa9/N/Zus",
+ "YenjJpjWe2bGaH+IfqK7NIC/viK4jhZ6272uo4/0tumyHSgVyE8xVmzOSF812lfAKsgBJeKkJUEk5zGF",
+ "uRHsAdntie8WvNwxHIvy9YPAHi5hwZSGRnVlbiWvi/3S5i6K4d9CzIdXp0s5N+t7J0TNo22YoTXfhcv8",
+ "4iu4EBqSOZNKJ6j3iy7BNPpB4YvyB9M0Lii0Le42EwrL4rwBpz2HdZKxvIrTq5v3r0dm2je1EkZVs3NY",
+ "ozgINF2SGWbuifrhbJjaumptXPAru+BX9NbWu9tpME3NxNKQS3uOb+RcdDjvJnYQIcAYcfR3bRClGxgk",
+ "XvxHkOtYxFIgNNjDmZmGk02qx95hyvzYmx5KARTDd5QdKbqW4LW8cRUMvQ/Mc4/pIPFNP2xg4AzQsmTZ",
+ "qqMItKMOPhfplV77PrC4gwXcXTfYFgwESr+YZ6oE1Y4hb6Rbm8KIh2ub7ISZ03akd8gQwqmY8gn4+ogy",
+ "pI1Zorbh6hRo/ldY/820xeWMPo1HN9MbxnDtRtyC67f19kbxjAYxq0dqmQGuiHJallJc0Dxx2tUh0pTi",
+ "wpEmNvfK2C/M6uI6vNOXh6/eOvA/jUdpDlQmtagwuCpsV34zq7Lh6gMHxCf4Mg8eL7NbUTLY/DqMONTI",
+ "Xi7BJVMKpNFe8odG2x4cRaehncft8lv1rc4wYJe4wUAAZW0faHRX1jzQNgnQC8pyrzTy0A7Y0HFxu2UQ",
+ "iXKFcIAbmxYCC1Fyq+ymd7rjp6Ohri08KZxrQ7qnwmY0U0TwrkuWESFRF4WkWlBM3WBVAn3mxKsiMccv",
+ "UTlL4wpGPlOGOLg1HJnGBBsPCKNmxIoN2CF5xYKxTDO1w0O3A2QwRxSZPg3IEO5mwqWirTj7RwWEZcC1",
+ "+STxVHYOKubKcKrm/nVqZIf+XG5gq55uhr+JjBGmLeneeAjEZgEjNFP1wD2qn8x+obU6xvwQ6OOvYO0O",
+ "Z+xdiRss1Y4+HDVbl6Fl29wUZo7t8z9DGDbL2Pa0tf7x6vKnDMwRTUPLVDKX4neIv/PweRxxW/eJWhh6",
+ "Tf4OfBKJ/umymFq702TTbWYf3O4h6SbUQrUt9ANUjzsf2KQwKYZXz1Jut9pmhWz5hcQJJvTlmtrxG4Jx",
+ "MPf833J6OaOxjCFGyDAwHTbWz5YiWQviO3vcO503c7lzJiQwpNZtmQ3oKkE2ESX94OFrCgx22p1FhUYy",
+ "QKoNZYKxNX7lSkSGqfgl5Ta5qOlnj5LrrcAqv0yvSyExHFPFdd4ZpKygeVxyyBD77fDVjC2YTa1ZKQhy",
+ "N7qBbE5iS0Uu/6W1LzeoOZ6T/XGQHdbtRsYumGKzHLDFQ9tiRhVy8loRVXcxywOulwqbP9qh+bLimYRM",
+ "L5VFrBKkFurweVNbbmagLwE42cd2D5+R+2izUuwCHhgsuvt5dPDwGSpd7R/7sQvA5dDdxE0yZCf/6dhJ",
+ "nI7RaGfHMIzbjTqJBhfaxOfDjGvDabJddzlL2NLxuu1nqaCcLiDuJlFsgcn2xd1ERVoHLzyzWXuVlmJN",
+ "mI7PD5oa/jTg82nYnwWDpKIomC6cZUOJwtBTk5jRTuqHsymAXfYgD5f/iAbC0ttHOo/IL6s0tfdbbNVo",
+ "xn1DC2ijdUyojcHNWWO69wm/yLGP5Md0SnUWJYsbM5dZOoo5aMmfk1IyrvFhUel58heSLqmkqWF/kyFw",
+ "k9l3TyIppNpZY/jVAP/ieJegQF7EUS8HyN7LEK4vuc8FTwrDUbIHjY91cCoHLZlxbzHP0bvOgpuH3lUo",
+ "M6Mkg+RWtciNBpz6RoTHNwx4Q1Ks13Mlerzyyr44ZVYyTh60Mjv0y7tXTsoohIzldWmOu5M4JGjJ4AId",
+ "1+KbZMa84V7IfKdduAn0X9fy4EXOQCzzZzn2EHhesTz7WxMz0snCJylPl1G9/8x0/K3Jklwv2Z7jaBqR",
+ "JeUc8uhw9s78zd+tkdv/72LXeQrGd2zbza5nl9tZXAN4G0wPlJ/QoJfp3EwQYrXtRF97XeYLkRGcp8lZ",
+ "0VBZP2FgkEHrHxUoHQvaww/W8wP1O+ZdYBM4EeAZStUT8qOtcrIE0gqpR2mWFVVuw7MhW4B0iseqzAXN",
+ "xsSMc/ry8BWxs9o+NuWnTSC1QGGuvYrOuz5IcLObD6HP3hn3b959nM0Ol2bVSmOGC6VpUcZCV0yLU98A",
+ "42NCXSeKeSF2JuTIStjKy292EkMPcyYLI5nWo1kejzRh/qM1TZcoura4yTDJ7575zFOlChLD13le6xw1",
+ "eO4M3C75mc19NibCvC8umbLFLeAC2tEydeiYezr56Jn28mTFuaWUKI/eFNp4HbR74KxB26tDo5B1EH9F",
+ "wcUmDrxqIrgT7BVN+tDNKtfLCG+jiusUpb5oUUq54CzFlAtBOY0aZFcoYxdbwQ7ZKbrKKH/E3QmNHK5o",
+ "LrvanchhcTC7nWeEDnF9ZWXw1WyqpQ77p8aKDEuqyQK0cpwNsrFPyej0JYwrcDmHsGZKwCeFbNlfkENG",
+ "TXpJrfq9Ihmh7/yAAPyD+fbGPY/QqfSccRSEHNqc/6rVaGAef22kJ6bJQoBy62mH5qtfTZ8JhqdnsHo/",
+ "8Xn/cQxrvjDLtra6/lCH3nLnLGWm7QvTllivw/rnlpuinfSwLN2kwwk7o/KAXvFBBEcsMIlXgQfIrccP",
+ "R9tAbhtN7nifGkKDCzTYQYn3cI8w6uSVnWy9FzSvLEVhC2JdXaLxlYxHwHjFODRVKSIXRBq9EnBj8LwO",
+ "9FOppNqKgDvxtFOgOVrpYgxNaaeivelQnQ1GlOAa/RzD29jk3RxgHHWDRnCjfF0XwzDUHQgTL7AKj0Nk",
+ "P4smSlVOiMrQ7biTVzPGOAzj9pl72xdA/xj0ZSLbXUtqT85VbqKhSLJZlS1AJzTLYsnanuNXgl9JVqHk",
+ "ACtIqzrZVVmSFCO22yHsfWpzE6WCq6rYMJdvcMPpUhGTo9/gBMr7VTeDTwiyX8N6j16+fffyxeHpyyN7",
+ "X5hnuQ0lMzK3hMIwRPOOVRqM6FwpIB9CNH7Afh86C46DGeTTjRBtmNPXEyI61M/W+G8sIdUwATmb+pW9",
+ "urwBHTteWbxvj9QTzs3RSxRbJLtjAq++m6Ojmfp657Hpf6sHMheLNiBfOHPMJmYc7lGMDb8091sYBd7L",
+ "smZvwDpIG32ohE/Nj6/bOrywzTzxxu2lXUPdfZ1lfbP2ZDhf+hjv6AFPyiBfDrVigDUGDflTpoPuv1S7",
+ "KBxNyUZOiUnOYyNYZwybXN3WZYwqwoYcMKz/hfnc672bANt7DuDYGxHqPXv6AP3Vuw2SkjJn6WyYRR+z",
+ "zsG47/K9i+ths8HdRTi3XRwktpJeNsXNFNJz2w5CD2zSu8nu4f+HtRkZjVuYsnwB3OUsbztk7uwWNp9D",
+ "qtnFFjf5/zRPi8YFe+wfH7YgRuA1z2o3I1++84pvogagTV7sG+EJcozcGJwhJ9lzWN9TpEUN0Sx8Y0+o",
+ "14kuRQxg/pXEkIhQMTON1ZY4zTlTNWUgFrxZ1HaHJvXVYPrjIOjjmnN5kiQ0DATZMOWFiD23dprLdL1S",
+ "eBR6zAx50vcTkA7fXkeY71XVqevr+pyBKGpe1d3seJcuuhWDGmoFoY9zBeV/8xFMdhZb97VJ0Izq2Esq",
+ "M98i+r7wT5dkwDet6+1tnepZHOh5PTNrnFj6Ds+RrBDoqpTmQjG+SIb8vdp+I2HpKLSOoSYHM7siXHOQ",
+ "LjG79mV1Ey2808smODahwpU5ug4S1GCOQwvcYHz0uyYAHFNhUVtU2Vn+wgWaxwY10MkgTHt4zk3IfmG/",
+ "ew9fnwpph2eUo9dka5y1d19iqofEkOrnxN2W2z2Hr/NUYZzbuhcqFrPNDSpDlV8pRVal9oIOD0bzMNw1",
+ "I8IGVhKV8tP+KnsCW475QV4FcRjnsJ5aoSldUt4kamkfa5u60a4hiHvs7PatvuLiAmu+sAtY3AqcX/Ml",
+ "NB6VQuTJgI7vuB963j0D5yw9h4yYu8Mb/gdSIJP7qFqqjTiXy7UPtS5L4JA9mBBi3lJFqdfentNOutaZ",
+ "nN/Tm+Zf4axZZbNBuEfa5IzHfVZsmfIb8jc/zGaupsAwvxtOZQfZEtu9Ggh7l/QykhB815pvEQtLN0lz",
+ "Q1QWipiUcs1Av53Od/+hFiH9MERjy/vnvPWqs2mFOlYVIeGWX3eBOvmKr7t+8Mmuy8N1IFerFPTXufMG",
+ "tHA7gPtdEN+oJvrIHdYo6NkuGoV4ChTTHVUaFiGYP4ggqOTDww9EwhzzCQqyt4cT7O2NXdMPj9qfzetr",
+ "by96Mr+YMqNVWs7NG6OYvw1Z4a2lecDho7MfFcuzbYTRct9pcnuig8pvztHpq2QX/c0+kftH1SVavIoa",
+ "tbsJiJjIWluTB1MFjjk7+OS4bpNo8T8FaSWZXmP8lX9Rsd+ice0/1koYV6+09th3DuNanEMdwdeobJpi",
+ "7j8KWyywMHc9KrE1Vj94uaJFmYM7KN/fm/07PP7Lk2z/8cN/n/1l/+l+Ck+ePtvfp8+e0IfPHj+ER395",
+ "+mQfHs6/ezZ7lD168mj25NGT754+Sx8/eTh78t2zf7/nK6lbQJsq5f+FKXiTw7fHyakBtsEJLVld9MSQ",
+ "sU/nSVM8ieZNko8O/E//25+wSSqKZnj/68g5E46WWpfqYDq9vLychF2mC3yjJVpU6XLq5+kXm3h7XDs6",
+ "2QAV3FHrw2JIATfVkcIhfnv38uSUHL49njQEMzoY7U/2Jw8xa3YJnJZsdDB6jD/h6Vnivk8dsY0OPn4a",
+ "j6ZLoDmmUjd/FKAlS/0ndUkXC5ATl9fU/HTxaOr9JKYf3fv0kxl1EYtMsy5bYT3mXrpPp+tCu5d1yWql",
+ "z1Ium9O4TqrmxEeeoSeNffIZ1lYj6zhrEqgcB0V+XRiZjas/+DWSZnrOFpXslGmqtfku4yJTxNbclOS1",
+ "1bm/pel56K0Sq5rvWFmsaL7zaSnUomwbgBtNf6ygSyxvKs5s9jmg1FpV1HAiLSsIIWn4quGV+8mz9x+f",
+ "/uXTaAdAUG/pygZ/oHn+wdbRghUqf9oVvNV4qFz8uFE9dIp0j9GCXX8N83nWbdp+Ux+44PBhaBscYNF9",
+ "oHluGgoOsT14jw7tSAl4iB7t73+GQt/j1iieJL5qxfAnt7jQtgXtxsvtDtdb9HOaYY5FUNou5eE3u5Rj",
+ "jqYDw/GJvdE+jUdPv+G9OeaG59CcYMsgWqx/i/zCz7m45L6lkWaqoqByjbJKkAg2lEo/Dd5W0zBp3fRj",
+ "S7Gc3egu6+XrPD7acr3dU0NMsZ9GoZMTz3yvs76h6tEl/oMVU1o9mJAfw97ImDEqwfr8V5I3laVKKS5Y",
+ "Zliss8n54M0GtnsqDNiIXrbBa/3u3v2s9+5hW+vQisOPAdMi8Y0w9SxPN734+l5inZTm10oZHmTfu0YO",
+ "o8+aV7Vbg3yo2OMODPYOd0OFMgfEmwDeWtJpZ038/HzXvt+Ca6J1H3xGrvyNC2uvaW7oJFhux2PdJqe4",
+ "E+L+aYS42hnBVibBfEybxDpMujr96HOJ3IIo53Kp7CDEhS/doG+Q6+J+h1M8mNjEIGGb67ED51iwVTzD",
+ "DC93gtnnFsz6qZFiYDQJb76eMIYwLJvcSVcpB9JKdXylHE/fqPT1T4ysQXHLQLpd0LoGb+wJUY4Tfzae",
+ "+acUnhzS7sSmf2qxyfrybRCcWnnLnOPnsOwEQZXyoPJLy/FstvZ0OCZKSOf+VEomJNPrMWGcZGDOHloM",
+ "hcQQ7abeuXMyAo7/fX34X+h6+vrwv8j3ZH9ci2AYwRaZ3jr3tGWgH0FH6vE/Xx/W4sBGWegPI2Cc1kga",
+ "qJevhU89hkgr6Or7IZStrF0xJp4VdDXaKImMvx1p8aZCUyf2tE9FruSorbfvyuS0XaoUgRVNdb4mFO+f",
+ "tfX9xUrsPm9Yp3Z8p55/NN5ow4y+CkcsauyqXl2RAH+sdrEZvtNOjqUWOlx+Pix5s10w6SEjCsH1pLy7",
+ "3f1md7cvlpJSmDPNMIFEc5/4u6oFZFOLwYE74LA6If8tKnR2saXGIJb8FGdA514/pxNAg+zFORZ6q7Gz",
+ "t9dd+N6e23OmyBwukYNSjg276Njb+xOIrKs65yQlXPCEYyWsCyCBh9yd3PqHlluf7j/+ZldzAvKCpUBO",
+ "oSiFpJLla/ILr5P03Ewsr3lOxYO0SRv5T89TvpGiA/H9Rrbrrm2a6UYybAVOBSqEumCheyuPm4oH5i2P",
+ "yVV8wLoae9MJOv5Zq4rdj3HPsDKJCemBBef5+vhoF7n8GzGE7pzkK3Kvxffmc98AUX+ad1/Gn2Y3Zvpk",
+ "/8mXgyDchTdCkx9QXfaZWfpn1R3EySpgNle2qDQWk5C1uEDEjUzFnNCxS8yKmULXpA4UMvzEMkJbmqHP",
+ "NcwMu/KLP7B+foeixBG67KL3ji/c8YUb8YUuQTUcAcPt1fQjmgpCdtA7ks9Nyz+RiTGwt0hReIOLIHPQ",
+ "6dKmIeiGxUTYis8VOMxTNmXUv2X7HwIdSVWFa3GhH5jpfceAQOz4k43E+DQepSAjxPezz4djPrM5hnXW",
+ "eSB94Qg05zCfS7lOo+ySzTPlfc5d1htidvFKUL5oJu+H6SBabsNmeIfgqyG4x9ReuqzW9ni5RfwZvNJ9",
+ "yuOEvEFxCA+4T4P4Z1R7fM4b+XMv6I3gYO3SRmK1tHhngqzFBaw9g0jxWRCs4dGVs42LDm2j40e9Ytmn",
+ "aZ2mZ0ioeIsNtggVzU3NmoqfbfUKLUugUl37kt5uDjvtzHh8FPpptLIK1fmEIqAYvFzRkvhvox2lGQz4",
+ "EXOypGpJ5hW3gNa1pdBlxTtRiPm4Vtaa0yDmB+SM7xG1pE8fPvrt0dPv/J+Pnn43II+ZeVz8cV8iawYy",
+ "n+0wu4hlf16zY1uUqJF38KW38mo7NB6xbBVNIQIrnwkpPBdO94nM4Z4iJV0PZh4aSOL1GuR57suzt408",
+ "pABzoaolK79GuXk2i1dc+snskpiTOg/6MX9e888LkGyOZcNqvvCFM8NIgAxKvdyYksFWPSv1stlUcHU5",
+ "mXKpb0opLoCPCZvApGsMyxZNSuEc6LxOnSLELq5qAS8x9OaJI8B6uJBdRM23MfrBcEiXYu5LK1Ualy57",
+ "mXnkyc698lU1LvqraFzeCJ6gPAZc+7dBCy1fT/uC2W7GgYKzrizBhUbFppAoRoZsS012EsBg0NjU4oHW",
+ "dXKQjJ04llKdLqty+hH/g5kHPjUx/raMytQqYjdJZCe2xa262NgxiWxzG5/swimHxZy8ZqkUh5gVyV0j",
+ "aq00FP0im7brb5sKdESvHMFzxiEpBI/lyfgZv77Gj9G8S2i2H+iMDhRDfbulkVrwd8Bqz7MLq7spfid/",
+ "DCXvjR4sndVKKGs3RfTnQPpvTksr8W1zTFo/Tz+2/nT2EtdSLSudicugr81rsfFs2Ra3erbeiAzsuO1U",
+ "MjH/US4ycOk3+keq5hpxidTjt2nXEQ5SWi2W2haJjFagrTsmNLVHweaOVduSbdpWPqncBRCaS6DZmswA",
+ "OBEzs+h20mJCVV31F4nD8sZ4zsgGrlKKFJSCLAmrQ20CrU5qgpKP3oAnBBwBrmchSpA5ldcE1jKJzYB2",
+ "yyLW4NaaQscH+lDvNv2mDexOHm4jlUA8Q8QXjSjKHNybJoLCHXGCsjb7zPvnJ7nu9lUlFiCKZD21X09Z",
+ "gXk7OOVCQSp4poZzE287tpiNOFiLAltz15+UaF0XM/DA1fqKKu3qX7VSOAY5rc0UG5IpDyUkMyP/rU5H",
+ "1hs7NfySq0o1pcGs7AVZtOoqrDbM9QZW9VxiHoxdC3e2IvS2kYewFIxfFwsLsiPrQItlhossDoNgqBPF",
+ "+qhsAdEgYhMgJ75VgN1QwzIACFMNouuUp23KCao1Ky3K0pw/nVS87jeEphPb+lD/0rTtE5cLHkC+nglQ",
+ "oeDtIL+0mLV1AJdUEQcHKei5k9kXzoe/D7M5jIliPHUp3Yfis1gBJ6ZVeAS2HNKu2Bce/9Y56xyODv1G",
+ "iW6QCLbswtCCY4LmH0IsvOq7r6u3+4yq8ragHYhXjaBp/55eUqaTuZAuXT5Wmo9Y3TvZuCjTyj3/7KtY",
+ "C6fqdrXqLUNx4wRVMFXoAG1B8EE4Zvf7Pjdmqh+E3MnI3+jjtSBmYaTimvlIanPeahnzj2cxv5Oe76Tn",
+ "O+n5Tnq+k57vpOc76flOev7c0vPX8dolSeL5tDcNxwKyyOiblPC/oZinLxmk1Aj9tciPjwQjoptzvNGb",
+ "RwPNp672NHorRCuS2rCAsI51aqZjnJQ5NdIQrLQPTiczquC7J94no66BadP3G15jGjx+RE5+OvSOCktn",
+ "SW+3ve8ryym9zuGB83qs82t790fgFCtuovcj9a+f1DmUWGF+znIgyuDqJbY+ggvIjSRvjZ/EvEX6r6NT",
+ "oPkLh5stj6NWBmUz2odx603m0FbQ0os8fq1UEYpOLZ0EyHOaq+EMyHa8gpax8P6aT9tnE7KG5yJbd8jd",
+ "7NoUN7BN6I2fAuNURmpA98m7RxpaYB14V9S89+77dOtONX2i7ZPZNgqLl5KJ12reROXDpcTNhvWGsh5N",
+ "8w6dRNP/d30nRjWAuxgMDT37PSGuCPVXva0IQuSOWMOZ/zCBJ93aeo5pYFsjUDnW860GiXjER08vnv2x",
+ "rz1GmFbEUdwqMY0WwBPHW5KZyNZJizO1L5imJO/WSyZkjXiY6nulrnA/eAV9nRsiKPs82sRuQ3pYJY63",
+ "DjBe6yC2G9utsYUjOs4bYPxzc98hDhmCQBzrib2du9nLrsjPgnLPdzztjqcFp7Fz2TPufBO7TGRyPZ6G",
+ "FdKH2dlLWzBQkfCQ3lcPDMtCjK50S3OfwaxaLGyVvK4WGrNo1cUevw6Xs8vdlcFdjTjs4HXo6U2jJrrD",
+ "9RlH4FR3X0iykKIqH9ichnyNCs6ipHztjRrm5V9UuSt+i5Fet8tD64qNPbnRK9eG9XJvvfot0D65W7T9",
+ "u0UL1nm0+wsZqXgGMl5ObdUpkrUd46cr3nDgjSW0fDHB3urcvLtwf7/LLkKgNuSUtrSqPVCtw+T8lO3J",
+ "ndyFV/9z3Ahvbe7QAQbb97JtGML2i0EGLAtvhk6yLX81tPnpO3oZpu66LaFx99f6EvBOrF+vkcxkRoyU",
+ "gmYpVajU4KAvhTz/zLKkXh1HtMgIJmaY7AeemDfJZKtQiePuJFK2Y738q7yaFUzZqnxfV7hsogkOXcBu",
+ "Cxt3it0/i2L3uT98ilAs8Ns5nNaGg2dyBzZFL/WKR7nUtLQZqof8l4MD4XJZ36onRm/4tkNGkB/aGpQh",
+ "Lwklac7Q3Cy40rJK9RmnaNDqVD/uOGt4M92wKPXCN4nbVCMmTzfUGTdC1ZzUZq6oSDWHiAH7BwAvsalq",
+ "sQClO5x4DnDGXSvGScWZxrmwmHRi/frNdW04+sS2LOiazGmOFtnfQQoyM4+IMGsZmoeUZnnuvEPMNETM",
+ "zzjVJAfD9F8zI9CZ4bwFofZ4snRXY2GgSL4tT5nEtbM/2q8YQ+eW760AaKywn320y/jrFJFNWDYI+fGR",
+ "yyh6fIRJ4hq/kB7sX8xZoGA8iRKZufGdf1WXtsh9I+N5AnrQeJi4XT/jRpjWgiCjp/p65NA16vbOoj0d",
+ "HappbUTH9uvX+j6WzWIhEvNkpAvz+4LpZTXDMq4+y8V0IeqMF9OMQiE4fsumtGRTVUI6vXi4RT64Ab8i",
+ "EXZ1d3P/eUyyIR2Y01JvPFZO6O79wL18Cwnc/9hZ27c6nN7lSL/LkX6XRfsuR/rd7t7lSL/LIH6XQfyf",
+ "NYP4ZKOE6LJubc3pq3uqTUokpHbmmoGHzVrZf/tWSaYnhJwuDf+n5g6AC5A0JylVVjDi1u+5YIulJqpK",
+ "U4Ds4IwnLUhSUbiJ7zf/tc/cs2p//zGQ/QfdPlZvEXDefl8UVfETmprI9+RsdDbqjSShEBfgcoFi86xC",
+ "9xfba+uw/6se92fZ27qCrq1yZUnLEsy1pqr5nKXMojwX5jGwEB1vbS7wC0gDnM17RJi2adcRn+jl7nxi",
+ "qMsmEhO6+/f7FYpGHnaz03zRtGZ/XgF7E5/qb9jt8cCNY/cY4h3L+BIs46szjT9RBta7ZKt/sAWFhtRW",
+ "NvUbSFJ1GdGI3snLSFadbHgzjgBpJZle4w1HS/bbOZj/vzd8XIG88JdfJfPRwWipdXkwnWK9k6VQejoy",
+ "V1PzTXU+mvuBLuwI7nIpJbvAXMnvP/3/AAAA//+MsCXg4BkBAA==",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index 9372cc565..53983915d 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -316,7 +316,13 @@ type DryrunTxnResult struct {
AppCallMessages *[]string `json:"app-call-messages,omitempty"`
AppCallTrace *[]DryrunState `json:"app-call-trace,omitempty"`
- // Execution cost of app call transaction
+ // Budget added during execution of app call transaction.
+ BudgetAdded *uint64 `json:"budget-added,omitempty"`
+
+ // Budget consumed during execution of app call transaction.
+ BudgetConsumed *uint64 `json:"budget-consumed,omitempty"`
+
+ // Net cost of app execution. Field is DEPRECATED and is subject to removal. Instead, use `budget-added` and `budget-consumed`.
Cost *uint64 `json:"cost,omitempty"`
// Disassembled program line by line.
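The deprecated field relates to the two new ones by simple subtraction. A minimal sketch of that relationship with made-up values (the names below are illustrative, not from this diff):

```go
package main

import "fmt"

func main() {
	// Hypothetical dryrun figures for one app call transaction.
	budgetConsumed := uint64(920) // opcode budget spent during execution
	budgetAdded := uint64(700)    // extra budget granted, e.g. by pooling
	// The net cost that the deprecated `cost` field used to report.
	cost := budgetConsumed - budgetAdded
	fmt.Println("net cost:", cost) // net cost: 220
}
```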
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 4e7a41b0f..9e18ca7d2 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -638,6 +638,7 @@ func (v2 *Handlers) GetProof(ctx echo.Context, round uint64, txid string, params
Stibhash: stibhash[:],
Idx: uint64(idx),
Treedepth: uint64(proof.TreeDepth),
+ Hashtype: hashtype,
}
return ctx.JSON(http.StatusOK, response)
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index f569c9a50..af970f4b1 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -936,6 +936,7 @@ func TestGetProofDefault(t *testing.T) {
var resp generatedV2.ProofResponse
err = json.Unmarshal(rec.Body.Bytes(), &resp)
a.NoError(err)
+ a.Equal("sha512_256", resp.Hashtype)
l := handler.Node.LedgerForAPI()
blkHdr, err := l.BlockHdr(1)
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index d3b9ff2f5..c423e8de2 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -147,7 +147,7 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
fmt.Fprintln(logWriter, "Logging Starting")
if s.log.GetTelemetryUploadingEnabled() {
// May or may not be logging to node.log
- fmt.Fprintf(logWriter, "Telemetry Enabled: %s\n", s.log.GetTelemetryHostName())
+ fmt.Fprintf(logWriter, "Telemetry Enabled: %s\n", s.log.GetTelemetryGUID())
fmt.Fprintf(logWriter, "Session: %s\n", s.log.GetTelemetrySession())
} else {
// May or may not be logging to node.log
@@ -158,6 +158,12 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
metricLabels := map[string]string{}
if s.log.GetTelemetryEnabled() {
metricLabels["telemetry_session"] = s.log.GetTelemetrySession()
+ if h := s.log.GetTelemetryGUID(); h != "" {
+ metricLabels["telemetry_host"] = h
+ }
+ if i := s.log.GetInstanceName(); i != "" {
+ metricLabels["telemetry_instance"] = i
+ }
}
s.metricCollector = metrics.MakeMetricService(
&metrics.ServiceConfig{
@@ -203,6 +209,10 @@ func (s *Server) Start() {
cfg := s.node.Config()
+ if cfg.EnableRuntimeMetrics {
+ metrics.DefaultRegistry().Register(metrics.NewRuntimeMetrics())
+ }
+
if cfg.EnableMetricReporting {
if err := s.metricCollector.Start(context.Background()); err != nil {
// log this error
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index 41b4e64a1..d1c35c2ce 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -250,6 +250,9 @@ type ParticipationRegistry interface {
// GetStateProofForRound fetches a record with stateproof secrets for a particular round.
GetStateProofForRound(id ParticipationID, round basics.Round) (StateProofRecordForRound, error)
+ // HasLiveKeys quickly checks whether a valid participation key exists over some range of rounds
+ HasLiveKeys(from, to basics.Round) bool
+
// Register updates the EffectiveFirst and EffectiveLast fields. If there are multiple records for the account
// then it is possible for multiple records to be updated.
Register(id ParticipationID, on basics.Round) error
@@ -726,6 +729,18 @@ func (db *participationDB) GetAll() []ParticipationRecord {
return results
}
+func (db *participationDB) HasLiveKeys(from, to basics.Round) bool {
+ db.mutex.RLock()
+ defer db.mutex.RUnlock()
+
+ for _, record := range db.cache {
+ if record.OverlapsInterval(from, to) {
+ return true
+ }
+ }
+ return false
+}
+
// GetStateProofForRound returns the state proof data required to sign the compact certificate for this round
func (db *participationDB) GetStateProofForRound(id ParticipationID, round basics.Round) (StateProofRecordForRound, error) {
partRecord, err := db.GetForRound(id, round)
diff --git a/data/accountManager.go b/data/accountManager.go
index 2d8efb409..7cadd071d 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -92,12 +92,7 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool {
manager.mu.Lock()
defer manager.mu.Unlock()
- for _, part := range manager.registry.GetAll() {
- if part.OverlapsInterval(from, to) {
- return true
- }
- }
- return false
+ return manager.registry.HasLiveKeys(from, to)
}
// AddParticipation adds a new account.Participation to be managed.
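Moving the scan into the registry lets it run against the in-memory cache under a read lock, rather than copying every record out through GetAll. A small sketch of the interval test this depends on, assuming OverlapsInterval has ordinary closed-interval semantics:

```go
package main

import "fmt"

// overlaps reports whether the closed interval [first, last] intersects
// [from, to]; this mirrors the assumed behavior of
// ParticipationRecord.OverlapsInterval.
func overlaps(first, last, from, to uint64) bool {
	return first <= to && last >= from
}

func main() {
	fmt.Println(overlaps(100, 200, 150, 300)) // true
	fmt.Println(overlaps(100, 200, 201, 300)) // false
}
```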
diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go
index d763203fe..114bb37f8 100644
--- a/data/bookkeeping/genesis.go
+++ b/data/bookkeeping/genesis.go
@@ -101,6 +101,51 @@ func (genesis Genesis) ID() string {
return string(genesis.Network) + "-" + genesis.SchemaID
}
+// Hash is the genesis hash.
+func (genesis Genesis) Hash() crypto.Digest {
+ return crypto.HashObj(genesis)
+}
+
+// Balances returns the genesis account balances.
+func (genesis Genesis) Balances() (GenesisBalances, error) {
+ genalloc := make(map[basics.Address]basics.AccountData)
+ for _, entry := range genesis.Allocation {
+ addr, err := basics.UnmarshalChecksumAddress(entry.Address)
+ if err != nil {
+ return GenesisBalances{}, fmt.Errorf("cannot parse genesis addr %s: %w", entry.Address, err)
+ }
+
+ _, present := genalloc[addr]
+ if present {
+ return GenesisBalances{}, fmt.Errorf("repeated allocation to %s", entry.Address)
+ }
+
+ genalloc[addr] = entry.State
+ }
+
+ feeSink, err := basics.UnmarshalChecksumAddress(genesis.FeeSink)
+ if err != nil {
+ return GenesisBalances{}, fmt.Errorf("cannot parse fee sink addr %s: %w", genesis.FeeSink, err)
+ }
+
+ rewardsPool, err := basics.UnmarshalChecksumAddress(genesis.RewardsPool)
+ if err != nil {
+ return GenesisBalances{}, fmt.Errorf("cannot parse rewards pool addr %s: %w", genesis.RewardsPool, err)
+ }
+
+ return MakeTimestampedGenesisBalances(genalloc, feeSink, rewardsPool, genesis.Timestamp), nil
+}
+
+// Block computes the genesis block.
+func (genesis Genesis) Block() (Block, error) {
+ genBal, err := genesis.Balances()
+ if err != nil {
+ return Block{}, err
+ }
+
+ return MakeGenesisBlock(genesis.Proto, genBal, genesis.ID(), genesis.Hash())
+}
+
// A GenesisAllocation object represents an allocation of algos to
// an address in the genesis block. Address is the checksummed
// short address. Comment is a note about what this address is
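Together, Hash, Balances, and Block let a caller go from a decoded genesis file straight to its genesis block. A minimal usage sketch, assuming the go-algorand module is importable and that decoding a genesis.json into `gen` is done elsewhere:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/bookkeeping"
)

func main() {
	var gen bookkeeping.Genesis // assume this was decoded from a genesis.json

	blk, err := gen.Block() // calls Balances() internally
	if err != nil {
		fmt.Println("invalid genesis:", err) // a zero Genesis fails here
		return
	}
	fmt.Println("ID:", gen.ID(), "hash:", gen.Hash(), "round:", blk.Round())
}
```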
diff --git a/data/bookkeeping/genesis_test.go b/data/bookkeeping/genesis_test.go
new file mode 100644
index 000000000..9ca60bd5e
--- /dev/null
+++ b/data/bookkeeping/genesis_test.go
@@ -0,0 +1,157 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package bookkeeping
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestGenesis_Balances(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ containsErrorFunc := func(str string) assert.ErrorAssertionFunc {
+ return func(_ assert.TestingT, err error, i ...interface{}) bool {
+ require.ErrorContains(t, err, str)
+ return true
+ }
+ }
+ mustAddr := func(addr string) basics.Address {
+ address, err := basics.UnmarshalChecksumAddress(addr)
+ require.NoError(t, err)
+ return address
+ }
+ makeAddr := func(addr uint64) basics.Address {
+ var address basics.Address
+ address[0] = byte(addr)
+ return address
+ }
+ acctWith := func(algos uint64, addr string) GenesisAllocation {
+ return GenesisAllocation{
+ _struct: struct{}{},
+ Address: addr,
+ Comment: "",
+ State: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: algos},
+ },
+ }
+ }
+ goodAddr := makeAddr(100)
+ allocation1 := acctWith(1000, makeAddr(1).String())
+ allocation2 := acctWith(2000, makeAddr(2).String())
+ badAllocation := acctWith(1234, "El Toro Loco")
+ type fields struct {
+ Allocation []GenesisAllocation
+ FeeSink string
+ RewardsPool string
+ }
+ tests := []struct {
+ name string
+ fields fields
+ want GenesisBalances
+ wantErr assert.ErrorAssertionFunc
+ }{
+ {
+ name: "basic test",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ want: GenesisBalances{
+ Balances: map[basics.Address]basics.AccountData{
+ mustAddr(allocation1.Address): allocation1.State,
+ },
+ FeeSink: goodAddr,
+ RewardsPool: goodAddr,
+ Timestamp: 0,
+ },
+ wantErr: assert.NoError,
+ },
+ {
+ name: "two test",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation2},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ want: GenesisBalances{
+ Balances: map[basics.Address]basics.AccountData{
+ mustAddr(allocation1.Address): allocation1.State,
+ mustAddr(allocation2.Address): allocation2.State,
+ },
+ FeeSink: goodAddr,
+ RewardsPool: goodAddr,
+ Timestamp: 0,
+ },
+ wantErr: assert.NoError,
+ },
+ {
+ name: "bad fee sink",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation2},
+ RewardsPool: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("cannot parse fee sink addr"),
+ },
+ {
+ name: "bad rewards pool",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation2},
+ FeeSink: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("cannot parse rewards pool addr"),
+ },
+ {
+ name: "bad genesis addr",
+ fields: fields{
+ Allocation: []GenesisAllocation{badAllocation},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("cannot parse genesis addr"),
+ },
+ {
+ name: "repeat address",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation1},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("repeated allocation to"),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ genesis := Genesis{
+ Allocation: tt.fields.Allocation,
+ FeeSink: tt.fields.FeeSink,
+ RewardsPool: tt.fields.RewardsPool,
+ }
+ got, err := genesis.Balances()
+ if tt.wantErr(t, err, "Balances()") {
+ return
+ }
+ assert.Equalf(t, tt.want, got, "Balances()")
+ })
+ }
+}
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 96b771f7c..44d68bd1e 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -180,6 +180,9 @@ _available_.
associated account of a contract that was created earlier in the
group is _available_.
+ * Since v7, the account associated with any contract present in the
+ `txn.ForeignApplications` field is _available_.
+
## Constants
Constants can be pushed onto the stack in two different ways:
@@ -273,6 +276,9 @@ return stack matches the name of the input value.
| `ecdsa_verify v` | for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1} |
| `ecdsa_pk_recover v` | for (data A, recovery id B, signature C, D) recover a public key |
| `ecdsa_pk_decompress v` | decompress pubkey A into components X, Y |
+| `bn256_add` | for (curve points A and B) return the curve point A + B |
+| `bn256_scalar_mul` | for (curve point A, scalar K) return the curve point KA |
+| `bn256_pairing` | for (points in G1 group G1s, points in G2 group G2s), return whether they are paired => {0 or 1} |
| `+` | A plus B. Fail on overflow. |
| `-` | A minus B. Fail if B > A. |
| `/` | A divided by B (truncated division). Fail if B == 0. |
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index 87b7c15d7..8464ec2dc 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -180,6 +180,9 @@ _available_.
associated account of a contract that was created earlier in the
group is _available_.
+ * Since v7, the account associated with any contract present in the
+ `txn.ForeignApplications` field is _available_.
+
## Constants
Constants can be pushed onto the stack in two different ways:
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 5ac8198bc..c86bc6136 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -748,10 +748,10 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
## base64_decode e
-- Opcode: 0x5c {uint8 encoding index}
+- Opcode: 0x5e {uint8 encoding index}
- Stack: ..., A: []byte &rarr; ..., []byte
- decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E
-- **Cost**: 1 + 1 per 16 bytes
+- **Cost**: 1 + 1 per 16 bytes of A
- Availability: v7
`base64` Encodings:
@@ -766,9 +766,10 @@ Decodes A using the base64 encoding E. Specify the encoding with an immediate ar
## json_ref r
-- Opcode: 0x5d {string return type}
+- Opcode: 0x5f {string return type}
- Stack: ..., A: []byte, B: []byte &rarr; ..., any
- return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A
+- **Cost**: 25 + 2 per 7 bytes of A
- Availability: v7
`json_ref` Types:
@@ -1097,6 +1098,36 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
- **Cost**: 130
- Availability: v7
+## bn256_add
+
+- Opcode: 0x99
+- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
+- for (curve points A and B) return the curve point A + B
+- **Cost**: 70
+- Availability: v7
+
+A and B are curve points in the G1 group. Each point consists of (X, Y), where X and Y are 256-bit integers, big-endian encoded. The encoded point is 64 bytes: the concatenation of the 32-byte X and the 32-byte Y.
+
+## bn256_scalar_mul
+
+- Opcode: 0x9a
+- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
+- for (curve point A, scalar K) return the curve point KA
+- **Cost**: 970
+- Availability: v7
+
+A is a curve point in the G1 group, encoded as described in `bn256_add`. Scalar K is a big-endian encoded big integer with no leading zero bytes.
+
+## bn256_pairing
+
+- Opcode: 0x9b
+- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
+- for (points in G1 group G1s, points in G2 group G2s), return whether they are paired => {0 or 1}
+- **Cost**: 8700
+- Availability: v7
+
+G1s is the concatenation of encoded G1 points, as described in `bn256_add`. G2s is the concatenation of encoded G2 points. Each G2 point has the form (XA0+i*XA1, YA0+i*YA1) and is encoded as the big-endian field elements XA0, XA1, YA0 and YA1 in sequence.
+
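Because the layout is fixed-width, encoders must preserve leading zero bytes in each coordinate. A hypothetical Go helper (not part of this change) that packs a G1 point into the 64-byte form described above:

```go
package main

import (
	"fmt"
	"math/big"
)

// encodeG1 packs a G1 point as 32-byte big-endian X followed by 32-byte
// big-endian Y, keeping leading zeros. FillBytes panics if a coordinate
// exceeds 256 bits, which matches the opcode's expectations.
func encodeG1(x, y *big.Int) []byte {
	buf := make([]byte, 64)
	x.FillBytes(buf[:32])
	y.FillBytes(buf[32:])
	return buf
}

func main() {
	fmt.Printf("%x\n", encodeG1(big.NewInt(1), big.NewInt(2)))
}
```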
## b+
- Opcode: 0xa0
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 8c9d5955e..6a05596b2 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -252,12 +252,18 @@ type OpStream struct {
// newOpStream constructs OpStream instances ready to invoke assemble. A new
// OpStream must be used for each call to assemble().
func newOpStream(version uint64) OpStream {
- return OpStream{
+ o := OpStream{
labels: make(map[string]int),
OffsetToLine: make(map[int]int),
typeTracking: true,
Version: version,
}
+
+ for i := range o.known.scratchSpace {
+ o.known.scratchSpace[i] = StackUint64
+ }
+
+ return o
}
// ProgramKnowledge tracks statically known information as we assemble
@@ -279,6 +285,8 @@ type ProgramKnowledge struct {
// deadcode indicates that the program is in deadcode, so no type checking
// errors should be reported.
deadcode bool
+
+ scratchSpace [256]StackType
}
func (pgm *ProgramKnowledge) pop() StackType {
@@ -312,6 +320,9 @@ func (pgm *ProgramKnowledge) reset() {
pgm.stack = nil
pgm.bottom = StackAny
pgm.deadcode = false
+ for i := range pgm.scratchSpace {
+ pgm.scratchSpace[i] = StackAny
+ }
}
// createLabel inserts a label to point to the next instruction, reporting an
@@ -334,7 +345,7 @@ func (ops *OpStream) referToLabel(pc int, label string) {
ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label})
}
-type refineFunc func(pgm ProgramKnowledge, immediates []string) (StackTypes, StackTypes)
+type refineFunc func(pgm *ProgramKnowledge, immediates []string) (StackTypes, StackTypes)
// returns allows opcodes like `txn` to be specific about their return value
// types, based on the field requested, rather than use Any as specified by
@@ -969,7 +980,19 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func typeSwap(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+// getByteImm interprets the arg at index argIndex as a single-byte immediate
+func getByteImm(args []string, argIndex int) (byte, bool) {
+ if len(args) <= argIndex {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(args[argIndex], 0, 8)
+ if err != nil {
+ return 0, false
+ }
+ return byte(n), true
+}
+
+func typeSwap(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
topTwo := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
@@ -982,12 +1005,9 @@ func typeSwap(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, reversed
}
-func typeDig(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- if len(args) == 0 {
- return nil, nil
- }
- n, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
+func typeDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ n, ok := getByteImm(args, 0)
+ if !ok {
return nil, nil
}
depth := int(n) + 1
@@ -1008,7 +1028,7 @@ func typeDig(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return anys, returns
}
-func typeEquals(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeEquals(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 0 {
//Require arg0 and arg1 to have same type
@@ -1017,7 +1037,7 @@ func typeEquals(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeDup(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeDup(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 0 {
return StackTypes{pgm.stack[top]}, StackTypes{pgm.stack[top], pgm.stack[top]}
@@ -1025,7 +1045,7 @@ func typeDup(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeDupTwo(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeDupTwo(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
topTwo := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
@@ -1037,7 +1057,7 @@ func typeDupTwo(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, append(topTwo, topTwo...)
}
-func typeSelect(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeSelect(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 2 {
if pgm.stack[top-1] == pgm.stack[top-2] {
@@ -1047,7 +1067,7 @@ func typeSelect(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeSetBit(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeSetBit(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 2 {
return nil, StackTypes{pgm.stack[top-2]}
@@ -1055,12 +1075,9 @@ func typeSetBit(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeCover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- if len(args) == 0 {
- return nil, nil
- }
- n, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
+func typeCover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ n, ok := getByteImm(args, 0)
+ if !ok {
return nil, nil
}
depth := int(n) + 1
@@ -1086,12 +1103,9 @@ func typeCover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return anys, returns
}
-func typeUncover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- if len(args) == 0 {
- return nil, nil
- }
- n, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
+func typeUncover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ n, ok := getByteImm(args, 0)
+ if !ok {
return nil, nil
}
depth := int(n) + 1
@@ -1114,7 +1128,7 @@ func typeUncover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return anys, returns
}
-func typeTxField(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeTxField(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
if len(args) != 1 {
return nil, nil
}
@@ -1125,6 +1139,51 @@ func typeTxField(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return StackTypes{fs.ftype}, nil
}
+func typeStore(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ scratchIndex, ok := getByteImm(args, 0)
+ if !ok {
+ return nil, nil
+ }
+ top := len(pgm.stack) - 1
+ if top >= 0 {
+ pgm.scratchSpace[scratchIndex] = pgm.stack[top]
+ }
+ return nil, nil
+}
+
+func typeStores(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ top := len(pgm.stack) - 1
+ if top < 0 {
+ return nil, nil
+ }
+ for i := range pgm.scratchSpace {
+ // We can't know what slot stacktop is being stored in, but we can at least keep the slots that are the same type as stacktop
+ if pgm.scratchSpace[i] != pgm.stack[top] {
+ pgm.scratchSpace[i] = StackAny
+ }
+ }
+ return nil, nil
+}
+
+func typeLoad(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ scratchIndex, ok := getByteImm(args, 0)
+ if !ok {
+ return nil, nil
+ }
+ return nil, StackTypes{pgm.scratchSpace[scratchIndex]}
+}
+
+func typeLoads(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ scratchType := pgm.scratchSpace[0]
+ for _, item := range pgm.scratchSpace {
+ // If all the scratch slots are one type, then we can say we are loading that type
+ if item != scratchType {
+ return nil, nil
+ }
+ }
+ return nil, StackTypes{scratchType}
+}
+
// keywords or "pseudo-ops" handle parsing and assembling special asm language
// constructs like 'addr' We use an OpSpec here, but it's somewhat degenerate,
// since they don't have opcodes or eval functions. But it does need a lot of
@@ -1353,10 +1412,11 @@ func (ops *OpStream) assemble(text string) error {
// enough to report follow-on errors. Of course, we still have to
// bail out on the assembly as a whole.
spec, ok = OpsByName[AssemblerMaxVersion][opstring]
- if !ok {
+ if ok {
+ ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
+ } else {
spec, ok = keywords[opstring]
}
- ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
}
if ok {
ops.trace("%3d: %s\t", ops.sourceLine, opstring)
@@ -1366,7 +1426,7 @@ func (ops *OpStream) assemble(text string) error {
}
args, returns := spec.Arg.Types, spec.Return.Types
if spec.OpDetails.refine != nil {
- nargs, nreturns := spec.OpDetails.refine(ops.known, fields[1:])
+ nargs, nreturns := spec.OpDetails.refine(&ops.known, fields[1:])
if nargs != nil {
args = nargs
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 609366ed2..5a057047b 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -25,6 +25,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -376,11 +378,11 @@ pushbytes 0x012345
dup
dup
ed25519verify_bare
-`
+` + pairingNonsense
const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a"
-const v7Compiled = v6Compiled + "5c005d018120af060180070123456789abcd49490501988003012345494984"
+const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984" + pairingCompiled
var nonsense = map[uint64]string{
1: v1Nonsense,
@@ -448,6 +450,19 @@ func TestAssemble(t *testing.T) {
}
}
+var experiments = []uint64{fidoVersion, pairingVersion}
+
+// TestExperimental forces a conscious choice to promote "experimental" opcode
+// groups. This will fail when we increment vFuture's LogicSigVersion. If we had
+// intended to release the opcodes, they should have been removed from
+// `experiments`.
+func TestExperimental(t *testing.T) {
+ futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
+ for _, v := range experiments {
+ require.Equal(t, futureV, v)
+ }
+}
+
func TestAssembleAlias(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -574,6 +589,7 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
func TestAssembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna i beyond 255: 256")
testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna i beyond 255: 256")
@@ -606,6 +622,7 @@ func TestAssembleTxna(t *testing.T) {
func TestAssembleGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
testLine(t, "global", AssemblerMaxVersion, "global expects 1 immediate argument")
testLine(t, "global a", AssemblerMaxVersion, "global unknown field: \"a\"")
@@ -619,6 +636,7 @@ func TestAssembleGlobal(t *testing.T) {
func TestAssembleDefault(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
source := `byte 0x1122334455
int 1
@@ -635,6 +653,7 @@ func mutateProgVersion(version uint64, prog string) string {
func TestOpUint(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
@@ -651,8 +670,8 @@ func TestOpUint(t *testing.T) {
func TestOpUint64(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
t.Parallel()
@@ -668,8 +687,8 @@ func TestOpUint64(t *testing.T) {
func TestOpBytes(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := newOpStream(v)
@@ -684,7 +703,6 @@ func TestOpBytes(t *testing.T) {
func TestAssembleInt(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
expectedDefaultConsts := "012001bef5fad70c22"
@@ -717,8 +735,8 @@ base64.b16encode(raw.encode())
func TestAssembleBytes(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
variations := []string{
"byte b32 MFRGGZDFMY",
"byte base32 MFRGGZDFMY",
@@ -776,6 +794,7 @@ func TestAssembleBytes(t *testing.T) {
func TestAssembleBytesString(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
@@ -787,7 +806,6 @@ func TestAssembleBytesString(t *testing.T) {
func TestAssembleOptimizedConstants(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
t.Run("Bytes", func(t *testing.T) {
@@ -996,7 +1014,6 @@ bytec_1 // 0x0103
func TestAssembleOptimizedUint(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
program := `
@@ -1020,6 +1037,7 @@ int ClearState
func TestFieldsFromLine(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
line := "op arg"
fields := fieldsFromLine(line)
@@ -1228,8 +1246,8 @@ func TestFieldsFromLine(t *testing.T) {
func TestAssembleRejectNegJump(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
source := `wat:
int 1
bnz wat
@@ -1248,8 +1266,8 @@ int 2`
func TestAssembleBase64(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
text := `byte base64 //GWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz+0=
byte base64 avGWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz//=
//
@@ -1283,8 +1301,8 @@ byte b64 avGWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz//=
func TestAssembleRejectUnkLabel(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
source := `int 1
bnz nowhere
int 2`
@@ -1297,8 +1315,8 @@ int 2`
func TestAssembleJumpToTheEnd(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
source := `intcblock 1
intc 0
intc 0
@@ -1313,8 +1331,8 @@ done:`
func TestMultipleErrors(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
source := `int 1
bnz nowhere
// comment
@@ -1331,9 +1349,9 @@ int 2`
func TestAssembleDisassemble(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// Specifically constructed program text that should be recreated by Disassemble()
- t.Parallel()
text := fmt.Sprintf(`#pragma version %d
intcblock 0 1 2 3 4 5
bytecblock 0xcafed00d 0x1337 0x68656c6c6f 0xdeadbeef 0x70077007 0x0102030405060708091011121314151617181920212223242526272829303132
@@ -1459,11 +1477,10 @@ itxn LastLog
func TestAssembleDisassembleCycle(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// Test that disassembly re-assembles to the same program bytes.
// Disassembly won't necessarily perfectly recreate the source text, but assembling the result of Disassemble() should be the same program bytes.
- t.Parallel()
-
// This confirms that each program compiles to the same bytes
// (except the leading version indicator), when compiled under
// original version, unspecified version (so it should pick up
@@ -1489,7 +1506,6 @@ func TestAssembleDisassembleCycle(t *testing.T) {
func TestConstantDisassembly(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
ops := testProg(t, "int 47", AssemblerMaxVersion)
@@ -1522,6 +1538,7 @@ func TestConstantDisassembly(t *testing.T) {
func TestConstantArgs(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
testProg(t, "int", v, Expect{1, "int needs one argument"})
testProg(t, "intc", v, Expect{1, "intc operation needs one argument"})
@@ -1538,7 +1555,6 @@ func TestConstantArgs(t *testing.T) {
func TestAssembleDisassembleErrors(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
source := `txn Sender`
@@ -1656,8 +1672,8 @@ func TestAssembleDisassembleErrors(t *testing.T) {
func TestAssembleVersions(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
testLine(t, "txna Accounts 0", AssemblerMaxVersion, "")
testLine(t, "txna Accounts 0", 2, "")
testLine(t, "txna Accounts 0", 1, "txna opcode was introduced in TEAL v2")
@@ -1665,7 +1681,6 @@ func TestAssembleVersions(t *testing.T) {
func TestAssembleBalance(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
source := `byte 0x00
@@ -1682,7 +1697,6 @@ int 1
func TestAssembleMinBalance(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
source := `byte 0x00
@@ -1699,8 +1713,8 @@ int 1
func TestAssembleAsset(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
testProg(t, "asset_holding_get ABC 1", v,
Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
@@ -1729,7 +1743,6 @@ func TestAssembleAsset(t *testing.T) {
func TestDisassembleSingleOp(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
@@ -1746,8 +1759,8 @@ func TestDisassembleSingleOp(t *testing.T) {
func TestDisassembleInt(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
txnSample := fmt.Sprintf("#pragma version %d\nint 17\nint 27\nint 37\nint 47\nint 5\nint 17\n", AssemblerMaxVersion)
ops := testProg(t, txnSample, AssemblerMaxVersion)
disassembled, err := Disassemble(ops.Program)
@@ -1764,8 +1777,8 @@ func TestDisassembleInt(t *testing.T) {
func TestDisassembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
// txn was 1, but this tests both
introduction := OpsByName[LogicVersion]["gtxna"].Version
for v := introduction; v <= AssemblerMaxVersion; v++ {
@@ -1793,8 +1806,8 @@ func TestDisassembleTxna(t *testing.T) {
func TestDisassembleGtxna(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
// check gtxn and gtxna are properly disassembled
introduction := OpsByName[LogicVersion]["gtxna"].Version
@@ -1822,8 +1835,8 @@ func TestDisassembleGtxna(t *testing.T) {
func TestDisassemblePushConst(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
// check pushint and pushbytes are properly disassembled
intSample := fmt.Sprintf("#pragma version %d\npushint 1\n", AssemblerMaxVersion)
expectedIntSample := intSample
@@ -1852,7 +1865,6 @@ func TestDisassemblePushConst(t *testing.T) {
func TestDisassembleLastLabel(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
// starting from TEAL v2 branching to the last line are legal
@@ -1875,8 +1887,8 @@ label1:
func TestAssembleOffsets(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
source := "err"
ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, 2, len(ops.Program))
@@ -1975,8 +1987,8 @@ err
func TestHasStatefulOps(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
source := "int 1"
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1998,8 +2010,8 @@ err
func TestStringLiteralParsing(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
s := `"test"`
e := []byte(`test`)
result, err := parseStringLiteral(s)
@@ -2095,8 +2107,8 @@ func TestStringLiteralParsing(t *testing.T) {
func TestPragmas(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
text := fmt.Sprintf("#pragma version %d", v)
ops := testProg(t, text, v)
@@ -2162,8 +2174,8 @@ func TestPragmas(t *testing.T) {
func TestAssemblePragmaVersion(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
text := `#pragma version 1
int 1
`
@@ -2216,8 +2228,8 @@ len
func TestAssembleConstants(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
+
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
testLine(t, "intc 1", v, "intc 1 is not defined")
@@ -2231,6 +2243,7 @@ func TestAssembleConstants(t *testing.T) {
func TestErrShortBytecblock(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8`
ops, err := AssembleStringWithVersion(text, 1)
@@ -2246,6 +2259,7 @@ func TestErrShortBytecblock(t *testing.T) {
func TestMethodWarning(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
tests := []struct {
method string
@@ -2292,6 +2306,7 @@ func TestMethodWarning(t *testing.T) {
func TestBranchAssemblyTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
text := `
int 0 // current app id [0]
@@ -2326,6 +2341,7 @@ flip: // [x]
func TestSwapTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+
/* reconfirm that we detect this type error */
testProg(t, "int 1; byte 0x1234; +", AssemblerMaxVersion, Expect{3, "+ arg 1..."})
/* despite swap, we track types */
@@ -2392,6 +2408,29 @@ func TestSetBitTypeCheck(t *testing.T) {
testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
}
+func TestScratchTypeCheck(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ // All scratch slots should start as uint64
+ testProg(t, "load 0; int 1; +", AssemblerMaxVersion)
+ // Check load and store accurately using the scratch space
+ testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, Expect{5, "+ arg 0..."})
+ // Loads should know the type it's loading if all the slots are the same type
+ testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, Expect{3, "btoi arg 0..."})
+ // Loads doesn't know the type when slot types vary
+ testProg(t, "byte 0x01; store 0; int 1; loads; btoi", AssemblerMaxVersion)
+ // Stores should only set slots to StackAny if they are not the same type as what is being stored
+ testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, Expect{8, "+ arg 0..."})
+ // ScratchSpace should reset after hitting label in deadcode
+ testProg(t, "byte 0x01; store 0; b label1; label1:; load 0; int 1; +", AssemblerMaxVersion)
+ // But it should reset to StackAny not uint64
+ testProg(t, "int 1; store 0; b label1; label1:; load 0; btoi", AssemblerMaxVersion)
+ // Callsubs should also reset the scratch space
+ testProg(t, "callsub A; load 0; btoi; return; A: byte 0x01; store 0; retsub", AssemblerMaxVersion)
+ // But the scratchspace should still be tracked after the callsub
+ testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, Expect{5, "btoi arg 0..."})
+}
+
func TestCoverAsm(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -2424,6 +2463,8 @@ func TestTxTypes(t *testing.T) {
}
func TestBadInnerFields(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{3, "...is not allowed."})
testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{3, "...is not allowed."})
testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{3, "...is not allowed."})
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index fdad62548..6ebd98478 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -32,6 +32,9 @@ var opDocByName = map[string]string{
"ecdsa_verify": "for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}",
"ecdsa_pk_decompress": "decompress pubkey A into components X, Y",
"ecdsa_pk_recover": "for (data A, recovery id B, signature C, D) recover a public key",
+ "bn256_add": "for (curve points A and B) return the curve point A + B",
+ "bn256_scalar_mul": "for (curve point A, scalar K) return the curve point KA",
+ "bn256_pairing": "for (points in G1 group G1s, points in G2 group G2s), return whether they are paired => {0 or 1}",
"+": "A plus B. Fail on overflow.",
"-": "A minus B. Fail if B > A.",
@@ -262,6 +265,9 @@ var opDocExtras = map[string]string{
"ecdsa_verify": "The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.",
"ecdsa_pk_decompress": "The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.",
"ecdsa_pk_recover": "S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to deriver a public key. All values are big-endian encoded. The signed data must be 32 bytes long.",
+ "bn256_add": "A, B are curve points in G1 group. Each point consists of (X, Y) where X and Y are 256 bit integers, big-endian encoded. The encoded point is 64 bytes from concatenation of 32 byte X and 32 byte Y.",
+ "bn256_scalar_mul": "A is a curve point in G1 Group and encoded as described in `bn256_add`. Scalar K is a big-endian encoded big integer that has no padding zeros.",
+ "bn256_pairing": "G1s are encoded by the concatenation of encoded G1 points, as described in `bn256_add`. G2s are encoded by the concatenation of encoded G2 points. Each G2 is in form (XA0+i*XA1, YA0+i*YA1) and encoded by big-endian field element XA0, XA1, YA0 and YA1 in sequence.",
"bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)",
"bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
"b": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
@@ -319,7 +325,7 @@ func OpDocExtra(opName string) string {
// here is the order args opcodes are presented, so place related
// opcodes consecutively, even if their opcode values are not.
var OpGroups = map[string][]string{
- "Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"},
+ "Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "bn256_add", "bn256_scalar_mul", "bn256_pairing", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"},
"Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "base64_decode", "json_ref"},
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
@@ -350,7 +356,8 @@ func OpAllCosts(opName string) []VerCost {
if !ok {
continue
}
- cost := spec.OpDetails.docCost()
+ argLen := len(spec.Arg.Types)
+ cost := spec.OpDetails.docCost(argLen)
if costs == nil || cost != costs[len(costs)-1].Cost {
costs = append(costs, VerCost{v, v, cost})
} else {
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index e621ef38e..9ddb422f4 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -1033,13 +1033,13 @@ func (cx *EvalContext) step() error {
return nil
}
-// oneBlank is a boring stack provided to deets.Cost during checkStep. It is
+// blankStack is a boring stack provided to deets.Cost during checkStep. It is
// good enough to allow Cost() to not crash. It would be incorrect to provide
// this stack if there were linear cost opcodes before backBranchEnabledVersion,
// because the static cost would be wrong. But then again, a static cost model
// wouldn't work before backBranchEnabledVersion, so such an opcode is already
// unacceptable. TestLinearOpcodes ensures this.
-var oneBlank = []stackValue{{Bytes: []byte{}}}
+var blankStack = make([]stackValue, 5)
func (cx *EvalContext) checkStep() (int, error) {
cx.instructionStarts[cx.pc] = true
@@ -1055,7 +1055,7 @@ func (cx *EvalContext) checkStep() (int, error) {
if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
return 0, fmt.Errorf("%s program ends short of immediate values", spec.Name)
}
- opcost := deets.Cost(cx.program, cx.pc, oneBlank)
+ opcost := deets.Cost(cx.program, cx.pc, blankStack)
if opcost <= 0 {
return 0, fmt.Errorf("%s reported non-positive cost", spec.Name)
}
@@ -3524,12 +3524,12 @@ func opExtract64Bits(cx *EvalContext) error {
}
// accountReference yields the address and Accounts offset designated by a
-// stackValue. If the stackValue is the app account or an account of an app in
-// created.apps, and it is not be in the Accounts array, then len(Accounts) + 1
-// is returned as the index. This would let us catch the mistake if the index is
-// used for set/del. If the txn somehow "psychically" predicted the address, and
-// therefore it IS in txn.Accounts, then happy day, we can set/del it. Return
-// the proper index.
+// stackValue. If the stackValue is the app account, an account of an app in
+// created.apps, or an account of an app in foreignApps, and it is not in the
+// Accounts array, then len(Accounts) + 1 is returned as the index. This would
+// let us catch the mistake if the index is used for set/del. If the txn somehow
+// "psychically" predicted the address, and therefore it IS in txn.Accounts,
+// then happy day, we can set/del it. Return the proper index.
// If we ever want apps to be able to change local state on these accounts
// (which includes this app's own account!), we will need a change to
@@ -3558,6 +3558,16 @@ func (cx *EvalContext) accountReference(account stackValue) (basics.Address, uin
}
}
+ // Allow an address for an app that was provided in the foreign apps array.
+ if err != nil && cx.version >= appAddressAvailableVersion {
+ for _, appID := range cx.txn.Txn.ForeignApps {
+ foreignAddress := cx.getApplicationAddress(appID)
+ if addr == foreignAddress {
+ return addr, invalidIndex, nil
+ }
+ }
+ }
+
// this app's address is also allowed
if err != nil {
appAddr := cx.getApplicationAddress(cx.appID)
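This hunk compares the stack value against the derived address of each app in ForeignApps. For reference, a standalone sketch of the derivation that `getApplicationAddress` relies on: the app account is the SHA-512/256 hash of the literal prefix "appID" followed by the big-endian app id.

```go
package main

import (
	"crypto/sha512"
	"encoding/binary"
	"fmt"
)

// appAddress sketches the derivation: SHA-512/256 over "appID" plus the
// 8-byte big-endian application id yields the app's account address.
func appAddress(appID uint64) [32]byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, appID)
	return sha512.Sum512_256(append([]byte("appID"), buf...))
}

func main() {
	fmt.Printf("%x\n", appAddress(888))
}
```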
@@ -4716,6 +4726,21 @@ func base64Decode(encoded []byte, encoding *base64.Encoding) ([]byte, error) {
return decoded[:n], err
}
+// base64padded returns true iff `encoded` has padding chars at the end
+func base64padded(encoded []byte) bool {
+ for i := len(encoded) - 1; i > 0; i-- {
+ switch encoded[i] {
+ case '=':
+ return true
+ case '\n', '\r':
+ /* nothing */
+ default:
+ return false
+ }
+ }
+ return false
+}
+
func opBase64Decode(cx *EvalContext) error {
last := len(cx.stack) - 1
encodingField := Base64Encoding(cx.program[cx.pc+1])
@@ -4728,64 +4753,63 @@ func opBase64Decode(cx *EvalContext) error {
if encodingField == StdEncoding {
encoding = base64.StdEncoding
}
- encoding = encoding.Strict()
- bytes, err := base64Decode(cx.stack[last].Bytes, encoding)
+ encoded := cx.stack[last].Bytes
+ if !base64padded(encoded) {
+ encoding = encoding.WithPadding(base64.NoPadding)
+ }
+ bytes, err := base64Decode(encoded, encoding.Strict())
if err != nil {
return err
}
cx.stack[last].Bytes = bytes
return nil
}
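The effect: inputs that end with '=' are decoded strictly with padding required, while unpadded inputs are decoded with padding disabled. A self-contained stdlib approximation (unlike `base64padded` above, this simplified check ignores trailing CR/LF):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// decode mimics the padded/unpadded split: inputs ending in '=' are decoded
// strictly with padding required; all other inputs with padding disabled.
func decode(s string) ([]byte, error) {
	enc := base64.StdEncoding
	if len(s) == 0 || s[len(s)-1] != '=' {
		enc = enc.WithPadding(base64.NoPadding)
	}
	return enc.Strict().DecodeString(s)
}

func main() {
	for _, s := range []string{"SQ==", "SQ", "SQ="} {
		b, err := decode(s)
		fmt.Printf("%q -> %q err=%v\n", s, b, err)
	}
	// "SQ==" -> "I", "SQ" -> "I", "SQ=" fails: padding present but wrong
}
```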
-func hasDuplicateKeys(jsonText []byte) (bool, map[string]json.RawMessage, error) {
+
+func isPrimitiveJSON(jsonText []byte) (bool, error) {
dec := json.NewDecoder(bytes.NewReader(jsonText))
- parsed := make(map[string]json.RawMessage)
t, err := dec.Token()
if err != nil {
- return false, nil, err
+ return false, err
}
t, ok := t.(json.Delim)
if !ok || t.(json.Delim).String() != "{" {
- return false, nil, fmt.Errorf("only json object is allowed")
- }
- for dec.More() {
- var value json.RawMessage
- // get JSON key
- key, err := dec.Token()
- if err != nil {
- return false, nil, err
- }
- // end of json
- if key == '}' {
- break
- }
- // decode value
- err = dec.Decode(&value)
- if err != nil {
- return false, nil, err
- }
- // check for duplicates
- if _, ok := parsed[key.(string)]; ok {
- return true, nil, nil
- }
- parsed[key.(string)] = value
+ return true, nil
}
- return false, parsed, nil
+ return false, nil
}
func parseJSON(jsonText []byte) (map[string]json.RawMessage, error) {
- if !json.Valid(jsonText) {
+ // parse JSON with Algorand's standard JSON library
+ var parsed map[interface{}]json.RawMessage
+ err := protocol.DecodeJSON(jsonText, &parsed)
+
+ if err != nil {
+ // if the error was caused by duplicate keys
+ if strings.Contains(err.Error(), "cannot decode into a non-pointer value") {
+ return nil, fmt.Errorf("invalid json text, duplicate keys not allowed")
+ }
+
+ // if the error was caused by non-json object
+ if strings.Contains(err.Error(), "read map - expect char '{' but got char") {
+ return nil, fmt.Errorf("invalid json text, only json object is allowed")
+ }
+
return nil, fmt.Errorf("invalid json text")
}
- // parse json text and check for duplicate keys
- hasDuplicates, parsed, err := hasDuplicateKeys(jsonText)
- if hasDuplicates {
- return nil, fmt.Errorf("invalid json text, duplicate keys not allowed")
- }
- if err != nil {
- return nil, fmt.Errorf("invalid json text, %v", err)
+
+ // check whether any keys are not strings
+ stringMap := make(map[string]json.RawMessage)
+ for k, v := range parsed {
+ key, ok := k.(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid json text")
+ }
+ stringMap[key] = v
}
- return parsed, nil
+
+ return stringMap, nil
}
+
func opJSONRef(cx *EvalContext) error {
// get json key
last := len(cx.stack) - 1
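For context on why `parseJSON` routes through `protocol.DecodeJSON` rather than `encoding/json`: the standard decoder accepts duplicate keys and silently keeps the last value, as this small demo shows.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var m map[string]json.RawMessage
	err := json.Unmarshal([]byte(`{"key0":"a","key0":"b"}`), &m)
	fmt.Println(string(m["key0"]), err) // "b" <nil>: duplicates slip through
}
```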
@@ -4809,6 +4833,17 @@ func opJSONRef(cx *EvalContext) error {
var stval stackValue
_, ok = parsed[key]
if !ok {
+ // if the key is not found, first check whether the JSON text is the null value
+ // by checking whether it is a primitive JSON value. Any other primitive
+ // (or array) would have thrown an error previously during `parseJSON`.
+ isPrimitive, err := isPrimitiveJSON(cx.stack[last].Bytes)
+ if err == nil && isPrimitive {
+ err = fmt.Errorf("invalid json text, only json object is allowed")
+ }
+ if err != nil {
+ return fmt.Errorf("error while parsing JSON text, %v", err)
+ }
+
return fmt.Errorf("key %s not found in JSON text", key)
}
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index 7815996e0..f1ae3c40d 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -2907,3 +2907,30 @@ itxn_submit
TestApp(t, source, ep, "appl depth (8) exceeded")
}
+
+func TestForeignAppAccountAccess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ep, tx, ledger := MakeSampleEnv()
+ ledger.NewAccount(appAddr(888), 50_000)
+ tx.ForeignApps = []basics.AppIndex{basics.AppIndex(111)}
+
+ ledger.NewApp(tx.Sender, 111, basics.AppParams{
+ ApprovalProgram: TestProg(t, "int 1", AssemblerMaxVersion).Program,
+ ClearStateProgram: TestProg(t, "int 1", AssemblerMaxVersion).Program,
+ })
+
+ TestApp(t, `
+itxn_begin
+int pay
+itxn_field TypeEnum
+int 100
+itxn_field Amount
+txn Applications 1
+app_params_get AppAddress
+assert
+itxn_field Receiver
+itxn_submit
+int 1
+`, ep)
+}
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index 6d7734897..84a88f6e4 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -25,9 +25,11 @@ import (
"encoding/hex"
"fmt"
"math/big"
+ mrand "math/rand"
"strconv"
"testing"
+ "github.com/consensys/gnark-crypto/ecc/bn254"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
@@ -771,3 +773,129 @@ int 1`
benchmarkEcdsa(b, source, Secp256k1)
})
}
+
+type benchmarkBn256Data struct {
+ a []byte
+ k []byte
+ g1 []byte
+ g2 []byte
+ programs []byte
+}
+
+func benchmarkBn256DataGenData(b *testing.B) (data []benchmarkBn256Data) {
+ data = make([]benchmarkBn256Data, b.N)
+ var g1Gen bn254.G1Jac
+ var g1GenAff bn254.G1Affine
+ g1Gen.X.SetString("1")
+ g1Gen.Y.SetString("2")
+ g1Gen.Z.SetString("1")
+ g1GenAff.FromJacobian(&g1Gen)
+ var a bn254.G1Affine
+ a.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(mrand.Uint64()))
+
+ for i := 0; i < b.N; i++ {
+ var a bn254.G1Affine
+ a.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(mrand.Uint64()))
+
+ data[i].a = bN254G1ToBytes(&a)
+ data[i].k = new(big.Int).SetUint64(mrand.Uint64()).Bytes()
+
+ // Pair one g1 and one g2
+ data[i].g1, _ = hex.DecodeString("0ebc9fc712b13340c800793386a88385e40912a21bacad2cc7db17d36e54c802238449426931975cced7200f08681ab9a86a2e5c2336cf625451cf2413318e32")
+ data[i].g2, _ = hex.DecodeString("217fbd9a9db5719cfbe3580e3d8750cada058fdfffe95c440a0528ffc608f36e05d6a67604658d40b3e4cac3c46150f2702d87739b7774d79a8147f7271773b420f9429ee13c1843404bfd70e75efa886c173e57dde32970274d8bc53dfd562403f6276318990d053785b4ca342ebc4581a23a39285804bb74e079aa2ef3ba66")
+ }
+ return data
+}
+
+func benchmarkBn256(b *testing.B, source string) {
+ data := benchmarkBn256DataGenData(b)
+ ops, err := AssembleStringWithVersion(source, 7)
+ require.NoError(b, err)
+ for i := 0; i < b.N; i++ {
+ data[i].programs = ops.Program
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var txn transactions.SignedTxn
+ txn.Lsig.Logic = data[i].programs
+ txn.Lsig.Args = [][]byte{data[i].a, data[i].k, data[i].g1, data[i].g2}
+ ep := defaultEvalParams(&txn)
+ pass, err := EvalSignature(0, ep)
+ if !pass {
+ b.Log(hex.EncodeToString(data[i].programs))
+ b.Log(ep.Trace.String())
+ }
+ if err != nil {
+ require.NoError(b, err)
+ }
+ if !pass {
+ require.True(b, pass)
+ }
+ }
+}
+
+func BenchmarkBn256AddRaw(b *testing.B) {
+ data := benchmarkBn256DataGenData(b)
+ a1 := bytesToBN254G1(data[0].g1)
+ a2 := bytesToBN254G1(data[0].g1)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = new(bn254.G1Affine).Add(&a1, &a2)
+ }
+}
+
+func BenchmarkBn256AddWithMarshal(b *testing.B) {
+ b.ResetTimer()
+ var v [][]byte
+ v = make([][]byte, b.N)
+ g1, _ := hex.DecodeString("0ebc9fc712b13340c800793386a88385e40912a21bacad2cc7db17d36e54c802238449426931975cced7200f08681ab9a86a2e5c2336cf625451cf2413318e32")
+
+ for i := 0; i < b.N; i++ {
+ a1 := bytesToBN254G1(g1)
+ a2 := bytesToBN254G1(g1)
+ r := new(bn254.G1Affine).Add(&a1, &a2)
+ v[i] = r.Marshal()
+ }
+}
+
+func BenchmarkBn256PairingRaw(b *testing.B) {
+ data := benchmarkBn256DataGenData(b)
+ g1s := bytesToBN254G1s(data[0].g1)
+ g2s := bytesToBN254G2s(data[0].g2)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ok, _ := bn254.PairingCheck(g1s, g2s)
+ require.False(b, ok)
+ }
+}
+
+func BenchmarkBn256(b *testing.B) {
+ b.Run("bn256 add", func(b *testing.B) {
+ benchmarkOperation(b, "byte 0x0ebc9fc712b13340c800793386a88385e40912a21bacad2cc7db17d36e54c802238449426931975cced7200f08681ab9a86a2e5c2336cf625451cf2413318e32", "dup; bn256_add", "pop; int 1")
+ })
+
+ b.Run("bn256 scalar mul", func(b *testing.B) {
+ source := `#pragma version 7
+arg 0
+arg 1
+bn256_scalar_mul
+pop
+int 1
+`
+ benchmarkBn256(b, source)
+ })
+
+ b.Run("bn256 pairing", func(b *testing.B) {
+ source := `#pragma version 7
+arg 2
+arg 3
+bn256_pairing
+pop
+int 1
+`
+ benchmarkBn256(b, source)
+ })
+}
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 730978fd8..e861883a3 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -2388,6 +2388,10 @@ func TestReturnTypes(t *testing.T) {
"ecdsa_verify": true,
"ecdsa_pk_recover": true,
"ecdsa_pk_decompress": true,
+
+ "bn256_add": true,
+ "bn256_scalar_mul": true,
+ "bn256_pairing": true,
}
byName := OpsByName[LogicVersion]
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 04b3741eb..3fbd46af1 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -20,6 +20,7 @@ import (
"encoding/base64"
"encoding/binary"
"encoding/hex"
+ "encoding/json"
"fmt"
"strconv"
"strings"
@@ -114,7 +115,7 @@ func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams {
ep := defaultEvalParamsWithVersion(txn, LogicVersion)
ep.Trace = nil // Tracing would slow down benchmarks
clone := *ep.Proto
- bigBudget := 1000 * 1000 // Allow long run times
+ bigBudget := 2 * 1000 * 1000 // Allow long run times
clone.LogicSigMaxCost = uint64(bigBudget)
clone.MaxAppProgramCost = bigBudget
ep.Proto = &clone
@@ -260,6 +261,22 @@ func TestWrongProtoVersion(t *testing.T) {
}
}
+func TestBlankStackSufficient(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ for v := 0; v <= LogicVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ spec := opsByOpcode[v][i]
+ argLen := len(spec.Arg.Types)
+ blankStackLen := len(blankStack)
+ require.GreaterOrEqual(t, blankStackLen, argLen)
+ }
+ })
+ }
+}
+
func TestSimpleMath(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -3564,9 +3581,89 @@ func BenchmarkCheckx5(b *testing.B) {
}
}
+func makeNestedKeys(depth int) string {
+ if depth <= 0 {
+ return `{\"key0\":\"value0\"}`
+ }
+ return fmt.Sprintf(`{\"key0\":%s}`, makeNestedKeys(depth-1))
+}
+
+func BenchmarkJsonRef(b *testing.B) {
+ // base case
+ oneKey := `{\"key0\":\"value0\"}`
+
+ // many keys
+ sb := &strings.Builder{}
+ sb.WriteString(`{`)
+ for i := 0; i < 100; i++ {
+ sb.WriteString(fmt.Sprintf(`\"key%d\":\"value%d\",`, i, i))
+ }
+ sb.WriteString(`\"key100\":\"value100\"}`) // so there is no trailing comma
+ manyKeys := sb.String()
+
+ lenOfManyKeys := len(manyKeys)
+ longTextLen := lenOfManyKeys - 36 // subtract the JSON overhead so the sample is as long as manyKeys
+ mediumText := strings.Repeat("a", longTextLen/2)
+ longText := strings.Repeat("a", longTextLen)
+
+ // medium key
+ mediumKey := fmt.Sprintf(`{\"%s\":\"value\",\"key1\":\"value2\"}`, mediumText)
+
+ // long key
+ longKey := fmt.Sprintf(`{\"%s\":\"value\",\"key1\":\"value2\"}`, longText)
+
+ // long value
+ longValue := fmt.Sprintf(`{\"key0\":\"%s\",\"key1\":\"value2\"}`, longText)
+
+ // nested keys
+ nestedKeys := makeNestedKeys(200)
+
+ jsonLabels := []string{"one key", "many keys", "medium key", "long key", "long val", "nested keys"}
+ jsonSamples := []string{oneKey, manyKeys, mediumKey, longKey, longValue, nestedKeys}
+ keys := [][]string{
+ {"key0"},
+ {"key0", "key100"},
+ {mediumText, "key1"},
+ {longText, "key1"},
+ {"key0", "key1"},
+ {"key0"},
+ }
+ valueFmt := [][]string{
+ {"JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONObject"},
+ }
+ benches := [][]string{}
+ for i, label := range jsonLabels {
+ for j, key := range keys[i] {
+ prog := fmt.Sprintf(`byte "%s"; byte "%s"; json_ref %s; pop;`, jsonSamples[i], key, valueFmt[i][j])
+
+ // indicate long key
+ keyLabel := key
+ if len(key) > 50 {
+ keyLabel = fmt.Sprintf("long_key_%d", len(key))
+ }
+
+ benches = append(benches, []string{
+ fmt.Sprintf("%s_%s", label, keyLabel), // label
+ "", // prefix
+ prog, // operation
+ "int 1", // suffix
+ })
+ }
+ }
+ for _, bench := range benches {
+ b.Run(bench[0], func(b *testing.B) {
+ benchmarkOperation(b, bench[1], bench[2], bench[3])
+ })
+ }
+}
+
func TestEvalVersions(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
text := `intcblock 1
@@ -4534,6 +4631,8 @@ func TestLog(t *testing.T) {
source string
runMode runMode
errContains string
+ // For cases where assembly fails, we supply the assembled bytes manually
+ assembledBytes []byte
}{
{
source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", maxLogSize+1)),
@@ -4561,9 +4660,10 @@ func TestLog(t *testing.T) {
runMode: modeApp,
},
{
- source: `load 0; log`,
- errContains: "log arg 0 wanted []byte but got uint64",
- runMode: modeApp,
+ source: `load 0; log`,
+ errContains: "log arg 0 wanted []byte but got uint64",
+ runMode: modeApp,
+ assembledBytes: []byte{byte(ep.Proto.LogicSigVersion), 0x34, 0x00, 0xb0},
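+ // version byte first, then 0x34 0x00 (load 0) and 0xb0 (log)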
},
{
source: `byte "a logging message"; log; int 1`,
@@ -4575,7 +4675,11 @@ func TestLog(t *testing.T) {
for _, c := range failCases {
switch c.runMode {
case modeApp:
- testApp(t, c.source, ep, c.errContains)
+ if c.assembledBytes == nil {
+ testApp(t, c.source, ep, c.errContains)
+ } else {
+ testAppBytes(t, c.assembledBytes, ep, c.errContains)
+ }
default:
testLogic(t, c.source, AssemblerMaxVersion, ep, c.errContains, c.errContains)
}
@@ -4649,12 +4753,12 @@ By Herman Melville`, "",
{"cGFk=", "StdEncoding", "pad", "input byte 4"},
{"cGFk==", "StdEncoding", "pad", "input byte 4"},
{"cGFk===", "StdEncoding", "pad", "input byte 4"},
- // Ensures that even correct padding is illegal if not needed
+ // Ensures that extra padding is illegal, even if it brings the length to 0 mod 4
{"cGFk====", "StdEncoding", "pad", "input byte 4"},
- // Test that padding must be present to make len = 0 mod 4.
+ // Test that padding must be correct or absent
{"bm9wYWQ=", "StdEncoding", "nopad", ""},
- {"bm9wYWQ", "StdEncoding", "nopad", "illegal"},
+ {"bm9wYWQ", "StdEncoding", "nopad", ""},
{"bm9wYWQ==", "StdEncoding", "nopad", "illegal"},
{"YWJjMTIzIT8kKiYoKSctPUB+", "StdEncoding", "abc123!?$*&()'-=@~", ""},
@@ -4690,15 +4794,15 @@ By Herman Melville`, "",
{"\rS\r\nQ=\n=\r\r\n", "StdEncoding", "I", ""},
{"\rS\r\nQ=\n=\r\r\n", "URLEncoding", "I", ""},
- // Padding necessary? - Yes it is! And exactly the expected place and amount.
+ // If padding is there, it must be correct, but if absent, that's fine.
{"SQ==", "StdEncoding", "I", ""},
{"SQ==", "URLEncoding", "I", ""},
{"S=Q=", "StdEncoding", "", "byte 1"},
{"S=Q=", "URLEncoding", "", "byte 1"},
{"=SQ=", "StdEncoding", "", "byte 0"},
{"=SQ=", "URLEncoding", "", "byte 0"},
- {"SQ", "StdEncoding", "", "byte 0"},
- {"SQ", "URLEncoding", "", "byte 0"},
+ {"SQ", "StdEncoding", "I", ""},
+ {"SQ", "URLEncoding", "I", ""},
{"SQ=", "StdEncoding", "", "byte 3"},
{"SQ=", "URLEncoding", "", "byte 3"},
{"SQ===", "StdEncoding", "", "byte 4"},
@@ -4721,17 +4825,6 @@ By Herman Melville`, "",
if LogicVersion < fidoVersion {
testProg(t, source, AssemblerMaxVersion, Expect{0, "unknown opcode..."})
} else {
- // sanity check - test the helper function first:
- encoding := base64.URLEncoding
- if tc.alph == "StdEncoding" {
- encoding = base64.StdEncoding
- }
- encoding = encoding.Strict()
- decoded, err := base64Decode([]byte(tc.encoded), encoding)
- require.NoError(t, err)
- require.Equal(t, string(decoded), tc.decoded)
-
- // now check eval:
testAccepts(t, source, fidoVersion)
}
} else {
@@ -4739,6 +4832,7 @@ By Herman Melville`, "",
testProg(t, source, AssemblerMaxVersion, Expect{0, "unknown opcode..."})
} else {
err := testPanics(t, source, fidoVersion)
+ require.Error(t, err)
require.Contains(t, err.Error(), tc.error)
}
}
@@ -4790,42 +4884,55 @@ int ` + fmt.Sprintf("%d", 20_000-3-6) + ` // base64_decode cost = 6 (68 bytes ->
testAccepts(t, source, fidoVersion)
}
-func TestHasDuplicateKeys(t *testing.T) {
+func TestIsPrimitive(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
testCases := []struct {
text []byte
}{
{
- text: []byte(`{"key0": "1","key0": "2", "key1":1}`),
+ text: []byte(`null`),
},
{
- text: []byte(`{"key0": "1","key1": [1], "key0":{"key2": "a"}}`),
+ text: []byte(`[1, 2, 3]`),
+ },
+ {
+ text: []byte(`2`),
},
}
for _, s := range testCases {
- hasDuplicates, _, err := hasDuplicateKeys(s.text)
+ isPrimitive, err := isPrimitiveJSON(s.text)
require.Nil(t, err)
- require.True(t, hasDuplicates)
+ require.True(t, isPrimitive)
}
- noDuplicates := []struct {
+ notPrimitive := []struct {
text []byte
}{
{
text: []byte(`{"key0": "1","key1": "2", "key2":3}`),
},
{
- text: []byte(`{"key0": "1","key1": [{"key0":1,"key0":2},{"key0":1,"key0":2}], "key2":{"key5": "a","key5": "b"}}`),
+ text: []byte(`{}`),
},
}
- for _, s := range noDuplicates {
- hasDuplicates, _, err := hasDuplicateKeys(s.text)
+ for _, s := range notPrimitive {
+ primitive, err := isPrimitiveJSON(s.text)
require.Nil(t, err)
- require.False(t, hasDuplicates)
+ require.False(t, primitive)
}
}
+func TestProtocolParseDuplicateErrMsg(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ text := `{"key0": "algo", "key0": "algo"}`
+ var parsed map[string]json.RawMessage
+ err := protocol.DecodeJSON([]byte(text), &parsed)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "cannot decode into a non-pointer value")
+}
+
func TestOpJSONRef(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -4963,6 +5070,33 @@ func TestOpJSONRef(t *testing.T) {
==`,
previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
},
+ // JavaScript MAX_SAFE_INTEGER
+ {
+ source: `byte "{\"maxSafeInt\": 9007199254740991}";
+ byte "maxSafeInt";
+ json_ref JSONUint64;
+ int 9007199254740991;
+ ==`,
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
+ // maximum uint64
+ {
+ source: `byte "{\"maxUint64\": 18446744073709551615}";
+ byte "maxUint64";
+ json_ref JSONUint64;
+ int 18446744073709551615;
+ ==`,
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
+ // larger-than-uint64s are allowed if not requested
+ {
+ source: `byte "{\"maxUint64\": 18446744073709551616, \"smallUint64\": 0}";
+ byte "smallUint64";
+ json_ref JSONUint64;
+ int 0;
+ ==`,
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
}
for _, s := range testCases {
@@ -4988,6 +5122,9 @@ func TestOpJSONRef(t *testing.T) {
pass, _, err := EvalContract(ops.Program, 0, 888, ep)
require.NoError(t, err)
require.True(t, pass)
+
+ // reset pooled budget for new "app call"
+ *ep.PooledApplicationBudget = ep.Proto.MaxAppProgramCost
}
failedCases := []struct {
@@ -5106,11 +5243,11 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "key4";
json_ref JSONObject;
- byte "key40"
+ byte "key40";
json_ref JSONString
`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {12, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {13, "unknown opcode: json_ref"}},
},
{
source: `byte "[1,2,3]";
@@ -5152,6 +5289,25 @@ func TestOpJSONRef(t *testing.T) {
error: "error while parsing JSON text, invalid json text, only json object is allowed",
previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
},
+ {
+ source: `byte "{noquotes: \"shouldn't work\"}";
+ byte "noquotes";
+ json_ref JSONString;
+ byte "shouldn't work";
+ ==`,
+ error: "error while parsing JSON text, invalid json text",
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
+ // max uint64 + 1 should fail
+ {
+ source: `byte "{\"tooBig\": 18446744073709551616}";
+ byte "tooBig";
+ json_ref JSONUint64;
+ int 1;
+ return`,
+ error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
}
for _, s := range failedCases {
@@ -5180,6 +5336,9 @@ func TestOpJSONRef(t *testing.T) {
require.False(t, pass)
require.Error(t, err)
require.EqualError(t, err, s.error)
+
+ // reset pooled budget for new "app call"
+ *ep.PooledApplicationBudget = ep.Proto.MaxAppProgramCost
}
}
diff --git a/data/transactions/logic/jsonspec.md b/data/transactions/logic/jsonspec.md
index e747e40f5..817c01ece 100644
--- a/data/transactions/logic/jsonspec.md
+++ b/data/transactions/logic/jsonspec.md
@@ -12,7 +12,7 @@ Additional specifications used by **json_ref** that are extensions to the RFC715
- The byte order mark (BOM), "\uFEFF", is not allowed at the beginning of a JSON text
- Raw non-unicode characters not accepted
-## Invalid JSON text
+### Invalid JSON text
```json
\uFEFF{"key0": 1}
@@ -105,10 +105,6 @@ Comment blocks are not accepted.
```
```json
-{"key0": "algo"}/*comment*/
-```
-
-```json
{"key0": [1,/*comment*/,3]}
```
diff --git a/data/transactions/logic/jsonspec_test.go b/data/transactions/logic/jsonspec_test.go
index 7ab173a0f..3ebe131e8 100644
--- a/data/transactions/logic/jsonspec_test.go
+++ b/data/transactions/logic/jsonspec_test.go
@@ -58,9 +58,6 @@ func TestParseComments(t *testing.T) {
text := `{"key0": /*comment*/"algo"}`
_, err := parseJSON([]byte(text))
require.Error(t, err)
- text = `{"key0": "algo"}/*comment*/`
- _, err = parseJSON([]byte(text))
- require.Error(t, err)
text = `{"key0": [1,/*comment*/,3]}`
_, err = parseJSON([]byte(text))
require.Error(t, err)
@@ -210,7 +207,6 @@ func TestParseKeys(t *testing.T) {
text = `{1: 1}`
_, err = parseJSON([]byte(text))
require.Error(t, err)
-
}
func TestParseFileEncoding(t *testing.T) {
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index 7a6cef2d9..35c239c14 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1239,7 +1239,7 @@
]
},
{
- "Opcode": 92,
+ "Opcode": 94,
"Name": "base64_decode",
"Args": "B",
"Returns": "B",
@@ -1252,7 +1252,7 @@
]
},
{
- "Opcode": 93,
+ "Opcode": 95,
"Name": "json_ref",
"Args": "BB",
"Returns": ".",
@@ -1638,6 +1638,42 @@
]
},
{
+ "Opcode": 153,
+ "Name": "bn256_add",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "for (curve points A and B) return the curve point A + B",
+ "DocExtra": "A, B are curve points in G1 group. Each point consists of (X, Y) where X and Y are 256 bit integers, big-endian encoded. The encoded point is 64 bytes from concatenation of 32 byte X and 32 byte Y.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 154,
+ "Name": "bn256_scalar_mul",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "for (curve point A, scalar K) return the curve point KA",
+ "DocExtra": "A is a curve point in G1 Group and encoded as described in `bn256_add`. Scalar K is a big-endian encoded big integer that has no padding zeros.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
+ "Opcode": 155,
+ "Name": "bn256_pairing",
+ "Args": "BB",
+ "Returns": "U",
+ "Size": 1,
+ "Doc": "for (points in G1 group G1s, points in G2 group G2s), return whether they are paired =\u003e {0 or 1}",
+ "DocExtra": "G1s are encoded by the concatenation of encoded G1 points, as described in `bn256_add`. G2s are encoded by the concatenation of encoded G2 points. Each G2 is in form (XA0+i*XA1, YA0+i*YA1) and encoded by big-endian field element XA0, XA1, YA0 and YA1 in sequence.",
+ "Groups": [
+ "Arithmetic"
+ ]
+ },
+ {
"Opcode": 160,
"Name": "b+",
"Args": "BB",
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index a486a8d3d..bd4f5ca05 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -57,13 +57,22 @@ const txnEffectsVersion = 6
// the Foreign arrays.
const createdResourcesVersion = 6
-// experimental-
-const fidoVersion = 7 // base64, json, secp256r1
+// appAddressAvailableVersion is the first version that allows access to the
+// accounts of applications that were provided in the foreign apps transaction
+// field.
+const appAddressAvailableVersion = 7
+
+// EXPERIMENTAL. These should be revisited whenever a new LogicSigVersion is
+// moved from vFuture to a new consensus version. If they remain unready, bump
+// their version, and fixup TestAssemble() in assembler_test.go.
+const fidoVersion = 7 // base64, json, secp256r1
+const pairingVersion = 7 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
type linearCost struct {
baseCost int
chunkCost int
chunkSize int
+ depth int
}
// divideCeilUnsafely provides `math.Ceil` semantics using integer division. The technique avoids slower floating point operations as suggested in https://stackoverflow.com/a/2745086.
@@ -76,22 +85,24 @@ func (lc *linearCost) compute(stack []stackValue) int {
cost := lc.baseCost
if lc.chunkCost != 0 && lc.chunkSize != 0 {
// Uses divideCeilUnsafely rather than (len/size) to match how Ethereum discretizes hashing costs.
- cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1].Bytes), lc.chunkSize)
+ cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1-lc.depth].Bytes), lc.chunkSize)
}
return cost
}
-func (lc *linearCost) docCost() string {
+func (lc *linearCost) docCost(argLen int) string {
if *lc == (linearCost{}) {
return ""
}
if lc.chunkCost == 0 {
return strconv.Itoa(lc.baseCost)
}
+ idxFromStart := argLen - lc.depth - 1
+ stackArg := rune(int('A') + idxFromStart)
if lc.chunkSize == 1 {
- return fmt.Sprintf("%d + %d per byte", lc.baseCost, lc.chunkCost)
+ return fmt.Sprintf("%d + %d per byte of %c", lc.baseCost, lc.chunkCost, stackArg)
}
- return fmt.Sprintf("%d + %d per %d bytes", lc.baseCost, lc.chunkCost, lc.chunkSize)
+ return fmt.Sprintf("%d + %d per %d bytes of %c", lc.baseCost, lc.chunkCost, lc.chunkSize, stackArg)
}
// OpDetails records details such as non-standard costs, immediate arguments, or
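The new `depth` field lets a linear-cost opcode charge by a stack argument other than the top; `json_ref` below uses `costByLength(25, 2, 7, 1)` so the charge tracks the JSON text sitting one slot under the key. A runnable sketch of the `compute` arithmetic:

```go
package main

import "fmt"

// linearCostOf mirrors linearCost.compute above: the sized argument sits
// `depth` entries below the top of the stack (depth 0 is the top).
func linearCostOf(base, chunkCost, chunkSize, depth int, stack [][]byte) int {
	n := chunkCost * len(stack[len(stack)-1-depth])
	return base + (n+chunkSize-1)/chunkSize // ceiling division
}

func main() {
	// json_ref: costByLength(25, 2, 7, 1); the JSON text (arg A) is below
	// the key (arg B), so depth is 1.
	stack := [][]byte{[]byte(`{"key0":"value0"}`), []byte("key0")}
	fmt.Println(linearCostOf(25, 2, 7, 1, stack)) // 25 + ceil(2*17/7) = 30
}
```

With those same numbers, the updated docCost renders the human-readable string "25 + 2 per 7 bytes of A", naming the charged argument.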
@@ -110,8 +121,8 @@ type OpDetails struct {
Immediates []immediate // details of each immediate arg to opcode
}
-func (d *OpDetails) docCost() string {
- cost := d.FullCost.docCost()
+func (d *OpDetails) docCost(argLen int) string {
+ cost := d.FullCost.docCost(argLen)
if cost != "" {
return cost
}
@@ -139,7 +150,7 @@ func (d *OpDetails) docCost() string {
// both static (the program, which can be used to find the immediate values
// supplied), and dynamic (the stack, which can be used to find the run-time
// arguments supplied). Cost is used at run-time. docCost returns similar
-// information in human-reable form.
+// information in human-readable form.
func (d *OpDetails) Cost(program []byte, pc int, stack []stackValue) int {
cost := d.FullCost.compute(stack)
if cost != 0 {
@@ -206,9 +217,9 @@ func (d OpDetails) only(m runMode) OpDetails {
return clone
}
-func (d OpDetails) costByLength(initial, perChunk, chunkSize int) OpDetails {
+func (d OpDetails) costByLength(initial, perChunk, chunkSize, depth int) OpDetails {
clone := d
- clone.FullCost = costByLength(initial, perChunk, chunkSize).FullCost
+ clone.FullCost = costByLength(initial, perChunk, chunkSize, depth).FullCost
return clone
}
@@ -255,12 +266,12 @@ func costByField(immediate string, group *FieldGroup, costs []int) OpDetails {
return opd
}
-func costByLength(initial int, perChunk int, chunkSize int) OpDetails {
+func costByLength(initial, perChunk, chunkSize, depth int) OpDetails {
if initial < 1 || perChunk <= 0 || chunkSize < 1 || chunkSize > maxStringSize {
panic("bad cost configuration")
}
d := opDefault()
- d.FullCost = linearCost{initial, perChunk, chunkSize}
+ d.FullCost = linearCost{initial, perChunk, chunkSize, depth}
return d
}
@@ -435,8 +446,8 @@ var OpSpecs = []OpSpec{
{0x32, "global", opGlobal, proto(":a"), 1, field("f", &GlobalFields)},
{0x33, "gtxn", opGtxn, proto(":a"), 1, immediates("t", "f").field("f", &TxnScalarFields)},
{0x33, "gtxn", opGtxn, proto(":a"), 2, immediates("t", "f").field("f", &TxnFields).assembler(asmGtxn2)},
- {0x34, "load", opLoad, proto(":a"), 1, immediates("i")},
- {0x35, "store", opStore, proto("a:"), 1, immediates("i")},
+ {0x34, "load", opLoad, proto(":a"), 1, stacky(typeLoad, "i")},
+ {0x35, "store", opStore, proto("a:"), 1, stacky(typeStore, "i")},
{0x36, "txna", opTxna, proto(":a"), 2, immediates("f", "i").field("f", &TxnArrayFields)},
{0x37, "gtxna", opGtxna, proto(":a"), 2, immediates("t", "f", "i").field("f", &TxnArrayFields)},
// Like gtxn, but gets txn index from stack, rather than immediate arg
@@ -450,8 +461,8 @@ var OpSpecs = []OpSpec{
{0x3d, "gaids", opGaids, proto("i:i"), 4, only(modeApp)},
// Like load/store, but scratch slot taken from TOS instead of immediate
- {0x3e, "loads", opLoads, proto("i:a"), 5, opDefault()},
- {0x3f, "stores", opStores, proto("ia:"), 5, opDefault()},
+ {0x3e, "loads", opLoads, proto("i:a"), 5, stacky(typeLoads)},
+ {0x3f, "stores", opStores, proto("ia:"), 5, stacky(typeStores)},
{0x40, "bnz", opBnz, proto("i:"), 1, opBranch()},
{0x41, "bz", opBz, proto("i:"), 2, opBranch()},
@@ -482,8 +493,9 @@ var OpSpecs = []OpSpec{
{0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, opDefault()},
{0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, opDefault()},
{0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, opDefault()},
- {0x5c, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16)},
- {0x5d, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes)},
+
+ {0x5e, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16, 0)},
+ {0x5f, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes).costByLength(25, 2, 7, 1)},
{0x60, "balance", opBalance, proto("i:i"), 2, only(modeApp)},
{0x60, "balance", opBalance, proto("a:i"), directRefEnabledVersion, only(modeApp)},
@@ -532,11 +544,15 @@ var OpSpecs = []OpSpec{
{0x96, "bsqrt", opBytesSqrt, proto("b:b"), 6, costly(40)},
{0x97, "divw", opDivw, proto("iii:i"), 6, opDefault()},
{0x98, "sha3_256", opSHA3_256, proto("b:b"), 7, costly(130)},
-
/* Will end up following keccak256 -
{0x98, "sha3_256", opSHA3_256, proto("b:b"), unlimitedStorage, costByLength(58, 4, 8)},},
*/
+ {0x99, "bn256_add", opBn256Add, proto("bb:b"), pairingVersion, costly(70)},
+ {0x9a, "bn256_scalar_mul", opBn256ScalarMul, proto("bb:b"), pairingVersion, costly(970)},
+ {0x9b, "bn256_pairing", opBn256Pairing, proto("bb:i"), pairingVersion, costly(8700)},
+ // leave room here for eip-2537 style opcodes
+
// Byteslice math.
{0xa0, "b+", opBytesPlus, proto("bb:b"), 4, costly(10)},
{0xa1, "b-", opBytesMinus, proto("bb:b"), 4, costly(10)},
diff --git a/data/transactions/logic/pairing.go b/data/transactions/logic/pairing.go
new file mode 100644
index 000000000..cb43efeb5
--- /dev/null
+++ b/data/transactions/logic/pairing.go
@@ -0,0 +1,116 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "errors"
+ "math/big"
+
+ "github.com/consensys/gnark-crypto/ecc/bn254"
+ "github.com/consensys/gnark-crypto/ecc/bn254/fp"
+)
+
+func bytesToBN254Field(b []byte) (ret fp.Element) {
+ ret.SetBytes(b)
+ return
+}
+
+func bytesToBN254G1(b []byte) (ret bn254.G1Affine) {
+ ret.X = bytesToBN254Field(b[:32])
+ ret.Y = bytesToBN254Field(b[32:64])
+ return
+}
+
+func bytesToBN254G1s(b []byte) (ret []bn254.G1Affine) {
+ for i := 0; i < len(b)/64; i++ {
+ ret = append(ret, bytesToBN254G1(b[(i*64):(i*64+64)]))
+ }
+ return
+}
+
+func bytesToBN254G2(b []byte) (ret bn254.G2Affine) {
+ ret.X.A0 = bytesToBN254Field(b[:32])
+ ret.X.A1 = bytesToBN254Field(b[32:64])
+ ret.Y.A0 = bytesToBN254Field(b[64:96])
+ ret.Y.A1 = bytesToBN254Field(b[96:128])
+ return
+}
+
+func bytesToBN254G2s(b []byte) (ret []bn254.G2Affine) {
+ for i := 0; i < len(b)/128; i++ {
+ ret = append(ret, bytesToBN254G2(b[(i*128):(i*128+128)]))
+ }
+ return
+}
+
+func bN254G1ToBytes(g1 *bn254.G1Affine) (ret []byte) {
+ retX := g1.X.Bytes()
+ retY := g1.Y.Bytes()
+ ret = append(retX[:], retY[:]...)
+ return
+}
+
+func opBn256Add(cx *EvalContext) error {
+ last := len(cx.stack) - 1
+ prev := last - 1
+ aBytes := cx.stack[prev].Bytes
+ bBytes := cx.stack[last].Bytes
+ if len(aBytes) != 64 || len(bBytes) != 64 {
+ return errors.New("expect G1 in 64 bytes")
+ }
+ a := bytesToBN254G1(aBytes)
+ b := bytesToBN254G1(bBytes)
+ res := new(bn254.G1Affine).Add(&a, &b)
+ resBytes := bN254G1ToBytes(res)
+ cx.stack = cx.stack[:last]
+ cx.stack[prev].Bytes = resBytes
+ return nil
+}
+
+func opBn256ScalarMul(cx *EvalContext) error {
+ last := len(cx.stack) - 1
+ prev := last - 1
+ aBytes := cx.stack[prev].Bytes
+ if len(aBytes) != 64 {
+ return errors.New("expect G1 in 64 bytes")
+ }
+ a := bytesToBN254G1(aBytes)
+ kBytes := cx.stack[last].Bytes
+ k := new(big.Int).SetBytes(kBytes[:])
+ res := new(bn254.G1Affine).ScalarMultiplication(&a, k)
+ resBytes := bN254G1ToBytes(res)
+ cx.stack = cx.stack[:last]
+ cx.stack[prev].Bytes = resBytes
+ return nil
+}
+
+func opBn256Pairing(cx *EvalContext) error {
+ last := len(cx.stack) - 1
+ prev := last - 1
+ g1Bytes := cx.stack[prev].Bytes
+ g2Bytes := cx.stack[last].Bytes
+ g1 := bytesToBN254G1s(g1Bytes)
+ g2 := bytesToBN254G2s(g2Bytes)
+ ok, err := bn254.PairingCheck(g1, g2)
+ if err != nil {
+ return errors.New("pairing failed")
+ }
+ cx.stack = cx.stack[:last]
+ cx.stack[prev].Uint = boolToUint(ok)
+ cx.stack[prev].Bytes = nil
+ return nil
+}
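`opBn256Pairing` delegates to `bn254.PairingCheck`, which reports whether the product of pairings e(P_i, Q_i) equals the identity. A standalone sanity check under that reading, assuming the gnark-crypto v0.7.0 API pinned in go.mod below:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bn254"
)

func main() {
	// e(aG1, G2) * e(-aG1, G2) == 1, so the check reports true.
	_, _, g1, g2 := bn254.Generators()
	var a, aNeg bn254.G1Affine
	a.ScalarMultiplication(&g1, big.NewInt(7))
	aNeg.Neg(&a)
	ok, err := bn254.PairingCheck(
		[]bn254.G1Affine{a, aNeg},
		[]bn254.G2Affine{g2, g2},
	)
	fmt.Println(ok, err) // true <nil>
}
```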
diff --git a/data/transactions/logic/pairing_test.go b/data/transactions/logic/pairing_test.go
new file mode 100644
index 000000000..75f6e2bc2
--- /dev/null
+++ b/data/transactions/logic/pairing_test.go
@@ -0,0 +1,29 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+const pairingNonsense = `
+ pushbytes 0x012345
+ dup
+ bn256_add
+ dup
+ bn256_scalar_mul
+ dup
+ bn256_pairing
+`
+
+const pairingCompiled = "80030123454999499a499b"
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index efc093602..5219dbcd4 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -76,7 +76,7 @@
},
{
"name": "keyword.operator.teal",
- "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
+ "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|bn256_add|bn256_pairing|bn256_scalar_mul|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
}
]
},
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 5c67f64dd..1c63c0c8a 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -726,9 +726,9 @@ func ProgramVersion(bytecode []byte) (version uint64, length int, err error) {
// matching versions between approval and clearstate.
const syncProgramsVersion = 6
-// CheckContractVersions ensures that for v6 and higher two programs are version
-// matched, and that they are not a downgrade. If proto.AllowV4InnerAppls, then
-// no downgrades are allowed, regardless of version.
+// CheckContractVersions ensures that for syncProgramsVersion and higher, two programs are version
+// matched, and that they are not a downgrade. If either program version is
+// >= proto.MinInnerApplVersion, downgrade of that program is not allowed.
func CheckContractVersions(approval []byte, clear []byte, previous basics.AppParams, proto *config.ConsensusParams) error {
av, _, err := ProgramVersion(approval)
if err != nil {
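Restating the revised rule as a standalone sketch (names and the version plumbing are illustrative; the real function extracts versions from the program bytecode):

```go
package main

import "fmt"

// checkVersions restates the comment above: at or beyond syncVer the two
// programs must match, and a program at or beyond minInnerVer must not be
// downgraded relative to its previous version.
func checkVersions(av, cv, prevAv, prevCv, syncVer, minInnerVer uint64) error {
	if (av >= syncVer || cv >= syncVer) && av != cv {
		return fmt.Errorf("program versions mismatch: %d != %d", av, cv)
	}
	if prevAv >= minInnerVer && av < prevAv {
		return fmt.Errorf("approval program downgrade: %d < %d", av, prevAv)
	}
	if prevCv >= minInnerVer && cv < prevCv {
		return fmt.Errorf("clear state program downgrade: %d < %d", cv, prevCv)
	}
	return nil
}

func main() {
	fmt.Println(checkVersions(6, 5, 5, 5, 6, 4)) // mismatch once v6 applies
}
```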
diff --git a/go.mod b/go.mod
index d9a52951f..5ab375956 100644
--- a/go.mod
+++ b/go.mod
@@ -13,6 +13,7 @@ require (
github.com/algorand/websocket v1.4.5
github.com/aws/aws-sdk-go v1.16.5
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e
+ github.com/consensys/gnark-crypto v0.7.0
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018
github.com/dchest/siphash v1.2.1
github.com/fatih/color v1.7.0
@@ -30,8 +31,8 @@ require (
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v0.0.3
github.com/stretchr/testify v1.7.1
- golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
+ golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
+ golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8
golang.org/x/text v0.3.7
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
)
@@ -53,6 +54,7 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.7 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
+ github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
@@ -62,7 +64,7 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.1 // indirect
- golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect
+ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
diff --git a/go.sum b/go.sum
index 1c807fefb..93b075f40 100644
--- a/go.sum
+++ b/go.sum
@@ -19,6 +19,8 @@ github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc
github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
+github.com/consensys/gnark-crypto v0.7.0 h1:rwdy8+ssmLYRqKp+ryRRgQJl/rCq2uv+n83cOydm5UE=
+github.com/consensys/gnark-crypto v0.7.0/go.mod h1:KPSuJzyxkJA8xZ/+CV47tyqkr9MmpZA3PXivK4VPrVg=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -53,6 +55,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -81,6 +84,7 @@ github.com/labstack/echo/v4 v4.1.17 h1:PQIBaRplyRy3OjwILGkPg89JRtH2x5bssi59G2EL3
github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -99,6 +103,9 @@ github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK86
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
@@ -140,8 +147,9 @@ golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 h1:S25/rfnfsMVgORT4/J61MJ7rdyseOZOyvLIrZEZ7s6s=
+golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
@@ -153,8 +161,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
@@ -174,8 +183,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/installer/config.json.example b/installer/config.json.example
index b7369acfa..c569e4c93 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 21,
+ "Version": 22,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 7,
@@ -47,6 +47,7 @@
"EnableProcessBlockStats": false,
"EnableProfiler": false,
"EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
"EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
diff --git a/installer/external/node_exporter-stable-darwin-x86_64.tar.gz b/installer/external/node_exporter-stable-darwin-x86_64.tar.gz
index 26faf2d51..2b25a6e30 100644
--- a/installer/external/node_exporter-stable-darwin-x86_64.tar.gz
+++ b/installer/external/node_exporter-stable-darwin-x86_64.tar.gz
Binary files differ
diff --git a/installer/external/node_exporter-stable-linux-x86_64.tar.gz b/installer/external/node_exporter-stable-linux-x86_64.tar.gz
index e3e7290b6..c9368910c 100644
--- a/installer/external/node_exporter-stable-linux-x86_64.tar.gz
+++ b/installer/external/node_exporter-stable-linux-x86_64.tar.gz
Binary files differ
diff --git a/ledger/.gitignore b/ledger/.gitignore
new file mode 100644
index 000000000..5def1ed41
--- /dev/null
+++ b/ledger/.gitignore
@@ -0,0 +1 @@
+catchpoints
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index b771d1532..ca4432b6a 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -237,7 +237,7 @@ func TestAccountDBRound(t *testing.T) {
numElementsPerSegment := 10
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
ctbsList, randomCtbs := randomCreatables(numElementsPerSegment)
expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
var baseAccounts lruAccounts
@@ -247,7 +247,7 @@ func TestAccountDBRound(t *testing.T) {
baseResources.init(nil, 100, 80)
for i := 1; i < 10; i++ {
var updates ledgercore.AccountDeltas
- updates, newacctsTotals, _, lastCreatableID = ledgertesting.RandomDeltasFull(20, accts, 0, lastCreatableID)
+ updates, newacctsTotals, _ = ledgertesting.RandomDeltasFull(20, accts, 0, &lastCreatableID)
totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals)
accts = applyPartialDeltas(accts, updates)
ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs,
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index d2faf6ec7..d597dfdbc 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -441,7 +441,7 @@ func TestAcctUpdates(t *testing.T) {
checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto)
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
knownCreatables := make(map[basics.CreatableIndex]bool)
start := basics.Round(10)
@@ -452,7 +452,8 @@ func TestAcctUpdates(t *testing.T) {
var updates ledgercore.AccountDeltas
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals = ledgertesting.RandomDeltasBalancedFull(
+ 1, base, rewardLevel, &lastCreatableID)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, i-1, prevRound)
require.NoError(t, err)
@@ -2221,7 +2222,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto)
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
knownCreatables := make(map[basics.CreatableIndex]bool)
for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
@@ -2230,7 +2231,8 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
var updates ledgercore.AccountDeltas
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals = ledgertesting.RandomDeltasBalancedFull(
+ 1, base, rewardLevel, &lastCreatableID)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, i-1, prevRound)
require.NoError(t, err)
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index eb812937f..025c7ebfe 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -433,7 +433,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
const testCatchpointLabelsCount = 5
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
knownCreatables := make(map[basics.CreatableIndex]bool)
catchpointLabels := make(map[basics.Round]string)
ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
@@ -444,7 +444,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
var updates ledgercore.AccountDeltas
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, &lastCreatableID)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, i-1, prevRound)
require.NoError(t, err)
diff --git a/ledger/internal/apptxn_test.go b/ledger/internal/apptxn_test.go
index a4d9da8e0..94f72b0e2 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/internal/apptxn_test.go
@@ -1890,7 +1890,7 @@ func TestInnerAppVersionCalling(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // 31 allowed inner appls. vFuture enables proto.AllowV4InnerAppls (presumed v33, below)
+ // 31 allowed inner appls. v33 lowered proto.MinInnerApplVersion
testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
defer dl.Close()
@@ -1994,7 +1994,7 @@ itxn_submit`,
createAndOptin.ApplicationArgs = [][]byte{six.Program, six.Program}
dl.txn(&createAndOptin, "overspend") // passed the checks, but is an overspend
} else {
- // after 32 proto.AllowV4InnerAppls should be in effect, so calls and optins to v5 are ok
+ // after 32 proto.MinInnerApplVersion is lowered to 4, so calls and optins to v5 are ok
dl.txn(&call, "overspend") // it tried to execute, but test doesn't bother funding
dl.txn(&optin, "overspend") // it tried to execute, but test doesn't bother funding
optin.ForeignApps[0] = v5withv3csp // but we can't optin to a v5 if it has an old csp
@@ -2070,6 +2070,10 @@ func TestAppDowngrade(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+ two, err := logic.AssembleStringWithVersion("int 1", 2)
+ require.NoError(t, err)
+ three, err := logic.AssembleStringWithVersion("int 1", 3)
+ require.NoError(t, err)
four, err := logic.AssembleStringWithVersion("int 1", 4)
require.NoError(t, err)
five, err := logic.AssembleStringWithVersion("int 1", 5)
@@ -2078,6 +2082,40 @@ func TestAppDowngrade(t *testing.T) {
require.NoError(t, err)
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ // Confirm that in old protocol version, downgrade is legal
+ // Start at 28 because we want a v4 app to downgrade to v3
+ testConsensusRange(t, 28, 30, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ create := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: four.Program,
+ ClearStateProgram: four.Program,
+ }
+
+ vb := dl.fullBlock(&create)
+ app := vb.Block().Payset[0].ApplicationID
+
+ update := txntest.Txn{
+ Type: "appl",
+ ApplicationID: app,
+ OnCompletion: transactions.UpdateApplicationOC,
+ Sender: addrs[0],
+ ApprovalProgram: three.Program,
+ ClearStateProgram: three.Program,
+ }
+
+ // No change - legal
+ dl.fullBlock(&update)
+
+ update.ApprovalProgram = two.Program
+ // Also legal, and let's check mismatched version while we're at it.
+ dl.fullBlock(&update)
+ })
+
testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
defer dl.Close()
@@ -2112,7 +2150,7 @@ func TestAppDowngrade(t *testing.T) {
update.ClearStateProgram = five.Program
dl.fullBlock(&update)
- // Downgrade (allowed for pre 6 programs until AllowV4InnerAppls)
+ // Downgrade (allowed for pre 6 programs until MinInnerApplVersion was lowered)
update.ClearStateProgram = four.Program
if ver <= 32 {
dl.fullBlock(update.Noted("actually a repeat of first upgrade"))
@@ -3058,3 +3096,202 @@ check:
txns(t, l, eval, &fundA, &callA)
endBlock(t, l, eval)
}
+
+func TestForeignAppAccountsAccessible(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ appA := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ }
+
+ appB := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+itxn_begin
+ int pay; itxn_field TypeEnum
+ int 100; itxn_field Amount
+ txn Applications 1
+ app_params_get AppAddress
+ assert
+ itxn_field Receiver
+itxn_submit
+`),
+ }
+
+ vb := dl.fullBlock(&appA, &appB)
+ index0 := vb.Block().Payset[0].ApplicationID
+ index1 := vb.Block().Payset[1].ApplicationID
+
+ fund1 := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: index1.Address(),
+ Amount: 1_000_000_000,
+ }
+ fund0 := fund1
+ fund0.Receiver = index0.Address()
+
+ callTx := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: index1,
+ ForeignApps: []basics.AppIndex{index0},
+ }
+
+ dl.beginBlock()
+ if ver <= 32 {
+ dl.txgroup("invalid Account reference", &fund0, &fund1, &callTx)
+ dl.endBlock()
+ return
+ }
+
+ dl.txgroup("", &fund0, &fund1, &callTx)
+ vb = dl.endBlock()
+
+ require.Equal(t, index0.Address(), vb.Block().Payset[2].EvalDelta.InnerTxns[0].Txn.Receiver)
+ require.Equal(t, uint64(100), vb.Block().Payset[2].EvalDelta.InnerTxns[0].Txn.Amount.Raw)
+ })
+}
+
+// While accounts of foreign apps are available in most contexts, they still
+// cannot be used as mutable references; i.e., the accounts cannot be used by
+// opcodes that modify local storage.
+func TestForeignAppAccountsImmutable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ appA := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ }
+
+ appB := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+txn Applications 1
+app_params_get AppAddress
+byte "X"
+byte "ABC"
+app_local_put
+int 1
+`),
+ }
+
+ vb := dl.fullBlock(&appA, &appB)
+ index0 := vb.Block().Payset[0].ApplicationID
+ index1 := vb.Block().Payset[1].ApplicationID
+
+ fund1 := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: index1.Address(),
+ Amount: 1_000_000_000,
+ }
+ fund0 := fund1
+ fund0.Receiver = index0.Address()
+
+ callTx := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: index1,
+ ForeignApps: []basics.AppIndex{index0},
+ }
+
+ dl.beginBlock()
+ dl.txgroup("invalid Account reference", &fund0, &fund1, &callTx)
+ dl.endBlock()
+ })
+}
+
+// In the case where the foreign app account is also provided in the
+// transaction's account field, mutable references should be allowed.
+func TestForeignAppAccountsMutable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ testConsensusRange(t, 32, 0, func(t *testing.T, ver int) {
+ dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
+ defer dl.Close()
+
+ appA := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+itxn_begin
+ int appl
+ itxn_field TypeEnum
+ txn Applications 1
+ itxn_field ApplicationID
+ int OptIn
+ itxn_field OnCompletion
+itxn_submit
+`),
+ }
+
+ appB := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+txn OnCompletion
+int OptIn
+==
+bnz done
+txn Applications 1
+app_params_get AppAddress
+assert
+byte "X"
+byte "Y"
+app_local_put
+done:
+`),
+ LocalStateSchema: basics.StateSchema{
+ NumByteSlice: 1,
+ },
+ }
+
+ vb := dl.fullBlock(&appA, &appB)
+ index0 := vb.Block().Payset[0].ApplicationID
+ index1 := vb.Block().Payset[1].ApplicationID
+
+ fund1 := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: index1.Address(),
+ Amount: 1_000_000_000,
+ }
+ fund0 := fund1
+ fund0.Receiver = index0.Address()
+ fund1.Receiver = index1.Address()
+
+ callA := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: index0,
+ ForeignApps: []basics.AppIndex{index1},
+ }
+
+ callB := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: index1,
+ ForeignApps: []basics.AppIndex{index0},
+ Accounts: []basics.Address{index0.Address()},
+ }
+
+ vb = dl.fullBlock(&fund0, &fund1, &callA, &callB)
+
+ require.Equal(t, "Y", vb.Block().Payset[3].EvalDelta.LocalDeltas[1]["X"].Bytes)
+ })
+}
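Taken together, the three tests above pin down the rule: after v33 a foreign app's address is readable wherever addresses are accepted, but opcodes that mutate local state still require that address to appear in the transaction's Accounts field. A minimal sketch of the caller-side difference, reusing the txntest fields from the tests:

	// read-only access: deriving the address from ForeignApps is enough
	readCall := txntest.Txn{
		Type:          "appl",
		ApplicationID: index1,
		ForeignApps:   []basics.AppIndex{index0},
	}

	// mutable access (e.g. app_local_put): the address must also be in Accounts
	writeCall := readCall
	writeCall.Accounts = []basics.Address{index0.Address()}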
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
index 79d22d189..21240467c 100644
--- a/ledger/internal/eval_blackbox_test.go
+++ b/ledger/internal/eval_blackbox_test.go
@@ -882,7 +882,10 @@ var consensusByNumber = []protocol.ConsensusVersion{
protocol.ConsensusFuture,
}
-func TestContainsLatestVersion(t *testing.T) {
+// TestReleasedVersion ensures that the necessary tidying is done when a new
+// protocol release happens. The new version must be added to
+// consensusByNumber, and a new LogicSigVersion must be added to vFuture.
+func TestReleasedVersion(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -891,6 +894,13 @@ func TestContainsLatestVersion(t *testing.T) {
require.Len(t, config.Consensus[consensusByNumber[len(consensusByNumber)-2]].ApprovedUpgrades, 0)
// And no funny business with vFuture
require.Equal(t, protocol.ConsensusFuture, consensusByNumber[len(consensusByNumber)-1])
+
+ // Ensure that vFuture gets a new LogicSigVersion when we promote the
+ // existing one. That allows TestExperimental in the logic package to
+ // prevent unintended releases of experimental opcodes.
+ relV := config.Consensus[consensusByNumber[len(consensusByNumber)-2]].LogicSigVersion
+ futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
+ require.Equal(t, relV+1, futureV)
}
// testConsensusRange allows for running tests against a range of consensus
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 6cbc7cbce..860031519 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -187,7 +187,7 @@ func RandomAppLocalState() basics.AppLocalState {
}
// RandomFullAccountData generates a random AccountData
-func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.CreatableIndex]basics.CreatableType, lastCreatableID uint64) (basics.AccountData, map[basics.CreatableIndex]basics.CreatableType, uint64) {
+func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.CreatableIndex, assets map[basics.AssetIndex]struct{}, apps map[basics.AppIndex]struct{}) basics.AccountData {
data := RandomAccountData(rewardsLevel)
crypto.RandBytes(data.VoteID[:])
@@ -202,28 +202,26 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat
data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, createdAssetsCount)
for i := uint64(0); i < createdAssetsCount; i++ {
ap := RandomAssetParams()
- lastCreatableID++
- data.AssetParams[basics.AssetIndex(lastCreatableID)] = ap
- knownCreatables[basics.CreatableIndex(lastCreatableID)] = basics.AssetCreatable
+ *lastCreatableID++
+ data.AssetParams[basics.AssetIndex(*lastCreatableID)] = ap
+ assets[basics.AssetIndex(*lastCreatableID)] = struct{}{}
}
}
- if (crypto.RandUint64()%2) == 1 && lastCreatableID > 0 {
+ if (crypto.RandUint64()%2 == 1) && (len(assets) > 0) {
// if account owns assets
ownedAssetsCount := crypto.RandUint64()%20 + 1
data.Assets = make(map[basics.AssetIndex]basics.AssetHolding, ownedAssetsCount)
for i := uint64(0); i < ownedAssetsCount; i++ {
ah := RandomAssetHolding(false)
- aidx := crypto.RandUint64() % lastCreatableID
+ var aidx basics.AssetIndex
for {
- ctype, ok := knownCreatables[basics.CreatableIndex(aidx)]
- if !ok || ctype == basics.AssetCreatable {
+ aidx = basics.AssetIndex(crypto.RandUint64()%uint64(*lastCreatableID) + 1)
+ if _, ok := assets[aidx]; ok {
break
}
- aidx = crypto.RandUint64() % lastCreatableID
}
- data.Assets[basics.AssetIndex(aidx)] = ah
- knownCreatables[basics.CreatableIndex(aidx)] = basics.AssetCreatable
+ data.Assets[aidx] = ah
}
}
if (crypto.RandUint64() % 5) == 1 {
@@ -235,26 +233,24 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat
data.AppParams = make(map[basics.AppIndex]basics.AppParams, appParamsCount)
for i := uint64(0); i < appParamsCount; i++ {
ap := RandomAppParams()
- lastCreatableID++
- data.AppParams[basics.AppIndex(lastCreatableID)] = ap
- knownCreatables[basics.CreatableIndex(lastCreatableID)] = basics.AppCreatable
+ *lastCreatableID++
+ data.AppParams[basics.AppIndex(*lastCreatableID)] = ap
+ apps[basics.AppIndex(*lastCreatableID)] = struct{}{}
}
}
- if (crypto.RandUint64()%3) == 1 && lastCreatableID > 0 {
+ if (crypto.RandUint64()%3 == 1) && (len(apps) > 0) {
appStatesCount := crypto.RandUint64()%20 + 1
data.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState, appStatesCount)
for i := uint64(0); i < appStatesCount; i++ {
ap := RandomAppLocalState()
- aidx := crypto.RandUint64() % lastCreatableID
+ var aidx basics.AppIndex
for {
- ctype, ok := knownCreatables[basics.CreatableIndex(aidx)]
- if !ok || ctype == basics.AppCreatable {
+ aidx = basics.AppIndex(crypto.RandUint64()%uint64(*lastCreatableID) + 1)
+ if _, ok := apps[aidx]; ok {
break
}
- aidx = crypto.RandUint64() % lastCreatableID
}
data.AppLocalStates[basics.AppIndex(aidx)] = ap
- knownCreatables[basics.CreatableIndex(aidx)] = basics.AppCreatable
}
}
@@ -264,7 +260,8 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat
NumByteSlice: crypto.RandUint64() % 50,
}
}
- return data, knownCreatables, lastCreatableID
+
+ return data
}
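The refactor threads lastCreatableID through a pointer and replaces the single knownCreatables map with per-type sets, so callers share state across iterations instead of re-threading return values. A minimal sketch of the new call pattern (mirroring RandomAccounts below):

	lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
	assets := make(map[basics.AssetIndex]struct{})
	apps := make(map[basics.AppIndex]struct{})
	// lastCreatableID advances in place; assets/apps accumulate created IDs.
	data := RandomFullAccountData(0, &lastCreatableID, assets, apps)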
// RandomAccounts generates a random set of accounts map
@@ -275,10 +272,11 @@ func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.Ac
res[RandomAddress()] = RandomAccountData(0)
}
} else {
- lastCreatableID := crypto.RandUint64() % 512
- knownCreatables := make(map[basics.CreatableIndex]basics.CreatableType)
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
+ assets := make(map[basics.AssetIndex]struct{})
+ apps := make(map[basics.AppIndex]struct{})
for i := 0; i < niter; i++ {
- res[RandomAddress()], knownCreatables, lastCreatableID = RandomFullAccountData(0, knownCreatables, lastCreatableID)
+ res[RandomAddress()] = RandomFullAccountData(0, &lastCreatableID, assets, apps)
}
}
return res
@@ -286,18 +284,20 @@ func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.Ac
// RandomDeltas generates a random set of accounts delta
func RandomDeltas(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) {
- updates, totals, imbalance, _ = RandomDeltasImpl(niter, base, rewardsLevel, true, 0)
+ var lastCreatableID basics.CreatableIndex
+ updates, totals, imbalance =
+ RandomDeltasImpl(niter, base, rewardsLevel, true, &lastCreatableID)
return
}
// RandomDeltasFull generates a random set of accounts delta
-func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64, lastCreatableID uint64) {
- updates, totals, imbalance, lastCreatableID = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) {
+ updates, totals, imbalance = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableID)
return
}
// RandomDeltasImpl generates a random set of accounts delta
-func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64, lastCreatableID uint64) {
+func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
totals = make(map[basics.Address]ledgercore.AccountData)
@@ -309,30 +309,21 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
}
// when making a full delta, we need to determine the max asset/app id to avoid conflicts
- lastCreatableID = lastCreatableIDIn
- knownCreatables := make(map[basics.CreatableIndex]basics.CreatableType)
+ assets := make(map[basics.AssetIndex]struct{})
+ apps := make(map[basics.AppIndex]struct{})
if !simple {
for _, ad := range base {
for aid := range ad.AssetParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- knownCreatables[basics.CreatableIndex(aid)] = basics.AssetCreatable
+ assets[aid] = struct{}{}
}
for aid := range ad.Assets {
- // do not check lastCreatableID since lastCreatableID is only incremented for new params
- knownCreatables[basics.CreatableIndex(aid)] = basics.AssetCreatable
+ assets[aid] = struct{}{}
}
-
for aid := range ad.AppParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- knownCreatables[basics.CreatableIndex(aid)] = basics.AppCreatable
+ apps[aid] = struct{}{}
}
for aid := range ad.AppLocalStates {
- // do not check lastCreatableID since lastCreatableID is only incremented for new params
- knownCreatables[basics.CreatableIndex(aid)] = basics.AppCreatable
+ apps[aid] = struct{}{}
}
}
}
@@ -357,7 +348,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
} else {
- data, knownCreatables, lastCreatableID = RandomFullAccountData(rewardsLevel, knownCreatables, lastCreatableID)
+ data = RandomFullAccountData(rewardsLevel, lastCreatableID, assets, apps)
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
appResources := make(map[basics.AppIndex]ledgercore.AppResourceRecord)
@@ -442,7 +433,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
} else {
- data, knownCreatables, lastCreatableID = RandomFullAccountData(rewardsLevel, knownCreatables, lastCreatableID)
+ data = RandomFullAccountData(rewardsLevel, lastCreatableID, assets, apps)
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
appResources := make(map[basics.AppIndex]ledgercore.AppResourceRecord)
@@ -489,23 +480,26 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
// RandomDeltasBalanced generates a random set of accounts delta
func RandomDeltasBalanced(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) {
- updates, totals, _ = RandomDeltasBalancedImpl(niter, base, rewardsLevel, true, 0)
+ var lastCreatableID basics.CreatableIndex
+ updates, totals = RandomDeltasBalancedImpl(
+ niter, base, rewardsLevel, true, &lastCreatableID)
return
}
// RandomDeltasBalancedFull generates a random set of accounts delta
-func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, lastCreatableID uint64) {
- updates, totals, lastCreatableID = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) {
+ updates, totals = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableID)
return
}
// RandomDeltasBalancedImpl generates a random set of accounts delta
-func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, lastCreatableID uint64) {
+func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) {
var imbalance int64
if simple {
updates, totals, imbalance = RandomDeltas(niter, base, rewardsLevel)
} else {
- updates, totals, imbalance, lastCreatableID = RandomDeltasFull(niter, base, rewardsLevel, lastCreatableIDIn)
+ updates, totals, imbalance =
+ RandomDeltasFull(niter, base, rewardsLevel, lastCreatableID)
}
oldPool := base[testPoolAddr]
@@ -516,5 +510,5 @@ func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountD
updates.Upsert(testPoolAddr, newPool)
totals[testPoolAddr] = newPool
- return updates, totals, lastCreatableID
+ return updates, totals
}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index f766363f2..ea9954528 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -18,10 +18,12 @@ package libgoal
import (
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
+ "time"
algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
@@ -938,6 +940,30 @@ func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (r
return
}
+// VerifyParticipationKey polls the node until the given participationID is installed or the timeout elapses.
+func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID string) error {
+ start := time.Now()
+
+ for {
+ keysResp, err := c.GetParticipationKeys()
+ if err != nil {
+ return err
+ }
+ for _, key := range keysResp {
+ if key.Id == participationID {
+ // Installation successful.
+ return nil
+ }
+ }
+
+ if time.Since(start) > timeout {
+ return errors.New("timeout waiting for key to appear")
+ }
+
+ time.Sleep(1 * time.Second)
+ }
+}
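A typical caller pairs this with AddParticipationKey below; a minimal usage sketch, assuming a configured Client c and that the generated PostParticipationResponse exposes the new key's ID as PartId:

	resp, err := c.AddParticipationKey("/path/to/key.partkey")
	if err != nil {
		return err
	}
	// Block until the registry reports the key, or give up after two minutes.
	if err := c.VerifyParticipationKey(2*time.Minute, resp.PartId); err != nil {
		return err
	}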
+
// AddParticipationKey takes a participation key file and sends it to the node.
// The key will be loaded into the system when the function returns successfully.
func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) {
diff --git a/libgoal/participation.go b/libgoal/participation.go
index 2dbbbde98..88a1151a7 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -18,7 +18,6 @@ package libgoal
import (
"fmt"
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -27,76 +26,34 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
// chooseParticipation chooses which participation keys to use for going online
// based on the address, round number, and available participation databases
-func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part account.Participation, err error) {
- genID, err := c.GenesisID()
+func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part generated.ParticipationKey, err error) {
+ parts, err := c.ListParticipationKeys()
if err != nil {
return
}
- // Get a list of files in the participation keys directory
- keyDir := filepath.Join(c.DataDir(), genID)
- files, err := ioutil.ReadDir(keyDir)
- if err != nil {
- return
- }
- // This lambda will be used for finding the desired file.
- checkIfFileIsDesiredKey := func(file os.FileInfo, expiresAfter basics.Round) (part account.Participation, err error) {
- var handle db.Accessor
- var partCandidate account.PersistedParticipation
-
- // If it can't be a participation key database, skip it
- if !config.IsPartKeyFilename(file.Name()) {
- return
- }
-
- filename := file.Name()
-
- // Fetch a handle to this database
- handle, err = db.MakeErasableAccessor(filepath.Join(keyDir, filename))
- if err != nil {
- // Couldn't open it, skip it
- return
- }
-
- // Fetch an account.Participation from the database
- partCandidate, err = account.RestoreParticipation(handle)
- if err != nil {
- // Couldn't read it, skip it
- handle.Close()
- return
- }
- defer partCandidate.Close()
-
- // Return the Participation valid for this round that relates to the passed address
+ // Loop through each of the participation keys; pick the one that expires farthest in the future.
+ var expiry uint64 = 0
+ for _, info := range parts {
+ // Choose the Participation valid for this round that relates to the passed address
// that expires farthest in the future.
// Note that algod will sign votes with all possible Participations, so any should work
// in the short-term.
// In the future we should allow the user to specify exactly which partkeys to register.
- if partCandidate.FirstValid <= round && round <= partCandidate.LastValid && partCandidate.Parent == address && partCandidate.LastValid > expiresAfter {
- part = partCandidate.Participation
+ if info.Key.VoteFirstValid <= uint64(round) && uint64(round) <= info.Key.VoteLastValid && info.Address == address.String() && info.Key.VoteLastValid > expiry {
+ part = info
+ expiry = part.Key.VoteLastValid
}
- return
- }
- // Loop through each of the files; pick the one that expires farthest in the future.
- var expiry basics.Round
- for _, info := range files {
- // Use above lambda so the deferred handle closure happens each loop
- partCandidate, err := checkIfFileIsDesiredKey(info, expiry)
- if err == nil && (!partCandidate.Parent.IsZero()) {
- part = partCandidate
- expiry = part.LastValid
- }
}
- if part.Parent.IsZero() {
+ if part.Address == "" {
// Couldn't find one
- err = fmt.Errorf("Couldn't find a participation key database for address %v valid at round %v in directory %v", address.GetUserAddress(), round, keyDir)
+ err = fmt.Errorf("couldn't find a participation key database for address %v valid at round %v in participation registry", address.GetUserAddress(), round)
return
}
return
@@ -117,8 +74,12 @@ func (c *Client) GenParticipationKeys(address string, firstValid, lastValid, key
}
// GenParticipationKeysTo creates a .partkey database for a given address, fills
-// it with keys, and saves it in the specified output directory.
+// it with keys, and saves it in the specified output directory. If outDir is
+// empty, the keys are generated in a temp directory and installed in the node.
func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string) (part account.Participation, filePath string, err error) {
+
+ install := outDir == ""
+
// Parse the address
parsedAddr, err := basics.UnmarshalChecksumAddress(address)
if err != nil {
@@ -127,16 +88,9 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid)
- // If output directory wasn't specified, store it in the current ledger directory.
- if outDir == "" {
- // Get the GenesisID for use in the participation key path
- var genID string
- genID, err = c.GenesisID()
- if err != nil {
- return
- }
-
- outDir = filepath.Join(c.DataDir(), genID)
+ // If we are installing, generate in the temp dir
+ if install {
+ outDir = os.TempDir()
}
// Connect to the database
partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstRound, lastRound)
@@ -152,6 +106,14 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
return
}
+ // If the key is being installed, remove it afterwards.
+ if install {
+ // Explicitly ignore any errors
+ defer func(name string) {
+ _ = os.Remove(name)
+ }(partKeyPath)
+ }
+
partdb, err := db.MakeErasableAccessor(partKeyPath)
if err != nil {
return
@@ -165,79 +127,15 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution)
part = newPart.Participation
partdb.Close()
- return part, partKeyPath, err
-}
-
-// InstallParticipationKeys creates a .partkey database for a given address,
-// based on an existing database from inputfile. On successful install, it
-// deletes the input file.
-func (c *Client) InstallParticipationKeys(inputfile string) (part account.Participation, filePath string, err error) {
- proto, ok := c.consensus[protocol.ConsensusCurrentVersion]
- if !ok {
- err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion)
- return
- }
-
- // Get the GenesisID for use in the participation key path
- var genID string
- genID, err = c.GenesisID()
- if err != nil {
- return
- }
-
- outDir := filepath.Join(c.DataDir(), genID)
-
- inputdb, err := db.MakeErasableAccessor(inputfile)
- if err != nil {
- return
- }
- defer inputdb.Close()
-
- partkey, err := account.RestoreParticipationWithSecrets(inputdb)
- if err != nil {
- return
- }
-
- if partkey.Parent == (basics.Address{}) {
- err = fmt.Errorf("Cannot install partkey with missing (zero) parent address")
- return
- }
-
- newdbpath, err := participationKeysPath(outDir, partkey.Parent, partkey.FirstValid, partkey.LastValid)
- if err != nil {
- return
- }
- newdb, err := db.MakeErasableAccessor(newdbpath)
if err != nil {
return
}
- newpartkey := partkey
- newpartkey.Store = newdb
- err = newpartkey.PersistWithSecrets()
- if err != nil {
- newpartkey.Close()
- return
+ if install {
+ _, err = c.AddParticipationKey(partKeyPath)
}
-
- // After successful install, remove the input copy of the
- // partkey so that old keys cannot be recovered after they
- // are used by algod. We try to delete the data inside
- // sqlite first, so the key material is zeroed out from
- // disk blocks, but regardless of whether that works, we
- // delete the input file. The consensus protocol version
- // is irrelevant for the maxuint64 round number we pass in.
- errCh := partkey.DeleteOldKeys(basics.Round(math.MaxUint64), proto)
- err = <-errCh
- if err != nil {
- newpartkey.Close()
- return
- }
- os.Remove(inputfile)
- part = newpartkey.Participation
- newpartkey.Close()
- return part, newdbpath, nil
+ return part, partKeyPath, err
}
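The install path now round-trips through a temp file that is removed afterwards, so an empty outDir leaves nothing on disk outside the node's registry. A sketch, assuming a configured Client c and addr/first/last/keyDilution in scope:

	// Empty outDir selects install mode: generate in a temp dir, install
	// via AddParticipationKey, then delete the .partkey file.
	part, _, err := c.GenParticipationKeysTo(addr, first, last, keyDilution, "")
	if err != nil {
		return err
	}
	fmt.Printf("installed keys for %s\n", part.Parent.String())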
// ListParticipationKeys returns the available participation keys,
@@ -249,49 +147,3 @@ func (c *Client) ListParticipationKeys() (partKeyFiles generated.ParticipationKe
}
return
}
-
-// ListParticipationKeyFiles returns the available participation keys,
-// as a map from database filename to Participation key object.
-func (c *Client) ListParticipationKeyFiles() (partKeyFiles map[string]account.Participation, err error) {
- genID, err := c.GenesisID()
- if err != nil {
- return
- }
-
- // Get a list of files in the participation keys directory
- keyDir := filepath.Join(c.DataDir(), genID)
- files, err := ioutil.ReadDir(keyDir)
- if err != nil {
- return
- }
-
- partKeyFiles = make(map[string]account.Participation)
- for _, file := range files {
- // If it can't be a participation key database, skip it
- if !config.IsPartKeyFilename(file.Name()) {
- continue
- }
-
- filename := file.Name()
-
- // Fetch a handle to this database
- handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename))
- if err != nil {
- // Couldn't open it, skip it
- continue
- }
-
- // Fetch an account.Participation from the database
- part, err := account.RestoreParticipation(handle)
- if err != nil {
- // Couldn't read it, skip it
- handle.Close()
- continue
- }
-
- partKeyFiles[filename] = part.Participation
- part.Close()
- }
-
- return
-}
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index bf704cc9e..a03a9d551 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -20,7 +20,10 @@ import (
"errors"
"fmt"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -191,8 +194,98 @@ func (c *Client) SignAndBroadcastTransaction(walletHandle, pw []byte, utx transa
return c.BroadcastTransaction(stx)
}
+// generateRegistrationTransaction returns a transaction object for registering a Participation with its parent.
+// This is similar to account.Participation.GenerateRegistrationTransaction.
+func generateRegistrationTransaction(part generated.ParticipationKey, fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte) (transactions.Transaction, error) {
+ addr, err := basics.UnmarshalChecksumAddress(part.Address)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ if len(part.Key.VoteParticipationKey) != 32 {
+ return transactions.Transaction{}, fmt.Errorf("voting key is the wrong size, should be 32 but it is %d", len(part.Key.VoteParticipationKey))
+ }
+
+ var votePk [32]byte
+ copy(votePk[:], part.Key.VoteParticipationKey[:])
+
+ if len(part.Key.SelectionParticipationKey) != 32 {
+ return transactions.Transaction{}, fmt.Errorf("selection key is the wrong size, should be 32 but it is %d", len(part.Key.VoteParticipationKey))
+ }
+
+ var selectionPk [32]byte
+ copy(selectionPk[:], part.Key.SelectionParticipationKey[:])
+
+ if part.Key.StateProofKey == nil {
+ return transactions.Transaction{}, fmt.Errorf("state proof key pointer is nil")
+ }
+
+ if len(*part.Key.StateProofKey) != len(merklesignature.Verifier{}) {
+ return transactions.Transaction{}, fmt.Errorf("state proof key is the wrong size, should be %d but it is %d", len(merklesignature.Verifier{}), len(*part.Key.StateProofKey))
+ }
+
+ var stateProofPk merklesignature.Verifier
+ copy(stateProofPk[:], (*part.Key.StateProofKey)[:])
+
+ t := transactions.Transaction{
+ Type: protocol.KeyRegistrationTx,
+ Header: transactions.Header{
+ Sender: addr,
+ Fee: fee,
+ FirstValid: txnFirstValid,
+ LastValid: txnLastValid,
+ Lease: leaseBytes,
+ },
+ KeyregTxnFields: transactions.KeyregTxnFields{
+ VotePK: votePk,
+ SelectionPK: selectionPk,
+ StateProofPK: stateProofPk,
+ },
+ }
+ t.KeyregTxnFields.VoteFirst = basics.Round(part.Key.VoteFirstValid)
+ t.KeyregTxnFields.VoteLast = basics.Round(part.Key.VoteLastValid)
+ t.KeyregTxnFields.VoteKeyDilution = part.Key.VoteKeyDilution
+
+ return t, nil
+}
+
+// MakeRegistrationTransactionWithGenesisID generates a Registration transaction with the genesis ID set from the suggested parameters of the client.
+func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participation, fee, txnFirstValid, txnLastValid uint64, leaseBytes [32]byte, includeStateProofKeys bool) (transactions.Transaction, error) {
+
+ // Get current round, protocol, genesis ID
+ params, err := c.SuggestedParams()
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ if !ok {
+ return transactions.Transaction{}, errors.New("unknown consensus version")
+ }
+
+ txnFirstValid, txnLastValid, err = computeValidityRounds(txnFirstValid, txnLastValid, 0, params.LastRound, cparams.MaxTxnLife)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ goOnlineTx := part.GenerateRegistrationTransaction(
+ basics.MicroAlgos{Raw: fee},
+ basics.Round(txnFirstValid),
+ basics.Round(txnLastValid),
+ leaseBytes, includeStateProofKeys)
+
+ goOnlineTx.Header.GenesisID = params.GenesisID
+
+ // Check if the protocol supports genesis hash
+ if config.Consensus[protocol.ConsensusFuture].SupportGenesisHash {
+ copy(goOnlineTx.Header.GenesisHash[:], params.GenesisHash)
+ }
+
+ return goOnlineTx, nil
+}
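For callers that still hold a local account.Participation (e.g. keys generated in-process), a minimal sketch of this variant; the zero fee, zero lease, and zero validity bounds are assumptions, with computeValidityRounds choosing the window:

	var lease [32]byte // zero lease: no mutual exclusion with other txns
	tx, err := c.MakeRegistrationTransactionWithGenesisID(part, 0, 0, 0, lease, true)
	if err != nil {
		return err
	}
	// tx now carries GenesisID (and GenesisHash when supported) from the
	// node's suggested params.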
+
// MakeUnsignedGoOnlineTx creates a transaction that will bring an address online using available participation keys
-func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participation, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) {
+func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) {
// Parse the address
parsedAddr, err := basics.UnmarshalChecksumAddress(address)
if err != nil {
@@ -217,19 +310,19 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participat
// Choose which participation keys to go online with;
// need to do this after filling in the round number.
- if part == nil {
- bestPart, err := c.chooseParticipation(parsedAddr, basics.Round(firstValid))
- if err != nil {
- return transactions.Transaction{}, err
- }
- part = &bestPart
+ part, err := c.chooseParticipation(parsedAddr, basics.Round(firstValid))
+ if err != nil {
+ return transactions.Transaction{}, err
}
parsedFrstValid := basics.Round(firstValid)
parsedLastValid := basics.Round(lastValid)
parsedFee := basics.MicroAlgos{Raw: fee}
- goOnlineTransaction := part.GenerateRegistrationTransaction(parsedFee, parsedFrstValid, parsedLastValid, leaseBytes, cparams.EnableStateProofKeyregCheck)
+ goOnlineTransaction, err := generateRegistrationTransaction(part, parsedFee, parsedFrstValid, parsedLastValid, leaseBytes)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
if cparams.SupportGenesisHash {
var genHash crypto.Digest
copy(genHash[:], params.GenesisHash)
diff --git a/logging/log.go b/logging/log.go
index 527d6decb..d0384d0a8 100644
--- a/logging/log.go
+++ b/logging/log.go
@@ -157,7 +157,7 @@ type Logger interface {
EventWithDetails(category telemetryspec.Category, identifier telemetryspec.Event, details interface{})
StartOperation(category telemetryspec.Category, identifier telemetryspec.Operation) TelemetryOperation
GetTelemetrySession() string
- GetTelemetryHostName() string
+ GetTelemetryGUID() string
GetInstanceName() string
GetTelemetryURI() string
CloseTelemetry()
@@ -401,11 +401,11 @@ func (l logger) GetTelemetryVersion() string {
return l.loggerState.telemetry.telemetryConfig.Version
}
-func (l logger) GetTelemetryHostName() string {
+func (l logger) GetTelemetryGUID() string {
if !l.GetTelemetryEnabled() {
return ""
}
- return l.loggerState.telemetry.telemetryConfig.getHostName()
+ return l.loggerState.telemetry.telemetryConfig.getHostGUID()
}
func (l logger) GetInstanceName() string {
diff --git a/logging/telemetryConfig.go b/logging/telemetryConfig.go
index 0ef98e450..452202f91 100644
--- a/logging/telemetryConfig.go
+++ b/logging/telemetryConfig.go
@@ -105,13 +105,13 @@ func (cfg TelemetryConfig) Save(configPath string) error {
return err
}
-// getHostName returns the HostName for telemetry (GUID:Name -- :Name is optional if blank)
-func (cfg TelemetryConfig) getHostName() string {
- hostName := cfg.GUID
+// getHostGUID returns the Host GUID for telemetry (GUID:Name -- :Name is optional if blank)
+func (cfg TelemetryConfig) getHostGUID() string {
+ ret := cfg.GUID
if cfg.Enable && len(cfg.Name) > 0 {
- hostName += ":" + cfg.Name
+ ret += ":" + cfg.Name
}
- return hostName
+ return ret
}
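The legacy "GUID:Name" shape is preserved; only the accessor name changes. With hypothetical config values:

	cfg := TelemetryConfig{Enable: true, GUID: "0123-guid", Name: "relay-01"}
	// cfg.getHostGUID() == "0123-guid:relay-01"
	// with Name == "" (or Enable == false) it is just "0123-guid"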
// getInstanceName allows us to distinguish between multiple instances running on the same node.
diff --git a/logging/telemetryhook.go b/logging/telemetryhook.go
index 1a8c29729..b74d8a447 100644
--- a/logging/telemetryhook.go
+++ b/logging/telemetryhook.go
@@ -242,7 +242,7 @@ func createElasticHook(cfg TelemetryConfig) (hook logrus.Hook, err error) {
err = fmt.Errorf("Unable to create new elastic client on '%s' using '%s:%s' : %w", cfg.URI, cfg.UserName, cfg.Password, err)
return nil, err
}
- hostName := cfg.getHostName()
+ hostName := cfg.getHostGUID()
hook, err = elogrus.NewElasticHook(client, hostName, cfg.MinLogLevel, cfg.ChainID)
if err != nil {
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index dcd3d231c..81d228324 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -191,10 +191,10 @@ const ConnectPeerEvent Event = "ConnectPeer"
// PeerEventDetails contains details for the ConnectPeerEvent
type PeerEventDetails struct {
- Address string
- HostName string
- Incoming bool
- InstanceName string
+ Address string
+ TelemetryGUID string `json:"HostName"`
+ Incoming bool
+ InstanceName string
// Endpoint is the dialed-to address, for an outgoing connection. Not being used for incoming connection.
Endpoint string `json:",omitempty"`
// MessageDelay is the average relative message delay. Not being used for incoming connection.
@@ -206,11 +206,11 @@ const ConnectPeerFailEvent Event = "ConnectPeerFail"
// ConnectPeerFailEventDetails contains details for the ConnectPeerFailEvent
type ConnectPeerFailEventDetails struct {
- Address string
- HostName string
- Incoming bool
- InstanceName string
- Reason string
+ Address string
+ TelemetryGUID string `json:"HostName"`
+ Incoming bool
+ InstanceName string
+ Reason string
}
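Renaming the Go fields while pinning the json:"HostName" tag keeps the emitted telemetry schema unchanged; a sketch of the invariant:

	b, _ := json.Marshal(telemetryspec.ConnectPeerFailEventDetails{
		TelemetryGUID: "guid", Reason: "test",
	})
	// string(b) still uses the old key name on the wire:
	// {"Address":"","HostName":"guid","Incoming":false,"InstanceName":"","Reason":"test"}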
// DisconnectPeerEvent event
@@ -282,8 +282,8 @@ type PeersConnectionDetails struct {
type PeerConnectionDetails struct {
// Address is the IP address of the remote connected socket
Address string
- // The HostName is the TelemetryGUID passed via the X-Algorand-TelId header during the http connection handshake.
- HostName string
+ // The TelemetryGUID is the TelemetryGUID passed via the X-Algorand-TelId header during the http connection handshake.
+ TelemetryGUID string `json:"HostName"`
// InstanceName is the node-specific hashed instance name that was passed via X-Algorand-InstanceName header during the http connection handshake.
InstanceName string
// ConnectionDuration is the duration of the connection, in seconds.
diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go
index dc485879e..293fbaa86 100644
--- a/netdeploy/networkTemplate.go
+++ b/netdeploy/networkTemplate.go
@@ -246,6 +246,7 @@ func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes in
cfg.EndpointAddress = "127.0.0.1:0"
cfg.DNSBootstrapID = ""
cfg.EnableProfiler = true
+ cfg.EnableRuntimeMetrics = true
if relaysCount == 0 {
cfg.DisableNetworking = true
}
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index dcad2d390..3382edee0 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -394,7 +394,7 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
roundTxnCnt: fileCfgs.RoundTransactionsCount,
round: basics.Round(0),
genesisID: genesis.ID(),
- genesisHash: crypto.HashObj(genesis),
+ genesisHash: genesis.Hash(),
poolAddr: poolAddr,
sinkAddr: sinkAddr,
}
diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go
index 7c4bb4217..2d286f7de 100644
--- a/netdeploy/remote/nodecfg/nodeConfigurator.go
+++ b/netdeploy/remote/nodecfg/nodeConfigurator.go
@@ -176,12 +176,12 @@ func (nc *nodeConfigurator) prepareNodeDirs(configs []remote.NodeConfig, rootCon
}
func (nc *nodeConfigurator) registerDNSRecords() (err error) {
- cfZoneID, cfEmail, cfKey, err := getClouldflareCredentials()
+ cfZoneID, cfToken, err := getClouldflareCredentials()
if err != nil {
return fmt.Errorf("error getting DNS credentials: %v", err)
}
- cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfEmail, cfKey)
+ cloudflareDNS := cloudflare.NewDNS(cfZoneID, cfToken)
const priority = 1
const weight = 1
@@ -234,11 +234,10 @@ func (nc *nodeConfigurator) registerDNSRecords() (err error) {
return
}
-func getClouldflareCredentials() (zoneID string, email string, authKey string, err error) {
+func getClouldflareCredentials() (zoneID string, token string, err error) {
zoneID = os.Getenv("CLOUDFLARE_ZONE_ID")
- email = os.Getenv("CLOUDFLARE_EMAIL")
- authKey = os.Getenv("CLOUDFLARE_AUTH_KEY")
- if zoneID == "" || email == "" || authKey == "" {
+ token = os.Getenv("CLOUDFLARE_API_TOKEN")
+ if zoneID == "" || token == "" {
err = fmt.Errorf("one or more credentials missing from ENV")
}
return
diff --git a/network/requestTracker.go b/network/requestTracker.go
index 13cb2f205..fd78dadca 100644
--- a/network/requestTracker.go
+++ b/network/requestTracker.go
@@ -482,11 +482,11 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http.
rt.log.With("connection", "http").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate")
rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: trackedRequest.remoteHost,
- HostName: trackedRequest.otherTelemetryGUID,
- Incoming: true,
- InstanceName: trackedRequest.otherInstanceName,
- Reason: "Remote IP Connection Rate Limit",
+ Address: trackedRequest.remoteHost,
+ TelemetryGUID: trackedRequest.otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: trackedRequest.otherInstanceName,
+ Reason: "Remote IP Connection Rate Limit",
})
response.Header().Add(TooManyRequestsRetryAfterHeader, fmt.Sprintf("%d", rt.config.ConnectionsRateLimitingWindowSeconds))
response.WriteHeader(http.StatusTooManyRequests)
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 59de32eef..eefd3b032 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -301,6 +301,19 @@ func Propagate(msg IncomingMessage) OutgoingMessage {
// Contains {genesisID} param to be handled by gorilla/mux
const GossipNetworkPath = "/v1/{genesisID}/gossip"
+// NodeInfo helps the network get information about the node it is running on
+type NodeInfo interface {
+ // IsParticipating returns true if this node has stake and may vote on blocks or propose blocks.
+ IsParticipating() bool
+}
+
+type nopeNodeInfo struct {
+}
+
+func (nnni *nopeNodeInfo) IsParticipating() bool {
+ return false
+}
+
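+// Any type that can answer the participation question satisfies the
+// interface; the node wires its real implementation in through
+// NewWebsocketNetwork below, and nopeNodeInfo is the conservative default.
+// A hypothetical alternative, for illustration only:
+//
+//	type funcNodeInfo struct {
+//		participating func() bool
+//	}
+//
+//	func (f *funcNodeInfo) IsParticipating() bool {
+//		return f.participating()
+//	}
+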
// WebsocketNetwork implements GossipNode
type WebsocketNetwork struct {
listener net.Listener
@@ -397,21 +410,34 @@ type WebsocketNetwork struct {
// to be sent to new peers. This is filled in at network start,
// at which point messagesOfInterestEncoded is set to prevent
// further changes.
- messagesOfInterestEnc []byte
- messagesOfInterestEncoded bool
+ messagesOfInterestEnc []byte
+ messagesOfInterestEncoded bool
+ messagesOfInterestGeneration uint32
// messagesOfInterestMu protects messagesOfInterest and ensures
// that messagesOfInterestEnc does not change once it is set during
// network start.
- messagesOfInterestMu deadlock.Mutex
+ messagesOfInterestMu deadlock.Mutex
+ messagesOfInterestRefresh chan struct{}
// peersConnectivityCheckTicker is the timer for testing that all the connected peers
// are still transmitting or receiving information. The channel produced by this ticker
// is consumed by any of the messageHandlerThread(s). The ticker itself is created during
// Start(), and being shut down when Stop() is called.
peersConnectivityCheckTicker *time.Ticker
+
+ nodeInfo NodeInfo
+
+ // atomic {0:unknown, 1:yes, 2:no}
+ wantTXGossip uint32
}
+const (
+ wantTXGossipUnk = 0
+ wantTXGossipYes = 1
+ wantTXGossipNo = 2
+)
+
type broadcastRequest struct {
tags []Tag
data [][]byte
@@ -661,6 +687,9 @@ func (wn *WebsocketNetwork) setup() {
if wn.config.DNSSecurityRelayAddrEnforced() {
preferredResolver = dnssec.MakeDefaultDnssecResolver(wn.config.FallbackDNSResolverAddress, wn.log)
}
+ if wn.nodeInfo == nil {
+ wn.nodeInfo = &nopeNodeInfo{}
+ }
maxIdleConnsPerHost := int(wn.config.ConnectionsRateLimitingCount)
wn.dialer = makeRateLimitingDialer(wn.phonebook, preferredResolver)
wn.transport = makeRateLimitingTransport(wn.phonebook, 10*time.Second, &wn.dialer, maxIdleConnsPerHost)
@@ -684,6 +713,9 @@ func (wn *WebsocketNetwork) setup() {
wn.server.MaxHeaderBytes = httpServerMaxHeaderBytes
wn.ctx, wn.ctxCancel = context.WithCancel(context.Background())
wn.relayMessages = wn.config.NetAddress != "" || wn.config.ForceRelayMessages
+ if wn.relayMessages || wn.config.ForceFetchTransactions {
+ wn.wantTXGossip = wantTXGossipYes
+ }
// roughly estimate the number of messages that could be seen at any given moment.
// For the late/redo/down committee, which happen in parallel, we need to allocate
// extra space there.
@@ -732,6 +764,8 @@ func (wn *WebsocketNetwork) setup() {
SupportedProtocolVersions = []string{wn.config.NetworkProtocolVersion}
}
+ wn.messagesOfInterestRefresh = make(chan struct{}, 2)
+ wn.messagesOfInterestGeneration = 1 // something nonzero so that any new wsPeer needs updating
if wn.relayMessages {
wn.RegisterMessageInterest(protocol.CompactCertSigTag)
}
@@ -798,6 +832,9 @@ func (wn *WebsocketNetwork) Start() {
wn.wg.Add(1)
go wn.prioWeightRefresh()
}
+
+ go wn.postMessagesOfInterestThread()
+
wn.log.Infof("serving genesisID=%s on %#v with RandomID=%s", wn.GenesisID, wn.PublicAddress(), wn.RandomID)
}
@@ -880,7 +917,7 @@ func (wn *WebsocketNetwork) ClearHandlers() {
}
func (wn *WebsocketNetwork) setHeaders(header http.Header) {
- localTelemetryGUID := wn.log.GetTelemetryHostName()
+ localTelemetryGUID := wn.log.GetTelemetryGUID()
localInstanceName := wn.log.GetInstanceName()
header.Set(TelemetryIDHeader, localTelemetryGUID)
header.Set(InstanceNameHeader, localInstanceName)
@@ -933,11 +970,11 @@ func (wn *WebsocketNetwork) checkIncomingConnectionLimits(response http.Response
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: remoteHost,
- HostName: otherTelemetryGUID,
- Incoming: true,
- InstanceName: otherInstanceName,
- Reason: "Connection Limit",
+ Address: remoteHost,
+ TelemetryGUID: otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: otherInstanceName,
+ Reason: "Connection Limit",
})
response.WriteHeader(http.StatusServiceUnavailable)
return http.StatusServiceUnavailable
@@ -948,11 +985,11 @@ func (wn *WebsocketNetwork) checkIncomingConnectionLimits(response http.Response
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: remoteHost,
- HostName: otherTelemetryGUID,
- Incoming: true,
- InstanceName: otherInstanceName,
- Reason: "Remote IP Connection Limit",
+ Address: remoteHost,
+ TelemetryGUID: otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: otherInstanceName,
+ Reason: "Remote IP Connection Limit",
})
response.WriteHeader(http.StatusServiceUnavailable)
return http.StatusServiceUnavailable
@@ -1117,24 +1154,35 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
wn.log.With("event", "ConnectedIn").With("remote", trackedRequest.otherPublicAddr).With("local", localAddr).Infof("Accepted incoming connection from peer %s", trackedRequest.otherPublicAddr)
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent,
telemetryspec.PeerEventDetails{
- Address: trackedRequest.remoteHost,
- HostName: trackedRequest.otherTelemetryGUID,
- Incoming: true,
- InstanceName: trackedRequest.otherInstanceName,
+ Address: trackedRequest.remoteHost,
+ TelemetryGUID: trackedRequest.otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: trackedRequest.otherInstanceName,
})
- // We are careful to encode this prior to starting the server to avoid needing 'messagesOfInterestMu' here.
- if wn.messagesOfInterestEnc != nil {
- err = peer.Unicast(wn.ctx, wn.messagesOfInterestEnc, protocol.MsgOfInterestTag)
- if err != nil {
- wn.log.Infof("ws send msgOfInterest: %v", err)
- }
- }
+ wn.maybeSendMessagesOfInterest(peer, nil)
peers.Set(float64(wn.NumPeers()), nil)
incomingPeers.Set(float64(wn.numIncomingPeers()), nil)
}
+func (wn *WebsocketNetwork) maybeSendMessagesOfInterest(peer *wsPeer, messagesOfInterestEnc []byte) {
+ messagesOfInterestGeneration := atomic.LoadUint32(&wn.messagesOfInterestGeneration)
+ peerMessagesOfInterestGeneration := atomic.LoadUint32(&peer.messagesOfInterestGeneration)
+ if peerMessagesOfInterestGeneration != messagesOfInterestGeneration {
+ if messagesOfInterestEnc == nil {
+ wn.messagesOfInterestMu.Lock()
+ messagesOfInterestEnc = wn.messagesOfInterestEnc
+ wn.messagesOfInterestMu.Unlock()
+ }
+ if messagesOfInterestEnc != nil {
+ peer.sendMessagesOfInterest(messagesOfInterestGeneration, messagesOfInterestEnc)
+ } else {
+ wn.log.Infof("msgOfInterest Enc=nil, MOIGen=%d", messagesOfInterestGeneration)
+ }
+ }
+}
+
func (wn *WebsocketNetwork) messageHandlerThread(peersConnectivityCheckCh <-chan time.Time) {
defer wn.wg.Done()
@@ -1677,6 +1725,13 @@ func (wn *WebsocketNetwork) OnNetworkAdvance() {
wn.lastNetworkAdvanceMu.Lock()
defer wn.lastNetworkAdvanceMu.Unlock()
wn.lastNetworkAdvance = time.Now().UTC()
+ if wn.nodeInfo != nil && !wn.relayMessages && !wn.config.ForceFetchTransactions {
+ select {
+ case wn.messagesOfInterestRefresh <- struct{}{}:
+ default:
+ // if the notify chan is full, the refresh thread will pick up the latest state when it runs
+ }
+ }
}
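The non-blocking send onto the size-2 messagesOfInterestRefresh channel is a coalescing wake-up: bursts of OnNetworkAdvance calls collapse into at most a couple of pending refreshes, and the worker always re-reads current state rather than the event. A standalone sketch of the pattern:

	refresh := make(chan struct{}, 2)

	// producer: never blocks; drops the signal if a refresh is already queued
	notify := func() {
		select {
		case refresh <- struct{}{}:
		default:
		}
	}

	// consumer: each wake-up re-evaluates the desired subscriptions
	go func() {
		for range refresh {
			// recompute and apply current state here
		}
	}()
	notify()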
// sendPeerConnectionsTelemetryStatus sends a snapshot of the currently connected peers
@@ -1695,7 +1750,7 @@ func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() {
for _, peer := range peers {
connDetail := telemetryspec.PeerConnectionDetails{
ConnectionDuration: uint(now.Sub(peer.createTime).Seconds()),
- HostName: peer.TelemetryGUID,
+ TelemetryGUID: peer.TelemetryGUID,
InstanceName: peer.InstanceName,
}
if peer.outgoing {
@@ -2039,13 +2094,15 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
wn.log.With("event", "ConnectedOut").With("remote", addr).With("local", localAddr).Infof("Made outgoing connection to peer %v", addr)
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent,
telemetryspec.PeerEventDetails{
- Address: justHost(conn.RemoteAddr().String()),
- HostName: peer.TelemetryGUID,
- Incoming: false,
- InstanceName: peer.InstanceName,
- Endpoint: peer.GetAddress(),
+ Address: justHost(conn.RemoteAddr().String()),
+ TelemetryGUID: peer.TelemetryGUID,
+ Incoming: false,
+ InstanceName: peer.InstanceName,
+ Endpoint: peer.GetAddress(),
})
+ wn.maybeSendMessagesOfInterest(peer, nil)
+
peers.Set(float64(wn.NumPeers()), nil)
outgoingPeers.Set(float64(wn.numOutgoingPeers()), nil)
@@ -2085,7 +2142,7 @@ func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{}
}
// NewWebsocketNetwork constructor for websockets based gossip network
-func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (wn *WebsocketNetwork, err error) {
+func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo) (wn *WebsocketNetwork, err error) {
phonebook := MakePhonebook(config.ConnectionsRateLimitingCount,
time.Duration(config.ConnectionsRateLimitingWindowSeconds)*time.Second)
phonebook.ReplacePeerList(phonebookAddresses, config.DNSBootstrapID, PhoneBookEntryRelayRole)
@@ -2095,6 +2152,7 @@ func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddre
phonebook: phonebook,
GenesisID: genesisID,
NetworkID: networkID,
+ nodeInfo: nodeInfo,
}
wn.setup()
@@ -2103,7 +2161,7 @@ func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddre
// NewWebsocketGossipNode constructs a websocket network node and returns it as a GossipNode interface implementation
func NewWebsocketGossipNode(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (gn GossipNode, err error) {
- return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID)
+ return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID, nil)
}
// SetPrioScheme specifies the network priority scheme for a network node
@@ -2144,10 +2202,10 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
}
}
eventDetails := telemetryspec.PeerEventDetails{
- Address: peerAddr,
- HostName: peer.TelemetryGUID,
- Incoming: !peer.outgoing,
- InstanceName: peer.InstanceName,
+ Address: peerAddr,
+ TelemetryGUID: peer.TelemetryGUID,
+ Incoming: !peer.outgoing,
+ InstanceName: peer.InstanceName,
}
if peer.outgoing {
eventDetails.Endpoint = peer.GetAddress()
@@ -2254,10 +2312,23 @@ func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) error {
wn.messagesOfInterestMu.Lock()
defer wn.messagesOfInterestMu.Unlock()
- if wn.messagesOfInterestEncoded {
- return fmt.Errorf("network already started")
+ if wn.messagesOfInterest == nil {
+ wn.messagesOfInterest = make(map[protocol.Tag]bool)
+ for tag, flag := range defaultSendMessageTags {
+ wn.messagesOfInterest[tag] = flag
+ }
}
+ wn.messagesOfInterest[t] = true
+ wn.updateMessagesOfInterestEnc()
+ return nil
+}
+
+// DeregisterMessageInterest will tell peers to no longer send us traffic with a protocol Tag
+func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) error {
+ wn.messagesOfInterestMu.Lock()
+ defer wn.messagesOfInterestMu.Unlock()
+
if wn.messagesOfInterest == nil {
wn.messagesOfInterest = make(map[protocol.Tag]bool)
for tag, flag := range defaultSendMessageTags {
@@ -2265,10 +2336,38 @@ func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) error {
}
}
- wn.messagesOfInterest[t] = true
+ delete(wn.messagesOfInterest, t)
+ wn.updateMessagesOfInterestEnc()
return nil
}
+func (wn *WebsocketNetwork) updateMessagesOfInterestEnc() {
+ // must be called with wn.messagesOfInterestMu held
+ wn.messagesOfInterestEnc = MarshallMessageOfInterestMap(wn.messagesOfInterest)
+ wn.messagesOfInterestEncoded = true
+ atomic.AddUint32(&wn.messagesOfInterestGeneration, 1)
+ var peers []*wsPeer
+ peers, _ = wn.peerSnapshot(peers)
+ for _, peer := range peers {
+ wn.maybeSendMessagesOfInterest(peer, wn.messagesOfInterestEnc)
+ }
+}
+
+func (wn *WebsocketNetwork) postMessagesOfInterestThread() {
+ for {
+ <-wn.messagesOfInterestRefresh
+ // if we're not a relay and not participating, we don't need the txn pool
+ wantTXGossip := wn.nodeInfo.IsParticipating()
+ if wantTXGossip && (wn.wantTXGossip != wantTXGossipYes) {
+ wn.RegisterMessageInterest(protocol.TxnTag)
+ atomic.StoreUint32(&wn.wantTXGossip, wantTXGossipYes)
+ } else if !wantTXGossip && (wn.wantTXGossip != wantTXGossipNo) {
+ wn.DeregisterMessageInterest(protocol.TxnTag)
+ atomic.StoreUint32(&wn.wantTXGossip, wantTXGossipNo)
+ }
+ }
+}
+
// SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID.
func (wn *WebsocketNetwork) SubstituteGenesisID(rawURL string) string {
return strings.Replace(rawURL, "{genesisID}", wn.GenesisID, -1)
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index f7e5db2da..424586f01 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -219,6 +219,13 @@ func waitReady(t testing.TB, wn *WebsocketNetwork, timeout <-chan time.Time) boo
}
}
+func netStop(t testing.TB, wn *WebsocketNetwork, name string) {
+ t.Logf("stopping %s", name)
+ wn.Stop()
+	time.Sleep(time.Millisecond) // Stop is imperfect; some worker threads can log an error after Stop returns, which would fail the test
+ t.Logf("%s done", name)
+}
+
// Set up two nodes, test that a.Broadcast is received by B
func TestWebsocketNetworkBasic(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -226,7 +233,7 @@ func TestWebsocketNetworkBasic(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -234,7 +241,7 @@ func TestWebsocketNetworkBasic(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := newMessageCounter(t, 2)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -262,7 +269,7 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -270,7 +277,7 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := newMessageCounter(t, 2)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -303,7 +310,7 @@ func TestWebsocketPeerData(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -311,7 +318,7 @@ func TestWebsocketPeerData(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := newMessageCounter(t, 2)
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -341,7 +348,7 @@ func TestWebsocketNetworkArray(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -349,7 +356,7 @@ func TestWebsocketNetworkArray(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := newMessageCounter(t, 3)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -378,7 +385,7 @@ func TestWebsocketNetworkCancel(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -386,7 +393,7 @@ func TestWebsocketNetworkCancel(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := newMessageCounter(t, 100)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -461,7 +468,7 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
noAddressConfig := defaultConfig
noAddressConfig.NetAddress = ""
@@ -472,7 +479,7 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := newMessageCounter(t, 2)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -886,7 +893,7 @@ func TestDupFilter(t *testing.T) {
netA := makeTestFilterWebsocketNode(t, "a")
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestFilterWebsocketNode(t, "b")
netB.config.GossipFanout = 2
addrA, postListen := netA.Address()
@@ -894,7 +901,7 @@ func TestDupFilter(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := &messageCounterHandler{t: t, limit: 1, done: make(chan struct{})}
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
debugTag2 := protocol.ProposalPayloadTag
@@ -1032,7 +1039,7 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -1040,7 +1047,7 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
returns := make(chan uint64, 100)
bhandler := benchmarkHandler{returns}
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: &bhandler}})
@@ -1109,7 +1116,7 @@ func TestWebsocketNetworkPrio(t *testing.T) {
netA.config.GossipFanout = 1
netA.prioResponseChan = make(chan *wsPeer, 10)
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
prioB := netPrioStub{}
crypto.RandBytes(prioB.addr[:])
@@ -1122,7 +1129,7 @@ func TestWebsocketNetworkPrio(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
// Wait for response message to propagate from B to A
select {
@@ -1154,7 +1161,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
netA.config.GossipFanout = 2
netA.prioResponseChan = make(chan *wsPeer, 10)
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
addrA, postListen := netA.Address()
require.True(t, postListen)
@@ -1170,7 +1177,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counterB}})
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counterC := newMessageCounter(t, 1)
counterCdone := counterC.done
@@ -1382,7 +1389,7 @@ func TestDelayedMessageDrop(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
noAddressConfig := defaultConfig
noAddressConfig.NetAddress = ""
@@ -1393,7 +1400,7 @@ func TestDelayedMessageDrop(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
counter := newMessageCounter(t, 5)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -1435,7 +1442,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
netA := wn
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
noAddressConfig := defaultConfig
noAddressConfig.NetAddress = ""
@@ -1446,7 +1453,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -1456,11 +1463,24 @@ func TestSlowPeerDisconnection(t *testing.T) {
peers, _ = netA.peerSnapshot(peers)
require.Equalf(t, len(peers), 1, "Expected number of peers should be 1")
peer := peers[0]
+	// On connection, a MOI message may be sent; wait for it to go out
+ now := time.Now()
+ expire := now.Add(5 * time.Second)
+ for {
+ time.Sleep(time.Millisecond)
+ if len(peer.sendBufferHighPrio)+len(peer.sendBufferBulk) == 0 {
+ break
+ }
+ now = time.Now()
+ if now.After(expire) {
+ t.Errorf("wait for empty peer outbound queue expired")
+ }
+ }
// modify the peer on netA so the slow-writing-peer monitor decides to disconnect it
beforeLoopTime := time.Now()
atomic.StoreInt64(&peer.intermittentOutgoingMessageEnqueueTime, beforeLoopTime.Add(-maxMessageQueueDuration).Add(time.Second).UnixNano())
// wait up to 10 seconds for the monitor to figure out it needs to disconnect.
- expire := beforeLoopTime.Add(2 * slowWritingPeerMonitorInterval)
+ expire = beforeLoopTime.Add(2 * slowWritingPeerMonitorInterval)
for {
peers, _ = netA.peerSnapshot(peers)
if len(peers) == 0 || peers[0] != peer {
@@ -1495,7 +1515,7 @@ func TestForceMessageRelaying(t *testing.T) {
netA := wn
netA.config.GossipFanout = 1
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
counter := newMessageCounter(t, 5)
counterDone := counter.done
@@ -1510,7 +1530,7 @@ func TestForceMessageRelaying(t *testing.T) {
netB.config.GossipFanout = 1
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
noAddressConfig.ForceRelayMessages = true
netC := makeTestWebsocketNodeWithConfig(t, noAddressConfig)
@@ -1659,7 +1679,7 @@ func TestWebsocketNetworkTopicRoundtrip(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -1667,7 +1687,7 @@ func TestWebsocketNetworkTopicRoundtrip(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
netB.RegisterHandlers([]TaggedMessageHandler{
{
@@ -1707,6 +1727,50 @@ func TestWebsocketNetworkTopicRoundtrip(t *testing.T) {
assert.Equal(t, 5, int(sum[0]))
}
+var (
+ ft1 = protocol.Tag("F1")
+ ft2 = protocol.Tag("F2")
+ ft3 = protocol.Tag("F3")
+ ft4 = protocol.Tag("F4")
+
+ testTags = []protocol.Tag{ft1, ft2, ft3, ft4}
+)
+
+func waitPeerInternalChanQuiet(t *testing.T, netA *WebsocketNetwork) {
+	// now wait for the asynchronous thread within netA to _apply_ the MOI to its peer for netB...
+ timeout := time.Now().Add(100 * time.Millisecond)
+ waiting := true
+ for waiting {
+ time.Sleep(1 * time.Millisecond)
+ peers := netA.GetPeers(PeersConnectedIn)
+ for _, pg := range peers {
+ wp := pg.(*wsPeer)
+ if len(wp.sendBufferHighPrio)+len(wp.sendBufferBulk) == 0 {
+ waiting = false
+ break
+ }
+ }
+ if time.Now().After(timeout) {
+ for _, pg := range peers {
+ wp := pg.(*wsPeer)
+				if len(wp.sendBufferHighPrio)+len(wp.sendBufferBulk) != 0 {
+ t.Fatalf("netA peer buff empty timeout len(high)=%d, len(bulk)=%d", len(wp.sendBufferHighPrio), len(wp.sendBufferBulk))
+ }
+ }
+ }
+ }
+}
+
+func waitForMOIRefreshQuiet(netB *WebsocketNetwork) {
+ for {
+ // wait for async messagesOfInterestRefresh
+ time.Sleep(time.Millisecond)
+ if len(netB.messagesOfInterestRefresh) == 0 {
+ break
+ }
+ }
+}
+
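The TX gossip tests below all share one synchronization pattern; a condensed sketch (handlers and wait groups as set up in each test):

    // 1. ask netB to recompute its messages-of-interest
    netB.OnNetworkAdvance()
    waitForMOIRefreshQuiet(netB)
    // 2. send a tracked message B->A on the same ordered connection;
    //    once it arrives, the MOI update must have been delivered first
    netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
    messageFilterArriveWg.Wait()
    // 3. broadcast A->B across all tags and count what passes the filter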
// Set up two nodes, have one of them request a certain message tag mask, and verify that the other follows it.
func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1716,7 +1780,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
netA.config.EnablePingHandler = false
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
netB.config.EnablePingHandler = false
@@ -1725,12 +1789,129 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
+
+ incomingMsgSync := deadlock.Mutex{}
+ msgCounters := make(map[protocol.Tag]int)
+ expectedCounts := make(map[protocol.Tag]int)
+ expectedCounts[ft1] = 5
+ expectedCounts[ft2] = 5
+ var failed uint32
+ messageArriveWg := sync.WaitGroup{}
+ msgHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ t.Logf("A->B %s", msg.Tag)
+ incomingMsgSync.Lock()
+ defer incomingMsgSync.Unlock()
+ expected := expectedCounts[msg.Tag]
+ if expected < 1 {
+ atomic.StoreUint32(&failed, 1)
+ t.Logf("UNEXPECTED A->B %s", msg.Tag)
+ return
+ }
+ msgCounters[msg.Tag] = msgCounters[msg.Tag] + 1
+ messageArriveWg.Done()
+ return
+ }
+ messageFilterArriveWg := sync.WaitGroup{}
+ messageFilterArriveWg.Add(1)
+ waitMessageArriveHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ messageFilterArriveWg.Done()
+ return
+ }
+
+ // register all the handlers.
+ taggedHandlers := []TaggedMessageHandler{}
+ for _, tag := range testTags {
+ taggedHandlers = append(taggedHandlers, TaggedMessageHandler{
+ Tag: tag,
+ MessageHandler: HandlerFunc(msgHandler),
+ })
+ }
+ netB.RegisterHandlers(taggedHandlers)
+ netA.RegisterHandlers([]TaggedMessageHandler{
+ {
+ Tag: protocol.VoteBundleTag,
+ MessageHandler: HandlerFunc(waitMessageArriveHandler),
+ }})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ waitReady(t, netB, readyTimeout.C)
+
+	// have netB ask netA to send it only ft1 and ft2 messages
+ require.NoError(t, netB.RegisterMessageInterest(ft1))
+ require.NoError(t, netB.RegisterMessageInterest(ft2))
+ // send another message which we can track, so that we'll know that the first message was delivered.
+ netB.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ messageFilterArriveWg.Wait()
+ waitPeerInternalChanQuiet(t, netA)
+
+ messageArriveWg.Add(5 * 2) // we're expecting exactly 10 messages.
+ // send 5 messages of few types.
+ for i := 0; i < 5; i++ {
+ if atomic.LoadUint32(&failed) != 0 {
+ t.Errorf("failed")
+ break
+ }
+ netA.Broadcast(context.Background(), ft1, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), ft3, []byte{0, 1, 2, 3, 4}, true, nil) // NOT in MOI
+ netA.Broadcast(context.Background(), ft2, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), ft4, []byte{0, 1, 2, 3, 4}, true, nil) // NOT in MOI
+ }
+ if atomic.LoadUint32(&failed) != 0 {
+ t.Errorf("failed")
+ }
+ // wait until all the expected messages arrive.
+ messageArriveWg.Wait()
+ incomingMsgSync.Lock()
+ defer incomingMsgSync.Unlock()
+ require.Equal(t, 2, len(msgCounters))
+ for tag, count := range msgCounters {
+ if atomic.LoadUint32(&failed) != 0 {
+ t.Errorf("failed")
+ break
+ }
+ if tag == ft1 || tag == ft2 {
+ require.Equal(t, 5, count)
+ } else {
+ require.Equal(t, 0, count)
+ }
+ }
+}
+
+// Set up two nodes and have one of them work through the TX gossip message-of-interest logic.
+// Tests:
+// * wn.config.ForceFetchTransactions
+// * wn.config.ForceRelayMessages
+// * NodeInfo.IsParticipating() + WebsocketNetwork.OnNetworkAdvance()
+func TestWebsocketNetworkTXMessageOfInterestRelay(t *testing.T) {
+ // Tests that A->B follows MOI
+ partitiontest.PartitionTest(t)
+
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.config.EnablePingHandler = false
+
+ netA.Start()
+ defer netStop(t, netA, "A")
+ bConfig := defaultConfig
+ bConfig.NetAddress = ""
+ bConfig.ForceRelayMessages = true
+ netB := makeTestWebsocketNodeWithConfig(t, bConfig)
+ netB.config.GossipFanout = 1
+ netB.config.EnablePingHandler = false
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+ netB.Start()
+ defer netStop(t, netB, "B")
incomingMsgSync := deadlock.Mutex{}
msgCounters := make(map[protocol.Tag]int)
messageArriveWg := sync.WaitGroup{}
msgHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ t.Logf("A->B %s", msg.Tag)
incomingMsgSync.Lock()
defer incomingMsgSync.Unlock()
msgCounters[msg.Tag] = msgCounters[msg.Tag] + 1
@@ -1763,13 +1944,13 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
waitReady(t, netA, readyTimeout.C)
waitReady(t, netB, readyTimeout.C)
- // have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag
- netB.Broadcast(context.Background(), protocol.MsgOfInterestTag, MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag, protocol.ProposalPayloadTag}), true, nil)
+ netB.OnNetworkAdvance()
+ waitForMOIRefreshQuiet(netB)
// send another message which we can track, so that we'll know that the first message was delivered.
netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
messageFilterArriveWg.Wait()
- messageArriveWg.Add(5 * 2) // we're expecting exactly 10 messages.
+ messageArriveWg.Add(5 * 4) // we're expecting exactly 20 messages.
// send 5 messages of few types.
for i := 0; i < 5; i++ {
netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
@@ -1779,13 +1960,296 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
}
// wait until all the expected messages arrive.
messageArriveWg.Wait()
+ incomingMsgSync.Lock()
+ require.Equal(t, 4, len(msgCounters))
+ for _, count := range msgCounters {
+ require.Equal(t, 5, count)
+ }
+ incomingMsgSync.Unlock()
+}
+
+func TestWebsocketNetworkTXMessageOfInterestForceTx(t *testing.T) {
+ // Tests that A->B follows MOI
+ partitiontest.PartitionTest(t)
+
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.config.EnablePingHandler = false
+
+ netA.Start()
+ defer netStop(t, netA, "A")
+ bConfig := defaultConfig
+ bConfig.NetAddress = ""
+ bConfig.ForceFetchTransactions = true
+ netB := makeTestWebsocketNodeWithConfig(t, bConfig)
+ netB.config.GossipFanout = 1
+ netB.config.EnablePingHandler = false
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+ netB.Start()
+ defer netStop(t, netB, "B")
+
+ incomingMsgSync := deadlock.Mutex{}
+ msgCounters := make(map[protocol.Tag]int)
+ messageArriveWg := sync.WaitGroup{}
+ msgHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ t.Logf("A->B %s", msg.Tag)
+ incomingMsgSync.Lock()
+ defer incomingMsgSync.Unlock()
+ msgCounters[msg.Tag] = msgCounters[msg.Tag] + 1
+ messageArriveWg.Done()
+ return
+ }
+ messageFilterArriveWg := sync.WaitGroup{}
+ messageFilterArriveWg.Add(1)
+ waitMessageArriveHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ messageFilterArriveWg.Done()
+ return
+ }
+
+ // register all the handlers.
+ taggedHandlers := []TaggedMessageHandler{}
+ for tag := range defaultSendMessageTags {
+ taggedHandlers = append(taggedHandlers, TaggedMessageHandler{
+ Tag: tag,
+ MessageHandler: HandlerFunc(msgHandler),
+ })
+ }
+ netB.RegisterHandlers(taggedHandlers)
+ netA.RegisterHandlers([]TaggedMessageHandler{
+ {
+ Tag: protocol.AgreementVoteTag,
+ MessageHandler: HandlerFunc(waitMessageArriveHandler),
+ }})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ waitReady(t, netB, readyTimeout.C)
+
+ netB.OnNetworkAdvance()
+ waitForMOIRefreshQuiet(netB)
+ // send another message which we can track, so that we'll know that the first message was delivered.
+ netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ messageFilterArriveWg.Wait()
+
+ messageArriveWg.Add(5 * 4) // we're expecting exactly 20 messages.
+ // send 5 messages of few types.
+ for i := 0; i < 5; i++ {
+ netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ }
+ // wait until all the expected messages arrive.
+ messageArriveWg.Wait()
+ incomingMsgSync.Lock()
+ require.Equal(t, 4, len(msgCounters))
+ for _, count := range msgCounters {
+ require.Equal(t, 5, count)
+ }
+ incomingMsgSync.Unlock()
+}
+
+func TestWebsocketNetworkTXMessageOfInterestNPN(t *testing.T) {
+ // Tests that A->B follows MOI
+ partitiontest.PartitionTest(t)
+
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.config.EnablePingHandler = false
+ netA.Start()
+ defer netStop(t, netA, "A")
+
+ bConfig := defaultConfig
+ bConfig.NetAddress = ""
+ netB := makeTestWebsocketNodeWithConfig(t, bConfig)
+ netB.config.GossipFanout = 1
+ netB.config.EnablePingHandler = false
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+ netB.Start()
+ defer netStop(t, netB, "B")
+ require.False(t, netB.relayMessages)
+ require.Equal(t, uint32(wantTXGossipUnk), atomic.LoadUint32(&netB.wantTXGossip))
+
+ incomingMsgSync := deadlock.Mutex{}
+ msgCounters := make(map[protocol.Tag]int)
+ messageArriveWg := sync.WaitGroup{}
+ msgHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ t.Logf("A->B %s", msg.Tag)
+ incomingMsgSync.Lock()
+ defer incomingMsgSync.Unlock()
+ msgCounters[msg.Tag] = msgCounters[msg.Tag] + 1
+ messageArriveWg.Done()
+ return
+ }
+ messageFilterArriveWg := sync.WaitGroup{}
+ messageFilterArriveWg.Add(1)
+ waitMessageArriveHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ messageFilterArriveWg.Done()
+ return
+ }
+
+ // register all the handlers.
+ taggedHandlers := []TaggedMessageHandler{}
+ for tag := range defaultSendMessageTags {
+ taggedHandlers = append(taggedHandlers, TaggedMessageHandler{
+ Tag: tag,
+ MessageHandler: HandlerFunc(msgHandler),
+ })
+ }
+ netB.RegisterHandlers(taggedHandlers)
+ netA.RegisterHandlers([]TaggedMessageHandler{
+ {
+ Tag: protocol.AgreementVoteTag,
+ MessageHandler: HandlerFunc(waitMessageArriveHandler),
+ }})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ waitReady(t, netB, readyTimeout.C)
+
+ netB.OnNetworkAdvance()
+ waitForMOIRefreshQuiet(netB)
+ for i := 0; i < 10; i++ {
+ if atomic.LoadUint32(&netB.wantTXGossip) == uint32(wantTXGossipNo) {
+ break
+ }
+ time.Sleep(time.Millisecond)
+ }
+ require.Equal(t, uint32(wantTXGossipNo), atomic.LoadUint32(&netB.wantTXGossip))
+ // send another message which we can track, so that we'll know that the first message was delivered.
+ netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ messageFilterArriveWg.Wait()
+ waitPeerInternalChanQuiet(t, netA)
+
+ messageArriveWg.Add(5 * 3) // we're expecting exactly 15 messages.
+ // send 5 messages of few types.
+ for i := 0; i < 5; i++ {
+ netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte{0, 1, 2, 3, 4}, true, nil) // THESE WILL BE DROPPED
+ netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ }
+ // wait until all the expected messages arrive.
+ messageArriveWg.Wait()
+ incomingMsgSync.Lock()
+ require.Equal(t, 3, len(msgCounters), msgCounters)
for tag, count := range msgCounters {
- if tag == protocol.AgreementVoteTag || tag == protocol.ProposalPayloadTag {
+ if tag == protocol.TxnTag {
+ require.Equal(t, 0, count)
+ } else {
+ require.Equal(t, 5, count)
+ }
+ }
+ incomingMsgSync.Unlock()
+}
+
+type participatingNodeInfo struct {
+}
+
+func (nnni *participatingNodeInfo) IsParticipating() bool {
+ return true
+}
+
+func TestWebsocketNetworkTXMessageOfInterestPN(t *testing.T) {
+ // Tests that A->B follows MOI
+ partitiontest.PartitionTest(t)
+
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.config.EnablePingHandler = false
+ netA.Start()
+ defer netStop(t, netA, "A")
+
+ bConfig := defaultConfig
+ bConfig.NetAddress = ""
+ netB := makeTestWebsocketNodeWithConfig(t, bConfig)
+ netB.nodeInfo = &participatingNodeInfo{}
+ netB.config.GossipFanout = 1
+ netB.config.EnablePingHandler = false
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+ netB.Start()
+ defer netStop(t, netB, "B")
+ require.False(t, netB.relayMessages)
+ require.Equal(t, uint32(wantTXGossipUnk), atomic.LoadUint32(&netB.wantTXGossip))
+
+ incomingMsgSync := deadlock.Mutex{}
+ msgCounters := make(map[protocol.Tag]int)
+ messageArriveWg := sync.WaitGroup{}
+ msgHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ t.Logf("A->B %s", msg.Tag)
+ incomingMsgSync.Lock()
+ defer incomingMsgSync.Unlock()
+ msgCounters[msg.Tag] = msgCounters[msg.Tag] + 1
+ messageArriveWg.Done()
+ return
+ }
+ messageFilterArriveWg := sync.WaitGroup{}
+ messageFilterArriveWg.Add(1)
+ waitMessageArriveHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ messageFilterArriveWg.Done()
+ return
+ }
+
+ // register all the handlers.
+ taggedHandlers := []TaggedMessageHandler{}
+ for tag := range defaultSendMessageTags {
+ taggedHandlers = append(taggedHandlers, TaggedMessageHandler{
+ Tag: tag,
+ MessageHandler: HandlerFunc(msgHandler),
+ })
+ }
+ netB.RegisterHandlers(taggedHandlers)
+ netA.RegisterHandlers([]TaggedMessageHandler{
+ {
+ Tag: protocol.AgreementVoteTag,
+ MessageHandler: HandlerFunc(waitMessageArriveHandler),
+ }})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ waitReady(t, netB, readyTimeout.C)
+
+ netB.OnNetworkAdvance()
+ waitForMOIRefreshQuiet(netB)
+ for i := 0; i < 10; i++ {
+ if atomic.LoadUint32(&netB.wantTXGossip) == uint32(wantTXGossipYes) {
+ break
+ }
+ time.Sleep(time.Millisecond)
+ }
+ require.Equal(t, uint32(wantTXGossipYes), atomic.LoadUint32(&netB.wantTXGossip))
+ // send another message which we can track, so that we'll know that the first message was delivered.
+ netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ messageFilterArriveWg.Wait()
+
+ messageArriveWg.Add(5 * 4) // we're expecting exactly 20 messages.
+ // send 5 messages of few types.
+ for i := 0; i < 5; i++ {
+ netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ }
+ // wait until all the expected messages arrive.
+ messageArriveWg.Wait()
+ incomingMsgSync.Lock()
+ require.Equal(t, 4, len(msgCounters))
+ for tag, count := range msgCounters {
+ if tag == protocol.TxnTag {
require.Equal(t, 5, count)
} else {
- require.Equal(t, 0, count)
+ require.Equal(t, 5, count)
}
}
+ incomingMsgSync.Unlock()
}
// Set up two nodes, have one of them disconnect from the other, and monitor disconnection error on the side that did not issue the disconnection.
@@ -1803,7 +2267,7 @@ func TestWebsocketDisconnection(t *testing.T) {
netA.log = dl
netA.Start()
- defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ defer netStop(t, netA, "A")
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
netB.config.EnablePingHandler = false
@@ -1812,7 +2276,7 @@ func TestWebsocketDisconnection(t *testing.T) {
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
- defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+ defer netStop(t, netB, "B")
msgHandlerA := func(msg IncomingMessage) (out OutgoingMessage) {
// if we received a message, send a message back.
diff --git a/network/wsPeer.go b/network/wsPeer.go
index 337dae07b..2b451478e 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -48,14 +48,14 @@ const averageMessageLength = 2 * 1024 // Most of the messages are smaller tha
const msgsInReadBufferPerPeer = 10
var networkSentBytesTotal = metrics.MakeCounter(metrics.NetworkSentBytesTotal)
-var networkSentBytesByTag = metrics.NewTagCounter("algod_network_sent_bytes_{TAG}", "Number of bytes that were sent over the network per message tag")
+var networkSentBytesByTag = metrics.NewTagCounter("algod_network_sent_bytes_{TAG}", "Number of bytes that were sent over the network for {TAG} messages")
var networkReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkReceivedBytesTotal)
-var networkReceivedBytesByTag = metrics.NewTagCounter("algod_network_received_bytes_{TAG}", "Number of bytes that were received from the network per message tag")
+var networkReceivedBytesByTag = metrics.NewTagCounter("algod_network_received_bytes_{TAG}", "Number of bytes that were received from the network for {TAG} messages")
var networkMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkMessageReceivedTotal)
-var networkMessageReceivedByTag = metrics.NewTagCounter("algod_network_message_received_{TAG}", "Number of complete messages that were received from the network per message tag")
+var networkMessageReceivedByTag = metrics.NewTagCounter("algod_network_message_received_{TAG}", "Number of complete messages that were received from the network for {TAG} messages")
var networkMessageSentTotal = metrics.MakeCounter(metrics.NetworkMessageSentTotal)
-var networkMessageSentByTag = metrics.NewTagCounter("algod_network_message_sent_{TAG}", "Number of complete messages that were sent to the network per message tag")
+var networkMessageSentByTag = metrics.NewTagCounter("algod_network_message_sent_{TAG}", "Number of complete messages that were sent to the network for {TAG} messages")
var networkConnectionsDroppedTotal = metrics.MakeCounter(metrics.NetworkConnectionsDroppedTotal)
var networkMessageQueueMicrosTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_message_sent_queue_micros_total", Description: "Total microseconds message spent waiting in queue to be sent"})
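The {TAG} placeholder in a TagCounter name and description is expanded per message tag, so each declaration fans out into one counter per tag; an illustrative call (payload length assumed):

    // Each distinct tag string materializes its own counter, e.g.
    // algod_network_sent_bytes_TX for protocol.TxnTag.
    networkSentBytesByTag.Add(string(protocol.TxnTag), uint64(len(data)))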
@@ -206,6 +206,9 @@ type wsPeer struct {
// the only guarantee is that it's being accessed only during startup and/or by the sending loop go routine.
sendMessageTag map[protocol.Tag]bool
+	// messagesOfInterestGeneration is this node's messagesOfInterest version that we have sent to this peer.
+ messagesOfInterestGeneration uint32
+
// connMonitor used to measure the relative performance of the connection
// compared to the other outgoing connections. Incoming connections would have this
// field set to nil.
@@ -877,3 +880,12 @@ func (wp *wsPeer) setPeerData(key string, value interface{}) {
wp.clientDataStore[key] = value
}
}
+
+func (wp *wsPeer) sendMessagesOfInterest(messagesOfInterestGeneration uint32, messagesOfInterestEnc []byte) {
+ err := wp.Unicast(wp.net.ctx, messagesOfInterestEnc, protocol.MsgOfInterestTag)
+ if err != nil {
+ wp.net.log.Errorf("ws send msgOfInterest: %v", err)
+ } else {
+ atomic.StoreUint32(&wp.messagesOfInterestGeneration, messagesOfInterestGeneration)
+ }
+}
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index 60a0e3a79..550eb5fbd 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -22,6 +22,7 @@ import (
"time"
"unsafe"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -32,6 +33,9 @@ func TestCheckSlowWritingPeer(t *testing.T) {
now := time.Now()
peer := wsPeer{
intermittentOutgoingMessageEnqueueTime: 0,
+ wsPeerCore: wsPeerCore{net: &WebsocketNetwork{
+ log: logging.TestingLog(t),
+ }},
}
require.Equal(t, peer.CheckSlowWritingPeer(now), false)
diff --git a/node/node.go b/node/node.go
index c8ff6aa3e..55668c1b0 100644
--- a/node/node.go
+++ b/node/node.go
@@ -166,7 +166,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.rootDir = rootDir
node.log = log.With("name", cfg.NetAddress)
node.genesisID = genesis.ID()
- node.genesisHash = crypto.HashObj(genesis)
+ node.genesisHash = genesis.Hash()
node.devMode = genesis.DevMode
if node.devMode {
@@ -175,7 +175,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.config = cfg
// tie network, block fetcher, and agreement services together
- p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network)
+ p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, node)
if err != nil {
log.Errorf("could not create websocket node: %v", err)
return nil, err
@@ -195,8 +195,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
log.Errorf("Unable to create genesis directory: %v", err)
return nil, err
}
- var genalloc bookkeeping.GenesisBalances
- genalloc, err = bootstrapData(genesis, log)
+ genalloc, err := genesis.Balances()
if err != nil {
log.Errorf("Cannot load genesis allocation: %v", err)
return nil, err
@@ -313,40 +312,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
return node, err
}
-func bootstrapData(genesis bookkeeping.Genesis, log logging.Logger) (bookkeeping.GenesisBalances, error) {
- genalloc := make(map[basics.Address]basics.AccountData)
- for _, entry := range genesis.Allocation {
- addr, err := basics.UnmarshalChecksumAddress(entry.Address)
- if err != nil {
- log.Errorf("Cannot parse genesis addr %s: %v", entry.Address, err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- _, present := genalloc[addr]
- if present {
- err = fmt.Errorf("repeated allocation to %s", entry.Address)
- log.Error(err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- genalloc[addr] = entry.State
- }
-
- feeSink, err := basics.UnmarshalChecksumAddress(genesis.FeeSink)
- if err != nil {
- log.Errorf("Cannot parse fee sink addr %s: %v", genesis.FeeSink, err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- rewardsPool, err := basics.UnmarshalChecksumAddress(genesis.RewardsPool)
- if err != nil {
- log.Errorf("Cannot parse rewards pool addr %s: %v", genesis.RewardsPool, err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- return bookkeeping.MakeTimestampedGenesisBalances(genalloc, feeSink, rewardsPool, genesis.Timestamp), nil
-}
-
// Config returns a copy of the node's Local configuration
func (node *AlgorandFullNode) Config() config.Local {
return node.config
@@ -403,12 +368,7 @@ func (node *AlgorandFullNode) Start() {
// startMonitoringRoutines starts the internal monitoring routines used by the node.
func (node *AlgorandFullNode) startMonitoringRoutines() {
- node.monitoringRoutinesWaitGroup.Add(3)
-
- // PKI TODO: Remove this with #2596
- // Periodically check for new participation keys
- go node.checkForParticipationKeys(node.ctx.Done())
-
+ node.monitoringRoutinesWaitGroup.Add(2)
go node.txPoolGaugeThread(node.ctx.Done())
// Delete old participation keys
go node.oldKeyDeletionThread(node.ctx.Done())
@@ -781,24 +741,6 @@ func ensureParticipationDB(genesisDir string, log logging.Logger) (account.Parti
return account.MakeParticipationRegistry(accessor, log)
}
-// Reload participation keys from disk periodically
-func (node *AlgorandFullNode) checkForParticipationKeys(done <-chan struct{}) {
- defer node.monitoringRoutinesWaitGroup.Done()
- ticker := time.NewTicker(node.config.ParticipationKeysRefreshInterval)
- for {
- select {
- case <-ticker.C:
- err := node.loadParticipationKeys()
- if err != nil {
- node.log.Errorf("Could not refresh participation keys: %v", err)
- }
- case <-done:
- ticker.Stop()
- return
- }
- }
-}
-
// ListParticipationKeys returns all participation keys currently installed on the node
func (node *AlgorandFullNode) ListParticipationKeys() (partKeys []account.ParticipationRecord, err error) {
return node.accountManager.Registry().GetAll(), nil
@@ -916,7 +858,7 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc
}
defer inputdb.Close()
- partkey, err := account.RestoreParticipation(inputdb)
+ partkey, err := account.RestoreParticipationWithSecrets(inputdb)
if err != nil {
return account.ParticipationID{}, err
}
@@ -927,20 +869,19 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc
}
// Tell the AccountManager about the Participation (dupes don't matter) so we ignore the return value
- _ = node.accountManager.AddParticipation(partkey)
+ added := node.accountManager.AddParticipation(partkey)
+ if !added {
+ return account.ParticipationID{}, fmt.Errorf("ParticipationRegistry: cannot register duplicate participation key")
+ }
- err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration)
+ err = insertStateProofToRegistry(partkey, node)
if err != nil {
return account.ParticipationID{}, err
}
- newFilename := config.PartKeyFilename(partkey.ID().String(), uint64(partkey.FirstValid), uint64(partkey.LastValid))
- newFullyQualifiedFilename := filepath.Join(outDir, filepath.Base(newFilename))
-
- err = os.Rename(fullyQualifiedTempFile, newFullyQualifiedFilename)
-
+ err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration)
if err != nil {
- return account.ParticipationID{}, nil
+ return account.ParticipationID{}, err
}
return partkey.ID(), nil
@@ -1379,3 +1320,20 @@ func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []
func (node *AlgorandFullNode) Record(account basics.Address, round basics.Round, participationType account.ParticipationAction) {
node.accountManager.Record(account, round, participationType)
}
+
+// IsParticipating implements network.NodeInfo
+//
+// This function is not fully precise. node.ledger and
+// node.accountManager might move relative to each other and there is
+// no synchronization. This is good-enough for current uses of
+// IsParticipating() which is used in networking code to determine if
+// the node should ask for transaction gossip (or skip it to save
+// bandwidth). The current transaction pool size is about 3
+// rounds. Starting to receive transaction gossip 10 rounds in the
+// future when we might propose or vote on blocks in that future is a
+// little extra buffer but seems reasonable at this time. -- bolson
+// 2022-05-18
+func (node *AlgorandFullNode) IsParticipating() bool {
+ round := node.ledger.Latest() + 1
+ return node.accountManager.HasLiveKeys(round, round+10)
+}
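IsParticipating satisfies the network.NodeInfo interface that NewWebsocketNetwork now accepts; its shape, as implied by the calls in this diff:

    // NodeInfo, as used by postMessagesOfInterestThread in wsNetwork.go:
    // the network asks the node whether it should subscribe to txn gossip.
    type NodeInfo interface {
        IsParticipating() bool
    }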
diff --git a/scripts/compute_branch.sh b/scripts/compute_branch.sh
index abfbf472e..f0ff025d9 100755
--- a/scripts/compute_branch.sh
+++ b/scripts/compute_branch.sh
@@ -1,7 +1,16 @@
#!/usr/bin/env bash
-if [ -z "${TRAVIS_BRANCH}" ]; then
- BRANCH=$(git rev-parse --abbrev-ref HEAD)
+if [[ -n $(git status --porcelain) ]]; then
+    # If the working tree isn't clean, default to HEAD to match old behavior.
+ BRANCH="HEAD"
+elif [ -z "${TRAVIS_BRANCH}" ]; then
+ # if there is no travis branch, set based on tag or branch
+ case "$(git describe --tags)" in
+ *"beta") BRANCH="rel/beta" ;;
+ *"stable") BRANCH="rel/stable" ;;
+ *"nightly") BRANCH="rel/nightly" ;;
+ *) BRANCH=$(git rev-parse --abbrev-ref HEAD)
+ esac
else
BRANCH="${TRAVIS_BRANCH}"
fi
diff --git a/scripts/install_linux_deps.sh b/scripts/install_linux_deps.sh
index 791cad080..a51fdf7b5 100755
--- a/scripts/install_linux_deps.sh
+++ b/scripts/install_linux_deps.sh
@@ -8,11 +8,15 @@ ARCH_DEPS="boost boost-libs expect jq autoconf shellcheck sqlite python-virtuale
UBUNTU_DEPS="libtool libboost-math-dev expect jq autoconf shellcheck sqlite3 python3-venv build-essential"
FEDORA_DEPS="boost-devel expect jq autoconf ShellCheck sqlite python-virtualenv"
-if [ "${DISTRIB}" = "arch" ]; then
- pacman -S --refresh --needed --noconfirm $ARCH_DEPS
-elif [ "${DISTRIB}" = "fedora" ]; then
- dnf -y install $FEDORA_DEPS
-else
- apt-get update
- apt-get -y install $UBUNTU_DEPS
-fi
+case $DISTRIB in
+ "arch" | "manjaro")
+ pacman -S --refresh --needed --noconfirm $ARCH_DEPS
+ ;;
+ "fedora")
+ dnf -y install $FEDORA_DEPS
+ ;;
+ *)
+ apt-get update
+ apt-get -y install $UBUNTU_DEPS
+ ;;
+esac
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 7cf8de405..fa8cfecdf 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -19,7 +19,6 @@ package pingpong
import (
"fmt"
"io/ioutil"
- "math"
"math/rand"
"os"
"path/filepath"
@@ -36,7 +35,6 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/db"
)
@@ -132,17 +130,6 @@ func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (acc
return
}
-// throttle transaction rate
-func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64) {
- localTimeDelta := time.Since(startTime)
- currentTps := float64(totalSent) / localTimeDelta.Seconds()
- if currentTps > float64(cfg.TxnPerSec) {
- sleepSec := float64(totalSent)/float64(cfg.TxnPerSec) - localTimeDelta.Seconds()
- sleepTime := time.Duration(int64(math.Round(sleepSec*1000))) * time.Millisecond
- util.NanoSleep(sleepTime)
- }
-}
-
// Prepare assets for asset transaction testing
// Step 1) Create X assets for each of the participant accounts
// Step 2) For each participant account, opt-in to assets of all other participant accounts
@@ -153,13 +140,14 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
return
}
- var startTime = time.Now()
- var totalSent uint64 = 0
resultAssetMaps = make(map[uint64]v1.AssetParams)
// optIns contains own and explicitly opted-in assets
optIns = make(map[uint64][]string)
numCreatedAssetsByAddr := make(map[string]int, len(accounts))
+
+ nextSendTime := time.Now()
+
// 1) Create X assets for each of the participant accounts
for addr := range accounts {
if addr == pps.cfg.SrcAccount {
@@ -179,6 +167,7 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
fmt.Printf("cfg.NumAsset %v, addrAccount.AssetParams %v\n", pps.cfg.NumAsset, addrAccount.AssetParams)
totalSupply := pps.cfg.MinAccountAsset * uint64(pps.cfg.NumPartAccounts) * 9 * uint64(pps.cfg.GroupSize) * uint64(pps.cfg.RefreshTime.Seconds()) / pps.cfg.TxnPerSec
+
// create assets in participant account
for i := 0; i < toCreate; i++ {
var metaLen = 32
@@ -205,14 +194,12 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
return
}
tx.Note = pps.makeNextUniqueNoteField()
+ schedule(pps.cfg.TxnPerSec, &nextSendTime)
_, err = signAndBroadcastTransaction(accounts[addr], tx, client)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err)
return
}
-
- totalSent++
- throttleTransactionRate(startTime, pps.cfg, totalSent)
}
}
@@ -255,10 +242,6 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
// optInsByAddr tracks only explicitly opted-in assetsA
// optInsByAddr tracks only explicitly opted-in assets
- // reset rate-control
- startTime = time.Now()
- totalSent = 0
-
// 2) For each participant account, opt-in up to proto.MaxAssetsPerAccount assets of all other participant accounts
for addr := range accounts {
if addr == pps.cfg.SrcAccount {
@@ -308,17 +291,14 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
}
tx.Note = pps.makeNextUniqueNoteField()
+ schedule(pps.cfg.TxnPerSec, &nextSendTime)
_, err = signAndBroadcastTransaction(accounts[addr], tx, client)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset optin failed with error %v\n", err)
return
}
- totalSent++
-
optIns[k] = append(optIns[k], addr)
optInsByAddr[addr][k] = true
-
- throttleTransactionRate(startTime, pps.cfg, totalSent)
}
}
@@ -354,10 +334,6 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
}
}
- // reset rate-control
- startTime = time.Now()
- totalSent = 0
-
// Step 3) Evenly distribute the assets across all opted-in accounts
for k, creator := range allAssets {
if !pps.cfg.Quiet {
@@ -403,14 +379,12 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
}
}
+ schedule(pps.cfg.TxnPerSec, &nextSendTime)
_, err = signAndBroadcastTransaction(accounts[creator], tx, client)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset distribution failed with error %v\n", err)
return
}
-
- totalSent++
- throttleTransactionRate(startTime, pps.cfg, totalSent)
}
// append the asset to the result assets
resultAssetMaps[k] = assetParams[k]
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index 73ad4e4ec..db6cbb4ed 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -31,7 +31,6 @@ const ConfigFilename = "ppconfig.json"
// PpConfig defines configuration structure for
type PpConfig struct {
SrcAccount string
- DelayBetweenTxn time.Duration
RandomizeFee bool
RandomizeAmt bool
RandomizeDst bool
@@ -41,7 +40,6 @@ type PpConfig struct {
TxnPerSec uint64
NumPartAccounts uint32
RunTime time.Duration
- RestTime time.Duration
RefreshTime time.Duration
MinAccountFunds uint64
Quiet bool
@@ -71,7 +69,6 @@ type PpConfig struct {
// DefaultConfig object for Ping Pong
var DefaultConfig = PpConfig{
SrcAccount: "",
- DelayBetweenTxn: 100,
RandomizeFee: false,
RandomizeAmt: false,
RandomizeDst: false,
@@ -81,7 +78,6 @@ var DefaultConfig = PpConfig{
TxnPerSec: 200,
NumPartAccounts: 10,
RunTime: 10 * time.Second,
- RestTime: 1 * time.Hour, // Long default rest to avoid accidental DoS
RefreshTime: 10 * time.Second,
MinAccountFunds: 100000,
GroupSize: 1,
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 71d787f79..5fcde0373 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -262,6 +262,16 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequi
return
}
+// Wait for `*nextSendTime` and update it afterwards.
+func schedule(tps uint64, nextSendTime *time.Time) {
+ dur := time.Until(*nextSendTime)
+ if dur > 0 {
+ time.Sleep(dur)
+ }
+
+ *nextSendTime = nextSendTime.Add(time.Second / time.Duration(tps))
+}
+
func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error {
var srcFunds, minFund uint64
var err error
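schedule replaces the removed throttleTransactionRate: rather than back-computing an average rate, each sender keeps a nextSendTime cursor and sleeps until it, advancing it by 1/tps per transaction. A usage sketch (the send call is hypothetical):

    // Sketch: pace an arbitrary loop at tps transactions per second.
    nextSendTime := time.Now()
    for i := 0; i < n; i++ {
        schedule(tps, &nextSendTime) // sleep until the slot, then advance it
        send(i)                      // hypothetical send
    }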
@@ -272,7 +282,6 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
return err
}
- startTime := time.Now()
var totalSent uint64
// A fee of 0 will cause the function to use the fee suggested by the network
@@ -282,12 +291,12 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
if err != nil {
return err
}
-
fmt.Printf("adjusting account balance to %d\n", minFund)
+
+ nextSendTime := time.Now()
for {
accountsAdjusted := 0
for addr, acct := range accounts {
-
if addr == pps.cfg.SrcAccount {
continue
}
@@ -307,6 +316,7 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
fmt.Printf("adjusting balance of account %v by %d\n ", addr, toSend)
}
+ schedule(cfg.TxnPerSec, &nextSendTime)
tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend)
if err != nil {
if strings.Contains(err.Error(), "broadcast queue full") {
@@ -323,7 +333,6 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
}
totalSent++
- throttleTransactionRate(startTime, cfg, totalSent)
}
accounts[cfg.SrcAccount].setBalance(srcFunds)
// wait until all the above transactions are sent, or that we have no more transactions
@@ -462,7 +471,6 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
if cfg.MaxRuntime > 0 {
endTime = time.Now().Add(cfg.MaxRuntime)
}
- restTime := cfg.RestTime
refreshTime := time.Now().Add(cfg.RefreshTime)
var nftThrottler *throttler
@@ -473,6 +481,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
lastLog := time.Now()
nextLog := lastLog.Add(logPeriod)
+ nextSendTime := time.Now()
for {
if ctx.Err() != nil {
_, _ = fmt.Fprintf(os.Stderr, "error bad context in RunPingPong: %v\n", ctx.Err())
@@ -520,7 +529,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
}
toList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
- sent, succeeded, err := pps.sendFromTo(fromList, toList, ac)
+ sent, succeeded, err := pps.sendFromTo(fromList, toList, ac, &nextSendTime)
totalSent += sent
totalSucceeded += succeeded
if err != nil {
@@ -535,16 +544,10 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
refreshTime = refreshTime.Add(cfg.RefreshTime)
}
-
- throttleTransactionRate(startTime, cfg, totalSent)
}
timeDelta := time.Since(startTime)
_, _ = fmt.Fprintf(os.Stdout, "Sent %d transactions (%d attempted) in %d seconds\n", totalSucceeded, totalSent, int(math.Round(timeDelta.Seconds())))
- if cfg.RestTime > 0 {
- _, _ = fmt.Fprintf(os.Stdout, "Pausing %d seconds before sending more transactions\n", int(math.Round(cfg.RestTime.Seconds())))
- time.Sleep(restTime)
- }
}
}
@@ -672,7 +675,7 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
func (pps *WorkerState) sendFromTo(
fromList, toList []string,
- client libgoal.Client,
+ client libgoal.Client, nextSendTime *time.Time,
) (sentCount, successCount uint64, err error) {
accounts := pps.accounts
cinfo := pps.cinfo
@@ -693,8 +696,6 @@ func (pps *WorkerState) sendFromTo(
*ap = p
assetsByCreator[c] = append(assetsByCreator[c], ap)
}
- lastTransactionTime := time.Now()
- timeCredit := time.Duration(0)
for i := 0; i < len(fromList); i = (i + 1) % len(fromList) {
from := fromList[i]
@@ -770,6 +771,7 @@ func (pps *WorkerState) sendFromTo(
return
}
+ schedule(cfg.TxnPerSec, nextSendTime)
sentCount++
_, sendErr = client.BroadcastTransaction(stxn)
} else {
@@ -856,6 +858,7 @@ func (pps *WorkerState) sendFromTo(
}
}
+ schedule(cfg.TxnPerSec, nextSendTime)
sentCount++
sendErr = client.BroadcastTransactionGroup(stxGroup)
}
@@ -871,30 +874,6 @@ func (pps *WorkerState) sendFromTo(
accounts[from].addBalance(fromBalanceChange)
// avoid updating the "to" account.
- // the logic here would sleep for the remaining of time to match the desired cfg.DelayBetweenTxn
- if cfg.DelayBetweenTxn > 0 {
- time.Sleep(cfg.DelayBetweenTxn)
- }
- if cfg.TxnPerSec > 0 {
- timeCredit += time.Second / time.Duration(cfg.TxnPerSec)
-
- now := time.Now()
- took := now.Sub(lastTransactionTime)
- timeCredit -= took
- if timeCredit > 0 {
- time.Sleep(timeCredit)
- timeCredit -= time.Since(now)
- } else if timeCredit < -1000*time.Millisecond {
- // cap the "time debt" to 1000 ms.
- timeCredit = -1000 * time.Millisecond
- }
- lastTransactionTime = time.Now()
-
- // since we just slept enough here, we can take it off the counters
- sentCount--
- successCount--
- // fmt.Printf("itration took %v\n", took)
- }
}
return
}
diff --git a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp
index 75d81faeb..26a2b24e1 100644
--- a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp
@@ -9,7 +9,7 @@ proc TestGoalDryrun { DRREQ_FILE TEST_PRIMARY_NODE_DIR } {
spawn goal clerk dryrun-remote -d $TEST_PRIMARY_NODE_DIR -D $DRREQ_FILE -v
expect {
timeout { ::AlgorandGoal::Abort "goal clerk dryrun-remote timeout" }
- "cost:" {set COST 1; exp_continue}
+ "budget consumed:" {set COST 1; exp_continue}
-re {(ApprovalProgram)} {set PROGRAM_TYPE $expect_out(1,string); exp_continue}
"PASS" {set PASSED 1; close}
}
diff --git a/test/e2e-go/cli/goal/expect/pingpongTest.exp b/test/e2e-go/cli/goal/expect/pingpongTest.exp
index 40aec03c8..99fb9a3ee 100644
--- a/test/e2e-go/cli/goal/expect/pingpongTest.exp
+++ b/test/e2e-go/cli/goal/expect/pingpongTest.exp
@@ -51,28 +51,28 @@ proc pingpongTest { TEST_ALGO_DIR TEST_DATA_DIR} {
set pingpong_duration 5
- set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 5 --minaccount 100000000"
- set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(5_mediumops_mediumhash) "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(8_bigops_mediumhash) "--appprogops 300 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
-
- set pingpongArray(10_payment_transaction) "--tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000"
- set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000"
- set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --rest 0 --refresh 10 --numaccounts 50"
+ set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 5 --minaccount 100000000"
+ set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(5_mediumops_mediumhash) "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(8_bigops_mediumhash) "--appprogops 300 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+
+ set pingpongArray(10_payment_transaction) "--tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --numaccounts 10 --refresh 10 --mf=1000"
+ set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --numaccounts 10 --refresh 10 --mf=1000"
+ set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --refresh 10 --numaccounts 50"
foreach index [array names pingpongArray] {
diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
index a151ec2e2..9cf58310a 100644
--- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go
+++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
@@ -60,8 +60,8 @@ func registerParticipationAndWait(t *testing.T, client libgoal.Client, part acco
sAccount := part.Address().String()
sWH, err := client.GetUnencryptedWalletHandle()
require.NoError(t, err)
- goOnlineTx, err := client.MakeUnsignedGoOnlineTx(sAccount, &part, txParams.LastRound+1, txParams.LastRound+1, txParams.Fee, [32]byte{})
- require.NoError(t, err)
+ goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, txParams.Fee, txParams.LastRound+1, txParams.LastRound+1, [32]byte{}, true)
+ assert.NoError(t, err)
require.Equal(t, sAccount, goOnlineTx.Src().String())
onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx)
require.NoError(t, err)
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index c89ca262f..a09b566a7 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -177,11 +177,18 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
partKeyFirstValid := uint64(0)
partKeyValidityPeriod := uint64(10000)
partKeyLastValid := partKeyFirstValid + partKeyValidityPeriod
+
+ maxTxnLife := consensus[protocol.ConsensusVersion("shortpartkeysprotocol")].MaxTxnLife
+
+ if partKeyLastValid > maxTxnLife {
+ partKeyLastValid = maxTxnLife
+ }
+
partkeyResponse, _, err := client.GenParticipationKeys(newAccount, partKeyFirstValid, partKeyLastValid, 0)
a.NoError(err, "rest client should be able to add participation key to new account")
a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account")
// account uses part key to go online
- goOnlineTx, err := client.MakeUnsignedGoOnlineTx(newAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{})
+ goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, partKeyFirstValid, partKeyLastValid, [32]byte{}, true)
a.NoError(err, "should be able to make go online tx")
a.Equal(newAccount, goOnlineTx.Src().String(), "go online response should echo queried account")
onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, goOnlineTx)
@@ -290,7 +297,8 @@ func TestAccountGoesOnlineForShortPeriod(t *testing.T) {
a.NoError(err, "rest client should be able to add participation key to new account")
a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account")
// account uses part key to go online
- goOnlineTx, err := client.MakeUnsignedGoOnlineTx(newAccount, &partkeyResponse, partKeyFirstValid, partKeyLastValid, transactionFee, [32]byte{})
+ goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, partKeyFirstValid, partKeyLastValid, [32]byte{}, true)
+ a.NoError(err)
a.Equal(goOnlineTx.KeyregTxnFields.StateProofPK.IsEmpty(), false, "stateproof key should not be zero")
a.NoError(err, "should be able to make go online tx")
a.Equal(newAccount, goOnlineTx.Src().String(), "go online response should echo queried account")
diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
index 18bdec369..45ac6c97c 100644
--- a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
+++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
@@ -39,6 +39,17 @@ import (
"github.com/algorand/go-algorand/util/db"
)
+// TestOverlappingParticipationKeys "overlaps" participation keys across various
+// nodes. Keys are installed in a rotating fashion across the nodes, where
+// ((Network Round - 1) Mod 10) = nodeIdx, and nodeIdx indexes into an
+// "array" of nodes such as {Node1, Node2, Node3}. The Mod 10 simply extracts
+// the last "digit" of the number:
+// Round: 13 -> 13 - 1 = 12 -> 12 Mod 10 -> 2 -> Node3 with nodeIdx == 2
+//
+// The keys are "overlapped" in the sense that a key registered to a node
+// overlaps with other installed keys that are also valid. For example,
+// PKI 1 (valid 3-15), PKI 2 (valid 13-25), and PKI 3 (valid 23-35) might all be
+// installed on the same node.
func TestOverlappingParticipationKeys(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
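
The rotation described in the comment above is a single piece of modular
arithmetic. A minimal Go sketch of it (nodeIndex is an illustrative helper,
not a function in the test):

    package main

    import "fmt"

    // nodeIndex maps a network round onto a rotating node slot:
    // ((round - 1) mod 10) picks the slot; the test skips slots
    // beyond its actual node count.
    func nodeIndex(round uint64) uint64 {
        return (round - 1) % 10
    }

    func main() {
        for _, round := range []uint64{1, 3, 11, 13} {
            fmt.Printf("round %d -> nodeIdx %d\n", round, nodeIndex(round))
        }
        // round 13 -> nodeIdx 2, i.e. Node3 in a {Node1, Node2, Node3} layout
    }
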
@@ -50,6 +61,7 @@ func TestOverlappingParticipationKeys(t *testing.T) {
shortPartKeysProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
shortPartKeysProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
// keys round = current - 2 * (2 * 1) (see selector.go)
+ // --> return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback))
// new keys must exist at least 4 rounds prior use
shortPartKeysProtocol.SeedLookback = 2
shortPartKeysProtocol.SeedRefreshInterval = 1
@@ -68,17 +80,10 @@ func TestOverlappingParticipationKeys(t *testing.T) {
defer fixture.Shutdown()
accountsNum := len(fixture.NodeDataDirs())
- for _, dataDir := range fixture.NodeDataDirs() {
- cfg, err := config.LoadConfigFromDisk(dataDir)
- a.NoError(err)
- cfg.ParticipationKeysRefreshInterval = 500 * time.Millisecond
- err = cfg.SaveToDisk(dataDir)
- a.NoError(err)
- }
genesis, err := bookkeeping.LoadGenesisFromFile(filepath.Join(fixture.PrimaryDataDir(), "genesis.json"))
a.NoError(err)
- genesisHash := crypto.HashObj(genesis)
+ genesisHash := genesis.Hash()
rootKeys := make(map[int]*account.Root)
regTransactions := make(map[int]transactions.SignedTxn)
lastRound := uint64(39) // check 3 rounds of keys rotations
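
With the shortened parameters above (SeedLookback = 2, SeedRefreshInterval = 1),
the selector arithmetic referenced in the comment means a proposal at round r is
judged against keys from round r - 4. A small sketch of that saturating
computation (keysRound is an illustrative stand-in for the basics.Round method):

    package main

    import "fmt"

    // keysRound mirrors r.SubSaturate(2 * seedRefreshInterval * seedLookback):
    // the round whose participation keys a proposal at round r is judged against.
    func keysRound(r, seedRefreshInterval, seedLookback uint64) uint64 {
        lookback := 2 * seedRefreshInterval * seedLookback
        if r < lookback {
            return 0 // saturate at zero instead of underflowing
        }
        return r - lookback
    }

    func main() {
        fmt.Println(keysRound(10, 1, 2)) // 6: keys must exist 4 rounds before use
        fmt.Println(keysRound(3, 1, 2))  // 0: saturated
    }
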
@@ -89,10 +94,23 @@ func TestOverlappingParticipationKeys(t *testing.T) {
continue
}
acctIdx := (round - 1) % 10
+
+		// Prepare the registration keys ahead of time. Note the + 10 is there because we use Mod 10.
+
+		// These variables control the rounds from which the transaction is valid.
+		// They also determine the name of the file produced, EXCEPT that
+		// prepareParticipationKey() adds 2 to txStartRound for the filename,
+		// so the file for round 1 will be 3.15.
+		// For round 11 (the next round that Mod 10 indexes to 1), the filename will be
+		// 13.25, which results in a 2-round overlap.
txStartRound := round
txEndRound := txStartRound + 10 + 4
+ // The registration variables here control when the participation key will actually be valid from
+ // For round 1, that means from 1-16 (one round of overlap)
+ // For round 11 (the next round that Mod 10 will index to 1), that means the 11-26
regStartRound := round
regEndRound := regStartRound + 11 + 4
+
err = prepareParticipationKey(a, &fixture, acctIdx, txStartRound, txEndRound, regStartRound, regEndRound, genesisHash, rootKeys, regTransactions, config.Consensus[protocol.ConsensusCurrentVersion])
a.NoError(err)
}
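
Worked through, the window arithmetic above gives round 1 a filename window of
3.15 and a registration window of [1..16], and round 11 a filename window of
13.25 and a registration window of [11..26]. A short sketch reproducing the
numbers (standalone, with illustrative names):

    package main

    import "fmt"

    func main() {
        for _, round := range []uint64{1, 11} {
            txStart, txEnd := round, round+10+4
            regStart, regEnd := round, round+11+4
            // prepareParticipationKey() shifts the filename start by 2.
            fileStart, fileEnd := txStart+2, txEnd
            fmt.Printf("round %2d: file %d.%d reg [%d..%d]\n",
                round, fileStart, fileEnd, regStart, regEnd)
        }
        // round  1: file 3.15 reg [1..16]
        // round 11: file 13.25 reg [11..26]
    }
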
@@ -100,17 +118,39 @@ func TestOverlappingParticipationKeys(t *testing.T) {
fixture.Start()
currentRound := uint64(0)
fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.NC)
+
+	// ******** IMPORTANT ********
+	// It is CRITICAL that this for loop NOT BLOCK.
+	// This loop assumes that it stays current with the round of the network.
+	// Remember: this test runs while the network is advancing rounds in parallel.
+	// If this test blocks for more than a couple of seconds, the network round count will advance
+	// past the current "currentRound" variable. That means the "addParticipationKey" function
+	// will NOT install the participation key in time for the shortened SeedLookback, resulting
+	// in a network stall and a test failure.
for {
err := fixture.WaitForRoundWithTimeout(currentRound + 1)
a.NoError(err)
+
+		// A sanity check that makes sure the round of the network is the same as
+		// our current round variable.
+		sts, err := fixture.GetAlgodClientForController(fixture.NC).Status()
+		a.NoError(err, "the network stalled; see the test comments and review node.log in each node's data directory for details.")
+ a.Equal(sts.LastRound, currentRound+1)
+
currentRound++
if (currentRound-1)%10 < uint64(accountsNum) {
acctIdx := (currentRound - 1) % 10
+
+			// We add two because the filenames were stored with a plus 2.
startRound := currentRound + 2 // +2 and -2 below to balance, start/end must match in part key file name
endRound := startRound + 10 + 4 - 2
+
regStartRound := currentRound
regEndRound := regStartRound + 11 + 4
+			// This cannot block! (See above.)
+			// We pull the files from disk according to their start/end-round filenames,
+			// install them, and send out a registration transaction.
pk, err := addParticipationKey(a, &fixture, acctIdx, startRound, endRound, regTransactions)
a.NoError(err)
t.Logf("[.] Round %d, Added reg key for node %d range [%d..%d] %s\n", currentRound, acctIdx, regStartRound, regEndRound, hex.EncodeToString(pk[:8]))
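
The non-blocking requirement above amounts to a pacing invariant: after each
wait, the node's reported round must equal the loop's own counter, or the test
has fallen behind. A stripped-down sketch of that pattern (statusFunc and
trackRounds are illustrative stand-ins for the fixture's client):

    package main

    import "fmt"

    // statusFunc stands in for fixture.GetAlgodClientForController(...).Status().
    type statusFunc func() (lastRound uint64, err error)

    // trackRounds advances a local counter in lockstep with the network and
    // fails fast as soon as the two disagree (i.e. the loop body blocked too long).
    func trackRounds(status statusFunc, lastRound uint64) error {
        currentRound := uint64(0)
        for currentRound < lastRound {
            got, err := status()
            if err != nil {
                return err
            }
            if got != currentRound+1 {
                return fmt.Errorf("fell behind: network at %d, expected %d", got, currentRound+1)
            }
            currentRound++
            // ... per-round, non-blocking work goes here ...
        }
        return nil
    }

    func main() {
        round := uint64(0)
        fakeStatus := func() (uint64, error) { round++; return round, nil }
        fmt.Println(trackRounds(fakeStatus, 5)) // <nil>
    }
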
@@ -128,17 +168,20 @@ func TestOverlappingParticipationKeys(t *testing.T) {
func addParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, startRound, endRound uint64, regTransactions map[int]transactions.SignedTxn) (crypto.OneTimeSignatureVerifier, error) {
dataDir := fixture.NodeDataDirs()[acctNum]
nc := fixture.GetNodeControllerForDataDir(dataDir)
- genesisDir, err := nc.GetGenesisDir()
partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", startRound, endRound))
- partKeyNameTarget := filepath.Join(genesisDir, config.PartKeyFilename("Wallet", startRound, endRound))
- // make the rename in the background to ensure it won't take too long. We have ~4 rounds to complete this.
- go os.Rename(partKeyName, partKeyNameTarget)
+	// This call can take more than a couple of seconds; we can't let it block,
+	// so we wrap it in a goroutine.
+ go func() {
+ clientController := fixture.GetLibGoalClientFromNodeController(nc)
+ _, err := clientController.AddParticipationKey(partKeyName)
+ a.NoError(err)
+ }()
signedTxn := regTransactions[int(startRound-2)]
a.NotEmpty(signedTxn.Sig)
- _, err = fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn)
+ _, err := fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn)
a.NoError(err)
return signedTxn.Txn.KeyregTxnFields.VotePK, err
}
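
Wrapping the slow AddParticipationKey call in a goroutine, as above, is the
usual way to keep a deadline-sensitive caller responsive. A minimal sketch of
the same shape (installKey is an illustrative stand-in; the test asserts inside
the goroutine rather than collecting an error channel):

    package main

    import (
        "fmt"
        "time"
    )

    // installKey stands in for clientController.AddParticipationKey(partKeyName).
    func installKey(name string) error {
        time.Sleep(2 * time.Second) // the real call can take a couple of seconds
        fmt.Println("installed", name)
        return nil
    }

    func main() {
        errc := make(chan error, 1) // buffered, so the goroutine never blocks on send
        go func() { errc <- installKey("Wallet.3.15.partkey") }()

        // The caller is free to do round-sensitive work immediately,
        // e.g. broadcasting the registration transaction.
        fmt.Println("sent registration txn without waiting for the install")

        if err := <-errc; err != nil { // collect the result later
            fmt.Println("install failed:", err)
        }
    }
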
diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go
index 126b5acf0..06b392856 100644
--- a/test/e2e-go/features/participation/participationExpiration_test.go
+++ b/test/e2e-go/features/participation/participationExpiration_test.go
@@ -31,7 +31,7 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
-func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string) {
+func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string, includeStateProofs bool) {
a := require.New(fixtures.SynchronizedTest(t))
pClient := fixture.GetLibGoalClientForNamedNode("Primary")
@@ -84,7 +84,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f
a.Equal(sAccount, partkeyResponse.Parent.String())
// account uses part key to go online
- goOnlineTx, err := sClient.MakeUnsignedGoOnlineTx(sAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{})
+ goOnlineTx, err := sClient.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, 0, 0, [32]byte{}, includeStateProofs)
a.NoError(err)
a.Equal(sAccount, goOnlineTx.Src().String())
@@ -191,7 +191,7 @@ func TestParticipationAccountsExpirationFuture(t *testing.T) {
fixture.Start()
defer fixture.Shutdown()
- testExpirationAccounts(t, &fixture, basics.Offline, "future")
+ testExpirationAccounts(t, &fixture, basics.Offline, "future", true)
}
// TestParticipationAccountsExpirationNonFuture tests that sending a transaction to an account with
@@ -214,5 +214,5 @@ func TestParticipationAccountsExpirationNonFuture(t *testing.T) {
fixture.Start()
defer fixture.Shutdown()
- testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29))
+ testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29), false)
}
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index d12715beb..be2ff60ff 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -18,6 +18,8 @@ package transactions
import (
"fmt"
+ "io/ioutil"
+ "os"
"path/filepath"
"testing"
@@ -86,7 +88,7 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
a.NoError(err, "should be no errors when creating partkeys")
a.Equal(initiallyOffline, partkeyResponse.Address().String(), "successful partkey creation should echo account")
- goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(initiallyOffline, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
+ goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(initiallyOffline, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
a.NoError(err, "should be able to make go online tx")
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err, "should be able to get unencrypted wallet handle")
@@ -168,13 +170,20 @@ func TestCloseOnError(t *testing.T) {
// get the current round for partkey creation
_, curRound := fixture.GetBalanceAndRound(initiallyOnline)
+ tempDir, err := ioutil.TempDir(os.TempDir(), "test-close-on-error")
+ require.NoError(t, err)
+ defer os.RemoveAll(tempDir)
+
+ var partkeyFile string
+ _, partkeyFile, err = client.GenParticipationKeysTo(initiallyOffline, 0, curRound+1000, 0, tempDir)
+
// make a participation key for initiallyOffline
- _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
+ _, err = client.AddParticipationKey(partkeyFile)
a.NoError(err)
// check duplicate keys does not crash
- _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
- errMsg := fmt.Sprintf("ParticipationKeys exist for the range 0 to %d", curRound+1000)
- a.Equal(errMsg, err.Error())
+ _, err = client.AddParticipationKey(partkeyFile)
+ a.Error(err)
+ a.Contains(err.Error(), "cannot register duplicate participation key")
// check lastValid < firstValid does not crash
_, _, err = client.GenParticipationKeys(initiallyOffline, curRound+1001, curRound+1000, 0)
expected := fmt.Sprintf("FillDBWithParticipationKeys: firstValid %d is after lastValid %d", int(curRound+1001), int(curRound+1000))
diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
index 77c11ccb4..064a61596 100644
--- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
+++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
@@ -128,7 +128,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) {
partkeyResponse, _, err := client.GenParticipationKeys(account, curRound-10, curRound+1000, 0)
a.NoError(err, "should be no errors when creating many partkeys, creation number %v", i)
a.Equal(account, partkeyResponse.Address, "successful partkey creation should echo account")
- goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
+ goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
a.NoError(err, "should be able to make go online tx %v", i)
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err, "should be able to get unencrypted wallet handle")
@@ -149,7 +149,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) {
a.NoError(err, "should be no errors when creating many partkeys, creation number %v", i)
a.Equal(account, partkeyResponse.Address, "successful partkey creation should echo account")
- goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
+ goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
a.NoError(err, "should be able to make go online tx %v", i)
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err, "should be able to get unencrypted wallet handle")
diff --git a/test/heapwatch/bwstart.sh b/test/heapwatch/bwstart.sh
index 3770136f7..a2fa8ef28 100644
--- a/test/heapwatch/bwstart.sh
+++ b/test/heapwatch/bwstart.sh
@@ -35,10 +35,10 @@ python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --no-he
echo "$!" > .heapWatch.pid
# TODO: other pingpong modes
-pingpong run -d "${TESTDIR}/node1" --tps 20 --rest 0 --run 0 &
+pingpong run -d "${TESTDIR}/node1" --tps 20 --run 0 &
echo "$!" > .pingpong1.pid
-pingpong run -d "${TESTDIR}/node2" --tps 20 --rest 0 --run 0 &
+pingpong run -d "${TESTDIR}/node2" --tps 20 --run 0 &
echo "$!" > .pingpong2.pid
diff --git a/test/heapwatch/start.sh b/test/heapwatch/start.sh
index cb4b37eca..82560f118 100755
--- a/test/heapwatch/start.sh
+++ b/test/heapwatch/start.sh
@@ -25,10 +25,10 @@ python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --perio
echo "$!" > .heapWatch.pid
# TODO: other pingpong modes
-pingpong run -d "${TESTDIR}/Node1" --tps 10 --rest 0 --run 0 --nftasapersecond 200 &
+pingpong run -d "${TESTDIR}/Node1" --tps 10 --run 0 --nftasapersecond 200 &
echo "$!" > .pingpong1.pid
-pingpong run -d "${TESTDIR}/Node2" --tps 10 --rest 0 --run 0 --nftasapersecond 200 &
+pingpong run -d "${TESTDIR}/Node2" --tps 10 --run 0 --nftasapersecond 200 &
echo "$!" > .pingpong2.pid
diff --git a/test/scripts/e2e_subs/goal-partkey-commands.sh b/test/scripts/e2e_subs/goal-partkey-commands.sh
new file mode 100755
index 000000000..94c831c86
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-partkey-commands.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+# errors are handled manually, so no -e
+set -x
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# Registered Account ParticipationID Last Used First round Last round
+# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000
+OUTPUT=$(goal account listpartkeys)
+# In case there are multiple keys, make sure we are checking the correct one.
+OUTPUT=$(echo "$OUTPUT"|grep "yes.*3000"|tr -s ' ')
+if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000 ]]; then echo "Last round should be 3000 but wasn't."; exit 1; fi
+
+#Dumping participation key info from /tmp/tmpwtomya9x/net/Node...
+#
+#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ
+#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ
+#Last vote round: 3
+#Last block proposal round: 4
+#Effective first round: 0
+#Effective last round: 3000
+#First round: 0
+#Last round: 3000
+#Key dilution: 10000
+#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo=
+#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4=
+OUTPUT=$(goal account partkeyinfo)
+if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000'; then echo "Last round should have been 3000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000'; then echo "Effective last round should have been 3000."; exit 1; fi
+# 100 or 10000 due to arm64 bug
+if ! echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi
+
+# Test multiple data directory supported
+NUM_OUTPUT_1=$(echo "$OUTPUT"|grep -c 'Participation ID')
+OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2")
+NUM_OUTPUT_2=$(echo "$OUTPUT"|grep -c 'Participation ID')
+if (( "$NUM_OUTPUT_2" <= "$NUM_OUTPUT_1" )); then echo "Should have found more participation keys when checking both data directories."; exit 1; fi
+
+# get stderr from this one
+OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
+EXPECTED_ERR="Only one data directory can be specified for this command."
+if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi
+
+create_and_fund_account () {
+ local TEMP_ACCT=$(${gcmd} account new|awk '{ print $6 }')
+ ${gcmd} clerk send -f "$INITIAL_ACCOUNT" -t "$TEMP_ACCT" -a 1000000 > /dev/null
+ echo "$TEMP_ACCT"
+}
+
+# given key should be installed and have the expected yes/no state
+# $1 - yes or no
+# $2 - a participation id
+# $3 - error message
+verify_registered_state () {
+ # look for participation ID anywhere in the partkeyinfo output
+ if ! goal account partkeyinfo | grep -q "$2"; then
+ fail_test "Key was not installed properly: $3"
+ fi
+
+ # looking for yes/no, and the 8 character head of participation id in this line:
+ # yes LFMT...RHJQ 4UPT6AQC... 4 0 3000
+ if ! goal account listpartkeys | grep -q "$1.*$(echo "$2" | cut -c1-8)\.\.\."; then
+ fail_test "Unexpected key state: $3"
+ fi
+}
+
+# goal account installpartkey
+# install manually generated participation keys (do not register)
+NEW_ACCOUNT_1=$(create_and_fund_account)
+algokey part generate --keyfile test_partkey --first 0 --last 3000 --parent "$NEW_ACCOUNT_1"
+PARTICIPATION_ID_1=$(goal account installpartkey --delete-input --partkey test_partkey|awk '{ print $7 }')
+verify_registered_state "no" "$PARTICIPATION_ID_1" "goal account installpartkey"
+
+# goal account addpartkey
+# generate and install participation keys (do not register)
+NEW_ACCOUNT_2=$(create_and_fund_account)
+PARTICIPATION_ID_2=$(goal account addpartkey -a "$NEW_ACCOUNT_2" --roundFirstValid 0 --roundLastValid 3000|awk '{ print $7 }')
+verify_registered_state "no" "$PARTICIPATION_ID_2" "goal account addpartkey"
+
+# goal account renewpartkeys
+# generate, install, and register
+NEW_ACCOUNT_3=$(create_and_fund_account)
+PARTICIPATION_ID_3=$(${gcmd} account renewpartkey --roundLastValid 3000 -a "$NEW_ACCOUNT_3"|tail -n 1|awk '{ print $7 }')
+verify_registered_state "yes" "$PARTICIPATION_ID_3" "goal account renewpartkey"
+
+# goal account changeonlinestatus (--account)
+verify_registered_state "no" "$PARTICIPATION_ID_1" "goal account installpartkey (before)"
+${gcmd} account changeonlinestatus -a "$NEW_ACCOUNT_1"
+verify_registered_state "yes" "$PARTICIPATION_ID_1" "goal account installpartkey (after)"
+
+# goal account renewallpartkeys
+# goal account changeonlinestatus (--partkey)
+# These do not work as expected; whether they work at all is unverified, and we should consider removing them.
diff --git a/test/scripts/e2e_subs/goal-partkey-information.sh b/test/scripts/e2e_subs/goal-partkey-information.sh
deleted file mode 100755
index 6d5069c55..000000000
--- a/test/scripts/e2e_subs/goal-partkey-information.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env bash
-# TIMEOUT=300
-
-# errors are handled manually, so no -e
-set -x
-
-date "+$0 start %Y%m%d_%H%M%S"
-
-# Registered Account ParticipationID Last Used First round Last round
-# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000
-OUTPUT=$(goal account listpartkeys)
-OUTPUT=$(echo "$OUTPUT"|tail -n 1|tr -s ' ')
-if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi
-if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi
-if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi
-if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000 ]]; then echo "Last round should be 3000 but wasn't."; exit 1; fi
-
-#Dumping participation key info from /tmp/tmpwtomya9x/net/Node...
-#
-#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ
-#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ
-#Last vote round: 3
-#Last block proposal round: 4
-#Effective first round: 0
-#Effective last round: 3000000
-#First round: 0
-#Last round: 3000000
-#Key dilution: 10000
-#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo=
-#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4=
-OUTPUT=$(goal account partkeyinfo)
-if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi
-if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000'; then echo "Last round should have been 3000."; exit 1; fi
-if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000'; then echo "Effective last round should have been 3000."; exit 1; fi
-# 100 or 10000 due to arm64 bug
-if ! echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi
-if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi
-
-# Test multiple data directory supported
-OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2")
-OUTPUT=$(echo "$OUTPUT"|grep -c 'Participation ID')
-if [[ "$OUTPUT" != "2" ]]; then echo "Two Participation IDs should have been found."; exit 1; fi
-
-# get stderr from this one
-OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
-EXPECTED_ERR="Only one data directory can be specified for this command."
-if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi
diff --git a/test/testdata/configs/config-v22.json b/test/testdata/configs/config-v22.json
new file mode 100644
index 000000000..c569e4c93
--- /dev/null
+++ b/test/testdata/configs/config-v22.json
@@ -0,0 +1,105 @@
+{
+ "Version": 22,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 7,
+ "AgreementIncomingProposalsQueueLength": 25,
+ "AgreementIncomingVotesQueueLength": 10000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 250000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 30000
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/Makefile b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
new file mode 100644
index 000000000..13130934d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
@@ -0,0 +1,15 @@
+PARAMS=-w 8 -R 1 -N 4 -n 8 -H 2 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: topology.json net.json genesis.json
+
+topology.json: gen_topology.py
+ python gen_topology.py
+
+net.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json topology.json
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
new file mode 100644
index 000000000..7298256d8
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
@@ -0,0 +1,32 @@
+import json
+import os
+
+node_types = {"R":1, "N":4, "NPN":2}
+node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"}
+regions = [
+ "AWS-US-EAST-2",
+ "AWS-US-WEST-2",
+ "AWS-EU-CENTRAL-1",
+ "AWS-EU-WEST-2",
+ "AWS-AP-SOUTHEAST-1",
+ "AWS-AP-SOUTHEAST-2"
+]
+
+network = "alphanet"
+
+host_elements = []
+region_count = len(regions)
+for node_type in node_types.keys():
+ node_count = node_types[node_type]
+ region_size = node_size[node_type]
+ for i in range(node_count):
+ host = {}
+ node_name = node_type + str(i + 1) + "-" + network
+ region = regions[i % region_count]
+ host["Name"] = node_name
+ host["Template"] = region + region_size
+ host_elements.append(host)
+
+ec2_hosts = {"Hosts": host_elements}
+with open("topology.json", "w") as f:
+ f.write(json.dumps(ec2_hosts, indent = 2) + os.linesep)
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
new file mode 100644
index 000000000..1d78dd782
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
@@ -0,0 +1,64 @@
+{
+ "NetworkName": "alphanet",
+ "VersionModifier": "",
+ "ConsensusProtocol": "alpha1",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 3000000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 25,
+ "Online": false
+ }
+ ],
+ "FeeSink": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/net.json b/test/testdata/deployednettemplates/recipes/alphanet/net.json
new file mode 100644
index 000000000..e75a91d29
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/net.json
@@ -0,0 +1,232 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/node.json b/test/testdata/deployednettemplates/recipes/alphanet/node.json
new file mode 100644
index 000000000..d3b429ee3
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/node.json
@@ -0,0 +1,10 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json b/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json
new file mode 100644
index 000000000..5b0a52d9d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/recipe.json b/test/testdata/deployednettemplates/recipes/alphanet/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/relay.json b/test/testdata/deployednettemplates/recipes/alphanet/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/topology.json b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
new file mode 100644
index 000000000..8760eae20
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
@@ -0,0 +1,32 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N1-alphanet",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N2-alphanet",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N3-alphanet",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N4-alphanet",
+ "Template": "AWS-EU-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN1-alphanet",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN2-alphanet",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
index b971cdce5..0afb4418a 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json
@@ -16,7 +16,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -36,7 +36,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -56,7 +56,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -76,7 +76,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -96,7 +96,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -116,7 +116,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -136,7 +136,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -156,7 +156,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -179,7 +179,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node21",
@@ -196,7 +196,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node41",
@@ -213,7 +213,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node61",
@@ -230,7 +230,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node81",
@@ -247,7 +247,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
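
The per-node overrides above repeat the same addition, and the block-stats variants (e.g. node41) carry slightly irregular spacing inside the escaped string ("CadaverSizeTarget": 0,"EnableAccountUpdatesStats": true }). That spacing is harmless to a JSON parser, but hand-edited one-line JSON breaks easily, so a cheap validity check is useful. Again a hypothetical sketch, not code from this repository:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // The node41 override from the hunk above, unescaped and with its
        // irregular spacing preserved.
        override := `{ "TxPoolExponentialIncreaseFactor": 1, "DNSBootstrapID": "<network>.algodev.network", "DeadlockDetection": -1, "PeerPingPeriodSeconds": 30, "EnableAgreementReporting": true, "EnableAgreementTimeMetrics": true, "EnableAssembleStats": true, "EnableProcessBlockStats": true, "BaseLoggerDebugLevel": 4, "EnableProfiler": true, "EnableRuntimeMetrics": true, "CadaverSizeTarget": 0,"EnableAccountUpdatesStats": true }`

        // json.Valid is a quick structural check before any decode attempt.
        if !json.Valid([]byte(override)) {
            fmt.Println("ConfigJSONOverride is not valid JSON")
            return
        }
        var m map[string]interface{}
        if err := json.Unmarshal([]byte(override), &m); err != nil {
            panic(err)
        }
        fmt.Printf("parsed %d keys OK\n", len(m))
    }

Running a check like this over every ConfigJSONOverride in the template would catch a dropped quote or comma before the network is ever deployed.
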
@@ -270,7 +270,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node22",
@@ -287,7 +287,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node42",
@@ -304,7 +304,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node62",
@@ -321,7 +321,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node82",
@@ -338,7 +338,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -361,7 +361,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node23",
@@ -378,7 +378,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node43",
@@ -395,7 +395,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node63",
@@ -412,7 +412,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node83",
@@ -429,7 +429,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -452,7 +452,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node24",
@@ -469,7 +469,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node44",
@@ -486,7 +486,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node64",
@@ -503,7 +503,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node84",
@@ -520,7 +520,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -543,7 +543,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node25",
@@ -560,7 +560,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node45",
@@ -577,7 +577,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node65",
@@ -594,7 +594,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node85",
@@ -611,7 +611,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -634,7 +634,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node26",
@@ -651,7 +651,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node46",
@@ -668,7 +668,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node66",
@@ -685,7 +685,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node86",
@@ -702,7 +702,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -725,7 +725,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node27",
@@ -742,7 +742,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node47",
@@ -759,7 +759,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node67",
@@ -776,7 +776,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node87",
@@ -793,7 +793,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -816,7 +816,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node28",
@@ -833,7 +833,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node48",
@@ -850,7 +850,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node68",
@@ -867,7 +867,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node88",
@@ -884,7 +884,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -907,7 +907,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node29",
@@ -924,7 +924,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node49",
@@ -941,7 +941,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node69",
@@ -958,7 +958,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node89",
@@ -975,7 +975,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -998,7 +998,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node30",
@@ -1015,7 +1015,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node50",
@@ -1032,7 +1032,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node70",
@@ -1049,7 +1049,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node90",
@@ -1066,7 +1066,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1089,7 +1089,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node31",
@@ -1106,7 +1106,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node51",
@@ -1123,7 +1123,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node71",
@@ -1140,7 +1140,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node91",
@@ -1157,7 +1157,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1180,7 +1180,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node32",
@@ -1197,7 +1197,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node52",
@@ -1214,7 +1214,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node72",
@@ -1231,7 +1231,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node92",
@@ -1248,7 +1248,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1271,7 +1271,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node33",
@@ -1288,7 +1288,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node53",
@@ -1305,7 +1305,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node73",
@@ -1322,7 +1322,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node93",
@@ -1339,7 +1339,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1362,7 +1362,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node34",
@@ -1379,7 +1379,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node54",
@@ -1396,7 +1396,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node74",
@@ -1413,7 +1413,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node94",
@@ -1430,7 +1430,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1453,7 +1453,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node35",
@@ -1470,7 +1470,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node55",
@@ -1487,7 +1487,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node75",
@@ -1504,7 +1504,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node95",
@@ -1521,7 +1521,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -1544,7 +1544,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node36",
@@ -1561,7 +1561,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node56",
@@ -1578,7 +1578,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node76",
@@ -1595,7 +1595,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node96",
@@ -1612,7 +1612,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1635,7 +1635,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node37",
@@ -1652,7 +1652,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node57",
@@ -1669,7 +1669,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node77",
@@ -1686,7 +1686,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node97",
@@ -1703,7 +1703,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1726,7 +1726,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node38",
@@ -1743,7 +1743,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
},
{
"Name": "node58",
@@ -1760,7 +1760,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node78",
@@ -1777,7 +1777,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node98",
@@ -1794,7 +1794,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -1817,7 +1817,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node39",
@@ -1834,7 +1834,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node59",
@@ -1851,7 +1851,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node79",
@@ -1868,7 +1868,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node99",
@@ -1885,7 +1885,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1908,7 +1908,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node40",
@@ -1925,7 +1925,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node60",
@@ -1942,7 +1942,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node80",
@@ -1959,7 +1959,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
},
{
"Name": "node100",
@@ -1976,7 +1976,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
index 412f2937a..10e25e767 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json
@@ -5,7 +5,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}",
"AltConfigs": [
{
"APIToken": "{{APIToken}}",
@@ -14,7 +14,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }",
"FractionApply": 0.2
}
]
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json
index db8fb939d..563543a7b 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json
@@ -7,5 +7,5 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
diff --git a/test/testdata/deployednettemplates/recipes/custom/configs/node.json b/test/testdata/deployednettemplates/recipes/custom/configs/node.json
index 4cfd52b5c..0b310ce09 100644
--- a/test/testdata/deployednettemplates/recipes/custom/configs/node.json
+++ b/test/testdata/deployednettemplates/recipes/custom/configs/node.json
@@ -6,7 +6,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0 }",
"AltConfigs": [
{
"APIEndpoint": "{{APIEndpoint}}",
@@ -16,7 +16,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0 }",
"FractionApply": 1.0
}
]
diff --git a/test/testdata/deployednettemplates/recipes/custom/example/configs/node.json b/test/testdata/deployednettemplates/recipes/custom/example/configs/node.json
index a61c7506d..85238047c 100644
--- a/test/testdata/deployednettemplates/recipes/custom/example/configs/node.json
+++ b/test/testdata/deployednettemplates/recipes/custom/example/configs/node.json
@@ -5,7 +5,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0 }",
"AltConfigs": [
{
"APIToken": "{{APIToken}}",
@@ -14,7 +14,7 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0 }",
"FractionApply": 1.0
}
]
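
Every recipe above gets the same mechanical change: "EnableRuntimeMetrics": true is spliced into each ConfigJSONOverride, turning on the Go runtime metric collection added in util/metrics/runtime.go later in this diff. For a rough idea of how a JSON override layers onto an already-populated config, here is a minimal sketch (not go-algorand's actual loader; the Local struct below is a hypothetical stand-in for the real one in config/localTemplate.go):

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors a handful of the config fields seen in these recipes;
// the real struct lives in config/localTemplate.go.
type Local struct {
	BaseLoggerDebugLevel uint32
	EnableProfiler       bool
	EnableRuntimeMetrics bool
	CadaverSizeTarget    uint64
}

func main() {
	cfg := Local{BaseLoggerDebugLevel: 1}
	override := `{ "BaseLoggerDebugLevel": 4, "EnableProfiler": true, "EnableRuntimeMetrics": true, "CadaverSizeTarget": 0 }`
	// Unmarshalling into a non-zero struct only overwrites the keys
	// present in the override JSON, which is what makes these compact
	// per-node overrides workable.
	if err := json.Unmarshal([]byte(override), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
	// {BaseLoggerDebugLevel:4 EnableProfiler:true EnableRuntimeMetrics:true CadaverSizeTarget:0}
}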
diff --git a/tools/network/cloudflare/cloudflare.go b/tools/network/cloudflare/cloudflare.go
index 565070257..414f81232 100644
--- a/tools/network/cloudflare/cloudflare.go
+++ b/tools/network/cloudflare/cloudflare.go
@@ -38,8 +38,7 @@ var ErrDuplicateZoneNameFound = fmt.Errorf("more than a single zone name found t
// Cred contains the credentials used to authenticate with the cloudflare API.
type Cred struct {
- authEmail string
- authKey string
+ authToken string
}
// DNS is the cloudflare package's main access class. Instantiate this class to access the cloudflare APIs.
@@ -49,20 +48,18 @@ type DNS struct {
}
// NewCred creates a new credential structure used to authenticate with the cloudflare service.
-func NewCred(authEmail string, authKey string) *Cred {
+func NewCred(authToken string) *Cred {
return &Cred{
- authEmail: authEmail,
- authKey: authKey,
+ authToken: authToken,
}
}
// NewDNS creates a new instance of the cloudflare DNS services class
-func NewDNS(zoneID string, authEmail string, authKey string) *DNS {
+func NewDNS(zoneID string, authToken string) *DNS {
return &DNS{
zoneID: zoneID,
Cred: Cred{
- authEmail: authEmail,
- authKey: authKey,
+ authToken: authToken,
},
}
}
@@ -120,7 +117,7 @@ func (d *DNS) ListDNSRecord(ctx context.Context, recordType string, name string,
queryContent = ""
}
for {
- request, err := listDNSRecordRequest(d.zoneID, d.authEmail, d.authKey, recordType, name, queryContent, pageIndex, perPage, order, direction, match)
+ request, err := listDNSRecordRequest(d.zoneID, d.authToken, recordType, name, queryContent, pageIndex, perPage, order, direction, match)
if err != nil {
return []DNSRecordResponseEntry{}, err
}
@@ -135,7 +132,7 @@ func (d *DNS) ListDNSRecord(ctx context.Context, recordType string, name string,
return []DNSRecordResponseEntry{}, fmt.Errorf("failed to list DNS records. Request url = '%v', response error : %v", request.URL, err)
}
if len(parsedReponse.Errors) > 0 {
- return []DNSRecordResponseEntry{}, fmt.Errorf("Failed to list DNS entries. %+v", parsedReponse.Errors)
+ return []DNSRecordResponseEntry{}, fmt.Errorf("failed to list DNS entries. %+v", parsedReponse.Errors)
}
result = append(result, parsedReponse.Result...)
if parsedReponse.ResultInfo.TotalPages <= int(pageIndex) {
@@ -156,7 +153,7 @@ func (d *DNS) ListDNSRecord(ctx context.Context, recordType string, name string,
// CreateDNSRecord creates the DNS record with the given content.
func (d *DNS) CreateDNSRecord(ctx context.Context, recordType string, name string, content string, ttl uint, priority uint, proxied bool) error {
- request, err := createDNSRecordRequest(d.zoneID, d.authEmail, d.authKey, recordType, name, content, ttl, priority, proxied)
+ request, err := createDNSRecordRequest(d.zoneID, d.authToken, recordType, name, content, ttl, priority, proxied)
if err != nil {
return err
}
@@ -170,8 +167,8 @@ func (d *DNS) CreateDNSRecord(ctx context.Context, recordType string, name strin
if err != nil {
return fmt.Errorf("failed to create DNS record. Request url = '%v', response error : %v", request.URL, err)
}
- if parsedResponse.Success == false {
- request, _ := createDNSRecordRequest(d.zoneID, d.authEmail, d.authKey, recordType, name, content, ttl, priority, proxied)
+ if !parsedResponse.Success {
+ request, _ := createDNSRecordRequest(d.zoneID, d.authToken, recordType, name, content, ttl, priority, proxied)
requestBody, _ := request.GetBody()
bodyBytes, _ := ioutil.ReadAll(requestBody)
return fmt.Errorf("failed to create DNS record. Request url = '%v', body = %s, parsed response : %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
@@ -181,7 +178,7 @@ func (d *DNS) CreateDNSRecord(ctx context.Context, recordType string, name strin
// CreateSRVRecord creates the SRV record with the given content.
func (d *DNS) CreateSRVRecord(ctx context.Context, name string, target string, ttl uint, priority uint, port uint, service string, protocol string, weight uint) error {
- request, err := createSRVRecordRequest(d.zoneID, d.authEmail, d.authKey, name, service, protocol, weight, port, ttl, priority, target)
+ request, err := createSRVRecordRequest(d.zoneID, d.authToken, name, service, protocol, weight, port, ttl, priority, target)
if err != nil {
return err
}
@@ -195,8 +192,8 @@ func (d *DNS) CreateSRVRecord(ctx context.Context, name string, target string, t
if err != nil {
return fmt.Errorf("failed to create SRV record. Request url = '%v', response error : %v", request.URL, err)
}
- if parsedResponse.Success == false {
- request, _ := createSRVRecordRequest(d.zoneID, d.authEmail, d.authKey, name, service, protocol, weight, port, ttl, priority, target)
+ if !parsedResponse.Success {
+ request, _ := createSRVRecordRequest(d.zoneID, d.authToken, name, service, protocol, weight, port, ttl, priority, target)
requestBody, _ := request.GetBody()
bodyBytes, _ := ioutil.ReadAll(requestBody)
return fmt.Errorf("failed to create SRV record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
@@ -206,7 +203,7 @@ func (d *DNS) CreateSRVRecord(ctx context.Context, name string, target string, t
// DeleteDNSRecord deletes a single DNS entry
func (d *DNS) DeleteDNSRecord(ctx context.Context, recordID string) error {
- request, err := deleteDNSRecordRequest(d.zoneID, d.authEmail, d.authKey, recordID)
+ request, err := deleteDNSRecordRequest(d.zoneID, d.authToken, recordID)
if err != nil {
return err
}
@@ -220,8 +217,8 @@ func (d *DNS) DeleteDNSRecord(ctx context.Context, recordID string) error {
if err != nil {
return fmt.Errorf("failed to delete DNS record. Request url = '%v', response error : %v", request.URL, err)
}
- if parsedResponse.Success == false {
- request, _ := deleteDNSRecordRequest(d.zoneID, d.authEmail, d.authKey, recordID)
+ if !parsedResponse.Success {
+ request, _ := deleteDNSRecordRequest(d.zoneID, d.authToken, recordID)
requestBody, _ := request.GetBody()
bodyBytes, _ := ioutil.ReadAll(requestBody)
return fmt.Errorf("failed to delete DNS record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
@@ -231,7 +228,7 @@ func (d *DNS) DeleteDNSRecord(ctx context.Context, recordID string) error {
// UpdateDNSRecord updates the DNS record with the given content.
func (d *DNS) UpdateDNSRecord(ctx context.Context, recordID string, recordType string, name string, content string, ttl uint, priority uint, proxied bool) error {
- request, err := updateDNSRecordRequest(d.zoneID, d.authEmail, d.authKey, recordID, recordType, name, content, ttl, priority, proxied)
+ request, err := updateDNSRecordRequest(d.zoneID, d.authToken, recordID, recordType, name, content, ttl, priority, proxied)
if err != nil {
return err
}
@@ -246,8 +243,8 @@ func (d *DNS) UpdateDNSRecord(ctx context.Context, recordID string, recordType s
return fmt.Errorf("failed to update DNS record. Request url = '%v', response error : %v", request.URL, err)
}
- if parsedResponse.Success == false {
- request, _ := updateDNSRecordRequest(d.zoneID, d.authEmail, d.authKey, recordID, recordType, name, content, ttl, priority, proxied)
+ if !parsedResponse.Success {
+ request, _ := updateDNSRecordRequest(d.zoneID, d.authToken, recordID, recordType, name, content, ttl, priority, proxied)
requestBody, _ := request.GetBody()
bodyBytes, _ := ioutil.ReadAll(requestBody)
return fmt.Errorf("failed to update DNS record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
@@ -258,7 +255,7 @@ func (d *DNS) UpdateDNSRecord(ctx context.Context, recordID string, recordType s
// UpdateSRVRecord updates the SRV record with the given content.
func (d *DNS) UpdateSRVRecord(ctx context.Context, recordID string, name string, target string, ttl uint, priority uint, port uint, service string, protocol string, weight uint) error {
- request, err := updateSRVRecordRequest(d.zoneID, d.authEmail, d.authKey, recordID, name, service, protocol, weight, port, ttl, priority, target)
+ request, err := updateSRVRecordRequest(d.zoneID, d.authToken, recordID, name, service, protocol, weight, port, ttl, priority, target)
if err != nil {
return err
}
@@ -272,8 +269,8 @@ func (d *DNS) UpdateSRVRecord(ctx context.Context, recordID string, name string,
if err != nil {
return fmt.Errorf("failed to update SRV record. Request url = '%v', response error : %v", request.URL, err)
}
- if parsedResponse.Success == false {
- request, _ := updateSRVRecordRequest(d.zoneID, d.authEmail, d.authKey, recordID, name, service, protocol, weight, port, ttl, priority, target)
+ if !parsedResponse.Success {
+ request, _ := updateSRVRecordRequest(d.zoneID, d.authToken, recordID, name, service, protocol, weight, port, ttl, priority, target)
requestBody, _ := request.GetBody()
bodyBytes, _ := ioutil.ReadAll(requestBody)
return fmt.Errorf("failed to update SRV record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
@@ -289,7 +286,7 @@ type Zone struct {
// GetZones returns a list of zones that are associated with cloudflare.
func (c *Cred) GetZones(ctx context.Context) (zones []Zone, err error) {
- request, err := getZonesRequest(c.authEmail, c.authKey)
+ request, err := getZonesRequest(c.authToken)
if err != nil {
return nil, err
}
@@ -303,8 +300,8 @@ func (c *Cred) GetZones(ctx context.Context) (zones []Zone, err error) {
if err != nil {
return nil, fmt.Errorf("failed to get zones. Request url = '%v', response error : %v", request.URL, err)
}
- if parsedResponse.Success == false {
- request, _ := getZonesRequest(c.authEmail, c.authKey)
+ if !parsedResponse.Success {
+ request, _ := getZonesRequest(c.authToken)
requestBody, _ := request.GetBody()
bodyBytes, _ := ioutil.ReadAll(requestBody)
return nil, fmt.Errorf("failed to retrieve zone records. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
@@ -353,7 +350,7 @@ func (c *Cred) GetZoneID(ctx context.Context, zoneDomainName string) (zoneID str
// ExportZone exports the zone into a BIND config bytes array
func (d *DNS) ExportZone(ctx context.Context) (exportedZoneBytes []byte, err error) {
- request, err := exportZoneRequest(d.zoneID, d.authEmail, d.authKey)
+ request, err := exportZoneRequest(d.zoneID, d.authToken)
if err != nil {
return nil, err
}
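
The cloudflare package changes are a single theme: the legacy X-Auth-Email/X-Auth-Key header pair is replaced by one scoped API token, so Cred and DNS shrink to an authToken field and every request constructor drops a parameter. A usage sketch against the post-change API (signatures taken from the hunks above; the zone ID, token, and record values are placeholders):

package main

import (
	"context"
	"log"

	"github.com/algorand/go-algorand/tools/network/cloudflare"
)

func main() {
	// One scoped API token replaces the old email + global-key pair.
	dns := cloudflare.NewDNS("zone-id", "api-token")
	err := dns.CreateDNSRecord(context.Background(),
		"A", "relay.example.algodev.network", "203.0.113.7",
		3600, // ttl
		1,    // priority
		false /* proxied */)
	if err != nil {
		log.Fatal(err)
	}
}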
diff --git a/tools/network/cloudflare/createRecord.go b/tools/network/cloudflare/createRecord.go
index e8c2b2851..747dc57ae 100644
--- a/tools/network/cloudflare/createRecord.go
+++ b/tools/network/cloudflare/createRecord.go
@@ -51,7 +51,7 @@ type createSRVRecord struct {
}
// createDNSRecordRequest constructs an HTTP request that creates a new DNS record
-func createDNSRecordRequest(zoneID string, authEmail string, authKey string, recordType string, name string, content string, ttl uint, priority uint, proxied bool) (*http.Request, error) {
+func createDNSRecordRequest(zoneID string, authToken string, recordType string, name string, content string, ttl uint, priority uint, proxied bool) (*http.Request, error) {
// verify input arguments
ttl = clampTTL(ttl)
priority = clampPriority(priority)
@@ -77,12 +77,12 @@ func createDNSRecordRequest(zoneID string, authEmail string, authKey string, rec
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
// createSRVRecordRequest constructs an HTTP request that creates a new SRV record
-func createSRVRecordRequest(zoneID string, authEmail string, authKey string, name string, service string, protocol string, weight uint, port uint, ttl uint, priority uint, target string) (*http.Request, error) {
+func createSRVRecordRequest(zoneID string, authToken string, name string, service string, protocol string, weight uint, port uint, ttl uint, priority uint, target string) (*http.Request, error) {
// verify input arguments
ttl = clampTTL(ttl)
priority = clampPriority(priority)
@@ -112,7 +112,7 @@ func createSRVRecordRequest(zoneID string, authEmail string, authKey string, nam
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
@@ -149,7 +149,7 @@ func parseCreateDNSRecordResponse(response *http.Response) (*CreateDNSRecordResp
return nil, err
}
if response.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("Response status code %d; body = %s", response.StatusCode, string(body))
+ return nil, fmt.Errorf("response status code %d; body = %s", response.StatusCode, string(body))
}
var parsedReponse CreateDNSRecordResponse
if err := json.Unmarshal(body, &parsedReponse); err != nil {
@@ -176,9 +176,7 @@ func clampTTL(ttl uint) uint {
// clampPriority clamps the input priority value to the accepted range of 0..65535
// see documentation at https://api.cloudflare.com/#dns-records-for-a-zone-create-dns-record
func clampPriority(priority uint) uint {
- if priority < 0 {
- priority = 0
- } else if priority > 65535 {
+ if priority > 65535 {
priority = 65535
}
return priority
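
The clampPriority change removes a dead branch: priority is a uint, so priority < 0 can never be true, and only the upper clamp does anything; it is exactly the kind of vacuous comparison static analysis flags. A standalone sketch of the surviving logic:

package main

import "fmt"

// clampPriority keeps a priority in Cloudflare's accepted 0..65535 range.
// For an unsigned integer the lower clamp can never fire, so only the
// upper bound is checked.
func clampPriority(priority uint) uint {
	if priority > 65535 {
		priority = 65535
	}
	return priority
}

func main() {
	fmt.Println(clampPriority(70000)) // 65535
	fmt.Println(clampPriority(42))    // 42
}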
diff --git a/tools/network/cloudflare/deleteRecord.go b/tools/network/cloudflare/deleteRecord.go
index eaa775761..9770be898 100644
--- a/tools/network/cloudflare/deleteRecord.go
+++ b/tools/network/cloudflare/deleteRecord.go
@@ -25,7 +25,7 @@ import (
)
// deleteDNSRecordRequest creates a new HTTP request for deleting a single DNS record.
-func deleteDNSRecordRequest(zoneID string, authEmail string, authKey string, recordID string) (*http.Request, error) {
+func deleteDNSRecordRequest(zoneID string, authToken string, recordID string) (*http.Request, error) {
// construct the query
uri, err := url.Parse(fmt.Sprintf("%szones/%s/dns_records/%s", cloudFlareURI, zoneID, recordID))
if err != nil {
@@ -35,7 +35,7 @@ func deleteDNSRecordRequest(zoneID string, authEmail string, authKey string, rec
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
diff --git a/tools/network/cloudflare/helpers.go b/tools/network/cloudflare/helpers.go
index c221cb36d..e4b995f67 100644
--- a/tools/network/cloudflare/helpers.go
+++ b/tools/network/cloudflare/helpers.go
@@ -20,8 +20,7 @@ import (
"net/http"
)
-func addHeaders(request *http.Request, authEmail string, authKey string) {
- request.Header.Add("X-Auth-Email", authEmail)
- request.Header.Add("X-Auth-Key", authKey)
+func addHeaders(request *http.Request, authToken string) {
+ request.Header.Add("Authorization", "Bearer "+authToken)
request.Header.Add("Content-Type", "application/json")
}
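
addHeaders now emits a standard Authorization: Bearer header instead of the two X-Auth-* headers. A quick httptest check of what a server receives; addHeaders is unexported, so the sketch replicates it locally:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Local copy of the new addHeaders, as written in the hunk above.
func addHeaders(request *http.Request, authToken string) {
	request.Header.Add("Authorization", "Bearer "+authToken)
	request.Header.Add("Content-Type", "application/json")
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println(r.Header.Get("Authorization")) // Bearer my-token
	}))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
	addHeaders(req, "my-token")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}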
diff --git a/tools/network/cloudflare/listRecords.go b/tools/network/cloudflare/listRecords.go
index ba8372d2a..263e8adf8 100644
--- a/tools/network/cloudflare/listRecords.go
+++ b/tools/network/cloudflare/listRecords.go
@@ -25,7 +25,7 @@ import (
)
// listDNSRecordRequest creates a new HTTP request for listing DNS records.
-func listDNSRecordRequest(zoneID string, authEmail string, authKey string, recordType string, name string, content string, page uint, perPage uint, order string, direction string, match string) (*http.Request, error) {
+func listDNSRecordRequest(zoneID string, authToken string, recordType string, name string, content string, page uint, perPage uint, order string, direction string, match string) (*http.Request, error) {
// verify and validate input parameters.
if page == 0 {
page = 1
@@ -73,7 +73,7 @@ func listDNSRecordRequest(zoneID string, authEmail string, authKey string, recor
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
diff --git a/tools/network/cloudflare/updateRecord.go b/tools/network/cloudflare/updateRecord.go
index 18607187c..c4be7c362 100644
--- a/tools/network/cloudflare/updateRecord.go
+++ b/tools/network/cloudflare/updateRecord.go
@@ -25,7 +25,7 @@ import (
)
// updateDNSRecordRequest constructs an HTTP request that updates an existing DNS record
-func updateDNSRecordRequest(zoneID string, authEmail string, authKey string, recordID string, recordType string, name string, content string, ttl uint, priority uint, proxied bool) (*http.Request, error) {
+func updateDNSRecordRequest(zoneID string, authToken string, recordID string, recordType string, name string, content string, ttl uint, priority uint, proxied bool) (*http.Request, error) {
// verify input arguments
ttl = clampTTL(ttl)
priority = clampPriority(priority)
@@ -51,12 +51,12 @@ func updateDNSRecordRequest(zoneID string, authEmail string, authKey string, rec
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
// updateSRVRecordRequest constructs an HTTP request that updates an existing SRV record
-func updateSRVRecordRequest(zoneID string, authEmail string, authKey string, recordID string, name string, service string, protocol string, weight uint, port uint, ttl uint, priority uint, target string) (*http.Request, error) {
+func updateSRVRecordRequest(zoneID string, authToken string, recordID string, name string, service string, protocol string, weight uint, port uint, ttl uint, priority uint, target string) (*http.Request, error) {
// verify input arguments
ttl = clampTTL(ttl)
priority = clampPriority(priority)
@@ -86,7 +86,7 @@ func updateSRVRecordRequest(zoneID string, authEmail string, authKey string, rec
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
diff --git a/tools/network/cloudflare/zones.go b/tools/network/cloudflare/zones.go
index 49e6d6078..d73829ea5 100644
--- a/tools/network/cloudflare/zones.go
+++ b/tools/network/cloudflare/zones.go
@@ -24,7 +24,7 @@ import (
"net/url"
)
-func getZonesRequest(authEmail, authKey string) (*http.Request, error) {
+func getZonesRequest(authToken string) (*http.Request, error) {
// construct the query
requestURI, err := url.Parse(cloudFlareURI)
if err != nil {
@@ -35,7 +35,7 @@ func getZonesRequest(authEmail, authKey string) (*http.Request, error) {
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
@@ -76,7 +76,7 @@ func parseGetZonesResponse(response *http.Response) (*GetZonesResult, error) {
return nil, err
}
if response.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("Response status code %d", response.StatusCode)
+ return nil, fmt.Errorf("response status code %d", response.StatusCode)
}
var parsedReponse GetZonesResult
if err := json.Unmarshal(body, &parsedReponse); err != nil {
@@ -85,7 +85,7 @@ func parseGetZonesResponse(response *http.Response) (*GetZonesResult, error) {
return &parsedReponse, nil
}
-func exportZoneRequest(zoneID, authEmail, authKey string) (*http.Request, error) {
+func exportZoneRequest(zoneID, authToken string) (*http.Request, error) {
// construct the query
requestURI, err := url.Parse(cloudFlareURI)
if err != nil {
@@ -96,6 +96,6 @@ func exportZoneRequest(zoneID, authEmail, authKey string) (*http.Request, error)
if err != nil {
return nil, err
}
- addHeaders(request, authEmail, authKey)
+ addHeaders(request, authToken)
return request, nil
}
diff --git a/util/metrics/counter.go b/util/metrics/counter.go
index 73debb3e9..06ea4b0c4 100644
--- a/util/metrics/counter.go
+++ b/util/metrics/counter.go
@@ -155,9 +155,6 @@ func (counter *Counter) WriteMetric(buf *strings.Builder, parentLabels string) {
counter.Lock()
defer counter.Unlock()
- if len(counter.values) < 1 {
- return
- }
buf.WriteString("# HELP ")
buf.WriteString(counter.name)
buf.WriteString(" ")
@@ -165,6 +162,17 @@ func (counter *Counter) WriteMetric(buf *strings.Builder, parentLabels string) {
buf.WriteString("\n# TYPE ")
buf.WriteString(counter.name)
buf.WriteString(" counter\n")
+ // if counter is zero, report 0 using parentLabels and no tags
+ if len(counter.values) == 0 {
+ buf.WriteString(counter.name)
+ if len(parentLabels) > 0 {
+ buf.WriteString("{" + parentLabels + "}")
+ }
+ buf.WriteString(" 0")
+ buf.WriteString("\n")
+ return
+ }
+ // otherwise iterate through values and write one line per label
for _, l := range counter.values {
buf.WriteString(counter.name)
buf.WriteString("{")
diff --git a/util/metrics/counter_test.go b/util/metrics/counter_test.go
index 43ef0c27a..75512b80e 100644
--- a/util/metrics/counter_test.go
+++ b/util/metrics/counter_test.go
@@ -19,6 +19,7 @@ package metrics
import (
"context"
"fmt"
+ "strings"
"testing"
"time"
@@ -67,7 +68,7 @@ func TestMetricCounter(t *testing.T) {
defer test.Unlock()
// in the loop above we've created a single metric name with five different labels set (host0, host1 .. host4)
// let's see if we received all 5 different labels.
- require.Equal(t, 5, len(test.metrics), "Missing metric counts were reported.")
+ require.Equal(t, 5, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics)
for k, v := range test.metrics {
// we have increased each one of the labels exactly 4 times. See that the counter was counting correctly.
@@ -114,7 +115,7 @@ func TestMetricCounterFastInts(t *testing.T) {
defer test.Unlock()
// in the loop above we've created a single metric name with five different labels set (host0, host1 .. host4)
// let's see if we received all 5 different labels.
- require.Equal(t, 1, len(test.metrics), "Missing metric counts were reported.")
+ require.Equal(t, 1, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics)
for k, v := range test.metrics {
// we have increased each one of the labels exactly 4 times. See that the counter was counting correctly.
@@ -163,7 +164,7 @@ func TestMetricCounterMixed(t *testing.T) {
defer test.Unlock()
// in the loop above we've created a single metric name with five different labels set (host0, host1 .. host4)
// let's see if we received all 5 different labels.
- require.Equal(t, 1, len(test.metrics), "Missing metric counts were reported.")
+ require.Equal(t, 1, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics)
for k, v := range test.metrics {
// we have increased each one of the labels exactly 4 times. See that the counter was counting correctly.
@@ -171,3 +172,29 @@ func TestMetricCounterMixed(t *testing.T) {
require.Equal(t, "35.5", v, fmt.Sprintf("The metric '%s' reached value '%s'", k, v))
}
}
+
+func TestCounterWriteMetric(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ c := MakeCounter(MetricName{Name: "testname", Description: "testhelp"})
+ c.Deregister(nil)
+
+ // ensure 0 counters are still logged
+ sbOut := strings.Builder{}
+ c.WriteMetric(&sbOut, `host="myhost"`)
+ expected := `# HELP testname testhelp
+# TYPE testname counter
+testname{host="myhost"} 0
+`
+ require.Equal(t, expected, sbOut.String())
+
+ c.Add(2.3, nil)
+ // ensure non-zero counters are logged
+ sbOut = strings.Builder{}
+ c.WriteMetric(&sbOut, `host="myhost"`)
+ expected = `# HELP testname testhelp
+# TYPE testname counter
+testname{host="myhost"} 2.3
+`
+ require.Equal(t, expected, sbOut.String())
+}
diff --git a/util/metrics/gauge_test.go b/util/metrics/gauge_test.go
index 301ae22a7..9a228c067 100644
--- a/util/metrics/gauge_test.go
+++ b/util/metrics/gauge_test.go
@@ -68,7 +68,7 @@ func TestMetricGauge(t *testing.T) {
// in the loop above we've created a single metric name with five different labels set (host0, host1 .. host4)
// let's see if we received all 5 different labels.
- require.Equal(t, 5, len(test.metrics), "Missing metric counts were reported.")
+ require.Equal(t, 5, len(test.metrics), "Missing metric counts were reported: %+v", test.metrics)
// iterate through the metrics and check that each of the metrics reached its correct count.
for k, v := range test.metrics {
diff --git a/util/metrics/metrics_test.go b/util/metrics/metrics_test.go
index 03369c7a1..fddb9eda6 100644
--- a/util/metrics/metrics_test.go
+++ b/util/metrics/metrics_test.go
@@ -25,6 +25,7 @@ import (
"testing"
"time"
+ "github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-deadlock"
"github.com/stretchr/testify/require"
)
@@ -95,6 +96,8 @@ func (p *MetricTest) testMetricsHandler(w http.ResponseWriter, r *http.Request)
}
func TestSanitizeTelemetryName(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
for _, tc := range []struct{ in, out string }{
{in: "algod_counter_x", out: "algod_counter_x"},
{in: "algod_counter_x{a=b}", out: "algod_counter_x_a_b_"},
@@ -112,3 +115,29 @@ func TestSanitizeTelemetryName(t *testing.T) {
})
}
}
+
+func TestSanitizePrometheusName(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ for _, tc := range []struct{ in, out string }{
+ {in: "algod_counter_x", out: "algod_counter_x"},
+ {in: "algod_counter_x{a=b}", out: "algod_counter_x_a_b_"},
+ {in: "this_is1-a-name0", out: "this_is1_a_name0"},
+ {in: "myMetricName1:a=yes", out: "myMetricName1_a_yes"},
+ {in: "myMetricName1:a=yes,b=no", out: "myMetricName1_a_yes_b_no"},
+ {in: "0myMetricName1", out: "_myMetricName1"},
+ {in: "myMetricName1{hello=x}", out: "myMetricName1_hello_x_"},
+ {in: "myMetricName1.moreNames-n.3", out: "myMetricName1_moreNames_n_3"},
+ {in: "-my-metric-name", out: "_my_metric_name"},
+ {in: `label-counter:label="a label value"`, out: "label_counter_label__a_label_value_"},
+ {in: "go/gc/cycles/total:gc-cycles", out: "go_gc_cycles_total_gc_cycles"},
+ {in: "go/gc/heap/allocs:bytes", out: "go_gc_heap_allocs_bytes"},
+ {in: "go/gc/heap/allocs:objects", out: "go_gc_heap_allocs_objects"},
+ {in: "go/memory/classes/os-stacks:bytes", out: "go_memory_classes_os_stacks_bytes"},
+ {in: "go/memory/classes/heap/free:bytes", out: "go_memory_classes_heap_free_bytes"},
+ } {
+ t.Run(tc.in, func(t *testing.T) {
+ require.Equal(t, tc.out, sanitizePrometheusName(tc.in))
+ })
+ }
+}
diff --git a/util/metrics/registryCommon.go b/util/metrics/registryCommon.go
index 2eb8d6c53..8a0f53464 100644
--- a/util/metrics/registryCommon.go
+++ b/util/metrics/registryCommon.go
@@ -25,7 +25,9 @@ import (
// Metric represent any collectable metric
type Metric interface {
+ // WriteMetric adds metrics in Prometheus exposition format to buf, including parentLabels tags if provided.
WriteMetric(buf *strings.Builder, parentLabels string)
+ // AddMetric adds metrics to a map, used for reporting in telemetry heartbeat messages.
AddMetric(values map[string]float64)
}
@@ -42,3 +44,9 @@ var sanitizeTelemetryCharactersRegexp = regexp.MustCompile("(^[^a-zA-Z_]|[^a-zA-
func sanitizeTelemetryName(name string) string {
return sanitizeTelemetryCharactersRegexp.ReplaceAllString(name, "_")
}
+
+// sanitizePrometheusName ensures a metric name reported to Prometheus doesn't contain any
+// non-alphanumeric characters (apart from _) and doesn't start with a number.
+func sanitizePrometheusName(name string) string {
+ return strings.ReplaceAll(sanitizeTelemetryName(name), "-", "_")
+}
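
For reference, a minimal standalone sketch of the two-step sanitization. The character class below is an assumption (the definition of sanitizeTelemetryCharactersRegexp is truncated in the hunk context above), so treat it as illustrative only and compare against the TestSanitizePrometheusName table:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// assumed: first char must be a letter or '_'; later chars may also be digits or '-'
var telemetryRe = regexp.MustCompile("(^[^a-zA-Z_]|[^a-zA-Z0-9_-])")

// telemetry step: replace disallowed characters with '_'
func sanitizeTelemetry(name string) string {
	return telemetryRe.ReplaceAllString(name, "_")
}

// Prometheus step: additionally fold '-' into '_'
func sanitizePrometheus(name string) string {
	return strings.ReplaceAll(sanitizeTelemetry(name), "-", "_")
}

func main() {
	fmt.Println(sanitizeTelemetry("/memory/classes/os-stacks:bytes"))  // _memory_classes_os-stacks_bytes
	fmt.Println(sanitizePrometheus("/memory/classes/os-stacks:bytes")) // _memory_classes_os_stacks_bytes
}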
diff --git a/util/metrics/registry_test.go b/util/metrics/registry_test.go
index aa4851630..2256993f2 100644
--- a/util/metrics/registry_test.go
+++ b/util/metrics/registry_test.go
@@ -37,7 +37,7 @@ func TestWriteAdd(t *testing.T) {
results := make(map[string]float64)
DefaultRegistry().AddMetrics(results)
- require.Equal(t, 2, len(results))
+ require.Equal(t, 2, len(results), "results: %+v", results)
require.Contains(t, results, "gauge-name")
require.InDelta(t, 12.34, results["gauge-name"], 0.01)
require.Contains(t, results, "label-counter_label__a_label_value_")
diff --git a/util/metrics/runtime.go b/util/metrics/runtime.go
new file mode 100644
index 000000000..3f89ea761
--- /dev/null
+++ b/util/metrics/runtime.go
@@ -0,0 +1,137 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package metrics
+
+import (
+ "runtime/metrics"
+ "strconv"
+ "strings"
+
+ "github.com/algorand/go-deadlock"
+)
+
+// defaultRuntimeMetrics contains all the Go runtime metrics, except histograms.
+var defaultRuntimeMetrics = []string{
+ "/gc/cycles/automatic:gc-cycles",
+ "/gc/cycles/forced:gc-cycles",
+ "/gc/cycles/total:gc-cycles",
+ "/gc/heap/allocs:bytes",
+ "/gc/heap/allocs:objects",
+ "/gc/heap/frees:bytes",
+ "/gc/heap/frees:objects",
+ "/gc/heap/goal:bytes",
+ "/gc/heap/objects:objects",
+ "/gc/heap/tiny/allocs:objects",
+ "/memory/classes/heap/free:bytes",
+ "/memory/classes/heap/objects:bytes",
+ "/memory/classes/heap/released:bytes",
+ "/memory/classes/heap/stacks:bytes",
+ "/memory/classes/heap/unused:bytes",
+ "/memory/classes/metadata/mcache/free:bytes",
+ "/memory/classes/metadata/mcache/inuse:bytes",
+ "/memory/classes/metadata/mspan/free:bytes",
+ "/memory/classes/metadata/mspan/inuse:bytes",
+ "/memory/classes/metadata/other:bytes",
+ "/memory/classes/os-stacks:bytes",
+ "/memory/classes/other:bytes",
+ "/memory/classes/profiling/buckets:bytes",
+ "/memory/classes/total:bytes",
+ "/sched/goroutines:goroutines",
+}
+
+// RuntimeMetrics gathers selected metrics from Go's builtin runtime/metrics package
+// and makes them available as Prometheus metrics.
+type RuntimeMetrics struct {
+ descriptions []metrics.Description
+ samples []metrics.Sample
+ deadlock.Mutex
+}
+
+// NewRuntimeMetrics creates a RuntimeMetrics object, given an optional list of metric names that
+// must match names in Go's metrics.All(). If no names are provided, a default list of runtime metrics is used.
+func NewRuntimeMetrics(enabledMetrics ...string) *RuntimeMetrics {
+ enabled := make(map[string]bool)
+ if len(enabledMetrics) == 0 {
+ enabledMetrics = defaultRuntimeMetrics
+ }
+ for _, name := range enabledMetrics {
+ enabled[name] = true
+ }
+
+ // create []metrics.Sample and get metric descriptions
+ rm := &RuntimeMetrics{}
+ descs := metrics.All()
+ for _, desc := range descs {
+ if enabled[desc.Name] {
+ rm.descriptions = append(rm.descriptions, desc)
+ rm.samples = append(rm.samples, metrics.Sample{Name: desc.Name})
+ }
+ }
+
+ return rm
+}
+
+// WriteMetric writes runtime metrics to the output stream in Prometheus exposition format.
+func (rm *RuntimeMetrics) WriteMetric(buf *strings.Builder, parentLabels string) {
+ rm.Lock()
+ defer rm.Unlock()
+
+ metrics.Read(rm.samples)
+ for i, s := range rm.samples {
+ name := "algod_go" + sanitizePrometheusName(s.Name)
+ desc := rm.descriptions[i]
+
+ buf.WriteString("# HELP " + name + " " + desc.Description + "\n")
+ if desc.Cumulative {
+ buf.WriteString("# TYPE " + name + " counter\n")
+ } else {
+ buf.WriteString("# TYPE " + name + " gauge\n")
+ }
+ buf.WriteString(name)
+ if len(parentLabels) > 0 {
+ buf.WriteString("{" + parentLabels + "}")
+ }
+ buf.WriteRune(' ')
+ switch s.Value.Kind() {
+ case metrics.KindUint64:
+ buf.WriteString(strconv.FormatUint(s.Value.Uint64(), 10))
+ case metrics.KindFloat64:
+ buf.WriteString(strconv.FormatFloat(s.Value.Float64(), 'f', -1, 64))
+ default:
+ }
+ buf.WriteRune('\n')
+ }
+}
+
+// AddMetric adds runtime metrics to the map used for heartbeat metrics.
+func (rm *RuntimeMetrics) AddMetric(m map[string]float64) {
+ rm.Lock()
+ defer rm.Unlock()
+
+ metrics.Read(rm.samples)
+ for _, s := range rm.samples {
+ name := "go" + sanitizeTelemetryName(s.Name)
+
+ switch s.Value.Kind() {
+ case metrics.KindUint64:
+ m[name] = float64(s.Value.Uint64())
+ case metrics.KindFloat64:
+ m[name] = s.Value.Float64()
+ default:
+ }
+ }
+}
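
As a usage sketch (the host label and metric selection here are illustrative, not part of the patch), collecting a subset of runtime metrics and dumping them in exposition format might look like:

package main

import (
	"fmt"
	"strings"

	"github.com/algorand/go-algorand/util/metrics"
)

func main() {
	// select two runtime metrics instead of the default list
	rm := metrics.NewRuntimeMetrics("/sched/goroutines:goroutines", "/gc/cycles/total:gc-cycles")

	// optionally make it scrapeable alongside the other registered metrics
	metrics.DefaultRegistry().Register(rm)

	var sb strings.Builder
	rm.WriteMetric(&sb, `host="myhost"`)
	fmt.Print(sb.String())
	// sample output (values vary per process):
	// # HELP algod_go_gc_cycles_total_gc_cycles ...
	// # TYPE algod_go_gc_cycles_total_gc_cycles counter
	// algod_go_gc_cycles_total_gc_cycles{host="myhost"} 3
	// # HELP algod_go_sched_goroutines_goroutines ...
	// # TYPE algod_go_sched_goroutines_goroutines gauge
	// algod_go_sched_goroutines_goroutines{host="myhost"} 8
}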
diff --git a/util/metrics/runtime_test.go b/util/metrics/runtime_test.go
new file mode 100644
index 000000000..103248446
--- /dev/null
+++ b/util/metrics/runtime_test.go
@@ -0,0 +1,59 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package metrics
+
+import (
+ "bufio"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRuntimeMetrics(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rm := NewRuntimeMetrics()
+ var sb strings.Builder
+ rm.WriteMetric(&sb, `host="x"`)
+ scanner := bufio.NewScanner(strings.NewReader(sb.String()))
+
+ // assert default metrics correctly created
+ cur := 0
+ for scanner.Scan() {
+ curName := "algod_go" + defaultRuntimeMetrics[cur]
+ curName = strings.ReplaceAll(curName, ":", "_")
+ curName = strings.ReplaceAll(curName, "-", "_")
+ curName = strings.ReplaceAll(curName, "/", "_")
+ require.Regexp(t, `^# HELP `+curName, scanner.Text())
+ require.True(t, scanner.Scan())
+ require.Regexp(t, `^# TYPE `+curName, scanner.Text())
+ require.True(t, scanner.Scan())
+ require.Regexp(t, `^`+curName+`{host="x"}`, scanner.Text())
+ cur++
+ }
+ require.NoError(t, scanner.Err())
+ require.Len(t, defaultRuntimeMetrics, cur)
+
+ m := make(map[string]float64)
+ rm.AddMetric(m)
+ for _, name := range defaultRuntimeMetrics {
+ tname := strings.ReplaceAll(strings.ReplaceAll("go"+name, ":", "_"), "/", "_")
+ require.Contains(t, m, tname)
+ }
+}
diff --git a/util/metrics/segment.go b/util/metrics/segment.go
deleted file mode 100644
index 21db004f3..000000000
--- a/util/metrics/segment.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package metrics
-
-import (
- "sync/atomic"
- "time"
-)
-
-// Segment represent a single segment variable.
-type Segment struct {
- duration *Gauge
- totalDuration *Counter
- counter *Counter
- concurrentInstances *Gauge
- concurrentCounter uint32
-}
-
-// SegmentInstance is generated once a segments starts.
-type SegmentInstance struct {
- segment *Segment
- start time.Time
- labels map[string]string
-}
-
-// MakeSegment create a new segment with the provided name and description.
-func MakeSegment(metric *MetricName) *Segment {
- c := &Segment{
- duration: MakeGauge(MetricName{Name: metric.Name + "_sec", Description: metric.Description + "(duration)"}),
- totalDuration: MakeCounter(MetricName{Name: metric.Name + "_sec_total", Description: metric.Description + "(total duration)"}),
- counter: MakeCounter(MetricName{Name: metric.Name + "_total", Description: metric.Description + "(total count)"}),
- concurrentInstances: MakeGauge(MetricName{Name: metric.Name + "_concurrent", Description: metric.Description + "(concurrent instances)"}),
- }
- return c
-}
-
-// EnterSegment is called when a segment is entered.
-func (segment *Segment) EnterSegment(labels map[string]string) (*SegmentInstance, error) {
- segment.counter.Inc(labels)
- concurrentCounter := atomic.AddUint32(&segment.concurrentCounter, uint32(1))
- segment.concurrentInstances.Set(float64(concurrentCounter), labels)
- return &SegmentInstance{
- segment: segment,
- start: time.Now(),
- labels: labels,
- }, nil
-}
-
-// Register registers the counter with the default/specific registry
-func (segment *Segment) Register(reg *Registry) {
- segment.duration.Register(reg)
- segment.totalDuration.Register(reg)
- segment.counter.Register(reg)
- segment.concurrentInstances.Register(reg)
-}
-
-// Deregister deregisters the counter with the default/specific registry
-func (segment *Segment) Deregister(reg *Registry) {
- segment.duration.Deregister(reg)
- segment.totalDuration.Deregister(reg)
- segment.counter.Deregister(reg)
- segment.concurrentInstances.Deregister(reg)
-}
-
-// LeaveSegment is expected to be called via a "defer" statement.
-func (segInstance *SegmentInstance) LeaveSegment() error {
- if segInstance == nil {
- return nil
- }
- concurrentCounter := atomic.AddUint32(&segInstance.segment.concurrentCounter, ^uint32(0))
- seconds := time.Since(segInstance.start).Seconds()
- segInstance.segment.duration.Set(seconds, segInstance.labels)
- segInstance.segment.totalDuration.Add(seconds, segInstance.labels)
- segInstance.segment.concurrentInstances.Set(float64(concurrentCounter), segInstance.labels)
- return nil
-}
diff --git a/util/metrics/segment_test.go b/util/metrics/segment_test.go
deleted file mode 100644
index c5cae321d..000000000
--- a/util/metrics/segment_test.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package metrics
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-type SegmentTest struct {
- MetricTest
-}
-
-func TestMetricSegment(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- const initialSleepDuration = 10 * time.Millisecond
- const maxSleepDuration = 4 * time.Second
- done := false
- for sleepDuration := initialSleepDuration; sleepDuration <= maxSleepDuration; sleepDuration *= 2 {
- done = testMetricSegmentHelper(t, sleepDuration)
- if done {
- break
- }
- }
- if !done {
- require.Fail(t, "test failed")
- }
-}
-
-func testMetricSegmentHelper(t *testing.T, functionTime time.Duration) bool {
-
- test := &SegmentTest{
- MetricTest: NewMetricTest(),
- }
- // create a http listener.
- port := test.createListener(":0")
-
- metricService := MakeMetricService(&ServiceConfig{
- NodeExporterListenAddress: fmt.Sprintf("localhost:%d", port),
- Labels: map[string]string{
- "host_name": "host_one",
- "session_id": "AFX-229"},
- })
- metricService.Start(context.Background())
-
- acceptedFunctionThreshold := 1.1 // 10 percent.
- segment := MakeSegment(&MetricName{Name: "test_segment_name1", Description: "this is the metric test for segment object"})
- segmentTest := func() {
- inst, _ := segment.EnterSegment(map[string]string{"pid": "123"})
- defer inst.LeaveSegment()
- time.Sleep(functionTime)
- }
- segmentTest()
- segmentTest()
- // wait two reporting cycles to ensure we received all the messages.
- time.Sleep(test.sampleRate * 2)
-
- metricService.Shutdown()
-
- segment.Deregister(nil)
-
- test.Lock()
- defer test.Unlock()
-
- // test the metrics values. see if we received all the 4 metrics back correctly.
- // we expect the get 4 metrics : test_segment_name1_sec, test_segment_name1_sec_total, test_segment_name1_total and test_segment_name1_concurrent
- // ( we don't know in which order they would appear, but the total count should be 4 )
- require.Equal(t, 4, len(test.metrics), "Missing metric counts were reported.")
-
- for k, v := range test.metrics {
- if strings.Contains(k, "test_segment_name1_sec{") {
- // should be around 400 milliseconds.
- if elapsedTime, err := strconv.ParseFloat(v, 64); err != nil {
- t.Fatalf("The metric '%s' has unexpected value of '%s'", k, v)
- } else {
- if elapsedTime < functionTime.Seconds() || elapsedTime > functionTime.Seconds()*acceptedFunctionThreshold {
- return false
- }
- }
- }
- if strings.Contains(k, "test_segment_name1_sec_total{") {
- // should be around 800 milliseconds.
- if elapsedTime, err := strconv.ParseFloat(v, 64); err != nil {
- t.Fatalf("The metric '%s' has unexpected value of '%s'", k, v)
- } else {
- if elapsedTime < 2*functionTime.Seconds() || elapsedTime > 2*functionTime.Seconds()*acceptedFunctionThreshold {
- return false
- }
- }
- }
- if strings.Contains(k, "test_segment_name1_total{") {
- // should be 2, since we had 2 calls.
- require.Equal(t, "2", v, "The metric '%s' has unexpected value of '%s'", k, v)
- }
- }
- return true
-}
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go
index 53cce7ba6..d110b8d4f 100644
--- a/util/metrics/tagcounter.go
+++ b/util/metrics/tagcounter.go
@@ -26,8 +26,12 @@ import (
// NewTagCounter makes a set of metrics under rootName for tagged counting.
// "{TAG}" in rootName is replaced by the tag, otherwise "_{TAG}" is appended.
-func NewTagCounter(rootName, desc string) *TagCounter {
+// Optionally provided declaredTags pre-create counters for those names up front (making them easier to discover).
+func NewTagCounter(rootName, desc string, declaredTags ...string) *TagCounter {
tc := &TagCounter{Name: rootName, Description: desc}
+ for _, tag := range declaredTags {
+ tc.Add(tag, 0)
+ }
DefaultRegistry().Register(tc)
return tc
}
@@ -98,32 +102,34 @@ func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) {
// no values, nothing to say.
return
}
- // TODO: what to do with "parentLabels"? obsolete part of interface?
- buf.WriteString("# ")
- buf.WriteString(tc.Name)
- buf.WriteString(" ")
- buf.WriteString(tc.Description)
- buf.WriteString("\n")
isTemplate := strings.Contains(tc.Name, "{TAG}")
tags := tagptr.(map[string]*uint64)
for tag, tagcount := range tags {
if tagcount == nil {
continue
}
+ var name string
if isTemplate {
- name := strings.ReplaceAll(tc.Name, "{TAG}", tag)
- buf.WriteString(name)
- buf.WriteRune(' ')
- buf.WriteString(strconv.FormatUint(*tagcount, 10))
- buf.WriteRune('\n')
+ name = strings.ReplaceAll(tc.Name, "{TAG}", tag)
} else {
- buf.WriteString(tc.Name)
- buf.WriteRune('_')
- buf.WriteString(tag)
- buf.WriteRune(' ')
- buf.WriteString(strconv.FormatUint(*tagcount, 10))
- buf.WriteRune('\n')
+ name = tc.Name + "_" + tag
+ }
+ buf.WriteString("# HELP ")
+ buf.WriteString(name)
+ buf.WriteRune(' ')
+ buf.WriteString(strings.ReplaceAll(tc.Description, "{TAG}", tag))
+ buf.WriteString("\n# TYPE ")
+ buf.WriteString(name)
+ buf.WriteString(" counter\n")
+ buf.WriteString(name)
+ if len(parentLabels) > 0 {
+ buf.WriteRune('{')
+ buf.WriteString(parentLabels)
+ buf.WriteRune('}')
}
+ buf.WriteRune(' ')
+ buf.WriteString(strconv.FormatUint(*tagcount, 10))
+ buf.WriteRune('\n')
}
}
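
A short usage sketch of the declared-tags option and the new per-tag HELP/TYPE output (counter and tag names here are illustrative; TestTagCounterWriteMetric below exercises the same path):

package main

import (
	"fmt"
	"strings"

	"github.com/algorand/go-algorand/util/metrics"
)

func main() {
	// declaring TX and RX up front makes both counters visible at zero
	tc := metrics.NewTagCounter("demo_msgs_{TAG}", "number of {TAG} messages", "TX", "RX")
	tc.Add("TX", 1)

	var sb strings.Builder
	tc.WriteMetric(&sb, `host="relay1"`)
	fmt.Print(sb.String())
	// one HELP/TYPE/value triplet per tag (map order is not guaranteed), e.g.
	// # HELP demo_msgs_TX number of TX messages
	// # TYPE demo_msgs_TX counter
	// demo_msgs_TX{host="relay1"} 1
}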
diff --git a/util/metrics/tagcounter_test.go b/util/metrics/tagcounter_test.go
index b76202c53..feb464a35 100644
--- a/util/metrics/tagcounter_test.go
+++ b/util/metrics/tagcounter_test.go
@@ -40,6 +40,7 @@ func TestTagCounter(t *testing.T) {
}
tc := NewTagCounter("tc", "wat")
+ DefaultRegistry().Deregister(tc)
// check that empty TagCounter cleanly returns no results
var sb strings.Builder
@@ -80,6 +81,39 @@ func TestTagCounter(t *testing.T) {
}
}
+func TestTagCounterWriteMetric(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ tc := NewTagCounter("count_msgs_{TAG}", "number of {TAG} messages")
+ DefaultRegistry().Deregister(tc)
+
+ tc.Add("TX", 100)
+ tc.Add("TX", 1)
+ tc.Add("RX", 0)
+
+ var sbOut strings.Builder
+ tc.WriteMetric(&sbOut, `host="myhost"`)
+ txExpected := `# HELP count_msgs_TX number of TX messages
+# TYPE count_msgs_TX counter
+count_msgs_TX{host="myhost"} 101
+`
+ rxExpected := `# HELP count_msgs_RX number of RX messages
+# TYPE count_msgs_RX counter
+count_msgs_RX{host="myhost"} 0
+`
+ expfmt := sbOut.String()
+ require.True(t, expfmt == txExpected+rxExpected || expfmt == rxExpected+txExpected, "bad fmt: %s", expfmt)
+
+ tc2 := NewTagCounter("declared", "number of {TAG}s", "A", "B")
+ DefaultRegistry().Deregister(tc2)
+ aExpected := "# HELP declared_A number of As\n# TYPE declared_A counter\ndeclared_A{host=\"h\"} 0\n"
+ bExpected := "# HELP declared_B number of Bs\n# TYPE declared_B counter\ndeclared_B{host=\"h\"} 0\n"
+ sbOut = strings.Builder{}
+ tc2.WriteMetric(&sbOut, `host="h"`)
+ expfmt = sbOut.String()
+ require.True(t, expfmt == aExpected+bExpected || expfmt == bExpected+aExpected, "bad fmt: %s", expfmt)
+}
+
func BenchmarkTagCounter(b *testing.B) {
b.Logf("b.N = %d", b.N)
t := b