author    John Lee <64482439+algojohnlee@users.noreply.github.com>  2021-03-09 20:34:24 -0500
committer GitHub <noreply@github.com>  2021-03-09 20:34:24 -0500
commit    aa02b688208e0796e2247f604e3eaca277e3c6e8 (patch)
tree      5aa349df60d17279a161c6486c5605a12c05b7b1
parent    71b58951893a322c2aae02f8809dee8f24a01eec (diff)
parent    35691b6a0a4c43726367b48c082aa6edda8f9a2c (diff)
Merge pull request #1954 from onetechnical/onetechnical/relbeta2.5.1 (tag: v2.5.1-beta)
go-algorand 2.5.1-beta
-rw-r--r--  .github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md | 6
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.md | 19
-rw-r--r--  .github/ISSUE_TEMPLATE/config.yml | 4
-rw-r--r--  .github/ISSUE_TEMPLATE/feature_request.md | 25
-rw-r--r--  .github/ISSUE_TEMPLATE/question.md | 19
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 4
-rw-r--r--  SECURITY.md | 7
-rw-r--r--  THANKS.md | 1
-rw-r--r--  agreement/selector.go | 2
-rw-r--r--  buildnumber.dat | 2
-rw-r--r--  catchup/catchpointService.go | 2
-rw-r--r--  catchup/fetcher.go | 15
-rw-r--r--  catchup/fetcher_test.go | 22
-rw-r--r--  catchup/pref_test.go | 2
-rw-r--r--  catchup/service.go | 14
-rw-r--r--  catchup/service_test.go | 18
-rw-r--r--  catchup/wsFetcher.go | 61
-rw-r--r--  cmd/catchupsrv/main.go | 2
-rw-r--r--  cmd/dispenser/server.go | 7
-rw-r--r--  cmd/opdoc/opdoc.go | 22
-rw-r--r--  cmd/tealdbg/local.go | 7
-rw-r--r--  cmd/tealdbg/local_test.go | 2
-rw-r--r--  config/consensus.go | 33
-rw-r--r--  crypto/compactcert/verifier.go | 8
-rw-r--r--  crypto/merklearray/merkle.go | 10
-rw-r--r--  crypto/merklearray/merkle_test.go | 20
-rw-r--r--  daemon/algod/api/Makefile | 11
-rw-r--r--  daemon/algod/api/algod.oas2.json | 105
-rw-r--r--  daemon/algod/api/algod.oas3.yml | 151
-rw-r--r--  daemon/algod/api/client/restClient.go | 7
-rw-r--r--  daemon/algod/api/server/v2/generated/private/routes.go | 252
-rw-r--r--  daemon/algod/api/server/v2/generated/private/types.go | 13
-rw-r--r--  daemon/algod/api/server/v2/generated/routes.go | 383
-rw-r--r--  daemon/algod/api/server/v2/generated/types.go | 20
-rw-r--r--  daemon/algod/api/server/v2/handlers.go | 58
-rw-r--r--  daemon/kmd/lib/kmdapi/requests.go | 5
-rw-r--r--  data/basics/teal.go | 2
-rw-r--r--  data/basics/units.go | 2
-rw-r--r--  data/basics/userBalance.go | 9
-rw-r--r--  data/bookkeeping/block.go | 33
-rw-r--r--  data/bookkeeping/block_test.go | 54
-rw-r--r--  data/bookkeeping/txn_merkle.go | 98
-rw-r--r--  data/bookkeeping/txn_merkle_test.go | 140
-rw-r--r--  data/ledger.go | 53
-rw-r--r--  data/ledger_test.go | 104
-rw-r--r--  data/transactions/logic/Makefile | 27
-rw-r--r--  data/transactions/logic/README.md | 78
-rw-r--r--  data/transactions/logic/README_in.md | 2
-rw-r--r--  data/transactions/logic/TEAL_opcodes.md | 223
-rw-r--r--  data/transactions/logic/assembler.go | 589
-rw-r--r--  data/transactions/logic/assembler_test.go | 368
-rw-r--r--  data/transactions/logic/backwardCompat_test.go | 33
-rw-r--r--  data/transactions/logic/doc.go | 114
-rw-r--r--  data/transactions/logic/doc_test.go | 14
-rw-r--r--  data/transactions/logic/eval.go | 372
-rw-r--r--  data/transactions/logic/evalStateful_test.go | 804
-rw-r--r--  data/transactions/logic/eval_test.go | 885
-rw-r--r--  data/transactions/logic/fields.go | 46
-rw-r--r--  data/transactions/logic/fields_string.go | 23
-rw-r--r--  data/transactions/logic/opcodes.go | 271
-rw-r--r--  data/transactions/logic/opcodes_test.go | 56
-rw-r--r--  data/transactions/signedtxn.go | 12
-rw-r--r--  data/transactions/signedtxn_test.go | 6
-rw-r--r--  ledger/acctupdates.go | 6
-rw-r--r--  ledger/applications.go | 15
-rw-r--r--  ledger/ledger_test.go | 9
-rw-r--r--  libgoal/libgoal.go | 9
-rw-r--r--  network/wsNetwork.go | 153
-rw-r--r--  network/wsNetwork_test.go | 7
-rw-r--r--  network/wsPeer.go | 8
-rw-r--r--  node/node.go | 10
-rw-r--r--  protocol/consensus.go | 7
-rw-r--r--  protocol/hash.go | 2
-rw-r--r--  protocol/tags.go | 24
-rw-r--r--  rpcs/blockService.go | 69
-rw-r--r--  rpcs/blockService_test.go | 6
-rw-r--r--  rpcs/wsFetcherService.go | 202
-rwxr-xr-x  scripts/buildhost/start_ec2_instance.sh | 2
-rw-r--r--  scripts/release/test/deb/testDebian.exp | 4
-rw-r--r--  test/README.md | 9
-rw-r--r--  test/e2e-go/cli/goal/expect/basicGoalTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/corsTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/createWalletTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/doubleSpendingTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalAccountTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalAssetTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 10
-rw-r--r--  test/e2e-go/cli/goal/expect/goalNodeStatusTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalNodeTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/goalTxValidityTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/ledgerTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/limitOrderTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/listExpiredParticipationKeyTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/multisigCreationDeletionTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/pingpongTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/reportTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/statefulTealAppReadTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp | 4
-rw-r--r--  test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp | 4
-rw-r--r--  test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp | 69
-rw-r--r--  test/e2e-go/features/catchup/basicCatchup_test.go | 34
-rw-r--r--  test/e2e-go/features/transactions/proof_test.go | 105
-rw-r--r--  test/e2e-go/upgrades/application_support_test.go | 20
-rw-r--r--  test/framework/fixtures/libgoalFixture.go | 2
-rw-r--r--  test/muleCI/mule.yaml | 44
-rwxr-xr-x  test/scripts/e2e.sh | 18
-rwxr-xr-x  test/scripts/e2e_client_runner.py | 23
-rwxr-xr-x  test/scripts/e2e_subs/asset-misc.sh | 6
-rwxr-xr-x  test/scripts/e2e_subs/dynamic-fee-teal-test.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/e2e-teal.sh | 43
-rwxr-xr-x  test/scripts/e2e_subs/htlc-teal-test.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/keyreg-teal-test.sh | 4
-rwxr-xr-x  test/scripts/e2e_subs/limit-swap-test.sh | 8
-rwxr-xr-x  test/scripts/e2e_subs/periodic-teal-test.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/teal-split-test.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/v24/teal-v2-only.sh | 97
-rw-r--r--  test/testdata/nettemplates/TwoNodes50EachV24.json | 29
122 files changed, 4356 insertions, 2555 deletions
diff --git a/.github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md b/.github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md
index 18bf27ad0..254724fe9 100644
--- a/.github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md
+++ b/.github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md
@@ -9,10 +9,10 @@ assignees: ''
---
## Summary
-*Describe the problem identified or the general goal of this issue*
+<!-- *Describe the problem identified or the general goal of this issue* -->
## Scope/Requirements
-*What's involved in this issue? What's required to achieve the goal?*
+<!-- *What's involved in this issue? What's required to achieve the goal?* -->
## Urgency/Relative Priority
-*How urgent is this issue? What are the timing considerations to take into account?*
+<!-- *How urgent is this issue? What are the timing considerations to take into account?* -->
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 6ed88b6ac..880031609 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -7,27 +7,24 @@ assignees: ''
---
-<!--
-NOTE: If this issue relates to security, please use the vulnerability disclosure form here:
-https://www.algorand.com/resources/blog/security
-
-General, developer or support questions concerning Algorand should be directed to the Algorand Forums https://forum.algorand.org/.
--->
-
### Subject of the issue
-Describe your issue here.
+
+<!-- Describe your issue here. -->
### Your environment
+
+<!--
* Software version: `algod -v`
* Node status if applicable: `goal node status`
* Operating System details.
* In many cases log files and cadaver files are also useful to include. Since these files may be large, an Algorand developer may request them later. These files may include public addresses that you're participating with. If that is a concern please be sure to scrub that data.
+-->
### Steps to reproduce
-Tell us how to reproduce this issue.
+
+1.
+2.
### Expected behaviour
-Tell us what should happen
### Actual behaviour
-Tell us what happens instead
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..fc246db67
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,4 @@
+contact_links:
+ - name: ❓ Question and Help
+ url: https://forum.algorand.org/
+ about: The issue tracker is not for support questions. Please join our community for help and discussion.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 2565b902f..b3df74a87 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -7,27 +7,14 @@ assignees: ''
---
-<!--
-NOTE: If this issue relates to security, please use the vulnerability disclosure form here:
-https://www.algorand.com/resources/blog/security
+## Is your feature request related to a problem? Please describe.
-General, developer or support questions concerning Algorand should be directed to the Algorand Forums https://forum.algorand.org/.
--->
+<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
-<!--
-This project is focused on low level platform concerns. A good feature request would be related
-to protocol and network usage, debugging and monitoring tools, and goal commands.
+## Describe the solution you'd like.
-Higher level suggestions relating to layer 2 applications (like wallet apps) are not appropriate here.
--->
-**Is your feature request related to a problem? Please describe.**
+<!-- A clear and concise description of what you want to happen. -->
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+## Additional context.
-**Describe the solution you'd like**
-
-A clear and concise description of what you want to happen.
-
-**Additional context**
-
-Add any other context or screenshots about the feature request here.
+<!-- Add any other context or screenshots about the feature request here. -->
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
deleted file mode 100644
index e400247ec..000000000
--- a/.github/ISSUE_TEMPLATE/question.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-name: "❓ Question"
-about: General questions related to the algorand platform.
-title: ''
-labels: question
-assignees: ''
-
----
-
-🚨 The issue tracker is not for questions. 🚨
-
-As questions here are likely to be closed, listed below are some resources that may be helpful in getting your question answered.
-
-If you have general, developer or support questions concerning Algorand please see the Algorand Forums https://forum.algorand.org/.
-
-Additional Developer information is available here: https://developer.algorand.org/
-
-NOTE: If this issue relates to security, please use the vulnerability disclosure form here:
-https://www.algorand.com/resources/blog/security
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 88382d7b3..7c6fe044a 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -15,8 +15,8 @@ It is also a good idea to run tests:
## Summary
-Explain the goal of this change and what problem it is solving. Format this cleanly so that it may be used for a commit message, as your changes will be squash-merged.
+<!-- Explain the goal of this change and what problem it is solving. Format this cleanly so that it may be used for a commit message, as your changes will be squash-merged. -->
## Test Plan
-How did you test these changes? Please provide the exact scenarios you tested in as much detail as possible including commands, output and rationale.
+<!-- How did you test these changes? Please provide the exact scenarios you tested in as much detail as possible including commands, output and rationale. -->
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 000000000..52b3bde6c
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,7 @@
+# Vulnerability Disclosures
+
+Algorand takes the security of the platform and of its users very seriously. We recognize the important role of external security researchers and developers in helping keep our community safe. As with most security reward programs, we ask that you use common sense when looking for security bugs. Vulnerabilities must be disclosed to us privately with reasonable time to respond, and avoid compromise of other users and accounts, or loss of funds that are not your own. We do not reward denial of service, spam, or social engineering vulnerabilities.
+
+If you believe that you have found a security vulnerability you may disclose it here:
+
+https://www.algorand.com/resources/blog/security
diff --git a/THANKS.md b/THANKS.md
index 74e157198..72190d586 100644
--- a/THANKS.md
+++ b/THANKS.md
@@ -12,6 +12,7 @@ In no particular order:
- jsign
- RomitKumar
- jeapostrophe
+- aybehrouz
### Bug Reports
- Nanyan
diff --git a/agreement/selector.go b/agreement/selector.go
index f669865c8..623c4d23e 100644
--- a/agreement/selector.go
+++ b/agreement/selector.go
@@ -55,7 +55,7 @@ func seedRound(r basics.Round, cparams config.ConsensusParams) basics.Round {
return r.SubSaturate(basics.Round(cparams.SeedLookback))
}
-// a helper function for obtaining memberhship verification parameters.
+// a helper function for obtaining membership verification parameters.
func membership(l LedgerReader, addr basics.Address, r basics.Round, p period, s step) (m committee.Membership, err error) {
cparams, err := l.ConsensusParams(ParamsRound(r))
if err != nil {
diff --git a/buildnumber.dat b/buildnumber.dat
index 573541ac9..d00491fd7 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-0
+1
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index 654a95a7d..efe577768 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -407,7 +407,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling VerifyCatchpoint : %v", err))
}
- // give a rank to the download, as the download was successfull.
+ // give a rank to the download, as the download was successful.
peerRank := cs.blocksDownloadPeerSelector.PeerDownloadDurationToRank(peer, blockDownloadDuration)
cs.blocksDownloadPeerSelector.RankPeer(peer, peerRank)
diff --git a/catchup/fetcher.go b/catchup/fetcher.go
index cba9e8a8d..c1f2be604 100644
--- a/catchup/fetcher.go
+++ b/catchup/fetcher.go
@@ -54,14 +54,13 @@ type FetcherFactory interface {
// Create a new fetcher
New() Fetcher
// Create a new fetcher that also fetches from backup peers over gossip network utilising given message tag
- NewOverGossip(requestTag protocol.Tag) Fetcher
+ NewOverGossip() Fetcher
}
// NetworkFetcherFactory creates network fetchers
type NetworkFetcherFactory struct {
net network.GossipNode
peerLimit int
- fs *rpcs.WsFetcherService
cfg *config.Local
log logging.Logger
@@ -78,12 +77,11 @@ func (factory NetworkFetcherFactory) makeHTTPFetcherFromPeer(log logging.Logger,
// MakeNetworkFetcherFactory returns a network fetcher factory, that associates fetchers with no more than peerLimit peers from the aggregator.
// WSClientSource can be nil, if no network exists to create clients from (defaults to http clients)
-func MakeNetworkFetcherFactory(net network.GossipNode, peerLimit int, fs *rpcs.WsFetcherService, cfg *config.Local) NetworkFetcherFactory {
+func MakeNetworkFetcherFactory(net network.GossipNode, peerLimit int, cfg *config.Local) NetworkFetcherFactory {
var factory NetworkFetcherFactory
factory.net = net
factory.peerLimit = peerLimit
factory.log = logging.Base()
- factory.fs = fs
factory.cfg = cfg
return factory
}
@@ -119,19 +117,14 @@ func (factory NetworkFetcherFactory) New() Fetcher {
// NewOverGossip returns a fetcher using the given message tag.
// If there are gossip peers, then it returns a fetcher over gossip
// Otherwise, it returns an HTTP fetcher
-// We should never build two fetchers utilising the same tag. Why?
-func (factory NetworkFetcherFactory) NewOverGossip(tag protocol.Tag) Fetcher {
+func (factory NetworkFetcherFactory) NewOverGossip() Fetcher {
gossipPeers := factory.net.GetPeers(network.PeersConnectedIn)
factory.log.Debugf("%d gossip peers", len(gossipPeers))
if len(gossipPeers) == 0 {
factory.log.Info("no gossip peers for NewOverGossip")
return factory.New()
}
- if factory.fs == nil {
- factory.log.Info("WsFetcherService not available; fetch over gossip disabled")
- return factory.New()
- }
- f := MakeWsFetcher(factory.log, tag, gossipPeers, factory.fs, factory.cfg)
+ f := MakeWsFetcher(factory.log, gossipPeers, factory.cfg)
return &ComposedFetcher{fetchers: []Fetcher{factory.New(), f}}
}
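With this change, NewOverGossip no longer takes a protocol.Tag and MakeNetworkFetcherFactory no longer takes a *rpcs.WsFetcherService. A minimal in-package sketch of the new calling convention, mirroring the updated tests below (fetchOverGossip is a hypothetical helper; imports of context, config, network, basics, bookkeeping, and agreement are elided):

    // Sketch: fetch one round over gossip with the tag-free factory API.
    func fetchOverGossip(ctx context.Context, net network.GossipNode, round basics.Round) (*bookkeeping.Block, *agreement.Certificate, error) {
        cfg := config.GetDefaultLocal()
        factory := MakeNetworkFetcherFactory(net, 10, &cfg)
        fetcher := factory.NewOverGossip() // falls back to factory.New() when no gossip peers are connected
        defer fetcher.Close()
        block, cert, _, err := fetcher.FetchBlock(ctx, round)
        return block, cert, err
    }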
diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go
index 05832e2fb..fc56eab52 100644
--- a/catchup/fetcher_test.go
+++ b/catchup/fetcher_test.go
@@ -131,7 +131,7 @@ func getAllClientsSelectedForRound(t *testing.T, fetcher *NetworkFetcher, round
func TestSelectValidRemote(t *testing.T) {
network := makeMockClientAggregator(t, false, false)
cfg := config.GetDefaultLocal()
- factory := MakeNetworkFetcherFactory(network, numberOfPeers, nil, &cfg)
+ factory := MakeNetworkFetcherFactory(network, numberOfPeers, &cfg)
factory.log = logging.TestingLog(t)
fetcher := factory.New()
require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).peers))
@@ -596,7 +596,7 @@ func TestGetBlockHTTP(t *testing.T) {
_, ok := net.GetPeers(network.PeersConnectedOut)[0].(network.HTTPPeer)
require.True(t, ok)
cfg := config.GetDefaultLocal()
- factory := MakeNetworkFetcherFactory(net, numberOfPeers, nil, &cfg)
+ factory := MakeNetworkFetcherFactory(net, numberOfPeers, &cfg)
factory.log = logging.TestingLog(t)
fetcher := factory.New()
// we have one peer, the HTTP block server
@@ -709,7 +709,7 @@ func TestGetBlockMocked(t *testing.T) {
require.NoError(t, ledgerA.AddBlock(b, agreement.Certificate{Round: next}))
// B tries to fetch block
- factory := MakeNetworkFetcherFactory(nodeB, 10, nil, &cfg)
+ factory := MakeNetworkFetcherFactory(nodeB, 10, &cfg)
factory.log = logging.TestingLog(t)
nodeBRPC := factory.New()
ctx, cf := context.WithTimeout(context.Background(), time.Second)
@@ -759,7 +759,7 @@ func TestGetFutureBlock(t *testing.T) {
rpcs.MakeBlockService(config.GetDefaultLocal(), ledgerA, nodeA, "test genesisID")
// B tries to fetch block 4
- factory := MakeNetworkFetcherFactory(nodeB, 10, nil, &cfg)
+ factory := MakeNetworkFetcherFactory(nodeB, 10, &cfg)
factory.log = logging.TestingLog(t)
nodeBRPC := factory.New()
ctx, cf := context.WithTimeout(context.Background(), time.Second)
@@ -851,7 +851,7 @@ func makeTestUnicastPeer(gn network.GossipNode, version string, t *testing.T) ne
// A quick GetBlock over websockets test hitting a mocked websocket server (no actual connection)
func TestGetBlockWS(t *testing.T) {
// test the WS fetcher:
- // 1. fetcher sends UniCatchupReqTag to http peer
+ // 1. fetcher sends UniEnsBlockReqTag to http peer
// 2. peer send message to gossip node
// 3. gossip node send message to ledger service
// 4. ledger service responds with UniCatchupResTag sending it back to the http peer
@@ -867,7 +867,7 @@ func TestGetBlockWS(t *testing.T) {
cfg := config.GetDefaultLocal()
- versions := []string{"1", "2.1"}
+ versions := []string{"2.1"}
for _, version := range versions { // range network.SupportedProtocolVersions {
net := &httpTestPeerSource{}
@@ -881,14 +881,11 @@ func TestGetBlockWS(t *testing.T) {
up := makeTestUnicastPeer(net, version, t)
net.peers = append(net.peers, up)
- fs := rpcs.MakeWsFetcherService(logging.TestingLog(t), net)
- fs.Start()
-
_, ok := net.GetPeers(network.PeersConnectedIn)[0].(network.UnicastPeer)
require.True(t, ok)
- factory := MakeNetworkFetcherFactory(net, numberOfPeers, fs, &cfg)
+ factory := MakeNetworkFetcherFactory(net, numberOfPeers, &cfg)
factory.log = logging.TestingLog(t)
- fetcher := factory.NewOverGossip(protocol.UniCatchupReqTag)
+ fetcher := factory.NewOverGossip()
// we have one peer, the Ws block server
require.Equal(t, fetcher.NumPeers(), 1)
@@ -896,12 +893,9 @@ func TestGetBlockWS(t *testing.T) {
var cert *agreement.Certificate
var client FetcherClient
- // start := time.Now()
block, cert, client, err = fetcher.FetchBlock(context.Background(), next)
require.NotNil(t, client)
require.NoError(t, err)
- // end := time.Now()
- // require.True(t, end.Sub(start) < 10*time.Second)
require.Equal(t, &b, block)
if err == nil {
require.NotEqual(t, nil, block)
diff --git a/catchup/pref_test.go b/catchup/pref_test.go
index 5bad041c4..89ce25118 100644
--- a/catchup/pref_test.go
+++ b/catchup/pref_test.go
@@ -56,7 +56,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) {
require.NoError(b, err)
// Make Service
- syncer := MakeService(logging.Base(), defaultConfig, net, local, nil, new(mockedAuthenticator), nil)
+ syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil)
syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int), latency: 100 * time.Millisecond, predictable: true})
b.StartTimer()
diff --git a/catchup/service.go b/catchup/service.go
index 6b74eefd7..dd83f05b2 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -33,7 +33,6 @@ import (
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/rpcs"
)
const catchupPeersForSync = 10
@@ -103,17 +102,16 @@ type BlockAuthenticator interface {
}
// MakeService creates a catchup service instance from its constituent components
-// If wsf is nil, then fetch over gossip is disabled.
-func MakeService(log logging.Logger, config config.Local, net network.GossipNode, ledger Ledger, wsf *rpcs.WsFetcherService, auth BlockAuthenticator, unmatchedPendingCertificates <-chan PendingUnmatchedCertificate) (s *Service) {
+func MakeService(log logging.Logger, config config.Local, net network.GossipNode, ledger Ledger, auth BlockAuthenticator, unmatchedPendingCertificates <-chan PendingUnmatchedCertificate) (s *Service) {
s = &Service{}
s.cfg = config
- s.fetcherFactory = MakeNetworkFetcherFactory(net, catchupPeersForSync, wsf, &config)
+ s.fetcherFactory = MakeNetworkFetcherFactory(net, catchupPeersForSync, &config)
s.ledger = ledger
s.net = net
s.auth = auth
s.unmatchedPendingCertificates = unmatchedPendingCertificates
- s.latestRoundFetcherFactory = MakeNetworkFetcherFactory(net, blockQueryPeerLimit, wsf, &config)
+ s.latestRoundFetcherFactory = MakeNetworkFetcherFactory(net, blockQueryPeerLimit, &config)
s.log = log.With("Context", "sync")
s.parallelBlocks = config.CatchupParallelBlocks
s.deadlineTimeout = agreement.DeadlineTimeout()
@@ -324,7 +322,7 @@ func (s *Service) pipelineCallback(fetcher Fetcher, r basics.Round, thisFetchCom
// TODO the following code does not handle the following case: seedLookback upgrades during fetch
func (s *Service) pipelinedFetch(seedLookback uint64) {
- fetcher := s.fetcherFactory.NewOverGossip(protocol.UniCatchupReqTag)
+ fetcher := s.fetcherFactory.NewOverGossip()
defer fetcher.Close()
// make sure that we have at least one peer
@@ -559,7 +557,7 @@ func (s *Service) syncCert(cert *PendingUnmatchedCertificate) {
// TODO this doesn't actually use the digest from cert!
func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) {
blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest
- fetcher := s.latestRoundFetcherFactory.NewOverGossip(protocol.UniEnsBlockReqTag)
+ fetcher := s.latestRoundFetcherFactory.NewOverGossip()
defer func() {
fetcher.Close()
}()
@@ -569,7 +567,7 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
// refresh peers and try again
logging.Base().Warn("fetchRound found no outgoing peers")
s.net.RequestConnectOutgoing(true, s.ctx.Done())
- fetcher = s.latestRoundFetcherFactory.NewOverGossip(protocol.UniEnsBlockReqTag)
+ fetcher = s.latestRoundFetcherFactory.NewOverGossip()
}
// Ask the fetcher to get the block somehow
block, fetchedCert, rpcc, err := s.innerFetch(fetcher, cert.Round)
diff --git a/catchup/service_test.go b/catchup/service_test.go
index b7adf2295..417801276 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -60,7 +60,7 @@ func (factory *MockedFetcherFactory) New() Fetcher {
return factory.fetcher
}
-func (factory *MockedFetcherFactory) NewOverGossip(tag protocol.Tag) Fetcher {
+func (factory *MockedFetcherFactory) NewOverGossip() Fetcher {
return factory.New()
}
@@ -177,7 +177,7 @@ func TestServiceFetchBlocksSameRange(t *testing.T) {
net := &mocks.MockNetwork{}
// Make Service
- syncer := MakeService(logging.Base(), defaultConfig, net, local, nil, &mockedAuthenticator{errorRound: -1}, nil)
+ syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil)
syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)})
syncer.testStart()
@@ -195,7 +195,7 @@ func TestPeriodicSync(t *testing.T) {
require.True(t, 0 == initialLocalRound)
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, nil, auth, nil)
+ s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, auth, nil)
s.deadlineTimeout = 2 * time.Second
factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
@@ -230,7 +230,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) {
net := &mocks.MockNetwork{}
// Make Service
- s := MakeService(logging.Base(), defaultConfig, net, local, nil, &mockedAuthenticator{errorRound: -1}, nil)
+ s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil)
factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
s.fetcherFactory = &factory
@@ -271,7 +271,7 @@ func TestAbruptWrites(t *testing.T) {
lastRound := local.LastRound()
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, nil, &mockedAuthenticator{errorRound: -1}, nil)
+ s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil)
factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
s.fetcherFactory = &factory
@@ -308,7 +308,7 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) {
lastRoundAtStart := local.LastRound()
// Make Service
- syncer := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, nil, &mockedAuthenticator{errorRound: -1}, nil)
+ syncer := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil)
syncer.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
// Start the service ( dummy )
@@ -340,7 +340,7 @@ func TestServiceFetchBlocksMalformed(t *testing.T) {
lastRoundAtStart := local.LastRound()
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, nil, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil)
+ s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil)
s.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
// Start the service ( dummy )
@@ -460,7 +460,7 @@ func helperTestOnSwitchToUnSupportedProtocol(
config.CatchupParallelBlocks = 2
// Make Service
- s := MakeService(logging.Base(), config, &mocks.MockNetwork{}, local, nil, &mockedAuthenticator{errorRound: -1}, nil)
+ s := MakeService(logging.Base(), config, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil)
s.deadlineTimeout = 2 * time.Second
s.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
@@ -647,7 +647,7 @@ func TestCatchupUnmatchedCertificate(t *testing.T) {
lastRoundAtStart := local.LastRound()
// Make Service
- s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, nil, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil)
+ s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil)
s.latestRoundFetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}}
s.testStart()
for roundNumber := 2; roundNumber < 10; roundNumber += 3 {
diff --git a/catchup/wsFetcher.go b/catchup/wsFetcher.go
index b75688713..29443b53c 100644
--- a/catchup/wsFetcher.go
+++ b/catchup/wsFetcher.go
@@ -18,6 +18,7 @@ package catchup
import (
"context"
+ "encoding/binary"
"fmt"
"time"
@@ -40,27 +41,21 @@ const numBufferedInternalMsg = 1
// a custom websockets interface (bidirectional). Internally it keeps track
// of multiple peers and handles dropping them appropriately using a NetworkFetcher.
type WsFetcher struct {
- tag protocol.Tag // domain separation per request
-
f *NetworkFetcher
clients map[network.Peer]*wsFetcherClient
config *config.Local
- // service
- service *rpcs.WsFetcherService
-
// metadata
log logging.Logger
mu deadlock.RWMutex
}
// MakeWsFetcher creates a fetcher that fetches over the gossip network.
-// It instantiates a NetworkFetcher under the hood, registers as a handler for the given message tag,
+// It instantiates a NetworkFetcher under the hood,
// and demuxes messages appropriately to the corresponding fetcher clients.
-func MakeWsFetcher(log logging.Logger, tag protocol.Tag, peers []network.Peer, service *rpcs.WsFetcherService, cfg *config.Local) Fetcher {
+func MakeWsFetcher(log logging.Logger, peers []network.Peer, cfg *config.Local) Fetcher {
f := &WsFetcher{
log: log,
- tag: tag,
config: cfg,
}
f.clients = make(map[network.Peer]*wsFetcherClient)
@@ -68,9 +63,7 @@ func MakeWsFetcher(log logging.Logger, tag protocol.Tag, peers []network.Peer, s
for i, peer := range peers {
fc := &wsFetcherClient{
target: peer.(network.UnicastPeer),
- tag: f.tag,
pendingCtxs: make(map[context.Context]context.CancelFunc),
- service: service,
config: cfg,
}
p[i] = fc
@@ -82,7 +75,6 @@ func MakeWsFetcher(log logging.Logger, tag protocol.Tag, peers []network.Peer, s
peers: p,
log: f.log,
}
- f.service = service
return f
}
@@ -109,8 +101,6 @@ func (wsf *WsFetcher) Close() {
// a stub fetcherClient to satisfy the NetworkFetcher interface
type wsFetcherClient struct {
target network.UnicastPeer // the peer where we're going to send the request.
- tag protocol.Tag // the tag that is associated with the request/
- service *rpcs.WsFetcherService // the fetcher service. This is where we perform the actual request and waiting for the response.
pendingCtxs map[context.Context]context.CancelFunc // a map of all the current pending contexts.
config *config.Local
@@ -139,17 +129,14 @@ func (w *wsFetcherClient) GetBlockBytes(ctx context.Context, r basics.Round) ([]
delete(w.pendingCtxs, childCtx)
}()
- resp, err := w.service.RequestBlock(childCtx, w.target, r, w.tag)
+ blockBytes, err := w.requestBlock(childCtx, r)
if err != nil {
return nil, err
}
- if resp.Error != "" {
- return nil, fmt.Errorf("wsFetcherClient(%d): server error, %v", r, resp.Error)
- }
- if len(resp.BlockBytes) == 0 {
+ if len(blockBytes) == 0 {
return nil, fmt.Errorf("wsFetcherClient(%d): empty response", r)
}
- return resp.BlockBytes, nil
+ return blockBytes, nil
}
// Address implements FetcherClient
@@ -168,3 +155,39 @@ func (w *wsFetcherClient) Close() error {
w.pendingCtxs = make(map[context.Context]context.CancelFunc)
return nil
}
+
+// requestBlock send a request for block <round> and wait until it receives a response or a context expires.
+func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) {
+ roundBin := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(roundBin, uint64(round))
+ topics := network.Topics{
+ network.MakeTopic(rpcs.RequestDataTypeKey,
+ []byte(rpcs.BlockAndCertValue)),
+ network.MakeTopic(
+ rpcs.RoundKey,
+ roundBin),
+ }
+ resp, err := w.target.Request(ctx, protocol.UniEnsBlockReqTag, topics)
+ if err != nil {
+ return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %v", w.target.GetAddress(), round, err)
+ }
+
+ if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found {
+ return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %s", w.target.GetAddress(), round, string(errMsg))
+ }
+
+ blk, found := resp.Topics.GetValue(rpcs.BlockDataKey)
+ if !found {
+ return nil, fmt.Errorf("wsFetcherClient(%s): request failed: block data not found", w.target.GetAddress())
+ }
+ cert, found := resp.Topics.GetValue(rpcs.CertDataKey)
+ if !found {
+ return nil, fmt.Errorf("wsFetcherClient(%s): request failed: cert data not found", w.target.GetAddress())
+ }
+
+ blockCertBytes := protocol.EncodeReflect(rpcs.PreEncodedBlockCert{
+ Block: blk,
+ Certificate: cert})
+
+ return blockCertBytes, nil
+}
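The bytes returned by the new requestBlock are the block and its certificate re-encoded as rpcs.PreEncodedBlockCert, so the fetcher's callers can keep decoding them exactly as they decode HTTP-fetched blocks. A hedged sketch of that consumer side (decodeBlockCert is a hypothetical helper; it assumes rpcs.EncodedBlockCert and protocol.DecodeReflect behave as on the existing HTTP path, and imports are elided):

    // Sketch: unpack the block/cert bytes returned by GetBlockBytes.
    func decodeBlockCert(blockCertBytes []byte) (bookkeeping.Block, agreement.Certificate, error) {
        var decoded rpcs.EncodedBlockCert // typed counterpart of PreEncodedBlockCert
        if err := protocol.DecodeReflect(blockCertBytes, &decoded); err != nil {
            return bookkeeping.Block{}, agreement.Certificate{}, err
        }
        return decoded.Block, decoded.Certificate, nil
    }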
diff --git a/cmd/catchupsrv/main.go b/cmd/catchupsrv/main.go
index 18dacaf65..e41f27f55 100644
--- a/cmd/catchupsrv/main.go
+++ b/cmd/catchupsrv/main.go
@@ -85,7 +85,7 @@ func main() {
requestHeader := make(http.Header)
requestHeader.Set(network.GenesisHeader, genesisID)
requestHeader.Set(network.NodeRandomHeader, base64.StdEncoding.EncodeToString(rnd[:]))
- requestHeader.Set(network.ProtocolVersionHeader, "1")
+ requestHeader.Set(network.ProtocolVersionHeader, "2.1")
conn, err := upgrader.Upgrade(w, r, requestHeader)
if err != nil {
diff --git a/cmd/dispenser/server.go b/cmd/dispenser/server.go
index 31e95f623..35b3b2954 100644
--- a/cmd/dispenser/server.go
+++ b/cmd/dispenser/server.go
@@ -104,7 +104,12 @@ const topPageTemplate = `
<div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}">
</div>
<div>
- <input id="target" placeholder="target address">
+ <p>The dispensed Algos have no monetary value and should only be used to test applications.</p>
+ <p>This service is gracefully provided to enable development on the Algorand blockchain test networks.</p>
+ <p>Please do not abuse it by requesting more Algos than needed.</p>
+ </div>
+ <div>
+ <input id="target" placeholder="target address" size="80">
<button id="dispense">Dispense</button>
</div>
<div>
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 60cb1a13d..3c1be04ce 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -32,9 +32,13 @@ func opGroupMarkdownTable(og *logic.OpGroup, out io.Writer) {
fmt.Fprint(out, `| Op | Description |
| --- | --- |
`)
+ opSpecs := logic.OpsByName[logic.LogicVersion]
// TODO: sort by logic.OpSpecs[].Opcode
for _, opname := range og.Ops {
- fmt.Fprintf(out, "| `%s` | %s |\n", markdownTableEscape(opname), markdownTableEscape(logic.OpDoc(opname)))
+ spec := opSpecs[opname]
+ fmt.Fprintf(out, "| `%s%s` | %s |\n",
+ markdownTableEscape(spec.Name), immediateMarkdown(&spec),
+ markdownTableEscape(logic.OpDoc(opname)))
}
}
@@ -82,7 +86,7 @@ func fieldTableMarkdown(out io.Writer, names []string, types []logic.StackType,
}
func transactionFieldsMarkdown(out io.Writer) {
- fmt.Fprintf(out, "\n`txn` Fields:\n\n")
+ fmt.Fprintf(out, "\n`txn` Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)):\n\n")
fieldTableMarkdown(out, logic.TxnFieldNames, logic.TxnFieldTypes, logic.TxnFieldDocs())
}
@@ -101,6 +105,14 @@ func assetParamsFieldsMarkdown(out io.Writer) {
fieldTableMarkdown(out, logic.AssetParamsFieldNames, logic.AssetParamsFieldTypes, logic.AssetParamsFieldDocs)
}
+func immediateMarkdown(op *logic.OpSpec) string {
+ markdown := ""
+ for _, imm := range op.Details.Immediates {
+ markdown = markdown + " " + imm.Name
+ }
+ return markdown
+}
+
func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
ws := ""
opextra := logic.OpImmediateNote(op.Name)
@@ -108,7 +120,7 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) {
ws = " "
}
costs := logic.OpAllCosts(op.Name)
- fmt.Fprintf(out, "\n## %s\n\n- Opcode: 0x%02x%s%s\n", op.Name, op.Opcode, ws, opextra)
+ fmt.Fprintf(out, "\n## %s%s\n\n- Opcode: 0x%02x%s%s\n", op.Name, immediateMarkdown(op), op.Opcode, ws, opextra)
if op.Args == nil {
fmt.Fprintf(out, "- Pops: _None_\n")
} else if len(op.Args) == 1 {
@@ -277,8 +289,8 @@ func buildLanguageSpec(opGroups map[string][]string) *LanguageSpec {
records[i].Name = spec.Name
records[i].Args = typeString(spec.Args)
records[i].Returns = typeString(spec.Returns)
- records[i].Cost = logic.OpCost(spec.Name)
- records[i].Size = logic.OpSize(spec.Name)
+ records[i].Cost = spec.Details.Cost
+ records[i].Size = spec.Details.Size
records[i].ArgEnum = argEnum(spec.Name)
records[i].ArgEnumTypes = argEnumTypes(spec.Name)
records[i].Doc = logic.OpDoc(spec.Name)
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 9c1a05f9f..338a25cf5 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -337,6 +337,13 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) {
source := string(data)
ops, err := logic.AssembleStringWithVersion(source, r.proto.LogicSigVersion)
if err != nil {
+ errorLines := ""
+ for _, lineError := range ops.Errors {
+ errorLines = fmt.Sprintf("%s\n%s", errorLines, lineError.Error())
+ }
+ if errorLines != "" {
+ return fmt.Errorf("%w:%s", err, errorLines)
+ }
return err
}
r.runs[i].program = ops.Program
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index 5cb4feddf..4a93825c5 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -460,7 +460,7 @@ int 100
ProgramBlobs: [][]byte{[]byte(source)},
BalanceBlob: balanceBlob,
TxnBlob: txnBlob,
- Proto: "future",
+ Proto: string(protocol.ConsensusCurrentVersion),
Round: 222,
LatestTimestamp: 333,
GroupIndex: 0,
diff --git a/config/consensus.go b/config/consensus.go
index 948b1c576..8c5200e0c 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -345,6 +345,9 @@ type ConsensusParams struct {
// EnableAssetCloseAmount adds an extra field to the ApplyData. The field contains the amount of the remaining
// asset that were sent to the close-to address.
EnableAssetCloseAmount bool
+
+ // update the initial rewards rate calculation to take the reward pool minimum balance into account
+ InitialRewardsRateCalculation bool
}
// PaysetCommitType enumerates possible ways for the block header to commit to
@@ -359,6 +362,9 @@ const (
// PaysetCommitFlat hashes the entire payset array.
PaysetCommitFlat
+
+ // PaysetCommitMerkle uses merklearray to commit to the payset.
+ PaysetCommitMerkle
)
// ConsensusProtocols defines a set of supported protocol versions and their
@@ -847,12 +853,28 @@ func initConsensusProtocols() {
v25.EnableAssetCloseAmount = true
Consensus[protocol.ConsensusV25] = v25
- // v24 can be upgraded to v25, with an update delay of 7 days ( see calculation above )
- v24.ApprovedUpgrades[protocol.ConsensusV25] = 140000
+ // v26 adds support for teal3
+ v26 := v25
+ v26.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // Enable the InitialRewardsRateCalculation fix
+ v26.InitialRewardsRateCalculation = true
+
+ // Enable transaction Merkle tree.
+ v26.PaysetCommit = PaysetCommitMerkle
+
+ // Enable teal3
+ v26.LogicSigVersion = 3
+
+ Consensus[protocol.ConsensusV26] = v26
+
+ // v25 or v24 can be upgraded to v26, with an update delay of 7 days ( see calculation above )
+ v25.ApprovedUpgrades[protocol.ConsensusV26] = 140000
+ v24.ApprovedUpgrades[protocol.ConsensusV26] = 140000
// ConsensusFuture is used to test features that are implemented
// but not yet released in a production protocol version.
- vFuture := v25
+ vFuture := v26
vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
// FilterTimeout for period 0 should take a new optimized, configured value, need to revisit this later
@@ -865,6 +887,11 @@ func initConsensusProtocols() {
vFuture.CompactCertWeightThreshold = (1 << 32) * 30 / 100
vFuture.CompactCertSecKQ = 128
+ // enable the InitialRewardsRateCalculation fix
+ vFuture.InitialRewardsRateCalculation = true
+ // Enable transaction Merkle tree.
+ vFuture.PaysetCommit = PaysetCommitMerkle
+
Consensus[protocol.ConsensusFuture] = vFuture
}
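For reference, the switches that distinguish the new ConsensusV26 from v25 (TEAL v3, the payset Merkle commitment, and the initial rewards-rate fix) can be read straight off the global consensus table. A small sketch (reportV26 is a hypothetical helper; imports of config, protocol, and fmt are elided):

    func reportV26() {
        params := config.Consensus[protocol.ConsensusV26]
        fmt.Printf("LogicSigVersion=%d merklePayset=%v rewardsRateFix=%v\n",
            params.LogicSigVersion,                           // 3: TEAL v3 programs accepted
            params.PaysetCommit == config.PaysetCommitMerkle, // true: transaction Merkle tree
            params.InitialRewardsRateCalculation)             // true: rewards-rate fix enabled
    }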
diff --git a/crypto/compactcert/verifier.go b/crypto/compactcert/verifier.go
index 5e7c492c3..38747d611 100644
--- a/crypto/compactcert/verifier.go
+++ b/crypto/compactcert/verifier.go
@@ -49,11 +49,11 @@ func (v *Verifier) Verify(c *Cert) error {
}
// Verify all of the reveals
- sigs := make(map[uint64]crypto.Hashable)
- parts := make(map[uint64]crypto.Hashable)
+ sigs := make(map[uint64]crypto.Digest)
+ parts := make(map[uint64]crypto.Digest)
for pos, r := range c.Reveals {
- sigs[pos] = r.SigSlot
- parts[pos] = r.Part
+ sigs[pos] = crypto.HashObj(r.SigSlot)
+ parts[pos] = crypto.HashObj(r.Part)
ephID := basics.OneTimeIDForRound(v.SigRound, r.Part.KeyDilution)
if !r.Part.PK.Verify(ephID, v.Msg, r.SigSlot.Sig.OneTimeSignature) {
diff --git a/crypto/merklearray/merkle.go b/crypto/merklearray/merkle.go
index daaf850fb..06be1bdac 100644
--- a/crypto/merklearray/merkle.go
+++ b/crypto/merklearray/merkle.go
@@ -167,10 +167,10 @@ func (tree *Tree) Prove(idxs []uint64) ([]crypto.Digest, error) {
return s.hints, nil
}
-// Verify ensures that the positions in elems correspond to the hashes of their respective
-// crypto.Hashable objects in a tree with the given root hash. The proof is expected to
-// be the proof returned by Prove().
-func Verify(root crypto.Digest, elems map[uint64]crypto.Hashable, proof []crypto.Digest) error {
+// Verify ensures that the positions in elems correspond to the respective hashes
+// in a tree with the given root hash. The proof is expected to be the proof
+// returned by Prove().
+func Verify(root crypto.Digest, elems map[uint64]crypto.Digest, proof []crypto.Digest) error {
if len(elems) == 0 {
if len(proof) != 0 {
return fmt.Errorf("non-empty proof for empty set of elements")
@@ -183,7 +183,7 @@ func Verify(root crypto.Digest, elems map[uint64]crypto.Hashable, proof []crypto
for pos, elem := range elems {
pl = append(pl, layerItem{
pos: pos,
- hash: crypto.HashObj(elem),
+ hash: elem,
})
}
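In short, merklearray.Verify now takes leaf digests rather than Hashable elements, so callers hash each element themselves before verifying; the test updates below show the migration. A one-line before/after, with root, a, i, and proof as in those tests:

    // Before: merklearray.Verify(root, map[uint64]crypto.Hashable{i: a[i]}, proof)
    // After: callers hash the elements themselves.
    err := merklearray.Verify(root, map[uint64]crypto.Digest{i: crypto.HashObj(a[i])}, proof)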
diff --git a/crypto/merklearray/merkle_test.go b/crypto/merklearray/merkle_test.go
index 4017aaca3..7db789e81 100644
--- a/crypto/merklearray/merkle_test.go
+++ b/crypto/merklearray/merkle_test.go
@@ -91,7 +91,7 @@ func TestMerkle(t *testing.T) {
root := tree.Root()
var allpos []uint64
- allmap := make(map[uint64]crypto.Hashable)
+ allmap := make(map[uint64]crypto.Digest)
for i := uint64(0); i < sz; i++ {
proof, err := tree.Prove([]uint64{i})
@@ -99,18 +99,18 @@ func TestMerkle(t *testing.T) {
t.Error(err)
}
- err = Verify(root, map[uint64]crypto.Hashable{i: a[i]}, proof)
+ err = Verify(root, map[uint64]crypto.Digest{i: crypto.HashObj(a[i])}, proof)
if err != nil {
t.Error(err)
}
- err = Verify(root, map[uint64]crypto.Hashable{i: junk}, proof)
+ err = Verify(root, map[uint64]crypto.Digest{i: crypto.HashObj(junk)}, proof)
if err == nil {
t.Errorf("no error when verifying junk")
}
allpos = append(allpos, i)
- allmap[i] = a[i]
+ allmap[i] = crypto.HashObj(a[i])
}
proof, err := tree.Prove(allpos)
@@ -123,12 +123,12 @@ func TestMerkle(t *testing.T) {
t.Error(err)
}
- err = Verify(root, map[uint64]crypto.Hashable{0: junk}, proof)
+ err = Verify(root, map[uint64]crypto.Digest{0: crypto.HashObj(junk)}, proof)
if err == nil {
t.Errorf("no error when verifying junk batch")
}
- err = Verify(root, map[uint64]crypto.Hashable{0: junk}, nil)
+ err = Verify(root, map[uint64]crypto.Digest{0: crypto.HashObj(junk)}, nil)
if err == nil {
t.Errorf("no error when verifying junk batch")
}
@@ -138,18 +138,18 @@ func TestMerkle(t *testing.T) {
t.Errorf("no error when proving past the end")
}
- err = Verify(root, map[uint64]crypto.Hashable{sz: junk}, nil)
+ err = Verify(root, map[uint64]crypto.Digest{sz: crypto.HashObj(junk)}, nil)
if err == nil {
t.Errorf("no error when verifying past the end")
}
if sz > 0 {
var somepos []uint64
- somemap := make(map[uint64]crypto.Hashable)
+ somemap := make(map[uint64]crypto.Digest)
for i := 0; i < 10; i++ {
pos := crypto.RandUint64() % sz
somepos = append(somepos, pos)
- somemap[pos] = a[pos]
+ somemap[pos] = crypto.HashObj(a[pos])
}
proof, err = tree.Prove(somepos)
@@ -234,7 +234,7 @@ func BenchmarkMerkleVerify1M(b *testing.B) {
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
- err := Verify(root, map[uint64]crypto.Hashable{i % a.count: msg}, proofs[i])
+ err := Verify(root, map[uint64]crypto.Digest{i % a.count: crypto.HashObj(msg)}, proofs[i])
if err != nil {
b.Error(err)
}
diff --git a/daemon/algod/api/Makefile b/daemon/algod/api/Makefile
index c2818fce5..d2f8c9cc3 100644
--- a/daemon/algod/api/Makefile
+++ b/daemon/algod/api/Makefile
@@ -1,3 +1,6 @@
+GOPATH := $(shell go env GOPATH)
+GOPATH1 := $(firstword $(subst :, ,$(GOPATH)))
+
# `make all` or just `make` should be appropriate for dev work
all: server/v2/generated/types.go server/v2/generated/routes.go server/v2/generated/private/types.go server/v2/generated/private/routes.go
@@ -5,16 +8,16 @@ all: server/v2/generated/types.go server/v2/generated/routes.go server/v2/genera
generate: oapi-codegen all
server/v2/generated/types.go: algod.oas3.yml
- oapi-codegen -package generated -type-mappings integer=uint64 -generate types -exclude-tags=private,common -o ./server/v2/generated/types.go algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -package generated -type-mappings integer=uint64 -generate types -exclude-tags=private,common -o ./server/v2/generated/types.go algod.oas3.yml
server/v2/generated/routes.go: algod.oas3.yml
- oapi-codegen -package generated -type-mappings integer=uint64 -generate server,spec -exclude-tags=private,common -o ./server/v2/generated/routes.go algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -package generated -type-mappings integer=uint64 -generate server,spec -exclude-tags=private,common -o ./server/v2/generated/routes.go algod.oas3.yml
server/v2/generated/private/types.go: algod.oas3.yml
- oapi-codegen -package private -type-mappings integer=uint64 -generate types -include-tags=private -o ./server/v2/generated/private/types.go algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -package private -type-mappings integer=uint64 -generate types -include-tags=private -o ./server/v2/generated/private/types.go algod.oas3.yml
server/v2/generated/private/routes.go: algod.oas3.yml
- oapi-codegen -package private -type-mappings integer=uint64 -generate server,spec -include-tags=private -o ./server/v2/generated/private/routes.go algod.oas3.yml
+ $(GOPATH1)/bin/oapi-codegen -package private -type-mappings integer=uint64 -generate server,spec -include-tags=private -o ./server/v2/generated/private/routes.go algod.oas3.yml
algod.oas3.yml: algod.oas2.json
curl -s -X POST "https://converter.swagger.io/api/convert" -H "accept: application/json" -H "Content-Type: application/json" -d @./algod.oas2.json -o .3tmp.json
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index 595cd752f..790241f68 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -358,6 +358,84 @@
}
]
},
+ "/v2/blocks/{round}/transactions/{txid}/proof": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get a Merkle proof for a transaction in a block.",
+ "operationId": "GetProof",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "The round in which the transaction appears.",
+ "name": "round",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "pattern": "[A-Z0-9]+",
+ "description": "The transaction ID for which to generate a proof.",
+ "name": "txid",
+ "in": "path",
+ "required": true
+ },
+ {
+ "$ref": "#/parameters/format"
+ }
+ ],
+ "responses": {
+ "200": {
+ "$ref": "#/responses/ProofResponse"
+ },
+ "400": {
+ "description": "Malformed round number or transaction ID",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Non-existent block or transaction",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal error, including protocol not supporting Merkle proofs.",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown error"
+ }
+ }
+ },
+ "parameters": [
+ {
+ "type": "integer",
+ "name": "round",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "name": "txid",
+ "in": "path",
+ "required": true
+ }
+ ]
+ },
"/v2/ledger/supply": {
"get": {
"produces": [
@@ -2008,6 +2086,33 @@
}
}
},
+ "ProofResponse": {
+ "description": "Proof of transaction in a block.",
+ "schema": {
+ "type": "object",
+ "required": [
+ "proof",
+ "stibhash",
+ "idx"
+ ],
+ "properties": {
+ "proof": {
+ "description": "Merkle proof of transaction membership.",
+ "type": "string",
+ "format": "byte"
+ },
+ "stibhash": {
+ "description": "Hash of SignedTxnInBlock for verifying proof.",
+ "type": "string",
+ "format": "byte"
+ },
+ "idx": {
+ "description": "Index of the transaction in the block's payset.",
+ "type": "integer"
+ }
+ }
+ }
+ },
"CatchpointStartResponse": {
"tags": [
"private"
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 78fe01e8a..403902904 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -571,6 +571,39 @@
},
"description": "Transaction ID of the submission."
},
+ "ProofResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "idx": {
+ "description": "Index of the transaction in the block's payset.",
+ "type": "integer"
+ },
+ "proof": {
+ "description": "Merkle proof of transaction membership.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "stibhash": {
+ "description": "Hash of SignedTxnInBlock for verifying proof.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "idx",
+ "proof",
+ "stibhash"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Proof of transaction in a block."
+ },
"SupplyResponse": {
"content": {
"application/json": {
@@ -2013,6 +2046,124 @@
"summary": "Get the block for the given round."
}
},
+ "/v2/blocks/{round}/transactions/{txid}/proof": {
+ "get": {
+ "operationId": "GetProof",
+ "parameters": [
+ {
+ "description": "The round in which the transaction appears.",
+ "in": "path",
+ "name": "round",
+ "required": true,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "description": "The transaction ID for which to generate a proof.",
+ "in": "path",
+ "name": "txid",
+ "required": true,
+ "schema": {
+ "pattern": "[A-Z0-9]+",
+ "type": "string"
+ }
+ },
+ {
+ "description": "Configures whether the response object is JSON or MessagePack encoded.",
+ "in": "query",
+ "name": "format",
+ "schema": {
+ "enum": [
+ "json",
+ "msgpack"
+ ],
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "idx": {
+ "description": "Index of the transaction in the block's payset.",
+ "type": "integer"
+ },
+ "proof": {
+ "description": "Merkle proof of transaction membership.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "stibhash": {
+ "description": "Hash of SignedTxnInBlock for verifying proof.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ }
+ },
+ "required": [
+ "idx",
+ "proof",
+ "stibhash"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Proof of transaction in a block."
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Malformed round number or transaction ID"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Non-existent block or transaction"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal error, including protocol not supporting Merkle proofs."
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown error"
+ }
+ },
+ "summary": "Get a Merkle proof for a transaction in a block."
+ }
+ },
"/v2/catchup/{catchpoint}": {
"delete": {
"description": "Given a catchpoint, it aborts catching up to this catchpoint",
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index c91824e89..b231e80a7 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -579,3 +579,10 @@ func (client RestClient) RawDryrun(data []byte) (response []byte, err error) {
response = blob
return
}
+
+// Proof gets a Merkle proof for a transaction in a block.
+func (client RestClient) Proof(txid string, round uint64) (response generatedV2.ProofResponse, err error) {
+ txid = stripTransaction(txid)
+ err = client.get(&response, fmt.Sprintf("/v2/blocks/%d/transactions/%s/proof", round, txid), nil)
+ return
+}
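A hedged usage sketch for the new Proof client method, which wraps GET /v2/blocks/{round}/transactions/{txid}/proof (printProof is a hypothetical helper; the ProofResponse field names Proof, Stibhash, and Idx are assumed from the generated schema's proof/stibhash/idx, and imports are elided):

    func printProof(restClient client.RestClient, round uint64, txid string) error {
        resp, err := restClient.Proof(txid, round)
        if err != nil {
            return err
        }
        // Idx is the transaction's position in the block payset; Proof and Stibhash
        // are assumed to be raw bytes in the generated response type.
        fmt.Printf("txn %s: payset index %d, proof %d bytes, stib hash %x\n",
            txid, resp.Idx, len(resp.Proof), resp.Stibhash)
        return nil
    }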
diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go
index 2f3dce981..6630bd31f 100644
--- a/daemon/algod/api/server/v2/generated/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/private/routes.go
@@ -236,131 +236,133 @@ func RegisterHandlers(router interface {
var swaggerSpec = []string{
"H4sIAAAAAAAC/+x9/XPcNpLov4I3u1X+eMMZ+Su7VlVqn2I7iV4cx2Upubu1fAmG7JlBRAIMAUqa+PS/",
- "X3UDIEESnBnJWt+ldn+yNQAajUZ/odFofpykqiiVBGn05PDjpOQVL8BARX/xNFW1NInI8K8MdFqJ0ggl",
- "J4e+jWlTCbmaTCcCfy25WU+mE8kLaPvg+Omkgt9qUUE2OTRVDdOJTtdQcARsNiX2biBdJSuVOBBHFsTx",
- "y8n1lgaeZRVoPcTyB5lvmJBpXmfATMWl5ik2aXYpzJqZtdDMDWZCMiWBqSUz605nthSQZ3rmF/lbDdUm",
- "WKWbfHxJ1y2KSaVyGOL5QhULIcFjBQ1SzYYwo1gGS+q05obhDIir72gU08CrdM2WqtqBqkUixBdkXUwO",
- "3080yAwq2q0UxAX9d1kB/A6J4dUKzOTDNLa4pYEqMaKILO3YUb8CXedGM+pLa1yJC5AMR83Y97U2bAGM",
- "S/bu6xfsyZMnz3EhBTcGMsdko6tqZw/XZIdPDicZN+Cbh7zG85WquMySpv+7r1/Q/Cdugfv24lpDXFiO",
- "sIUdvxxbgB8YYSEhDaxoHzrcjyMiQtH+vIClqmDPPbGd73RTwvn/R3cl5SZdl0pIE9kXRq3MNkd1WDB8",
- "mw5rEOj0L5FSFQJ9f5A8//Dx0fTRwfWf3h8lf3d/PntyvefyXzRwd1Ag2jGtqwpkuklWFXCSljWXQ3q8",
- "c/yg16rOM7bmF7T5vCBV78YyHGtV5wXPa+QTkVbqKF8pzbhjowyWvM4N8xOzWuaophCa43YmNCsrdSEy",
- "yKaofS/XIl2zlGsLgvqxS5HnyIO1hmyM1+Kr2yJM1yFJEK9b0YMW9L+XGO26dlACrkgbJGmuNCRG7TBP",
- "3uJwmbHQoLS2St/MWLHTNTCaHBussSXaSeTpPN8wQ/uaMa4ZZ940TZlYso2q2SVtTi7OabxbDVKtYEg0",
- "2pyOHUXhHSPfgBgR4i2UyoFLIp6XuyHJ5FKs6go0u1yDWTubV4EuldTA1OJXSA1u+/8/+eENUxX7HrTm",
- "K3jL03MGMlXZ+B67SWMW/FetcMMLvSp5eh4317koRATl7/mVKOqCybpYQIX75e2DUawCU1dyDCELcQef",
- "FfxqOOlpVcuUNredtuOoISsJXeZ8M2PHS1bwqy8Ppg4dzXiesxJkJuSKmSs56qTh3LvRSypVy2wPH8bg",
- "hgVWU5eQiqWAjDVQtmDiptmFj5A3w6f1rAJ0PJBRdJpZdqAj4SrCMyi62MJKvoKAZWbsR6e5qNWoc5CN",
- "gmOLDTWVFVwIVetm0AiONPV291oqA0lZwVJEeOzEkQO1h+3j1GvhHJxUScOFhAw1LyGtDFhNNIpTMOH2",
- "w8zQRC+4hi+ejhnwtnXP3V+q/q5v3fG9dps6JVYkI3YRW53Axt2mzvg9Dn/h3FqsEvvzYCPF6hRNyVLk",
- "ZGZ+xf3zZKg1KYEOIbzh0WIluakrODyTD/EvlrATw2XGqwx/KexP39e5ESdihT/l9qfXaiXSE7EaIWaD",
- "a/Q0RcMK+w/Ci6tjcxU9NLxW6rwuwwWlnVPpYsOOX45tsoV5U8Y8ao6y4ani9MqfNG46wlw1GzmC5Cjt",
- "So4dz2FTAWLL0yX9c7UkfuLL6nf8pyzzGE2RgZ2hpaCACxa8c7/hTyjyYM8ECEWkHIk6J/N5+DFA6M8V",
- "LCeHkz/N20jJ3LbquYOLM15PJ0ctnLufqR1p19c7yLTNTEi7O9R1as+Ed48PQo1iQo5qD4evcpWe3wqH",
- "slIlVEbYfVwgnKGkEHi2Bp5BxTJu+Kw9VFk/a4TfaeC3NI5OSVBFTNwP9B+eM2xGKeTGu2/ougqNTpwK",
- "Ak0ZenzWjtiZsAN5oooV1slj6JzdCMsX7eRWQTca9b0jy4c+tMjuvLJ+JaMRfhG49PbUeLRQ1e34pccI",
- "krVnYcYRauP94sq7O0td6zJx9In407ZDD1Abfhyq1ZBCffAxWnWocGL4P4AKGqHeBRW6gO6aCqooRQ53",
- "IK9rrtfDRaCD8+QxO/n26Nmjxz8/fvYFWuiyUquKF2yxMaDZfWdXmDabHB4MV0YKvs5NHPoXT/0Jqgt3",
- "J4UI4Qb2PhJ1CqgZLMWYjRcgdi+rTVXLOyAhVJWqIj4vsY5RqcqTC6i0UJHwxVvXg7keqIes39373WLL",
- "LrlmODcdx2qZQTWLUR7PWWTSDRR6l6GwoE+vZEsbB5BXFd8MdsCuN7I6N+8+e9IlvvfuNSuhSsyVZBks",
- "6lVoo9iyUgXjLKOBpBDfqAxODDe1vgMt0AJrkcGNCFHgC1UbxplUGQo0do7rh5FYJgVRKPZjQpVj1tb+",
- "LAC945TXq7Vh6Faq2Na2AxOe2k1JyFbokaNfc2a3vex0Nk6WV8CzDVsASKYW7nzlTn60SE5hGeNvXJx2",
- "atFqzgQdvMpKpaA1ZIm7XtqJmu9nd9lsoRMhTgg3szCt2JJXt0TWKMPzHYhSnxi6jTvhDqVDrPebftsG",
- "9icPt5FXeMa0XIC+C0p3DgbGSLgnTS6gosPZP3T//CS33b66HLk6cRb4VBQovkxyqTSkSmY6Cizn2iS7",
- "xBY7ddwEXEEgKTFJJcAjAYLXXBt7RBcyI5fRqhuah8bQFOMIj1oUhPyTNyZD2CnqSalr3VgWXZelqgxk",
- "sTVIuNoy1xu4auZSywB2Y76MYrWGXZDHqBTAd8SyK7EE4sbFiJoY1nBxFI5HO7CJkrKDREuIbYic+F4B",
- "dcPw8QgieL5oRhLjCN3jnCZmPZ1oo8oS5c8ktWzGjZHpxPY+Mj+2fYfMxU2r1zMFOLvxODnMLy1l7cXB",
- "mqNvR5BZwc/RNpGnZmMJQ5xRGBMtZArJNs5HsTzBXqEI7BDSESfZXU0Gs/WEo8e/UaYbZYIduzC24BGP",
- "/a2NgJ+20aE7cFpeguEi141j0oTZ21koIt/PlkAvsoIUpMk3yKtLURX2UovMmfa/Wbcnc7PY65tW/GTG",
- "KrjkVeZ7DE9LwWISITO4imtX3omNZHDFRBzpZTOzMCz1V04yBDCLCrq9xEtzpYVcJfZ2cJdRay717mlW",
- "S+EM2CVUDq8lVM7sGn87lhjlb9C24bGNFC44cxsi4ND4tBY5u1s6dolKDSiIhUgrxe3dKBK1t0BWQcER",
- "O7qlc2Z/fM5txH5h2/1VrQ+Rh7wbh+v5dVTDNCx6uabNQlXbJ2LI9Xi0BQ1jC1nlasHzBB1+SDLIzc7Q",
- "Gx4k4CX1RHut0uHwLspnZ+/z7OzsA3uNfelsAewcNnO6sWbpmssVtNcIobzYUwNcQVqHpqVHxr0Ogi5W",
- "2sW+exScTkql8qQ58vavPQbmpk/3c5GeQ8ZQX5GIOSt4r7tDOAm7jyyum4uhy/XGu5BlCRKyBzPGjiSD",
- "ojQbF1/peTy9yeU9s23+K5o1q+mOmktGi5ydyXhow95wf6JMeTDbJcmmfH3iVBbI9onMlRwRJ35JFzQI",
- "LiqfW6OjJzQyMH0Dix4wlcVinxjCN5QHxTu7LDI6jrTWTdeLQlAyVNBtiprT308PT/jCzBg7Jd2BBywN",
- "F1DxnDI9tA8cC80KgQd1XacpQHZ4JpMOJqkq3MT32/9atXRWHxw8AXbwoD9GG3RX3VnSykB/7JfsYGqb",
- "iFzsS3Y2OZsMIFVQqAvI7Hks5Gs7aifY/9PAPZM/DBQzK/jGnuS8LDJdL5ciFZbouUK9vlI9r1MqaoEK",
- "0QM0s5oJMyVTRhQlb93uSyuAk6j3dBcxnwhU9NPRlKK287eSXd7RDK54iqvkpGQ21iNo+GzoBBlVJiGA",
- "aAh6y4zuEkB39Pgt5W6oz20AYjt+p70QRIccAbvOdvvuA2JEMdhH/I9YqXDXhcs/8kkqudBmgKQLR9AN",
- "UMOQEaMzY/+hapZykt+yNtCc7VRFByY6SOMMZGP9nM5TaykEORRgI0TU8vBhf+EPH7o9F5ot4dIn7WHH",
- "PjkePrRCoLT5ZAnosebVccSBosA8WtNIovWa6/VsZ5Ce4O4Vmw9AH7/0E5IwaU0m5no6wSN3vrkDgbeA",
- "WAXO39OdUJW2rWoZJgi6/dMbbaAYRnvt0J9HPNF3/qQ4sLRK5kJCUigJm2hOvJDwPTVG7TSxyMhgEtax",
- "sf2TdAf/HlrdefbZzU+lL+12wBJvm3TFO9j8PtxeoD9MjSQvE/KScZbmgsKYSmpT1ak5k5wCJT03qMcW",
- "PvwzHjp74bvEY3WRUJoDdSa5Rho24ZPoBdASIoHRrwF8BE3XqxXonlvElgBn0vUSkg69NBd5lYndsBIq",
- "uqmb2Z7oCSx5TpG+36FSbFGbruqlDC7r2dhbB5yGqeWZ5IblwLVh3wt5ekXg/AnH84wEc6mq84YKIyc0",
- "kKCFTuKXmd/Y1m+5XvvlY0evbNxgG1dH+G2a18ZAJ0X8P+//7fD9UfJ3nvx+kDz/v/MPH59eP3g4+PHx",
- "9Zdf/lf3pyfXXz74259jO+Vxj+UXOcyPXzq35Pgl2Z72vmGA+2cLQhdCJlEmw+NCISSlqfZ4i91HC+oZ",
- "6EF7c+F2/UyaK4mMdMFzkeER+Dbs0FdxA1m00tHjms5G9GKKfq0fYsedlUpKnp5TLsBkJcy6XsxSVcy9",
- "OzZfqcY1m2ccCiWpLZvzUszxeDu/eLTDNH6CvmIRdXU9nTito+8868gBji2oP2cTzfd/G8XuffPqlM3d",
- "Tul7NtnQgg6yxCIetHvr1jnM4eLtYxmbbYmHmZewFFJg++GZzLjh8wXXItXzWkP1Fc+5TGG2UuyQOZAv",
- "ueEUA+hFNsfes1F8xmFT1otcpOw8NMWtaI4Fxs7O3iODnJ19GNz9DQ2nmyoebKQJkkth1qo2iYsOj8cR",
- "2lgLQbZxuW2zTpmDbTnSRZ8d/JEAaFnqJIiIxZdfljkuP2BDzWgQ5Y4xbVTllSBqRhfTwP19o9ztZ8Uv",
- "fQZ/jef2XwpevhfSfGCJO38flSWF2yje9YvTNciTmxL2j5m1KLbAYucsWrh1qG6cT0hAT+woH0TWccph",
- "E5GO+qBWaGOCt6UTgvpW5bi5tyZTACNKndqsE5Sp6Ko0shbJQ/Dukq9QF/o7QDw2I/O5d0ALYOka0nPI",
- "6AKEYoTTznB/zewsixdZoe3THZs2SPnldBxcAKvLjDvby+Wmn+irwRif3fwOzmFzqtr09Jtk9l5PJy6o",
- "nyDPjAlIifQIjIBadsXFXwz0Nt/d8VDgvSyZjW3bjEzPFocNX/gx4wJkLdMdCE+MKRoybOH3klcRQljm",
- "HyHBLRaK8D6J9aORdF4ZkYrSrn+/2PzbzhgEskupR9W4Wva19UCZRrW37ZwsuI4rbsAW3A+UoX5Gh5/J",
- "RlbsZR2j59+OcRc5BLdK2kk2r8jZ8cu271nHUItzCVSytaYejS5FQrO9dtej4qK9FKXr730M3M5LKeQi",
- "n7cguuFngfPmcMFHbwJG310cB8kIwXO+5lWFV2x9YZg2L2zsy3r/+sI/ufDvLCbTG72ZmE5cflxsO5Qk",
- "655BDivuAt+UeecvXS1q93SwQYjHD8tlLiSwJJbXwLVWqbB3oa0ud3MAOn8PGbOBFbY3hBgbB2hTxJAA",
- "szcqlE25ugmSEgSFGLmHTbHG4G/YHXFrSxw4t3Kn+zfUHa0QTdsnSHYbh9Gf6SSqksY8804vZrssYHCU",
- "ibEoqqZhPGQYddGQA5njpKNZk/NYlAy9CiA2PPHDAned3RdLNPIPgsBxBSs8e7fnVZRWH4D5vDGDC2Ug",
- "WYpKm4SOytHlYaevNTmDX2PXuPrpkIrZN9Iii2sfmvYcNkkm8jq+227e717itG+ac4uuF+ewISMDPF2z",
- "Bb3pRyvUmR77bJna5vZsXfBru+DX/M7Wux8vYVecuFLK9Ob4g3BVT59sE6YIA8aYY7hroyTdol6CbISh",
- "bgnyIGzOBOVXzLad1gfCdOOMjlHNayFF1xI4ultXYRN/bG5P8CR+mGc+IgO8LEV21Ts7W6hxHqcpbuKo",
- "W49/QAXaXQdsBwWCc3IslbECf9a3WxrYTFvcYJDutZsy/SSzQCGEUwntS/MMCYWsTdk4u2h1Cjz/DjY/",
- "YV9azuR6Ovm0I3+M1g7iDlq/bbY3SmeKIdsjYCdydkOS87Ks1AXPE/eUZ4w1K3XhWJO6+5c/n1nVxY/f",
- "p6+OXr916FP2GvDKJW1tWxX1K/8wq8ITcSxz6zSIjJC36s/O1hELNr95TxkGU3yiXceXQy3mmMuKV2Pg",
- "QlF0wZVl/CprZ6gkTM67lWR2svs+NTIXpvrdqcgPJCzOoe0O79AL4VxbijEUtt6IZkr2ExzQjaNTJrFL",
- "wTe4izYwO1QQsi4SFIFE5yKNhw7kQqMUybqgRy4bA4w6jziECLEWI+FzWYsAFnbTe9wU9ZAM5ogSk8I6",
- "W2i3UK5QXC3FbzUwkYE02FS5hKeOsKBs+BzeoUmL5ws7wC5luAH/KXYeQY1ZeEJiu5EPo7yRbHV/6PML",
- "bcLT+EMQnLvBJU0448AsbblgcfzhuNnedK+70dqwrttQByFj2Bogu4vK+dDB2iI6Mke0SNyoxj4a19aU",
- "B76/nm7VMqEbKmSbm8dzrSJgannJpa35hOMsDd1oDfbcjqMuVUWPpzREb6iFTpaV+h3ip8klblQkB8uR",
- "klw2Gj2LPErpK9EmMtJW8/P0DfEYZe0xbypoZN1LtBEJJy4PwteUVOqDTFxatrb1qTpXt3HhCNMt5hZ+",
- "KxwO50GKSs4vFzxWqgGdGsTpqL0o6YTDjGJ+sN8F3eRSO94L7lyavsK+OCqhahMlh69bb+mg/LFYPoNU",
- "FDyPR0czon73KUomVsIW+ao1BFWkHCBbHdFykavEZa+iWtIcL9nBNKhT53YjExdCi0UO1OOR7bHgGuyL",
- "l/AVjEsKMiDNWlP3x3t0X9cyqyAza20JqxVrnEj7uMHHnxdgLgEkO6B+j56z+xR51+ICHiAVnS8yOXz0",
- "nFIy7B8HMWPnqvlt0ysZKZZ/c4olzsd09WBhoJFyUGfR12+2BOu4CtsiTXboPrJEPZ3W2y1LBZd8BfEb",
- "1WIHTnYs7SYF7np0kZmtH6hNpTZMmPj8YDjqp5G0LFR/Fg2XK1+gABnFtCqQn9oSUXZSD84WI3RlWzxe",
- "vpGuOUr/5qF3aP28QVpry2OrpsuoN7yALlmnjNtHovRswz0udgpxNlJfA6qL+CTVyAZ7u+nGsvtSyaRA",
- "2cketAl/Af9Fy0sow/PotMbrrn7mynbQ+7paCCUZJWzdISwPdNKtSVxX8XXyGqf68d1rZxgKVcVqRbTa",
- "0BmJCkwl4CIqsf3EtcYzacyFp3zMQfmqFnn2U5tu2ivLVHGZrqPxzwUO/LmtI9eQ3VI9+gRvzaWEPArO",
- "yvLPXuYjWulXte88hZB79u2XW7LL7S2uRbyLpkfKT4jkFSbHCUKqdvPvmsSRfKUyRvO0j71bRhi+kQpK",
- "z/xWgzax91zUYHOd6IyN/oqtfMJAZmTtZ8y+f0JcOi9YyMqKos7tawjIVlC5AExd5opnU4ZwTl8dvWZ2",
- "Vu3e8tK7G6q8srJv6Tqr6J2tgsoQN3lcOJYatT+c7TkjuGpt6Gm4NrwoY1mv2OPUd6DU2gsucp9+QOYn",
- "pM6MvbSWX3u7Yidp35CyZjqna4gn8D/G8HRNJrVjgMZZfv+SQZ4rdVA6s6lC2BR3sM8ijfJVg2zRoClT",
- "6PdcCm3L/8IFdBNtm6xz59L5xNvu8qpaSsspcfu05VXEbcjukbMXez4kFcWsR/gbmhmt6iqFm1ZQOqFR",
- "0TdW/XJMg5qZErLTK9nUrPNl3VMulRQpvXAKCg43KLtSwvvETPd4DNY/LnsRdxIaEa5oEagmdcBRcbQs",
- "lFeEjnDDgFHQiptqucP+aahmLR4EV2C002yQTX2hL3eOE1KDK9ZBVaUDPYnH8f79YfRqo32uf0M2ovS/",
- "EXfla2wjV0W4lJ1zIenxqiObyw6yJy2qdGrweCcMWynQbj3dx1n6PY6ZnV7JY8T4w8xXRiUYNoSMy7Z3",
- "FkNQR/4Gw90YYN8X2JdRuLj9uZNqaCc9Kks3aUwT6GaHY6XKRgkciYInPgwZELeBH0Lbwm5brx7JniKj",
- "wQVdXEBJdnjAGCNP4F/hodZylH1Ja6/8o08zhIyg8VpIaOv2RgxEGjUJtDEkryPjdFpxY13AvXTaKfCc",
- "bkpiCk0bFzr6VFC9DSaS0Br9HOPb2BasG1EcTYfWceNy05QLRu4OnIkXVKfcEXJYfo68KudEZZTU1StI",
- "F1McqLh9KceuARiKwdAnssNNxa3k3MQSjSWhZ0LjcaRY5JE0lpdNY1CUkfLlFhv6N/YAeXwF7mLt1gUz",
- "aOCN/cvtxSty3PtEi9Utd6Udf4fb0pOBcI9i3P8K1Ur4bmfwltwqnuZZDV3hK18ilw4VTWJ6l2dJ0UUP",
- "bW210+2H1vG6pVNSjSOJPO/aF6Pcal8bGxxL50lHs8+4camlhrNtVWRssdEYBHsPaYuc2g+GRAMDY3eP",
- "9uoRmwej9/MbBl4Ywd5KUH+pPUToO5+1wkouXOC7FZEhZV1+2zDjcJ/Ml3aD+4twWWMEJLaSWyZ57SV7",
- "QypFBDtMDdjBnucdktrXID1PUlVwx6QNTOgNSTtMeth3ebQO4phaw3Cde29Ah7YjtN+H8K1eGBJ3XJzN",
- "Yh9xjifV43DSJ5Yg/tnHUJt8Nm3QqZHs5o3t+k9j0QN7Qh4JVPVoWos827W5nbBj+5yZAms/L7542one",
- "fc4H1T/bC/mhuLm3pTcx/P1NIMJE1tqZPJgqCCjuEUt0wyKRQ6oFldaVMBvK3fGepvg5mpf8DUhXKdoV",
- "3m9uQN0FnP3miwtNr5re7Wc6vlG2dHaB7i+5goaKpLy64kWZg5OLL+8t/gJP/vo0O3jy6C+Lvx48O0jh",
- "6bPnBwf8+VP+6PmTR/D4r8+eHsCj5RfPF4+zx08fL54+fvrFs+fpk6ePFk+/eP6Xe/4bGRbR9vsT/05V",
- "B5Kjt8fJKSLb0oSX4jvY2HfGyMb+BTNPSRKh4CKfHPqf/p+XsFmqiuCzfu7XiYv0T9bGlPpwPr+8vJyF",
- "Q+YrKtuXGFWn67mfZ1iT5u1xE6C1F/60ozb2hqxAm+pY4Yja3r06OWVHb49nLcNMDicHs4PZIyoUUoLk",
- "pZgcTp7QTyQ9a9r3uWO2yeHH6+lkvgaem7X7owBTidQ36Uu+WkE1c0+58aeLx3Mf35l/dJfc19vaulkG",
- "7ulIMCB4ezj/2Kn7mIVw6WXe/KPPwAiabF3j+UcKHwW/u2qf849t+d1ry+s5xM7xvixY253KfdFXCbT9",
- "Fdnb3xIK3a3W3OzVcYZ7hKNeNKWIw4+vvv8n/VThh96XWx4fHPyTfYPi6Q1XvNU77ZzmIlUTvuIZ8zdF",
- "NPejzzf3saR3G6iemFW/19PJs8+5+mOJLM9zRj2D3I7h1v8oz6W6lL4n2sq6KHi18WKsO0rBFxgnjcxX",
- "msoQVuICj+gfqM5l7GpuRLnQxz5urFzoCyb/Ui6fS7n8MT7t8viGAv7HX/G/1OkfTZ2eWHW3vzp1rpxN",
- "RpjbQnCth+ffQA4fBnZ90zGd7A4u7D5FPSVcPnAJDRZs5JFpc3msMhvh8IWCfJpUUMS7q7PfOaCd98zf",
- "wUbvUuCna2C/tJ/b/4WSOekqYcpUxX7heR78Rl9N9U74bOTb/c3Dw30/3H99PY2htQTwqaWUQupqmaIh",
- "Owf/RNXSoHPdOLyhb8vKLWH0+722+laowRwLPjo4OIil9vRxdtEYizGl8l6qJIcLyIdbPYZE76Xqtq9d",
- "jn4PZPjAODxFR7jOfxy6eXM8+vHP7qvZm2D3Usl7hl1y4UqqB1Vq7AdiCmH8d3Ftyo9LB2xsRPxbqgmC",
- "3P6p5U813n+82qTXW5SdXtcmU5dyXHHRWyGeu2RbSn9tggdGMQ+g0VQz5j90mG/8l3oZp+QjVZvuB7R9",
- "8YleCeamPNJKSJqApJxmsVnlPMjZdB/kGCrBE4fZG/v9kp7ei35H1OIYl/uY0H8qLw0dja175YuVdP6e",
- "I8uju2q/z5QQhYYBCgM8n7u0k96v9nI4+LFbZjny67x5qBVt7IddYq3zj+YqiKy08c4wfkg71UQO339A",
- "glNGsNvENhx2OJ/ThexaaTOfoMLphsrCxg8NjT/6nfe0vv5w/d8BAAD//wvTuDrLhgAA",
+ "X3UDIEESnBnJWt+ldn+yNQQbjUZ/obvR/DhJVVEqCdLoyeHHSckrXoCBiv7iaapqaRKR4V8Z6LQSpRFK",
+ "Tg79M6ZNJeRqMp0I/LXkZj2ZTiQvoB2D708nFfxWiwqyyaGpaphOdLqGgiNgsylxdAPpKlmpxIE4siCO",
+ "X06utzzgWVaB1kMsf5D5hgmZ5nUGzFRcap7iI80uhVkzsxaauZeZkExJYGrJzLozmC0F5Jme+UX+VkO1",
+ "CVbpJh9f0nWLYlKpHIZ4vlDFQkjwWEGDVLMhzCiWwZIGrblhOAPi6gcaxTTwKl2zpap2oGqRCPEFWReT",
+ "w/cTDTKDinYrBXFB/11WAL9DYni1AjP5MI0tbmmgSowoIks7dtSvQNe50YzG0hpX4gIkw7dm7PtaG7YA",
+ "xiV79/UL9uTJk+e4kIIbA5ljstFVtbOHa7KvTw4nGTfgHw95jecrVXGZJc34d1+/oPlP3AL3HcW1hriw",
+ "HOETdvxybAH+xQgLCWlgRfvQ4X58IyIU7c8LWKoK9twTO/hONyWc/390V1Ju0nWphDSRfWH0lNnHUR0W",
+ "vL5NhzUIdMaXSKkKgb4/SJ5/+Pho+ujg+k/vj5K/uz+fPbnec/kvGrg7KBAdmNZVBTLdJKsKOEnLmssh",
+ "Pd45ftBrVecZW/ML2nxekKp37zJ816rOC57XyCcirdRRvlKaccdGGSx5nRvmJ2a1zFFNITTH7UxoVlbq",
+ "QmSQTVH7Xq5FumYp1xYEjWOXIs+RB2sN2RivxVe3RZiuQ5IgXreiBy3ofy8x2nXtoARckTZI0lxpSIza",
+ "YZ68xeEyY6FBaW2VvpmxYqdrYDQ5PrDGlmgnkafzfMMM7WvGuGacedM0ZWLJNqpml7Q5uTin991qkGoF",
+ "Q6LR5nTsKArvGPkGxIgQb6FUDlwS8bzcDUkml2JVV6DZ5RrM2tm8CnSppAamFr9CanDb///JD2+Yqtj3",
+ "oDVfwVuenjOQqcrG99hNGrPgv2qFG17oVcnT87i5zkUhIih/z69EURdM1sUCKtwvbx+MYhWYupJjCFmI",
+ "O/is4FfDSU+rWqa0ue20HUcNWUnoMuebGTtesoJffXkwdehoxvOclSAzIVfMXMlRJw3n3o1eUqlaZnv4",
+ "MAY3LLCauoRULAVkrIGyBRM3zS58hLwZPq1nFaDjgYyi08yyAx0JVxGeQdHFJ6zkKwhYZsZ+dJqLnhp1",
+ "DrJRcGyxoUdlBRdC1bp5aQRHmnq7ey2VgaSsYCkiPHbiyIHaw45x6rVwDk6qpOFCQoaal5BWBqwmGsUp",
+ "mHD7YWZoohdcwxdPxwx4+3TP3V+q/q5v3fG9dpsGJVYkI3YRnzqBjbtNnff3OPyFc2uxSuzPg40Uq1M0",
+ "JUuRk5n5FffPk6HWpAQ6hPCGR4uV5Kau4PBMPsS/WMJODJcZrzL8pbA/fV/nRpyIFf6U259eq5VIT8Rq",
+ "hJgNrtHTFL1W2H8QXlwdm6vooeG1Uud1GS4o7ZxKFxt2/HJsky3MmzLmUXOUDU8Vp1f+pHHTN8xVs5Ej",
+ "SI7SruQ48Bw2FSC2PF3SP1dL4ie+rH7Hf8oyj9EUGdgZWgoKuGDBO/cb/oQiD/ZMgFBEypGoczKfhx8D",
+ "hP5cwXJyOPnTvI2UzO1TPXdwccbr6eSohXP3M7Vv2vX1DjLtYyak3R0aOrVnwrvHB6FGMSFHtYfDV7lK",
+ "z2+FQ1mpEioj7D4uEM5QUgg8WwPPoGIZN3zWHqqsnzXC7/Tit/QenZKgipi4H+g/PGf4GKWQG+++oesq",
+ "NDpxKgg0ZejxWTtiZ8IB5IkqVlgnj6FzdiMsX7STWwXdaNT3jiwf+tAiu/PK+pWM3vCLwKW3p8ajhapu",
+ "xy89RpCsPQszjlAb7xdX3t1ZGlqXiaNPxJ+2A3qA2vDjUK2GFOqDj9GqQ4UTw/8BVNAI9S6o0AV011RQ",
+ "RSlyuAN5XXO9Hi4CHZwnj9nJt0fPHj3++fGzL9BCl5VaVbxgi40Bze47u8K02eTwYLgyUvB1buLQv3jq",
+ "T1BduDspRAg3sPeRqFNAzWApxmy8ALF7WW2qWt4BCaGqVBXxeYl1jEpVnlxApYWKhC/euhHMjUA9ZP3u",
+ "3u8WW3bJNcO56ThWywyqWYzyeM4ik26g0LsMhQV9eiVb2jiAvKr4ZrADdr2R1bl599mTLvG9d69ZCVVi",
+ "riTLYFGvQhvFlpUqGGcZvUgK8Y3K4MRwU+s70AItsBYZ3IgQBb5QtWGcSZWhQOPguH4YiWVSEIViPyZU",
+ "OWZt7c8C0DtOeb1aG4ZupYptbftiwlO7KQnZCj1y9GvO7HaUnc7GyfIKeLZhCwDJ1MKdr9zJjxbJKSxj",
+ "fMbFaacWreZM0MGrrFQKWkOWuPTSTtT8OLvLZgudCHFCuJmFacWWvLolskYZnu9AlMbE0G3cCXcoHWK9",
+ "3/TbNrA/ebiNvMIzpuUC9F1QunMwMEbCPWlyARUdzv6h++cnue321eVI6sRZ4FNRoPgyyaXSkCqZ6Siw",
+ "nGuT7BJbHNRxE3AFgaTEJJUAjwQIXnNt7BFdyIxcRqtuaB56h6YYR3jUoiDkn7wxGcJOUU9KXevGsui6",
+ "LFVlIIutQcLVlrnewFUzl1oGsBvzZRSrNeyCPEalAL4jll2JJRA3LkbUxLCGi6NwPNqBTZSUHSRaQmxD",
+ "5MSPCqgbho9HEMHzRfMmMY7QPc5pYtbTiTaqLFH+TFLL5r0xMp3Y0Ufmx3bskLm4afV6pgBnNx4nh/ml",
+ "paxNHKw5+nYEmRX8HG0TeWo2ljDEGYUx0UKmkGzjfBTLExwVisAOIR1xkl1qMpitJxw9/o0y3SgT7NiF",
+ "sQWPeOxvbQT8tI0O3YHT8hIMF7luHJMmzN7OQhH5frUEepEVpCBNvkFeXYqqsEktMmfa/2bdnszNYtM3",
+ "rfjJjFVwyavMjxieloLFJEJmcBXXrrwTG8ngiok40stmZmFY6lNOMgQwiwq6TeKludJCrhKbHdxl1Jqk",
+ "3j3NaimcAbuEyuG1hMqZXeOzY4lRPoO2DY9tpHDBmdsQAV+NT2uRs7ulY0lUeoCCWIi0UtzmRpGovQWy",
+ "CgqO2FGWzpn98Tm3EfuFfe5TtT5EHvJuHK7n11EN07Do5Zo2C1Vtn4gh1+PRFjSMLWSVqwXPE3T4Ickg",
+ "NztDb3iQgJc0Eu21Soevd1E+O3ufZ2dnH9hrHEtnC2DnsJlTxpqlay5X0KYRQnmxpwa4grQOTUuPjHsd",
+ "BF2stIt99yg4nZRK5Ulz5O2nPQbmpk/3c5GeQ8ZQX5GIOSt4r7tDOAm7jyyum8TQ5XrjXciyBAnZgxlj",
+ "R5JBUZqNi6/0PJ7e5PKe2Tb/Fc2a1ZSj5pLRImdnMh7asBnuT5QpD2a7JNmSr0+cygLZPpG5kiPixC8p",
+ "QYPgovK5NTp6Qm8Gpm9g0QOmsljsE0P4huqgeGeXRUbHkda66XpRCCqGCoZNUXP6/PTwhC/MjLFT0h14",
+ "wNJwARXPqdJD+8Cx0KwQeFDXdZoCZIdnMulgkqrCTXy//a9VS2f1wcETYAcP+u9og+6qO0taGei/+yU7",
+ "mNpHRC72JTubnE0GkCoo1AVk9jwW8rV9ayfY/9PAPZM/DBQzK/jGnuS8LDJdL5ciFZbouUK9vlI9r1Mq",
+ "egIVogdoZjUTZkqmjChK3rrdl1YAJ1Hv6S5iPhGo6KejKUVt57OSXd7RDK54iqvkpGQ21iNo+GzoBBlV",
+ "JiGAaAh6y4wuCaA7evyWcjfU5zYAsR2/014IokOOgF1nu333ATGiGOwj/kesVLjrwtUf+SKVXGgzQNKF",
+ "IygD1DBkxOjM2H+omqWc5LesDTRnO1XRgYkO0jgD2Vg/p/PUWgpBDgXYCBE9efiwv/CHD92eC82WcOmL",
+ "9nBgnxwPH1ohUNp8sgT0WPPqOOJAUWAerWmk0HrN9Xq2M0hPcPeKzQegj1/6CUmYtCYTgwuvlFrewWpF",
+ "dhX1WeAqtlK3cxRuu6dZyTej7nWJCEaqtaA6zymWr5Y9jmRO/61FiSDbypKNgU5V6n/e/9vh+6Pk7zz5",
+ "/SB5/n/nHz4+vX7wcPDj4+svv/yv7k9Prr988Lc/x5wXbcQinvf5lus1Yuo0x5U8ljZzi54nBew2Lg6g",
+ "lp8b7x6L4WZ6ygdL2ofp3sY2RKArQZtNPHdSl2W+uQMjYwGxCtwZQ3fCo9o+VcuwKNVxnt5oA8Uww2Bf",
+ "/Xnk9PPORycGXKpkLiQkhZKwid7DEBK+p4dR35DU0sjLZCDG3u1Hbzr499DqzrPPZn4qfWm3AzX0timR",
+ "vYPN78PtJZfCclw62UBeMs7SXFDoXEltqjo1Z5JTcK7nevfYwoccx8O1L/yQeHw4Er51oM4k10jDJmQX",
+ "TTouIRKM/xrAR211vVqB7rnibAlwJt0oISnQQnPRSSaxG1ZCRdnhmR2J3ueS5xRd/h0qxRa16Zp7qhq0",
+ "3rTNdOE0TC3PJDcsB64N+17I0ysC50/VnmckmEtVnTdUGIkKgAQtdBJXpN/Yp6RP3fLXTrfSFQ772Oub",
+ "z20APO6xmjaH+fFL5wofvyR/p81xDXD/bImPQsgkymR4RC2EpNLoHm+x++i1eQZ60GbL3K6fSXMlkZEu",
+ "eC4ybm7HDn0VN5BFKx09rulsRC+O7df6IXbEXqmk5Ok51Z9MVsKs68UsVcXcHwHmK9UcB+YZh0JJepbN",
+ "eSnmuoR0fvFohzv2CfqKRdTV9XTitI6+80o3Bzi2oP6cTQbJ/20Uu/fNq1M2dzul79kCVws6qEyMnNrc",
+ "/cpOAAEXby9o2QpfPEC/hKWQAp8fnsmMGz5fcC1SPa81VF/xnMsUZivFDpkD+ZIbTnGnXjR97A4lxQQd",
+ "NmW9yEXKzkNT3IrmWDD27Ow9MsjZ2YdBvnloON1U8QA3TZBcCrNWtUlcRmI8dtXG9wiyjQVvm3XKHGzL",
+ "kS7j4eCPBN3LUidBFDa+/LLMcfkBG2pGL1G9ItNGVV4JomZ0cTTc3zfKZdwrfulvjdQaNPul4OV7Ic0H",
+ "lriYz1FZUoiXYqy/OF2DPLkpYf84bYtiCyx2tqeFW4fqxjWsBPTEvuUTFzpOOXxEpKMxqBXaOPRt6YSg",
+ "vlU5bu6tyRTAiFKnNusEZSq6Ko2sRfIQ3PXlK9SFPu+sxUoi87m7Zwtg6RrSc8go6UZx6WnndV/a4CyL",
+ "F1mh7XUxW6pKdxooBLEAVpcZd7aXy02/uFyDMb6i/h2cw+ZUtVciblJNfj2duERSgjwzJiAl0iMwAmrZ",
+ "FRefjOptvssrUrKnLJnNp9gqYM8Whw1f+HfGBchapjsQnhhTNGTYwu8lryKEsMw/QoJbLBThfRLrR7M3",
+ "vDIiFaVd/375oLeddxDILqUeVeNq2dfWA2Ua1d52cLLgOq64AZ/gfqAM9auI/Ew2mmcTxIxaDjjGXeQQ",
+ "ZDK1k2xekbPjl23vUI+hFucSqGRrTT0aXYqEZnvtUvLiok3EU6hlHwO3MxGKXORrZUQ35SFw3hwu+Gj2",
+ "afSuz3FQABNcIW1u8njF1heGaXOry3Zz8Dd+/DUff7dnMr3RPZ3pxNVkxrZDSbLuGeSw4i7ZQtWePtFv",
+ "Ubungw1CPH5YLnMhgSWxWhqutUqFzb+3utzNAej8PWTMBlbY3hBibBygTVFqAszeqFA25eomSEoQFNbm",
+ "HjbFt4O/YXeUt22r4dzKne7fUHe0QjRtr73ZbRxGf6aTqEoa88w7o5gdsoDBUSbGoqiahvGQYdRFQw5k",
+ "jpOOZk3OY1Ey9CqA2PDEvxa46+y+WKKRfxAkKypY4dm7Pa+itPoAzOeNGVwoA8lSVNokdFSOLg8Hfa3J",
+ "Gfwah8bVT4dUzN7LF1lc+9C057BJMpHX8d128373Eqd905xbdL04hw0ZGeDpmi2ojwRaoc70OGbL1Lae",
+ "bOuCX9sFv+Z3tt79eAmH4sSVUqY3xx+Eq3r6ZJswRRgwxhzDXRsl6Rb1ElTADHVLUHtj63Sopme27bQ+",
+ "EKYbVxGNal4LKbqWwNHdugpbbGbryYI2DMO7DSMywMtSZFe9s7OFOpIuIwf+Bo669fgjKaBJA2wHBYJz",
+ "cqx8tgJ/1rdbGthM21BjUGK4mzL9wsZAIYRTCe3bQQ0JhaxNFWC7aHUKPP8ONj/hWFrO5Ho6+bQjf4zW",
+ "DuIOWr9ttjdKZ4oh2yNgJ3J2Q5LzsqzUBc8Td31sjDUrdeFYk4b722afWdXFj9+nr45ev3XoU8Uk8MoV",
+ "Cm5bFY0r/zCrwhNxrFrwNIiMkLfqz87WEQs2v7nDGwZTfHFnx5dDLeaYy4pXY+BCUXTBlWU8lbUzVBIW",
+ "hN5KMjsVpZ8amQvLS+9U5AcSFufQdod36IVwri0NQArb40YzJftFNejG0SmT2KXgG9xFG5gdKghZFwmK",
+ "QKJzkcZDB3KhUYpkXdDFqo0BRoNHHEKEWIuR8LmsRQALh+k9MkU9JIM5osSksM4W2i2Ua05YS/FbDUxk",
+ "IA0+qlyRXUdYUDZ83fjQpMVr1B1gV6begP8UO4+gxiw8IbHdyIdR3sgNCX/o8wttwtP4QxCcu0GSJpxx",
+ "YJa2JFgcfzhutpnudTdaG/YSHOogZAzbd2Z3I0MfOlhbREfmiDYmHNXYR+Pamu4e7K+nW7VM6IYK2daD",
+ "8lyrCJhaXnJp+4zhe5aG7m0N9tyOb12qii7saYhmqIVOlpX6HeKnySVuVKTuz5GSXDZ6exa5CNVXok1k",
+ "pO0g6ekb4jHK2mPeVPCQdZNoIxJOXB6Er6mQ2QeZuLRsbXuidVK3ceEIyy3mFn4rHA7nQYlKzi8XPNYe",
+ "BJ0axOmoTZR0wmFGMf+y3wXd1O873gtyLs1YYW+5lVC1xbnDG9W3dFD+WCyfQSoKnsejoxlRv3v9KRMr",
+ "YRvL1RqCzmUOkO3IabnIdX+zqaiWNMdLdjANeiO63cjEhdBikQONeGRHLLgGe8sqvHnlioIMSLPWNPzx",
+ "HsPXtcwqyMxaW8JqxRon0l6o8fHnBZhLAMkOaNyj5+w+Rd61uIAHSEXni0wOHz2nkgz7x0HM2LkOktv0",
+ "SkaK5d+cYonzMaUeLAw0Ug7qLHrj0rb9HVdhW6TJvrqPLNFIp/V2y1LBJV9BPKNa7MDJvku7SYG7Hl1k",
+ "ZntWalOpDRMmPj8YjvpppCwL1Z9Fw93PKFCAjGJaFchPbVsyO6kHZxtgulZBHi//kNIcpb9n0zu0ft4g",
+ "rbXlsVVTMuoNL6BL1inj9mIyXRVyF9qdQpyN9HSB6iI+STWywd5uunfZfalkUqDsZA/agr+A/6ItTZTh",
+ "eXRa43VXv3JlO+h9XS2EkowStu4Qlgc66dYkrqv4OnmNU/347rUzDIWqYv1JWm3ojEQFphJwEZXYfuFa",
+ "45k05sJTPuagfFWLPPupLTfttQKruEzX0fjnAl/8ue1d2JDdUj167XPNpYQ8Cs7K8s9e5iNa6Ve17zyF",
+ "kHuO7bf4ssvtLa5FvIumR8pPiOQVJscJQqp26++awpF8pTJG87QNBlpGGN7LC9od/VaDNrE7hPTA1jrR",
+ "GRv9Fdtth4HMyNrPmL1zh7h0bk2RlRVFndsbOJCtoHIBmLrMFc+mDOGcvjp6zeys2t0fp7te1O1nZe9v",
+ "dlbRO1sF3UhucqF1rDRqfzjba0Zw1dpQOwJteFHGql5xxKkfQKW1F1zkvvyAzE9InRl7aS2/9nbFTtLe",
+ "W2bNdE7XEE/gf4zh6ZpMascAjbP8/m2qPFfqoF1r0/myaShir+Ia5TtV2UZVU6bQ77kU2rachgvoFto2",
+ "VefOpfOFt93lVbWUllPi9mnLrYjbkN0jZxN7PiQVxaxH+BuaGa3qKoWbdu06obei9/r6LcAGfVrtDZ+m",
+ "T6L/lEDKpZIipVt1QZPrBmXXvnqfmOkeFxD7x2Uv4k5CI8IVbTzWlA44Ko62IvOK0BFuGDAKnuKmWu6w",
+ "fxrqk4wHwRUY7TQbZFPfXM6d44TU4BrEUCfzQE/icbyfP4ymNtoWETdkIyr/G3FXvsZn5KoIV7JzLiRd",
+ "mHZkc9VB9qRF3XUNHu+EYSsF2q2ne01Ov8d3ZnRVLIOrDzPfjZdg2BAyLtvmLIagjnwGw2UMcOwLHMso",
+ "XNz+3Ck1tJMelaWbNHrLrdnhWHu8UQJHouCJD0MGxG3gh9C2sNvW1CPZU2Q0uKDEBZRkhweMMdJ24RUe",
+ "ai1H2dvbNuUfvZohZASN10JC2ys6YiDSqEmgjSF5HXlPpxU31gXcS6edAs8pUxJTaNq40NGnguptMJGE",
+ "1ujnGN/GtkniiOJoBrSOG5ebpkU1cnfgTLyg3viOkMOWh+RVOScqo6KuXhPEmOJAxe3bh3YNwFAMhj6R",
+ "fd1U3ErOTSzRWBF6JjQeR4pFHiljedk8DBqBUr3cYkP/xi69j6/AJdZu3aSFXryxf7m9YUqOe59osbrl",
+ "rrTv3+G29GQg3KMY979CtRLe2xn0L7CKp7lWQyl85dsy06GiKUzv8iwpuuihre2wu/3QOt4rd0qqcaSQ",
+ "5117Y5Rb7Wtjg2PlPOlo9Rk3rrTUcLatc5FtcBuDYPOQtrGu/UhNNDAwlnu0qUd8PHh7P79h4IUR7K0E",
+ "9UntIULf+aoVVnLhAt+tiAwp6+rbhhWH+1S+tBvcX4SrGiMgsZXcsshrL9kbUiki2GFpwA72PO+Q1N4G",
+ "6XmSqoI7Jm1gQm9I2mHRw77Lo3UQx9QahuvcewM6tB2h/T6Eb/XCkLjj4mwW+4hzvKgeXyd9Ygnir30M",
+ "tcln0wadvtxu3tiu/zQWPbAn5JFAVY+mtcizXZvbCTu215kpsPbz4ounnejd57xQ/bNNyA/Fzd0tvYnh",
+ "728CESay1s7kwVRBQHGPWKJ7LRI5pP5jaV0Js6HaHe9pip+jdcnfgHTdyd3HHpoMqEvA2e8MudD0qhnd",
+ "fhrmG2XbtRfo/pIraKgxz6srXpQ5OLn48t7iL/Dkr0+zgyeP/rL468GzgxSePnt+cMCfP+WPnj95BI//",
+ "+uzpATxafvF88Th7/PTx4unjp188e54+efpo8fSL53+557/LYhFtv3ny79R1IDl6e5ycIrItTXgpvoON",
+ "vWeMbOxvMPOUJBEKLvLJof/p/3kJm6WqCD4l6X6duEj/ZG1MqQ/n88vLy1n4ynxFrSITo+p0PffzDPsg",
+ "vT1uArQ24U87amNvyAq0qY4VjujZu1cnp+zo7fGsZZjJ4eRgdjB7RI1CSpC8FJPDyRP6iaRnTfs+d8w2",
+ "Ofx4PZ3M18Bzs3Z/FGAqkfpH+pKvVlDN3FVu/Oni8dzHd+YfXZL7etuzbpWBuzoSvBDcPZx/7PQazUK4",
+ "dDNv/tFXYASPbC/t+UcKH43+3kXjo7kS2fXc9/xxb7ietPOPbZPoaysdOcRO/r55XTucmtLRtzO0/RUF",
+ "wucVhe72FG929zjDXcW3XjQNs8NPBL//J/2g5ofe94UeHxz8k30p5ekNV7zVn+2c/yJ9Fr7iGfO5JZr7",
+ "0eeb+1jSTQ9UaMwq7Ovp5NnnXP2xRJbnOaORQTXIcOt/lOdSXUo/Eq1rXRS82ngx1h2l4Nvgkw7nK03N",
+ "MitxgYf6D9SNNZbMG1Eu9EmaGysX+s7Ov5TL51Iuf4wPED2+oYD/8Vf8L3X6R1OnJ1bd7a9OnStnyxfm",
+ "tnVc6+H5W5PDq4Rdb3ZMJ7ujDrtPcVIJlw9cCYQFG7mW2qSbVWZjIr61kC+sClrNd3X2Owe0cwP6O9jo",
+ "XQr8dA3sFwc+EdkvVP5JyYcpUxX7hed58Bt929e77bO4vm+vKu78wmgroDG0lgC+GJWKTl3HXTRk5+Av",
+ "tVoadBKUw5x+24huCaNfmbb9ukIN5ljw0cHBQawYqI+zi99YjKn491IlOVxAPtzqMSR6d1u3fZN19Ks1",
+ "wyvJ4bk7wnX+E+bNLeXRT9R279neBLuXSt4z7JIL1/g/6GtjP2NUCOO/3myLhFwBYWMj4l/8TRDk9g+C",
+ "f6rx/uN10L3eouz0ujaZupTjiotuF/HcledSwWwTbjCKeQCNppox/znOfOO/J804lSup2nQ/8+7bVfQa",
+ "hTcNlVZC0gQk5TSLrUPnQZWn+2zMUAmeOMze2K/s9PRe9Gu3Fse43MeE/lN5aehobN0r396k8/ccWR7d",
+ "VfsVsYQoNAxpGOD53BWq9H616eTgx24z8Miv8+ZqV/RhP1ATe+riKH5QGyENI460U02s8f0HJDjVELtN",
+ "bANoh/M5pXDXSpv5BBVON7gWPvzQ0Pij33lP6+sP1/8dAAD//3s5wHVxiQAA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go
index 9ec60c2cb..a1e957bbc 100644
--- a/daemon/algod/api/server/v2/generated/private/types.go
+++ b/daemon/algod/api/server/v2/generated/private/types.go
@@ -580,6 +580,19 @@ type PostTransactionsResponse struct {
TxId string `json:"txId"`
}
+// ProofResponse defines model for ProofResponse.
+type ProofResponse struct {
+
+ // Index of the transaction in the block's payset.
+ Idx uint64 `json:"idx"`
+
+ // Merkle proof of transaction membership.
+ Proof []byte `json:"proof"`
+
+ // Hash of SignedTxnInBlock for verifying proof.
+ Stibhash []byte `json:"stibhash"`
+}
+
// SupplyResponse defines model for SupplyResponse.
type SupplyResponse struct {
diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go
index 32f6e331d..b69ebf08e 100644
--- a/daemon/algod/api/server/v2/generated/routes.go
+++ b/daemon/algod/api/server/v2/generated/routes.go
@@ -32,6 +32,9 @@ type ServerInterface interface {
// Get the block for the given round.
// (GET /v2/blocks/{round})
GetBlock(ctx echo.Context, round uint64, params GetBlockParams) error
+ // Get a Merkle proof for a transaction in a block.
+ // (GET /v2/blocks/{round}/transactions/{txid}/proof)
+ GetProof(ctx echo.Context, round uint64, txid string, params GetProofParams) error
// Get the current supply reported by the ledger.
// (GET /v2/ledger/supply)
GetSupply(ctx echo.Context) error
@@ -266,6 +269,57 @@ func (w *ServerInterfaceWrapper) GetBlock(ctx echo.Context) error {
return err
}
+// GetProof converts echo context to params.
+func (w *ServerInterfaceWrapper) GetProof(ctx echo.Context) error {
+
+ validQueryParams := map[string]bool{
+ "pretty": true,
+ "format": true,
+ }
+
+ // Check for unknown query parameters.
+ for name, _ := range ctx.QueryParams() {
+ if _, ok := validQueryParams[name]; !ok {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unknown parameter detected: %s", name))
+ }
+ }
+
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameter("simple", false, "round", ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ // ------------- Path parameter "txid" -------------
+ var txid string
+
+ err = runtime.BindStyledParameter("simple", false, "txid", ctx.Param("txid"), &txid)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter txid: %s", err))
+ }
+
+ ctx.Set("api_key.Scopes", []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetProofParams
+ // ------------- Optional query parameter "format" -------------
+ if paramValue := ctx.QueryParam("format"); paramValue != "" {
+
+ }
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetProof(ctx, round, txid, params)
+ return err
+}
+
// GetSupply converts echo context to params.
func (w *ServerInterfaceWrapper) GetSupply(ctx echo.Context) error {
@@ -546,6 +600,7 @@ func RegisterHandlers(router interface {
router.GET("/v2/applications/:application-id", wrapper.GetApplicationByID, m...)
router.GET("/v2/assets/:asset-id", wrapper.GetAssetByID, m...)
router.GET("/v2/blocks/:round", wrapper.GetBlock, m...)
+ router.GET("/v2/blocks/:round/transactions/:txid/proof", wrapper.GetProof, m...)
router.GET("/v2/ledger/supply", wrapper.GetSupply, m...)
router.GET("/v2/status", wrapper.GetStatus, m...)
router.GET("/v2/status/wait-for-block-after/:round", wrapper.WaitForBlock, m...)
@@ -561,171 +616,177 @@ func RegisterHandlers(router interface {
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XfbNrLov4Kre89p0itazld343N67nPjtPXbJM2J3b37bpzXhciRhDUJcAHQkprn",
- "//0dDAASJEFJ/kjadPVTYhEfg8F8YTCY+ThKRVEKDlyr0dHHUUklLUCDxL9omoqK64Rl5q8MVCpZqZng",
+ "H4sIAAAAAAAC/+x9/XPbOJLov4LTXVWSOdFyvmY3rpq654kzs36byaRiz96+i/PmILIlYU0CXAC0pMnz",
+ "//4KDYAESVCSP5JM5vRTYhEfjUZ/obvR+DhKRVEKDlyr0dHHUUklLUCDxL9omoqK64Rl5q8MVCpZqZng",
"oyP/jSgtGZ+PxiNmfi2pXozGI04LaNqY/uORhH9WTEI2OtKygvFIpQsoqBlYr0vTuh5plcxF4oY4tkOc",
- "noyuN3ygWSZBqT6UP/F8TRhP8yoDoiXliqbmkyJLphdEL5girjNhnAgORMyIXrQakxmDPFMHfpH/rECu",
- "g1W6yYeXdN2AmEiRQx/OF6KYMg4eKqiBqjeEaEEymGGjBdXEzGBg9Q21IAqoTBdkJuQWUC0QIbzAq2J0",
- "9H6kgGcgcbdSYFf435kE+BUSTeUc9OjDOLa4mQaZaFZElnbqsC9BVblWBNviGufsCjgxvQ7I60ppMgVC",
- "OXn3/Qvy5MmT52YhBdUaMkdkg6tqZg/XZLuPjkYZ1eA/92mN5nMhKc+Suv2771/g/Gdugbu2okpBnFmO",
- "zRdyejK0AN8xQkKMa5jjPrSo3/SIMEXz8xRmQsKOe2Ib3+umhPP/pruSUp0uSsG4juwLwa/Efo7KsKD7",
- "JhlWA9BqXxpMSTPo+8Pk+YePj8aPDq///f1x8j/uz2dPrndc/ot63C0YiDZMKymBp+tkLoEitywo7+Pj",
+ "noyuN3ygWSZBqT6UP/N8TRhP8yoDoiXliqbmkyJLphdEL5girjNhnAgORMyIXrQakxmDPFMHfpH/rECu",
+ "g1W6yYeXdN2AmEiRQx/Ol6KYMg4eKqiBqjeEaEEymGGjBdXEzGBg9Q21IAqoTBdkJuQWUC0QIbzAq2J0",
+ "9H6kgGcgcbdSYFf435kE+A0STeUc9OjDOLa4mQaZaFZElnbqsC9BVblWBNviGufsCjgxvQ7IT5XSZAqE",
+ "cvLuh5fk6dOnL8xCCqo1ZI7IBlfVzB6uyXYfHY0yqsF/7tMazedCUp4ldft3P7zE+c/cAndtRZWCOLMc",
+ "my/k9GRoAb5jhIQY1zDHfWhRv+kRYYrm5ynMhIQd98Q2vtdNCef/oruSUp0uSsG4juwLwa/Efo7KsKD7",
+ "JhlWA9BqXxpMSTPo+8PkxYePj8ePD6//9f1x8l/uz+dPr3dc/st63C0YiDZMKymBp+tkLoEitywo7+Pj",
"naMHtRBVnpEFvcLNpwWKeteXmL5WdF7RvDJ0wlIpjvO5UIQ6MspgRqtcEz8xqXhuxJQZzVE7YYqUUlyx",
"DLKxkb7LBUsXJKXKDoHtyJLluaHBSkE2RGvx1W1gpusQJQauW+EDF/T7RUazri2YgBVKgyTNhYJEiy3q",
"yWscyjMSKpRGV6mbKStyvgCCk5sPVtki7rih6TxfE437mhGqCCVeNY0Jm5G1qMgSNydnl9jfrcZgrSAG",
- "abg5LT1qmHcIfT1kRJA3FSIHyhF5nu/6KOMzNq8kKLJcgF44nSdBlYIrIGL6D0i12fb/ffbTGyIkeQ1K",
- "0Tm8peklAZ6KbHiP3aQxDf4PJcyGF2pe0vQyrq5zVrAIyK/pihVVQXhVTEGa/fL6QQsiQVeSDwFkR9xC",
- "ZwVd9Sc9lxVPcXObaVuGmiElpsqcrg/I6YwUdPXt4diBowjNc1ICzxifE73ig0aamXs7eIkUFc92sGG0",
- "2bBAa6oSUjZjkJF6lA2QuGm2wcP4zeBpLKsAHD/IIDj1LFvA4bCK0IxhXfOFlHQOAckckJ+d5MKvWlwC",
- "rwUcma7xUynhiolK1Z0GYMSpN5vXXGhISgkzFqGxM4cOIz1sGydeC2fgpIJryjhkRvIi0EKDlUSDMAUT",
- "bj7M9FX0lCr45umQAm++7rj7M9Hd9Y07vtNuY6PEsmREL5qvjmHjZlOr/w6Hv3BuxeaJ/bm3kWx+blTJ",
- "jOWoZv5h9s+joVIoBFqI8IpHsTmnupJwdMG/Nn+RhJxpyjMqM/NLYX96XeWanbG5+Sm3P70Sc5aesfkA",
- "MmtYo6cp7FbYf8x4cXGsV9FDwyshLqsyXFDaOpVO1+T0ZGiT7Zg3Jczj+igbnirOV/6kcdMeelVv5ACQ",
- "g7grqWl4CWsJBlqazvCf1Qzpic7kr+afssxjODUE7BQtOgWcs+Cd+838ZFge7JnAjMJSapA6QfV59DEA",
- "6D8kzEZHo3+fNJ6Sif2qJm5cM+P1eHTcjHP/MzU97fo6B5nmM2Hc7g42Hdsz4f3DY0aNQoKGageG73KR",
- "Xt4KhlKKEqRmdh+nZpw+p+DwZAE0A0kyqulBc6iydtYAvWPHH7EfnpJARlTcT/gfmhPz2XAh1d58M6Yr",
- "U8aIE4GjKTMWn9UjdibTAC1RQQpr5BFjnN0IyhfN5FZA1xL1vUPLh+5okd15ae1Kgj38IszSm1Pj8VTI",
- "29FLhxA4ac7ChJpRa+vXrLy9s9i0KhOHn4g9bRt0Bmrcj32xGmKoO3wMVy0snGn6CbCgzKj3gYX2QPeN",
- "BVGULId74NcFVYv+IoyB8+QxOfvx+Nmjx788fvaN0dClFHNJCzJda1DkgdMrROl1Dg/7K0MBX+U6Pvo3",
- "T/0Jqj3uVgwhwPXYu3DUORjJYDFGrL/AQHci17Li94BCkFLIiM2LpKNFKvLkCqRiIuK+eOtaENfCyCFr",
- "d3d+t9CSJVXEzI3HsYpnIA9imDfnLFTpGgq1TVHYoc9XvMGNG5BKSde9HbDrjazOzbvLnrSR7617RUqQ",
- "iV5xksG0moc6isykKAglGXZEgfhGZHCmqa7UPUiBZrAGGLMRIQh0KipNKOEiMwxtGsflw4AvE50o6PvR",
- "ocjRC6t/pmCs45RW84UmxqwUsa1tOiY0tZuSoK5QA0e/+sxuW9nprJ8sl0CzNZkCcCKm7nzlTn64SIpu",
- "Ge1vXJx0asCqzwQtuEopUlAKssRdL20Fzbezu6w34AkBR4DrWYgSZEblLYHVQtN8C6DYJgZubU64Q2kf",
- "6t2m37SB3cnDbaTSnDEtFRjbxXB3DhqGULgjTq5A4uHsk+6fn+S221eVA1cnTgOfs8KwL+GUCwWp4JmK",
- "DpZTpZNtbGsatcwEs4KAU2KcigMPOAheUaXtEZ3xDE1GK25wHuyDUwwDPKhRzMh/9cqkP3Zq5CRXlao1",
- "i6rKUkgNWWwNHFYb5noDq3ouMQvGrtWXFqRSsG3kISwF4ztk2ZVYBFHtfES1D6u/OHTHGz2wjqKyBUSD",
- "iE2AnPlWAXZD9/EAIOZ8UfdEwmGqQzm1z3o8UlqUpeE/nVS87jeEpjPb+lj/3LTtExfVjVzPBJjZtYfJ",
- "Qb60mLUXBwtqbDscmRT00ugmtNSsL6EPs2HGRDGeQrKJ8g1bnplWIQtsYdIBI9ldTQazdZijQ79Rohsk",
- "gi27MLTgAYv9rfWAnzfeoXswWk5AU5ar2jCp3ezNLOiR70ZLGCtSQgpc52tDqzMmC3uphepM+d+s2ZO5",
- "Wez1TcN+PCMSllRmvkX/tBQsJmE8g1VcutKWbySDFWFxoGf1zEyT1F858XCAgyij20u8NBeK8Xlibwe3",
- "KbX6Uu8rRSrOnAJbgnRwzUA6tav97Viihb9B2wTHJlQ458xtkGC6xqe1wNndUrFLVPxgGLFgqRTU3o0a",
- "pHYWSCQU1ECHt3RO7Q/PuQnZL+x3f1XrXeQh7cbH9fQ6KGFqEl0ucLOMqO0iMaR6c7QFBUMLmediSvPE",
- "GPyQZJDrra43c5CAE2xp9LVI+93bIF9cvM+zi4sP5JVpi2cLIJewnuCNNUkXlM+huUYI+cWeGmAFaRWq",
- "lg4adzoIOl9pG/r2UXA8KoXIk/rI27326KmbLt4vWXoJGTHyClnMacGv2jtkJiEPDImr+mJouVh7E7Is",
- "gUP28ICQY06gKPXa+Vc6Fk9ncv6V3jT/CmfNKryjppzgIg8ueNy1YW+478hTfpjNnGRDvu44lR1k80R6",
- "xQfYiS7xgsYMF+XPjd7RM+wZqL6eRg+IykKxiw/hB4yDoq1dZhkeRxrtpqppwTAYKmg2NpLT30/3T/hM",
- "HxByjrLDHLAUXIGkOUZ6KO84ZooUzBzUVZWmANnRBU9akKSicBM/aP5rxdJFdXj4BMjhw24fpY256s6S",
- "lge6fb8lh2P7CdFFviUXo4tRbyQJhbiCzJ7HQrq2vbYO+2/1uBf8p55gJgVd25Oc50WiqtmMpcwiPRdG",
- "rs9Fx+rkAr+ANOCBUbOKMD1GVYYYRWvd7kvDgKOo9XQfPp/IqMZON6rUSDt/K9mmHUVgRVOzSopCZm0t",
- "gprO+kaQFmUSDhB1QW+Y0V0CqJYcvyXf9eW5dUBshu+844JooSMg14PttnsPGVEIdmH/Y1IKs+vMxR/5",
- "IJWcKd0D0rkj8AaoJsiI0jkg/0dUJKXIv2WloT7bCYkHJjxImxlQx/o5naXWYAhyKMB6iPDL1193F/71",
- "127PmSIzWPqgPdOwi46vv7ZMIJS+Mwd0SHN1GjGg0DFvtGkk0HpB1eJgq5Mex93JNx8MfXriJ0RmUgpV",
- "zPV4ZI7c+foeGN4ORCQ4e0+1XFXKfhWzMEDQ7Z9aKw1F39tru/4yYIm+8yfFnqYVPGcckkJwWEdj4hmH",
- "1/gxqqeRRAY6I7MO9e2epFvwd8Bqz7PLbt4Vv7jbAUm8rcMV72Hzu+N2HP1haCRamZCXhJI0Z+jGFFxp",
- "WaX6glN0lHTMoA5ZePfPsOvshW8S99VFXGluqAtOlcFh7T6JXgDNIOIY/R7Ae9BUNZ+D6phFZAZwwV0r",
- "xvHQi3OhVZnYDStB4k3dgW1pLIEZzdHT9ytIQaaVbotejOCylo29dTDTEDG74FSTHKjS5DXj5ysczp9w",
- "PM1w0EshL2ssDJzQgINiKolfZv5gv/5I1cIv3zT0wsZ1tn51M34T5rXW0AoR/78P/uvo/XHyPzT59TB5",
- "/p+TDx+fXj/8uvfj4+tvv/1/7Z+eXH/78L/+I7ZTHvZYfJGD/PTEmSWnJ6h7mvuGHuyfzQldMJ5Eicwc",
- "FwrGMUy1Q1vkgdGgnoAeNjcXbtcvuF5xQ0hXNGeZOQLfhhy6Iq7Hi5Y7OlTT2oiOT9Gv9UPsuDMXSUnT",
- "S4wFGM2ZXlTTg1QUE2+OTeaiNs0mGYVCcPyWTWjJJuZ4O7l6tEU13kFekYi4uh6PnNRR9x515AaOLag7",
- "Z+3N939rQb764eU5mbidUl/ZYEM7dBAlFrGg3Vu31mHOLN4+lrHRluYwcwIzxpn5fnTBM6rpZEoVS9Wk",
- "UiC/oznlKRzMBTkibsgTqin6ADqezaH3bOifcdCU1TRnKbkMVXHDmkOOsYuL94ZALi4+9O7++orTTRV3",
- "NuIEyZLphah04rzDw36ExteCI1u/3KZZx8SNbSnSeZ/d+AMO0LJUSeARiy+/LHOz/IAMFcFOGDtGlBbS",
- "C0EjGZ1Pw+zvG+FuPyVd+gj+ypzb/17Q8j3j+gNJ3Pn7uCzR3Yb+rr87WWNocl3C7j6zBsRmsNg5Cxdu",
- "DaobxxPioGe2l3ciqzjmzCdEHbYxUqHxCd4WT2aoH0VuNvfWaArGiGKn0ovE8FR0VcqQFvJD8O6Szo0s",
- "9HeA5thsiM+9A5oCSReQXkKGFyDoIxy3uvtrZqdZPMsyZZ/u2LBBjC/H4+AUSFVm1OleytfdQF8FWvvo",
- "5ndwCetz0YSn3ySy93o8ck79xNDMEIOUBh+BEhCzNrv4i4HO5rs7HnS8lyWxvm0bkenJ4qimC99nmIGs",
- "ZroH5okRRY2GDfReUhlBhCX+ARTcYqFmvDuRftSTTqVmKSvt+nfzzb9t9TGDbBPqUTEuZl1p3ROmUelt",
- "GydTquKCG8wXsx+Gh7oRHX4m61mxl3UEn387wp3mENwqKcfZVKKx45dt37MOgRanEpC80aYejDZGQrW9",
- "cNej7Kq5FMXr710U3NZLKUNFPm6Btd3PzMybwxUdvAkYfHdxGgQjBM/56lcVXrB1mWFcv7CxL+v96wv/",
- "5MK/sxiNb/RmYjxy8XGx7RActXsGOcypc3xj5J2/dLWgfaWCDTJw/DSb5YwDSWJxDVQpkTJ7F9rIcjcH",
- "GOPva0KsY4XsPEKMjAOw0WOIA5M3IuRNPr8JkBwYuhipHxt9jcHfsN3j1qQ4cGblVvOvLzsaJho3T5Ds",
- "Nva9P+NRVCQNWeatVsQ2mULvKBMjUSOa+v6QvtdFQQ6ojpOWZE0uY14yY1UAkuGZ7xaY6+QBmxkl/zBw",
- "HEuYm7N3c1413OodMJ/XZ3AlNCQzJpVO8KgcXZ5p9L1CY/B70zQuflqoIvaNNMvi0genvYR1krG8iu+2",
- "m/cvJ2baN/W5RVXTS1ijkgGaLsgU3/QbLdSa3rTZMLWN7dm44Fd2wa/ova13N1oyTc3EUgjdmeMLoaqO",
- "PNnETBECjBFHf9cGUbpBvATRCH3ZEsRB2JgJjK842HRa7zHTjSM6BiWvHSm6lsDQ3bgKG/hjY3uCJ/H9",
- "OPMBHqBlybJV5+xsR43TOE5xE0PdWvw9LODuusG2YCA4J8dCGSX4s77d0kBn2uQGvXCv7ZjpBpkFAiGc",
- "iimfmqePKEPaGI2zDVfnQPO/wPqvpi0uZ3Q9Ht3tyB/DtRtxC67f1tsbxTP6kO0RsOU5uyHKaVlKcUXz",
- "xD3lGSJNKa4caWJz//LnM4u6+PH7/OXxq7cOfIxeAypd0NamVWG78otZlTkRxyK3zgPPCFqr/uxsDbFg",
- "8+v3lKEzxQfatWw5I8UccVn2qhVcyIrOuTKLX2VtdZWEwXm34sxWdN9dPXNhqN+9snyPw+IU2uzwFrkQ",
- "zrUhGUNh840oIng3wMGYcXjKRHIp6NrsonXM9gUEr4rEsECicpbGXQd8qgwX8arARy5rDQQbDxiEZsSK",
- "DbjPecWCsUwztcNNUQfIYI4oMtGtswF3U+ESxVWc/bMCwjLg2nySLuCpxSyGN3wMb1+lxeOF3cAuZLge",
- "/i563gw1pOERiM1KPvTyRqLV/aHPL7R2T5sfAufcDS5pwhl7amnDBYujD0fN9qZ70fbWhnnd+jLIEIbN",
- "AbI9qZx3HSwsoANzRJPEDUrs42FpjXHgu8vpRiwjuKFAtrF5NFciMkzFl5TbnE+mn8Wh663AnttNr6WQ",
- "+HhKQfSGmqlkJsWvED9NzsxGRWKwHCrRZMPeB5FHKV0hWntGmmx+Hr8hHIOkPWRNBR9J+xJtgMORygP3",
- "NQaVeicT5ZasbX6q1tVtnDnCcIuJHb9hDgdzL0Qlp8spjaVqMEaNgem4uShpucO0IL6z3wVVx1I72gvu",
- "XOq2zL44KkE2gZL91623NFC+LJLPIGUFzePe0Qyx336KkrE5s0m+KgVBFik3kM2OaKnIZeKyV1ENak5n",
- "5HAc5Klzu5GxK6bYNAds8ci2mFIF9sVL+ArGBQVp4HqhsPnjHZovKp5JyPRCWcQqQWoj0j5u8P7nKegl",
- "ACeH2O7Rc/IAPe+KXcFDg0Vni4yOHj3HkAz7x2FM2blsfpvkSoaC5b+dYInTMV492DGMknKjHkRfv9kU",
- "rMMibAM32a678BK2dFJvOy8VlNM5xG9Uiy0w2b64m+i46+CFZzZ/oNJSrAnT8flBUyOfBsKyjPizYLhY",
- "+cIwkBZEicLQU5Miyk7qh7PJCF3aFg+X/4jXHKV/89A5tH5eJ63V5bFV42XUG1pAG61jQu0jUXy24R4X",
- "O4F4MJBfA+RVfBI5sMFeb7q+5AEXPCkM72QPm4C/gP6i6SWEpnl0Wu1lVzdyZfPQu5paZpRkELFVC7E0",
- "kEm3RnEl4+uklZnq53evnGIohIzlimikoVMSErRkcBXl2G7gWm2Z1OrCYz5moHxXsTz7axNu2knLJClP",
- "F1H/59R0/KXJI1ej3WI9+gRvQTmHPDqc5eVfPM9HpNI/xK7zFIzv2Labbskut7O4BvA2mB4oP6FBL9O5",
- "mSDEajv+rg4cyeciIzhP89i7IYT+G6kg9cw/K1A69p4LP9hYJzxjG3vFZj4hwDPU9gfEvn8ysLResKCW",
- "ZUWV29cQkM1BOgdMVeaCZmNixjl/efyK2FmVe8uL724w88rcvqVrraJztgoyQ9zkceFQaNTu42yOGTGr",
- "VhqfhitNizIW9WpanPsGGFp7RVnuww9Q/YTYOSAnVvMrr1fsJM0bUlJP52QN0oT5j9Y0XaBKbSmgYZLf",
- "PWWQp0oVpM6ssxDWyR3ss0gtfNYgmzRoTISxe5ZM2fS/cAXtQNs66tyZdD7wtr08WXFuKSWunza8irgN",
- "2j1w9mLPu6SikHUQf0M1o0QlU7hpBqUz7BV9Y9VNx9TLmckhO1/xOmedT+ueUi44S/GFU5BwuAbZpRLe",
- "xWe6w2Ow7nHZs7jj0AhzRZNA1aEDDouDaaG8IHSI6zuMgq9mUy112D815qw1B8E5aOUkG2Rjn+jLneMY",
- "V+CSdWBW6UBOmuN49/4werXRPNe/IRlh+N+AufK9+YamCnMhO5eM4+NVhzYXHWRPWpjpVJvjHdNkLkC5",
- "9bQfZ6n3ps/B+YqfGog/HPjMqDiGdSGbZds7i/5Qx/4Gw90YmLYvTFuC7uLm51aooZ30uCzdpDFJoOod",
- "jqUqG0RwxAueeDdkgNx6/HC0DeS28eoR9akhNLjCiwsoUQ/3CGPgCfxLc6i1FGVf0tor/+jTDMYjYLxi",
- "HJq8vREFkUZVAm4M8utAP5VKqq0JuJNMOwea401JTKAp7VxHdx2qs8GIElyjn2N4G5uEdQOCo27QGG6U",
- "r+t0wYa6A2PiBeYpd4jsp59Dq8oZURkGdXUS0sUEhxHcPpVjWwH02aBvE9nuWlLLOTfRRENB6BlT5jhS",
- "TPNIGMtJ/TFIyojxctM1/ht7gDy8AnexduuEGdjxxvbl5uQVudn7RLH5LXel6X+P29LhgXCPYtT/0oiV",
- "8N1O7y25FTz1sxq8whc+RS4eKurA9DbNoqCLHtqabKebD63DeUvHKBoHAnneNS9GqZW+1jc4FM6TDkaf",
- "Ue1CSzUlm7LI2GSjsRHsPaRNcmoLhkQdA0N3j/bq0Xzu9d7NbuhZYTj2RoT6S+0+QH/xUSukpMw5vhsW",
- "6WPWxbf1Iw53iXxpNri7CBc1hoPEVnLLIK+deK+PpQhjh6EBW8jzsoVS+xqkY0kKCfeM2kCF3hC1/aCH",
- "XZeH60CKqRT017nzBrRwO4D7XRDfyIU+cofZWU93Yed4UL3pjvLEIsQ/++hLk88mDVo5kt28sV3/65D3",
- "wJ6QBxxVHZxWLM+2bW7L7dg8Z0bH2i/Tb562vHef80H1L/ZCvs9u7m3pTRR/dxMQMZG1tiYPpgocijv4",
- "El23iOcQc0GllWR6jbE73tJkv0Tjkn8A7jJFu8T79Q2ou4CzNV+ca3pet27KdPwgbOrswpi/aApqTJLy",
- "ckWLMgfHF99+Nf0TPPnz0+zwyaM/Tf98+OwwhafPnh8e0udP6aPnTx7B4z8/e3oIj2bfPJ8+zh4/fTx9",
- "+vjpN8+ep0+ePpo+/eb5n77yNTIsoE39ib9h1oHk+O1pcm6AbXBCS/YXWNt3xoaM/QtmmiInQkFZPjry",
- "P/0vz2EHqSiCsn7u15Hz9I8WWpfqaDJZLpcHYZfJHNP2JVpU6WLi5+nnpHl7Wjto7YU/7qj1vRlSwE11",
- "pHCM3969PDsnx29PDxqCGR2NDg8ODx5hopASOC3Z6Gj0BH9C7lngvk8csY2OPl6PR5MF0Fwv3B8FaMlS",
- "/0kt6XwO8sA95TY/XT2eeP/O5KO75L42o85jUU0+1VbtX+y/cB5bh4U5s9SptYLHPMq98RmTqY3fIS67",
- "G8/QA2hjM4xoq5F1mgVFRINqFeNWDdT3X1BZr1jep9hT8Vih1jrKfLhQT1DL0NcvfPbn68hF04dO8ZXH",
- "h4efoODKuDWKx8stK7c8vUcQ2yeoOwPaHa4nFV7T3NAN1MX4RrigR1/sgk45vucwYotYsXw9Hj37gnfo",
- "lBvGoTnBlkEISV8U/swvuVhy39Ko5KooqFyjwg0ekoem1fWgyG0Hb7kXecNyGIIMZMEj3pZje7r2dDYm",
- "qk44XUomjOGApSszSCVQVPNC4n1Qk8vMPVUEm2H79fHf0Hv8+vhvNklgtKxfML1NmNkW4j+AjuTa+27d",
- "lKbaKNF/KzE5/t1WQvxydN5dVc0+Y+MXm7FxB6G93919Ps4vNh/nl22SrurAW0q44AnHpAZXQAK31t5G",
- "/V3bqM8On3yxqzkDecVSIOdQlEJSyfI1+ZnXEUF3M8FrmVPxIEZro/zpZZ9vrOjAfA8SLE0+topbZNud",
- "J60X2VkrBzmNFwcNcs+4aNBx88yU8sxGcvi7WjX2zy3RW2ffNdv9GPceYx7EjPTgquW79enJLnZ5a03B",
- "C7SYbd7C181KDn9Sj8WtC7d+Sg3Qg+M7mhEfMvqJZfNuwvTp4dPPB0G4C2+EJt9jkNknFumf1E8QJ6tA",
- "2GASs8lH/1htBwHjHoK2RUu32m9MqBgOHbuYfZemuS6+YeSJFYT2LW5fapgZdpUX/beqMUnRvM/7vciI",
- "GxVT3suFvVy4tVzoElQjEWzlx8lHDLANxUGPJbEe9R/ooiTInidF4dO3CDIDnS5cqezOXXZErPjA5GGZ",
- "sulZ4Z3ly75Q+l0Kpe9wINkj+PNUov+SHR+BtiQJeYPmEDK4j7n+I7o9PqVG/tQLeiM4EFgxhVk1LS3u",
- "rxtrcwEf4CNSfK2EMON9bTq4kqmTj00N4+smEsQ+EpxYy3+TXWELvIzu9U5nX5TnCyjK89ufKu7EIZ3V",
- "SggLMYN7JNtwi0/K2c9U2Q6Wcs3VotKZWAahVU3y40FOsi3ulZPeiAzsuO3wwv67dGoL8ikPRIeBthRD",
- "H6rtzpSrRprSar7QNhFDNMtLU2qeppbwN5bAjxe/t9WEcgk0W9uS8mJqFt3sKy7ypnXlLVylFCko9Ylr",
- "4Nez3LYIvgXWioTNgHYTMtTg1oao4/o+1LtNv2kDu5OH20glNJWAtMCLvhxcdYoICnfEyRVIzB7ySffP",
- "T3Lb7fNFzPvlwezXc1bA9vLorlzRNra19f+btSiweW08p3zO8lGtMu3RkeN10ewaetX1w6rt/UQsYQ34",
- "TaX9zbZFCq/ZrEvbRh7CUjB+/fA9KKKu61y0QMxwkcVhVVTqDK9IJsR4+fpNgJz5VgF2QxfQACBMNYiu",
- "k7S3KSfIiLSxen7s9a1pfax/btr2iSusJpsJsFmQXHsH+dJi1ua0WFBFHBykoJdYxEGKuQsR68McL+nf",
- "5yZWwJlpFbLAFibtGnkh+3fqjbWYo0O/UaIbJIItuzC04JhZ+bswAm966Os6Fj/hSaxtVgfmVWNW2r8n",
- "S8p0MhPSaswEs7lFnLrt2f+bMu1yCLrznhZGKIDROpgPzgoUN06Q0UWF8TWu0IYvzs6KyEWwmep7IXfy",
- "ITeOWS2IWRipuGb+BQAWZPI25u/PIbu3nvfW89563lvPe+t5bz3vree99fypreffJiiEJImX0z7iNxbv",
- "S0ZfpIX/BYXUfs4Y2Mbor01+PCQYE93w8cbLIg00n7g8anivLtRg1FmYky010zFOypxiouiV9m+fMEf0",
- "N0990EidXcimZTCyxjR48pic/Xj87NHjXx4/+6au0d5u+8DntFV6ncNDd6lev7n2t+vAKaYxwst16k8/",
- "qY94sdb8jOVAsFLoS2x+AleQG1PeUCSmi6six6NzoPkLhxwrlUDp70S27hCOWf8EUdEmmSYbAuNURjKD",
- "RUqBd5GsBWYHdKnueieo63uNVImnb+5v2La9GkhhHCXvTfSyNXOuS+rqxt7ljszsqUcncVnFflORTRAi",
- "R2aNePrdBPd1K3s4xsG2xqpw/PelBuJ5xEcZD9l2bGgyq1LAQm2O4laJaTQHnjixkExFtvZVPVySwpaU",
- "tdnjhoXsyxWkleElhMSxwQP10NXExCyYoasnmr03SEYNOF5Tx+lzC06bCG2j3Lw9dbTTKt85jKM7XF9q",
- "BG9GHghJ5lJU5UNbP4Kv8UhclJSvvRvM2IqYlxlLoGLo2f1K6jonZU/O7p5WODyv4DvC7u8WLWRJlc8p",
- "nNmkwvHESt3Ut9sx3iR23JaIx643moR2IOVsfxP9Lrug19r1V4JM9IpHUkF2Ej/u473/JVTCWymumDk4",
- "RyWsdczrqEA42KoZZCCyUDV0Xv963dCWp+/oMnxLvKtMXSXO8LyzVboAW6fNW2mRp9JGX0pBs5QqDGl1",
- "2bo/scWqV6cRvwOCiSkvZr0nf0aBby/JgOPuZE8GQzflv/BNurK5vX5b67LJSHPsIohb2Ni7Av4oroDv",
- "PPMpQomkyy5zBhn0dxBTdKlXPCqlJk19wWjEW8AQdUGye7y76w3fvsILKn/ZKwjIS0JJmjO8oBBcaVml",
- "+oJTdIGGFdf613vesTtsSr3wTeJe+IiT3A11wSnWyKkdo1GTagaxfPIA3mJT1XwOSnck8QzggrtWjDf1",
- "eAqWSpHYuE+jro1EP7AtC7omM5qjD/9XkIJMzSkifEaNDkWlWZ67+0QzDRGzC041ycEI/dfMGHRmOO9z",
- "qu/IXeWGsKJ/30HvktwNFJH6wX79kaqFX773G6F7y362N2afvwSUhz1WG9JBfnriUpycnuCr9eYmsQf7",
- "Z7teKhhPokRmNL67ke/SFnngCpIhAT1s7iTdrl9wY0xrYevtN9WAb0YO3WuAHi9a7uhQTWsjOrcFfq0f",
- "Ys9r5iIxR0bMfD2aM72oppgM0j+7mcxF/QRnklEoBMdv2YSWbKJKSCdXj7bYB3eQVyQirvaa+4/jxO9W",
- "rKw33hixvb0f0Mv3kFHu951GbmuI0j5p2z5p2z6t1z5p235390nb9inN9inN/lVTmh1stBAnH/VqlyRD",
- "4agss4V2JaR25lqAh81a6Yj615JMHxByjlV0qdEBcAWS5lg8XfnkCEyRgs0XmqgqTQGyowuetCCx5VzN",
- "xA+a/9pj7kV1ePgEyOHDbh/rtwgkb78vmqr4ydZU+pZcjC5GvZEkFOIKXHISbJ5VeFdse20d9t/qcX+S",
- "va0r6No6Vxa0LMGoNVXNZixlFuW5MIeBuejE93GBX0Aa4MBIVEWYtnngEJ8YF+mic6irGRUzuvv6/Qa5",
- "+I875BJ/BmAI74YZm/9zl3TN/yoG9gloynJVv06InKfwZNOlrCVVDevWUmXsY9qV/81dWLtZcnYJYQwu",
- "Rh8sqcx8i2gtvCbzn6/12HcttVOiZbDyJkEX6Fk9M9M2iZk5cPaqE/U9WzaxWJoLc2ZNbM2JbZHtdQ3v",
- "r5StKI6MhvYqwjUD6WLv0ZuVCwWJFk3yyGE4NqHCZYG6DRIGaqtfj0cWOLtbKlZtCT8YkYheYYpOYeqq",
- "pYcLNEKFGugkPkOysf/Dc25C9gv73RUAqb2CHR98ZFxPr4NhxjWJ2hLtKPW6SAypfkZchoQBR7StZ2gD",
- "OW5d1bDTvVcwKs8uLj6QVzZ5J1Y7u4T1xNbZSReUz0HVOAr5xT4dsuE9QXx5B433V0nRaK9koAbqaT/m",
- "vIv3S5ZeQkaMvPIF+gcOE+RBnYkQi1wvF2v/jsSqw4cHhBxzAkWp177eddvn3Zmcf6U3zb8KFXhbM0bC",
- "F1NgVyDvyFN+mM2cpMAw3B2nsoNsnkiv+AA70WXkaL1raqrISbpzrg2IykJxHw6KvXbca8e9dtxrx712",
- "3GvHP7x27Dml9m6bz+G2+c0dN3+gtJz7DJy/swWFwaytFNt38GbXhURj1rjzUzeFesPCt+hlrEvevv9w",
- "/cF8k1feAdnUcT2aTNCqWAilJ6Pr8cdOjdfwoxGldG5HcA6+UrIrTKD74fr/BwAA///AuJW4hOkAAA==",
+ "abg5LT1qmHcIfT1kRJA3FSIHyhF5nu/6KOMzNq8kKLJcgF44nSdBlYIrIGL6D0i12fb/ffbzGyIk+QmU",
+ "onN4S9NLAjwV2fAeu0ljGvwfSpgNL9S8pOllXF3nrGARkH+iK1ZUBeFVMQVp9svrBy2IBF1JPgSQHXEL",
+ "nRV01Z/0XFY8xc1tpm0ZaoaUmCpzuj4gpzNS0NV3h2MHjiI0z0kJPGN8TvSKDxppZu7t4CVSVDzbwYbR",
+ "ZsMCralKSNmMQUbqUTZA4qbZBg/jN4OnsawCcPwgg+DUs2wBh8MqQjOGdc0XUtI5BCRzQH5xkgu/anEJ",
+ "vBZwZLrGT6WEKyYqVXcagBGn3mxec6EhKSXMWITGzhw6jPSwbZx4LZyBkwquKeOQGcmLQAsNVhINwhRM",
+ "uPkw01fRU6rg22dDCrz5uuPuz0R31zfu+E67jY0Sy5IRvWi+OoaNm02t/jsc/sK5FZsn9ufeRrL5uVEl",
+ "M5ajmvmH2T+PhkqhEGghwisexeac6krC0QX/xvxFEnKmKc+ozMwvhf3ppyrX7IzNzU+5/em1mLP0jM0H",
+ "kFnDGj1NYbfC/mPGi4tjvYoeGl4LcVmV4YLS1ql0uianJ0ObbMe8KWEe10fZ8FRxvvInjZv20Kt6IweA",
+ "HMRdSU3DS1hLMNDSdIb/rGZIT3QmfzP/lGUew6khYKdo0SngnAXv3G/mJ8PyYM8EZhSWUoPUCarPo48B",
+ "QP8mYTY6Gv3rpPGUTOxXNXHjmhmvx6PjZpz7n6npadfXOcg0nwnjdnew6dieCe8fHjNqFBI0VDswfJ+L",
+ "9PJWMJRSlCA1s/s4NeP0OQWHJwugGUiSUU0PmkOVtbMG6B07/gX74SkJZETF/Yz/oTkxnw0XUu3NN2O6",
+ "MmWMOBE4mjJj8Vk9YmcyDdASFaSwRh4xxtmNoHzZTG4FdC1R3zu0fOiOFtmdV9auJNjDL8IsvTk1Hk+F",
+ "vB29dAiBk+YsTKgZtbZ+zcrbO4tNqzJx+InY07ZBZ6DG/dgXqyGGusPHcNXCwpmmnwALyox6H1hoD3Tf",
+ "WBBFyXK4B35dULXoL8IYOE+fkLO/HD9//OTXJ8+/NRq6lGIuaUGmaw2KPHR6hSi9zuFRf2Uo4Ktcx0f/",
+ "9pk/QbXH3YohBLgeexeOOgcjGSzGiPUXGOhO5FpW/B5QCFIKGbF5kXS0SEWeXIFUTETcF29dC+JaGDlk",
+ "7e7O7xZasqSKmLnxOFbxDORBDPPmnIUqXUOhtikKO/T5ije4cQNSKem6twN2vZHVuXl32ZM28r11r0gJ",
+ "MtErTjKYVvNQR5GZFAWhJMOOKBDfiAzONNWVugcp0AzWAGM2IgSBTkWlCSVcZIahTeO4fBjwZaITBX0/",
+ "OhQ5emH1zxSMdZzSar7QxJiVIra1TceEpnZTEtQVauDoV5/ZbSs7nfWT5RJotiZTAE7E1J2v3MkPF0nR",
+ "LaN9xMVJpwas+kzQgquUIgWlIEtceGkraL6d3WW9AU8IOAJcz0KUIDMqbwmsFprmWwDFNjFwa3PCHUr7",
+ "UO82/aYN7E4ebiOV5oxpqcDYLoa7c9AwhMIdcXIFEg9nn3T//CS33b6qHAidOA18zgrDvoRTLhSkgmcq",
+ "OlhOlU62sa1p1DITzAoCTolxKg484CB4TZW2R3TGMzQZrbjBebAPTjEM8KBGMSP/zSuT/tipkZNcVarW",
+ "LKoqSyE1ZLE1cFhtmOsNrOq5xCwYu1ZfWpBKwbaRh7AUjO+QZVdiEUS18xHVPqz+4tAdb/TAOorKFhAN",
+ "IjYBcuZbBdgN3ccDgJjzRd0TCYepDuXUPuvxSGlRlob/dFLxut8Qms5s62P9S9O2T1xUN3I9E2Bm1x4m",
+ "B/nSYtYGDhbU2HY4MinopdFNaKlZX0IfZsOMiWI8hWQT5Ru2PDOtQhbYwqQDRrILTQazdZijQ79Rohsk",
+ "gi27MLTgAYv9rfWAnzfeoXswWk5AU5ar2jCp3ezNLOiR72ZLGCtSQgpc52tDqzMmCxvUQnWm/G/W7Mnc",
+ "LDZ807Afz4iEJZWZb9E/LQWLSRjPYBWXrrTlG8lgRVgc6Fk9M9Mk9SEnHg5wEGV0G8RLc6EYnyc2OrhN",
+ "qdVBvQeKVJw5BbYE6eCagXRqV/voWKKFj6BtgmMTKpxz5jZIMF3j01rg7G6pWBAVPxhGLFgqBbWxUYPU",
+ "zgKJhIIa6DBK59T+8JybkP3SfvehWu8iD2k3Pq6n10EJU5PocoGbZURtF4kh1ZujLSgYWsg8F1OaJ8bg",
+ "hySDXG91vZmDBJxgS6OvRdrv3gb54uJ9nl1cfCCvTVs8WwC5hPUEI9YkXVA+hyaMEPKLPTXACtIqVC0d",
+ "NO50EHS+0jb07aPgeFQKkSf1kbcb9uipmy7eL1l6CRkx8gpZzGnBB+0dMpOQh4bEVR0YWi7W3oQsS+CQ",
+ "PTog5JgTKEq9dv6VjsXTmZw/0JvmX+GsWYUxasoJLvLggsddGzbCfUee8sNs5iSb8nXHqewgmyfSKz7A",
+ "TnSJARozXJQ/N3pHz7BnoPp6Gj0gKgvFLj6EHzEPirZ2mWV4HGm0m6qmBcNkqKDZ2EhOH5/un/CZPiDk",
+ "HGWHOWApuAJJc8z0UN5xzBQpmDmoqypNAbKjC560IElF4SZ+2PzXiqWL6vDwKZDDR90+Shtz1Z0lLQ90",
+ "+35HDsf2E6KLfEcuRhej3kgSCnEFmT2PhXRte20d9l/qcS/4zz3BTAq6tic5z4tEVbMZS5lFei6MXJ+L",
+ "jtXJBX4BacADo2YVYXqMqgwxita63ZeGAUdR6+k+fD6RUY2dblSpkXY+KtmmHUVgRVOzSopCZm0tgprO",
+ "+kaQFmUSDhB1QW+Y0QUBVEuO35Lv+vLcOiA2w3fecUG00BGQ68F2272HjCgEu7D/MSmF2XXm8o98kkrO",
+ "lO4B6dwRGAGqCTKidA7I/xEVSSnyb1lpqM92QuKBCQ/SZgbUsX5OZ6k1GIIcCrAeIvzyzTfdhX/zjdtz",
+ "psgMlj5pzzTsouObbywTCKXvzAEd0lydRgwodMwbbRpJtF5QtTjY6qTHcXfyzQdDn574CZGZlEIVYxYu",
+ "hZjdw2pZtoraLLCKrdTtHLrbHihS0vWgeV0aACPZWiAvc/Tli1mHIomTfwtWmiGbzJK1hlZW6v99+B9H",
+ "74+T/6LJb4fJi3+ffPj47PrRN70fn1x/993/a//09Pq7R//xbzHjRWk2jcd9/kLVwkDqJMeKn3IbuTWW",
+ "Jzrs1s4PIGafG+4OiZnN9JgPlrQL0b2NbQgzpgRuNtLcWVWW+foelIwdiEhwZwzVco8q+1XMwqRUR3lq",
+ "rTQU/QiD7frrwOnnnfdO9KhU8JxxSArBYR29h8E4/IQfo7YhiqWBzqgghvp2vTct+DtgtefZZTPvil/c",
+ "7UAMva1TZO9h87vjdoJLYTounmwgLwklac7QdS640rJK9QWn6JzrmN4dsvAux2F37UvfJO4fjrhv3VAX",
+ "nCqDw9plFw06ziDijP8BwHttVTWfg+qY4mQGcMFdK8bR0YJz4UkmsRtWgsTo8IFtaazPGc3Ru/wbSEGm",
+ "lW6re8watNa0jXSZaYiYXXCqSQ5UafIT4+crHM6fqj3NcNBLIS9rLAx4BYCDYiqJC9If7VeUp275Cydb",
+ "8QqH/ezlzedWAB72WE6bg/z0xJnCpydo7zQxrh7sny3wUTCeRInMHFELxjE1ukNb5KGx2jwBPWqiZW7X",
+ "L7hecUNIVzRnGdW3I4euiOvxouWODtW0NqLjx/Zr/RA7Ys9FUtL0EvNPRnOmF9X0IBXFxB8BJnNRHwcm",
+ "GYVCcPyWTWjJJqqEdHL1eIs5dgd5RSLi6no8clJH3Xummxs4tqDunHUEyf+tBXnw46tzMnE7pR7YBFc7",
+ "dJCZGDm1ufuVLQeCWby9oGUzfM0B+gRmjDPz/eiCZ1TTyZQqlqpJpUB+T3PKUziYC3JE3JAnVFP0O3W8",
+ "6UN3KNEn6KApq2nOUnIZquKGNYecsRcX7w2BXFx86MWb+4rTTRV3cOMEyZLphah04iISw76rxr+HI1tf",
+ "8KZZx8SNbSnSRTzc+ANO97JUSeCFjS+/LHOz/IAMFcFOmK9IlBbSC0EjGZ0fzezvG+Ei7pIu/a2RSoEi",
+ "/13Q8j3j+gNJnM/nuCzRxYs+1v92ssbQ5LqE3f20DYjNYLGzPS7cGlQ3zmHFQc9sLx+4UHHMmU+IOmxj",
+ "pELjh74tnsxQfxG52dxboykYI4qdSi8Sw1PRVSlDWsgPwV1fOjey0MedFZtzQ3zu7tkUSLqA9BIyDLqh",
+ "X3rc6u5TG5xm8SzLlL0uZlNV8U4DuiCmQKoyo073Ur7uJpcr0Npn1L+DS1ifi+ZKxE2yya/HIxdISgzN",
+ "DDFIafARKAExa7OLD0Z1Nt/FFTHYU5bExlNsFrAni6OaLnyfYQaymukemCdGFDUaNtB7SWUEEZb4B1Bw",
+ "i4Wa8e5E+tHoDZWapay0698tHvS21ccMsk2oR8W4mHWldU+YRqW3bZxMqYoLbjBfzH4YHupmEfmZrDfP",
+ "BogJlhxwhDvNIYhkKsfZVKKx45dt71APgRanEpC80aYejDZGQrW9cCF5dtUE4tHVsouC2xoINVTkc2VY",
+ "O+TBzLw5XNHB6NPgXZ/TIAEmuEJa3+Txgq3LDOP6Vpet5uBv/PhrPv5uz2h8o3s645HLyYxth+Co3TPI",
+ "YU5dsAWzPX2g34L2QAUbZOD4eTbLGQeSxHJpqFIiZTb+3shyNwcY4+8bQqxjhew8QoyMA7DRS40Dkzci",
+ "5E0+vwmQHBi6takfG/3bwd+w3cvblNVwZuVW868vOxomGjfX3uw29r0/41FUJA1Z5q1WxDaZQu8oEyNR",
+ "I5r6/pC+10VBDqiOk5ZkTS5jXjJjVQCS4ZnvFpjr5CGbGSX/KAhWSJibs3dzXjXc6h0wn9dncCU0JDMm",
+ "lU7wqBxdnmn0g0Jj8AfTNC5+Wqgi9l4+y+LSB6e9hHWSsbyK77ab968nZto39blFVdNLWKOSAZouyBTr",
+ "SBgt1JretNkwtc0n27jg13bBr+m9rXc3WjJNzcRSCN2Z4yuhqo482cRMEQKMEUd/1wZRukG8BBkwfdkS",
+ "5N7YPB3M6TnYdFrvMdONs4gGJa8dKbqWwNDduAqbbGbzyYIyDP27DQM8QMuSZavO2dmOOhAuQwP+Boa6",
+ "tfgjIaBRPdgWDATn5Fj6rAR/1rdbGuhMW1Cjl2K4HTPdxMZAIIRTMeXLQfURZUgbM8C24eocaP5XWP/N",
+ "tMXljK7Ho7sd+WO4diNuwfXbenujeEYfsj0CtjxnN0Q5LUsprmieuOtjQ6QpxZUjTWzub5t9ZlEXP36f",
+ "vzp+/daBjxmTQKVLFNy0KmxXfjWrMifiWLbgeeAZQWvVn52tIRZsfn2HN3Sm+OTOli1npJgjLstetYIL",
+ "WdE5V2bxUNZWV0mYEHorzmxllN7VMxeml94ry/c4LE6hzQ5vkQvhXBsKgBS2xo0igneTaowZh6dMJJeC",
+ "rs0uWsdsX0DwqkgMCyQqZ2ncdcCnynARrwq8WLXWQLDxgEFoRqzYgPucVywYyzRTO0SKOkAGc0SRiW6d",
+ "DbibClecsOLsnxUQlgHX5pN0SXYtZjG84fPG+yotnqPuBnZp6vXwd9HzZqghDY9AbFbyoZc3ckPCH/r8",
+ "Qmv3tPkhcM7dIEgTzthTSxsCLI4+HDXbSPei7a0Nawn2ZZAhDFt3ZnshQ+86WFhAB+aIFiYclNjHw9Ia",
+ "7x7sLqcbsYzghgLZ5oPSXInIMBVfUm7rjJl+FoeutwJ7bje9lkLihT0F0Qg1U8lMit8gfpqcmY2K5P05",
+ "VKLJhr0PIhehukK09ow0FSQ9fkM4Bkl7yJoKPpJ2EG2Aw5HKA/c1JjJ7JxPllqxtTbRW6DbOHGG6xcSO",
+ "3zCHg7mXopLT5ZTGyoMYo8bAdNwESlruMC2I7+x3QdX5+472gphL3ZbZW24lyCY5t3+j+pYGytdF8hmk",
+ "rKB53DuaIfbb158yNme2sFylIKhc5gayFTktFbnqbzYU1aDmdEYOx0FtRLcbGbtiik1zwBaPbYspVWBv",
+ "WYU3r1xSkAauFwqbP9mh+aLimYRML5RFrBKkNiLthRrvf56CXgJwcojtHr8gD9HzrtgVPDJYdLbI6Ojx",
+ "C0zJsH8cxpSdqyC5Sa5kKFj+0wmWOB1j6MGOYZSUG/UgeuPSlv0dFmEbuMl23YWXsKWTett5qaCcziEe",
+ "US22wGT74m6i466DF57ZmpVKS7EmTMfnB02NfBpIyzLiz4Lh7mcUhoG0IEoUhp6asmR2Uj+cLYDpSgV5",
+ "uPxHDHOU/p5N59D6eZ20VpfHVo3BqDe0gDZax4Tai8l4VchdaHcC8WCgpgvIq/gkcmCDvd50fclDLnhS",
+ "GN7JHjUJfwH9RUuaCE3z6LTay65u5srmoXc1tcwoySBiqxZiaSCTbo3iSsbXSSsz1S/vXjvFUAgZq0/S",
+ "SEOnJCRoyeAqyrHdxLXaMqnVhcd8zED5vmJ59rcm3bRTCkxSni6i/s+p6fhrU7uwRrvFevTa54JyDnl0",
+ "OMvLv3qej0ilf4hd5ykY37Ftt8SXXW5ncQ3gbTA9UH5Cg16mczNBiNV2/l2dOJLPRUZwnqbAQEMI/Xt5",
+ "Qbmjf1agdOwOIX6wuU54xjb2iq22Q4BnqO0PiL1zZ2Bp3ZpCLcuKKrc3cCCbg3QOmKrMBc3GxIxz/ur4",
+ "NbGzKnd/HO96YbWfub2/2VpF52wVVCO5yYXWodSo3cfZnDNiVq00liNQmhZlLOvVtDj3DTC19oqy3Kcf",
+ "oPoJsXNATqzmV16v2Emae8ukns7JGqQJ8x+tabpAldpSQMMkv3uZKk+VKijXWle+rAuK2Ku4WvhKVbZQ",
+ "1ZgIY/csmbIlp+EK2om2dda5M+l84m17ebLi3FJKXD9tuBVxG7R74Gxgz7ukopB1EH9DNaNEJVO4adWu",
+ "M+wVvdfXLQHWq9Nqb/jUdRL9UwIp5YKzFG/VBUWua5Bd+epdfKY7XEDsHpc9izsOjTBXtPBYnTrgsDhY",
+ "iswLQoe4vsMo+Go21VKH/VNjnWRzEJyDVk6yQTb2xeXcOY5xBa5ADFYyD+SkOY5344fR0EZTIuKGZITp",
+ "fwPmyg/mG5oqzKXsXDKOF6Yd2lx2kD1pYXVdbY53TJO5AOXW074mp96bPgd4VSyD1YcDX40Xx7AuZLNs",
+ "G7PoD3XsIxguYmDavjRtCbqLm59bqYZ20uOydJNGb7nVOxwrjzeI4IgXPPFuyAC59fjhaBvIbWPoEfWp",
+ "ITS4wsAFlKiHe4QxUHbhlTnUWoqyt7dtyD96NYPxCBivGYemVnREQaRRlYAbg/w60E+lkmprAu4k086B",
+ "5hgpiQk0pZ3r6K5DdTYYUYJr9HMMb2NTJHFAcNQNGsON8nVdotpQd2BMvMTa+A6R/ZKHaFU5IyrDpK5O",
+ "EcSY4DCC25cPbSuAPhv0bSLbXUtqOecmmmgoCT1jyhxHimkeSWM5qT8GhUAxX266xn9jl96HV+ACa7cu",
+ "0oIdb2xfbi6Ykpu9TxSb33JXmv73uC0dHgj3KEb9r4xYCe/t9OoXWMFTX6vBEL7wZZnxUFEnprdpFgVd",
+ "9NDWVNjdfGgdrpU7RtE4kMjzrrkxSq30tb7BoXSedDD7jGqXWqop2VS5yBa4jY1g45C2sK59pCbqGBiK",
+ "PdrQo/nc672b3dCzwnDsjQj1Qe0+QH/1WSukpMw5vhsW6WPW5bf1Mw53yXxpNri7CJc1hoPEVnLLJK+d",
+ "eK+PpQhjh6kBW8jzsoVSexukY0kKCfeM2kCF3hC1/aSHXZeH60CKqRT017nzBrRwO4D7XRDfyIU+cofZ",
+ "WU93Yed4Ur3pjvLEIsRf++hLk88mDVp1ud28sV3/25D3wJ6QBxxVHZxWLM+2bW7L7dhcZ0bH2q/Tb5+1",
+ "vHef80L1rzYg32c3d7f0Joq/uwmImMhaW5MHUwUOxR18ia5bxHOI9cfSSjK9xtwdb2myX6N5yT8Cd9XJ",
+ "3WMPdQTUBeDsO0PONT2vWzdPw/wobLn2wpi/aApqLMzzakWLMgfHF989mP4Jnv75WXb49PGfpn8+fH6Y",
+ "wrPnLw4P6Ytn9PGLp4/hyZ+fPzuEx7NvX0yfZE+ePZk+e/Ls2+cv0qfPHk+fffviTw/8uywW0ObNk79j",
+ "1YHk+O1pcm6AbXBCS/ZXWNt7xoaM/Q1mmiInQkFZPjryP/0vz2EHqSiCpyTdryPn6R8ttC7V0WSyXC4P",
+ "wi6TOZaKTLSo0sXEz9Ovg/T2tHbQ2oA/7qj1vRlSwE11pHCM3969Ojsnx29PDxqCGR2NDg8ODx5joZAS",
+ "OC3Z6Gj0FH9C7lngvk8csY2OPl6PR5MF0Fwv3B8FaMlS/0kt6XwO8sBd5TY/XT2ZeP/O5KMLcl+bUeex",
+ "rCZf3q32L/ZvOI+tw8KcWepybsFlHuXu+IzJ1ObvEFdRkGfoAbS5GUa01cg6zYKHa4MXUsatd3fff0VP",
+ "ycVqjcWuisceB66zzIcfhwrez/RvZj7/83Uk0PSh8+DPk8PDT/DIz7g1isfLLV8LenaPILZPUHcGtDtc",
+ "Tyr8RHNDN1A/ADnCBT3+ahd0yvE+hxFbxIrl6/Ho+Ve8Q6fcMA7NCbYMUkj6ovAXfsnFkvuWRiVXRUHl",
+ "GhVucJE8NK2uB0VuO3nL3cgblsMQVL0LLvG2HNvTtaezMVF1kfNSMmEMB3wuNYNUAkU1LyTGg5r6ee6q",
+ "Itiq7j8d/x29xz8d/90Wpow+JRlMb4u0toX4j6Aj9R2/XzfPoW2U6F9KTI5/t69vfj06766qZl8l9Kut",
+ "ErqD0N7v7r4G7FdbA/brNklXdeItJVzwhGNRgysggVtrb6P+rm3U54dPv9rVnIG8YimQcyhKIalk+Zr8",
+ "wuuMoLuZ4LXMqXiQo7VR/vRePGis6MB8DwosTT62HlTJtjtPWjeys1bdexp/kDaoPeOyQcfNNVPKM5vJ",
+ "4WO1auyvW6K3zt5rtvsx7l3GPIgZ6UGo5fv16ckudnlrTcENtJht3sLXzZ65/qQei1s/FvwpNUAPju9p",
+ "RnzK6CeWzbsJ02eHzz4fBOEuvBGa/IBJZp9YpH9SP0GcrAJhg0XMJh/9ZbUdBIy7CNoWLd0XpmNCxXDo",
+ "2OXsuzLN9YMvRp5YQWjv4valhplhV3nRv6sakxTN/bzfi4y40QPee7mwlwu3lgtdgmokgn1tdPIRE2xD",
+ "cdBjSXwo4A8UKAmq50lR+PItgsxApwv3PHsnlh0RKz4xeVimbLpWeGf5sn+c/y6P8+9wINkj+C4I7gm1",
+ "V+5NeMtebhFfu+Mj0JYkIW/QHEIG9znXf0S3x6fUyJ96QW8EBwIrprCqpqXFfbixNhfqZ5HqtxLCivcD",
+ "pkM76PhRr1h2PakfThoyKt669302GhWNpmY8eBI6dK/QsgQq1a2V9PZw2HlnxtOTsAykqFOdCG2eT4qA",
+ "YvByw0jiv+8SRvzjRuv2b3zt3/i63Rtfn/XI3CTkWFHl40SyIzW+6Hlaf5Hz9BvBE9S2wLW3/Fpo+XJn",
+ "a7zW0qrH7i8pB6/nm99DOaAOdlKvMBhKaAkVTOkcJmOnbN2b+JOP+B9MBr1u0i7tjfyJdbNt0rf2NbXR",
+ "vSZQ7F/A+wpewPvyLrw7maOd1Uoo6yQ0jNYj/Tfc4itg98tCtzOTXXO1qHQmlkEec/PSwCAn2Rb3yklv",
+ "RAZ23HYuf78IDLUvLisPRIeBahkRL3rmsdm0s9femXLPzae0mi+0rXoULalWd0xoagk/sceB+IRN0oRt",
+ "5R96vwJCcwk0W5MpACdiahbd7CsusvNWgpOE8SIyDVylFCkoBVkSlhXZBFqdVY7+QL0BTwg4AlzPQpQg",
+ "MypvCawVCZsB7VY/qsGtvT6O6/tQ7zb9pg3sTh5uI5XQPLunBWbV5OCegoqgcEecoKnKPvH++Uluu31V",
+ "iZUrIm9x2q/nrMBrbpxyoSAVPFPRwbCg/Ta2xTcUg7UosEXkPKd8zrcabQX+oRthZuT4I6R2DfXLG3VN",
+ "GWtpQRategarDXO9gVU9l5jFXjm1JQ63jTyEpWD8usqMrj0SVAceCTNcZHH47D11hlek7HAIRIOITYCc",
+ "+VYBdsNj/wAgTDWIrl9EaVNOUH5QaVGWhv90UvG63xCazmzrY/1L07ZPXC4RHOV6JkCFZraDfGkxawtI",
+ "LagiDg5S0Etnoc9dPnYfZsOMiWI8dW9EDL2cxAo4M61CFtjCpF0jL2T/zuOeLebo0G+U6AaJYMsuDC04",
+ "Zlb+LozAm57yuv6DT+j2bJvVgXnVmJX278mSMp3MhLQaM8HSqZEIanv2/6RMu4K97gyshXNbuuKrVqC4",
+ "cYLyaSpMZnWvWjk+Mrvfz58wU/0g5E4B28a3qgUxCyMV18xft8PXD72N+fuLfu6t5731vLee99bz3nre",
+ "W89763lvPX9q6/nLZGCSJPFy2l+viV2uIaOv0sL/iu6vfM4LJ43RX5v8eEgwJrrh442ZGRpoPnFFSzGE",
+ "LtRgindYADU10zFOypziqwwr7S8a44MM3z7ziQJ1KT9bA8nIGtPg6RNy9pfj54+f/Prk+bdG+tj3OFtt",
+ "H/oC8kqvc3jkMtjqAic+lQ04xZqBmMlG/ekn9VkO1pqfsRwIPsv9CpufwBXkxpS3sU5iDiP949E50Pyl",
+ "Q46VSqD09yJbdwjHrH+CqGiTTBMwZ5zKSBnOPqH0kKwFluJ1dWV7J6jre82ZiOcJ9Dds214NvBcQJe9N",
+ "9LI1L8BVUHdj7xIjM3vq0UlcCc8vKrIJQuTIrBFPv5tM+u4zWo5xsK2xKhz/fa1Z7x7xUcZDth0bmsyq",
+ "FPBVVEdxq8Q0mgNPnFhIpiJb+ye0XEXglpS1pVqHheyrFaSV4SWExLHBQ/XIPUCNJadDV0+0VH7w8gPg",
+ "eM2jiZ9bcNqqoxvl5u2po/2GwZ1zJrvD9aVGkHTxUEgyl6IqH9nHmvgaj8RFSfnau8GMrYiPIOB745jn",
+ "fb+Sui4A3ZOzu9fwD88reGm/+7tFC1lS5Qv4Z7aCf7yKYbfO/HaMN1WUt1W9s+uNVnwfqO/e30S/yy7R",
+ "sXb9lSATveKRusudKsv7y1X/I1TCWymumDk4RyVsPwurEQgHWzWDDEQWqoZOqQ2vG9ry9B1dhoU7dpWp",
+ "q8QZnne2ShdgH0X1VlqkLonRl1LQLKUK74+4pzE+scWqV6cRvwOCifWl+pm+RoFvf/8Ix93JnmxnersJ",
+ "sQCMsoU0v6x12WSbHrvrOi1s7F0BfxRXwPee+RShRNJllzmD52p2EFN0qVc8KqUmzWO+0Yy3gCHq1z/v",
+ "MXbXG74dwgue2bQhCMhLQkmaMwxQCK60rFJ9wSm6QMPnTfvhPe/YHTalXvomcS98xEnuhrrgFB+kqx2j",
+ "UZNqBrHHWwC8xaaq+RyU7kjiGcAFd60Ybx6/K1gqRWLzPo26NhL9wLYs6JrMaI4+/N9ACjI1p4iwZgk6",
+ "FJVmee7iiWYaImYXnGqSgxH6PzFj0JnhvM+pjpG7Z5I8FuIXK1xF2YEXG3+0X/HSglu+9xuhe8t+9tnQ",
+ "4y9T9zn6ELOD/PTE1RM7PcESMU0ksQf7ZwsvFYwnUSIzGt9F5Lu0RR661z+RgB41MUm36xfcGNNaEBT0",
+ "zdP7NyOHbhigx4uWOzpU09qITrTAr/VD7C7rXCTmyIjPTIzmTC+qKVZe9ndcJ3NR33edZBQKwfFbNqEl",
+ "m6gS0snV4y32wR3kFYmIq73m/uM48bvPQ9cbb4zY3t4P6OV7KN/6+67ZujVFaV8hdV8hdV9Dc18hdb+7",
+ "+wqp+/qh+/qh/1Prhx5stBBdzY2tFf1aN40z+6q9hNTOXAvwsFmr9l8/LMn0ASHn+GQ9NToArkDSnKRU",
+ "WcOI20y5gs0XmqgqTQGyowuetCCxb6ebiR82/7XH3Ivq8PApkMNH3T7WbxFI3n5fNFXxk33A8DtyMboY",
+ "9UaSUIgrcJXAsHlWYazY9to67L/U4/4se1tX0LV1rixoWYJRa6qazVjKLMpzYQ4Dc9HJ7+MCv4A0wNlC",
+ "E4RpW3QV8Yl5kS47h7rb5jGju6/fb/DwzXGHXPZFTT6FgX0CmrJc1bcTIucpPNl0KWtJVcO6tVTx5QxA",
+ "+d9cwNrNkrNLCHNwMftgSWXmW0Qfnm3K7PqHlfuupXb90QxW3iToAj2rZ2baVgw1B87eU4B9z5at4pnm",
+ "wpxZE/vA07bMdnwxyvR7oNBrahkN7VWEawbS5d6jNysXChItmkrNw3BsQoUruXgbJKjBIjUWOLtbKva0",
+ "IX4wIhG9whSdwojUzgKNUKEGOonXkGzu//Ccm5D90n53r23VXsGODz4yrqfXwTTjmkSXqFxQ6nWRGFL9",
+ "jLgKCQOOaPt4sE3kuPUTwp3uvdcZ8+zi4gN5bStl49Oil7Ce2Eft0gXlc1A1jkJ+sVeHbHpPkF/eQeP9",
+ "PVtstFcy8OD4aT/nvIv3S5ZeQkaMvEIWc6nwkcMEeViX/Z0xlORrf4/EqsNHB4QccwJFqdfEStiOz7sz",
+ "OX+gN82/ChV4WzNG0hdTYFcg78hTfpjNnKTAMNwdp7KDbJ5Ir/gAO9Fl5Gi9ax3IyEm6c64NiMpCcR8O",
+ "ir123GvHvXbca8e9dtxrxz+8duw5pfZum8/htvnijps/UA3sfbnr39mCwmTW1nsWd/Bm1692x6xx56du",
+ "XsUPX5lHL2P9vvz7D9cfzDd55R2QzaPpR5MJWhULofRkdD3+2HlQPfxoRCmd2xGcg6+U7Aqr1X+4/v8B",
+ "AAD//8YbKCxl8wAA",
}
// GetSwagger returns the Swagger specification corresponding to the generated code
diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go
index 0dfe5b7b8..2efbdab65 100644
--- a/daemon/algod/api/server/v2/generated/types.go
+++ b/daemon/algod/api/server/v2/generated/types.go
@@ -580,6 +580,19 @@ type PostTransactionsResponse struct {
TxId string `json:"txId"`
}
+// ProofResponse defines model for ProofResponse.
+type ProofResponse struct {
+
+ // Index of the transaction in the block's payset.
+ Idx uint64 `json:"idx"`
+
+ // Merkle proof of transaction membership.
+ Proof []byte `json:"proof"`
+
+ // Hash of SignedTxnInBlock for verifying proof.
+ Stibhash []byte `json:"stibhash"`
+}
+
// SupplyResponse defines model for SupplyResponse.
type SupplyResponse struct {
@@ -647,6 +660,13 @@ type GetBlockParams struct {
Format *string `json:"format,omitempty"`
}
+// GetProofParams defines parameters for GetProof.
+type GetProofParams struct {
+
+ // Configures whether the response object is JSON or MessagePack encoded.
+ Format *string `json:"format,omitempty"`
+}
+
// TealDryrunJSONBody defines parameters for TealDryrun.
type TealDryrunJSONBody DryrunRequest
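For callers that hit the endpoint without the generated client, the response body decodes straight into the ProofResponse type defined above. A brief sketch, assuming the repository's generated package import path:

```go
package example // illustrative only, not part of this change

import (
	"encoding/json"

	"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
)

// decodeProof turns the raw JSON body returned by the proof endpoint into the
// generated ProofResponse type ([]byte fields arrive base64-encoded in JSON).
func decodeProof(body []byte) (generated.ProofResponse, error) {
	var resp generated.ProofResponse
	err := json.Unmarshal(body, &resp)
	return resp, err
}
```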
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index acc5605c9..aaed6a930 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -182,6 +182,64 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge
return ctx.Blob(http.StatusOK, contentType, data)
}
+// GetProof generates a Merkle proof for a transaction in a block.
+// (GET /v2/blocks/{round}/transactions/{txid}/proof)
+func (v2 *Handlers) GetProof(ctx echo.Context, round uint64, txid string, params generated.GetProofParams) error {
+ var txID transactions.Txid
+ err := txID.UnmarshalText([]byte(txid))
+ if err != nil {
+ return badRequest(ctx, err, errNoTxnSpecified, v2.Log)
+ }
+
+ ledger := v2.Node.Ledger()
+ block, _, err := ledger.BlockCert(basics.Round(round))
+ if err != nil {
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
+
+ proto := config.Consensus[block.CurrentProtocol]
+ if proto.PaysetCommit != config.PaysetCommitMerkle {
+ return notFound(ctx, err, "protocol does not support Merkle proofs", v2.Log)
+ }
+
+ txns, err := block.DecodePaysetFlat()
+ if err != nil {
+ return internalError(ctx, err, "decoding transactions", v2.Log)
+ }
+
+ for idx := range txns {
+ if txns[idx].Txn.ID() == txID {
+ tree, err := block.TxnMerkleTree()
+ if err != nil {
+ return internalError(ctx, err, "building Merkle tree", v2.Log)
+ }
+
+ proof, err := tree.Prove([]uint64{uint64(idx)})
+ if err != nil {
+ return internalError(ctx, err, "generating proof", v2.Log)
+ }
+
+ var proofconcat []byte
+ for _, proofelem := range proof {
+ proofconcat = append(proofconcat, proofelem[:]...)
+ }
+
+ stibhash := block.Payset[idx].Hash()
+
+ response := generated.ProofResponse{
+ Proof: proofconcat,
+ Stibhash: stibhash[:],
+ Idx: uint64(idx),
+ }
+
+ return ctx.JSON(http.StatusOK, response)
+ }
+ }
+
+ err = errors.New(errTransactionNotFound)
+ return notFound(ctx, err, err.Error(), v2.Log)
+}
+
// GetSupply gets the current supply reported by the ledger.
// (GET /v2/ledger/supply)
func (v2 *Handlers) GetSupply(ctx echo.Context) error {
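Because the handler above returns the proof as a flat concatenation of sibling digests, a verifier first has to slice it back into fixed-size hashes before walking the tree toward the block's TxnRoot. A rough sketch of that first step, assuming 32-byte SHA-512/256 digests as used elsewhere in the codebase (crypto.DigestSize); the actual root comparison is omitted.

```go
package example // illustrative only, not part of this change

import "fmt"

const digestSize = 32 // assumed to match crypto.DigestSize (SHA-512/256)

// splitProof undoes the concatenation performed in GetProof above,
// returning the individual sibling digests in order.
func splitProof(proof []byte) ([][digestSize]byte, error) {
	if len(proof)%digestSize != 0 {
		return nil, fmt.Errorf("proof length %d is not a multiple of %d", len(proof), digestSize)
	}
	out := make([][digestSize]byte, 0, len(proof)/digestSize)
	for i := 0; i < len(proof); i += digestSize {
		var d [digestSize]byte
		copy(d[:], proof[i:i+digestSize])
		out = append(out, d)
	}
	return out, nil
}
```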
diff --git a/daemon/kmd/lib/kmdapi/requests.go b/daemon/kmd/lib/kmdapi/requests.go
index a28414064..0411dddbf 100644
--- a/daemon/kmd/lib/kmdapi/requests.go
+++ b/daemon/kmd/lib/kmdapi/requests.go
@@ -161,6 +161,11 @@ type APIV1POSTKeyListRequest struct {
type APIV1POSTTransactionSignRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
+	// Base64 encoding of the msgpack encoding of a `Transaction` object.
+	// Note: the SDKs and goal usually generate `SignedTxn` objects;
+	// in that case, the `txn` / `Transaction` field of the generated
+	// `SignedTxn` object is what must be passed here.
+ //
// swagger:strfmt byte
Transaction []byte `json:"transaction"`
PublicKey crypto.PublicKey `json:"public_key"`
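To make the note above concrete, a hedged sketch of assembling the sign request: rawTxn is assumed to already hold the canonical msgpack encoding of the inner Transaction (for a SignedTxn produced by goal or an SDK, encode only its txn field), and the wallet handle token comes from a prior wallet-handle call.

```go
package example // illustrative only, not part of this change

import "github.com/algorand/go-algorand/daemon/kmd/lib/kmdapi"

// buildSignRequest fills in the fields shown above; rawTxn must be the msgpack
// encoding of the inner Transaction, not of a whole SignedTxn. The []byte field
// is emitted as base64 when the request is marshaled to JSON.
func buildSignRequest(walletHandleToken string, rawTxn []byte) kmdapi.APIV1POSTTransactionSignRequest {
	return kmdapi.APIV1POSTTransactionSignRequest{
		WalletHandleToken: walletHandleToken,
		Transaction:       rawTxn,
	}
}
```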
diff --git a/data/basics/teal.go b/data/basics/teal.go
index 2b3afbe04..38cd3780e 100644
--- a/data/basics/teal.go
+++ b/data/basics/teal.go
@@ -285,7 +285,7 @@ func (tk TealKeyValue) Clone() TealKeyValue {
}
// ToStateSchema calculates the number of each value type in a TealKeyValue and
-// reprsents the result as a StateSchema
+// represents the result as a StateSchema
func (tk TealKeyValue) ToStateSchema() (schema StateSchema, err error) {
for _, value := range tk {
switch value.Type {
diff --git a/data/basics/units.go b/data/basics/units.go
index 619d7acfd..9f92429e9 100644
--- a/data/basics/units.go
+++ b/data/basics/units.go
@@ -121,7 +121,7 @@ func OneTimeIDForRound(round Round, keyDilution uint64) crypto.OneTimeSignatureI
}
}
-// SubSaturate subtracts two rounds with saturation arithmetic that does not
+// SubSaturate subtracts x rounds with saturation arithmetic that does not
// wrap around past zero, and instead returns 0 on underflow.
func (round Round) SubSaturate(x Round) Round {
if round < x {
diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go
index 97ee51988..9e76876b0 100644
--- a/data/basics/userBalance.go
+++ b/data/basics/userBalance.go
@@ -30,11 +30,14 @@ import (
type Status byte
const (
- // Offline indicates that the associated account is delegated.
+ // Offline indicates that the associated account receives rewards but does not participate in the consensus.
Offline Status = iota
- // Online indicates that the associated account used as part of the delegation pool.
+ // Online indicates that the associated account participates in the consensus and receives rewards.
Online
- // NotParticipating indicates that the associated account is neither a delegator nor a delegate. Currently it is reserved for the incentive pool.
+ // NotParticipating indicates that the associated account neither participates in the consensus nor receives rewards.
+ // Accounts that are marked as NotParticipating cannot change their status, but can receive and send Algos to other accounts.
+ // Two special accounts that are defined as NotParticipating are the incentive pool (also known as the rewards pool) and the fee sink.
+ // These two accounts also have additional Algo transfer restrictions.
NotParticipating
// MaxEncodedAccountDataSize is a rough estimate for the worst-case scenario we're going to have of the account data and address serialized.
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index 1f52eed80..eaf788534 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -47,9 +47,8 @@ type (
Seed committee.Seed `codec:"seed"`
// TxnRoot authenticates the set of transactions appearing in the block.
- // More specifically, it's the root of a merkle tree whose leaves are the block's Txids.
- // Note that the TxnRoot does not authenticate the signatures on the transactions, only the transactions themselves.
- // Two blocks with the same transactions but with different signatures will have the same TxnRoot.
+ // The commitment is computed based on the PaysetCommit type specified
+ // in the block's consensus protocol.
TxnRoot crypto.Digest `codec:"txn"`
// TimeStamp in seconds since epoch
@@ -179,11 +178,15 @@ type (
// (instead of materializing it separately, like balances).
//msgp:ignore UpgradeState
UpgradeState struct {
- CurrentProtocol protocol.ConsensusVersion `codec:"proto"`
- NextProtocol protocol.ConsensusVersion `codec:"nextproto"`
- NextProtocolApprovals uint64 `codec:"nextyes"`
- NextProtocolVoteBefore basics.Round `codec:"nextbefore"`
- NextProtocolSwitchOn basics.Round `codec:"nextswitch"`
+ CurrentProtocol protocol.ConsensusVersion `codec:"proto"`
+ NextProtocol protocol.ConsensusVersion `codec:"nextproto"`
+ NextProtocolApprovals uint64 `codec:"nextyes"`
+ // NextProtocolVoteBefore specifies the last voting round for the next protocol proposal. If there is no voting for
+ // an upgrade taking place, this would be zero.
+ NextProtocolVoteBefore basics.Round `codec:"nextbefore"`
+ // NextProtocolSwitchOn specifies the round number at which the next protocol would be adopted. If there is no upgrade taking place,
+ // nor a wait for the next protocol, this would be zero.
+ NextProtocolSwitchOn basics.Round `codec:"nextswitch"`
}
// CompactCertState tracks the state of compact certificates.
@@ -486,11 +489,21 @@ func (block Block) PaysetCommit() (crypto.Digest, error) {
return crypto.Digest{}, fmt.Errorf("unsupported protocol %v", block.CurrentProtocol)
}
- switch params.PaysetCommit {
+ return block.paysetCommit(params.PaysetCommit)
+}
+
+func (block Block) paysetCommit(t config.PaysetCommitType) (crypto.Digest, error) {
+ switch t {
case config.PaysetCommitFlat:
return block.Payset.CommitFlat(), nil
+ case config.PaysetCommitMerkle:
+ tree, err := block.TxnMerkleTree()
+ if err != nil {
+ return crypto.Digest{}, err
+ }
+ return tree.Root(), nil
default:
- return crypto.Digest{}, fmt.Errorf("unsupported payset commit type %d", params.PaysetCommit)
+ return crypto.Digest{}, fmt.Errorf("unsupported payset commit type %d", t)
}
}
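Since the header's TxnRoot is now whichever commitment the protocol's PaysetCommit type selects (flat or Merkle), a consumer can recompute and cross-check it with the exported PaysetCommit method shown above. A minimal sketch, with checkTxnRoot as an illustrative name:

package paysetcheck

import (
	"fmt"

	"github.com/algorand/go-algorand/data/bookkeeping"
)

// checkTxnRoot recomputes the payset commitment using the protocol-selected
// PaysetCommit type and compares it against the TxnRoot in the block header.
func checkTxnRoot(block bookkeeping.Block) error {
	commit, err := block.PaysetCommit()
	if err != nil {
		return err
	}
	if commit != block.BlockHeader.TxnRoot {
		return fmt.Errorf("TxnRoot mismatch: header %v, recomputed %v", block.BlockHeader.TxnRoot, commit)
	}
	return nil
}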
diff --git a/data/bookkeeping/block_test.go b/data/bookkeeping/block_test.go
index c3ab8a8ec..4bce02f71 100644
--- a/data/bookkeeping/block_test.go
+++ b/data/bookkeeping/block_test.go
@@ -375,3 +375,57 @@ func TestDecodeMalformedSignedTxn(t *testing.T) {
_, _, err = b.DecodeSignedTxn(txib2)
require.Error(t, err)
}
+
+// TestInitialRewardsRateCalculation performs positive and negative testing for the InitialRewardsRateCalculation fix by
+// running the rounds the same way eval() executes them over RewardsRateRefreshInterval rounds.
+func TestInitialRewardsRateCalculation(t *testing.T) {
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ runTest := func() bool {
+ incentivePoolBalance := uint64(125000000000000)
+ totalRewardUnits := uint64(10000000000)
+ require.GreaterOrEqual(t, incentivePoolBalance, consensusParams.MinBalance)
+
+ curRewardsState := RewardsState{
+ RewardsLevel: 0,
+ RewardsResidue: 0,
+ RewardsRecalculationRound: basics.Round(consensusParams.RewardsRateRefreshInterval),
+ }
+ if consensusParams.InitialRewardsRateCalculation {
+ curRewardsState.RewardsRate = basics.SubSaturate(incentivePoolBalance, consensusParams.MinBalance) / uint64(consensusParams.RewardsRateRefreshInterval)
+ } else {
+ curRewardsState.RewardsRate = incentivePoolBalance / uint64(consensusParams.RewardsRateRefreshInterval)
+ }
+ for rnd := 1; rnd < int(consensusParams.RewardsRateRefreshInterval+2); rnd++ {
+ nextRewardState := curRewardsState.NextRewardsState(basics.Round(rnd), consensusParams, basics.MicroAlgos{Raw: incentivePoolBalance}, totalRewardUnits)
+ // adjust the incentive pool balance
+ var ot basics.OverflowTracker
+
+ // get number of rewards per unit
+ rewardsPerUnit := ot.Sub(nextRewardState.RewardsLevel, curRewardsState.RewardsLevel)
+ require.False(t, ot.Overflowed)
+
+ // subtract the total dispersed funds from the pool balance
+ incentivePoolBalance = ot.Sub(incentivePoolBalance, ot.Mul(totalRewardUnits, rewardsPerUnit))
+ require.False(t, ot.Overflowed)
+
+ // make sure the pool retains at least the min balance
+ ot.Sub(incentivePoolBalance, consensusParams.MinBalance)
+ if ot.Overflowed {
+ return false
+ }
+
+ // prepare for the next iteration
+ curRewardsState = nextRewardState
+ }
+ return true
+ }
+
+ // test expected failure
+ consensusParams.InitialRewardsRateCalculation = false
+ require.False(t, runTest())
+
+ // test expected success
+ consensusParams.InitialRewardsRateCalculation = true
+ require.True(t, runTest())
+}
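The test above boils down to one formula change at genesis: with the fix, the pool's MinBalance is held back before dividing by the refresh interval, so paying the rate out for a full interval can no longer push the pool under MinBalance. A small sketch of the two variants (the initialRewardsRate helper and the concrete numbers below are illustrative, the pool figure being the one used in the test):

package rewardscalc

import "github.com/algorand/go-algorand/data/basics"

// initialRewardsRate seeds the genesis RewardsRate. With fixEnabled, the
// pool's MinBalance is excluded before dividing by the refresh interval;
// without it, the full pool balance is divided.
func initialRewardsRate(poolBalance, minBalance, refreshInterval uint64, fixEnabled bool) uint64 {
	if fixEnabled {
		return basics.SubSaturate(poolBalance, minBalance) / refreshInterval
	}
	return poolBalance / refreshInterval
}

For instance, with the test's pool of 125,000,000,000,000 microAlgos, an assumed MinBalance of 100,000 and an assumed 500,000-round refresh interval, the fixed formula yields 249,999,999 per round instead of 250,000,000, leaving at least the MinBalance in the pool after a full interval.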
diff --git a/data/bookkeeping/txn_merkle.go b/data/bookkeeping/txn_merkle.go
new file mode 100644
index 000000000..043a09cc8
--- /dev/null
+++ b/data/bookkeeping/txn_merkle.go
@@ -0,0 +1,98 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package bookkeeping
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// TxnMerkleTree returns a cryptographic commitment to the transactions in the
+// block, along with their ApplyData, as a Merkle tree. This allows the
+// caller to either extract the root hash (for inclusion in the block
+// header), or to generate proofs of membership for transactions that are
+// in this block.
+func (block Block) TxnMerkleTree() (*merklearray.Tree, error) {
+ return merklearray.Build(&txnMerkleArray{block: block})
+}
+
+// txnMerkleArray is a representation of the transactions in this block,
+// along with their ApplyData, as an array for the merklearray package.
+type txnMerkleArray struct {
+ block Block
+}
+
+// Length implements the merklearray.Array interface.
+func (tma *txnMerkleArray) Length() uint64 {
+ return uint64(len(tma.block.Payset))
+}
+
+// GetHash implements the merklearray.Array interface.
+func (tma *txnMerkleArray) GetHash(pos uint64) (crypto.Digest, error) {
+ if pos >= uint64(len(tma.block.Payset)) {
+ return crypto.Digest{}, fmt.Errorf("txnMerkleArray.Get(%d): out of bounds, payset size %d", pos, len(tma.block.Payset))
+ }
+
+ var elem txnMerkleElem
+ elem.stib = tma.block.Payset[pos]
+
+ stxn, _, err := tma.block.DecodeSignedTxn(elem.stib)
+ if err != nil {
+ return crypto.Digest{}, err
+ }
+ elem.txn = stxn.Txn
+
+ return elem.Hash(), nil
+}
+
+// txnMerkleElem represents a leaf in the Merkle tree of all transactions
+// in a block.
+type txnMerkleElem struct {
+ txn transactions.Transaction
+ stib transactions.SignedTxnInBlock
+}
+
+// ToBeHashed implements the crypto.Hashable interface.
+func (tme *txnMerkleElem) ToBeHashed() (protocol.HashID, []byte) {
+ // The leaf contains two hashes: the transaction ID (hash of the
+ // transaction itself), and the hash of the entire SignedTxnInBlock.
+ txid := tme.txn.ID()
+ stib := crypto.HashObj(&tme.stib)
+
+ var buf [2 * crypto.DigestSize]byte
+ copy(buf[:crypto.DigestSize], txid[:])
+ copy(buf[crypto.DigestSize:], stib[:])
+
+ return protocol.TxnMerkleLeaf, buf[:]
+}
+
+// Hash implements an optimized version of crypto.HashObj(tme).
+func (tme *txnMerkleElem) Hash() crypto.Digest {
+ txid := tme.txn.ID()
+ stib := tme.stib.Hash()
+
+ var buf [len(protocol.TxnMerkleLeaf) + 2*crypto.DigestSize]byte
+ s := buf[:0]
+ s = append(s, protocol.TxnMerkleLeaf...)
+ s = append(s, txid[:]...)
+ s = append(s, stib[:]...)
+ return crypto.Hash(s)
+}
diff --git a/data/bookkeeping/txn_merkle_test.go b/data/bookkeeping/txn_merkle_test.go
new file mode 100644
index 000000000..6af499b3e
--- /dev/null
+++ b/data/bookkeeping/txn_merkle_test.go
@@ -0,0 +1,140 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package bookkeeping
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+func TestTxnMerkleElemHash(t *testing.T) {
+ var tme txnMerkleElem
+ crypto.RandBytes(tme.stib.SignedTxn.Txn.Header.Sender[:])
+ require.Equal(t, crypto.HashObj(&tme), tme.Hash())
+}
+
+func TestTxnMerkle(t *testing.T) {
+ for ntxn := uint64(0); ntxn < 128; ntxn++ {
+ var b Block
+ b.CurrentProtocol = protocol.ConsensusCurrentVersion
+ crypto.RandBytes(b.BlockHeader.GenesisHash[:])
+
+ var elems []txnMerkleElem
+
+ for i := uint64(0); i < ntxn; i++ {
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ GenesisHash: b.BlockHeader.GenesisHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Amount: basics.MicroAlgos{Raw: i},
+ },
+ }
+
+ sigtxn := transactions.SignedTxn{Txn: txn}
+ ad := transactions.ApplyData{}
+
+ stib, err := b.BlockHeader.EncodeSignedTxn(sigtxn, ad)
+ require.NoError(t, err)
+
+ b.Payset = append(b.Payset, stib)
+
+ var e txnMerkleElem
+ e.txn = txn
+ e.stib = stib
+ elems = append(elems, e)
+ }
+
+ tree, err := b.TxnMerkleTree()
+ require.NoError(t, err)
+
+ root := tree.Root()
+ for i := uint64(0); i < ntxn; i++ {
+ proof, err := tree.Prove([]uint64{i})
+ require.NoError(t, err)
+
+ elemVerif := make(map[uint64]crypto.Digest)
+ elemVerif[i] = elems[i].Hash()
+ err = merklearray.Verify(root, elemVerif, proof)
+ require.NoError(t, err)
+ }
+ }
+}
+
+func BenchmarkTxnRoots(b *testing.B) {
+ var blk Block
+ blk.CurrentProtocol = protocol.ConsensusCurrentVersion
+ crypto.RandBytes(blk.BlockHeader.GenesisHash[:])
+
+ proto := config.Consensus[blk.CurrentProtocol]
+
+ for i := 0; true; i++ {
+ txn := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ GenesisHash: blk.BlockHeader.GenesisHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Amount: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ },
+ }
+
+ crypto.RandBytes(txn.Sender[:])
+ crypto.RandBytes(txn.PaymentTxnFields.Receiver[:])
+
+ sigtxn := transactions.SignedTxn{Txn: txn}
+ ad := transactions.ApplyData{}
+
+ stib, err := blk.BlockHeader.EncodeSignedTxn(sigtxn, ad)
+ require.NoError(b, err)
+
+ blk.Payset = append(blk.Payset, stib)
+
+ if (i%1024 == 0) && len(protocol.Encode(blk.Payset)) >= proto.MaxTxnBytesPerBlock {
+ break
+ }
+ }
+
+ var r crypto.Digest
+
+ b.Run("FlatCommit", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var err error
+ r, err = blk.paysetCommit(config.PaysetCommitFlat)
+ require.NoError(b, err)
+ }
+ })
+
+ b.Run("MerkleCommit", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var err error
+ r, err = blk.paysetCommit(config.PaysetCommitMerkle)
+ require.NoError(b, err)
+ }
+ })
+
+ _ = r
+}
diff --git a/data/ledger.go b/data/ledger.go
index a1688f444..9da66ac12 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -89,11 +89,16 @@ func makeGenesisBlock(proto protocol.ConsensusVersion, genesisBal GenesisBalance
FeeSink: genesisBal.feeSink,
RewardsPool: genesisBal.rewardsPool,
RewardsLevel: 0,
- RewardsRate: incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval),
RewardsResidue: 0,
RewardsRecalculationRound: basics.Round(params.RewardsRateRefreshInterval),
}
+ if params.InitialRewardsRateCalculation {
+ genesisRewardsState.RewardsRate = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
+ } else {
+ genesisRewardsState.RewardsRate = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ }
+
genesisProtoState := bookkeeping.UpgradeState{
CurrentProtocol: proto,
}
@@ -308,15 +313,53 @@ func (l *Ledger) ConsensusParams(r basics.Round) (config.ConsensusParams, error)
}
// ConsensusVersion gives the consensus version agreed on in a given round,
-// returning an error if we don't have that round or we have an
-// I/O error.
+// returning an error if the consensus version could not be determined from
+// either the block header for the given round or the latest block header.
// Implements agreement.Ledger.ConsensusVersion
func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, error) {
blockhdr, err := l.BlockHdr(r)
- if err != nil {
+ if err == nil {
+ return blockhdr.UpgradeState.CurrentProtocol, nil
+ }
+ // try to see if we can figure out what the version would be.
+ latestRound := l.Latest()
+ // if the requested round is older than the latest round, then just say that we don't know.
+ if r < latestRound {
return "", err
}
- return blockhdr.UpgradeState.CurrentProtocol, nil
+ // the request was for a future round. See if we have any known plans for the next round.
+ latestBlockhdr, err := l.BlockHdr(latestRound)
+ // if we have the latest block header, look inside and try to figure out if we can deduce the
+ // protocol version for the given round.
+ if err == nil {
+ // check to see if we have a protocol upgrade.
+ if latestBlockhdr.NextProtocolSwitchOn == 0 {
+ // no protocol upgrade taking place, we have *at least* UpgradeVoteRounds before the protocol version would get changed.
+ // it's safe to ignore the error case here since we know that we couldn't have reached this "known" round
+ // without the binary supporting this protocol version.
+ currentConsensusParams, _ := config.Consensus[latestBlockhdr.CurrentProtocol]
+ // we're using <= here since there is no current upgrade on this round, and even if one were to begin on a subsequent round
+ // it would still be correct until (latestBlockhdr.Round + currentConsensusParams.UpgradeVoteRounds)
+ if r <= latestBlockhdr.Round+basics.Round(currentConsensusParams.UpgradeVoteRounds) {
+ return latestBlockhdr.CurrentProtocol, nil
+ }
+ // otherwise, we can't really tell.
+ return "", ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestRound}
+ }
+ // in this case, we do have a protocol upgrade taking place.
+ if r < latestBlockhdr.NextProtocolSwitchOn {
+ // if we're in the voting duration or upgrade waiting period, then the protocol version is the current version.
+ return latestBlockhdr.CurrentProtocol, nil
+ }
+ // if the requested round is the protocol switch round and we've passed the voting period, then we know that on the switching round
+ // we will be using the next protocol.
+ if r == latestBlockhdr.NextProtocolSwitchOn && latestBlockhdr.Round >= latestBlockhdr.NextProtocolVoteBefore {
+ return latestBlockhdr.NextProtocol, nil
+ }
+ err = ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestRound}
+ }
+ // otherwise, we can't really tell what the protocol version would be at round r.
+ return "", err
}
// EnsureValidatedBlock ensures that the block, and associated certificate c, are
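The future-round branch added above amounts to a small decision table over the latest header. A condensed, side-effect-free sketch of just that deduction (the deduceVersion helper is illustrative; hdr is assumed to be the latest BlockHeader and r a round at or after it):

package upgradecheck

import (
	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/bookkeeping"
	"github.com/algorand/go-algorand/protocol"
)

// deduceVersion mirrors the future-round logic: with no upgrade scheduled, the
// current protocol is guaranteed for UpgradeVoteRounds more rounds; with an
// upgrade scheduled, it holds until the switch round, and the switch round
// itself is known once voting has concluded.
func deduceVersion(hdr bookkeeping.BlockHeader, r basics.Round) (protocol.ConsensusVersion, bool) {
	params, ok := config.Consensus[hdr.CurrentProtocol]
	if !ok {
		return "", false
	}
	if hdr.NextProtocolSwitchOn == 0 {
		// no upgrade in flight: safe for at least UpgradeVoteRounds rounds.
		if r <= hdr.Round+basics.Round(params.UpgradeVoteRounds) {
			return hdr.CurrentProtocol, true
		}
		return "", false
	}
	if r < hdr.NextProtocolSwitchOn {
		// voting or waiting period: still the current protocol.
		return hdr.CurrentProtocol, true
	}
	if r == hdr.NextProtocolSwitchOn && hdr.Round >= hdr.NextProtocolVoteBefore {
		// switch round, and voting already concluded: the next protocol.
		return hdr.NextProtocol, true
	}
	return "", false
}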
diff --git a/data/ledger_test.go b/data/ledger_test.go
index 1f64edfb7..c1ed1bbf9 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -26,9 +26,9 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
-
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -313,3 +313,105 @@ func TestLedgerSeed(t *testing.T) {
}
return
}
+
+func TestConsensusVersion(t *testing.T) {
+ // find a consensus protocol that leads to ConsensusCurrentVersion
+ var previousProtocol protocol.ConsensusVersion
+ for ver, params := range config.Consensus {
+ if _, has := params.ApprovedUpgrades[protocol.ConsensusCurrentVersion]; has {
+ previousProtocol = ver
+ break
+ }
+ }
+ require.NotEqual(t, protocol.ConsensusVersion(""), previousProtocol)
+ consensusParams := config.Consensus[previousProtocol]
+
+ genesisInitState, _ := testGenerateInitState(t, previousProtocol)
+
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = false
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Warn)
+ realLedger, err := ledger.OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
+ require.NoError(t, err, "could not open ledger")
+ defer realLedger.Close()
+
+ l := Ledger{Ledger: realLedger}
+ require.NotNil(t, &l)
+
+ blk := genesisInitState.Block
+
+ // add MaxTxnLife+4 blocks.
+ for rnd := basics.Round(1); rnd < basics.Round(consensusParams.MaxTxnLife+5); rnd++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.Seed[0] = byte(uint64(rnd))
+ blk.BlockHeader.Seed[1] = byte(uint64(rnd) / 256)
+ blk.BlockHeader.Seed[2] = byte(uint64(rnd) / 65536)
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ blk.BlockHeader.CurrentProtocol = previousProtocol
+ require.NoError(t, l.AddBlock(blk, agreement.Certificate{}))
+ l.WaitForCommit(rnd)
+ }
+ // ensure that the last 5 rounds have the expected version.
+ for rnd := basics.Round(consensusParams.MaxTxnLife); rnd < basics.Round(consensusParams.MaxTxnLife+5); rnd++ {
+ ver, err := l.ConsensusVersion(rnd)
+ require.NoError(t, err)
+ require.Equal(t, previousProtocol, ver)
+ }
+ // the next UpgradeVoteRounds rounds can also be known to have the previous version.
+ for rnd := basics.Round(consensusParams.MaxTxnLife + 5); rnd < basics.Round(consensusParams.MaxTxnLife+5+consensusParams.UpgradeVoteRounds); rnd++ {
+ ver, err := l.ConsensusVersion(rnd)
+ require.NoError(t, err)
+ require.Equal(t, previousProtocol, ver)
+ }
+
+ // but two rounds ahead is not known.
+ ver, err := l.ConsensusVersion(basics.Round(consensusParams.MaxTxnLife + 6 + consensusParams.UpgradeVoteRounds))
+ require.Equal(t, protocol.ConsensusVersion(""), ver)
+ require.Equal(t, ledgercore.ErrNoEntry{Round: basics.Round(consensusParams.MaxTxnLife + 6 + consensusParams.UpgradeVoteRounds), Latest: basics.Round(consensusParams.MaxTxnLife + 4), Committed: basics.Round(consensusParams.MaxTxnLife + 4)}, err)
+
+ // check round #1 which was already dropped.
+ ver, err = l.ConsensusVersion(basics.Round(1))
+ require.Equal(t, protocol.ConsensusVersion(""), ver)
+ require.Equal(t, ledgercore.ErrNoEntry{Round: basics.Round(1), Latest: basics.Round(consensusParams.MaxTxnLife + 4), Committed: basics.Round(consensusParams.MaxTxnLife + 4)}, err)
+
+ // add another round, with upgrade
+ rnd := basics.Round(consensusParams.MaxTxnLife + 5)
+ blk.BlockHeader.Round++
+ blk.BlockHeader.Seed[0] = byte(uint64(rnd))
+ blk.BlockHeader.Seed[1] = byte(uint64(rnd) / 256)
+ blk.BlockHeader.Seed[2] = byte(uint64(rnd) / 65536)
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ blk.BlockHeader.CurrentProtocol = previousProtocol
+ blk.BlockHeader.NextProtocol = protocol.ConsensusCurrentVersion
+ blk.BlockHeader.NextProtocolVoteBefore = basics.Round(rnd) + basics.Round(consensusParams.UpgradeVoteRounds)
+ blk.BlockHeader.NextProtocolSwitchOn = basics.Round(rnd) + basics.Round(consensusParams.UpgradeVoteRounds) + basics.Round(consensusParams.ApprovedUpgrades[protocol.ConsensusCurrentVersion])
+ require.NoError(t, l.AddBlock(blk, agreement.Certificate{}))
+ l.WaitForCommit(rnd)
+
+ for ; rnd < blk.BlockHeader.NextProtocolSwitchOn; rnd++ {
+ ver, err := l.ConsensusVersion(rnd)
+ require.NoError(t, err)
+ require.Equal(t, previousProtocol, ver)
+ }
+
+ for rnd = blk.BlockHeader.Round; rnd <= blk.BlockHeader.NextProtocolVoteBefore; rnd++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.Seed[0] = byte(uint64(rnd))
+ blk.BlockHeader.Seed[1] = byte(uint64(rnd) / 256)
+ blk.BlockHeader.Seed[2] = byte(uint64(rnd) / 65536)
+ blk.BlockHeader.NextProtocolApprovals++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ require.NoError(t, l.AddBlock(blk, agreement.Certificate{}))
+ l.WaitForCommit(rnd)
+ }
+
+ ver, err = l.ConsensusVersion(blk.BlockHeader.NextProtocolSwitchOn)
+ require.NoError(t, err)
+ require.Equal(t, protocol.ConsensusCurrentVersion, ver)
+
+ ver, err = l.ConsensusVersion(blk.BlockHeader.NextProtocolSwitchOn + 1)
+ require.Equal(t, protocol.ConsensusVersion(""), ver)
+ require.Equal(t, ledgercore.ErrNoEntry{Round: basics.Round(blk.BlockHeader.NextProtocolSwitchOn + 1), Latest: basics.Round(blk.BlockHeader.Round), Committed: basics.Round(blk.BlockHeader.Round)}, err)
+}
diff --git a/data/transactions/logic/Makefile b/data/transactions/logic/Makefile
index 62c164dc6..d083f801a 100644
--- a/data/transactions/logic/Makefile
+++ b/data/transactions/logic/Makefile
@@ -1,10 +1,33 @@
-all: TEAL_opcodes.md wat.md fields_string.go
+all: TEAL_opcodes.md README.md fields_string.go
+
+# Location of algorandfoundation/specs repo. (Optional)
+SPECS := ../../../../specs
+# Location of algorand/docs repo. (Optional)
+DOCS := ../../../../docs
TEAL_opcodes.md: fields_string.go ../../../cmd/opdoc/opdoc.go eval.go assembler.go doc.go opcodes.go
go run ../../../cmd/opdoc/opdoc.go ../../../cmd/opdoc/tmLanguage.go
+ @if [ -e $(SPECS)/dev/TEAL_opcodes.md ]; then \
+ sed '/^$$/q' $(SPECS)/dev/TEAL_opcodes.md | cat - TEAL_opcodes.md > opcodes.spec; \
+ mv opcodes.spec $(SPECS)/dev/TEAL_opcodes.md; \
+ echo "TEAL_opcodes.md updated in specs repo"; \
+ fi
+ @if [ -e $(DOCS)/docs/reference/teal/opcodes.md ]; then \
+ sed 's/^# /title: /g' TEAL_opcodes.md > $(DOCS)/docs/reference/teal/opcodes.md; \
+ echo "opcodes.md updated in docs repo"; \
+ fi
fields_string.go: fields.go
go generate
-wat.md: TEAL_opcodes.md README_in.md
+README.md: TEAL_opcodes.md README_in.md
python merge.py > README.md
+ @if [ -e $(SPECS)/dev/TEAL.md ]; then \
+ sed '/^$$/q' $(SPECS)/dev/TEAL.md | cat - README.md > teal.spec; \
+ mv teal.spec $(SPECS)/dev/TEAL.md; \
+ echo "TEAL.md updated in specs repo"; \
+ fi
+ @if [ -e $(DOCS)/docs/reference/teal/specification.md ]; then \
+ sed 's/^# /title: /g' README.md > $(DOCS)/docs/reference/teal/specification.md; \
+ echo "specification.md updated in docs repo"; \
+ fi
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index f51590469..263e58ffb 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -100,8 +100,6 @@ For one-argument ops, `X` is the last element on the stack, which is typically r
For two-argument ops, `A` is the previous element on the stack and `B` is the last element on the stack. These typically result in popping A and B from the stack and pushing the result.
-`ed25519verify` is currently the only 3 argument opcode and is described in detail in the opcode refrence.
-
| Op | Description |
| --- | --- |
| `sha256` | SHA256 hash of value X, yields [32]byte |
@@ -131,9 +129,13 @@ For two-argument ops, `A` is the previous element on the stack and `B` is the la
| `~` | bitwise invert value X |
| `mulw` | A times B out to 128-bit long result as low (top) and high uint64 values on the stack |
| `addw` | A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack |
-| `concat` | pop two byte strings A and B and join them, push the result |
-| `substring` | pop a byte string X. For immediate values in 0..255 M and N: extract a range of bytes from it starting at M up to but not including N, push the substring result. If N < M, or either is larger than the string length, the program fails |
-| `substring3` | pop a byte string A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the string length, the program fails |
+| `getbit` | pop a target A (integer or byte-array), and index B. Push the Bth bit of A. |
+| `setbit` | pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result |
+| `getbyte` | pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer |
+| `setbyte` | pop a byte-array A, integer B, and small integer C (between 0..255). Set the Bth byte of A to C, and push the result |
+| `concat` | pop two byte-arrays A and B and join them, push the result |
+| `substring s e` | pop a byte-array A. For immediate values in 0..255 S and E: extract a range of bytes from A starting at S up to but not including E, push the substring result. If E < S, or either is larger than the array length, the program fails |
+| `substring3` | pop a byte-array A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the array length, the program fails |
### Loading Values
@@ -143,30 +145,34 @@ Some of these have immediate data in the byte or bytes after the opcode.
| Op | Description |
| --- | --- |
-| `intcblock` | load block of uint64 constants |
-| `intc` | push value from uint64 constants to stack by index into constants |
+| `intcblock uint ...` | prepare block of uint64 constants for use by intc |
+| `intc i` | push Ith constant from intcblock to stack |
| `intc_0` | push constant 0 from intcblock to stack |
| `intc_1` | push constant 1 from intcblock to stack |
| `intc_2` | push constant 2 from intcblock to stack |
| `intc_3` | push constant 3 from intcblock to stack |
-| `bytecblock` | load block of byte-array constants |
-| `bytec` | push bytes constant to stack by index into constants |
+| `pushint uint` | push immediate UINT to the stack as an integer |
+| `bytecblock bytes ...` | prepare block of byte-array constants for use by bytec |
+| `bytec i` | push Ith constant from bytecblock to stack |
| `bytec_0` | push constant 0 from bytecblock to stack |
| `bytec_1` | push constant 1 from bytecblock to stack |
| `bytec_2` | push constant 2 from bytecblock to stack |
| `bytec_3` | push constant 3 from bytecblock to stack |
-| `arg` | push Args[N] value to stack by index |
-| `arg_0` | push Args[0] to stack |
-| `arg_1` | push Args[1] to stack |
-| `arg_2` | push Args[2] to stack |
-| `arg_3` | push Args[3] to stack |
-| `txn` | push field from current transaction to stack |
-| `gtxn` | push field to the stack from a transaction in the current transaction group |
-| `txna` | push value of an array field from current transaction to stack |
-| `gtxna` | push value of a field to the stack from a transaction in the current transaction group |
-| `global` | push value from globals to stack |
-| `load` | copy a value from scratch space to the stack |
-| `store` | pop a value from the stack and store to scratch space |
+| `pushbytes bytes` | push the following program bytes to the stack |
+| `arg n` | push Nth LogicSig argument to stack |
+| `arg_0` | push LogicSig argument 0 to stack |
+| `arg_1` | push LogicSig argument 1 to stack |
+| `arg_2` | push LogicSig argument 2 to stack |
+| `arg_3` | push LogicSig argument 3 to stack |
+| `txn f` | push field F of current transaction to stack |
+| `gtxn t f` | push field F of the Tth transaction in the current group |
+| `txna f i` | push Ith value of the array field F of the current transaction |
+| `gtxna t f i` | push Ith value of the array field F from the Tth transaction in the current group |
+| `gtxns f` | push field F of the Ath transaction in the current group |
+| `gtxnsa f i` | push Ith value of the array field F from the Ath transaction in the current group |
+| `global f` | push value from globals to stack |
+| `load i` | copy a value from scratch space to the stack |
+| `store i` | pop a value from the stack and store to scratch space |
**Transaction Fields**
@@ -220,6 +226,14 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 45 | FreezeAsset | uint64 | Asset ID being frozen or un-frozen. LogicSigVersion >= 2. |
| 46 | FreezeAssetAccount | []byte | 32 byte address of the account whose asset slot is being frozen or un-frozen. LogicSigVersion >= 2. |
| 47 | FreezeAssetFrozen | uint64 | The new frozen value, 0 or 1. LogicSigVersion >= 2. |
+| 48 | Assets | uint64 | Foreign Assets listed in the ApplicationCall transaction. LogicSigVersion >= 3. |
+| 49 | NumAssets | uint64 | Number of Assets. LogicSigVersion >= 3. |
+| 50 | Applications | uint64 | Foreign Apps listed in the ApplicationCall transaction. LogicSigVersion >= 3. |
+| 51 | NumApplications | uint64 | Number of Applications. LogicSigVersion >= 3. |
+| 52 | GlobalNumUint | uint64 | Number of global state integers in ApplicationCall. LogicSigVersion >= 3. |
+| 53 | GlobalNumByteSlice | uint64 | Number of global state byteslices in ApplicationCall. LogicSigVersion >= 3. |
+| 54 | LocalNumUint | uint64 | Number of local state integers in ApplicationCall. LogicSigVersion >= 3. |
+| 55 | LocalNumByteSlice | uint64 | Number of local state byteslices in ApplicationCall. LogicSigVersion >= 3. |
Additional details in the [opcodes document](TEAL_opcodes.md#txn) on the `txn` op.
@@ -239,6 +253,7 @@ Global fields are fields that are common to all the transactions in the group. I
| 6 | Round | uint64 | Current round number. LogicSigVersion >= 2. |
| 7 | LatestTimestamp | uint64 | Last confirmed block UNIX timestamp. Fails if negative. LogicSigVersion >= 2. |
| 8 | CurrentApplicationID | uint64 | ID of current application executing. Fails if no such application is executing. LogicSigVersion >= 2. |
+| 9 | CreatorAddress | []byte | Address of the creator of the current application. Fails if no such application is executing. LogicSigVersion >= 3. |
**Asset Fields**
@@ -271,30 +286,35 @@ Asset fields include `AssetHolding` and `AssetParam` fields that are used in `as
| Op | Description |
| --- | --- |
| `err` | Error. Panic immediately. This is primarily a fencepost against accidental zero bytes getting compiled into programs. |
-| `bnz` | branch if value X is not zero |
-| `bz` | branch if value X is zero |
-| `b` | branch unconditionally to offset |
+| `bnz target` | branch to TARGET if value X is not zero |
+| `bz target` | branch to TARGET if value X is zero |
+| `b target` | branch unconditionally to TARGET |
| `return` | use last value on stack as success value; end |
| `pop` | discard value X from stack |
| `dup` | duplicate last value on stack |
| `dup2` | duplicate two last values on stack: A, B -> A, B, A, B |
+| `dig n` | push the Nth value from the top of the stack. dig 0 is equivalent to dup |
+| `swap` | swaps two last values on stack: A, B -> B, A |
+| `select` | selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A) |
+| `assert` | immediately fail unless value X is a non-zero number |
### State Access
| Op | Description |
| --- | --- |
-| `balance` | get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender |
+| `balance` | get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. |
+| `min_balance` | get minimum required balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. |
| `app_opted_in` | check if account specified by Txn.Accounts[A] opted in for the application B => {0 or 1} |
| `app_local_get` | read from account specified by Txn.Accounts[A] from local state of the current application key B => value |
-| `app_local_get_ex` | read from account specified by Txn.Accounts[A] from local state of the application B key C => {0 or 1 (top), value} |
+| `app_local_get_ex` | read from account specified by Txn.Accounts[A] from local state of the application B key C => [*... stack*, value, 0 or 1] |
| `app_global_get` | read key A from global state of a current application => value |
-| `app_global_get_ex` | read from application Txn.ForeignApps[A] global state key B => {0 or 1 (top), value}. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app |
+| `app_global_get_ex` | read from application Txn.ForeignApps[A] global state key B => [*... stack*, value, 0 or 1]. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app |
| `app_local_put` | write to account specified by Txn.Accounts[A] to local state of a current application key B with value C |
| `app_global_put` | write key A and value B to global state of the current application |
| `app_local_del` | delete from account specified by Txn.Accounts[A] local state key B of the current application |
| `app_global_del` | delete key A from a global state of the current application |
-| `asset_holding_get` | read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value} |
-| `asset_params_get` | read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value} |
+| `asset_holding_get i` | read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value} |
+| `asset_params_get i` | read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value} |
# Assembler Syntax
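The new flow-control entries above (dig, swap, select) are pure stack manipulations, so their documented effects can be emulated with a toy stack. The Go sketch below only illustrates those stack effects (values simplified to uint64); it is not the TEAL evaluator:

package main

import "fmt"

// stack models the TEAL value stack with the top at the end of the slice,
// just enough to illustrate the dig/swap/select rows in the table above.
type stack []uint64

func (s *stack) push(v uint64) { *s = append(*s, v) }

func (s *stack) pop() uint64 {
	v := (*s)[len(*s)-1]
	*s = (*s)[:len(*s)-1]
	return v
}

// dig n: push a copy of the Nth value from the top (dig 0 behaves like dup).
func (s *stack) dig(n int) { s.push((*s)[len(*s)-1-n]) }

// swap: A, B -> B, A
func (s *stack) swap() {
	top, below := s.pop(), s.pop()
	s.push(top)
	s.push(below)
}

// sel emulates select (a Go keyword): A, B, C -> (if C != 0 then B else A).
func (s *stack) sel() {
	c, b, a := s.pop(), s.pop(), s.pop()
	if c != 0 {
		s.push(b)
	} else {
		s.push(a)
	}
}

func main() {
	s := stack{1, 2}
	s.swap()
	fmt.Println(s) // [2 1]

	s = stack{10, 20, 1}
	s.sel()
	fmt.Println(s) // [20]

	s = stack{7, 8, 9}
	s.dig(2)
	fmt.Println(s) // [7 8 9 7]
}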
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index 64bdd299c..ef43d5230 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -77,8 +77,6 @@ For one-argument ops, `X` is the last element on the stack, which is typically r
For two-argument ops, `A` is the previous element on the stack and `B` is the last element on the stack. These typically result in popping A and B from the stack and pushing the result.
-`ed25519verify` is currently the only 3 argument opcode and is described in detail in the opcode refrence.
-
@@ Arithmetic.md @@
### Loading Values
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 1657c107b..7a3ab2e53 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -19,6 +19,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
- **Cost**:
- 7 (LogicSigVersion = 1)
- 35 (LogicSigVersion = 2)
+ - 35 (LogicSigVersion = 3)
## keccak256
@@ -29,6 +30,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
- **Cost**:
- 26 (LogicSigVersion = 1)
- 130 (LogicSigVersion = 2)
+ - 130 (LogicSigVersion = 3)
## sha512_256
@@ -39,6 +41,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
- **Cost**:
- 9 (LogicSigVersion = 1)
- 45 (LogicSigVersion = 2)
+ - 45 (LogicSigVersion = 3)
## ed25519verify
@@ -219,21 +222,21 @@ Overflow is an error condition which halts execution and fails the transaction.
- A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack
- LogicSigVersion >= 2
-## intcblock
+## intcblock uint ...
- Opcode: 0x20 {varuint length} [{varuint value}, ...]
- Pops: _None_
- Pushes: _None_
-- load block of uint64 constants
+- prepare block of uint64 constants for use by intc
`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.
-## intc
+## intc i
- Opcode: 0x21 {uint8 int constant index}
- Pops: _None_
- Pushes: uint64
-- push value from uint64 constants to stack by index into constants
+- push Ith constant from intcblock to stack
## intc_0
@@ -263,21 +266,21 @@ Overflow is an error condition which halts execution and fails the transaction.
- Pushes: uint64
- push constant 3 from intcblock to stack
-## bytecblock
+## bytecblock bytes ...
- Opcode: 0x26 {varuint length} [({varuint value length} bytes), ...]
- Pops: _None_
- Pushes: _None_
-- load block of byte-array constants
+- prepare block of byte-array constants for use by bytec
-`bytecblock` loads the following program bytes into an array of byte string constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.
+`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.
-## bytec
+## bytec i
- Opcode: 0x27 {uint8 byte constant index}
- Pops: _None_
- Pushes: []byte
-- push bytes constant to stack by index into constants
+- push Ith constant from bytecblock to stack
## bytec_0
@@ -307,12 +310,12 @@ Overflow is an error condition which halts execution and fails the transaction.
- Pushes: []byte
- push constant 3 from bytecblock to stack
-## arg
+## arg n
- Opcode: 0x2c {uint8 arg index N}
- Pops: _None_
- Pushes: []byte
-- push Args[N] value to stack by index
+- push Nth LogicSig argument to stack
- Mode: Signature
## arg_0
@@ -320,7 +323,7 @@ Overflow is an error condition which halts execution and fails the transaction.
- Opcode: 0x2d
- Pops: _None_
- Pushes: []byte
-- push Args[0] to stack
+- push LogicSig argument 0 to stack
- Mode: Signature
## arg_1
@@ -328,7 +331,7 @@ Overflow is an error condition which halts execution and fails the transaction.
- Opcode: 0x2e
- Pops: _None_
- Pushes: []byte
-- push Args[1] to stack
+- push LogicSig argument 1 to stack
- Mode: Signature
## arg_2
@@ -336,7 +339,7 @@ Overflow is an error condition which halts execution and fails the transaction.
- Opcode: 0x2f
- Pops: _None_
- Pushes: []byte
-- push Args[2] to stack
+- push LogicSig argument 2 to stack
- Mode: Signature
## arg_3
@@ -344,17 +347,17 @@ Overflow is an error condition which halts execution and fails the transaction.
- Opcode: 0x30
- Pops: _None_
- Pushes: []byte
-- push Args[3] to stack
+- push LogicSig argument 3 to stack
- Mode: Signature
-## txn
+## txn f
- Opcode: 0x31 {uint8 transaction field index}
- Pops: _None_
- Pushes: any
-- push field from current transaction to stack
+- push field F of current transaction to stack
-`txn` Fields:
+`txn` Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)):
| Index | Name | Type | Notes |
| --- | --- | --- | --- |
@@ -406,6 +409,14 @@ Overflow is an error condition which halts execution and fails the transaction.
| 45 | FreezeAsset | uint64 | Asset ID being frozen or un-frozen. LogicSigVersion >= 2. |
| 46 | FreezeAssetAccount | []byte | 32 byte address of the account whose asset slot is being frozen or un-frozen. LogicSigVersion >= 2. |
| 47 | FreezeAssetFrozen | uint64 | The new frozen value, 0 or 1. LogicSigVersion >= 2. |
+| 48 | Assets | uint64 | Foreign Assets listed in the ApplicationCall transaction. LogicSigVersion >= 3. |
+| 49 | NumAssets | uint64 | Number of Assets. LogicSigVersion >= 3. |
+| 50 | Applications | uint64 | Foreign Apps listed in the ApplicationCall transaction. LogicSigVersion >= 3. |
+| 51 | NumApplications | uint64 | Number of Applications. LogicSigVersion >= 3. |
+| 52 | GlobalNumUint | uint64 | Number of global state integers in ApplicationCall. LogicSigVersion >= 3. |
+| 53 | GlobalNumByteSlice | uint64 | Number of global state byteslices in ApplicationCall. LogicSigVersion >= 3. |
+| 54 | LocalNumUint | uint64 | Number of local state integers in ApplicationCall. LogicSigVersion >= 3. |
+| 55 | LocalNumByteSlice | uint64 | Number of local state byteslices in ApplicationCall. LogicSigVersion >= 3. |
TypeEnum mapping:
@@ -423,7 +434,7 @@ TypeEnum mapping:
FirstValidTime causes the program to fail. The field is reserved for future use.
-## global
+## global f
- Opcode: 0x32 {uint8 global field index}
- Pops: _None_
@@ -443,74 +454,93 @@ FirstValidTime causes the program to fail. The field is reserved for future use.
| 6 | Round | uint64 | Current round number. LogicSigVersion >= 2. |
| 7 | LatestTimestamp | uint64 | Last confirmed block UNIX timestamp. Fails if negative. LogicSigVersion >= 2. |
| 8 | CurrentApplicationID | uint64 | ID of current application executing. Fails if no such application is executing. LogicSigVersion >= 2. |
+| 9 | CreatorAddress | []byte | Address of the creator of the current application. Fails if no such application is executing. LogicSigVersion >= 3. |
-## gtxn
+## gtxn t f
-- Opcode: 0x33 {uint8 transaction group index}{uint8 transaction field index}
+- Opcode: 0x33 {uint8 transaction group index} {uint8 transaction field index}
- Pops: _None_
- Pushes: any
-- push field to the stack from a transaction in the current transaction group
+- push field F of the Tth transaction in the current group
for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.
-## load
+## load i
- Opcode: 0x34 {uint8 position in scratch space to load from}
- Pops: _None_
- Pushes: any
- copy a value from scratch space to the stack
-## store
+## store i
- Opcode: 0x35 {uint8 position in scratch space to store to}
- Pops: *... stack*, any
- Pushes: _None_
- pop a value from the stack and store to scratch space
-## txna
+## txna f i
-- Opcode: 0x36 {uint8 transaction field index}{uint8 transaction field array index}
+- Opcode: 0x36 {uint8 transaction field index} {uint8 transaction field array index}
- Pops: _None_
- Pushes: any
-- push value of an array field from current transaction to stack
+- push Ith value of the array field F of the current transaction
- LogicSigVersion >= 2
-## gtxna
+## gtxna t f i
-- Opcode: 0x37 {uint8 transaction group index}{uint8 transaction field index}{uint8 transaction field array index}
+- Opcode: 0x37 {uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}
- Pops: _None_
- Pushes: any
-- push value of a field to the stack from a transaction in the current transaction group
+- push Ith value of the array field F from the Tth transaction in the current group
- LogicSigVersion >= 2
-## bnz
+## gtxns f
+
+- Opcode: 0x38 {uint8 transaction field index}
+- Pops: *... stack*, uint64
+- Pushes: any
+- push field F of the Ath transaction in the current group
+- LogicSigVersion >= 3
+
+for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.
+
+## gtxnsa f i
+
+- Opcode: 0x39 {uint8 transaction field index} {uint8 transaction field array index}
+- Pops: *... stack*, uint64
+- Pushes: any
+- push Ith value of the array field F from the Ath transaction in the current group
+- LogicSigVersion >= 3
+
+## bnz target
- Opcode: 0x40 {0..0x7fff forward branch offset, big endian}
- Pops: *... stack*, uint64
- Pushes: _None_
-- branch if value X is not zero
+- branch to TARGET if value X is not zero
The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping.
At LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)
-## bz
+## bz target
- Opcode: 0x41 {0..0x7fff forward branch offset, big endian}
- Pops: *... stack*, uint64
- Pushes: _None_
-- branch if value X is zero
+- branch to TARGET if value X is zero
- LogicSigVersion >= 2
See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.
-## b
+## b target
- Opcode: 0x42 {0..0x7fff forward branch offset, big endian}
- Pops: _None_
- Pushes: _None_
-- branch unconditionally to offset
+- branch unconditionally to TARGET
- LogicSigVersion >= 2
See `bnz` for details on how branches work. `b` always jumps to the offset.
@@ -523,6 +553,14 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
- use last value on stack as success value; end
- LogicSigVersion >= 2
+## assert
+
+- Opcode: 0x44
+- Pops: *... stack*, uint64
+- Pushes: _None_
+- immediately fail unless value X is a non-zero number
+- LogicSigVersion >= 3
+
## pop
- Opcode: 0x48
@@ -545,22 +583,46 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
- duplicate two last values on stack: A, B -> A, B, A, B
- LogicSigVersion >= 2
+## dig n
+
+- Opcode: 0x4b {uint8 depth}
+- Pops: *... stack*, any
+- Pushes: *... stack*, any, any
+- push the Nth value from the top of the stack. dig 0 is equivalent to dup
+- LogicSigVersion >= 3
+
+## swap
+
+- Opcode: 0x4c
+- Pops: *... stack*, {any A}, {any B}
+- Pushes: *... stack*, any, any
+- swaps two last values on stack: A, B -> B, A
+- LogicSigVersion >= 3
+
+## select
+
+- Opcode: 0x4d
+- Pops: *... stack*, {any A}, {any B}, {uint64 C}
+- Pushes: any
+- selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A)
+- LogicSigVersion >= 3
+
## concat
- Opcode: 0x50
- Pops: *... stack*, {[]byte A}, {[]byte B}
- Pushes: []byte
-- pop two byte strings A and B and join them, push the result
+- pop two byte-arrays A and B and join them, push the result
- LogicSigVersion >= 2
`concat` panics if the result would be greater than 4096 bytes.
-## substring
+## substring s e
-- Opcode: 0x51 {uint8 start position}{uint8 end position}
+- Opcode: 0x51 {uint8 start position} {uint8 end position}
- Pops: *... stack*, []byte
- Pushes: []byte
-- pop a byte string X. For immediate values in 0..255 M and N: extract a range of bytes from it starting at M up to but not including N, push the substring result. If N < M, or either is larger than the string length, the program fails
+- pop a byte-array A. For immediate values in 0..255 S and E: extract a range of bytes from A starting at S up to but not including E, push the substring result. If E < S, or either is larger than the array length, the program fails
- LogicSigVersion >= 2
## substring3
@@ -568,15 +630,51 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
- Opcode: 0x52
- Pops: *... stack*, {[]byte A}, {uint64 B}, {uint64 C}
- Pushes: []byte
-- pop a byte string A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the string length, the program fails
+- pop a byte-array A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the array length, the program fails
- LogicSigVersion >= 2
+## getbit
+
+- Opcode: 0x53
+- Pops: *... stack*, {any A}, {uint64 B}
+- Pushes: uint64
+- pop a target A (integer or byte-array), and index B. Push the Bth bit of A.
+- LogicSigVersion >= 3
+
+see explanation of bit ordering in setbit
+
+## setbit
+
+- Opcode: 0x54
+- Pops: *... stack*, {any A}, {uint64 B}, {uint64 C}
+- Pushes: uint64
+- pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result
+- LogicSigVersion >= 3
+
+bit indexing begins with low-order bits in integers. Setting bit 4 to 1 on the integer 0 yields 16 (`int 0x0010`, or 2^4). Indexing begins in the first bytes of a byte-string (as seen in getbyte and substring). Setting bits 0 through 11 to 1 in a 4 byte-array of 0s yields `byte 0xfff00000`
+
+## getbyte
+
+- Opcode: 0x55
+- Pops: *... stack*, {[]byte A}, {uint64 B}
+- Pushes: uint64
+- pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer
+- LogicSigVersion >= 3
+
+## setbyte
+
+- Opcode: 0x56
+- Pops: *... stack*, {[]byte A}, {uint64 B}, {uint64 C}
+- Pushes: []byte
+- pop a byte-array A, integer B, and small integer C (between 0..255). Set the Bth byte of A to C, and push the result
+- LogicSigVersion >= 3
+
## balance
- Opcode: 0x60
- Pops: *... stack*, uint64
- Pushes: uint64
-- get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender
+- get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.
- LogicSigVersion >= 2
- Mode: Application
@@ -600,18 +698,18 @@ params: account index, application id (top of the stack on opcode entry). Return
- LogicSigVersion >= 2
- Mode: Application
-params: account index, state key. Return: value. The value is zero if the key does not exist.
+params: account index, state key. Return: value. The value is zero (of type uint64) if the key does not exist.
## app_local_get_ex
- Opcode: 0x63
- Pops: *... stack*, {uint64 A}, {uint64 B}, {[]byte C}
- Pushes: *... stack*, any, uint64
-- read from account specified by Txn.Accounts[A] from local state of the application B key C => {0 or 1 (top), value}
+- read from account specified by Txn.Accounts[A] from local state of the application B key C => [*... stack*, value, 0 or 1]
- LogicSigVersion >= 2
- Mode: Application
-params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value.
+params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
## app_global_get
@@ -622,18 +720,18 @@ params: account index, application id, state key. Return: did_exist flag (top of
- LogicSigVersion >= 2
- Mode: Application
-params: state key. Return: value. The value is zero if the key does not exist.
+params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.
## app_global_get_ex
- Opcode: 0x65
- Pops: *... stack*, {uint64 A}, {[]byte B}
- Pushes: *... stack*, any, uint64
-- read from application Txn.ForeignApps[A] global state key B => {0 or 1 (top), value}. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app
+- read from application Txn.ForeignApps[A] global state key B => [*... stack*, value, 0 or 1]. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app
- LogicSigVersion >= 2
- Mode: Application
-params: application index, state key. Return: value. Application index is
+params: application index, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.
## app_local_put
@@ -681,7 +779,7 @@ params: state key.
Deleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)
-## asset_holding_get
+## asset_holding_get i
- Opcode: 0x70 {uint8 asset holding field index}
- Pops: *... stack*, {uint64 A}, {uint64 B}
@@ -700,7 +798,7 @@ Deleting a key which is already absent has no effect on the application global s
params: account index, asset id. Return: did_exist flag (1 if exist and 0 otherwise), value.
-## asset_params_get
+## asset_params_get i
- Opcode: 0x71 {uint8 asset params field index}
- Pops: *... stack*, uint64
@@ -727,3 +825,28 @@ params: account index, asset id. Return: did_exist flag (1 if exist and 0 otherw
params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 otherwise), value.
+
+## min_balance
+
+- Opcode: 0x78
+- Pops: *... stack*, uint64
+- Pushes: uint64
+- get minimum required balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.
+- LogicSigVersion >= 3
+- Mode: Application
+
+## pushbytes bytes
+
+- Opcode: 0x80 {varuint length} {bytes}
+- Pops: _None_
+- Pushes: []byte
+- push the following program bytes to the stack
+- LogicSigVersion >= 3
+
+## pushint uint
+
+- Opcode: 0x81 {varuint int}
+- Pops: _None_
+- Pushes: uint64
+- push immediate UINT to the stack as an integer
+- LogicSigVersion >= 3
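The three v3 additions documented above (min_balance, pushbytes, pushint) can be exercised together. A minimal sketch, not part of this diff, assuming the in-repo `logic` package; the headroom constant and approval logic are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/transactions/logic"
)

// Illustrative TEAL: compare balance against min_balance plus some headroom.
const src = `#pragma version 3
int 0           // account index: the sender
balance
int 0
min_balance
pushint 1000    // assumed headroom, in microalgos
+
>=              // does the sender cover its minimum balance plus headroom?
bz reject
pushbytes "ok"  // bytes are embedded directly; no bytecblock entry is needed
len
return
reject:
err
`

func main() {
	ops, err := logic.AssembleStringWithVersion(src, 3)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("assembled %d bytes, version byte %d\n", len(ops.Program), ops.Program[0])
}
```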
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 14de2cea6..99e2d28e3 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -157,7 +157,6 @@ func (ops *OpStream) Intc(constIndex uint) {
} else {
ops.trace("intc %d %d", constIndex, ops.intc[constIndex])
}
- ops.tpush(StackUint64)
}
// Uint writes opcodes for loading a uint literal
@@ -201,7 +200,6 @@ func (ops *OpStream) Bytec(constIndex uint) {
} else {
ops.trace("bytec %d %s", constIndex, hex.EncodeToString(ops.bytec[constIndex]))
}
- ops.tpush(StackBytes)
}
// ByteLiteral writes opcodes and data for loading a []byte literal
@@ -241,7 +239,6 @@ func (ops *OpStream) Arg(val uint64) error {
ops.pending.WriteByte(0x2c)
ops.pending.WriteByte(uint8(val))
}
- ops.tpush(StackBytes)
return nil
}
@@ -304,6 +301,32 @@ func (ops *OpStream) Gtxna(gid, fieldNum uint64, arrayFieldIdx uint64) {
ops.tpush(TxnFieldTypes[fieldNum])
}
+// Gtxns writes opcodes for loading a field from the current transaction
+func (ops *OpStream) Gtxns(fieldNum uint64) {
+ if fieldNum >= uint64(len(TxnFieldNames)) {
+ ops.errorf("invalid gtxns field: %d", fieldNum)
+ fieldNum = 0 // avoid further error in tpush as we forge ahead
+ }
+ ops.pending.WriteByte(0x38)
+ ops.pending.WriteByte(uint8(fieldNum))
+ ops.tpush(TxnFieldTypes[fieldNum])
+}
+
+// Gtxnsa writes opcodes for loading an array field from the current transaction
+func (ops *OpStream) Gtxnsa(fieldNum uint64, arrayFieldIdx uint64) {
+ if fieldNum >= uint64(len(TxnFieldNames)) {
+ ops.errorf("invalid gtxnsa field: %d", fieldNum)
+ fieldNum = 0 // avoid further error in tpush as we forge ahead
+ }
+ if arrayFieldIdx > 255 {
+ ops.errorf("gtxnsa array index beyond 255: %d", arrayFieldIdx)
+ }
+ ops.pending.WriteByte(0x39)
+ ops.pending.WriteByte(uint8(fieldNum))
+ ops.pending.WriteByte(uint8(arrayFieldIdx))
+ ops.tpush(TxnFieldTypes[fieldNum])
+}
+
// Global writes opcodes for loading an evaluator-global field
func (ops *OpStream) Global(val GlobalField) {
ops.pending.WriteByte(0x32)
@@ -318,7 +341,7 @@ func (ops *OpStream) AssetHolding(val uint64) {
ops.errorf("invalid asset holding field: %d", val)
val = 0 // avoid further error in tpush as we forge ahead
}
- ops.pending.WriteByte(opsByName[ops.Version]["asset_holding_get"].Opcode)
+ ops.pending.WriteByte(OpsByName[ops.Version]["asset_holding_get"].Opcode)
ops.pending.WriteByte(uint8(val))
ops.tpush(AssetHoldingFieldTypes[val])
ops.tpush(StackUint64)
@@ -330,16 +353,16 @@ func (ops *OpStream) AssetParams(val uint64) {
ops.errorf("invalid asset params field: %d", val)
val = 0 // avoid further error in tpush as we forge ahead
}
- ops.pending.WriteByte(opsByName[ops.Version]["asset_params_get"].Opcode)
+ ops.pending.WriteByte(OpsByName[ops.Version]["asset_params_get"].Opcode)
ops.pending.WriteByte(uint8(val))
ops.tpush(AssetParamsFieldTypes[val])
ops.tpush(StackUint64)
}
func assembleInt(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
if len(args) != 1 {
- ops.error("int needs one argument")
- args = []string{"0"} // By continuing, Uint will maintain type stack.
+ return ops.error("int needs one argument")
}
// check friendly TypeEnum constants
te, isTypeEnum := txnTypeConstToUint64[args[0]]
@@ -361,8 +384,7 @@ func assembleInt(ops *OpStream, spec *OpSpec, args []string) error {
}
val, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
- ops.error(err)
- val = 0 // By continuing, Uint will maintain type stack.
+ return ops.error(err)
}
ops.Uint(val)
return nil
@@ -370,32 +392,62 @@ func assembleInt(ops *OpStream, spec *OpSpec, args []string) error {
// Explicit invocation of const lookup and push
func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
if len(args) != 1 {
- ops.error("intc operation needs one argument")
- args = []string{"0"} // By continuing, Intc will maintain type stack.
+ return ops.error("intc operation needs one argument")
}
constIndex, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
- ops.error(err)
- constIndex = 0 // By continuing, Intc will maintain type stack.
+ return ops.error(err)
}
ops.Intc(uint(constIndex))
return nil
}
func assembleByteC(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
if len(args) != 1 {
ops.error("bytec operation needs one argument")
- args = []string{"0"} // By continuing, Bytec will maintain type stack.
}
constIndex, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
ops.error(err)
- constIndex = 0 // By continuing, Bytec will maintain type stack.
}
ops.Bytec(uint(constIndex))
return nil
}
+func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
+ if len(args) != 1 {
+ ops.errorf("%s needs one argument", spec.Name)
+ }
+ val, err := strconv.ParseUint(args[0], 0, 64)
+ if err != nil {
+ ops.error(err)
+ }
+ ops.pending.WriteByte(spec.Opcode)
+ var scratch [binary.MaxVarintLen64]byte
+ vlen := binary.PutUvarint(scratch[:], val)
+ ops.pending.Write(scratch[:vlen])
+ return nil
+}
+func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
+ if len(args) != 1 {
+ ops.errorf("%s needs one argument", spec.Name)
+ }
+ val, _, err := parseBinaryArgs(args)
+ if err != nil {
+ return ops.error(err)
+ }
+ ops.pending.WriteByte(spec.Opcode)
+ var scratch [binary.MaxVarintLen64]byte
+ vlen := binary.PutUvarint(scratch[:], uint64(len(val)))
+ ops.pending.Write(scratch[:vlen])
+ ops.pending.Write(val)
+ return nil
+}
+
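For reference, the immediate layout written by asmPushInt and asmPushBytes is the opcode byte followed by a uvarint (and, for pushbytes, the raw bytes). A standalone check of that encoding, matching the `pushint 1000` / `pushbytes "john"` tail of the v3 test vector in assembler_test.go:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var scratch [binary.MaxVarintLen64]byte

	// pushint 1000 assembles to 0x81 followed by uvarint(1000) = e8 07.
	n := binary.PutUvarint(scratch[:], 1000)
	fmt.Printf("pushint 1000     -> 81 %x\n", scratch[:n])

	// pushbytes "john" assembles to 0x80, uvarint(len) = 04, then the raw bytes.
	val := []byte("john")
	n = binary.PutUvarint(scratch[:], uint64(len(val)))
	fmt.Printf("pushbytes \"john\" -> 80 %x %x\n", scratch[:n], val)
}
```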
func base32DecdodeAnyPadding(x string) (val []byte, err error) {
val, err = base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(x)
if err != nil {
@@ -544,16 +596,13 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte 0x....
// byte "this is a string\n"
func assembleByte(ops *OpStream, spec *OpSpec, args []string) error {
- var val []byte
- var err error
+ ops.checkArgs(*spec)
if len(args) == 0 {
- ops.error("byte operation needs byte literal argument")
- args = []string{"0x00"} // By continuing, ByteLiteral will maintain type stack.
+ return ops.error("byte operation needs byte literal argument")
}
- val, _, err = parseBinaryArgs(args)
+ val, _, err := parseBinaryArgs(args)
if err != nil {
- ops.error(err)
- val = []byte{} // By continuing, ByteLiteral will maintain type stack.
+ return ops.error(err)
}
ops.ByteLiteral(val)
return nil
@@ -611,41 +660,38 @@ func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// addr A1EU...
// parses base32-with-checksum account address strings into a byte literal
func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
if len(args) != 1 {
- ops.error("addr operation needs one argument")
- // By continuing, ByteLiteral will maintain type stack.
- args = []string{"7777777777777777777777777777777777777777777777777774MSJUVU"}
+ return ops.error("addr operation needs one argument")
}
addr, err := basics.UnmarshalChecksumAddress(args[0])
if err != nil {
- ops.error(err)
- addr = basics.Address{} // By continuing, ByteLiteral will maintain type stack.
+ return ops.error(err)
}
ops.ByteLiteral(addr[:])
return nil
}
func assembleArg(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
if len(args) != 1 {
- ops.error("arg operation needs one argument")
- args = []string{"0"}
+ return ops.error("arg operation needs one argument")
}
val, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
- ops.error(err)
- val = 0 // Let ops.Arg maintain type stack
+ return ops.error(err)
}
ops.Arg(val)
return nil
}
func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error {
+ ops.checkArgs(*spec)
if len(args) != 1 {
- ops.error("branch operation needs label argument") // proceeding so checkArgs runs
- } else {
- ops.ReferToLabel(ops.pending.Len(), args[0])
+ return ops.error("branch operation needs label argument")
}
- ops.checkArgs(*spec)
+
+ ops.ReferToLabel(ops.pending.Len(), args[0])
ops.pending.WriteByte(spec.Opcode)
// zero bytes will get replaced with actual offset in resolveLabels()
ops.pending.WriteByte(0)
@@ -653,112 +699,31 @@ func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func assembleLoad(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- ops.error("load operation needs one argument")
- args = []string{"0"} // By continuing, tpush will maintain type stack.
- }
- val, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
- ops.error(err)
- val = 0
- }
- if val > EvalMaxScratchSize {
- ops.errorf("load outside 0..255: %d", val)
- val = 0
- }
- ops.pending.WriteByte(0x34)
- ops.pending.WriteByte(byte(val))
- ops.tpush(StackAny)
- return nil
-}
-
-func assembleStore(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- ops.error("store operation needs one argument")
- args = []string{"0"} // By continuing, checkArgs, tpush will maintain type stack.
- }
- val, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
- ops.error(err)
- val = 0
- }
- if val > EvalMaxScratchSize {
- ops.errorf("store outside 0..255: %d", val)
- val = 0
- }
- ops.checkArgs(*spec)
- ops.pending.WriteByte(spec.Opcode)
- ops.pending.WriteByte(byte(val))
- return nil
-}
-
func assembleSubstring(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 2 {
- ops.error("substring expects 2 args")
- args = []string{"0", "0"} // By continuing, checkArgs, tpush will maintain type stack.
- }
- start, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
- ops.error(err)
- start = 0
- }
- if start > EvalMaxScratchSize {
- ops.error("substring limited to 0..255")
- start = 0
- }
-
- end, err := strconv.ParseUint(args[1], 0, 64)
- if err != nil {
- ops.error(err)
- end = start
- }
- if end > EvalMaxScratchSize {
- ops.error("substring limited to 0..255")
- end = start
- }
-
+ asmDefault(ops, spec, args)
+ // Having run asmDefault, only need to check extra constraints.
+ start, _ := strconv.ParseUint(args[0], 0, 64)
+ end, _ := strconv.ParseUint(args[1], 0, 64)
if end < start {
- ops.error("substring end is before start")
- end = start
+ return ops.error("substring end is before start")
}
- opcode := byte(0x51)
- ops.checkArgs(*spec)
- ops.pending.WriteByte(opcode)
- ops.pending.WriteByte(byte(start))
- ops.pending.WriteByte(byte(end))
- ops.trace(" pushes([]byte)")
- ops.tpush(StackBytes)
return nil
}
-func disSubstring(dis *disassembleState, spec *OpSpec) {
- lastIdx := dis.pc + 2
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
- }
- start := uint(dis.program[dis.pc+1])
- end := uint(dis.program[dis.pc+2])
- dis.nextpc = dis.pc + 3
- _, dis.err = fmt.Fprintf(dis.out, "substring %d %d\n", start, end)
-}
-
func assembleTxn(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("txn expects one argument")
}
fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return ops.errorf("txn unknown arg: %v", args[0])
+ return ops.errorf("txn unknown field: %v", args[0])
}
_, ok = txnaFieldSpecByField[fs.field]
if ok {
- return ops.errorf("found txna field %v in txn op", args[0])
+ return ops.errorf("found array field %v in txn op", args[0])
}
if fs.version > ops.Version {
- return ops.errorf("txn %s available in version %d. Missed #pragma version?", args[0], fs.version)
+ return ops.errorf("field %s available in version %d. Missed #pragma version?", args[0], fs.version)
}
val := fs.field
ops.Txn(uint64(val))
@@ -782,11 +747,11 @@ func assembleTxna(ops *OpStream, spec *OpSpec, args []string) error {
}
fs, ok := txnFieldSpecByName[args[0]]
if !ok {
- return ops.errorf("txna unknown arg: %v", args[0])
+ return ops.errorf("txna unknown field: %v", args[0])
}
_, ok = txnaFieldSpecByField[fs.field]
if !ok {
- return ops.errorf("txna unknown arg: %v", args[0])
+ return ops.errorf("txna unknown field: %v", args[0])
}
if fs.version > ops.Version {
return ops.errorf("txna %s available in version %d. Missed #pragma version?", args[0], fs.version)
@@ -810,14 +775,14 @@ func assembleGtxn(ops *OpStream, spec *OpSpec, args []string) error {
}
fs, ok := txnFieldSpecByName[args[1]]
if !ok {
- return ops.errorf("gtxn unknown arg: %v", args[1])
+ return ops.errorf("gtxn unknown field: %v", args[1])
}
_, ok = txnaFieldSpecByField[fs.field]
if ok {
- return ops.errorf("found gtxna field %v in gtxn op", args[1])
+ return ops.errorf("found array field %v in gtxn op", args[1])
}
if fs.version > ops.Version {
- return ops.errorf("gtxn %s available in version %d. Missed #pragma version?", args[1], fs.version)
+ return ops.errorf("field %s available in version %d. Missed #pragma version?", args[1], fs.version)
}
val := fs.field
ops.Gtxn(gtid, uint64(val))
@@ -844,11 +809,11 @@ func assembleGtxna(ops *OpStream, spec *OpSpec, args []string) error {
}
fs, ok := txnFieldSpecByName[args[1]]
if !ok {
- return ops.errorf("gtxna unknown arg: %v", args[1])
+ return ops.errorf("gtxna unknown field: %v", args[1])
}
_, ok = txnaFieldSpecByField[fs.field]
if !ok {
- return ops.errorf("gtxna unknown arg: %v", args[1])
+ return ops.errorf("gtxna unknown field: %v", args[1])
}
if fs.version > ops.Version {
return ops.errorf("gtxna %s available in version %d. Missed #pragma version?", args[1], fs.version)
@@ -862,6 +827,53 @@ func assembleGtxna(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
+func assembleGtxns(ops *OpStream, spec *OpSpec, args []string) error {
+ if len(args) == 2 {
+ return assembleGtxnsa(ops, spec, args)
+ }
+ if len(args) != 1 {
+ return ops.error("gtxns expects one or two immediate arguments")
+ }
+ fs, ok := txnFieldSpecByName[args[0]]
+ if !ok {
+ return ops.errorf("gtxns unknown field: %v", args[0])
+ }
+ _, ok = txnaFieldSpecByField[fs.field]
+ if ok {
+ return ops.errorf("found array field %v in gtxns op", args[0])
+ }
+ if fs.version > ops.Version {
+ return ops.errorf("field %s available in version %d. Missed #pragma version?", args[0], fs.version)
+ }
+ val := fs.field
+ ops.Gtxns(uint64(val))
+ return nil
+}
+
+func assembleGtxnsa(ops *OpStream, spec *OpSpec, args []string) error {
+ if len(args) != 2 {
+ return ops.error("gtxnsa expects two immediate arguments")
+ }
+ fs, ok := txnFieldSpecByName[args[0]]
+ if !ok {
+ return ops.errorf("gtxnsa unknown field: %v", args[0])
+ }
+ _, ok = txnaFieldSpecByField[fs.field]
+ if !ok {
+ return ops.errorf("gtxnsa unknown field: %v", args[0])
+ }
+ if fs.version > ops.Version {
+ return ops.errorf("gtxnsa %s available in version %d. Missed #pragma version?", args[0], fs.version)
+ }
+ arrayFieldIdx, err := strconv.ParseUint(args[1], 0, 64)
+ if err != nil {
+ return ops.error(err)
+ }
+ fieldNum := fs.field
+ ops.Gtxnsa(uint64(fieldNum), uint64(arrayFieldIdx))
+ return nil
+}
+
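Unlike gtxn, the new gtxns/gtxnsa forms take the group index from the stack rather than from an immediate, so the same field lookup works for a dynamically chosen transaction. A minimal sketch, not part of this diff; the TEAL source is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/transactions/logic"
)

// Illustrative TEAL: read fields of group transactions chosen by stack values.
const src = `#pragma version 3
int 0            // group index comes from the stack
gtxns Fee
pushint 1000
<=
int 1
gtxnsa Accounts 0
len
pop
`

func main() {
	ops, err := logic.AssembleStringWithVersion(src, 3)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("assembled %d bytes\n", len(ops.Program))
}
```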
func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
ops.error("global expects one argument")
@@ -869,7 +881,7 @@ func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error {
}
fs, ok := globalFieldSpecByName[args[0]]
if !ok {
- ops.errorf("global unknown arg: %v", args[0])
+ ops.errorf("global unknown field: %v", args[0])
fs, _ = globalFieldSpecByName[GlobalFieldNames[0]]
}
if fs.version > ops.Version {
@@ -909,32 +921,33 @@ func assembleAssetParams(ops *OpStream, spec *OpSpec, args []string) error {
type assembleFunc func(*OpStream, *OpSpec, []string) error
+// Basic assembly. Any extra bytes of opcode are encoded as byte immediates.
func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
ops.checkArgs(*spec)
- if len(spec.Returns) > 0 {
- ops.tpusha(spec.Returns)
- ops.trace(" pushes(%s", spec.Returns[0].String())
- if len(spec.Returns) > 1 {
- for _, rt := range spec.Returns[1:] {
- ops.trace(", %s", rt.String())
- }
- }
- ops.trace(")")
+ if len(args) != spec.Details.Size-1 {
+ ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size)
}
ops.pending.WriteByte(spec.Opcode)
+ for i := 0; i < spec.Details.Size-1; i++ {
+ val, err := strconv.ParseUint(args[i], 0, 64)
+ if err != nil {
+ return ops.error(err)
+ }
+ if val > 255 {
+ return ops.errorf("%s outside 0..255: %d", spec.Name, val)
+ }
+ ops.pending.WriteByte(byte(val))
+ }
return nil
}
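Under asmDefault each immediate is encoded as a single byte after the opcode; for example `substring 1 3` (opcode 0x51, routed through assembleSubstring and asmDefault) ends the program with the bytes 51 01 03. A standalone check, assuming the in-repo `logic` package:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/transactions/logic"
)

func main() {
	ops, err := logic.AssembleStringWithVersion("byte \"hello\"\nsubstring 1 3\n", 2)
	if err != nil {
		fmt.Println(err)
		return
	}
	// The final op and its two byte immediates: expected 51 01 03.
	fmt.Printf("% x\n", ops.Program[len(ops.Program)-3:])
}
```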
// keywords handle parsing and assembling special asm language constructs like 'addr'
-var keywords map[string]assembleFunc
-
-func init() {
- // WARNING: special case op assembly by argOps functions must do their own type stack maintenance via ops.tpop() ops.tpush()/ops.tpusha()
- keywords = make(map[string]assembleFunc)
- keywords["int"] = assembleInt
- keywords["byte"] = assembleByte
- keywords["addr"] = assembleAddr // parse basics.Address, actually just another []byte constant
- // WARNING: special case op assembly by argOps functions must do their own type stack maintenance via ops.tpop() ops.tpush()/ops.tpusha()
+// We use OpSpec here, but somewhat degenerate, since they don't have opcodes or eval functions
+var keywords = map[string]OpSpec{
+ "int": {0, "int", nil, assembleInt, nil, nil, oneInt, 1, modeAny, opDetails{1, 2, nil, nil}},
+ "byte": {0, "byte", nil, assembleByte, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil}},
+ // parse basics.Address, actually just another []byte constant
+ "addr": {0, "addr", nil, assembleAddr, nil, nil, oneBytes, 1, modeAny, opDetails{1, 2, nil, nil}},
}
type lineError struct {
@@ -942,10 +955,6 @@ type lineError struct {
Err error
}
-func fmtLineError(line int, format string, args ...interface{}) error {
- return &lineError{Line: line, Err: fmt.Errorf(format, args...)}
-}
-
func (le *lineError) Error() string {
return fmt.Sprintf("%d: %s", le.Line, le.Err.Error())
}
@@ -957,6 +966,9 @@ func (le *lineError) Unwrap() error {
func typecheck(expected, got StackType) bool {
// Some ops push 'any' and we wait for run time to see what it is.
// Some of those 'any' are based on fields that we _could_ know now but haven't written a more detailed system of typecheck for (yet).
+ if expected == StackAny && got == StackNone { // Any is lenient, but stack can't be empty
+ return false
+ }
if (expected == StackAny) || (got == StackAny) {
return true
}
@@ -1068,10 +1080,24 @@ func (ops *OpStream) checkArgs(spec OpSpec) {
if !firstPop {
ops.trace(")")
}
+
+ if len(spec.Returns) > 0 {
+ ops.tpusha(spec.Returns)
+ ops.trace(" pushes(%s", spec.Returns[0].String())
+ if len(spec.Returns) > 1 {
+ for _, rt := range spec.Returns[1:] {
+ ops.trace(", %s", rt.String())
+ }
+ }
+ ops.trace(")")
+ }
}
// assemble reads text from an input and accumulates the program
func (ops *OpStream) assemble(fin io.Reader) error {
+ if ops.Version > LogicVersion && ops.Version != assemblerNoVersion {
+ return ops.errorf("Can not assemble version %d", ops.Version)
+ }
scanner := bufio.NewScanner(fin)
ops.sourceLine = 0
for scanner.Scan() {
@@ -1086,8 +1112,8 @@ func (ops *OpStream) assemble(fin io.Reader) error {
continue
}
if strings.HasPrefix(line, "#pragma") {
- // all pragmas must be be already processed in advance
ops.trace("%d: #pragma line\n", ops.sourceLine)
+ ops.pragma(line)
continue
}
fields := fieldsFromLine(line)
@@ -1095,21 +1121,19 @@ func (ops *OpStream) assemble(fin io.Reader) error {
ops.trace("%d: no fields\n", ops.sourceLine)
continue
}
+ // we're going to process opcodes, so fix the Version
+ if ops.Version == assemblerNoVersion {
+ ops.Version = AssemblerDefaultVersion
+ }
opstring := fields[0]
- spec, ok := opsByName[ops.Version][opstring]
- var asmFunc assembleFunc
- if ok {
- asmFunc = spec.asm
- } else {
- kwFunc, ok := keywords[opstring]
- if ok {
- asmFunc = kwFunc
- }
+ spec, ok := OpsByName[ops.Version][opstring]
+ if !ok {
+ spec, ok = keywords[opstring]
}
- if asmFunc != nil {
+ if ok {
ops.trace("%3d: %s\t", ops.sourceLine, opstring)
ops.RecordSourceLine()
- asmFunc(ops, &spec, fields[1:])
+ spec.asm(ops, &spec, fields[1:])
ops.trace("\n")
continue
}
@@ -1117,7 +1141,13 @@ func (ops *OpStream) assemble(fin io.Reader) error {
ops.createLabel(opstring[:len(opstring)-1])
continue
}
- ops.errorf("unknown opcode: %v", opstring)
+ // unknown opcode, let's report a good error if version problem
+ spec, ok = OpsByName[AssemblerMaxVersion][opstring]
+ if ok {
+ ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
+ } else {
+ ops.errorf("unknown opcode: %s", opstring)
+ }
}
// backward compatibility: do not allow jumps behind last instruction in TEAL v1
@@ -1143,6 +1173,49 @@ func (ops *OpStream) assemble(fin io.Reader) error {
return nil
}
+func (ops *OpStream) pragma(line string) error {
+ fields := strings.Split(line, " ")
+ if fields[0] != "#pragma" {
+ return ops.errorf("invalid syntax: %s", fields[0])
+ }
+ if len(fields) < 2 {
+ return ops.error("empty pragma")
+ }
+ key := fields[1]
+ switch key {
+ case "version":
+ if len(fields) < 3 {
+ return ops.error("no version value")
+ }
+ value := fields[2]
+ var ver uint64
+ if ops.pending.Len() > 0 {
+ return ops.error("#pragma version is only allowed before instructions")
+ }
+ ver, err := strconv.ParseUint(value, 0, 64)
+ if err != nil {
+ return ops.errorf("bad #pragma version: %#v", value)
+ }
+ if ver < 1 || ver > AssemblerMaxVersion {
+ return ops.errorf("unsupported version: %d", ver)
+ }
+
+ // We initialize Version with assemblerNoVersion as a marker for
+ // non-specified version because version 0 is valid
+ // version for TEAL v1.
+ if ops.Version == assemblerNoVersion {
+ ops.Version = ver
+ } else if ops.Version != ver {
+ return ops.errorf("version mismatch: assembling v%d with v%d assembler", ver, ops.Version)
+ } else {
+ // ops.Version is already correct, or needed to be upped.
+ }
+ return nil
+ default:
+ return ops.errorf("unsupported pragma directive: %#v", key)
+ }
+}
+
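A sketch of the behaviour implemented above, assuming the in-repo `logic` package (programs are illustrative): a #pragma version must agree with an explicitly supplied assembler version and must appear before any instruction has been assembled.

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/transactions/logic"
)

func main() {
	// Matching pragma and requested version: assembles, version byte is 2.
	ops, err := logic.AssembleStringWithVersion("#pragma version 2\nint 1\n", 2)
	fmt.Println(err, ops.Program[0])

	// Mismatch: the error mentions "version mismatch: assembling v2 with v1 assembler".
	_, err = logic.AssembleStringWithVersion("#pragma version 2\nint 1\n", 1)
	fmt.Println(err)

	// Pragma after an instruction: the error mentions
	// "#pragma version is only allowed before instructions".
	_, err = logic.AssembleStringWithVersion("int 1\n#pragma version 2\n", 2)
	fmt.Println(err)
}
```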
func (ops *OpStream) resolveLabels() {
saved := ops.sourceLine
raw := ops.pending.Bytes()
@@ -1298,84 +1371,15 @@ func AssembleString(text string) (*OpStream, error) {
// version is assemblerNoVersion, it uses #pragma version or falls back
// to AssemblerDefaultVersion. OpStream is returned to allow access
// to warnings, (multiple) errors, or the PC to source line mapping.
+// Note that AssemblerDefaultVersion is not the latest supported version,
+// so callers may need to explicitly pass in a higher version.
func AssembleStringWithVersion(text string, version uint64) (*OpStream, error) {
sr := strings.NewReader(text)
- ps := PragmaStream{}
- err := ps.Process(sr)
- if err != nil {
- return nil, err
- }
- // If version not set yet then set either default or #pragma version.
- // We have to use assemblerNoVersion as a marker for non-specified version
- // because version 0 is valid version for TEAL v1
- if version == assemblerNoVersion {
- if ps.Version != 0 {
- version = ps.Version
- } else {
- version = AssemblerDefaultVersion
- }
- } else if ps.Version != 0 && version != ps.Version {
- err = fmt.Errorf("version mismatch: assembling v%d with v%d assembler", ps.Version, version)
- return nil, err
- } else {
- // otherwise the passed version matches the pragma and we are ok
- }
-
- sr = strings.NewReader(text)
ops := OpStream{Version: version}
- err = ops.assemble(sr)
+ err := ops.assemble(sr)
return &ops, err
}
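To illustrate the note on AssemblerDefaultVersion: without a #pragma, AssembleString targets the default (older) version, so newer opcodes need either a pragma or an explicit version argument. The printed values assume this commit, where the default is 1 and AssemblerMaxVersion is 3:

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/transactions/logic"
)

func main() {
	// No pragma, no explicit version: falls back to AssemblerDefaultVersion.
	ops, err := logic.AssembleString("int 1\n")
	fmt.Println(err, ops.Program[0]) // expected: <nil> 1

	// Explicitly request the newest assembler version.
	ops, err = logic.AssembleStringWithVersion("int 1\nint 2\n+\n", logic.AssemblerMaxVersion)
	fmt.Println(err, ops.Program[0]) // expected: <nil> 3
}
```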
-// PragmaStream represents all parsed pragmas from the program
-type PragmaStream struct {
- Version uint64
-}
-
-// Process all pragmas in the input stream
-func (ps *PragmaStream) Process(fin io.Reader) (err error) {
- scanner := bufio.NewScanner(fin)
- sourceLine := 0
- for scanner.Scan() {
- sourceLine++
- line := scanner.Text()
- if len(line) == 0 || !strings.HasPrefix(line, "#pragma") {
- continue
- }
-
- fields := strings.Split(line, " ")
- if fields[0] != "#pragma" {
- return fmtLineError(sourceLine, "invalid syntax: %s", fields[0])
- }
- if len(fields) < 2 {
- return fmtLineError(sourceLine, "empty pragma")
- }
- key := fields[1]
- switch key {
- case "version":
- if len(fields) < 3 {
- return fmtLineError(sourceLine, "no version value")
- }
- value := fields[2]
- var ver uint64
- if sourceLine != 1 {
- return fmtLineError(sourceLine, "#pragma version is only allowed on 1st line")
- }
- ver, err = strconv.ParseUint(value, 0, 64)
- if err != nil {
- return &lineError{Line: sourceLine, Err: err}
- }
- if ver < 1 || ver > AssemblerMaxVersion {
- return fmtLineError(sourceLine, "unsupported version: %d", ver)
- }
- ps.Version = ver
- default:
- return fmtLineError(sourceLine, "unsupported pragma directive: %s", key)
- }
- }
- return
-}
-
type disassembleState struct {
program []byte
pc int
@@ -1403,9 +1407,27 @@ func (dis *disassembleState) outputLabelIfNeeded() (err error) {
type disassembleFunc func(dis *disassembleState, spec *OpSpec)
+// Basic disassembly; any extra opcode bytes are decoded as byte-sized integers.
func disDefault(dis *disassembleState, spec *OpSpec) {
- dis.nextpc = dis.pc + 1
- _, dis.err = fmt.Fprintf(dis.out, "%s\n", spec.Name)
+ lastIdx := dis.pc + spec.Details.Size - 1
+ if len(dis.program) <= lastIdx {
+ missing := lastIdx - len(dis.program) + 1
+ dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
+ return
+ }
+ dis.nextpc = dis.pc + spec.Details.Size
+ _, dis.err = fmt.Fprintf(dis.out, "%s", spec.Name)
+ if dis.err != nil {
+ return
+ }
+ for s := 1; s < spec.Details.Size; s++ {
+ b := uint(dis.program[dis.pc+s])
+ _, dis.err = fmt.Fprintf(dis.out, " %d", b)
+ if dis.err != nil {
+ return
+ }
+ }
+ _, dis.err = fmt.Fprintf(dis.out, "\n")
}
var errShortIntcblock = errors.New("intcblock ran past end of program")
@@ -1571,17 +1593,6 @@ func disIntcblock(dis *disassembleState, spec *OpSpec) {
_, dis.err = dis.out.Write([]byte("\n"))
}
-func disIntc(dis *disassembleState, spec *OpSpec) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
- }
- dis.nextpc = dis.pc + 2
- _, dis.err = fmt.Fprintf(dis.out, "intc %d\n", dis.program[dis.pc+1])
-}
-
func disBytecblock(dis *disassembleState, spec *OpSpec) {
var bytec [][]byte
bytec, dis.nextpc, dis.err = parseBytecBlock(dis.program, dis.pc)
@@ -1601,28 +1612,45 @@ func disBytecblock(dis *disassembleState, spec *OpSpec) {
_, dis.err = dis.out.Write([]byte("\n"))
}
-func disBytec(dis *disassembleState, spec *OpSpec) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
+func disPushInt(dis *disassembleState, spec *OpSpec) {
+ pos := dis.pc + 1
+ val, bytesUsed := binary.Uvarint(dis.program[pos:])
+ if bytesUsed <= 0 {
+ dis.err = fmt.Errorf("could not decode int at pc=%d", pos)
return
}
- dis.nextpc = dis.pc + 2
- _, dis.err = fmt.Fprintf(dis.out, "bytec %d\n", dis.program[dis.pc+1])
+ pos += bytesUsed
+ _, dis.err = fmt.Fprintf(dis.out, "%s %d\n", spec.Name, val)
+ dis.nextpc = pos
+}
+func checkPushInt(cx *evalContext) int {
+ opPushInt(cx)
+ return 1
}
-func disArg(dis *disassembleState, spec *OpSpec) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
+func disPushBytes(dis *disassembleState, spec *OpSpec) {
+ pos := dis.pc + 1
+ length, bytesUsed := binary.Uvarint(dis.program[pos:])
+ if bytesUsed <= 0 {
+ dis.err = fmt.Errorf("could not decode bytes length at pc=%d", pos)
return
}
- dis.nextpc = dis.pc + 2
- _, dis.err = fmt.Fprintf(dis.out, "arg %d\n", dis.program[dis.pc+1])
+ pos += bytesUsed
+ end := uint64(pos) + length
+ if end > uint64(len(dis.program)) || end < uint64(pos) {
+ dis.err = fmt.Errorf("pushbytes too long %d %d", end, pos)
+ return
+ }
+ bytes := dis.program[pos:end]
+ _, dis.err = fmt.Fprintf(dis.out, "%s 0x%s", spec.Name, hex.EncodeToString(bytes))
+ dis.nextpc = int(end)
+}
+func checkPushBytes(cx *evalContext) int {
+ opPushBytes(cx)
+ return 1
}
+// This is also used to disassemble gtxns
func disTxn(dis *disassembleState, spec *OpSpec) {
lastIdx := dis.pc + 1
if len(dis.program) <= lastIdx {
@@ -1636,9 +1664,10 @@ func disTxn(dis *disassembleState, spec *OpSpec) {
dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc)
return
}
- _, dis.err = fmt.Fprintf(dis.out, "txn %s\n", TxnFieldNames[txarg])
+ _, dis.err = fmt.Fprintf(dis.out, "%s %s\n", spec.Name, TxnFieldNames[txarg])
}
+// This is also used to disassemble gtxnsa
func disTxna(dis *disassembleState, spec *OpSpec) {
lastIdx := dis.pc + 2
if len(dis.program) <= lastIdx {
@@ -1653,7 +1682,7 @@ func disTxna(dis *disassembleState, spec *OpSpec) {
return
}
arrayFieldIdx := dis.program[dis.pc+2]
- _, dis.err = fmt.Fprintf(dis.out, "txna %s %d\n", TxnFieldNames[txarg], arrayFieldIdx)
+ _, dis.err = fmt.Fprintf(dis.out, "%s %s %d\n", spec.Name, TxnFieldNames[txarg], arrayFieldIdx)
}
func disGtxn(dis *disassembleState, spec *OpSpec) {
@@ -1727,30 +1756,6 @@ func disBranch(dis *disassembleState, spec *OpSpec) {
_, dis.err = fmt.Fprintf(dis.out, "%s %s\n", spec.Name, label)
}
-func disLoad(dis *disassembleState, spec *OpSpec) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
- }
- n := uint(dis.program[dis.pc+1])
- dis.nextpc = dis.pc + 2
- _, dis.err = fmt.Fprintf(dis.out, "load %d\n", n)
-}
-
-func disStore(dis *disassembleState, spec *OpSpec) {
- lastIdx := dis.pc + 1
- if len(dis.program) <= lastIdx {
- missing := lastIdx - len(dis.program) + 1
- dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing)
- return
- }
- n := uint(dis.program[dis.pc+1])
- dis.nextpc = dis.pc + 2
- _, dis.err = fmt.Fprintf(dis.out, "store %d\n", n)
-}
-
func disAssetHolding(dis *disassembleState, spec *OpSpec) {
lastIdx := dis.pc + 1
if len(dis.program) <= lastIdx {
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 80aae4469..762335193 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -27,7 +27,7 @@ import (
)
// used by TestAssemble and others, see UPDATE PROCEDURE in TestAssemble()
-const bigTestAssembleNonsenseProgram = `err
+const v1Nonsense = `err
global MinTxnFee
global MinBalance
global MaxTxnLife
@@ -130,6 +130,9 @@ store 2
intc 0
intc 1
mulw
+`
+
+const v2Nonsense = `
dup2
pop
pop
@@ -214,6 +217,42 @@ txn FreezeAssetAccount
txn FreezeAssetFrozen
`
+const v3Nonsense = `
+assert
+min_balance
+int 0x031337 // get bit 1, negate it, put it back
+int 1
+getbit
+!
+int 1
+setbit
+byte "test" // get byte 2, increment it, put it back
+int 2
+getbyte
+int 1
++
+int 2
+setbyte
+swap
+select
+dig 2
+int 1
+gtxns ConfigAsset
+int 2
+gtxnsa Accounts 0
+pushint 1000
+pushbytes "john"
+`
+
+func pseudoOp(opcode string) bool {
+ // We don't test every combination of
+ // intcblock,bytecblock,intc*,bytec*,arg* here. Not all of
+ // these are truly pseudops, but it seems a good name.
+ return strings.HasPrefix(opcode, "int") ||
+ strings.HasPrefix(opcode, "byte") ||
+ strings.HasPrefix(opcode, "arg")
+}
+
// Check that assembly output is stable across time.
func TestAssemble(t *testing.T) {
// UPDATE PROCEDURE:
@@ -226,17 +265,14 @@ func TestAssemble(t *testing.T) {
// This doesn't have to be a sensible program to run, it just has to compile.
for _, spec := range OpSpecs {
// Ensure that we have some basic check of all the ops, except
- // we don't test every combination of
- // intcblock,bytecblock,intc*,bytec*,arg* here.
- if !strings.Contains(bigTestAssembleNonsenseProgram, spec.Name) &&
- !strings.HasPrefix(spec.Name, "int") &&
- !strings.HasPrefix(spec.Name, "byte") &&
- !strings.HasPrefix(spec.Name, "arg") {
- t.Errorf("test should contain op %v", spec.Name)
+ if !strings.Contains(v1Nonsense+v2Nonsense, spec.Name) &&
+ !pseudoOp(spec.Name) && spec.Version <= 2 {
+ t.Errorf("v2 nonsense test should contain op %v", spec.Name)
}
}
- ops, err := AssembleStringWithVersion(bigTestAssembleNonsenseProgram, AssemblerMaxVersion)
- require.NoError(t, err)
+ // First, we test v2, not AssemblerMaxVersion. A higher version is
+ // allowed to differ (and must, in the first byte).
+ ops := testProg(t, v1Nonsense+v2Nonsense, 2)
// check that compilation is stable over time and we assemble to the same bytes this month that we did last month.
expectedBytes, _ := hex.DecodeString("022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f")
if bytes.Compare(expectedBytes, ops.Program) != 0 {
@@ -244,6 +280,27 @@ func TestAssemble(t *testing.T) {
t.Log(hex.EncodeToString(ops.Program))
}
require.Equal(t, expectedBytes, ops.Program)
+
+ // We test v3 here, and compare to AssemblerMaxVersion, with
+ // the intention that the test breaks the next time
+ // AssemblerMaxVersion is increased. At that point, we would
+ // add a new test for v4, and leave behind this test for v3.
+
+ for _, spec := range OpSpecs {
+ // Ensure that we have some basic check of all the ops, except
+ if !strings.Contains(v1Nonsense+v2Nonsense+v3Nonsense, spec.Name) &&
+ !pseudoOp(spec.Name) && spec.Version <= 3 {
+ t.Errorf("v3 nonsense test should contain op %v", spec.Name)
+ }
+ }
+ ops = testProg(t, v1Nonsense+v2Nonsense+v3Nonsense, AssemblerMaxVersion)
+ // check that compilation is stable over time and we assemble to the same bytes this month that we did last month.
+ expectedBytes, _ = hex.DecodeString("032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e")
+ if bytes.Compare(expectedBytes, ops.Program) != 0 {
+ // this print is for convenience if the program has been changed. the hex string can be copy pasted back in as a new expected result.
+ t.Log(hex.EncodeToString(ops.Program))
+ }
+ require.Equal(t, expectedBytes, ops.Program)
}
func TestAssembleAlias(t *testing.T) {
@@ -284,30 +341,44 @@ func testMatch(t *testing.T, actual, expected string) {
}
}
-func testProg(t *testing.T, source string, ver uint64, expected ...expect) {
- ops, err := AssembleStringWithVersion(source, ver)
+func testProg(t *testing.T, source string, ver uint64, expected ...expect) *OpStream {
+ program := strings.ReplaceAll(source, ";", "\n")
+ ops, err := AssembleStringWithVersion(program, ver)
if len(expected) == 0 {
+ if len(ops.Errors) > 0 || err != nil || ops == nil || ops.Program == nil {
+ t.Log(program)
+ }
+ require.Empty(t, ops.Errors)
require.NoError(t, err)
require.NotNil(t, ops)
- require.Empty(t, ops.Errors)
require.NotNil(t, ops.Program)
} else {
require.Error(t, err)
errors := ops.Errors
- require.Len(t, errors, len(expected))
for _, exp := range expected {
- var found *lineError
- for _, err := range errors {
- if err.Line == exp.l {
- found = err
+ if exp.l == 0 {
+ // line 0 means: "must match all"
+ require.Len(t, expected, 1)
+ for _, err := range errors {
+ msg := err.Unwrap().Error()
+ testMatch(t, msg, exp.s)
}
+ } else {
+ var found *lineError
+ for _, err := range errors {
+ if err.Line == exp.l {
+ found = err
+ break
+ }
+ }
+ require.NotNil(t, found)
+ msg := found.Unwrap().Error()
+ testMatch(t, msg, exp.s)
}
- require.NotNil(t, found)
- msg := found.Unwrap().Error()
- testMatch(t, msg, exp.s)
}
require.Nil(t, ops.Program)
}
+ return ops
}
func testLine(t *testing.T, line string, ver uint64, expected string) {
@@ -324,11 +395,11 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
func TestAssembleTxna(t *testing.T) {
testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna array index beyond 255: 256")
- testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown arg: Sender")
+ testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: Sender")
testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256")
testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna group index beyond 255: 256")
- testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown arg: Sender")
+ testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: Sender")
testLine(t, "txn Accounts 0", 1, "txn expects one argument")
testLine(t, "txn Accounts 0 1", 2, "txn expects one or two arguments")
testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects two arguments")
@@ -338,20 +409,20 @@ func TestAssembleTxna(t *testing.T) {
testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects three arguments")
testLine(t, "gtxna a Accounts 0", AssemblerMaxVersion, "strconv.ParseUint...")
testLine(t, "gtxna 0 Accounts a", AssemblerMaxVersion, "strconv.ParseUint...")
- testLine(t, "txn ABC", 2, "txn unknown arg: ABC")
- testLine(t, "gtxn 0 ABC", 2, "gtxn unknown arg: ABC")
+ testLine(t, "txn ABC", 2, "txn unknown field: ABC")
+ testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: ABC")
testLine(t, "gtxn a ABC", 2, "strconv.ParseUint...")
- testLine(t, "txn Accounts", AssemblerMaxVersion, "found txna field Accounts in txn op")
- testLine(t, "txn Accounts", 1, "found txna field Accounts in txn op")
+ testLine(t, "txn Accounts", AssemblerMaxVersion, "found array field Accounts in txn op")
+ testLine(t, "txn Accounts", 1, "found array field Accounts in txn op")
testLine(t, "txn Accounts 0", AssemblerMaxVersion, "")
- testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "found gtxna field Accounts in gtxn op")
- testLine(t, "gtxn 0 Accounts", 1, "found gtxna field Accounts in gtxn op")
+ testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "found array field Accounts in gtxn op")
+ testLine(t, "gtxn 0 Accounts", 1, "found array field Accounts in gtxn op")
testLine(t, "gtxn 0 Accounts 1", AssemblerMaxVersion, "")
}
func TestAssembleGlobal(t *testing.T) {
testLine(t, "global", AssemblerMaxVersion, "global expects one argument")
- testLine(t, "global a", AssemblerMaxVersion, "global unknown arg: a")
+ testLine(t, "global a", AssemblerMaxVersion, "global unknown field: a")
}
func TestAssembleDefault(t *testing.T) {
@@ -703,6 +774,7 @@ byte base64 avGWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz//=
==
int 1 //sometext
&& //somemoretext
+int 1
==
byte b64 //GWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz+8=
byte b64 avGWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz//=
@@ -710,10 +782,9 @@ byte b64 avGWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz//=
||`
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(text, v)
- require.NoError(t, err)
+ ops := testProg(t, text, v)
s := hex.EncodeToString(ops.Program)
- require.Equal(t, mutateProgVersion(v, "01200101260320fff19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfed206af19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfff20fff19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfef2829122210122a291211"), s)
+ require.Equal(t, mutateProgVersion(v, "01200101260320fff19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfed206af19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfff20fff19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfef282912221022122a291211"), s)
})
}
}
@@ -756,7 +827,7 @@ int 2`
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
testProg(t, source, v,
expect{2, "reference to undefined label nowhere"},
- expect{4, "txn unknown arg: XYZ"})
+ expect{4, "txn unknown field: XYZ"})
})
}
}
@@ -765,7 +836,7 @@ func TestAssembleDisassemble(t *testing.T) {
// Specifically constructed program text that should be recreated by Disassemble()
// TODO: disassemble to int/byte psuedo-ops instead of raw intcblock/bytecblock/intc/bytec
t.Parallel()
- text := `// version 2
+ text := fmt.Sprintf(`// version %d
intcblock 0 1 2 3 4 5
bytecblock 0xcafed00d 0x1337 0x2001 0xdeadbeef 0x70077007
intc_1
@@ -794,6 +865,7 @@ global LogicSigVersion
global Round
global LatestTimestamp
global CurrentApplicationID
+global CreatorAddress
txn Sender
txn Fee
bnz label1
@@ -844,8 +916,16 @@ txn ConfigAssetClawback
txn FreezeAsset
txn FreezeAssetAccount
txn FreezeAssetFrozen
+txna Assets 0
+txn NumAssets
+txna Applications 0
+txn NumApplications
+txn GlobalNumUint
+txn GlobalNumByteSlice
+txn LocalNumUint
+txn LocalNumByteSlice
gtxn 12 Fee
-`
+`, AssemblerMaxVersion)
for _, globalField := range GlobalFieldNames {
if !strings.Contains(text, globalField) {
t.Errorf("TestAssembleDisassemble missing field global %v", globalField)
@@ -856,8 +936,7 @@ gtxn 12 Fee
t.Errorf("TestAssembleDisassemble missing field txn %v", txnField)
}
}
- ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, text, AssemblerMaxVersion)
t2, err := Disassemble(ops.Program)
require.Equal(t, text, t2)
require.NoError(t, err)
@@ -869,21 +948,26 @@ func TestAssembleDisassembleCycle(t *testing.T) {
t.Parallel()
tests := map[uint64]string{
- 2: bigTestAssembleNonsenseProgram,
- 1: bigTestAssembleNonsenseProgram[:strings.Index(bigTestAssembleNonsenseProgram, "dup2")],
+ 1: v1Nonsense,
+ 2: v1Nonsense + v2Nonsense,
+ 3: v1Nonsense + v2Nonsense + v3Nonsense,
}
+ // This confirms that each program compiles to the same bytes
+ // (except the leading version indicator), when compiled under
+ // original and max versions. That doesn't *have* to be true,
+ // as we can introduce optimizations in later versions that
+ // change the bytecode emitted. But currently it is, so we
+ // test it for now to catch any surprises.
for v, source := range tests {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(source, v)
- require.NoError(t, err)
+ ops := testProg(t, source, v)
t2, err := Disassemble(ops.Program)
require.NoError(t, err)
- ops2, err := AssembleStringWithVersion(t2, 2)
+ ops2 := testProg(t, t2, AssemblerMaxVersion)
if err != nil {
t.Log(t2)
}
- require.NoError(t, err)
require.Equal(t, ops.Program[1:], ops2.Program[1:])
})
}
@@ -1004,7 +1088,7 @@ func TestAssembleVersions(t *testing.T) {
t.Parallel()
testLine(t, "txna Accounts 0", AssemblerMaxVersion, "")
testLine(t, "txna Accounts 0", 2, "")
- testLine(t, "txna Accounts 0", 1, "unknown opcode: txna")
+ testLine(t, "txna Accounts 0", 1, "txna opcode was introduced in TEAL v2")
}
func TestAssembleBalance(t *testing.T) {
@@ -1029,7 +1113,7 @@ func TestAssembleAsset(t *testing.T) {
func TestDisassembleSingleOp(t *testing.T) {
t.Parallel()
// test ensures no double arg_0 entries in disassembly listing
- sample := "// version 2\narg_0\n"
+ sample := fmt.Sprintf("// version %d\narg_0\n", AssemblerMaxVersion)
ops, err := AssembleStringWithVersion(sample, AssemblerMaxVersion)
require.NoError(t, err)
require.Equal(t, 2, len(ops.Program))
@@ -1041,21 +1125,21 @@ func TestDisassembleSingleOp(t *testing.T) {
func TestDisassembleTxna(t *testing.T) {
t.Parallel()
// check txn and txna are properly disassembled
- txnSample := "// version 2\ntxn Sender\n"
+ txnSample := fmt.Sprintf("// version %d\ntxn Sender\n", AssemblerMaxVersion)
ops, err := AssembleStringWithVersion(txnSample, AssemblerMaxVersion)
require.NoError(t, err)
disassembled, err := Disassemble(ops.Program)
require.NoError(t, err)
require.Equal(t, txnSample, disassembled)
- txnaSample := "// version 2\ntxna Accounts 0\n"
+ txnaSample := fmt.Sprintf("// version %d\ntxna Accounts 0\n", AssemblerMaxVersion)
ops, err = AssembleStringWithVersion(txnaSample, AssemblerMaxVersion)
require.NoError(t, err)
disassembled, err = Disassemble(ops.Program)
require.NoError(t, err)
require.Equal(t, txnaSample, disassembled)
- txnSample2 := "// version 2\ntxn Accounts 0\n"
+ txnSample2 := fmt.Sprintf("// version %d\ntxn Accounts 0\n", AssemblerMaxVersion)
ops, err = AssembleStringWithVersion(txnSample2, AssemblerMaxVersion)
require.NoError(t, err)
disassembled, err = Disassemble(ops.Program)
@@ -1067,21 +1151,21 @@ func TestDisassembleTxna(t *testing.T) {
func TestDisassembleGtxna(t *testing.T) {
t.Parallel()
// check gtxn and gtxna are properly disassembled
- gtxnSample := "// version 2\ngtxn 0 Sender\n"
+ gtxnSample := fmt.Sprintf("// version %d\ngtxn 0 Sender\n", AssemblerMaxVersion)
ops, err := AssembleStringWithVersion(gtxnSample, AssemblerMaxVersion)
require.NoError(t, err)
disassembled, err := Disassemble(ops.Program)
require.NoError(t, err)
require.Equal(t, gtxnSample, disassembled)
- gtxnaSample := "// version 2\ngtxna 0 Accounts 0\n"
+ gtxnaSample := fmt.Sprintf("// version %d\ngtxna 0 Accounts 0\n", AssemblerMaxVersion)
ops, err = AssembleStringWithVersion(gtxnaSample, AssemblerMaxVersion)
require.NoError(t, err)
disassembled, err = Disassemble(ops.Program)
require.NoError(t, err)
require.Equal(t, gtxnaSample, disassembled)
- gtxnSample2 := "// version 2\ngtxn 0 Accounts 0\n"
+ gtxnSample2 := fmt.Sprintf("// version %d\ngtxn 0 Accounts 0\n", AssemblerMaxVersion)
ops, err = AssembleStringWithVersion(gtxnSample2, AssemblerMaxVersion)
require.NoError(t, err)
disassembled, err = Disassemble(ops.Program)
@@ -1114,8 +1198,7 @@ label1:
func TestAssembleOffsets(t *testing.T) {
t.Parallel()
source := "err"
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, source, AssemblerMaxVersion)
require.Equal(t, 2, len(ops.Program))
require.Equal(t, 1, len(ops.OffsetToLine))
// vlen
@@ -1131,8 +1214,7 @@ func TestAssembleOffsets(t *testing.T) {
// comment
err
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops = testProg(t, source, AssemblerMaxVersion)
require.Equal(t, 3, len(ops.Program))
require.Equal(t, 2, len(ops.OffsetToLine))
// vlen
@@ -1149,13 +1231,12 @@ err
require.Equal(t, 2, line)
source = `err
-bnz label1
+b label1
err
label1:
err
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops = testProg(t, source, AssemblerMaxVersion)
require.Equal(t, 7, len(ops.Program))
require.Equal(t, 4, len(ops.OffsetToLine))
// vlen
@@ -1166,15 +1247,15 @@ err
line, ok = ops.OffsetToLine[1]
require.True(t, ok)
require.Equal(t, 0, line)
- // bnz
+ // b
line, ok = ops.OffsetToLine[2]
require.True(t, ok)
require.Equal(t, 1, line)
- // bnz byte 1
+ // b byte 1
line, ok = ops.OffsetToLine[3]
require.False(t, ok)
require.Equal(t, 0, line)
- // bnz byte 2
+ // b byte 2
line, ok = ops.OffsetToLine[4]
require.False(t, ok)
require.Equal(t, 0, line)
@@ -1191,8 +1272,7 @@ err
// comment
!
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops = testProg(t, source, AssemblerMaxVersion)
require.Equal(t, 6, len(ops.Program))
require.Equal(t, 2, len(ops.OffsetToLine))
// vlen
@@ -1325,114 +1405,67 @@ func TestStringLiteralParsing(t *testing.T) {
require.Nil(t, result)
}
-func TestPragmaStream(t *testing.T) {
+func TestPragmas(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
text := fmt.Sprintf("#pragma version %d", v)
- sr := strings.NewReader(text)
- ps := PragmaStream{}
- err := ps.Process(sr)
- require.NoError(t, err)
- require.Equal(t, v, ps.Version)
+ ops := testProg(t, text, v)
+ require.Equal(t, v, ops.Version)
}
- text := `#pragma version 100`
- sr := strings.NewReader(text)
- ps := PragmaStream{}
- err := ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "1: unsupported version: 100")
- require.Equal(t, uint64(0), ps.Version)
+ testProg(t, `#pragma version 100`, assemblerNoVersion,
+ expect{1, "unsupported version: 100"})
- text = `#pragma version 0`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
+ ops, err := AssembleStringWithVersion(`int 1`, 99)
require.Error(t, err)
- require.Contains(t, err.Error(), "1: unsupported version: 0")
- require.Equal(t, uint64(0), ps.Version)
- text = `#pragma version a`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "1: strconv.ParseUint")
- require.Equal(t, uint64(0), ps.Version)
+ testProg(t, `#pragma version 0`, assemblerNoVersion,
+ expect{1, "unsupported version: 0"})
- text = `#pragmas version 1`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "1: invalid syntax")
- require.Equal(t, uint64(0), ps.Version)
+ testProg(t, `#pragma version a`, assemblerNoVersion,
+ expect{1, `bad #pragma version: "a"`})
- text = `
-#pragma version a`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "2: #pragma version is only allowed on 1st line")
- require.Equal(t, uint64(0), ps.Version)
+ // will default to 1
+ ops = testProg(t, "int 3", assemblerNoVersion)
+ require.Equal(t, uint64(1), ops.Version)
+ require.Equal(t, uint8(1), ops.Program[0])
- text = `#pragma version 1
-#pragma version 2`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "2: #pragma version is only allowed on 1st line")
- require.Equal(t, uint64(1), ps.Version)
+ ops = testProg(t, "\n#pragma version 2", assemblerNoVersion)
+ require.Equal(t, uint64(2), ops.Version)
- text = `#pragma version 1
-#pragma run-mode 2`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "2: unsupported pragma directive: run-mode")
- require.Equal(t, uint64(1), ps.Version)
+ ops = testProg(t, "\n//comment\n#pragma version 2", assemblerNoVersion)
+ require.Equal(t, uint64(2), ops.Version)
- text = `#pragma versions`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "1: unsupported pragma directive: versions")
- require.Equal(t, uint64(0), ps.Version)
+ // changing version is not allowed
+ testProg(t, "#pragma version 1", 2, expect{1, "version mismatch..."})
+ testProg(t, "#pragma version 2", 1, expect{1, "version mismatch..."})
- text = `# pragmas version 1`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.NoError(t, err)
- require.Equal(t, uint64(0), ps.Version)
+ ops = testProg(t, "#pragma version 2\n#pragma version 1", assemblerNoVersion,
+ expect{2, "version mismatch..."})
- text = `
-# pragmas version 1`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.NoError(t, err)
- require.Equal(t, uint64(0), ps.Version)
+ // repetitive, but fine
+ ops = testProg(t, "#pragma version 2\n#pragma version 2", assemblerNoVersion)
+ require.Equal(t, uint64(2), ops.Version)
- text = `#pragma`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "1: empty pragma")
- require.Equal(t, uint64(0), ps.Version)
+ testProg(t, "\nint 1\n#pragma version 2", assemblerNoVersion,
+ expect{3, "#pragma version is only allowed before instructions"})
- text = `#pragma version`
- sr = strings.NewReader(text)
- ps = PragmaStream{}
- err = ps.Process(sr)
- require.Error(t, err)
- require.Contains(t, err.Error(), "1: no version")
- require.Equal(t, uint64(0), ps.Version)
+ testProg(t, "#pragma run-mode 2", assemblerNoVersion,
+ expect{1, `unsupported pragma directive: "run-mode"`})
+
+ testProg(t, "#pragma versions", assemblerNoVersion,
+ expect{1, `unsupported pragma directive: "versions"`})
+
+ ops = testProg(t, "#pragma version 1", assemblerNoVersion)
+ require.Equal(t, uint64(1), ops.Version)
+
+ ops = testProg(t, "\n#pragma version 1", assemblerNoVersion)
+ require.Equal(t, uint64(1), ops.Version)
+
+ testProg(t, "#pragma", assemblerNoVersion, expect{1, "empty pragma"})
+
+ testProg(t, "#pragma version", assemblerNoVersion,
+ expect{1, "no version value"})
}
func TestAssemblePragmaVersion(t *testing.T) {
@@ -1446,13 +1479,9 @@ int 1
require.NoError(t, err)
require.Equal(t, ops1.Program, ops.Program)
- _, err = AssembleStringWithVersion(text, 0)
- require.Error(t, err)
- require.Contains(t, err.Error(), "version mismatch")
-
- _, err = AssembleStringWithVersion(text, 2)
- require.Error(t, err)
- require.Contains(t, err.Error(), "version mismatch")
+ testProg(t, text, 0, expect{1, "version mismatch..."})
+ testProg(t, text, 2, expect{1, "version mismatch..."})
+ testProg(t, text, assemblerNoVersion)
ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
require.NoError(t, err)
@@ -1467,13 +1496,8 @@ int 1
require.NoError(t, err)
require.Equal(t, ops2.Program, ops.Program)
- _, err = AssembleStringWithVersion(text, 0)
- require.Error(t, err)
- require.Contains(t, err.Error(), "version mismatch")
-
- _, err = AssembleStringWithVersion(text, 1)
- require.Error(t, err)
- require.Contains(t, err.Error(), "version mismatch")
+ testProg(t, text, 0, expect{1, "version mismatch..."})
+ testProg(t, text, 1, expect{1, "version mismatch..."})
ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
require.NoError(t, err)
@@ -1492,10 +1516,8 @@ len
require.NoError(t, err)
require.Equal(t, ops2.Program, ops.Program)
- _, err = AssembleString("#pragma unk")
- require.Error(t, err)
- require.Contains(t, err.Error(), "1: unsupported pragma directive: unk")
-
+ testProg(t, "#pragma unk", assemblerNoVersion,
+ expect{1, `unsupported pragma directive: "unk"`})
}
func TestAssembleConstants(t *testing.T) {
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index a8cc7147f..2b3c1cb56 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -222,7 +222,7 @@ gtxn 0 TxID
==
&&
pop
-// check global
+// check global (these are set equal in defaultEvalProto())
global MinTxnFee
global MinBalance
==
@@ -334,24 +334,24 @@ func TestBackwardCompatTEALv1(t *testing.T) {
// ensure v2 fields error in v1 program
func TestBackwardCompatGlobalFields(t *testing.T) {
t.Parallel()
- var fields []string
+ var fields []globalFieldSpec
for _, fs := range globalFieldSpecs {
if fs.version > 1 {
- fields = append(fields, fs.gfield.String())
+ fields = append(fields, fs)
}
}
require.Greater(t, len(fields), 1)
ledger := makeTestLedger(nil)
for _, field := range fields {
- text := fmt.Sprintf("global %s", field)
- // check V1 assembler fails
- testLine(t, text, assemblerNoVersion, "...available in version 2. Missed #pragma version?")
- testLine(t, text, 0, "...available in version 2. Missed #pragma version?")
- testLine(t, text, 1, "...available in version 2. Missed #pragma version?")
+ text := fmt.Sprintf("global %s", field.gfield.String())
+ // check assembler fails if version before introduction
+ testLine(t, text, assemblerNoVersion, "...available in version...")
+ for v := uint64(0); v < field.version; v++ {
+ testLine(t, text, v, "...available in version...")
+ }
- ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, text, AssemblerMaxVersion)
proto := config.Consensus[protocol.ConsensusV23]
require.False(t, proto.Application)
@@ -360,7 +360,7 @@ func TestBackwardCompatGlobalFields(t *testing.T) {
ep.Ledger = ledger
// check failure with version check
- _, err = Eval(ops.Program, ep)
+ _, err := Eval(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "greater than protocol supported version")
_, err = Eval(ops.Program, ep)
@@ -414,16 +414,17 @@ func TestBackwardCompatTxnFields(t *testing.T) {
field := fs.field.String()
for _, command := range tests {
text := fmt.Sprintf(command, field)
- asmError := "...available in version 2..."
+ asmError := "...available in version ..."
if _, ok := txnaFieldSpecByField[fs.field]; ok {
parts := strings.Split(text, " ")
op := parts[0]
- asmError = fmt.Sprintf("found %sa field %s in %s op", op, field, op)
+ asmError = fmt.Sprintf("found array field %s in %s op", field, op)
}
- // check V1 assembler fails
+ // check assembler fails if version before introduction
testLine(t, text, assemblerNoVersion, asmError)
- testLine(t, text, 0, asmError)
- testLine(t, text, 1, asmError)
+ for v := uint64(0); v < fs.version; v++ {
+ testLine(t, text, v, asmError)
+ }
ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
if _, ok := txnaFieldSpecByField[fs.field]; ok {
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 5f6dfbed2..4da698f34 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -65,52 +65,65 @@ var opDocList = []stringString{
{"~", "bitwise invert value X"},
{"mulw", "A times B out to 128-bit long result as low (top) and high uint64 values on the stack"},
{"addw", "A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack"},
- {"intcblock", "load block of uint64 constants"},
- {"intc", "push value from uint64 constants to stack by index into constants"},
+ {"intcblock", "prepare block of uint64 constants for use by intc"},
+ {"intc", "push Ith constant from intcblock to stack"},
{"intc_0", "push constant 0 from intcblock to stack"},
{"intc_1", "push constant 1 from intcblock to stack"},
{"intc_2", "push constant 2 from intcblock to stack"},
{"intc_3", "push constant 3 from intcblock to stack"},
- {"bytecblock", "load block of byte-array constants"},
- {"bytec", "push bytes constant to stack by index into constants"},
+ {"pushint", "push immediate UINT to the stack as an integer"},
+ {"bytecblock", "prepare block of byte-array constants for use by bytec"},
+ {"bytec", "push Ith constant from bytecblock to stack"},
{"bytec_0", "push constant 0 from bytecblock to stack"},
{"bytec_1", "push constant 1 from bytecblock to stack"},
{"bytec_2", "push constant 2 from bytecblock to stack"},
{"bytec_3", "push constant 3 from bytecblock to stack"},
- {"arg", "push Args[N] value to stack by index"},
- {"arg_0", "push Args[0] to stack"},
- {"arg_1", "push Args[1] to stack"},
- {"arg_2", "push Args[2] to stack"},
- {"arg_3", "push Args[3] to stack"},
- {"txn", "push field from current transaction to stack"},
- {"gtxn", "push field to the stack from a transaction in the current transaction group"},
- {"txna", "push value of an array field from current transaction to stack"},
- {"gtxna", "push value of a field to the stack from a transaction in the current transaction group"},
+ {"pushbytes", "push the following program bytes to the stack"},
+ {"arg", "push Nth LogicSig argument to stack"},
+ {"arg_0", "push LogicSig argument 0 to stack"},
+ {"arg_1", "push LogicSig argument 1 to stack"},
+ {"arg_2", "push LogicSig argument 2 to stack"},
+ {"arg_3", "push LogicSig argument 3 to stack"},
+ {"txn", "push field F of current transaction to stack"},
+ {"gtxn", "push field F of the Tth transaction in the current group"},
+ {"gtxns", "push field F of the Ath transaction in the current group"},
+ {"txna", "push Ith value of the array field F of the current transaction"},
+ {"gtxna", "push Ith value of the array field F from the Tth transaction in the current group"},
+ {"gtxnsa", "push Ith value of the array field F from the Ath transaction in the current group"},
{"global", "push value from globals to stack"},
{"load", "copy a value from scratch space to the stack"},
{"store", "pop a value from the stack and store to scratch space"},
- {"bnz", "branch if value X is not zero"},
- {"bz", "branch if value X is zero"},
- {"b", "branch unconditionally to offset"},
+ {"bnz", "branch to TARGET if value X is not zero"},
+ {"bz", "branch to TARGET if value X is zero"},
+ {"b", "branch unconditionally to TARGET"},
{"return", "use last value on stack as success value; end"},
{"pop", "discard value X from stack"},
{"dup", "duplicate last value on stack"},
{"dup2", "duplicate two last values on stack: A, B -> A, B, A, B"},
- {"concat", "pop two byte strings A and B and join them, push the result"},
- {"substring", "pop a byte string X. For immediate values in 0..255 M and N: extract a range of bytes from it starting at M up to but not including N, push the substring result. If N < M, or either is larger than the string length, the program fails"},
- {"substring3", "pop a byte string A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the string length, the program fails"},
- {"balance", "get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender"},
+ {"dig", "push the Nth value from the top of the stack. dig 0 is equivalent to dup"},
+ {"swap", "swaps two last values on stack: A, B -> B, A"},
+ {"select", "selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A)"},
+ {"concat", "pop two byte-arrays A and B and join them, push the result"},
+ {"substring", "pop a byte-array A. For immediate values in 0..255 S and E: extract a range of bytes from A starting at S up to but not including E, push the substring result. If E < S, or either is larger than the array length, the program fails"},
+ {"substring3", "pop a byte-array A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the array length, the program fails"},
+ {"getbit", "pop a target A (integer or byte-array), and index B. Push the Bth bit of A."},
+ {"setbit", "pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result"},
+ {"getbyte", "pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer"},
+ {"setbyte", "pop a byte-array A, integer B, and small integer C (between 0..255). Set the Bth byte of A to C, and push the result"},
+ {"balance", "get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted."},
+ {"min_balance", "get minimum required balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes."},
{"app_opted_in", "check if account specified by Txn.Accounts[A] opted in for the application B => {0 or 1}"},
{"app_local_get", "read from account specified by Txn.Accounts[A] from local state of the current application key B => value"},
- {"app_local_get_ex", "read from account specified by Txn.Accounts[A] from local state of the application B key C => {0 or 1 (top), value}"},
+ {"app_local_get_ex", "read from account specified by Txn.Accounts[A] from local state of the application B key C => [*... stack*, value, 0 or 1]"},
{"app_global_get", "read key A from global state of a current application => value"},
- {"app_global_get_ex", "read from application Txn.ForeignApps[A] global state key B => {0 or 1 (top), value}. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app"},
+ {"app_global_get_ex", "read from application Txn.ForeignApps[A] global state key B => [*... stack*, value, 0 or 1]. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app"},
{"app_local_put", "write to account specified by Txn.Accounts[A] to local state of a current application key B with value C"},
{"app_global_put", "write key A and value B to global state of the current application"},
{"app_local_del", "delete from account specified by Txn.Accounts[A] local state key B of the current application"},
{"app_global_del", "delete key A from a global state of the current application"},
{"asset_holding_get", "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}"},
{"asset_params_get", "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}"},
+ {"assert", "immediately fail unless value X is a non-zero number"},
}
var opDocByName map[string]string
@@ -127,20 +140,25 @@ func OpDoc(opName string) string {
var opcodeImmediateNoteList = []stringString{
{"intcblock", "{varuint length} [{varuint value}, ...]"},
{"intc", "{uint8 int constant index}"},
+ {"pushint", "{varuint int}"},
{"bytecblock", "{varuint length} [({varuint value length} bytes), ...]"},
{"bytec", "{uint8 byte constant index}"},
+ {"pushbytes", "{varuint length} {bytes}"},
{"arg", "{uint8 arg index N}"},
{"txn", "{uint8 transaction field index}"},
- {"gtxn", "{uint8 transaction group index}{uint8 transaction field index}"},
- {"txna", "{uint8 transaction field index}{uint8 transaction field array index}"},
- {"gtxna", "{uint8 transaction group index}{uint8 transaction field index}{uint8 transaction field array index}"},
+ {"gtxn", "{uint8 transaction group index} {uint8 transaction field index}"},
+ {"gtxns", "{uint8 transaction field index}"},
+ {"txna", "{uint8 transaction field index} {uint8 transaction field array index}"},
+ {"gtxna", "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}"},
+ {"gtxnsa", "{uint8 transaction field index} {uint8 transaction field array index}"},
{"global", "{uint8 global field index}"},
{"bnz", "{0..0x7fff forward branch offset, big endian}"},
{"bz", "{0..0x7fff forward branch offset, big endian}"},
{"b", "{0..0x7fff forward branch offset, big endian}"},
{"load", "{uint8 position in scratch space to load from}"},
{"store", "{uint8 position in scratch space to store to}"},
- {"substring", "{uint8 start position}{uint8 end position}"},
+ {"substring", "{uint8 start position} {uint8 end position}"},
+ {"dig", "{uint8 depth}"},
{"asset_holding_get", "{uint8 asset holding field index}"},
{"asset_params_get", "{uint8 asset params field index}"},
}
@@ -161,18 +179,21 @@ var opDocExtraList = []stringString{
{"bz", "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`."},
{"b", "See `bnz` for details on how branches work. `b` always jumps to the offset."},
{"intcblock", "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script."},
- {"bytecblock", "`bytecblock` loads the following program bytes into an array of byte string constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script."},
+ {"bytecblock", "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script."},
{"*", "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`."},
{"+", "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`."},
{"txn", "FirstValidTime causes the program to fail. The field is reserved for future use."},
{"gtxn", "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`."},
+ {"gtxns", "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction."},
{"btoi", "`btoi` panics if the input is longer than 8 bytes."},
{"concat", "`concat` panics if the result would be greater than 4096 bytes."},
+ {"getbit", "see explanation of bit ordering in setbit"},
+	{"setbit", "bit indexing begins with low-order bits in integers. Setting bit 4 to 1 on the integer 0 yields 16 (`int 0x0010`, or 2^4). Indexing begins at the first byte of a byte-array (as seen in getbyte and substring). Setting bits 0 through 11 to 1 in a 4-byte array of 0s yields `byte 0xfff00000`"},
{"app_opted_in", "params: account index, application id (top of the stack on opcode entry). Return: 1 if opted in and 0 otherwise."},
- {"app_local_get", "params: account index, state key. Return: value. The value is zero if the key does not exist."},
- {"app_local_get_ex", "params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value."},
- {"app_global_get_ex", "params: application index, state key. Return: value. Application index is"},
- {"app_global_get", "params: state key. Return: value. The value is zero if the key does not exist."},
+ {"app_local_get", "params: account index, state key. Return: value. The value is zero (of type uint64) if the key does not exist."},
+ {"app_local_get_ex", "params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist."},
+ {"app_global_get_ex", "params: application index, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist."},
+ {"app_global_get", "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist."},
{"app_local_put", "params: account index, state key, value."},
{"app_local_del", "params: account index, state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)"},
{"app_global_del", "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)"},
@@ -199,26 +220,21 @@ type OpGroup struct {
// OpGroupList is groupings of ops for documentation purposes.
var OpGroupList = []OpGroup{
- {"Arithmetic", []string{"sha256", "keccak256", "sha512_256", "ed25519verify", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "concat", "substring", "substring3"}},
- {"Loading Values", []string{"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "txn", "gtxn", "txna", "gtxna", "global", "load", "store"}},
- {"Flow Control", []string{"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2"}},
- {"State Access", []string{"balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get"}},
-}
-
-// OpCost returns the relative cost score for an op
-func OpCost(opName string) int {
- return opsByName[LogicVersion][opName].opSize.cost
+ {"Arithmetic", []string{"sha256", "keccak256", "sha512_256", "ed25519verify", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "getbit", "setbit", "getbyte", "setbyte", "concat", "substring", "substring3"}},
+ {"Loading Values", []string{"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "txn", "gtxn", "txna", "gtxna", "gtxns", "gtxnsa", "global", "load", "store"}},
+ {"Flow Control", []string{"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "swap", "select", "assert"}},
+ {"State Access", []string{"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get"}},
}
// OpAllCosts returns an array of the relative cost score for an op by version.
// If all the costs are the same the array is single entry
// otherwise it has costs by op version
func OpAllCosts(opName string) []int {
- cost := opsByName[LogicVersion][opName].opSize.cost
+ cost := OpsByName[LogicVersion][opName].Details.Cost
costs := make([]int, LogicVersion+1)
isDifferent := false
for v := 1; v <= LogicVersion; v++ {
- costs[v] = opsByName[v][opName].opSize.cost
+ costs[v] = OpsByName[v][opName].Details.Cost
if costs[v] > 0 && costs[v] != cost {
isDifferent = true
}
@@ -230,11 +246,6 @@ func OpAllCosts(opName string) []int {
return costs
}
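A hedged usage sketch for OpAllCosts, consistent with the expectations in TestOpAllCosts below; the import path is the package this diff touches, and the printed values for "sha256" depend on the current cost table:

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/transactions/logic"
)

func main() {
	// "+" costs 1 in every version, so the slice collapses to a single entry.
	fmt.Println(logic.OpAllCosts("+")) // [1]
	// Ops whose cost changed across versions get one entry per version
	// (index 0 is unused), e.g. the hashing opcodes.
	fmt.Println(logic.OpAllCosts("sha256"))
}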
-// OpSize returns the number of bytes for an op. 0 for variable.
-func OpSize(opName string) int {
- return opsByName[LogicVersion][opName].opSize.size
-}
-
// see assembler.go TxnTypeNames
// also used to parse symbolic constants for `int`
var typeEnumDescriptions = []stringString{
@@ -307,6 +318,14 @@ var txnFieldDocList = []stringString{
{"NumAppArgs", "Number of ApplicationArgs"},
{"Accounts", "Accounts listed in the ApplicationCall transaction"},
{"NumAccounts", "Number of Accounts"},
+ {"Assets", "Foreign Assets listed in the ApplicationCall transaction"},
+ {"NumAssets", "Number of Assets"},
+ {"Applications", "Foreign Apps listed in the ApplicationCall transaction"},
+ {"NumApplications", "Number of Applications"},
+ {"GlobalNumUint", "Number of global state integers in ApplicationCall"},
+ {"GlobalNumByteSlice", "Number of global state byteslices in ApplicationCall"},
+ {"LocalNumUint", "Number of local state integers in ApplicationCall"},
+ {"LocalNumByteSlice", "Number of local state byteslices in ApplicationCall"},
{"ApprovalProgram", "Approval program"},
{"ClearStateProgram", "Clear state program"},
{"RekeyTo", "32 byte Sender's new AuthAddr"},
@@ -345,6 +364,7 @@ var globalFieldDocList = []stringString{
{"Round", "Current round number"},
{"LatestTimestamp", "Last confirmed block UNIX timestamp. Fails if negative"},
{"CurrentApplicationID", "ID of current application executing. Fails if no such application is executing"},
+ {"CreatorAddress", "Address of the creator of the current application. Fails if no such application is executing"},
}
// globalFieldDocs are notes on fields available in `global`
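For orientation, a small illustrative sketch built around the new CreatorAddress global; the TEAL fragment is an assumption for illustration, not taken from this change:

package main

import "fmt"

// creatorOnly is an application-mode TEAL fragment that leaves 1 on the stack
// only when the transaction sender is the application's creator. It must be
// assembled at a version that includes CreatorAddress.
const creatorOnly = `txn Sender
global CreatorAddress
==`

func main() { fmt.Println(creatorOnly) }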
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index 70ed12966..75568d2e0 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -84,12 +84,7 @@ func TestOpDocExtra(t *testing.T) {
require.Empty(t, xd)
}
-func TestOpCost(t *testing.T) {
- c := OpCost("+")
- require.Equal(t, 1, c)
- c = OpCost("sha256")
- require.True(t, c > 1)
-
+func TestOpAllCosts(t *testing.T) {
a := OpAllCosts("+")
require.Equal(t, 1, len(a))
require.Equal(t, 1, a[0])
@@ -101,13 +96,6 @@ func TestOpCost(t *testing.T) {
}
}
-func TestOpSize(t *testing.T) {
- c := OpSize("+")
- require.Equal(t, 1, c)
- c = OpSize("intc")
- require.Equal(t, 2, c)
-}
-
func TestTypeNameDescription(t *testing.T) {
require.Equal(t, len(TxnTypeNames), len(typeEnumDescriptions))
for i, a := range TxnTypeNames {
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index 9dddc585e..7809dd4ce 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -124,12 +124,14 @@ func (sv *stackValue) toTealValue() (tv basics.TealValue) {
// LedgerForLogic represents ledger API for Stateful TEAL program
type LedgerForLogic interface {
Balance(addr basics.Address) (basics.MicroAlgos, error)
+ MinBalance(addr basics.Address, proto *config.ConsensusParams) (basics.MicroAlgos, error)
Round() basics.Round
LatestTimestamp() int64
AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error)
AssetParams(aidx basics.AssetIndex) (basics.AssetParams, error)
ApplicationID() basics.AppIndex
+ CreatorAddress() basics.Address
OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error)
GetLocal(addr basics.Address, appIdx basics.AppIndex, key string) (value basics.TealValue, exists bool, err error)
@@ -287,7 +289,7 @@ func (pe PanicError) Error() string {
}
var errLoopDetected = errors.New("loop detected")
-var errLogicSignNotSupported = errors.New("LogicSig not supported")
+var errLogicSigNotSupported = errors.New("LogicSig not supported")
var errTooManyArgs = errors.New("LogicSig has too many arguments")
// EvalStateful executes stateful TEAL program
@@ -326,7 +328,7 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) {
}
}
err = PanicError{x, errstr}
- cx.EvalParams.log().Errorf("recovered panic in Eval: %s", err)
+ cx.EvalParams.log().Errorf("recovered panic in Eval: %w", err)
}
}()
@@ -341,7 +343,7 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) {
}()
if (cx.EvalParams.Proto == nil) || (cx.EvalParams.Proto.LogicSigVersion == 0) {
- err = errLogicSignNotSupported
+ err = errLogicSigNotSupported
return
}
if cx.EvalParams.Txn.Lsig.Args != nil && len(cx.EvalParams.Txn.Lsig.Args) > transactions.EvalMaxArgs {
@@ -456,7 +458,7 @@ func check(program []byte, params EvalParams) (cost int, err error) {
}
}()
if (params.Proto == nil) || (params.Proto.LogicSigVersion == 0) {
- err = errLogicSignNotSupported
+ err = errLogicSigNotSupported
return
}
var cx evalContext
@@ -560,17 +562,17 @@ func (cx *evalContext) step() {
}
}
- oz := spec.opSize
- if oz.size != 0 && (cx.pc+oz.size > len(cx.program)) {
+ deets := spec.Details
+ if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
cx.err = fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
return
}
- cx.cost += oz.cost
+ cx.cost += deets.Cost
spec.op(cx)
if cx.Trace != nil {
immArgsString := " "
if spec.Name != "bnz" {
- for i := 1; i < spec.opSize.size; i++ {
+ for i := 1; i < spec.Details.Size; i++ {
immArgsString += fmt.Sprintf("0x%02x ", cx.program[cx.pc+i])
}
}
@@ -608,7 +610,7 @@ func (cx *evalContext) step() {
cx.pc = cx.nextpc
cx.nextpc = 0
} else {
- cx.pc++
+ cx.pc += deets.Size
}
}
@@ -623,23 +625,23 @@ func (cx *evalContext) checkStep() (cost int) {
cx.err = fmt.Errorf("%s not allowed in current mode", spec.Name)
return
}
- oz := spec.opSize
- if oz.size != 0 && (cx.pc+oz.size > len(cx.program)) {
+ deets := spec.Details
+ if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
cx.err = fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name)
return 1
}
prevpc := cx.pc
- if oz.checkFunc != nil {
- cost = oz.checkFunc(cx)
+ if deets.checkFunc != nil {
+ cost = deets.checkFunc(cx)
if cx.nextpc != 0 {
cx.pc = cx.nextpc
cx.nextpc = 0
} else {
- cx.pc += oz.size
+ cx.pc += deets.Size
}
} else {
- cost = oz.cost
- cx.pc += oz.size
+ cost = deets.Cost
+ cx.pc += deets.Size
}
if cx.Trace != nil {
fmt.Fprintf(cx.Trace, "%3d %s\n", prevpc, spec.Name)
@@ -674,6 +676,32 @@ func opReturn(cx *evalContext) {
cx.nextpc = len(cx.program)
}
+func opAssert(cx *evalContext) {
+ last := len(cx.stack) - 1
+ if cx.stack[last].Uint != 0 {
+ cx.stack = cx.stack[:last]
+ return
+ }
+ cx.err = errors.New("assert failed")
+}
+
+func opSwap(cx *evalContext) {
+ last := len(cx.stack) - 1
+ prev := last - 1
+ cx.stack[last], cx.stack[prev] = cx.stack[prev], cx.stack[last]
+}
+
+func opSelect(cx *evalContext) {
+ last := len(cx.stack) - 1 // condition on top
+ prev := last - 1 // true is one down
+ pprev := prev - 1 // false below that
+
+ if cx.stack[last].Uint != 0 {
+ cx.stack[pprev] = cx.stack[prev]
+ }
+ cx.stack = cx.stack[:prev]
+}
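A toy model of the new select stack semantics, matching the opSelect implementation above; it is an illustration, not the evaluator:

package main

import "fmt"

// sel models select: A, B, C -> B if C != 0, else A.
func sel(stack []uint64) []uint64 {
	last := len(stack) - 1
	prev := last - 1
	pprev := prev - 1
	if stack[last] != 0 {
		stack[pprev] = stack[prev]
	}
	return stack[:prev]
}

func main() {
	fmt.Println(sel([]uint64{1, 2, 0})) // [1]
	fmt.Println(sel([]uint64{1, 2, 7})) // [2]
}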
+
func opSHA256(cx *evalContext) {
last := len(cx.stack) - 1
hash := sha256.Sum256(cx.stack[last].Bytes)
@@ -1009,7 +1037,6 @@ func opIntConstN(cx *evalContext, n uint) {
func opIntConstLoad(cx *evalContext) {
n := uint(cx.program[cx.pc+1])
opIntConstN(cx, n)
- cx.nextpc = cx.pc + 2
}
func opIntConst0(cx *evalContext) {
opIntConstN(cx, 0)
@@ -1024,6 +1051,17 @@ func opIntConst3(cx *evalContext) {
opIntConstN(cx, 3)
}
+func opPushInt(cx *evalContext) {
+ val, bytesUsed := binary.Uvarint(cx.program[cx.pc+1:])
+ if bytesUsed <= 0 {
+ cx.err = fmt.Errorf("could not decode int at pc=%d", cx.pc+1)
+ return
+ }
+ sv := stackValue{Uint: val}
+ cx.stack = append(cx.stack, sv)
+ cx.nextpc = cx.pc + 1 + bytesUsed
+}
+
func opByteConstBlock(cx *evalContext) {
cx.bytec, cx.nextpc, cx.err = parseBytecBlock(cx.program, cx.pc)
}
@@ -1038,7 +1076,6 @@ func opByteConstN(cx *evalContext, n uint) {
func opByteConstLoad(cx *evalContext) {
n := uint(cx.program[cx.pc+1])
opByteConstN(cx, n)
- cx.nextpc = cx.pc + 2
}
func opByteConst0(cx *evalContext) {
opByteConstN(cx, 0)
@@ -1053,6 +1090,24 @@ func opByteConst3(cx *evalContext) {
opByteConstN(cx, 3)
}
+func opPushBytes(cx *evalContext) {
+ pos := cx.pc + 1
+ length, bytesUsed := binary.Uvarint(cx.program[pos:])
+ if bytesUsed <= 0 {
+ cx.err = fmt.Errorf("could not decode length at pc=%d", pos)
+ return
+ }
+ pos += bytesUsed
+ end := uint64(pos) + length
+ if end > uint64(len(cx.program)) || end < uint64(pos) {
+ cx.err = fmt.Errorf("pushbytes too long at pc=%d", pos)
+ return
+ }
+ sv := stackValue{Bytes: cx.program[pos:end]}
+ cx.stack = append(cx.stack, sv)
+ cx.nextpc = int(end)
+}
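The pushint and pushbytes immediates above are plain varuints following the opcode byte; a minimal standalone sketch of that encoding, for illustration only:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Lay out 300 the way a pushint immediate would follow its opcode byte,
	// then decode it with binary.Uvarint, the same call opPushInt uses above.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	val, used := binary.Uvarint(buf[:n])
	fmt.Println(val, used) // 300 2 -- values of 128 and up need extra bytes
}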
+
func opArgN(cx *evalContext, n uint64) {
if n >= uint64(len(cx.Txn.Lsig.Args)) {
cx.err = fmt.Errorf("cannot load arg[%d] of %d", n, len(cx.Txn.Lsig.Args))
@@ -1065,7 +1120,6 @@ func opArgN(cx *evalContext, n uint64) {
func opArg(cx *evalContext) {
n := uint64(cx.program[cx.pc+1])
opArgN(cx, n)
- cx.nextpc = cx.pc + 2
}
func opArg0(cx *evalContext) {
opArgN(cx, 0)
@@ -1160,6 +1214,19 @@ func opDup2(cx *evalContext) {
cx.stack = append(cx.stack, cx.stack[prev:]...)
}
+func opDig(cx *evalContext) {
+ depth := int(uint(cx.program[cx.pc+1]))
+ idx := len(cx.stack) - 1 - depth
+	// Need to check stack size explicitly here because checkArgs() doesn't understand dig,
+	// so we can't expect our stack to be prechecked.
+ if idx < 0 {
+ cx.err = fmt.Errorf("dig %d with stack size = %d", depth, len(cx.stack))
+ return
+ }
+ sv := cx.stack[idx]
+ cx.stack = append(cx.stack, sv)
+}
+
func (cx *evalContext) assetHoldingEnumToValue(holding *basics.AssetHolding, field uint64) (sv stackValue, err error) {
switch AssetHoldingField(field) {
case AssetBalance:
@@ -1293,6 +1360,7 @@ func (cx *evalContext) txnFieldToStack(txn *transactions.Transaction, field TxnF
sv.Uint = uint64(txn.ApplicationID)
case OnCompletion:
sv.Uint = uint64(txn.OnCompletion)
+
case ApplicationArgs:
if arrayFieldIdx >= uint64(len(txn.ApplicationArgs)) {
err = fmt.Errorf("invalid ApplicationArgs index %d", arrayFieldIdx)
@@ -1301,6 +1369,7 @@ func (cx *evalContext) txnFieldToStack(txn *transactions.Transaction, field TxnF
sv.Bytes = nilToEmpty(txn.ApplicationArgs[arrayFieldIdx])
case NumAppArgs:
sv.Uint = uint64(len(txn.ApplicationArgs))
+
case Accounts:
if arrayFieldIdx == 0 {
// special case: sender
@@ -1314,6 +1383,40 @@ func (cx *evalContext) txnFieldToStack(txn *transactions.Transaction, field TxnF
}
case NumAccounts:
sv.Uint = uint64(len(txn.Accounts))
+
+ case Assets:
+ if arrayFieldIdx >= uint64(len(txn.ForeignAssets)) {
+ err = fmt.Errorf("invalid Assets index %d", arrayFieldIdx)
+ return
+ }
+ sv.Uint = uint64(txn.ForeignAssets[arrayFieldIdx])
+ case NumAssets:
+ sv.Uint = uint64(len(txn.ForeignAssets))
+
+ case Applications:
+ if arrayFieldIdx == 0 {
+ // special case: current app id
+ sv.Uint = uint64(txn.ApplicationID)
+ } else {
+ if arrayFieldIdx > uint64(len(txn.ForeignApps)) {
+ err = fmt.Errorf("invalid Applications index %d", arrayFieldIdx)
+ return
+ }
+ sv.Uint = uint64(txn.ForeignApps[arrayFieldIdx-1])
+ }
+ case NumApplications:
+ sv.Uint = uint64(len(txn.ForeignApps))
+
+ case GlobalNumUint:
+ sv.Uint = uint64(txn.GlobalStateSchema.NumUint)
+ case GlobalNumByteSlice:
+ sv.Uint = uint64(txn.GlobalStateSchema.NumByteSlice)
+
+ case LocalNumUint:
+ sv.Uint = uint64(txn.LocalStateSchema.NumUint)
+ case LocalNumByteSlice:
+ sv.Uint = uint64(txn.LocalStateSchema.NumByteSlice)
+
case ApprovalProgram:
sv.Bytes = nilToEmpty(txn.ApprovalProgram)
case ClearStateProgram:
@@ -1383,7 +1486,6 @@ func opTxn(cx *evalContext) {
return
}
cx.stack = append(cx.stack, sv)
- cx.nextpc = cx.pc + 2
}
func opTxna(cx *evalContext) {
@@ -1407,7 +1509,6 @@ func opTxna(cx *evalContext) {
return
}
cx.stack = append(cx.stack, sv)
- cx.nextpc = cx.pc + 3
}
func opGtxn(cx *evalContext) {
@@ -1441,7 +1542,6 @@ func opGtxn(cx *evalContext) {
}
}
cx.stack = append(cx.stack, sv)
- cx.nextpc = cx.pc + 3
}
func opGtxna(cx *evalContext) {
@@ -1471,7 +1571,70 @@ func opGtxna(cx *evalContext) {
return
}
cx.stack = append(cx.stack, sv)
- cx.nextpc = cx.pc + 4
+}
+
+func opGtxns(cx *evalContext) {
+ last := len(cx.stack) - 1
+ gtxid := int(cx.stack[last].Uint)
+ if gtxid >= len(cx.TxnGroup) {
+ cx.err = fmt.Errorf("gtxns lookup TxnGroup[%d] but it only has %d", gtxid, len(cx.TxnGroup))
+ return
+ }
+ tx := &cx.TxnGroup[gtxid].Txn
+ field := TxnField(uint64(cx.program[cx.pc+1]))
+ fs, ok := txnFieldSpecByField[field]
+ if !ok || fs.version > cx.version {
+ cx.err = fmt.Errorf("invalid txn field %d", field)
+ return
+ }
+ _, ok = txnaFieldSpecByField[field]
+ if ok {
+ cx.err = fmt.Errorf("invalid txn field %d", field)
+ return
+ }
+ var sv stackValue
+ var err error
+ if TxnField(field) == GroupIndex {
+ // GroupIndex; asking this when we just specified it is _dumb_, but oh well
+ sv.Uint = uint64(gtxid)
+ } else {
+ sv, err = cx.txnFieldToStack(tx, field, 0, gtxid)
+ if err != nil {
+ cx.err = err
+ return
+ }
+ }
+ cx.stack[last] = sv
+}
+
+func opGtxnsa(cx *evalContext) {
+ last := len(cx.stack) - 1
+ gtxid := int(cx.stack[last].Uint)
+ if gtxid >= len(cx.TxnGroup) {
+ cx.err = fmt.Errorf("gtxnsa lookup TxnGroup[%d] but it only has %d", gtxid, len(cx.TxnGroup))
+ return
+ }
+ tx := &cx.TxnGroup[gtxid].Txn
+ field := TxnField(uint64(cx.program[cx.pc+1]))
+ fs, ok := txnFieldSpecByField[field]
+ if !ok || fs.version > cx.version {
+ cx.err = fmt.Errorf("invalid txn field %d", field)
+ return
+ }
+ _, ok = txnaFieldSpecByField[field]
+ if !ok {
+ cx.err = fmt.Errorf("gtxnsa unsupported field %d", field)
+ return
+ }
+ var sv stackValue
+ var err error
+ arrayFieldIdx := uint64(cx.program[cx.pc+2])
+ sv, err = cx.txnFieldToStack(tx, field, arrayFieldIdx, gtxid)
+ if err != nil {
+ cx.err = err
+ return
+ }
+ cx.stack[last] = sv
}
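The new gtxns and gtxnsa ops take the group index from the stack rather than an immediate; a hedged usage sketch follows (the TEAL fragment is illustrative, not taken from this change):

package main

import "fmt"

// prevSender reads the Sender of the preceding transaction in the group.
// It assumes the current transaction is not first in the group, since the
// subtraction would underflow when GroupIndex is 0.
const prevSender = `txn GroupIndex
int 1
-
gtxns Sender
txn Sender
==`

func main() { fmt.Println(prevSender) }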
func (cx *evalContext) getRound() (rnd uint64, err error) {
@@ -1503,6 +1666,14 @@ func (cx *evalContext) getApplicationID() (rnd uint64, err error) {
return uint64(cx.Ledger.ApplicationID()), nil
}
+func (cx *evalContext) getCreatorAddress() ([]byte, error) {
+ if cx.Ledger == nil {
+ return nil, fmt.Errorf("ledger not available")
+ }
+ addr := cx.Ledger.CreatorAddress()
+ return addr[:], nil
+}
+
var zeroAddress basics.Address
func (cx *evalContext) globalFieldToStack(field GlobalField) (sv stackValue, err error) {
@@ -1525,6 +1696,8 @@ func (cx *evalContext) globalFieldToStack(field GlobalField) (sv stackValue, err
sv.Uint, err = cx.getLatestTimestamp()
case CurrentApplicationID:
sv.Uint, err = cx.getApplicationID()
+ case CreatorAddress:
+ sv.Bytes, err = cx.getCreatorAddress()
default:
err = fmt.Errorf("invalid global[%d]", field)
}
@@ -1557,7 +1730,6 @@ func opGlobal(cx *evalContext) {
}
cx.stack = append(cx.stack, sv)
- cx.nextpc = cx.pc + 2
}
// Msg is data meant to be signed and then verified with the
@@ -1613,7 +1785,6 @@ func opEd25519verify(cx *evalContext) {
func opLoad(cx *evalContext) {
gindex := int(uint(cx.program[cx.pc+1]))
cx.stack = append(cx.stack, cx.scratch[gindex])
- cx.nextpc = cx.pc + 2
}
func opStore(cx *evalContext) {
@@ -1621,7 +1792,6 @@ func opStore(cx *evalContext) {
last := len(cx.stack) - 1
cx.scratch[gindex] = cx.stack[last]
cx.stack = cx.stack[:last]
- cx.nextpc = cx.pc + 2
}
func opConcat(cx *evalContext) {
@@ -1661,7 +1831,6 @@ func opSubstring(cx *evalContext) {
start := cx.program[cx.pc+1]
end := cx.program[cx.pc+2]
cx.stack[last].Bytes, cx.err = substring(cx.stack[last].Bytes, int(start), int(end))
- cx.nextpc = cx.pc + 3
}
func opSubstring3(cx *evalContext) {
@@ -1678,6 +1847,124 @@ func opSubstring3(cx *evalContext) {
cx.stack = cx.stack[:prev]
}
+func opGetBit(cx *evalContext) {
+ last := len(cx.stack) - 1
+ prev := last - 1
+ idx := cx.stack[last].Uint
+ target := cx.stack[prev]
+
+ var bit uint64
+ if target.argType() == StackUint64 {
+ if idx > 63 {
+			cx.err = errors.New("getbit index > 63 with Uint")
+ return
+ }
+ mask := uint64(1) << idx
+ bit = (target.Uint & mask) >> idx
+ } else {
+ // indexing into a byteslice
+ byteIdx := idx / 8
+ if byteIdx >= uint64(len(target.Bytes)) {
+ cx.err = errors.New("getbit index beyond byteslice")
+ return
+ }
+ byteVal := target.Bytes[byteIdx]
+
+ bitIdx := idx % 8
+		// We are saying that bit 9 (the 10th bit), for example,
+		// is the 2nd bit in the second byte, and that "2nd
+		// bit" here means the almost-highest-order bit, because
+		// we're thinking of the bits in the byte itself as
+		// being big endian. So this looks "reversed".
+ mask := byte(0x80) >> bitIdx
+ bit = uint64((byteVal & mask) >> (7 - bitIdx))
+ }
+ cx.stack[prev].Uint = bit
+ cx.stack[prev].Bytes = nil
+ cx.stack = cx.stack[:last]
+}
+
+func opSetBit(cx *evalContext) {
+ last := len(cx.stack) - 1
+ prev := last - 1
+ pprev := prev - 1
+
+ bit := cx.stack[last].Uint
+ idx := cx.stack[prev].Uint
+ target := cx.stack[pprev]
+
+ if bit > 1 {
+ cx.err = errors.New("setbit value > 1")
+ return
+ }
+
+ if target.argType() == StackUint64 {
+ if idx > 63 {
+ cx.err = errors.New("setbit index > 63 with Uint")
+ return
+ }
+ mask := uint64(1) << idx
+ if bit == uint64(1) {
+ cx.stack[pprev].Uint |= mask // manipulate stack in place
+ } else {
+ cx.stack[pprev].Uint &^= mask // manipulate stack in place
+ }
+ } else {
+ // indexing into a byteslice
+ byteIdx := idx / 8
+ if byteIdx >= uint64(len(target.Bytes)) {
+ cx.err = errors.New("setbit index beyond byteslice")
+ return
+ }
+
+ bitIdx := idx % 8
+		// We are saying that bit 9 (the 10th bit), for example,
+		// is the 2nd bit in the second byte, and that "2nd
+		// bit" here means the almost-highest-order bit, because
+		// we're thinking of the bits in the byte itself as
+		// being big endian. So this looks "reversed".
+ mask := byte(0x80) >> bitIdx
+ if bit == uint64(1) {
+ target.Bytes[byteIdx] |= mask
+ } else {
+ target.Bytes[byteIdx] &^= mask
+ }
+ }
+ cx.stack = cx.stack[:prev]
+}
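A standalone sketch of the bit-ordering convention the two functions above implement; illustration only, not the evaluator:

package main

import "fmt"

// setBitUint and setBitBytes mirror the two branches above: integers index
// from the low-order bit, byte-arrays from the high-order bit of each byte.
func setBitUint(x uint64, idx uint) uint64 {
	return x | uint64(1)<<idx
}

func setBitBytes(b []byte, idx uint) {
	b[idx/8] |= byte(0x80) >> (idx % 8)
}

func main() {
	fmt.Println(setBitUint(0, 4)) // 16, i.e. 2^4, as in the setbit docs
	buf := make([]byte, 4)
	for i := uint(0); i < 12; i++ {
		setBitBytes(buf, i)
	}
	fmt.Printf("%x\n", buf) // fff00000, matching the setbit docs
}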
+
+func opGetByte(cx *evalContext) {
+ last := len(cx.stack) - 1
+ prev := last - 1
+
+ idx := cx.stack[last].Uint
+ target := cx.stack[prev]
+
+ if idx >= uint64(len(target.Bytes)) {
+ cx.err = errors.New("getbyte index beyond byteslice")
+ return
+ }
+ cx.stack[prev].Uint = uint64(target.Bytes[idx])
+ cx.stack[prev].Bytes = nil
+ cx.stack = cx.stack[:last]
+}
+
+func opSetByte(cx *evalContext) {
+ last := len(cx.stack) - 1
+ prev := last - 1
+ pprev := prev - 1
+ if cx.stack[last].Uint > 255 {
+ cx.err = errors.New("setbyte value > 255")
+ return
+ }
+ if cx.stack[prev].Uint > uint64(len(cx.stack[pprev].Bytes)) {
+ cx.err = errors.New("setbyte index > byte length")
+ return
+ }
+ cx.stack[pprev].Bytes[cx.stack[prev].Uint] = byte(cx.stack[last].Uint)
+ cx.stack = cx.stack[:prev]
+}
+
func opBalance(cx *evalContext) {
last := len(cx.stack) - 1 // account offset
@@ -1696,7 +1983,32 @@ func opBalance(cx *evalContext) {
microAlgos, err := cx.Ledger.Balance(addr)
if err != nil {
- cx.err = fmt.Errorf("failed to fetch balance of %v: %s", addr, err.Error())
+ cx.err = fmt.Errorf("failed to fetch balance of %v: %w", addr, err)
+ return
+ }
+
+ cx.stack[last].Uint = microAlgos.Raw
+}
+
+func opMinBalance(cx *evalContext) {
+ last := len(cx.stack) - 1 // account offset
+
+ accountIdx := cx.stack[last].Uint
+
+ if cx.Ledger == nil {
+ cx.err = fmt.Errorf("ledger not available")
+ return
+ }
+
+ addr, err := cx.Txn.Txn.AddressByIndex(accountIdx, cx.Txn.Txn.Sender)
+ if err != nil {
+ cx.err = err
+ return
+ }
+
+ microAlgos, err := cx.Ledger.MinBalance(addr, cx.Proto)
+ if err != nil {
+ cx.err = fmt.Errorf("failed to fetch minimum balance of %v: %w", addr, err)
return
}
@@ -2018,8 +2330,6 @@ func opAssetHoldingGet(cx *evalContext) {
cx.stack[prev] = value
cx.stack[last].Uint = exist
-
- cx.nextpc = cx.pc + 2
}
func opAssetParamsGet(cx *evalContext) {
@@ -2053,6 +2363,4 @@ func opAssetParamsGet(cx *evalContext) {
cx.stack[last] = value
cx.stack = append(cx.stack, stackValue{Uint: exist})
-
- cx.nextpc = cx.pc + 2
}
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 07c22473d..af19c586e 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
@@ -33,24 +34,39 @@ import (
type balanceRecord struct {
addr basics.Address
balance uint64
- apps map[basics.AppIndex]map[string]basics.TealValue
+ locals map[basics.AppIndex]basics.TealKeyValue
holdings map[uint64]basics.AssetHolding
mods map[basics.AppIndex]map[string]basics.ValueDelta
}
+// In our test ledger, we don't store the AppParams with its creator,
+// so we need to carry the creator around with the params.
+type appParams struct {
+ basics.AppParams
+ Creator basics.Address
+}
+
type testLedger struct {
balances map[basics.Address]balanceRecord
- applications map[basics.AppIndex]map[string]basics.TealValue
+ applications map[basics.AppIndex]appParams
assets map[basics.AssetIndex]basics.AssetParams
- appID uint64
+ appID basics.AppIndex
+ creatorAddr basics.Address
mods map[basics.AppIndex]map[string]basics.ValueDelta
}
+func makeSchemas(li uint64, lb uint64, gi uint64, gb uint64) basics.StateSchemas {
+ return basics.StateSchemas{
+ LocalStateSchema: basics.StateSchema{NumUint: li, NumByteSlice: lb},
+ GlobalStateSchema: basics.StateSchema{NumUint: gi, NumByteSlice: gb},
+ }
+}
+
func makeBalanceRecord(addr basics.Address, balance uint64) balanceRecord {
br := balanceRecord{
addr: addr,
balance: balance,
- apps: make(map[basics.AppIndex]map[string]basics.TealValue),
+ locals: make(map[basics.AppIndex]basics.TealKeyValue),
holdings: make(map[uint64]basics.AssetHolding),
mods: make(map[basics.AppIndex]map[string]basics.ValueDelta),
}
@@ -63,7 +79,7 @@ func makeTestLedger(balances map[basics.Address]uint64) *testLedger {
for addr, balance := range balances {
l.balances[addr] = makeBalanceRecord(addr, balance)
}
- l.applications = make(map[basics.AppIndex]map[string]basics.TealValue)
+ l.applications = make(map[basics.AppIndex]appParams)
l.assets = make(map[basics.AssetIndex]basics.AssetParams)
l.mods = make(map[basics.AppIndex]map[string]basics.ValueDelta)
return l
@@ -77,28 +93,36 @@ func (l *testLedger) reset() {
}
}
-func (l *testLedger) newApp(addr basics.Address, appID uint64) {
+func (l *testLedger) newApp(addr basics.Address, appID basics.AppIndex, schemas basics.StateSchemas) {
l.appID = appID
- appIdx := basics.AppIndex(appID)
- l.applications[appIdx] = make(map[string]basics.TealValue)
+ appIdx := appID
+ l.applications[appIdx] = appParams{
+ Creator: addr,
+ AppParams: basics.AppParams{
+ StateSchemas: schemas,
+ GlobalState: make(basics.TealKeyValue),
+ },
+ }
br, ok := l.balances[addr]
if !ok {
br = makeBalanceRecord(addr, 0)
}
- br.apps[appIdx] = make(map[string]basics.TealValue)
+ br.locals[appIdx] = make(map[string]basics.TealValue)
l.balances[addr] = br
}
-func (l *testLedger) newAsset(assetID uint64, params basics.AssetParams) {
+func (l *testLedger) newAsset(creator basics.Address, assetID uint64, params basics.AssetParams) {
l.assets[basics.AssetIndex(assetID)] = params
+ // We're not simulating details of ReserveAddress yet.
+ l.setHolding(creator, assetID, params.Total, params.DefaultFrozen)
}
-func (l *testLedger) setHolding(addr basics.Address, assetID uint64, holding basics.AssetHolding) {
+func (l *testLedger) setHolding(addr basics.Address, assetID uint64, amount uint64, frozen bool) {
br, ok := l.balances[addr]
if !ok {
br = makeBalanceRecord(addr, 0)
}
- br.holdings[assetID] = holding
+ br.holdings[assetID] = basics.AssetHolding{Amount: amount, Frozen: frozen}
l.balances[addr] = br
}
@@ -123,16 +147,53 @@ func (l *testLedger) Balance(addr basics.Address) (amount basics.MicroAlgos, err
return basics.MicroAlgos{Raw: br.balance}, nil
}
+func (l *testLedger) MinBalance(addr basics.Address, proto *config.ConsensusParams) (amount basics.MicroAlgos, err error) {
+ if l.balances == nil {
+ err = fmt.Errorf("empty ledger")
+ return
+ }
+ br, ok := l.balances[addr]
+ if !ok {
+ err = fmt.Errorf("no such address")
+ return
+ }
+
+ var min uint64
+
+ // First, base MinBalance
+ min = proto.MinBalance
+
+ // MinBalance for each Asset
+ assetCost := basics.MulSaturate(proto.MinBalance, uint64(len(br.holdings)))
+ min = basics.AddSaturate(min, assetCost)
+
+	// AppFlatParamsMinBalance + GlobalStateSchema.MinBalance for each created application
+ for _, params := range l.applications {
+ if params.Creator == addr {
+ min = basics.AddSaturate(min, proto.AppFlatParamsMinBalance)
+ min = basics.AddSaturate(min, params.GlobalStateSchema.MinBalance(proto).Raw)
+ }
+ }
+
+	// AppFlatParamsMinBalance + LocalStateSchema.MinBalance for each opted-in application
+ for idx := range br.locals {
+ min = basics.AddSaturate(min, proto.AppFlatParamsMinBalance)
+ min = basics.AddSaturate(min, l.applications[idx].LocalStateSchema.MinBalance(proto).Raw)
+ }
+
+ return basics.MicroAlgos{Raw: min}, nil
+}
+
func (l *testLedger) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) {
if appIdx == basics.AppIndex(0) {
- appIdx = basics.AppIndex(l.appID)
+ appIdx = l.appID
}
- tkvd, ok := l.applications[appIdx]
+ params, ok := l.applications[appIdx]
if !ok {
return basics.TealValue{}, false, fmt.Errorf("no such app")
}
- // return most recent value if avialiable
+ // return most recent value if available
tkvm, ok := l.mods[appIdx]
if ok {
val, ok := tkvm[key]
@@ -143,20 +204,20 @@ func (l *testLedger) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealV
}
// otherwise return original one
- val, ok := tkvd[key]
+ val, ok := params.GlobalState[key]
return val, ok, nil
}
func (l *testLedger) SetGlobal(key string, value basics.TealValue) error {
- appIdx := basics.AppIndex(l.appID)
- tkv, ok := l.applications[appIdx]
+ appIdx := l.appID
+ params, ok := l.applications[appIdx]
if !ok {
return fmt.Errorf("no such app")
}
// if writing the same value, return
// this simulates real ledger behavior for tests
- val, ok := tkv[key]
+ val, ok := params.GlobalState[key]
if ok && val == value {
return nil
}
@@ -171,14 +232,14 @@ func (l *testLedger) SetGlobal(key string, value basics.TealValue) error {
}
func (l *testLedger) DelGlobal(key string) error {
- appIdx := basics.AppIndex(l.appID)
- tkv, ok := l.applications[appIdx]
+ appIdx := l.appID
+ params, ok := l.applications[appIdx]
if !ok {
return fmt.Errorf("no such app")
}
exist := false
- if _, ok := tkv[key]; ok {
+ if _, ok := params.GlobalState[key]; ok {
exist = true
}
@@ -199,13 +260,13 @@ func (l *testLedger) DelGlobal(key string) error {
func (l *testLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) {
if appIdx == 0 {
- appIdx = basics.AppIndex(l.appID)
+ appIdx = l.appID
}
br, ok := l.balances[addr]
if !ok {
return basics.TealValue{}, false, fmt.Errorf("no such address")
}
- tkvd, ok := br.apps[appIdx]
+ tkvd, ok := br.locals[appIdx]
if !ok {
return basics.TealValue{}, false, fmt.Errorf("no app for account")
}
@@ -225,13 +286,13 @@ func (l *testLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key s
}
func (l *testLedger) SetLocal(addr basics.Address, key string, value basics.TealValue) error {
- appIdx := basics.AppIndex(l.appID)
+ appIdx := l.appID
br, ok := l.balances[addr]
if !ok {
return fmt.Errorf("no such address")
}
- tkv, ok := br.apps[appIdx]
+ tkv, ok := br.locals[appIdx]
if !ok {
return fmt.Errorf("no app for account")
}
@@ -253,13 +314,13 @@ func (l *testLedger) SetLocal(addr basics.Address, key string, value basics.Teal
}
func (l *testLedger) DelLocal(addr basics.Address, key string) error {
- appIdx := basics.AppIndex(l.appID)
+ appIdx := l.appID
br, ok := l.balances[addr]
if !ok {
return fmt.Errorf("no such address")
}
- tkv, ok := br.apps[appIdx]
+ tkv, ok := br.locals[appIdx]
if !ok {
return fmt.Errorf("no app for account")
}
@@ -285,13 +346,13 @@ func (l *testLedger) DelLocal(addr basics.Address, key string) error {
func (l *testLedger) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
if appIdx == 0 {
- appIdx = basics.AppIndex(l.appID)
+ appIdx = l.appID
}
br, ok := l.balances[addr]
if !ok {
return false, fmt.Errorf("no such address")
}
- _, ok = br.apps[appIdx]
+ _, ok = br.locals[appIdx]
return ok, nil
}
@@ -313,7 +374,11 @@ func (l *testLedger) AssetParams(assetID basics.AssetIndex) (basics.AssetParams,
}
func (l *testLedger) ApplicationID() basics.AppIndex {
- return basics.AppIndex(l.appID)
+ return l.appID
+}
+
+func (l *testLedger) CreatorAddress() basics.Address {
+ return l.creatorAddr
}
func (l *testLedger) LocalSchema() basics.StateSchema {
@@ -331,7 +396,7 @@ func (l *testLedger) GlobalSchema() basics.StateSchema {
}
func (l *testLedger) GetDelta(txn *transactions.Transaction) (evalDelta basics.EvalDelta, err error) {
- if tkv, ok := l.mods[basics.AppIndex(l.appID)]; ok {
+ if tkv, ok := l.mods[l.appID]; ok {
evalDelta.GlobalDelta = tkv
}
if len(txn.Accounts) > 0 {
@@ -343,7 +408,7 @@ func (l *testLedger) GetDelta(txn *transactions.Transaction) (evalDelta basics.E
evalDelta.LocalDeltas = make(map[uint64]basics.StateDelta)
for addr, br := range l.balances {
if idx, ok := accounts[addr]; ok {
- if delta, ok := br.mods[basics.AppIndex(l.appID)]; ok {
+ if delta, ok := br.mods[l.appID]; ok {
evalDelta.LocalDeltas[uint64(idx)] = delta
}
}
@@ -433,6 +498,9 @@ arg 4
opcodesRunModeApplication := `int 0
balance
&&
+int 0
+min_balance
+&&
intc_0
intc 6 // 100
app_opted_in
@@ -520,21 +588,19 @@ pop
txn.Txn.Sender: 1,
},
)
- ledger.newApp(txn.Txn.Sender, 100)
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
- ledger.newAsset(5, params)
- ledger.setHolding(txn.Txn.Sender, 5, basics.AssetHolding{Amount: 123, Frozen: true})
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
+ ledger.newAsset(txn.Txn.Sender, 5, params)
for mode, test := range tests {
t.Run(fmt.Sprintf("opcodes_mode=%d", mode), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(test.source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, test.source, AssemblerMaxVersion)
sb := strings.Builder{}
ep := defaultEvalParams(&sb, &txn)
ep.TxnGroup = txgroup
ep.Ledger = ledger
ep.Txn.Txn.ApplicationID = 100
- _, err = test.check(ops.Program, ep)
+ _, err := test.check(ops.Program, ep)
require.NoError(t, err)
_, err = test.eval(ops.Program, ep)
if err != nil {
@@ -581,9 +647,10 @@ pop
require.Contains(t, err.Error(), "not allowed in current mode")
}
- // check new opcodes are not allowed in stateless mode
- newOpcodeCalls := []string{
+ // check stateful opcodes are not allowed in stateless mode
+ statefulOpcodeCalls := []string{
"int 0\nbalance",
+ "int 0\nmin_balance",
"int 0\nint 0\napp_opted_in",
"int 0\nint 0\nbyte 0x01\napp_local_get_ex",
"byte 0x01\napp_global_get",
@@ -596,11 +663,10 @@ pop
"int 0\nint 0\nasset_params_get AssetManager",
}
- for _, source := range newOpcodeCalls {
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ for _, source := range statefulOpcodeCalls {
+ ops := testProg(t, source, AssemblerMaxVersion)
ep := defaultEvalParams(nil, nil)
- _, err = Check(ops.Program, ep)
+ _, err := Check(ops.Program, ep)
require.Error(t, err)
_, err = Eval(ops.Program, ep)
require.Error(t, err)
@@ -618,7 +684,7 @@ func TestBalance(t *testing.T) {
text := `int 2
balance
-int 1
+int 177
==`
ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
require.NoError(t, err)
@@ -634,7 +700,7 @@ int 1
ep.Ledger = makeTestLedger(
map[basics.Address]uint64{
- txn.Txn.Receiver: 1,
+ txn.Txn.Receiver: 177,
},
)
_, err = EvalStateful(ops.Program, ep)
@@ -643,7 +709,7 @@ int 1
text = `int 1
balance
-int 1
+int 177
==`
ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
require.NoError(t, err)
@@ -656,7 +722,7 @@ int 1
text = `int 0
balance
-int 1
+int 13
==`
ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
require.NoError(t, err)
@@ -664,7 +730,7 @@ int 1
copy(addr[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui02"))
ep.Ledger = makeTestLedger(
map[basics.Address]uint64{
- addr: 1,
+ addr: 13,
},
)
pass, err = EvalStateful(ops.Program, ep)
@@ -674,7 +740,7 @@ int 1
ep.Ledger = makeTestLedger(
map[basics.Address]uint64{
- txn.Txn.Sender: 1,
+ txn.Txn.Sender: 13,
},
)
cost, err = CheckStateful(ops.Program, ep)
@@ -685,99 +751,116 @@ int 1
require.True(t, pass)
}
-func TestAppCheckOptedIn(t *testing.T) {
- t.Parallel()
-
- text := `int 2 // account idx
-int 100 // app idx
-app_opted_in
-int 1
-==`
- ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
+func testApp(t *testing.T, program string, ep EvalParams, problems ...string) basics.EvalDelta {
+ ops := testProg(t, program, AssemblerMaxVersion)
+ sb := &strings.Builder{}
+ ep.Trace = sb
+ cost, err := CheckStateful(ops.Program, ep)
require.NoError(t, err)
+ require.True(t, cost < 1000)
+
+	// We only use this to test stateful apps. While I suppose it's
+	// *legal* to have an app with no stateful ops, this convenience
+	// routine assumes the program uses at least one, and checks it.
+ pass, err := Eval(ops.Program, ep)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "not allowed in current mode")
+ require.False(t, pass)
+
+ pass, err = EvalStateful(ops.Program, ep)
+ if len(problems) == 0 {
+ require.NoError(t, err, sb.String())
+ require.True(t, pass, sb.String())
+ delta, err := ep.Ledger.GetDelta(&ep.Txn.Txn)
+ require.NoError(t, err)
+ return delta
+ }
+
+ require.Error(t, err, sb.String())
+ for _, problem := range problems {
+ require.Contains(t, err.Error(), problem)
+ }
+ if ep.Ledger != nil {
+ delta, err := ep.Ledger.GetDelta(&ep.Txn.Txn)
+ require.NoError(t, err)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
+ return delta
+ }
+ return basics.EvalDelta{}
+}
+
+func TestMinBalance(t *testing.T) {
+ t.Parallel()
txn := makeSampleTxn()
txgroup := makeSampleTxnGroup(txn)
ep := defaultEvalParams(nil, nil)
ep.Txn = &txn
ep.TxnGroup = txgroup
- _, err = EvalStateful(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "ledger not available")
- ep.Ledger = makeTestLedger(
+ testApp(t, "int 0; min_balance; int 1001; ==", ep, "ledger not available")
+
+ ledger := makeTestLedger(
map[basics.Address]uint64{
- txn.Txn.Receiver: 1,
+ txn.Txn.Sender: 234, // min_balance 0 is Sender
+ txn.Txn.Receiver: 123, // Accounts[0] has been packed with the Receiver
},
)
- _, err = EvalStateful(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "cannot load account")
+ ep.Ledger = ledger
- // Receiver is not opted in
- text = `int 1 // account idx
-int 100 // app idx
-app_opted_in
-int 0
-==`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
- pass, err := EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ testApp(t, "int 0; min_balance; int 1001; ==", ep)
+	// Sender makes an asset, min balance goes up
+ ledger.newAsset(txn.Txn.Sender, 7, basics.AssetParams{Total: 1000})
+ testApp(t, "int 0; min_balance; int 2002; ==", ep)
+ schemas := makeSchemas(1, 2, 3, 4)
+ ledger.newApp(txn.Txn.Sender, 77, schemas)
+ // create + optin + 10 schema base + 4 ints + 6 bytes (local
+ // and global count b/c newApp opts the creator in)
+ minb := 2*1002 + 10*1003 + 4*1004 + 6*1005
+ testApp(t, fmt.Sprintf("int 0; min_balance; int %d; ==", 2002+minb), ep)
+
+ testApp(t, "int 1; min_balance; int 1001; ==", ep) // 1 == Accounts[0]
+ // Receiver opts in
+ ledger.setHolding(txn.Txn.Receiver, 7, 1, true)
+ testApp(t, "int 1; min_balance; int 2002; ==", ep) // 1 == Accounts[0]
+
+ testApp(t, "int 2; min_balance; int 1001; ==", ep, "cannot load account")
+
+}
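A worked version of the arithmetic in the comments above, assuming the test protocol charges 1001 as the base min balance, 1002 per app flat charge, 1003 per schema entry, 1004 per uint entry and 1005 per byte-slice entry (values inferred from this test's assertions, not real consensus parameters):

package main

import "fmt"

func main() {
	base := uint64(1001)     // Sender's base min balance in the test proto
	withAsset := base + 1001 // each asset holding adds another base charge
	// newApp(Sender, 77, makeSchemas(1, 2, 3, 4)) both creates the app and
	// opts the creator in, so both schemas count: 2 flat charges, 10 schema
	// entries in total, 4 of them uints and 6 of them byte-slices.
	app := uint64(2*1002 + 10*1003 + 4*1004 + 6*1005)
	fmt.Println(withAsset)       // 2002, asserted right after newAsset above
	fmt.Println(withAsset + app) // the "2002+minb" value asserted above
}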
+
+func TestAppCheckOptedIn(t *testing.T) {
+ t.Parallel()
+
+ txn := makeSampleTxn()
+ txgroup := makeSampleTxnGroup(txn)
+ ep := defaultEvalParams(nil, nil)
+ ep.Txn = &txn
+ ep.TxnGroup = txgroup
+ testApp(t, "int 2; int 100; app_opted_in; int 1; ==", ep, "ledger not available")
- // Sender is not opted in
- text = `int 0 // account idx
-int 100 // app idx
-app_opted_in
-int 0
-==`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
ledger := makeTestLedger(
map[basics.Address]uint64{
- txn.Txn.Sender: 1,
+ txn.Txn.Receiver: 1,
+ txn.Txn.Sender: 1,
},
)
ep.Ledger = ledger
- cost, err = CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
-
- // Receiver opted in
- text = `int 1 // account idx
-int 100 // app idx
-app_opted_in
-int 1
-==`
- ledger.newApp(txn.Txn.Receiver, 100)
+ testApp(t, "int 2; int 100; app_opted_in; int 1; ==", ep, "cannot load account")
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ // Receiver is not opted in
+ testApp(t, "int 1; int 100; app_opted_in; int 0; ==", ep)
- // Sender opted in
- text = `int 0 // account idx
-int 100 // app idx
-app_opted_in
-int 1
-==`
- ledger.newApp(txn.Txn.Sender, 100)
+ // Sender is not opted in
+ testApp(t, "int 0; int 100; app_opted_in; int 0; ==", ep)
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ // Receiver opted in
+ ledger.newApp(txn.Txn.Receiver, 100, makeSchemas(0, 0, 0, 0))
+ testApp(t, "int 1; int 100; app_opted_in; int 1; ==", ep)
+ // Sender opted in
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
+ testApp(t, "int 0; int 100; app_opted_in; int 1; ==", ep)
}
func TestAppReadLocalState(t *testing.T) {
@@ -796,8 +879,6 @@ err
exit:
int 1
==`
- ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
txn := makeSampleTxn()
txgroup := makeSampleTxnGroup(txn)
@@ -805,12 +886,8 @@ int 1
ep.Txn = &txn
ep.Txn.Txn.ApplicationID = 100
ep.TxnGroup = txgroup
- cost, err := CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
- _, err = EvalStateful(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "ledger not available")
+
+ testApp(t, text, ep, "ledger not available")
ledger := makeTestLedger(
map[basics.Address]uint64{
@@ -818,9 +895,7 @@ int 1
},
)
ep.Ledger = ledger
- _, err = EvalStateful(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "cannot load account")
+ testApp(t, text, ep, "cannot load account")
text = `int 1 // account idx
int 100 // app id
@@ -834,11 +909,8 @@ exist:
err
exit:
int 1`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
- _, err = EvalStateful(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "no app for account")
+
+ testApp(t, text, ep, "no app for account")
ledger = makeTestLedger(
map[basics.Address]uint64{
@@ -846,18 +918,13 @@ int 1`
},
)
ep.Ledger = ledger
- ledger.newApp(txn.Txn.Receiver, 9999)
+ ledger.newApp(txn.Txn.Receiver, 9999, makeSchemas(0, 0, 0, 0))
- _, err = EvalStateful(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "no app for account")
+ testApp(t, text, ep, "no app for account")
// create the app and check the value from ApplicationArgs[0] (protocol.PaymentTx) does not exist
- ledger.newApp(txn.Txn.Receiver, 100)
-
- pass, err := EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ ledger.newApp(txn.Txn.Receiver, 100, makeSchemas(0, 0, 0, 0))
+ testApp(t, text, ep)
text = `int 1 // account idx
int 100 // app id
@@ -868,21 +935,12 @@ err
exist:
byte 0x414c474f
==`
- ledger.balances[txn.Txn.Receiver].apps[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
-
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
+ ledger.balances[txn.Txn.Receiver].locals[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
- cost, err = CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
-
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ testApp(t, text, ep)
// check special case account idx == 0 => sender
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
text = `int 0 // account idx
int 100 // app id
txn ApplicationArgs 0
@@ -893,17 +951,12 @@ exist:
byte 0x414c474f
==`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
-
- ledger.balances[txn.Txn.Sender].apps[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ ledger.balances[txn.Txn.Sender].locals[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
+ testApp(t, text, ep)
// check reading state of other app
- ledger.newApp(txn.Txn.Sender, 101)
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 101, makeSchemas(0, 0, 0, 0))
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
text = `int 0 // account idx
int 101 // app id
txn ApplicationArgs 0
@@ -914,13 +967,8 @@ exist:
byte 0x414c474f
==`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
-
- ledger.balances[txn.Txn.Sender].apps[101][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ ledger.balances[txn.Txn.Sender].locals[101][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
+ testApp(t, text, ep)
// check app_local_get
text = `int 0 // account idx
@@ -929,13 +977,8 @@ app_local_get
byte 0x414c474f
==`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
-
- ledger.balances[txn.Txn.Sender].apps[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ ledger.balances[txn.Txn.Sender].locals[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
+ testApp(t, text, ep)
// check app_local_get default value
text = `int 0 // account idx
@@ -944,13 +987,8 @@ app_local_get
int 0
==`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
-
- ledger.balances[txn.Txn.Sender].apps[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
+ ledger.balances[txn.Txn.Sender].locals[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
+ testApp(t, text, ep)
}
func TestAppReadGlobalState(t *testing.T) {
@@ -1008,13 +1046,13 @@ byte 0x414c474f
require.Contains(t, err.Error(), "no such app")
// create the app and check the value from ApplicationArgs[0] (protocol.PaymentTx) does not exist
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 1))
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "err opcode")
- ledger.applications[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
+ ledger.applications[100].GlobalState[string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
cost, err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
@@ -1044,7 +1082,7 @@ int 0
ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
require.NoError(t, err)
- ledger.balances[txn.Txn.Sender].apps[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
+ ledger.balances[txn.Txn.Sender].locals[100][string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"}
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1248,8 +1286,8 @@ func TestAssets(t *testing.T) {
Freeze: txn.Txn.Receiver,
Clawback: txn.Txn.Receiver,
}
- ledger.newAsset(55, params)
- ledger.setHolding(txn.Txn.Sender, 55, basics.AssetHolding{Amount: 123, Frozen: true})
+ ledger.newAsset(txn.Txn.Sender, 55, params)
+ ledger.setHolding(txn.Txn.Sender, 55, 123, true)
ep := defaultEvalParams(&sb, &txn)
ep.Ledger = ledger
@@ -1280,7 +1318,7 @@ int 1
`
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
- ledger.setHolding(txn.Txn.Sender, 55, basics.AssetHolding{Amount: 123, Frozen: false})
+ ledger.setHolding(txn.Txn.Sender, 55, 123, false)
cost, err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, cost < 1000)
@@ -1289,7 +1327,7 @@ int 1
require.True(t, pass)
// check holdings invalid offsets
- require.Equal(t, opsByName[ep.Proto.LogicSigVersion]["asset_holding_get"].Opcode, ops.Program[8])
+ require.Equal(t, OpsByName[ep.Proto.LogicSigVersion]["asset_holding_get"].Opcode, ops.Program[8])
ops.Program[9] = 0x02
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
@@ -1311,12 +1349,12 @@ int 1
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
params.DefaultFrozen = true
- ledger.newAsset(55, params)
+ ledger.newAsset(txn.Txn.Sender, 55, params)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
// check holdings invalid offsets
- require.Equal(t, opsByName[ep.Proto.LogicSigVersion]["asset_params_get"].Opcode, ops.Program[6])
+ require.Equal(t, OpsByName[ep.Proto.LogicSigVersion]["asset_params_get"].Opcode, ops.Program[6])
ops.Program[7] = 0x20
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
@@ -1339,7 +1377,7 @@ int 1
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
params.URL = ""
- ledger.newAsset(55, params)
+ ledger.newAsset(txn.Txn.Sender, 55, params)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1360,7 +1398,7 @@ int 1
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
params.URL = "foobarbaz"
- ledger.newAsset(77, params)
+ ledger.newAsset(txn.Txn.Sender, 77, params)
pass, err = EvalStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
@@ -1380,7 +1418,7 @@ int 1
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
params.URL = ""
- ledger.newAsset(55, params)
+ ledger.newAsset(txn.Txn.Sender, 55, params)
cost, err = CheckStateful(ops.Program, ep)
require.NoError(t, err)
require.True(t, cost < 1000)
@@ -1464,8 +1502,8 @@ int 100
ep.Ledger = ledger
saved := ops.Program[firstCmdOffset]
- require.Equal(t, opsByName[0]["intc_0"].Opcode, saved)
- ops.Program[firstCmdOffset] = opsByName[0]["intc_1"].Opcode
+ require.Equal(t, OpsByName[0]["intc_0"].Opcode, saved)
+ ops.Program[firstCmdOffset] = OpsByName[0]["intc_1"].Opcode
_, err = EvalStateful(ops.Program, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "cannot load account[100]")
@@ -1475,7 +1513,7 @@ int 100
require.Error(t, err)
require.Contains(t, err.Error(), "no app for account")
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
if name == "read" {
_, err = EvalStateful(ops.Program, ep)
@@ -1483,8 +1521,8 @@ int 100
require.Contains(t, err.Error(), "err opcode") // no such key
}
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.balances[txn.Txn.Sender].apps[100]["ALGOA"] = basics.TealValue{Type: basics.TealUintType, Uint: 1}
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGOA"] = basics.TealValue{Type: basics.TealUintType, Uint: 1}
ledger.reset()
pass, err := EvalStateful(ops.Program, ep)
@@ -1492,12 +1530,12 @@ int 100
require.True(t, pass)
delta, err := ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Empty(t, delta.GlobalDelta)
expLocal := 1
if name == "read" {
expLocal = 0
}
- require.Equal(t, expLocal, len(delta.LocalDeltas))
+ require.Len(t, delta.LocalDeltas, expLocal)
})
}
}
@@ -1515,7 +1553,7 @@ func TestAppLocalStateReadWrite(t *testing.T) {
},
)
ep.Ledger = ledger
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
// write int and bytes values
source := `int 0 // account
@@ -1556,10 +1594,10 @@ int 0x77
require.True(t, pass)
delta, err := ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
- require.Equal(t, 2, len(delta.LocalDeltas[0]))
+ require.Len(t, delta.LocalDeltas[0], 2)
vd := delta.LocalDeltas[0]["ALGO"]
require.Equal(t, basics.SetUintAction, vd.Action)
require.Equal(t, uint64(0x77), vd.Uint)
@@ -1584,11 +1622,11 @@ int 0x77
==
`
ledger.reset()
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGO")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGO")
algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1600,8 +1638,8 @@ int 0x77
require.True(t, pass)
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
// write same value after reading, expect no local delta
source = `int 0 // account
@@ -1625,8 +1663,8 @@ exist2:
==
`
ledger.reset()
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1635,8 +1673,8 @@ exist2:
require.True(t, pass)
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
// write a value and expect local delta change
source = `int 0 // account
@@ -1646,8 +1684,8 @@ app_local_put
int 1
`
ledger.reset()
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1656,9 +1694,9 @@ int 1
require.True(t, pass)
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- require.Equal(t, 1, len(delta.LocalDeltas[0]))
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
+ require.Len(t, delta.LocalDeltas[0], 1)
vd = delta.LocalDeltas[0]["ALGOA"]
require.Equal(t, basics.SetUintAction, vd.Action)
require.Equal(t, uint64(0x78), vd.Uint)
@@ -1679,8 +1717,8 @@ int 0x78
==
`
ledger.reset()
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1689,9 +1727,9 @@ int 0x78
require.True(t, pass)
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- require.Equal(t, 1, len(delta.LocalDeltas[0]))
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
+ require.Len(t, delta.LocalDeltas[0], 1)
vd = delta.LocalDeltas[0]["ALGO"]
require.Equal(t, basics.SetUintAction, vd.Action)
require.Equal(t, uint64(0x78), vd.Uint)
@@ -1710,8 +1748,8 @@ int 0x78 // value
app_local_put
`
ledger.reset()
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1720,9 +1758,9 @@ app_local_put
require.True(t, pass)
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- require.Equal(t, 1, len(delta.LocalDeltas[0]))
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
+ require.Len(t, delta.LocalDeltas[0], 1)
vd = delta.LocalDeltas[0]["ALGO"]
require.Equal(t, basics.SetUintAction, vd.Action)
require.Equal(t, uint64(0x78), vd.Uint)
@@ -1747,11 +1785,11 @@ app_local_put
int 1
`
ledger.reset()
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
ledger.balances[txn.Txn.Receiver] = makeBalanceRecord(txn.Txn.Receiver, 500)
- ledger.balances[txn.Txn.Receiver].apps[100] = make(map[string]basics.TealValue)
+ ledger.balances[txn.Txn.Receiver].locals[100] = make(basics.TealKeyValue)
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1763,10 +1801,10 @@ int 1
require.True(t, pass)
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 2, len(delta.LocalDeltas))
- require.Equal(t, 2, len(delta.LocalDeltas[0]))
- require.Equal(t, 1, len(delta.LocalDeltas[1]))
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 2)
+ require.Len(t, delta.LocalDeltas[0], 2)
+ require.Len(t, delta.LocalDeltas[1], 1)
vd = delta.LocalDeltas[0]["ALGO"]
require.Equal(t, basics.SetUintAction, vd.Action)
require.Equal(t, uint64(0x78), vd.Uint)
@@ -1837,7 +1875,7 @@ int 1
require.Error(t, err)
require.Contains(t, err.Error(), "no such app")
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 1, 0))
// a special test for read
if name == "read" {
@@ -1845,7 +1883,7 @@ int 1
require.Error(t, err)
require.Contains(t, err.Error(), "err opcode") // no such key
}
- ledger.applications[100]["ALGO"] = basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
+ ledger.applications[100].GlobalState["ALGO"] = basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
ledger.reset()
pass, err := EvalStateful(ops.Program, ep)
@@ -1854,7 +1892,7 @@ int 1
delta, err := ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.LocalDeltas)
})
}
}
@@ -1932,7 +1970,7 @@ int 0x77
},
)
ep.Ledger = ledger
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1945,8 +1983,8 @@ int 0x77
delta, err := ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 2, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Len(t, delta.GlobalDelta, 2)
+ require.Empty(t, delta.LocalDeltas)
vd := delta.GlobalDelta["ALGO"]
require.Equal(t, basics.SetUintAction, vd.Action)
@@ -1966,11 +2004,11 @@ int 0x77
==
`
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- delete(ledger.applications[100], "ALGO")
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ delete(ledger.applications[100].GlobalState, "ALGO")
algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.applications[100]["ALGO"] = algoValue
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -1980,8 +2018,8 @@ int 0x77
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
// write existing value after read
source = `int 0
@@ -2000,8 +2038,8 @@ int 0x77
==
`
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- ledger.applications[100]["ALGO"] = algoValue
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -2010,8 +2048,8 @@ int 0x77
require.True(t, pass)
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
// write new values after and before read
source = `int 0
@@ -2046,8 +2084,8 @@ byte 0x414c474f
&&
`
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- ledger.applications[100]["ALGO"] = algoValue
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
@@ -2066,8 +2104,8 @@ byte 0x414c474f
delta, err = ledger.GetDelta(&ep.Txn.Txn)
require.NoError(t, err)
- require.Equal(t, 2, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Len(t, delta.GlobalDelta, 2)
+ require.Empty(t, delta.LocalDeltas)
vd = delta.GlobalDelta["ALGO"]
require.Equal(t, basics.SetUintAction, vd.Action)
@@ -2107,35 +2145,22 @@ byte "myval"
},
)
ep.Ledger = ledger
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
- pass, err := EvalStateful(ops.Program, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "no such app")
- require.False(t, pass)
- delta, err := ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ delta := testApp(t, source, ep, "no such app")
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
- ledger.newApp(txn.Txn.Receiver, 101)
- ledger.newApp(txn.Txn.Receiver, 100) // this keeps current app id = 100
+ ledger.newApp(txn.Txn.Receiver, 101, makeSchemas(0, 0, 0, 0))
+ ledger.newApp(txn.Txn.Receiver, 100, makeSchemas(0, 0, 0, 0)) // this keeps current app id = 100
algoValue := basics.TealValue{Type: basics.TealBytesType, Bytes: "myval"}
- ledger.applications[101]["mykey"] = algoValue
+ ledger.applications[101].GlobalState["mykey"] = algoValue
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
- delta, err = ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
}
+
func TestAppGlobalDelete(t *testing.T) {
t.Parallel()
@@ -2175,33 +2200,18 @@ int 1
},
)
ep.Ledger = ledger
- ledger.newApp(txn.Txn.Sender, 100)
- sb := strings.Builder{}
- ep.Trace = &sb
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- cost, err := CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
- pass, err := EvalStateful(ops.Program, ep)
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- delta, err := ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
- require.Equal(t, 2, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ delta := testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 2)
+ require.Empty(t, delta.LocalDeltas)
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- delete(ledger.applications[100], "ALGO")
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ delete(ledger.applications[100].GlobalState, "ALGO")
algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.applications[100]["ALGO"] = algoValue
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
// check delete existing
source = `byte 0x414c474f // key "ALGO"
@@ -2212,15 +2222,8 @@ app_global_get_ex
== // two zeros
`
ep.Txn.Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID}
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
- delta, err = ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
-
- require.Equal(t, 1, len(delta.GlobalDelta))
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
vd := delta.GlobalDelta["ALGO"]
require.Equal(t, basics.DeleteAction, vd.Action)
require.Equal(t, uint64(0), vd.Uint)
@@ -2228,10 +2231,10 @@ app_global_get_ex
require.Equal(t, 0, len(delta.LocalDeltas))
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- delete(ledger.applications[100], "ALGO")
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ delete(ledger.applications[100].GlobalState, "ALGO")
- ledger.applications[100]["ALGO"] = algoValue
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
// check delete and write non-existing
source = `byte 0x414c474f41 // key "ALGOA"
@@ -2244,26 +2247,19 @@ byte 0x414c474f41
int 0x78
app_global_put
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
- delta, err = ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
-
- require.Equal(t, 1, len(delta.GlobalDelta))
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
vd = delta.GlobalDelta["ALGOA"]
require.Equal(t, basics.SetUintAction, vd.Action)
require.Equal(t, uint64(0x78), vd.Uint)
require.Equal(t, "", vd.Bytes)
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.LocalDeltas)
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- delete(ledger.applications[100], "ALGO")
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ delete(ledger.applications[100].GlobalState, "ALGO")
- ledger.applications[100]["ALGO"] = algoValue
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
// check delete and write existing
source = `byte 0x414c474f // key "ALGO"
@@ -2273,24 +2269,17 @@ int 0x78
app_global_put
int 1
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
- delta, err = ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
-
- require.Equal(t, 1, len(delta.GlobalDelta))
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
vd = delta.GlobalDelta["ALGO"]
require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.LocalDeltas)
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- delete(ledger.applications[100], "ALGO")
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ delete(ledger.applications[100].GlobalState, "ALGO")
- ledger.applications[100]["ALGO"] = algoValue
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
// check delete,write,delete existing
source = `byte 0x414c474f // key "ALGO"
@@ -2302,27 +2291,17 @@ byte 0x414c474f
app_global_del
int 1
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
- delta, err = ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
-
- require.Equal(t, 1, len(delta.GlobalDelta))
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
vd = delta.GlobalDelta["ALGO"]
require.Equal(t, basics.DeleteAction, vd.Action)
- require.Equal(t, 0, len(delta.LocalDeltas))
+ require.Empty(t, delta.LocalDeltas)
ledger.reset()
- delete(ledger.applications[100], "ALGOA")
- delete(ledger.applications[100], "ALGO")
+ delete(ledger.applications[100].GlobalState, "ALGOA")
+ delete(ledger.applications[100].GlobalState, "ALGO")
- ledger.applications[100]["ALGO"] = algoValue
+ ledger.applications[100].GlobalState["ALGO"] = algoValue
// check delete, write, delete non-existing
source = `byte 0x414c474f41 // key "ALGOA"
@@ -2334,18 +2313,9 @@ byte 0x414c474f41
app_global_del
int 1
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- cost, err = CheckStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, cost < 1000)
- pass, err = EvalStateful(ops.Program, ep)
- require.NoError(t, err)
- require.True(t, pass)
- delta, err = ledger.GetDelta(&ep.Txn.Txn)
- require.NoError(t, err)
- require.Equal(t, 1, len(delta.GlobalDelta))
- require.Equal(t, 0, len(delta.LocalDeltas))
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
+ require.Empty(t, delta.LocalDeltas)
}
func TestAppLocalDelete(t *testing.T) {
@@ -2393,9 +2363,9 @@ int 1
},
)
ep.Ledger = ledger
- ledger.newApp(txn.Txn.Sender, 100)
+ ledger.newApp(txn.Txn.Sender, 100, makeSchemas(0, 0, 0, 0))
ledger.balances[txn.Txn.Receiver] = makeBalanceRecord(txn.Txn.Receiver, 1)
- ledger.balances[txn.Txn.Receiver].apps[100] = make(basics.TealKeyValue)
+ ledger.balances[txn.Txn.Receiver].locals[100] = make(basics.TealKeyValue)
sb := strings.Builder{}
ep.Trace = &sb
@@ -2418,13 +2388,13 @@ int 1
require.Equal(t, 2, len(delta.LocalDeltas))
ledger.reset()
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGO")
- delete(ledger.balances[txn.Txn.Receiver].apps[100], "ALGOA")
- delete(ledger.balances[txn.Txn.Receiver].apps[100], "ALGO")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGO")
+ delete(ledger.balances[txn.Txn.Receiver].locals[100], "ALGOA")
+ delete(ledger.balances[txn.Txn.Receiver].locals[100], "ALGO")
algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
// check delete existing
source = `int 0 // account
@@ -2455,10 +2425,10 @@ app_local_get_ex
require.Equal(t, "", vd.Bytes)
ledger.reset()
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGO")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGO")
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
// check delete and write non-existing
source = `int 0 // account
@@ -2492,10 +2462,10 @@ app_local_put
require.Equal(t, "", vd.Bytes)
ledger.reset()
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGO")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGO")
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
// check delete and write existing
source = `int 0 // account
@@ -2525,10 +2495,10 @@ int 1
require.Equal(t, "", vd.Bytes)
ledger.reset()
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGO")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGO")
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
// check delete,write,delete existing
source = `int 0 // account
@@ -2561,10 +2531,10 @@ int 1
require.Equal(t, "", vd.Bytes)
ledger.reset()
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGOA")
- delete(ledger.balances[txn.Txn.Sender].apps[100], "ALGO")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGOA")
+ delete(ledger.balances[txn.Txn.Sender].locals[100], "ALGO")
- ledger.balances[txn.Txn.Sender].apps[100]["ALGO"] = algoValue
+ ledger.balances[txn.Txn.Sender].locals[100]["ALGO"] = algoValue
// check delete, write, delete non-existing
source = `int 0 // account
@@ -2647,8 +2617,7 @@ func TestEnumFieldErrors(t *testing.T) {
Freeze: txn.Txn.Receiver,
Clawback: txn.Txn.Receiver,
}
- ledger.newAsset(55, params)
- ledger.setHolding(txn.Txn.Sender, 55, basics.AssetHolding{Amount: 123, Frozen: true})
+ ledger.newAsset(txn.Txn.Sender, 55, params)
ep.Txn = &txn
ep.Ledger = ledger
@@ -2726,15 +2695,14 @@ func TestReturnTypes(t *testing.T) {
Freeze: txn.Txn.Receiver,
Clawback: txn.Txn.Receiver,
}
- ledger.newAsset(1, params)
- ledger.setHolding(txn.Txn.Sender, 1, basics.AssetHolding{Amount: 123, Frozen: true})
- ledger.newApp(txn.Txn.Sender, 1)
+ ledger.newAsset(txn.Txn.Sender, 1, params)
+ ledger.newApp(txn.Txn.Sender, 1, makeSchemas(0, 0, 0, 0))
ledger.balances[txn.Txn.Receiver] = makeBalanceRecord(txn.Txn.Receiver, 1)
- ledger.balances[txn.Txn.Receiver].apps[1] = make(basics.TealKeyValue)
+ ledger.balances[txn.Txn.Receiver].locals[1] = make(basics.TealKeyValue)
key, err := hex.DecodeString("33343536")
require.NoError(t, err)
algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.balances[txn.Txn.Receiver].apps[1][string(key)] = algoValue
+ ledger.balances[txn.Txn.Receiver].locals[1][string(key)] = algoValue
ep.Ledger = ledger
@@ -2747,23 +2715,28 @@ func TestReturnTypes(t *testing.T) {
"arg": "arg 0",
"load": "load 0",
"store": "store 0",
- "intc": "intcblock 0\nintc 0",
- "intc_0": "intcblock 0\nintc_0",
- "intc_1": "intcblock 0 0\nintc_1",
- "intc_2": "intcblock 0 0 0\nintc_2",
- "intc_3": "intcblock 0 0 0 0\nintc_3",
- "bytec": "bytecblock 0x32\nbytec 0",
- "bytec_0": "bytecblock 0x32\nbytec_0",
- "bytec_1": "bytecblock 0x32 0x33\nbytec_1",
- "bytec_2": "bytecblock 0x32 0x33 0x34\nbytec_2",
- "bytec_3": "bytecblock 0x32 0x33 0x34 0x35\nbytec_3",
+ "dig": "dig 0",
+ "intc": "intcblock 0; intc 0",
+ "intc_0": "intcblock 0; intc_0",
+ "intc_1": "intcblock 0 0; intc_1",
+ "intc_2": "intcblock 0 0 0; intc_2",
+ "intc_3": "intcblock 0 0 0 0; intc_3",
+ "bytec": "bytecblock 0x32; bytec 0",
+ "bytec_0": "bytecblock 0x32; bytec_0",
+ "bytec_1": "bytecblock 0x32 0x33; bytec_1",
+ "bytec_2": "bytecblock 0x32 0x33 0x34; bytec_2",
+ "bytec_3": "bytecblock 0x32 0x33 0x34 0x35; bytec_3",
"substring": "substring 0 2",
- "ed25519verify": "pop\npop\npop\nint 1", // ignore
+ "ed25519verify": "pop; pop; pop; int 1", // ignore
"asset_params_get": "asset_params_get AssetTotal",
"asset_holding_get": "asset_holding_get AssetBalance",
+ "gtxns": "gtxns Sender",
+ "gtxnsa": "gtxnsa ApplicationArgs 0",
+ "pushint": "pushint 7272",
+ "pushbytes": `pushbytes "jojogoodgorilla"`,
}
- byName := opsByName[LogicVersion]
+ byName := OpsByName[LogicVersion]
for _, m := range []runMode{runModeSignature, runModeApplication} {
t.Run(fmt.Sprintf("m=%s", m.String()), func(t *testing.T) {
for name, spec := range byName {
@@ -2781,8 +2754,7 @@ func TestReturnTypes(t *testing.T) {
sb.WriteString(name + "\n")
}
source := sb.String()
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, source, AssemblerMaxVersion)
var cx evalContext
cx.EvalParams = ep
@@ -2875,7 +2847,7 @@ int 42
ledger := makeTestLedger(
map[basics.Address]uint64{},
)
- ledger.appID = 42
+ ledger.appID = basics.AppIndex(42)
ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
require.NoError(t, err)
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 3c8178386..1fd7e6495 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -53,6 +53,14 @@ func defaultEvalProtoWithVersion(version uint64) config.ConsensusParams {
LogicSigMaxCost: 20000,
MaxAppKeyLen: 64,
MaxAppBytesValueLen: 64,
+ // These must be identical to keep an old backward compat test working
+ MinTxnFee: 1001,
+ MinBalance: 1001,
+ // Deliberately distinct values below, so the tests catch any code that conflates them
+ AppFlatParamsMinBalance: 1002,
+ SchemaMinBalancePerEntry: 1003,
+ SchemaUintMinBalance: 1004,
+ SchemaBytesMinBalance: 1005,
}
}
@@ -200,26 +208,14 @@ func TestWrongProtoVersion(t *testing.T) {
}
}
-func TestTrivialMath(t *testing.T) {
+func TestSimpleMath(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 2
-int 3
-+
-int 5
-==`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- var txn transactions.SignedTxn
- txn.Lsig.Logic = ops.Program
- pass, err := Eval(ops.Program, defaultEvalParams(nil, &txn))
- require.True(t, pass)
- require.NoError(t, err)
- })
- }
+ testAccepts(t, "int 2; int 3; + ;int 5;==", 1)
+ testAccepts(t, "int 22; int 3; - ;int 19;==", 1)
+ testAccepts(t, "int 8; int 7; * ;int 56;==", 1)
+ testAccepts(t, "int 21; int 7; / ;int 3;==", 1)
+
+ testPanics(t, "int 1; int 2; - ;int 0; ==", 1)
}
func TestSha256EqArg(t *testing.T) {
@@ -369,122 +365,38 @@ func TestTLHC(t *testing.T) {
func TestU64Math(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0x1234567812345678
-int 0x100000000
-/
-int 0x12345678
-==`, v)
- require.NoError(t, err)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.True(t, pass)
- require.NoError(t, err)
- })
- }
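+ // 0x1234567812345678 / 0x100000000 == 0x12345678, i.e. a 32-bit right shift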
+ testAccepts(t, "int 0x1234567812345678; int 0x100000000; /; int 0x12345678; ==", 1)
}
func TestItob(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`byte 0x1234567812345678
-int 0x1234567812345678
-itob
-==`, v)
- require.NoError(t, err)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- })
- }
+ testAccepts(t, "byte 0x1234567812345678; int 0x1234567812345678; itob; ==", 1)
}
func TestBtoi(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0x1234567812345678
-byte 0x1234567812345678
-btoi
-==`, v)
- require.NoError(t, err)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- })
- }
+ testAccepts(t, "int 0x1234567812345678; byte 0x1234567812345678; btoi; ==", 1)
}
func TestBtoiTooLong(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0x1234567812345678
-byte 0x1234567812345678aaaa
-btoi
-==`, v)
- require.NoError(t, err)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.False(t, pass)
- require.Error(t, err)
- isNotPanic(t, err)
- })
- }
+ testPanics(t, "int 0x1234567812345678; byte 0x1234567812345678aa; btoi; ==", 1)
}
func TestBnz(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 1
+ testAccepts(t, `
+int 1
dup
bnz safe
err
safe:
int 1
-+`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- })
- }
-}
++
+`, 1)
-func TestBnz2(t *testing.T) {
- t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 1
+ testAccepts(t, `
+int 1
int 2
int 1
int 2
@@ -498,196 +410,48 @@ planb:
after:
dup
pop
-`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- })
- }
+`, 1)
}
-func TestBz(t *testing.T) {
+func TestV2Branches(t *testing.T) {
t.Parallel()
- for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0
+ testAccepts(t, `
+int 0
dup
bz safe
err
safe:
int 1
-+`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- })
- }
-}
++
+`, 2)
-func TestB(t *testing.T) {
- t.Parallel()
- for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`b safe
+ testAccepts(t, `
+b safe
err
safe:
-int 1`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- })
- }
+int 1
+`, 2)
}
func TestReturn(t *testing.T) {
t.Parallel()
- for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 1
-return
-err`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, pass)
- })
- }
-}
-
-func TestReturnFalse(t *testing.T) {
- t.Parallel()
- for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0
-return
-int 1`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.False(t, pass)
- require.NoError(t, err)
- })
- }
+ testAccepts(t, "int 1; return; err", 2)
+ testRejects(t, "int 0; return; int 1", 2)
}
func TestSubUnderflow(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 1
-int 0x100000000
--
-pop
-int 1`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.False(t, pass)
- require.Error(t, err)
- isNotPanic(t, err)
- })
- }
+ testPanics(t, "int 1; int 10; -; pop; int 1", 1)
}
func TestAddOverflow(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0xf000000000000000
-int 0x1111111111111111
-+
-pop
-int 1`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.False(t, pass)
- require.Error(t, err)
- isNotPanic(t, err)
- })
- }
+ testPanics(t, "int 0xf000000000000000; int 0x1111111111111111; +; pop; int 1", 1)
}
func TestMulOverflow(t *testing.T) {
t.Parallel()
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0x111111111
-int 0x222222222
-*
-pop
-int 1`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.False(t, pass)
- require.Error(t, err)
- isNotPanic(t, err)
- })
- }
+ testPanics(t, "int 0x111111111; int 0x222222222; *; pop; int 1", 1)
}
func TestMulwImpl(t *testing.T) {
@@ -715,10 +479,8 @@ func TestMulwImpl(t *testing.T) {
func TestMulw(t *testing.T) {
t.Parallel()
- // multiply two numbers, ensure high is 2 and low is 0x468acf130eca8642
- for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0x111111111
+ testAccepts(t, `
+int 0x111111111
int 0x222222222
mulw
int 0x468acf130eca8642 // compare low (top of the stack)
@@ -732,21 +494,7 @@ bnz done
err
done:
int 1 // ret 1
-`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.True(t, pass)
- require.NoError(t, err)
- })
- }
+`, 1)
}
func TestAddwImpl(t *testing.T) {
@@ -770,10 +518,8 @@ func TestAddwImpl(t *testing.T) {
func TestAddw(t *testing.T) {
t.Parallel()
- // add two numbers, ensure sum is 0x42 and carry is 0x1
- for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 0xFFFFFFFFFFFFFFFF
+ testAccepts(t, `
+int 0xFFFFFFFFFFFFFFFF
int 0x43
addw
int 0x42 // compare sum (top of the stack)
@@ -787,21 +533,7 @@ bnz done
err
done:
int 1 // ret 1
-`, v)
- require.NoError(t, err)
- cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
- require.NoError(t, err)
- require.True(t, cost < 1000)
- sb := strings.Builder{}
- pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
- if !pass {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.True(t, pass)
- require.NoError(t, err)
- })
- }
+`, 2)
}
func TestDivZero(t *testing.T) {
@@ -1105,8 +837,8 @@ func TestTxnBadField(t *testing.T) {
isNotPanic(t, err)
// test txn does not accept ApplicationArgs and Accounts
- txnOpcode := opsByName[LogicVersion]["txn"].Opcode
- txnaOpcode := opsByName[LogicVersion]["txna"].Opcode
+ txnOpcode := OpsByName[LogicVersion]["txn"].Opcode
+ txnaOpcode := OpsByName[LogicVersion]["txna"].Opcode
fields := []TxnField{ApplicationArgs, Accounts}
for _, field := range fields {
@@ -1170,8 +902,8 @@ func TestGtxnBadField(t *testing.T) {
isNotPanic(t, err)
// test gtxn does not accept ApplicationArgs and Accounts
- txnOpcode := opsByName[LogicVersion]["txn"].Opcode
- txnaOpcode := opsByName[LogicVersion]["txna"].Opcode
+ txnOpcode := OpsByName[LogicVersion]["txn"].Opcode
+ txnaOpcode := OpsByName[LogicVersion]["txna"].Opcode
fields := []TxnField{ApplicationArgs, Accounts}
for _, field := range fields {
@@ -1248,7 +980,8 @@ int 9
}
}
-const globalV1TestProgram = `global MinTxnFee
+const globalV1TestProgram = `
+global MinTxnFee
int 123
==
global MinBalance
@@ -1269,9 +1002,12 @@ int 1
&&
`
-const globalV2TestProgram = `global LogicSigVersion
-int 2
-==
+const testAddr = "47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU"
+
+const globalV2TestProgram = globalV1TestProgram + `
+global LogicSigVersion
+int 1
+>
&&
global Round
int 0
@@ -1286,6 +1022,12 @@ int 42
==
&&
`
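+// globalV3TestProgram extends V2 with a CreatorAddress check against testAddr,
+// which TestGlobal sets as the test ledger's creator address.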
+const globalV3TestProgram = globalV2TestProgram + `
+global CreatorAddress
+addr ` + testAddr + `
+==
+&&
+`
func TestGlobal(t *testing.T) {
t.Parallel()
@@ -1299,16 +1041,19 @@ func TestGlobal(t *testing.T) {
0: {GroupSize, globalV1TestProgram, Eval, Check},
1: {GroupSize, globalV1TestProgram, Eval, Check},
2: {
- CurrentApplicationID, globalV1TestProgram + globalV2TestProgram,
- func(p []byte, ep EvalParams) (bool, error) {
- pass, err := EvalStateful(p, ep)
- return pass, err
- },
- func(program []byte, ep EvalParams) (int, error) { return CheckStateful(program, ep) },
+ CurrentApplicationID, globalV2TestProgram,
+ EvalStateful, CheckStateful,
+ },
+ 3: {
+ CreatorAddress, globalV3TestProgram,
+ EvalStateful, CheckStateful,
},
}
ledger := makeTestLedger(nil)
ledger.appID = 42
+ addr, err := basics.UnmarshalChecksumAddress(testAddr)
+ require.NoError(t, err)
+ ledger.creatorAddr = addr
for v := uint64(0); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
last := tests[v].lastField
@@ -1320,8 +1065,7 @@ func TestGlobal(t *testing.T) {
t.Errorf("TestGlobal missing field %v", globalField)
}
}
- ops, err := AssembleStringWithVersion(testProgram, v)
- require.NoError(t, err)
+ ops := testProg(t, testProgram, v)
cost, err := check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
@@ -1551,7 +1295,7 @@ arg 8
&&
`
-var testTxnProgramText = testTxnProgramTextV1 + `txn ApplicationID
+const testTxnProgramTextV2 = testTxnProgramTextV1 + `txn ApplicationID
int 123
==
&&
@@ -1655,6 +1399,46 @@ int 1
&&
`
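+// testTxnProgramTextV3 appends checks for the v3 txn fields (Assets, Applications,
+// and the state-schema counts) that makeSampleTxn populates.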
+const testTxnProgramTextV3 = testTxnProgramTextV2 + `
+assert
+txn NumAssets
+int 2
+==
+assert
+txna Assets 0
+int 55
+==
+assert
+txn NumApplications
+int 3
+==
+assert
+txn Applications 3 // Assembler will use 'txna'
+int 111
+==
+assert
+
+txn GlobalNumUint
+int 3
+==
+assert
+txn GlobalNumByteSlice
+int 0
+==
+assert
+txn LocalNumUint
+int 1
+==
+assert
+txn LocalNumByteSlice
+int 2
+==
+assert
+
+
+int 1
+`
+
func makeSampleTxn() transactions.SignedTxn {
var txn transactions.SignedTxn
copy(txn.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
@@ -1716,6 +1500,9 @@ func makeSampleTxn() transactions.SignedTxn {
copy(txn.Txn.FreezeAccount[:], freezeAccAddr)
txn.Txn.AssetFrozen = true
txn.Txn.ForeignAssets = []basics.AssetIndex{55, 77}
+ txn.Txn.ForeignApps = []basics.AppIndex{56, 78, 111}
+ txn.Txn.GlobalStateSchema = basics.StateSchema{NumUint: 3, NumByteSlice: 0}
+ txn.Txn.LocalStateSchema = basics.StateSchema{NumUint: 1, NumByteSlice: 2}
return txn
}
@@ -1734,7 +1521,7 @@ func makeSampleTxnGroup(txn transactions.SignedTxn) []transactions.SignedTxn {
func TestTxn(t *testing.T) {
t.Parallel()
for _, txnField := range TxnFieldNames {
- if !strings.Contains(testTxnProgramText, txnField) {
+ if !strings.Contains(testTxnProgramTextV3, txnField) {
if txnField != FirstValidTime.String() {
t.Errorf("TestTxn missing field %v", txnField)
}
@@ -1743,7 +1530,8 @@ func TestTxn(t *testing.T) {
tests := map[uint64]string{
1: testTxnProgramTextV1,
- 2: testTxnProgramText,
+ 2: testTxnProgramTextV2,
+ 3: testTxnProgramTextV3,
}
clearOps, err := AssembleStringWithVersion("int 1", 1)
@@ -1751,8 +1539,7 @@ func TestTxn(t *testing.T) {
for v, source := range tests {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(source, v)
- require.NoError(t, err)
+ ops := testProg(t, source, v)
cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
@@ -1946,22 +1733,11 @@ int 1
for v, source := range tests {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(source, v)
- require.NoError(t, err)
- sb := strings.Builder{}
- cost, err := Check(ops.Program, defaultEvalParams(&sb, nil))
- if err != nil {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
- }
- require.NoError(t, err)
- require.True(t, cost < 1000)
txn := makeSampleTxn()
// RekeyTo not allowed in TEAL v1
if v < rekeyingEnabledVersion {
txn.Txn.RekeyTo = basics.Address{}
}
- txn.Lsig.Logic = ops.Program
txn.Lsig.Args = [][]byte{
txn.Txn.Sender[:],
txn.Txn.Receiver[:],
@@ -1970,21 +1746,47 @@ int 1
txn.Txn.SelectionPK[:],
txn.Txn.Note,
}
- txgroup := makeSampleTxnGroup(txn)
- sb = strings.Builder{}
- ep := defaultEvalParams(&sb, &txn)
- ep.TxnGroup = txgroup
- pass, err := Eval(ops.Program, ep)
- if !pass || err != nil {
- t.Log(hex.EncodeToString(ops.Program))
- t.Log(sb.String())
+ ep := defaultEvalParams(nil, &txn)
+ ep.TxnGroup = makeSampleTxnGroup(txn)
+ testLogic(t, source, v, ep)
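+ // For v3 and later, rewrite gtxn/gtxna into the new gtxns/gtxnsa forms and re-run the same checks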
+ if v >= 3 {
+ gtxnsProg := strings.ReplaceAll(source, "gtxn 0", "int 0; gtxns")
+ gtxnsProg = strings.ReplaceAll(gtxnsProg, "gtxn 1", "int 1; gtxns")
+ gtxnsProg = strings.ReplaceAll(gtxnsProg, "gtxna 0", "int 0; gtxnsa")
+ gtxnsProg = strings.ReplaceAll(gtxnsProg, "gtxna 1", "int 1; gtxnsa")
+ require.False(t, strings.Contains(gtxnsProg, "gtxn ")) // Got 'em all
+ require.False(t, strings.Contains(gtxnsProg, "gtxna ")) // Got 'em all
+ testLogic(t, gtxnsProg, v, ep)
}
- require.NoError(t, err)
- require.True(t, pass)
})
}
}
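+// testLogic assembles program at version v, Checks it, and Evals it against ep.
+// With no problems given, evaluation must pass; otherwise it must fail with an
+// error containing every problem substring.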
+func testLogic(t *testing.T, program string, v uint64, ep EvalParams, problems ...string) {
+ ops := testProg(t, program, v)
+ sb := &strings.Builder{}
+ ep.Trace = sb
+ ep.Txn.Lsig.Logic = ops.Program
+ cost, err := Check(ops.Program, ep)
+ if err != nil {
+ t.Log(hex.EncodeToString(ops.Program))
+ t.Log(sb.String())
+ }
+ require.NoError(t, err)
+ require.True(t, cost < 1000)
+
+ pass, err := Eval(ops.Program, ep)
+ if len(problems) == 0 {
+ require.NoError(t, err, sb.String())
+ require.True(t, pass, sb.String())
+ } else {
+ require.Error(t, err, sb.String())
+ for _, problem := range problems {
+ require.Contains(t, err.Error(), problem)
+ }
+ }
+}
+
func TestTxna(t *testing.T) {
t.Parallel()
source := `txna Accounts 1
@@ -2195,7 +1997,7 @@ int 0x310
func TestStringOps(t *testing.T) {
t.Parallel()
- program, err := assembleStringWithTrace(t, `byte 0x123456789abc
+ ops := testProg(t, `byte 0x123456789abc
substring 1 3
byte 0x3456
==
@@ -2222,14 +2024,13 @@ len
int 0
==
&&`, 2)
- require.NoError(t, err)
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
sb := strings.Builder{}
- pass, err := Eval(program, defaultEvalParams(&sb, nil))
+ pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
- t.Log(hex.EncodeToString(program))
+ t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.NoError(t, err)
@@ -2238,7 +2039,7 @@ int 0
func TestConsOverflow(t *testing.T) {
t.Parallel()
- program, err := assembleStringWithTrace(t, `byte 0xf000000000000000
+ ops := testProg(t, `byte 0xf000000000000000
dup
concat
dup
@@ -2276,14 +2077,13 @@ concat
dup
concat
len`, 2)
- require.NoError(t, err)
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
sb := strings.Builder{}
- pass, err := Eval(program, defaultEvalParams(&sb, nil))
+ pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
- t.Log(hex.EncodeToString(program))
+ t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.False(t, pass)
@@ -2294,26 +2094,23 @@ len`, 2)
func TestSubstringFlop(t *testing.T) {
t.Parallel()
// fails in compiler
- program, err := assembleStringWithTrace(t, `byte 0xf000000000000000
+ ops := testProg(t, `byte 0xf000000000000000
substring 4 2
-len`, 2)
- require.Error(t, err)
- require.Nil(t, program)
+len`, 2, expect{2, "substring end is before start"})
// fails at runtime
- program, err = assembleStringWithTrace(t, `byte 0xf000000000000000
+ ops = testProg(t, `byte 0xf000000000000000
int 4
int 2
substring3
len`, 2)
- require.NoError(t, err)
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
sb := strings.Builder{}
- pass, err := Eval(program, defaultEvalParams(&sb, nil))
+ pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
- t.Log(hex.EncodeToString(program))
+ t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.False(t, pass)
@@ -2321,16 +2118,15 @@ len`, 2)
isNotPanic(t, err)
// fails at runtime
- program, err = assembleStringWithTrace(t, `byte 0xf000000000000000
+ ops = testProg(t, `byte 0xf000000000000000
int 4
int 0xFFFFFFFFFFFFFFFE
substring3
len`, 2)
- require.NoError(t, err)
- cost, err = Check(program, defaultEvalParams(nil, nil))
+ cost, err = Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
- pass, err = Eval(program, defaultEvalParams(nil, nil))
+ pass, err = Eval(ops.Program, defaultEvalParams(nil, nil))
require.False(t, pass)
require.Error(t, err)
require.Contains(t, err.Error(), "substring range beyond length of string")
@@ -2338,17 +2134,16 @@ len`, 2)
func TestSubstringRange(t *testing.T) {
t.Parallel()
- program, err := assembleStringWithTrace(t, `byte 0xf000000000000000
+ ops := testProg(t, `byte 0xf000000000000000
substring 2 99
len`, 2)
- require.NoError(t, err)
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
sb := strings.Builder{}
- pass, err := Eval(program, defaultEvalParams(&sb, nil))
+ pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if pass {
- t.Log(hex.EncodeToString(program))
+ t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.False(t, pass)
@@ -2360,7 +2155,7 @@ func TestLoadStore(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops, err := AssembleStringWithVersion(`int 37
+ ops := testProg(t, `int 37
int 37
store 1
byte 0xabbacafe
@@ -2375,7 +2170,6 @@ load 0
load 1
+
&&`, v)
- require.NoError(t, err)
cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
@@ -2391,18 +2185,6 @@ load 1
}
}
-func assembleStringWithTrace(t testing.TB, text string, version uint64) ([]byte, error) {
- sr := strings.NewReader(text)
- sb := strings.Builder{}
- ops := OpStream{Trace: &sb, Version: version}
- err := ops.assemble(sr)
- if err != nil {
- t.Log(sb.String())
- return nil, err
- }
- return ops.Program, nil
-}
-
func TestLoadStore2(t *testing.T) {
t.Parallel()
progText := `int 2
@@ -2418,15 +2200,14 @@ int 5
==`
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- program, err := assembleStringWithTrace(t, progText, v)
- require.NoError(t, err)
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ ops := testProg(t, progText, v)
+ cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
sb := strings.Builder{}
- pass, err := Eval(program, defaultEvalParams(&sb, nil))
+ pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
if !pass {
- t.Log(hex.EncodeToString(program))
+ t.Log(hex.EncodeToString(ops.Program))
t.Log(sb.String())
}
require.NoError(t, err)
@@ -2760,17 +2541,16 @@ int 1
func TestShortProgramTrue(t *testing.T) {
t.Parallel()
- program, err := assembleStringWithTrace(t, `intcblock 1
+ ops := testProg(t, `intcblock 1
intc 0
intc 0
bnz done
done:`, 2)
- require.NoError(t, err)
- cost, err := Check(program, defaultEvalParams(nil, nil))
+ cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
require.NoError(t, err)
require.True(t, cost < 1000)
sb := strings.Builder{}
- pass, err := Eval(program, defaultEvalParams(&sb, nil))
+ pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil))
require.NoError(t, err)
require.True(t, pass)
}
@@ -2853,7 +2633,7 @@ func TestPanic(t *testing.T) {
oldSpec = spec
opsByOpcode[v][opcode].op = opPanic
opsByOpcode[v][opcode].Modes = modeAny
- opsByOpcode[v][opcode].opSize.checkFunc = checkPanic
+ opsByOpcode[v][opcode].Details.checkFunc = checkPanic
ops.Program = append(ops.Program, byte(opcode))
break
}
@@ -3535,7 +3315,7 @@ ed25519verify`, pkStr), AssemblerMaxVersion)
func BenchmarkCheckx5(b *testing.B) {
sourcePrograms := []string{
tlhcProgramText,
- testTxnProgramText,
+ testTxnProgramTextV3,
testCompareProgramText,
addBenchmarkSource,
addBenchmark2Source,
@@ -3616,7 +3396,7 @@ intc_0
ep := defaultEvalParams(nil, nil)
- origSpec := opsByName[LogicVersion]["+"]
+ origSpec := OpsByName[LogicVersion]["+"]
spec := origSpec
defer func() {
// restore, opsByOpcode is global
@@ -3665,24 +3445,19 @@ int 1
`
ep := defaultEvalParams(nil, nil)
- ops, err := AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, text, AssemblerMaxVersion)
pass, err := Eval(ops.Program, ep)
require.NoError(t, err)
require.True(t, pass)
- text = `dup2`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
- require.NoError(t, err)
+ text = `int 1; int 2; dup2; pop; pop; pop`
+ ops = testProg(t, text, AssemblerMaxVersion)
pass, err = Eval(ops.Program, ep)
- require.Error(t, err)
- require.False(t, pass)
-
- text = `int 1
-dup2
-`
- ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion)
require.NoError(t, err)
+ require.True(t, pass)
+
+ text = `int 1; int 2; dup2; pop; pop`
+ ops = testProg(t, text, AssemblerMaxVersion)
pass, err = Eval(ops.Program, ep)
require.Error(t, err)
require.False(t, pass)
@@ -3878,24 +3653,24 @@ func TestAllowedOpcodesV2(t *testing.T) {
tests := map[string]string{
"txna": "txna Accounts 0",
"gtxna": "gtxna 0 ApplicationArgs 0",
- "bz": "bz l\nl:",
- "b": "b l\nl:",
- "return": "int 1\nreturn",
- "addw": "int 0\nint 1\naddw",
- "dup2": "dup2",
- "concat": "byte 0x41\ndup\nconcat",
- "substring": "byte 0x41\nsubstring 0 1",
- "substring3": "byte 0x41\ndup\ndup\nsubstring3",
- "balance": "int 1\nbalance",
- "app_opted_in": "int 0\ndup\napp_opted_in",
- "app_local_get": "int 0\nbyte 0x41\napp_local_get",
- "app_local_get_ex": "int 0\ndup\nbyte 0x41\napp_local_get_ex",
- "app_global_get": "int 0\nbyte 0x41\napp_global_get",
- "app_global_get_ex": "int 0\nbyte 0x41\napp_global_get_ex",
- "app_local_put": "int 0\ndup\nbyte 0x41\napp_local_put",
- "app_global_put": "byte 0x41\ndup\napp_global_put",
- "app_local_del": "int 0\nbyte 0x41\napp_local_del",
- "app_global_del": "byte 0x41\napp_global_del",
+ "bz": "int 0; bz l; l:",
+ "b": "b l; l:",
+ "return": "int 1; return",
+ "addw": "int 0; int 1; addw",
+ "dup2": "int 1; int 2; dup2",
+ "concat": "byte 0x41; dup; concat",
+ "substring": "byte 0x41; substring 0 1",
+ "substring3": "byte 0x41; dup; dup; substring3",
+ "balance": "int 1; balance",
+ "app_opted_in": "int 0; dup; app_opted_in",
+ "app_local_get": "int 0; byte 0x41; app_local_get",
+ "app_local_get_ex": "int 0; dup; byte 0x41; app_local_get_ex",
+ "app_global_get": "int 0; byte 0x41; app_global_get",
+ "app_global_get_ex": "int 0; byte 0x41; app_global_get_ex",
+ "app_local_put": "int 0; dup; byte 0x41; app_local_put",
+ "app_global_put": "byte 0x41; dup; app_global_put",
+ "app_local_del": "int 0; byte 0x41; app_local_del",
+ "app_global_del": "byte 0x41; app_global_del",
"asset_holding_get": "asset_holding_get AssetBalance",
"asset_params_get": "asset_params_get AssetTotal",
}
@@ -3912,17 +3687,17 @@ func TestAllowedOpcodesV2(t *testing.T) {
cnt := 0
for _, spec := range OpSpecs {
- if spec.Version > 1 && !excluded[spec.Name] {
+ if spec.Version == 2 && !excluded[spec.Name] {
source, ok := tests[spec.Name]
- require.True(t, ok, fmt.Sprintf("Missed opcode in the test: %s", spec.Name))
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err, source)
+ require.True(t, ok, "Missed opcode in the test: %s", spec.Name)
+ require.Contains(t, source, spec.Name)
+ ops := testProg(t, source, AssemblerMaxVersion)
// all opcodes allowed in stateful mode so use CheckStateful/EvalStateful
- _, err = CheckStateful(ops.Program, ep)
+ _, err := CheckStateful(ops.Program, ep)
require.NoError(t, err, source)
_, err = EvalStateful(ops.Program, ep)
if spec.Name != "return" {
- // "return" opcode is always succeed so ignore it
+ // "return" opcode always succeeds so ignore it
require.Error(t, err, source)
require.NotContains(t, err.Error(), "illegal opcode")
}
@@ -3948,6 +3723,66 @@ func TestAllowedOpcodesV2(t *testing.T) {
require.Equal(t, len(tests), cnt)
}
+// check all v3 opcodes: allowed in v3 and not allowed before
+func TestAllowedOpcodesV3(t *testing.T) {
+ t.Parallel()
+
+ // all tests are expected to fail in evaluation
+ tests := map[string]string{
+ "assert": "int 1; assert",
+ "min_balance": "int 1; min_balance",
+ "getbit": "int 15; int 64; getbit",
+ "setbit": "int 15; int 64; int 0; setbit",
+ "getbyte": "byte \"john\"; int 5; getbyte",
+ "setbyte": "byte \"john\"; int 5; int 66; setbyte",
+ "swap": "int 1; byte \"x\"; swap",
+ "select": "int 1; byte \"x\"; int 1; select",
+ "dig": "int 1; int 1; dig 1",
+ "gtxns": "int 0; gtxns FirstValid",
+ "gtxnsa": "int 0; gtxnsa Accounts 0",
+ "pushint": "pushint 7; pushint 4",
+ "pushbytes": `pushbytes "stringsfail?"`,
+ }
+
+ excluded := map[string]bool{}
+
+ ep := defaultEvalParams(nil, nil)
+
+ cnt := 0
+ for _, spec := range OpSpecs {
+ if spec.Version == 3 && !excluded[spec.Name] {
+ source, ok := tests[spec.Name]
+ require.True(t, ok, "Missed opcode in the test: %s", spec.Name)
+ require.Contains(t, source, spec.Name)
+ ops := testProg(t, source, AssemblerMaxVersion)
+ // all opcodes allowed in stateful mode so use CheckStateful/EvalStateful
+ _, err := CheckStateful(ops.Program, ep)
+ require.NoError(t, err, source)
+ _, err = EvalStateful(ops.Program, ep)
+ require.Error(t, err, source)
+ require.NotContains(t, err.Error(), "illegal opcode")
+
+ for v := byte(0); v <= 1; v++ {
+ ops.Program[0] = v
+ _, err = Check(ops.Program, ep)
+ require.Error(t, err, source)
+ require.Contains(t, err.Error(), "illegal opcode")
+ _, err = CheckStateful(ops.Program, ep)
+ require.Error(t, err, source)
+ require.Contains(t, err.Error(), "illegal opcode")
+ _, err = Eval(ops.Program, ep)
+ require.Error(t, err, source)
+ require.Contains(t, err.Error(), "illegal opcode")
+ _, err = EvalStateful(ops.Program, ep)
+ require.Error(t, err, source)
+ require.Contains(t, err.Error(), "illegal opcode")
+ }
+ cnt++
+ }
+ }
+ require.Equal(t, len(tests), cnt)
+}
+
func TestRekeyFailsOnOldVersion(t *testing.T) {
t.Parallel()
for v := uint64(0); v < rekeyingEnabledVersion; v++ {
@@ -3972,3 +3807,161 @@ func TestRekeyFailsOnOldVersion(t *testing.T) {
})
}
}
+
+func obfuscate(program string) string {
+ // Put a prefix on the program that does nothing interesting,
+ // but prevents assembly from detecting type errors. Allows
+ // evaluation testing of a program that would be rejected by
+ // the assembler.
+ if strings.Contains(program, "obfuscate") {
+ return program // Already done. Tests sometimes use it at multiple levels.
+ }
+ return "int 0;bnz obfuscate;obfuscate:;" + program
+}
+
+type evalTester func(pass bool, err error) bool
+
+func testEvaluation(t *testing.T, program string, introduced uint64, tester evalTester) {
+ for v := uint64(1); v <= AssemblerMaxVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ if v < introduced {
+ testProg(t, obfuscate(program), v, expect{0, "...opcode was introduced..."})
+ return
+ }
+ ops := testProg(t, program, v)
+ cost, err := Check(ops.Program, defaultEvalParams(nil, nil))
+ require.NoError(t, err)
+ require.True(t, cost < 1000)
+ var txn transactions.SignedTxn
+ txn.Lsig.Logic = ops.Program
+ sb := strings.Builder{}
+ pass, err := Eval(ops.Program, defaultEvalParams(&sb, &txn))
+ ok := tester(pass, err)
+ if !ok {
+ t.Log(hex.EncodeToString(ops.Program))
+ t.Log(sb.String())
+ }
+ require.True(t, ok)
+ isNotPanic(t, err) // Never want a Go level panic.
+ })
+ }
+}
+
+func testAccepts(t *testing.T, program string, introduced uint64) {
+ testEvaluation(t, program, introduced, func(pass bool, err error) bool {
+ return pass && err == nil
+ })
+}
+func testRejects(t *testing.T, program string, introduced uint64) {
+ testEvaluation(t, program, introduced, func(pass bool, err error) bool {
+ // Returned False, but didn't panic
+ return !pass && err == nil
+ })
+}
+func testPanics(t *testing.T, program string, introduced uint64) {
+ testEvaluation(t, program, introduced, func(pass bool, err error) bool {
+ // TEAL panic! not just reject at exit
+ return !pass && err != nil
+ })
+}
+
+func TestAssert(t *testing.T) {
+ t.Parallel()
+ testAccepts(t, "int 1;assert;int 1", 3)
+ testRejects(t, "int 1;assert;int 0", 3)
+ testPanics(t, "int 0;assert;int 1", 3)
+ testPanics(t, obfuscate("assert;int 1"), 3)
+ testPanics(t, obfuscate(`byte "john";assert;int 1`), 3)
+}
+
+func TestBits(t *testing.T) {
+ t.Parallel()
+ testAccepts(t, "int 1; int 0; getbit; int 1; ==", 3)
+ testAccepts(t, "int 1; int 1; getbit; int 0; ==", 3)
+
+ testAccepts(t, "int 1; int 63; getbit; int 0; ==", 3)
+ testPanics(t, "int 1; int 64; getbit; int 0; ==", 3)
+
+ testAccepts(t, "int 0; int 3; int 1; setbit; int 8; ==", 3)
+ testAccepts(t, "int 8; int 3; getbit; int 1; ==", 3)
+
+ testAccepts(t, "int 15; int 3; int 0; setbit; int 7; ==", 3)
+
+ // bit 10 is the 3rd bit (from the high end) in the second byte
+ testAccepts(t, "byte 0xfff0; int 10; getbit; int 1; ==", 3)
+ testAccepts(t, "byte 0xfff0; int 12; getbit; int 0; ==", 3)
+ testPanics(t, "byte 0xfff0; int 16; getbit; int 0; ==", 3)
+
+ testAccepts(t, "byte 0xfffff0; int 21; int 1; setbit; byte 0xfffff4; ==", 3)
+ testAccepts(t, "byte 0xfffff4; int 1; int 0; setbit; byte 0xbffff4; ==", 3)
+ testPanics(t, "byte 0xfffff4; int 24; int 0; setbit; byte 0xbf; ==", 3)
+
+ testAccepts(t, "byte 0x0000; int 3; int 1; setbit; byte 0x1000; ==", 3)
+ testAccepts(t, "byte 0x0000; int 15; int 1; setbit; byte 0x0001; ==", 3)
+ testAccepts(t, "int 0x0000; int 3; int 1; setbit; int 0x0008; ==", 3)
+ testAccepts(t, "int 0x0000; int 12; int 1; setbit; int 0x1000; ==", 3)
+}
+
+func TestBytes(t *testing.T) {
+ t.Parallel()
+ testAccepts(t, "byte 0x12345678; int 2; getbyte; int 0x56; ==", 3)
+ testPanics(t, "byte 0x12345678; int 4; getbyte; int 0x56; ==", 3)
+
+ testAccepts(t, `byte "john"; int 0; getbyte; int 106; ==`, 3) // ascii j
+ testAccepts(t, `byte "john"; int 1; getbyte; int 111; ==`, 3) // ascii o
+ testAccepts(t, `byte "john"; int 2; getbyte; int 104; ==`, 3) // ascii h
+ testAccepts(t, `byte "john"; int 3; getbyte; int 110; ==`, 3) // ascii n
+ testPanics(t, `byte "john"; int 4; getbyte; int 1; ==`, 3) // past end
+
+ testAccepts(t, `byte "john"; int 2; int 105; setbyte; byte "join"; ==`, 3)
+ // dup makes copies, modifying one does not change the other
+ testAccepts(t, `byte "john"; dup; int 2; int 105; setbyte; pop; byte "john"; ==`, 3)
+}
+
+func TestSwap(t *testing.T) {
+ t.Parallel()
+ testAccepts(t, "int 1; byte 0x1234; swap; int 1; ==; assert; byte 0x1234; ==", 3)
+}
+
+func TestSelect(t *testing.T) {
+ t.Parallel()
+
+ testAccepts(t, "int 1; byte 0x1231; int 0; select", 3) // selects the 1
+ testRejects(t, "int 0; byte 0x1232; int 0; select", 3) // selects the 0
+
+ testAccepts(t, "int 0; int 1; int 1; select", 3) // selects the 1
+ testPanics(t, "int 1; byte 0x1233; int 1; select", 3) // selects the bytes
+}
+
+func TestDig(t *testing.T) {
+ t.Parallel()
+ testAccepts(t, "int 3; int 2; int 1; dig 1; int 2; ==; return", 3)
+ testPanics(t, "int 3; int 2; int 1; dig 11; int 2; ==; return", 3)
+}
+
+func TestPush(t *testing.T) {
+ t.Parallel()
+ testAccepts(t, "int 2; pushint 2; ==", 3)
+ testAccepts(t, "pushbytes 0x1234; byte 0x1234; ==", 3)
+
+ // There's a savings to be had if the intcblock is entirely avoided
+ ops1 := testProg(t, "int 1", 3)
+ ops2 := testProg(t, "pushint 1", 3)
+ require.Less(t, len(ops2.Program), len(ops1.Program))
+
+ // There's no savings to be had if the pushint replaces a
+ // reference to one of the one-byte intc_0..intc_3 opcodes.
+ // And the intcblock only grows by the varuint encoding size
+ // of the pushed int, which is the same either way.
+
+ ops1 = testProg(t, "int 2; int 1", 3)
+ ops2 = testProg(t, "int 2; pushint 1", 3)
+ require.Equal(t, len(ops2.Program), len(ops1.Program))
+
+ // There's a savings to be had when the intcblock has more than 4
+ // elements, because references beyond index 3 require two bytes.
+ ops1 = testProg(t, "int 2; int 3; int 5; int 6; int 1", 3)
+ ops2 = testProg(t, "int 2; int 3; int 5; int 6; pushint 1", 3)
+ require.Less(t, len(ops2.Program), len(ops1.Program))
+}
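For reference, a minimal sketch (not part of this commit) showing how the new testAccepts/testPanics helpers compose for a v3 opcode. It assumes it sits in the logic package's eval_test.go next to the helpers added above, with the usual testing import:

    // Sketch only: mirrors TestSwap/TestBits above, using the in-package helpers.
    func TestSwapSketch(t *testing.T) {
        t.Parallel()
        // swap reverses the top two stack entries; it is introduced in v3, so the
        // helper also checks that v1/v2 assembly rejects the program.
        testAccepts(t, "int 7; byte 0xaa; swap; int 7; ==; assert; byte 0xaa; ==", 3)
        // getbit past bit 63 of a uint64 is a runtime failure, not a clean reject.
        testPanics(t, "int 1; int 64; getbit; int 0; ==", 3)
    }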
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index 78439daba..eb3b956f1 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -17,6 +17,8 @@
package logic
import (
+ "fmt"
+
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
)
@@ -79,7 +81,7 @@ const (
ApplicationID
// OnCompletion OnCompletion
OnCompletion
- // ApplicationArgs []basics.TealValue
+ // ApplicationArgs [][]byte
ApplicationArgs
// NumAppArgs len(ApplicationArgs)
NumAppArgs
@@ -123,6 +125,23 @@ const (
FreezeAssetAccount
// FreezeAssetFrozen bool
FreezeAssetFrozen
+ // Assets []basics.AssetIndex
+ Assets
+ // NumAssets len(ForeignAssets)
+ NumAssets
+ // Applications []basics.AppIndex
+ Applications
+ // NumApplications len(ForeignApps)
+ NumApplications
+
+ // GlobalNumUint uint64
+ GlobalNumUint
+ // GlobalNumByteSlice uint64
+ GlobalNumByteSlice
+ // LocalNumUint uint64
+ LocalNumUint
+ // LocalNumByteSlice uint64
+ LocalNumByteSlice
invalidTxnField // fence for some setup that loops from Sender..invalidTxnField
)
@@ -141,7 +160,7 @@ type tfNameSpecMap map[string]txnFieldSpec
func (s tfNameSpecMap) getExtraFor(name string) (extra string) {
if s[name].version > 1 {
- extra = "LogicSigVersion >= 2."
+ extra = fmt.Sprintf("LogicSigVersion >= %d.", s[name].version)
}
return
}
@@ -201,6 +220,14 @@ var txnFieldSpecs = []txnFieldSpec{
{FreezeAsset, StackUint64, 2},
{FreezeAssetAccount, StackBytes, 2},
{FreezeAssetFrozen, StackUint64, 2},
+ {Assets, StackUint64, 3},
+ {NumAssets, StackUint64, 3},
+ {Applications, StackUint64, 3},
+ {NumApplications, StackUint64, 3},
+ {GlobalNumUint, StackUint64, 3},
+ {GlobalNumByteSlice, StackUint64, 3},
+ {LocalNumUint, StackUint64, 3},
+ {LocalNumByteSlice, StackUint64, 3},
}
// TxnaFieldNames are arguments to the 'txna' opcode
@@ -211,11 +238,15 @@ var TxnaFieldNames = []string{ApplicationArgs.String(), Accounts.String()}
var TxnaFieldTypes = []StackType{
txnaFieldSpecByField[ApplicationArgs].ftype,
txnaFieldSpecByField[Accounts].ftype,
+ txnaFieldSpecByField[Assets].ftype,
+ txnaFieldSpecByField[Applications].ftype,
}
var txnaFieldSpecByField = map[TxnField]txnFieldSpec{
ApplicationArgs: {ApplicationArgs, StackBytes, 2},
Accounts: {Accounts, StackBytes, 2},
+ Assets: {Assets, StackUint64, 3},
+ Applications: {Applications, StackUint64, 3},
}
// TxnTypeNames is the values of Txn.Type in enum order
@@ -275,6 +306,9 @@ const (
ZeroAddress
// GroupSize len(txn group)
GroupSize
+
+ // v2
+
// LogicSigVersion ConsensusParams.LogicSigVersion
LogicSigVersion
// Round basics.Round
@@ -284,6 +318,11 @@ const (
// CurrentApplicationID uint64
CurrentApplicationID
+ // v3
+
+ // CreatorAddress [32]byte
+ CreatorAddress
+
invalidGlobalField
)
@@ -310,6 +349,7 @@ var globalFieldSpecs = []globalFieldSpec{
{Round, StackUint64, runModeApplication, 2},
{LatestTimestamp, StackUint64, runModeApplication, 2},
{CurrentApplicationID, StackUint64, runModeApplication, 2},
+ {CreatorAddress, StackBytes, runModeApplication, 3},
}
// GlobalFieldSpecByField maps GlobalField to spec
@@ -321,7 +361,7 @@ type gfNameSpecMap map[string]globalFieldSpec
func (s gfNameSpecMap) getExtraFor(name string) (extra string) {
if s[name].version > 1 {
- extra = "LogicSigVersion >= 2."
+ extra = fmt.Sprintf("LogicSigVersion >= %d.", s[name].version)
}
return
}
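A small in-package sketch (not part of this commit) of the behavior change in getExtraFor above: the doc "extra" note now reflects each field's own introduction version instead of a hard-coded 2. It uses only the unexported names shown in this diff and assumes fmt is imported, as it now is:

    func getExtraForSketch() {
        specs := tfNameSpecMap{
            "Accounts": {Accounts, StackBytes, 2},
            "Assets":   {Assets, StackUint64, 3},
        }
        fmt.Println(specs.getExtraFor("Accounts")) // LogicSigVersion >= 2.
        fmt.Println(specs.getExtraFor("Assets"))   // LogicSigVersion >= 3.
    }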
diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go
index 046b5a55a..c06df6944 100644
--- a/data/transactions/logic/fields_string.go
+++ b/data/transactions/logic/fields_string.go
@@ -56,12 +56,20 @@ func _() {
_ = x[FreezeAsset-45]
_ = x[FreezeAssetAccount-46]
_ = x[FreezeAssetFrozen-47]
- _ = x[invalidTxnField-48]
+ _ = x[Assets-48]
+ _ = x[NumAssets-49]
+ _ = x[Applications-50]
+ _ = x[NumApplications-51]
+ _ = x[GlobalNumUint-52]
+ _ = x[GlobalNumByteSlice-53]
+ _ = x[LocalNumUint-54]
+ _ = x[LocalNumByteSlice-55]
+ _ = x[invalidTxnField-56]
}
-const _TxnField_name = "SenderFeeFirstValidFirstValidTimeLastValidNoteLeaseReceiverAmountCloseRemainderToVotePKSelectionPKVoteFirstVoteLastVoteKeyDilutionTypeTypeEnumXferAssetAssetAmountAssetSenderAssetReceiverAssetCloseToGroupIndexTxIDApplicationIDOnCompletionApplicationArgsNumAppArgsAccountsNumAccountsApprovalProgramClearStateProgramRekeyToConfigAssetConfigAssetTotalConfigAssetDecimalsConfigAssetDefaultFrozenConfigAssetUnitNameConfigAssetNameConfigAssetURLConfigAssetMetadataHashConfigAssetManagerConfigAssetReserveConfigAssetFreezeConfigAssetClawbackFreezeAssetFreezeAssetAccountFreezeAssetFrozeninvalidTxnField"
+const _TxnField_name = "SenderFeeFirstValidFirstValidTimeLastValidNoteLeaseReceiverAmountCloseRemainderToVotePKSelectionPKVoteFirstVoteLastVoteKeyDilutionTypeTypeEnumXferAssetAssetAmountAssetSenderAssetReceiverAssetCloseToGroupIndexTxIDApplicationIDOnCompletionApplicationArgsNumAppArgsAccountsNumAccountsApprovalProgramClearStateProgramRekeyToConfigAssetConfigAssetTotalConfigAssetDecimalsConfigAssetDefaultFrozenConfigAssetUnitNameConfigAssetNameConfigAssetURLConfigAssetMetadataHashConfigAssetManagerConfigAssetReserveConfigAssetFreezeConfigAssetClawbackFreezeAssetFreezeAssetAccountFreezeAssetFrozenAssetsNumAssetsApplicationsNumApplicationsGlobalNumUintGlobalNumByteSliceLocalNumUintLocalNumByteSliceinvalidTxnField"
-var _TxnField_index = [...]uint16{0, 6, 9, 19, 33, 42, 46, 51, 59, 65, 81, 87, 98, 107, 115, 130, 134, 142, 151, 162, 173, 186, 198, 208, 212, 225, 237, 252, 262, 270, 281, 296, 313, 320, 331, 347, 366, 390, 409, 424, 438, 461, 479, 497, 514, 533, 544, 562, 579, 594}
+var _TxnField_index = [...]uint16{0, 6, 9, 19, 33, 42, 46, 51, 59, 65, 81, 87, 98, 107, 115, 130, 134, 142, 151, 162, 173, 186, 198, 208, 212, 225, 237, 252, 262, 270, 281, 296, 313, 320, 331, 347, 366, 390, 409, 424, 438, 461, 479, 497, 514, 533, 544, 562, 579, 585, 594, 606, 621, 634, 652, 664, 681, 696}
func (i TxnField) String() string {
if i < 0 || i >= TxnField(len(_TxnField_index)-1) {
@@ -82,15 +90,16 @@ func _() {
_ = x[Round-6]
_ = x[LatestTimestamp-7]
_ = x[CurrentApplicationID-8]
- _ = x[invalidGlobalField-9]
+ _ = x[CreatorAddress-9]
+ _ = x[invalidGlobalField-10]
}
-const _GlobalField_name = "MinTxnFeeMinBalanceMaxTxnLifeZeroAddressGroupSizeLogicSigVersionRoundLatestTimestampCurrentApplicationIDinvalidGlobalField"
+const _GlobalField_name = "MinTxnFeeMinBalanceMaxTxnLifeZeroAddressGroupSizeLogicSigVersionRoundLatestTimestampCurrentApplicationIDCreatorAddressinvalidGlobalField"
-var _GlobalField_index = [...]uint8{0, 9, 19, 29, 40, 49, 64, 69, 84, 104, 122}
+var _GlobalField_index = [...]uint8{0, 9, 19, 29, 40, 49, 64, 69, 84, 104, 118, 136}
func (i GlobalField) String() string {
- if i < 0 || i >= GlobalField(len(_GlobalField_index)-1) {
+ if i >= GlobalField(len(_GlobalField_index)-1) {
return "GlobalField(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _GlobalField_name[_GlobalField_index[i]:_GlobalField_index[i+1]]
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 3f8ae41f8..8748b4225 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -21,7 +21,7 @@ import (
)
// LogicVersion defines default assembler and max eval versions
-const LogicVersion = 2
+const LogicVersion = 3
// rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality
// was enabled. This is important to remember so that old TEAL accounts cannot
@@ -33,16 +33,54 @@ const rekeyingEnabledVersion = 2
// from being used with applications. Do not edit!
const appsEnabledVersion = 2
-// opSize records the length in bytes for an op that is constant-length but not length 1
-type opSize struct {
- cost int
- size int
- checkFunc opCheckFunc
+// opDetails records details such as non-standard costs, immediate
+// arguments, or dynamic layout controlled by a check function.
+type opDetails struct {
+ Cost int
+ Size int
+ checkFunc opCheckFunc
+ Immediates []immediate
}
-var opSizeDefault = opSize{1, 1, nil}
+var opDefault = opDetails{1, 1, nil, nil}
+var opBranch = opDetails{1, 3, checkBranch, []immediate{{"target", immLabel}}}
-// OpSpec defines one byte opcode
+func costly(cost int) opDetails {
+ return opDetails{cost, 1, nil, nil}
+}
+
+func immediates(name string, rest ...string) opDetails {
+ num := 1 + len(rest)
+ immediates := make([]immediate, num, len(rest)+1)
+ immediates[0] = immediate{name, immByte}
+ for i, n := range rest {
+ immediates[i+1] = immediate{n, immByte}
+ }
+ return opDetails{1, 1 + num, nil, immediates}
+}
+
+func varies(checker opCheckFunc, name string, kind immKind) opDetails {
+ return opDetails{1, 0, checker, []immediate{{name, kind}}}
+}
+
+// immKind describes the kind of an immediate argument to an opcode
+type immKind byte
+
+const (
+ immByte immKind = iota
+ immLabel
+ immInt
+ immBytes
+ immInts
+ immBytess // "ss" not a typo. Multiple "bytes"
+)
+
+type immediate struct {
+ Name string
+ kind immKind
+}
+
+// OpSpec defines an opcode
type OpSpec struct {
Opcode byte
Name string
@@ -53,17 +91,21 @@ type OpSpec struct {
Returns StackTypes // what gets pushed to the stack
Version uint64 // TEAL version opcode introduced
Modes runMode // if non-zero, then (mode & Modes) != 0 to allow
- opSize opSize // opSizes records the size of ops that are constant size but not 1, time 'cost' and custom check functions.
+ Details opDetails // Special cost or bytecode layout considerations
}
var oneBytes = StackTypes{StackBytes}
var twoBytes = StackTypes{StackBytes, StackBytes}
var threeBytes = StackTypes{StackBytes, StackBytes, StackBytes}
+var byteInt = StackTypes{StackBytes, StackUint64}
var byteIntInt = StackTypes{StackBytes, StackUint64, StackUint64}
var oneInt = StackTypes{StackUint64}
var twoInts = StackTypes{StackUint64, StackUint64}
+var threeInts = StackTypes{StackUint64, StackUint64, StackUint64}
var oneAny = StackTypes{StackAny}
var twoAny = StackTypes{StackAny, StackAny}
+var anyInt = StackTypes{StackAny, StackUint64}
+var anyIntInt = StackTypes{StackAny, StackUint64, StackUint64}
// OpSpecs is the table of operations that can be assembled and evaluated.
//
@@ -71,99 +113,118 @@ var twoAny = StackTypes{StackAny, StackAny}
//
// WARNING: special case op assembly by argOps functions must do their own type stack maintenance via ops.tpop() ops.tpush()/ops.tpusha()
var OpSpecs = []OpSpec{
- {0x00, "err", opErr, asmDefault, disDefault, nil, nil, 1, modeAny, opSizeDefault},
- {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, opSize{7, 1, nil}},
- {0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, opSize{26, 1, nil}},
- {0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, opSize{9, 1, nil}},
+ {0x00, "err", opErr, asmDefault, disDefault, nil, nil, 1, modeAny, opDefault},
+ {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(7)},
+ {0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(26)},
+ {0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(9)},
// Cost of these opcodes increases in TEAL version 2 based on measured
// performance. Should be able to run max hashes during stateful TEAL
// and achieve reasonable TPS. Same opcode for different TEAL versions
// is OK.
- {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, opSize{35, 1, nil}},
- {0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, opSize{130, 1, nil}},
- {0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, opSize{45, 1, nil}},
-
- {0x04, "ed25519verify", opEd25519verify, asmDefault, disDefault, threeBytes, oneInt, 1, runModeSignature, opSize{1900, 1, nil}},
- {0x08, "+", opPlus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x09, "-", opMinus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x0a, "/", opDiv, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x0b, "*", opMul, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x0c, "<", opLt, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x0d, ">", opGt, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x0e, "<=", opLe, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x0f, ">=", opGe, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x10, "&&", opAnd, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x11, "||", opOr, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x12, "==", opEq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, opSizeDefault},
- {0x13, "!=", opNeq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, opSizeDefault},
- {0x14, "!", opNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opSizeDefault},
- {0x15, "len", opLen, asmDefault, disDefault, oneBytes, oneInt, 1, modeAny, opSizeDefault},
- {0x16, "itob", opItob, asmDefault, disDefault, oneInt, oneBytes, 1, modeAny, opSizeDefault},
- {0x17, "btoi", opBtoi, asmDefault, disDefault, oneBytes, oneInt, 1, modeAny, opSizeDefault},
- {0x18, "%", opModulo, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x19, "|", opBitOr, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x1a, "&", opBitAnd, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x1b, "^", opBitXor, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opSizeDefault},
- {0x1c, "~", opBitNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opSizeDefault},
- {0x1d, "mulw", opMulw, asmDefault, disDefault, twoInts, twoInts, 1, modeAny, opSizeDefault},
- {0x1e, "addw", opAddw, asmDefault, disDefault, twoInts, twoInts, 2, modeAny, opSizeDefault},
-
- {0x20, "intcblock", opIntConstBlock, assembleIntCBlock, disIntcblock, nil, nil, 1, modeAny, opSize{1, 0, checkIntConstBlock}},
- {0x21, "intc", opIntConstLoad, assembleIntC, disIntc, nil, oneInt, 1, modeAny, opSize{1, 2, nil}},
- {0x22, "intc_0", opIntConst0, asmDefault, disDefault, nil, oneInt, 1, modeAny, opSizeDefault},
- {0x23, "intc_1", opIntConst1, asmDefault, disDefault, nil, oneInt, 1, modeAny, opSizeDefault},
- {0x24, "intc_2", opIntConst2, asmDefault, disDefault, nil, oneInt, 1, modeAny, opSizeDefault},
- {0x25, "intc_3", opIntConst3, asmDefault, disDefault, nil, oneInt, 1, modeAny, opSizeDefault},
- {0x26, "bytecblock", opByteConstBlock, assembleByteCBlock, disBytecblock, nil, nil, 1, modeAny, opSize{1, 0, checkByteConstBlock}},
- {0x27, "bytec", opByteConstLoad, assembleByteC, disBytec, nil, oneBytes, 1, modeAny, opSize{1, 2, nil}},
- {0x28, "bytec_0", opByteConst0, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opSizeDefault},
- {0x29, "bytec_1", opByteConst1, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opSizeDefault},
- {0x2a, "bytec_2", opByteConst2, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opSizeDefault},
- {0x2b, "bytec_3", opByteConst3, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opSizeDefault},
- {0x2c, "arg", opArg, assembleArg, disArg, nil, oneBytes, 1, runModeSignature, opSize{1, 2, nil}},
- {0x2d, "arg_0", opArg0, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opSizeDefault},
- {0x2e, "arg_1", opArg1, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opSizeDefault},
- {0x2f, "arg_2", opArg2, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opSizeDefault},
- {0x30, "arg_3", opArg3, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opSizeDefault},
- {0x31, "txn", opTxn, assembleTxn, disTxn, nil, oneAny, 1, modeAny, opSize{1, 2, nil}},
+ {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(35)},
+ {0x02, "keccak256", opKeccak256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(130)},
+ {0x03, "sha512_256", opSHA512_256, asmDefault, disDefault, oneBytes, oneBytes, 2, modeAny, costly(45)},
+
+ {0x04, "ed25519verify", opEd25519verify, asmDefault, disDefault, threeBytes, oneInt, 1, runModeSignature, costly(1900)},
+ {0x08, "+", opPlus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x09, "-", opMinus, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x0a, "/", opDiv, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x0b, "*", opMul, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x0c, "<", opLt, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x0d, ">", opGt, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x0e, "<=", opLe, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x0f, ">=", opGe, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x10, "&&", opAnd, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x11, "||", opOr, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x12, "==", opEq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, opDefault},
+ {0x13, "!=", opNeq, asmDefault, disDefault, twoAny, oneInt, 1, modeAny, opDefault},
+ {0x14, "!", opNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opDefault},
+ {0x15, "len", opLen, asmDefault, disDefault, oneBytes, oneInt, 1, modeAny, opDefault},
+ {0x16, "itob", opItob, asmDefault, disDefault, oneInt, oneBytes, 1, modeAny, opDefault},
+ {0x17, "btoi", opBtoi, asmDefault, disDefault, oneBytes, oneInt, 1, modeAny, opDefault},
+ {0x18, "%", opModulo, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x19, "|", opBitOr, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x1a, "&", opBitAnd, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x1b, "^", opBitXor, asmDefault, disDefault, twoInts, oneInt, 1, modeAny, opDefault},
+ {0x1c, "~", opBitNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opDefault},
+ {0x1d, "mulw", opMulw, asmDefault, disDefault, twoInts, twoInts, 1, modeAny, opDefault},
+ {0x1e, "addw", opAddw, asmDefault, disDefault, twoInts, twoInts, 2, modeAny, opDefault},
+
+ {0x20, "intcblock", opIntConstBlock, assembleIntCBlock, disIntcblock, nil, nil, 1, modeAny, varies(checkIntConstBlock, "uint ...", immInts)},
+ {0x21, "intc", opIntConstLoad, assembleIntC, disDefault, nil, oneInt, 1, modeAny, immediates("i")},
+ {0x22, "intc_0", opIntConst0, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x23, "intc_1", opIntConst1, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x24, "intc_2", opIntConst2, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x25, "intc_3", opIntConst3, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault},
+ {0x26, "bytecblock", opByteConstBlock, assembleByteCBlock, disBytecblock, nil, nil, 1, modeAny, varies(checkByteConstBlock, "bytes ...", immBytess)},
+ {0x27, "bytec", opByteConstLoad, assembleByteC, disDefault, nil, oneBytes, 1, modeAny, immediates("i")},
+ {0x28, "bytec_0", opByteConst0, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x29, "bytec_1", opByteConst1, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x2a, "bytec_2", opByteConst2, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x2b, "bytec_3", opByteConst3, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault},
+ {0x2c, "arg", opArg, assembleArg, disDefault, nil, oneBytes, 1, runModeSignature, immediates("n")},
+ {0x2d, "arg_0", opArg0, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
+ {0x2e, "arg_1", opArg1, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
+ {0x2f, "arg_2", opArg2, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
+ {0x30, "arg_3", opArg3, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault},
+ {0x31, "txn", opTxn, assembleTxn, disTxn, nil, oneAny, 1, modeAny, immediates("f")},
// It is ok to have the same opcode for different TEAL versions.
// This 'txn' asm command supports additional argument in version 2 and
// generates 'txna' opcode in that particular case
- {0x31, "txn", opTxn, assembleTxn2, disTxn, nil, oneAny, 2, modeAny, opSize{1, 2, nil}},
- {0x32, "global", opGlobal, assembleGlobal, disGlobal, nil, oneAny, 1, modeAny, opSize{1, 2, nil}},
- {0x33, "gtxn", opGtxn, assembleGtxn, disGtxn, nil, oneAny, 1, modeAny, opSize{1, 3, nil}},
- {0x33, "gtxn", opGtxn, assembleGtxn2, disGtxn, nil, oneAny, 2, modeAny, opSize{1, 3, nil}},
- {0x34, "load", opLoad, assembleLoad, disLoad, nil, oneAny, 1, modeAny, opSize{1, 2, nil}},
- {0x35, "store", opStore, assembleStore, disStore, oneAny, nil, 1, modeAny, opSize{1, 2, nil}},
- {0x36, "txna", opTxna, assembleTxna, disTxna, nil, oneAny, 2, modeAny, opSize{1, 3, nil}},
- {0x37, "gtxna", opGtxna, assembleGtxna, disGtxna, nil, oneAny, 2, modeAny, opSize{1, 4, nil}},
-
- {0x40, "bnz", opBnz, assembleBranch, disBranch, oneInt, nil, 1, modeAny, opSize{1, 3, checkBranch}},
- {0x41, "bz", opBz, assembleBranch, disBranch, oneInt, nil, 2, modeAny, opSize{1, 3, checkBranch}},
- {0x42, "b", opB, assembleBranch, disBranch, nil, nil, 2, modeAny, opSize{1, 3, checkBranch}},
- {0x43, "return", opReturn, asmDefault, disDefault, oneInt, nil, 2, modeAny, opSizeDefault},
- {0x48, "pop", opPop, asmDefault, disDefault, oneAny, nil, 1, modeAny, opSizeDefault},
- {0x49, "dup", opDup, asmDefault, disDefault, oneAny, twoAny, 1, modeAny, opSizeDefault},
- {0x4a, "dup2", opDup2, asmDefault, disDefault, twoAny, twoAny.plus(twoAny), 2, modeAny, opSizeDefault},
-
- {0x50, "concat", opConcat, asmDefault, disDefault, twoBytes, oneBytes, 2, modeAny, opSizeDefault},
- {0x51, "substring", opSubstring, assembleSubstring, disSubstring, oneBytes, oneBytes, 2, modeAny, opSize{1, 3, nil}},
- {0x52, "substring3", opSubstring3, asmDefault, disDefault, byteIntInt, oneBytes, 2, modeAny, opSizeDefault},
-
- {0x60, "balance", opBalance, asmDefault, disDefault, oneInt, oneInt, 2, runModeApplication, opSizeDefault},
- {0x61, "app_opted_in", opAppCheckOptedIn, asmDefault, disDefault, twoInts, oneInt, 2, runModeApplication, opSizeDefault},
- {0x62, "app_local_get", opAppGetLocalState, asmDefault, disDefault, oneInt.plus(oneBytes), oneAny, 2, runModeApplication, opSizeDefault},
- {0x63, "app_local_get_ex", opAppGetLocalStateEx, asmDefault, disDefault, twoInts.plus(oneBytes), oneAny.plus(oneInt), 2, runModeApplication, opSizeDefault},
- {0x64, "app_global_get", opAppGetGlobalState, asmDefault, disDefault, oneBytes, oneAny, 2, runModeApplication, opSizeDefault},
- {0x65, "app_global_get_ex", opAppGetGlobalStateEx, asmDefault, disDefault, oneInt.plus(oneBytes), oneAny.plus(oneInt), 2, runModeApplication, opSizeDefault},
- {0x66, "app_local_put", opAppPutLocalState, asmDefault, disDefault, oneInt.plus(oneBytes).plus(oneAny), nil, 2, runModeApplication, opSizeDefault},
- {0x67, "app_global_put", opAppPutGlobalState, asmDefault, disDefault, oneBytes.plus(oneAny), nil, 2, runModeApplication, opSizeDefault},
- {0x68, "app_local_del", opAppDeleteLocalState, asmDefault, disDefault, oneInt.plus(oneBytes), nil, 2, runModeApplication, opSizeDefault},
- {0x69, "app_global_del", opAppDeleteGlobalState, asmDefault, disDefault, oneBytes, nil, 2, runModeApplication, opSizeDefault},
-
- {0x70, "asset_holding_get", opAssetHoldingGet, assembleAssetHolding, disAssetHolding, twoInts, oneAny.plus(oneInt), 2, runModeApplication, opSize{1, 2, nil}},
- {0x71, "asset_params_get", opAssetParamsGet, assembleAssetParams, disAssetParams, oneInt, oneAny.plus(oneInt), 2, runModeApplication, opSize{1, 2, nil}},
+ {0x31, "txn", opTxn, assembleTxn2, disTxn, nil, oneAny, 2, modeAny, immediates("f")},
+ {0x32, "global", opGlobal, assembleGlobal, disGlobal, nil, oneAny, 1, modeAny, immediates("f")},
+ {0x33, "gtxn", opGtxn, assembleGtxn, disGtxn, nil, oneAny, 1, modeAny, immediates("t", "f")},
+ {0x33, "gtxn", opGtxn, assembleGtxn2, disGtxn, nil, oneAny, 2, modeAny, immediates("t", "f")},
+ {0x34, "load", opLoad, asmDefault, disDefault, nil, oneAny, 1, modeAny, immediates("i")},
+ {0x35, "store", opStore, asmDefault, disDefault, oneAny, nil, 1, modeAny, immediates("i")},
+ {0x36, "txna", opTxna, assembleTxna, disTxna, nil, oneAny, 2, modeAny, immediates("f", "i")},
+ {0x37, "gtxna", opGtxna, assembleGtxna, disGtxna, nil, oneAny, 2, modeAny, immediates("t", "f", "i")},
+ // Like gtxn, but gets txn index from stack, rather than immediate arg
+ {0x38, "gtxns", opGtxns, assembleGtxns, disTxn, oneInt, oneAny, 3, modeAny, immediates("f")},
+ {0x39, "gtxnsa", opGtxnsa, assembleGtxns, disTxna, oneInt, oneAny, 3, modeAny, immediates("f", "i")},
+
+ {0x40, "bnz", opBnz, assembleBranch, disBranch, oneInt, nil, 1, modeAny, opBranch},
+ {0x41, "bz", opBz, assembleBranch, disBranch, oneInt, nil, 2, modeAny, opBranch},
+ {0x42, "b", opB, assembleBranch, disBranch, nil, nil, 2, modeAny, opBranch},
+ {0x43, "return", opReturn, asmDefault, disDefault, oneInt, nil, 2, modeAny, opDefault},
+ {0x44, "assert", opAssert, asmDefault, disDefault, oneInt, nil, 3, modeAny, opDefault},
+ {0x48, "pop", opPop, asmDefault, disDefault, oneAny, nil, 1, modeAny, opDefault},
+ {0x49, "dup", opDup, asmDefault, disDefault, oneAny, twoAny, 1, modeAny, opDefault},
+ {0x4a, "dup2", opDup2, asmDefault, disDefault, twoAny, twoAny.plus(twoAny), 2, modeAny, opDefault},
+ // There must be at least one thing on the stack for dig, but
+ // it would be nice if we did better checking than that.
+ {0x4b, "dig", opDig, asmDefault, disDefault, oneAny, twoAny, 3, modeAny, immediates("n")},
+ {0x4c, "swap", opSwap, asmDefault, disDefault, twoAny, twoAny, 3, modeAny, opDefault},
+ {0x4d, "select", opSelect, asmDefault, disDefault, twoAny.plus(oneInt), oneAny, 3, modeAny, opDefault},
+
+ {0x50, "concat", opConcat, asmDefault, disDefault, twoBytes, oneBytes, 2, modeAny, opDefault},
+ {0x51, "substring", opSubstring, assembleSubstring, disDefault, oneBytes, oneBytes, 2, modeAny, immediates("s", "e")},
+ {0x52, "substring3", opSubstring3, asmDefault, disDefault, byteIntInt, oneBytes, 2, modeAny, opDefault},
+ {0x53, "getbit", opGetBit, asmDefault, disDefault, anyInt, oneInt, 3, modeAny, opDefault},
+ {0x54, "setbit", opSetBit, asmDefault, disDefault, anyIntInt, oneInt, 3, modeAny, opDefault},
+ {0x55, "getbyte", opGetByte, asmDefault, disDefault, byteInt, oneInt, 3, modeAny, opDefault},
+ {0x56, "setbyte", opSetByte, asmDefault, disDefault, byteIntInt, oneBytes, 3, modeAny, opDefault},
+
+ {0x60, "balance", opBalance, asmDefault, disDefault, oneInt, oneInt, 2, runModeApplication, opDefault},
+ {0x61, "app_opted_in", opAppCheckOptedIn, asmDefault, disDefault, twoInts, oneInt, 2, runModeApplication, opDefault},
+ {0x62, "app_local_get", opAppGetLocalState, asmDefault, disDefault, oneInt.plus(oneBytes), oneAny, 2, runModeApplication, opDefault},
+ {0x63, "app_local_get_ex", opAppGetLocalStateEx, asmDefault, disDefault, twoInts.plus(oneBytes), oneAny.plus(oneInt), 2, runModeApplication, opDefault},
+ {0x64, "app_global_get", opAppGetGlobalState, asmDefault, disDefault, oneBytes, oneAny, 2, runModeApplication, opDefault},
+ {0x65, "app_global_get_ex", opAppGetGlobalStateEx, asmDefault, disDefault, oneInt.plus(oneBytes), oneAny.plus(oneInt), 2, runModeApplication, opDefault},
+ {0x66, "app_local_put", opAppPutLocalState, asmDefault, disDefault, oneInt.plus(oneBytes).plus(oneAny), nil, 2, runModeApplication, opDefault},
+ {0x67, "app_global_put", opAppPutGlobalState, asmDefault, disDefault, oneBytes.plus(oneAny), nil, 2, runModeApplication, opDefault},
+ {0x68, "app_local_del", opAppDeleteLocalState, asmDefault, disDefault, oneInt.plus(oneBytes), nil, 2, runModeApplication, opDefault},
+ {0x69, "app_global_del", opAppDeleteGlobalState, asmDefault, disDefault, oneBytes, nil, 2, runModeApplication, opDefault},
+
+ {0x70, "asset_holding_get", opAssetHoldingGet, assembleAssetHolding, disAssetHolding, twoInts, oneAny.plus(oneInt), 2, runModeApplication, immediates("i")},
+ {0x71, "asset_params_get", opAssetParamsGet, assembleAssetParams, disAssetParams, oneInt, oneAny.plus(oneInt), 2, runModeApplication, immediates("i")},
+
+ {0x78, "min_balance", opMinBalance, asmDefault, disDefault, oneInt, oneInt, 3, runModeApplication, opDefault},
+
+ // Immediate bytes and ints. Smaller code size for single use of constant.
+ {0x80, "pushbytes", opPushBytes, asmPushBytes, disPushBytes, nil, oneBytes, 3, modeAny, varies(checkPushBytes, "bytes", immBytes)},
+ {0x81, "pushint", opPushInt, asmPushInt, disPushInt, nil, oneInt, 3, modeAny, varies(checkPushInt, "uint", immInt)},
}
type sortByOpcode []OpSpec
@@ -173,7 +234,7 @@ func (a sortByOpcode) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sortByOpcode) Less(i, j int) bool { return a[i].Opcode < a[j].Opcode }
// OpcodesByVersion returns list of opcodes available in a specific version of TEAL
-// by copying v1 opcodes to v2 to create a full list.
+// by copying v1 opcodes to v2, and then on to v3, to create a full list.
func OpcodesByVersion(version uint64) []OpSpec {
// for updated opcodes use the lowest version opcode was introduced in
maxOpcode := 0
@@ -220,7 +281,9 @@ func OpcodesByVersion(version uint64) []OpSpec {
// direct opcode bytes
var opsByOpcode [LogicVersion + 1][256]OpSpec
-var opsByName [LogicVersion + 1]map[string]OpSpec
+
+// OpsByName is a map for each version, mapping opcode name to OpSpec
+var OpsByName [LogicVersion + 1]map[string]OpSpec
// Migration from TEAL v1 to TEAL v2.
// TEAL v1 allowed execution of program with version 0.
@@ -231,27 +294,27 @@ var opsByName [LogicVersion + 1]map[string]OpSpec
func init() {
// First, initialize baseline v1 opcodes.
// Zero (empty) version is an alias for TEAL v1 opcodes and needed for compatibility with v1 code.
- opsByName[0] = make(map[string]OpSpec, 256)
- opsByName[1] = make(map[string]OpSpec, 256)
+ OpsByName[0] = make(map[string]OpSpec, 256)
+ OpsByName[1] = make(map[string]OpSpec, 256)
for _, oi := range OpSpecs {
if oi.Version == 1 {
cp := oi
cp.Version = 0
opsByOpcode[0][oi.Opcode] = cp
- opsByName[0][oi.Name] = cp
+ OpsByName[0][oi.Name] = cp
opsByOpcode[1][oi.Opcode] = oi
- opsByName[1][oi.Name] = oi
+ OpsByName[1][oi.Name] = oi
}
}
// Start from v2 TEAL and higher,
// copy lower version opcodes and overwrite matching version
for v := uint64(2); v <= EvalMaxVersion; v++ {
- opsByName[v] = make(map[string]OpSpec, 256)
+ OpsByName[v] = make(map[string]OpSpec, 256)
// Copy opcodes from lower version
- for opName, oi := range opsByName[v-1] {
- opsByName[v][opName] = oi
+ for opName, oi := range OpsByName[v-1] {
+ OpsByName[v][opName] = oi
}
for op, oi := range opsByOpcode[v-1] {
opsByOpcode[v][op] = oi
@@ -261,7 +324,7 @@ func init() {
for _, oi := range OpSpecs {
if oi.Version == v {
opsByOpcode[v][oi.Opcode] = oi
- opsByName[v][oi.Name] = oi
+ OpsByName[v][oi.Name] = oi
}
}
}
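For orientation, a sketch (not part of this commit) of what the new opDetails helpers expand to; it conceptually lives in the logic package and uses only names introduced above:

    func opDetailsSketch() {
        // immediates("i") describes a 2-byte opcode (1 opcode byte + 1 immediate byte)
        // at the default cost of 1:
        d1 := immediates("i")
        d2 := opDetails{Cost: 1, Size: 2, checkFunc: nil, Immediates: []immediate{{"i", immByte}}}
        // costly(35) keeps Size at 1 and only raises Cost, while varies(...) sets
        // Size to 0 and defers length checking to the supplied opCheckFunc.
        _, _ = d1, d2
    }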
diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go
index 8bde67a2c..7c61f9068 100644
--- a/data/transactions/logic/opcodes_test.go
+++ b/data/transactions/logic/opcodes_test.go
@@ -28,7 +28,7 @@ func TestOpSpecs(t *testing.T) {
t.Parallel()
for _, spec := range OpSpecs {
- require.NotEmpty(t, spec.opSize, spec)
+ require.NotEmpty(t, spec.Details, spec)
}
}
@@ -84,7 +84,7 @@ func TestOpcodesByVersion(t *testing.T) {
OpSpecs2[idx] = cp
}
- opSpecs := make([][]OpSpec, 2)
+ opSpecs := make([][]OpSpec, LogicVersion)
for v := uint64(1); v <= LogicVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
opSpecs[v-1] = OpcodesByVersion(v)
@@ -117,8 +117,8 @@ func TestOpcodesByVersion(t *testing.T) {
func TestOpcodesVersioningV2(t *testing.T) {
t.Parallel()
- require.Equal(t, 3, len(opsByOpcode))
- require.Equal(t, 3, len(opsByName))
+ require.Equal(t, 4, len(opsByOpcode))
+ require.Equal(t, 4, len(OpsByName))
// ensure v0 has only v0 opcodes
cntv0 := 0
@@ -128,12 +128,12 @@ func TestOpcodesVersioningV2(t *testing.T) {
cntv0++
}
}
- for _, spec := range opsByName[0] {
+ for _, spec := range OpsByName[0] {
if spec.op != nil {
require.Equal(t, uint64(0), spec.Version)
}
}
- require.Equal(t, cntv0, len(opsByName[0]))
+ require.Equal(t, cntv0, len(OpsByName[0]))
// ensure v1 has only v1 opcodes
cntv1 := 0
@@ -143,12 +143,12 @@ func TestOpcodesVersioningV2(t *testing.T) {
cntv1++
}
}
- for _, spec := range opsByName[1] {
+ for _, spec := range OpsByName[1] {
if spec.op != nil {
require.Equal(t, uint64(1), spec.Version)
}
}
- require.Equal(t, cntv1, len(opsByName[1]))
+ require.Equal(t, cntv1, len(OpsByName[1]))
require.Equal(t, cntv1, cntv0)
require.Equal(t, 52, cntv1)
@@ -159,25 +159,24 @@ func TestOpcodesVersioningV2(t *testing.T) {
reflect.ValueOf(a.dis).Pointer() == reflect.ValueOf(b.dis).Pointer() &&
reflect.DeepEqual(a.Args, b.Args) && reflect.DeepEqual(a.Returns, b.Returns) &&
a.Modes == b.Modes &&
- a.opSize.cost == b.opSize.cost && a.opSize.size == b.opSize.size &&
- reflect.ValueOf(a.opSize.checkFunc).Pointer() == reflect.ValueOf(b.opSize.checkFunc).Pointer()
+ a.Details.Cost == b.Details.Cost && a.Details.Size == b.Details.Size &&
+ reflect.ValueOf(a.Details.checkFunc).Pointer() == reflect.ValueOf(b.Details.checkFunc).Pointer()
return
}
// ensure v0 and v1 are the same
require.Equal(t, len(opsByOpcode[1]), len(opsByOpcode[0]))
- require.Equal(t, len(opsByName[1]), len(opsByName[0]))
+ require.Equal(t, len(OpsByName[1]), len(OpsByName[0]))
for op, spec1 := range opsByOpcode[1] {
spec0 := opsByOpcode[0][op]
msg := fmt.Sprintf("%v\n%v\n", spec0, spec1)
require.True(t, eqButVersion(&spec1, &spec0), msg)
}
- for name, spec1 := range opsByName[1] {
- spec0 := opsByName[0][name]
+ for name, spec1 := range OpsByName[1] {
+ spec0 := OpsByName[0][name]
require.True(t, eqButVersion(&spec1, &spec0))
}
// ensure v2 has v1 and v2 opcodes
- require.Equal(t, len(opsByName[2]), len(opsByName[2]))
cntv2 := 0
cntAdded := 0
for _, spec := range opsByOpcode[2] {
@@ -189,12 +188,12 @@ func TestOpcodesVersioningV2(t *testing.T) {
cntv2++
}
}
- for _, spec := range opsByName[2] {
+ for _, spec := range OpsByName[2] {
if spec.op != nil {
require.True(t, spec.Version == 1 || spec.Version == 2)
}
}
- require.Equal(t, cntv2, len(opsByName[2]))
+ require.Equal(t, cntv2, len(OpsByName[2]))
// hardcode and ensure amount of new v2 opcodes
newOpcodes := 22
@@ -202,4 +201,29 @@ func TestOpcodesVersioningV2(t *testing.T) {
require.Equal(t, newOpcodes+overwritten, cntAdded)
require.Equal(t, cntv2, cntv1+newOpcodes)
+
+ // ensure v3 has v1, v2, v3 opcodes
+ cntv3 := 0
+ cntAdded = 0
+ for _, spec := range opsByOpcode[3] {
+ if spec.op != nil {
+ require.True(t, spec.Version == 1 || spec.Version == 2 || spec.Version == 3)
+ if spec.Version == 3 {
+ cntAdded++
+ }
+ cntv3++
+ }
+ }
+ for _, spec := range OpsByName[3] {
+ if spec.op != nil {
+ require.True(t, spec.Version == 1 || spec.Version == 2 || spec.Version == 3)
+ }
+ }
+ require.Len(t, OpsByName[3], cntv3)
+
+ // assert, min_balance, {get,set}{bit,byte}, swap, select, dig, gtxns, gtxnsa, push{int,bytes}
+ newOpcodes = 13
+ overwritten = 0 // none yet
+ require.Equal(t, newOpcodes+overwritten, cntAdded)
+
}
diff --git a/data/transactions/signedtxn.go b/data/transactions/signedtxn.go
index e1aa4f47a..7a20c3d8a 100644
--- a/data/transactions/signedtxn.go
+++ b/data/transactions/signedtxn.go
@@ -107,3 +107,15 @@ func AssembleSignedTxn(txn Transaction, sig crypto.Signature, msig crypto.Multis
}
return s, nil
}
+
+// ToBeHashed implements the crypto.Hashable interface.
+func (s *SignedTxnInBlock) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.SignedTxnInBlock, protocol.Encode(s)
+}
+
+// Hash implements an optimized version of crypto.HashObj(s).
+func (s *SignedTxnInBlock) Hash() crypto.Digest {
+ enc := s.MarshalMsg(append(protocol.GetEncodingBuf(), []byte(protocol.SignedTxnInBlock)...))
+ defer protocol.PutEncodingBuf(enc)
+ return crypto.Hash(enc)
+}
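The Hash method above is equivalent to crypto.HashObj(s), which prepends the domain-separation prefix to the canonical encoding before hashing; the optimized version just reuses a pooled encoding buffer. A sketch of the generic path for comparison (in-package, not part of this commit); the test below verifies the two agree:

    func genericHashSketch(s *SignedTxnInBlock) crypto.Digest {
        // What crypto.HashObj does, spelled out: prefix the encoding with the HashID.
        hashID, data := s.ToBeHashed()
        return crypto.Hash(append([]byte(hashID), data...))
    }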
diff --git a/data/transactions/signedtxn_test.go b/data/transactions/signedtxn_test.go
index 2f2ef97b4..7375c8b77 100644
--- a/data/transactions/signedtxn_test.go
+++ b/data/transactions/signedtxn_test.go
@@ -73,4 +73,10 @@ func TestDecodeNil(t *testing.T) {
}
}
+func TestSignedTxnInBlockHash(t *testing.T) {
+ var stib SignedTxnInBlock
+ crypto.RandBytes(stib.Txn.Sender[:])
+ require.Equal(t, crypto.HashObj(&stib), stib.Hash())
+}
+
//TODO: test multisig
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index e7ae52566..099d43724 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -241,7 +241,7 @@ func (e *RoundOffsetError) Error() string {
}
// StaleDatabaseRoundError is generated when we detect that the database round is behind the accountUpdates in-memory dbRound. This
-// should never happen, since we update the database first, and only upon a successfull update we update the in-memory dbRound.
+// should never happen, since we update the database first, and only upon a successful update we update the in-memory dbRound.
type StaleDatabaseRoundError struct {
memoryRound basics.Round
databaseRound basics.Round
@@ -361,7 +361,7 @@ func (au *accountUpdates) IsWritingCatchpointFile() bool {
// LookupWithRewards returns the account data for a given address at a given round.
// Note that the function doesn't update the account with the rewards,
-// even while it does return the AccoutData which represent the "rewarded" account data.
+// even while it does return the AccountData which represent the "rewarded" account data.
func (au *accountUpdates) LookupWithRewards(rnd basics.Round, addr basics.Address) (data basics.AccountData, err error) {
return au.lookupWithRewards(rnd, addr)
}
@@ -1491,7 +1491,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S
// lookupWithRewards returns the account data for a given address at a given round.
// The rewards are added to the AccountData before returning. Note that the function doesn't update the account with the rewards,
-// even while it does return the AccoutData which represent the "rewarded" account data.
+// even while it does return the AccountData which represent the "rewarded" account data.
func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Address) (data basics.AccountData, err error) {
au.accountsMu.RLock()
needUnlock := true
diff --git a/ledger/applications.go b/ledger/applications.go
index 68bb135e1..9dee2b4e6 100644
--- a/ledger/applications.go
+++ b/ledger/applications.go
@@ -19,6 +19,7 @@ package ledger
import (
"fmt"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
)
@@ -74,6 +75,16 @@ func (al *logicLedger) Balance(addr basics.Address) (res basics.MicroAlgos, err
return record.MicroAlgos, nil
}
+func (al *logicLedger) MinBalance(addr basics.Address, proto *config.ConsensusParams) (res basics.MicroAlgos, err error) {
+ // Fetch record with pending rewards applied
+ record, err := al.cow.Get(addr, true)
+ if err != nil {
+ return
+ }
+
+ return record.MinBalance(proto), nil
+}
+
func (al *logicLedger) AssetHolding(addr basics.Address, assetIdx basics.AssetIndex) (basics.AssetHolding, error) {
// Fetch the requested balance record
record, err := al.cow.Get(addr, false)
@@ -131,6 +142,10 @@ func (al *logicLedger) ApplicationID() basics.AppIndex {
return al.aidx
}
+func (al *logicLedger) CreatorAddress() basics.Address {
+ return al.creator
+}
+
func (al *logicLedger) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
if appIdx == basics.AppIndex(0) {
appIdx = al.aidx
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 0f5a6fe4d..1080d87b1 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -92,7 +92,12 @@ func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (gene
initAccounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 7654321})
incentivePoolBalanceAtGenesis := initAccounts[poolAddr].MicroAlgos
- initialRewardsPerRound := incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ var initialRewardsPerRound uint64
+ if params.InitialRewardsRateCalculation {
+ initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
+ } else {
+ initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
+ }
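Note: with InitialRewardsRateCalculation enabled, the pool's minimum balance is excluded before dividing by the refresh interval. A small self-contained sketch of the two formulas follows, with made-up numbers and a local subSaturate stand-in for basics.SubSaturate.

package main

import "fmt"

// subSaturate mirrors basics.SubSaturate for uint64 values: it returns 0
// instead of wrapping around when b > a.
func subSaturate(a, b uint64) uint64 {
	if b > a {
		return 0
	}
	return a - b
}

func main() {
	// Hypothetical values, for illustration only.
	poolBalance := uint64(125_000_000_000_000)
	minBalance := uint64(100_000)
	refreshInterval := uint64(500_000)

	oldRate := poolBalance / refreshInterval
	newRate := subSaturate(poolBalance, minBalance) / refreshInterval
	fmt.Println("legacy initial rewards per round:", oldRate)
	fmt.Println("new initial rewards per round:", newRate)
}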
initBlock := bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
@@ -357,7 +362,7 @@ func TestLedgerBlockHeaders(t *testing.T) {
// TODO test rewards cases with changing poolAddr money, with changing round, and with changing total reward units
badBlock = bookkeeping.Block{BlockHeader: correctHeader}
- badBlock.BlockHeader.TxnRoot = crypto.Digest{}
+ badBlock.BlockHeader.TxnRoot = crypto.Hash([]byte{0})
a.Error(l.appendUnvalidated(badBlock), "added block header with empty transaction root")
badBlock = bookkeeping.Block{BlockHeader: correctHeader}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index 376562ed7..d09cbf7e7 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -1046,3 +1046,12 @@ func (c *Client) Dryrun(data []byte) (resp generatedV2.DryrunResponse, err error
}
return
}
+
+// TxnProof returns a Merkle proof for a transaction in a block.
+func (c *Client) TxnProof(txid string, round uint64) (resp generatedV2.ProofResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err == nil {
+ return algod.Proof(txid, round)
+ }
+ return
+}
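Note: a hedged usage sketch of the new Client.TxnProof helper; it assumes an already-constructed libgoal.Client and placeholder txid/round values, and is not taken from this change.

package rpcsketch

import (
	"fmt"

	"github.com/algorand/go-algorand/libgoal"
)

// fetchProof wraps the new Client.TxnProof call; the txid and round passed in
// are expected to identify a transaction confirmed in that round.
func fetchProof(c *libgoal.Client, txid string, round uint64) error {
	resp, err := c.TxnProof(txid, round)
	if err != nil {
		return fmt.Errorf("TxnProof(%s, %d) failed: %w", txid, round, err)
	}
	fmt.Printf("merkle proof response: %+v\n", resp)
	return nil
}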
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 8caf3c3a8..3b3df0ce8 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -54,7 +54,6 @@ import (
)
const incomingThreads = 20
-const broadcastThreads = 4
const messageFilterSize = 5000 // messages greater than that size may be blocked by incoming/outgoing filter
// httpServerReadHeaderTimeout is the amount of time allowed to read
@@ -308,8 +307,9 @@ type WebsocketNetwork struct {
ctx context.Context
ctxCancel context.CancelFunc
- peersLock deadlock.RWMutex
- peers []*wsPeer
+ peersLock deadlock.RWMutex
+ peers []*wsPeer
+ peersChangeCounter int32 // peersChangeCounter is an atomic variable that increments on each change to the peers list. It helps avoid taking the peersLock when checking whether the peers list was modified.
broadcastQueueHighPrio chan broadcastRequest
broadcastQueueBulk chan broadcastRequest
@@ -736,10 +736,8 @@ func (wn *WebsocketNetwork) Start() {
wn.wg.Add(1)
go wn.messageHandlerThread()
}
- for i := 0; i < broadcastThreads; i++ {
- wn.wg.Add(1)
- go wn.broadcastThread()
- }
+ wn.wg.Add(1)
+ go wn.broadcastThread()
if wn.prioScheme != nil {
wn.wg.Add(1)
go wn.prioWeightRefresh()
@@ -1165,49 +1163,145 @@ func (wn *WebsocketNetwork) sendFilterMessage(msg IncomingMessage) {
func (wn *WebsocketNetwork) broadcastThread() {
defer wn.wg.Done()
- var peers []*wsPeer
+
slowWritingPeerCheckTicker := time.NewTicker(wn.slowWritingPeerMonitorInterval)
defer slowWritingPeerCheckTicker.Stop()
+ peers, lastPeersChangeCounter := wn.peerSnapshot([]*wsPeer{})
+ // updatePeers updates the cached peers list if the peers change counter has changed.
+ updatePeers := func() {
+ if curPeersChangeCounter := atomic.LoadInt32(&wn.peersChangeCounter); curPeersChangeCounter != lastPeersChangeCounter {
+ peers, lastPeersChangeCounter = wn.peerSnapshot(peers)
+ }
+ }
+
+ // waitForPeers waits until at least a single peer is connected or until the pending request expires.
+ // In either of these two cases, it returns true.
+ // Otherwise, it returns false (the network context has expired).
+ waitForPeers := func(request *broadcastRequest) bool {
+ // waitSleepTime defines how long we'd like to sleep between consecutive checks of whether the peers list has been updated.
+ const waitSleepTime = 5 * time.Millisecond
+ // requestDeadline is the request deadline. If we surpass that deadline, the function returns true.
+ var requestDeadline time.Time
+ // sleepDuration is the current iteration sleep time.
+ var sleepDuration time.Duration
+ // initialize the requestDeadline if we have a request.
+ if request != nil {
+ requestDeadline = request.enqueueTime.Add(maxMessageQueueDuration)
+ } else {
+ sleepDuration = waitSleepTime
+ }
+
+ // wait until we have at least a single peer connected.
+ for len(peers) == 0 {
+ // adjust the sleep time in case we have a request
+ if request != nil {
+ // we want to clamp the sleep time so that we won't sleep beyond the expiration of the request.
+ now := time.Now()
+ sleepDuration = requestDeadline.Sub(now)
+ if sleepDuration > waitSleepTime {
+ sleepDuration = waitSleepTime
+ } else if sleepDuration < 0 {
+ return true
+ }
+ }
+ select {
+ case <-time.After(sleepDuration):
+ if (request != nil) && time.Now().After(requestDeadline) {
+ // the message time has elapsed.
+ return true
+ }
+ updatePeers()
+ continue
+ case <-wn.ctx.Done():
+ return false
+ }
+ }
+ return true
+ }
+
+ // load the peers list
+ updatePeers()
+
+ // wait until we have at least a single peer connected.
+ if !waitForPeers(nil) {
+ return
+ }
+
for {
// broadcast from high prio channel as long as we can
// we want to try and keep this as a single case select with a default, since go compiles a single-case
// select with a default into a more efficient non-blocking receive, instead of compiling it to the general-purpose selectgo
select {
case request := <-wn.broadcastQueueHighPrio:
- wn.innerBroadcast(request, true, &peers)
+ wn.innerBroadcast(request, true, peers)
+ continue
+ default:
+ }
+
+ // if nothing is high prio, try to sample from either queue in a non-blocking fashion.
+ select {
+ case request := <-wn.broadcastQueueHighPrio:
+ wn.innerBroadcast(request, true, peers)
continue
+ case request := <-wn.broadcastQueueBulk:
+ wn.innerBroadcast(request, false, peers)
+ continue
+ case <-wn.ctx.Done():
+ return
default:
}
- // if nothing high prio, broadcast anything
+ // block until we have some request that needs to be sent.
select {
case request := <-wn.broadcastQueueHighPrio:
- wn.innerBroadcast(request, true, &peers)
+ // check if peers need to be updated, since we've been waiting a while.
+ updatePeers()
+ if !waitForPeers(&request) {
+ return
+ }
+ wn.innerBroadcast(request, true, peers)
case <-slowWritingPeerCheckTicker.C:
wn.checkSlowWritingPeers()
continue
case request := <-wn.broadcastQueueBulk:
- wn.innerBroadcast(request, false, &peers)
+ // check if peers need to be updated, since we've been waiting a while.
+ updatePeers()
+ if !waitForPeers(&request) {
+ return
+ }
+ wn.innerBroadcast(request, false, peers)
case <-wn.ctx.Done():
return
}
}
}
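Note: the reworked single broadcast thread keeps a cached peer snapshot and refreshes it only when the atomic peersChangeCounter moves, instead of re-snapshotting under the lock for every broadcast. The following standalone sketch of that snapshot-plus-change-counter pattern uses generic names (registry, items) and is illustrative only.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// registry is a stripped-down illustration of the pattern; it is not the
// WebsocketNetwork type itself.
type registry struct {
	mu            sync.RWMutex
	items         []string
	changeCounter int32
}

// add appends an item and bumps the change counter so readers can detect
// the modification without taking the lock.
func (r *registry) add(item string) {
	r.mu.Lock()
	r.items = append(r.items, item)
	atomic.AddInt32(&r.changeCounter, 1)
	r.mu.Unlock()
}

// snapshot copies the current items and returns the counter value observed
// under the lock, so callers can later refresh only when it has moved.
func (r *registry) snapshot() ([]string, int32) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	dest := make([]string, len(r.items))
	copy(dest, r.items)
	return dest, atomic.LoadInt32(&r.changeCounter)
}

func main() {
	r := &registry{}
	cached, last := r.snapshot()
	r.add("peer-1")
	if atomic.LoadInt32(&r.changeCounter) != last {
		cached, last = r.snapshot() // refresh only when the counter moved
	}
	fmt.Println(cached, last)
}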
-func (wn *WebsocketNetwork) peerSnapshot(dest []*wsPeer) []*wsPeer {
+// peerSnapshot returns the currently connected peers as well as the current value of the peersChangeCounter
+func (wn *WebsocketNetwork) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
if cap(dest) >= len(wn.peers) {
+ // clear out the unused portion of the peers array to allow the GC to clean up unused peers.
+ remainderPeers := dest[len(wn.peers):cap(dest)]
+ for i := range remainderPeers {
+ // we want to delete only up to the first nil peer, since we're always writing to this array from the beginning to the end
+ if remainderPeers[i] == nil {
+ break
+ }
+ remainderPeers[i] = nil
+ }
+ // adjust array size
dest = dest[:len(wn.peers)]
} else {
dest = make([]*wsPeer, len(wn.peers))
}
copy(dest, wn.peers)
- return dest
+ peerChangeCounter := atomic.LoadInt32(&wn.peersChangeCounter)
+ return dest, peerChangeCounter
}
// prio is set if the broadcast is a high-priority broadcast.
-func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool, ppeers *[]*wsPeer) {
+func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool, peers []*wsPeer) {
if request.done != nil {
defer close(request.done)
}
@@ -1230,22 +1324,17 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool,
digest = crypto.Hash(mbytes)
}
- *ppeers = wn.peerSnapshot(*ppeers)
- peers := *ppeers
-
// first send to all the easy outbound peers who don't block, get them started.
sentMessageCount := 0
- for pi, peer := range peers {
+ for _, peer := range peers {
if wn.config.BroadcastConnectionsLimit >= 0 && sentMessageCount >= wn.config.BroadcastConnectionsLimit {
break
}
if peer == request.except {
- peers[pi] = nil
continue
}
ok := peer.writeNonBlock(mbytes, prio, digest, request.enqueueTime)
if ok {
- peers[pi] = nil
sentMessageCount++
continue
}
@@ -1527,7 +1616,7 @@ func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() {
}
wn.lastPeerConnectionsSent = now
var peers []*wsPeer
- peers = wn.peerSnapshot(peers)
+ peers, _ = wn.peerSnapshot(peers)
var connectionDetails telemetryspec.PeersConnectionDetails
for _, peer := range peers {
connDetail := telemetryspec.PeerConnectionDetails{
@@ -1559,6 +1648,9 @@ func (wn *WebsocketNetwork) prioWeightRefresh() {
ticker := time.NewTicker(prioWeightRefreshTime)
defer ticker.Stop()
var peers []*wsPeer
+ // lastPeersChangeCounter is initialized to -1 in order to force the peers list to be loaded on the first iteration.
+ // Afterwards, it is reloaded on an as-needed basis.
+ lastPeersChangeCounter := int32(-1)
for {
select {
case <-ticker.C:
@@ -1566,7 +1658,10 @@ func (wn *WebsocketNetwork) prioWeightRefresh() {
return
}
- peers = wn.peerSnapshot(peers)
+ if curPeersChangeCounter := atomic.LoadInt32(&wn.peersChangeCounter); curPeersChangeCounter != lastPeersChangeCounter {
+ peers, lastPeersChangeCounter = wn.peerSnapshot(peers)
+ }
+
for _, peer := range peers {
wn.peersLock.RLock()
addr := peer.prioAddress
@@ -1604,7 +1699,7 @@ func (wn *WebsocketNetwork) pingThread() {
wn.log.Debugf("ping %d peers...", len(sendList))
for _, peer := range sendList {
if !peer.sendPing() {
- // if we failed to send a ping, see how long it was since last successfull ping.
+ // if we failed to send a ping, see how long it was since last successful ping.
lastPingSent, _ := peer.pingTimes()
wn.log.Infof("failed to ping to %v for the past %f seconds", peer, time.Now().Sub(lastPingSent).Seconds())
}
@@ -1688,10 +1783,14 @@ const ProtocolVersionHeader = "X-Algorand-Version"
const ProtocolAcceptVersionHeader = "X-Algorand-Accept-Version"
// SupportedProtocolVersions contains the list of supported protocol versions by this node ( in order of preference ).
-var SupportedProtocolVersions = []string{"2.1", "1"}
+var SupportedProtocolVersions = []string{"2.1"}
// ProtocolVersion is the current version attached to the ProtocolVersionHeader header
-const ProtocolVersion = "1"
+/* Version history:
+ * 1 Catchup service over websocket connections with unicast messages between peers
+ * 2.1 Introduced topic key/data pairs and enabled services over the gossip connections
+ */
+const ProtocolVersion = "2.1"
// TelemetryIDHeader HTTP header for telemetry-id for logging
const TelemetryIDHeader = "X-Algorand-TelId"
@@ -2027,6 +2126,7 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
if peer.throttledOutgoingConnection {
atomic.AddInt32(&wn.throttledOutgoingConnections, int32(1))
}
+ atomic.AddInt32(&wn.peersChangeCounter, 1)
}
wn.countPeersSetGauges()
}
@@ -2042,6 +2142,7 @@ func (wn *WebsocketNetwork) addPeer(peer *wsPeer) {
}
heap.Push(peersHeap{wn}, peer)
wn.prioTracker.setPriority(peer, peer.prioAddress, peer.prioWeight)
+ atomic.AddInt32(&wn.peersChangeCounter, 1)
wn.countPeersSetGauges()
if len(wn.peers) >= wn.config.GossipFanout {
// we have a quorum of connected peers, if we weren't ready before, we are now
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index eb1fefe49..3464ee750 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -1231,7 +1231,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
var peers []*wsPeer
- peers = netA.peerSnapshot(peers)
+ peers, _ = netA.peerSnapshot(peers)
require.Equalf(t, len(peers), 1, "Expected number of peers should be 1")
peer := peers[0]
// modify the peer on netA and
@@ -1240,7 +1240,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
// wait up to 10 seconds for the monitor to figure out it needs to disconnect.
expire := beforeLoopTime.Add(2 * slowWritingPeerMonitorInterval)
for {
- peers = netA.peerSnapshot(peers)
+ peers, _ = netA.peerSnapshot(peers)
if len(peers) == 0 || peers[0] != peer {
// make sure it took more than 1 second, and less than 5 seconds.
waitTime := time.Now().Sub(beforeLoopTime)
@@ -1425,7 +1425,7 @@ func handleTopicRequest(msg IncomingMessage) (out OutgoingMessage) {
// Set up two nodes, test topics send/receive is working
func TestWebsocketNetworkTopicRoundtrip(t *testing.T) {
- var topicMsgReqTag Tag = protocol.UniCatchupReqTag
+ var topicMsgReqTag Tag = protocol.UniEnsBlockReqTag
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.Start()
@@ -1542,7 +1542,6 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
for i := 0; i < 5; i++ {
netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
netA.Broadcast(context.Background(), protocol.TxnTag, []byte{0, 1, 2, 3, 4}, true, nil)
- netA.Broadcast(context.Background(), protocol.UniEnsBlockResTag, []byte{0, 1, 2, 3, 4}, true, nil)
netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0, 1, 2, 3, 4}, true, nil)
netA.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
}
diff --git a/network/wsPeer.go b/network/wsPeer.go
index f4acf2836..99a96d135 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -74,8 +74,6 @@ var defaultSendMessageTags = map[protocol.Tag]bool{
protocol.TxnTag: true,
protocol.UniCatchupReqTag: true,
protocol.UniEnsBlockReqTag: true,
- protocol.UniEnsBlockResTag: true,
- protocol.UniCatchupResTag: true,
protocol.VoteBundleTag: true,
}
@@ -124,8 +122,8 @@ type Response struct {
}
type wsPeer struct {
- // lastPacketTime contains the UnixNano at the last time a successfull communication was made with the peer.
- // "successfull communication" above refers to either reading from or writing to a connection without receiving any
+ // lastPacketTime contains the UnixNano at the last time a successful communication was made with the peer.
+ // "successful communication" above refers to either reading from or writing to a connection without receiving any
// error.
// we want this to be a 64-bit aligned for atomics.
lastPacketTime int64
@@ -438,7 +436,7 @@ func (wp *wsPeer) readLoop() {
select {
case channel <- &Response{Topics: topics}:
- // do nothing. writing was successfull.
+ // do nothing. writing was successful.
default:
wp.net.log.Warnf("wsPeer readLoop: channel blocked. Could not pass the response to the requester", wp.conn.RemoteAddr().String())
}
diff --git a/node/node.go b/node/node.go
index 5bba9dfeb..851afd8e9 100644
--- a/node/node.go
+++ b/node/node.go
@@ -108,7 +108,6 @@ type AlgorandFullNode struct {
catchpointCatchupService *catchup.CatchpointCatchupService
blockService *rpcs.BlockService
ledgerService *rpcs.LedgerService
- wsFetcherService *rpcs.WsFetcherService // to handle inbound gossip msgs for fetching over gossip
txPoolSyncerService *rpcs.TxSyncer
indexer *indexer.Indexer
@@ -230,7 +229,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.blockService = rpcs.MakeBlockService(cfg, node.ledger, p2pNode, node.genesisID)
node.ledgerService = rpcs.MakeLedgerService(cfg, node.ledger, p2pNode, node.genesisID)
- node.wsFetcherService = rpcs.MakeWsFetcherService(node.log, p2pNode)
rpcs.RegisterTxService(node.transactionPool, p2pNode, node.genesisID, cfg.TxPoolSize, cfg.TxSyncServeResponseSize)
crashPathname := filepath.Join(genesisDir, config.CrashFilename)
@@ -259,7 +257,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.agreementService = agreement.MakeService(agreementParameters)
node.catchupBlockAuth = blockAuthenticatorImpl{Ledger: node.ledger, AsyncVoteVerifier: agreement.MakeAsyncVoteVerifier(node.lowPriorityCryptoVerificationPool)}
- node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.wsFetcherService, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates)
+ node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates)
node.txPoolSyncerService = rpcs.MakeTxSyncer(node.transactionPool, node.net, node.txHandler.SolicitedTxHandler(), time.Duration(cfg.TxSyncIntervalSeconds)*time.Second, time.Duration(cfg.TxSyncTimeoutSeconds)*time.Second, cfg.TxSyncServeResponseSize)
err = node.loadParticipationKeys()
@@ -351,7 +349,6 @@ func (node *AlgorandFullNode) Start() {
if node.catchpointCatchupService != nil {
node.catchpointCatchupService.Start(node.ctx)
} else {
- node.wsFetcherService.Start()
node.catchupService.Start()
node.agreementService.Start()
node.txPoolSyncerService.Start(node.catchupService.InitialSyncDone)
@@ -426,7 +423,6 @@ func (node *AlgorandFullNode) Stop() {
node.txPoolSyncerService.Stop()
node.blockService.Stop()
node.ledgerService.Stop()
- node.wsFetcherService.Stop()
}
node.catchupBlockAuth.Quit()
node.highPriorityCryptoVerificationPool.Shutdown()
@@ -487,7 +483,7 @@ func (node *AlgorandFullNode) BroadcastSignedTxGroup(txgroup []transactions.Sign
enc = append(enc, protocol.Encode(&tx)...)
txids = append(txids, tx.ID())
}
- err = node.net.Broadcast(context.TODO(), protocol.TxnTag, enc, true, nil)
+ err = node.net.Broadcast(context.TODO(), protocol.TxnTag, enc, false, nil)
if err != nil {
node.log.Infof("failure broadcasting transaction to network: %v - transaction group was %+v", err, txgroup)
return err
@@ -967,7 +963,6 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
node.txPoolSyncerService.Stop()
node.blockService.Stop()
node.ledgerService.Stop()
- node.wsFetcherService.Stop()
prevNodeCancelFunc := node.cancelCtx
@@ -981,7 +976,6 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
defer node.mu.Unlock()
// start
node.transactionPool.Reset()
- node.wsFetcherService.Start()
node.catchupService.Start()
node.agreementService.Start()
node.txPoolSyncerService.Start(node.catchupService.InitialSyncDone)
diff --git a/protocol/consensus.go b/protocol/consensus.go
index 4428ee600..7b2a50793 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -138,6 +138,11 @@ const ConsensusV25 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/bea19289bf41217d2c0af30522fa222ef1366466",
)
+// ConsensusV26 adds support for TEAL 3, initial rewards calculation and merkle tree hash commitments
+const ConsensusV26 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff",
+)
+
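Note: a minimal sketch of how downstream code can check whether the newly declared ConsensusV26 is registered and whether one of its features is enabled; it assumes config/consensus.go (changed elsewhere in this release) adds the corresponding entry to config.Consensus.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/protocol"
)

func main() {
	// Look up the parameters registered for the new version.
	params, ok := config.Consensus[protocol.ConsensusV26]
	if !ok {
		fmt.Println("ConsensusV26 is not registered in config.Consensus")
		return
	}
	// InitialRewardsRateCalculation is one of the V26 features exercised in
	// ledger_test.go above.
	fmt.Println("initial rewards rate calculation enabled:", params.InitialRewardsRateCalculation)
}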
// ConsensusFuture is a protocol that should not appear in any production
// network, but is used to test features before they are released.
const ConsensusFuture = ConsensusVersion(
@@ -150,7 +155,7 @@ const ConsensusFuture = ConsensusVersion(
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV25
+const ConsensusCurrentVersion = ConsensusV26
// Error is used to indicate that an unsupported protocol has been detected.
type Error ConsensusVersion
diff --git a/protocol/hash.go b/protocol/hash.go
index 307c359e2..1f70fc683 100644
--- a/protocol/hash.go
+++ b/protocol/hash.go
@@ -49,8 +49,10 @@ const (
ProposerSeed HashID = "PS"
Seed HashID = "SD"
SpecialAddr HashID = "SpecialAddr"
+ SignedTxnInBlock HashID = "STIB"
TestHashable HashID = "TE"
TxGroup HashID = "TG"
+ TxnMerkleLeaf HashID = "TL"
Transaction HashID = "TX"
Vote HashID = "VO"
)
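Note: the new SignedTxnInBlock ("STIB") and TxnMerkleLeaf ("TL") identifiers follow the usual domain-separation convention of prepending the HashID to the bytes being hashed. The sketch below illustrates that convention with a hypothetical helper; the actual leaf construction lives in data/bookkeeping/txn_merkle.go.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/protocol"
)

// hashWithID is an illustrative helper: prepending the HashID before hashing
// ensures transaction Merkle leaves ("TL") can never collide with digests of
// other hashed structures.
func hashWithID(id protocol.HashID, data []byte) crypto.Digest {
	return crypto.Hash(append([]byte(id), data...))
}

func main() {
	leaf := hashWithID(protocol.TxnMerkleLeaf, []byte("example leaf bytes"))
	fmt.Printf("domain-separated leaf digest: %v\n", leaf)
}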
diff --git a/protocol/tags.go b/protocol/tags.go
index 9e14bcef4..7f448a762 100644
--- a/protocol/tags.go
+++ b/protocol/tags.go
@@ -35,25 +35,9 @@ const (
ProposalPayloadTag Tag = "PP"
TopicMsgRespTag Tag = "TS"
TxnTag Tag = "TX"
- UniCatchupReqTag Tag = "UC"
+ UniCatchupReqTag Tag = "UC" // Replaced by UniEnsBlockReqTag. Only for backward compatibility.
UniEnsBlockReqTag Tag = "UE"
- UniEnsBlockResTag Tag = "US"
- UniCatchupResTag Tag = "UT"
- VoteBundleTag Tag = "VB"
+ // UniEnsBlockResTag Tag = "US" was used by the WsFetcherService
+ // UniCatchupResTag Tag = "UT" was used by the WsFetcherService
+ VoteBundleTag Tag = "VB"
)
-
-// Complement is a convenience function for returning a corresponding response/request tag
-func (t Tag) Complement() Tag {
- switch t {
- case UniCatchupResTag:
- return UniCatchupReqTag
- case UniCatchupReqTag:
- return UniCatchupResTag
- case UniEnsBlockResTag:
- return UniEnsBlockReqTag
- case UniEnsBlockReqTag:
- return UniEnsBlockResTag
- default:
- return UnknownMsgTag
- }
-}
diff --git a/rpcs/blockService.go b/rpcs/blockService.go
index 062bf8346..fec1fe402 100644
--- a/rpcs/blockService.go
+++ b/rpcs/blockService.go
@@ -48,6 +48,15 @@ const blockServerCatchupRequestBufferSize = 10
// e.g. .Handle(BlockServiceBlockPath, &ls)
const BlockServiceBlockPath = "/v{version:[0-9.]+}/{genesisID}/block/{round:[0-9a-z]+}"
+// Constant strings used as keys for topics
+const (
+ RoundKey = "roundKey" // Block round-number topic-key in the request
+ RequestDataTypeKey = "requestDataType" // Data-type topic-key in the request (e.g. block, cert, block+cert)
+ BlockDataKey = "blockData" // Block-data topic-key in the response
+ CertDataKey = "certData" // Cert-data topic-key in the response
+ BlockAndCertValue = "blockAndCert" // block+cert request data (as the value of requestDataTypeKey)
+)
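Note: these keys were previously unexported in wsFetcherService.go (removed below) and are now exported from the block service. Below is a hedged sketch of building the same block+cert request topics with the exported names, mirroring the removed WsFetcherService.RequestBlock code; the wrapper function itself is hypothetical.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/algorand/go-algorand/network"
	"github.com/algorand/go-algorand/rpcs"
)

// buildBlockRequestTopics assembles the request that the removed
// WsFetcherService.RequestBlock used to build, now with the exported keys.
func buildBlockRequestTopics(round uint64) network.Topics {
	roundBin := make([]byte, binary.MaxVarintLen64)
	binary.PutUvarint(roundBin, round)
	return network.Topics{
		network.MakeTopic(rpcs.RequestDataTypeKey, []byte(rpcs.BlockAndCertValue)),
		network.MakeTopic(rpcs.RoundKey, roundBin),
	}
}

func main() {
	topics := buildBlockRequestTopics(1234)
	fmt.Println("request topics:", len(topics))
}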
+
// BlockService represents the Block RPC API
type BlockService struct {
ledger *data.Ledger
@@ -102,7 +111,7 @@ func (bs *BlockService) Start() {
bs.net.RegisterHandlers(handlers)
}
bs.stop = make(chan struct{})
- go bs.ListenForCatchupReq(bs.catchupReqs, bs.stop)
+ go bs.listenForCatchupReq(bs.catchupReqs, bs.stop)
}
// Stop servicing catchup requests over ws
@@ -201,18 +210,6 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
}
}
-// WsGetBlockRequest is a msgpack message requesting a block
-type WsGetBlockRequest struct {
- Round uint64 `json:"round"`
-}
-
-// WsGetBlockOut is a msgpack message delivered on responding to a block (not rpc-based though)
-type WsGetBlockOut struct {
- Round uint64
- Error string
- BlockBytes []byte `json:"blockbytes"`
-}
-
func (bs *BlockService) processIncomingMessage(msg network.IncomingMessage) (n network.OutgoingMessage) {
// don't block - just stick in a slightly buffered channel if possible
select {
@@ -223,8 +220,8 @@ func (bs *BlockService) processIncomingMessage(msg network.IncomingMessage) (n n
return
}
-// ListenForCatchupReq handles catchup getblock request
-func (bs *BlockService) ListenForCatchupReq(reqs <-chan network.IncomingMessage, stop chan struct{}) {
+// listenForCatchupReq handles catchup getblock request
+func (bs *BlockService) listenForCatchupReq(reqs <-chan network.IncomingMessage, stop chan struct{}) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
@@ -245,32 +242,9 @@ const datatypeUnsupportedErrMsg = "requested data type is unsupported"
// a blocking function for handling a catchup request
func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.IncomingMessage) {
- var res WsGetBlockOut
target := reqMsg.Sender.(network.UnicastPeer)
var respTopics network.Topics
- if target.Version() == "1" {
-
- defer func() {
- bs.sendCatchupRes(ctx, target, reqMsg.Tag, res)
- }()
- var req WsGetBlockRequest
- err := protocol.DecodeReflect(reqMsg.Data, &req)
- if err != nil {
- res.Error = err.Error()
- return
- }
- res.Round = req.Round
- encodedBlob, err := RawBlockBytes(bs.ledger, basics.Round(req.Round))
-
- if err != nil {
- res.Error = err.Error()
- return
- }
- res.BlockBytes = encodedBlob
- return
- }
- // Else, if version == 2.1
defer func() {
target.Respond(ctx, reqMsg, respTopics)
}()
@@ -282,7 +256,7 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
network.MakeTopic(network.ErrorKey, []byte(err.Error()))}
return
}
- roundBytes, found := topics.GetValue(roundKey)
+ roundBytes, found := topics.GetValue(RoundKey)
if !found {
logging.Base().Infof("BlockService handleCatchupReq: %s", noRoundNumberErrMsg)
respTopics = network.Topics{
@@ -290,7 +264,7 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
[]byte(noRoundNumberErrMsg))}
return
}
- requestType, found := topics.GetValue(requestDataTypeKey)
+ requestType, found := topics.GetValue(RequestDataTypeKey)
if !found {
logging.Base().Infof("BlockService handleCatchupReq: %s", noDataTypeErrMsg)
respTopics = network.Topics{
@@ -311,15 +285,6 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
return
}
-func (bs *BlockService) sendCatchupRes(ctx context.Context, target network.UnicastPeer, reqTag protocol.Tag, outMsg WsGetBlockOut) {
- t := reqTag.Complement()
- logging.Base().Infof("catching down peer: %v, round %v. outcome: %v. ledger: %v", target.GetAddress(), outMsg.Round, outMsg.Error, bs.ledger.LastRound())
- err := target.Unicast(ctx, protocol.EncodeReflect(outMsg), t)
- if err != nil {
- logging.Base().Info("failed to respond to catchup req", err)
- }
-}
-
func topicBlockBytes(dataLedger *data.Ledger, round basics.Round, requestType string) network.Topics {
blk, cert, err := dataLedger.EncodedBlockCert(round)
if err != nil {
@@ -332,12 +297,12 @@ func topicBlockBytes(dataLedger *data.Ledger, round basics.Round, requestType st
network.MakeTopic(network.ErrorKey, []byte(blockNotAvailabeErrMsg))}
}
switch requestType {
- case blockAndCertValue:
+ case BlockAndCertValue:
return network.Topics{
network.MakeTopic(
- blockDataKey, blk),
+ BlockDataKey, blk),
network.MakeTopic(
- certDataKey, cert),
+ CertDataKey, cert),
}
default:
return network.Topics{
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index fd56e25bc..042988eff 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -77,7 +77,7 @@ func TestHandleCatchupReqNegative(t *testing.T) {
// case where data type is missing
roundNumberData := make([]byte, 0)
- reqTopics = network.Topics{network.MakeTopic(roundKey, roundNumberData)}
+ reqTopics = network.Topics{network.MakeTopic(RoundKey, roundNumberData)}
reqMsg.Data = reqTopics.MarshallTopics()
ls.handleCatchupReq(context.Background(), reqMsg)
respTopics = reqMsg.Sender.(*mockUnicastPeer).responseTopics
@@ -88,8 +88,8 @@ func TestHandleCatchupReqNegative(t *testing.T) {
// case where round number is corrupted
roundNumberData = make([]byte, 0)
- reqTopics = network.Topics{network.MakeTopic(roundKey, roundNumberData),
- network.MakeTopic(requestDataTypeKey, []byte(blockAndCertValue)),
+ reqTopics = network.Topics{network.MakeTopic(RoundKey, roundNumberData),
+ network.MakeTopic(RequestDataTypeKey, []byte(BlockAndCertValue)),
}
reqMsg.Data = reqTopics.MarshallTopics()
ls.handleCatchupReq(context.Background(), reqMsg)
diff --git a/rpcs/wsFetcherService.go b/rpcs/wsFetcherService.go
deleted file mode 100644
index e9c0651c9..000000000
--- a/rpcs/wsFetcherService.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package rpcs
-
-import (
- "context"
- "encoding/binary"
- "fmt"
-
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
-)
-
-// WsFetcherService exists for the express purpose or providing a global
-// handler for fetcher gossip message response types
-type WsFetcherService struct {
- log logging.Logger
- mu deadlock.RWMutex
- pendingRequests map[string]chan WsGetBlockOut
- net network.GossipNode
-}
-
-// Constant strings used as keys for topics
-const (
- roundKey = "roundKey" // Block round-number topic-key in the request
- requestDataTypeKey = "requestDataType" // Data-type topic-key in the request (e.g. block, cert, block+cert)
- blockDataKey = "blockData" // Block-data topic-key in the response
- certDataKey = "certData" // Cert-data topic-key in the response
- blockAndCertValue = "blockAndCert" // block+cert request data (as the value of requestDataTypeKey)
-)
-
-func makePendingRequestKey(target network.UnicastPeer, round basics.Round, tag protocol.Tag) string {
- return fmt.Sprintf("<%s>:%d:%s", target.GetAddress(), round, tag)
-
-}
-
-func (fs *WsFetcherService) handleNetworkMsg(msg network.IncomingMessage) (out network.OutgoingMessage) {
- // route message to appropriate wsFetcher (if registered)
- uniPeer := msg.Sender.(network.UnicastPeer)
- switch msg.Tag {
- case protocol.UniCatchupResTag:
- case protocol.UniEnsBlockResTag:
- default:
- fs.log.Warnf("WsFetcherService: unable to process message coming from '%s'; no fetcher registered for tag (%v)", uniPeer.GetAddress(), msg.Tag)
- return
- }
-
- var resp WsGetBlockOut
-
- if len(msg.Data) == 0 {
- fs.log.Warnf("WsFetcherService(%s): request failed: catchup response no bytes sent", uniPeer.GetAddress())
- out.Action = network.Disconnect
- return
- }
-
- if decodeErr := protocol.DecodeReflect(msg.Data, &resp); decodeErr != nil {
- fs.log.Warnf("WsFetcherService(%s): request failed: unable to decode message : %v", uniPeer.GetAddress(), decodeErr)
- out.Action = network.Disconnect
- return
- }
-
- waitKey := makePendingRequestKey(uniPeer, basics.Round(resp.Round), msg.Tag.Complement())
- fs.mu.RLock()
- f, hasWaitCh := fs.pendingRequests[waitKey]
- fs.mu.RUnlock()
- if !hasWaitCh {
- if resp.Error != "" {
- fs.log.Infof("WsFetcherService: received a message response for a stale block request from '%s', round %d, length %d, error : '%s'", uniPeer.GetAddress(), resp.Round, len(resp.BlockBytes), resp.Error)
- } else {
- fs.log.Infof("WsFetcherService: received a message response for a stale block request from '%s', round %d, length %d", uniPeer.GetAddress(), resp.Round, len(resp.BlockBytes))
- }
- return
- }
-
- f <- resp
- return
-}
-
-// RequestBlock send a request for block <round> and wait until it receives a response or a context expires.
-func (fs *WsFetcherService) RequestBlock(ctx context.Context, target network.UnicastPeer, round basics.Round, tag protocol.Tag) (WsGetBlockOut, error) {
- waitCh := make(chan WsGetBlockOut, 1)
- waitKey := makePendingRequestKey(target, round, tag)
-
- // register.
- fs.mu.Lock()
- if _, has := fs.pendingRequests[waitKey]; has {
- // we already have a pending request for the same round and tag from the same peer
- fs.mu.Unlock()
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): only single concurrent request for a round from a single peer(%s) is supported", round, target.GetAddress())
- }
- fs.pendingRequests[waitKey] = waitCh
- fs.mu.Unlock()
-
- defer func() {
- // unregister
- fs.mu.Lock()
- delete(fs.pendingRequests, waitKey)
- fs.mu.Unlock()
- }()
- if target.Version() == "1" {
- req := WsGetBlockRequest{Round: uint64(round)}
- err := target.Unicast(ctx, protocol.EncodeReflect(req), tag)
- if err != nil {
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): unicast failed, %v", round, err)
- }
- select {
- case resp := <-waitCh:
- return resp, nil
- case <-ctx.Done():
- switch ctx.Err() {
- case context.DeadlineExceeded:
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): request to %s was timed out", round, target.GetAddress())
- case context.Canceled:
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): request to %s was cancelled by context", round, target.GetAddress())
- default:
- return WsGetBlockOut{}, ctx.Err()
- }
- }
- }
-
- // Else, if version == 2.1
- roundBin := make([]byte, binary.MaxVarintLen64)
- binary.PutUvarint(roundBin, uint64(round))
- topics := network.Topics{
- network.MakeTopic(requestDataTypeKey,
- []byte(blockAndCertValue)),
- network.MakeTopic(
- roundKey,
- roundBin),
- }
- resp, err := target.Request(ctx, tag, topics)
- if err != nil {
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s).RequestBlock(%d): Request failed, %v", target.GetAddress(), round, err)
- }
-
- if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found {
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s).RequestBlock(%d): Request failed, %s", target.GetAddress(), round, string(errMsg))
- }
-
- blk, found := resp.Topics.GetValue(blockDataKey)
- if !found {
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s): request failed: block data not found", target.GetAddress())
- }
- cert, found := resp.Topics.GetValue(certDataKey)
- if !found {
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s): request failed: cert data not found", target.GetAddress())
- }
-
- // For backward compatibility, the block and cert are repackaged here.
- // This can be dropeed once the v1 is dropped.
- blockCertBytes := protocol.EncodeReflect(PreEncodedBlockCert{
- Block: blk,
- Certificate: cert})
-
- wsBlockOut := WsGetBlockOut{
- Round: uint64(round),
- BlockBytes: blockCertBytes,
- }
- return wsBlockOut, nil
-}
-
-// MakeWsFetcherService creates and returns a WsFetcherService that services gossip fetcher responses
-func MakeWsFetcherService(log logging.Logger, net network.GossipNode) *WsFetcherService {
- service := &WsFetcherService{
- log: log,
- pendingRequests: make(map[string]chan WsGetBlockOut),
- net: net,
- }
- return service
-}
-
-// Start starts the WsFetcherService
-func (fs *WsFetcherService) Start() {
- handlers := []network.TaggedMessageHandler{
- {Tag: protocol.UniCatchupResTag, MessageHandler: network.HandlerFunc(fs.handleNetworkMsg)}, // handles the response for a block catchup request
- {Tag: protocol.UniEnsBlockResTag, MessageHandler: network.HandlerFunc(fs.handleNetworkMsg)}, // handles the response for a block ensure digest request
- }
- fs.net.RegisterHandlers(handlers)
-}
-
-// Stop stops the WsFetcherService
-func (fs *WsFetcherService) Stop() {
-
-}
diff --git a/scripts/buildhost/start_ec2_instance.sh b/scripts/buildhost/start_ec2_instance.sh
index 52460cd41..1d06d3622 100755
--- a/scripts/buildhost/start_ec2_instance.sh
+++ b/scripts/buildhost/start_ec2_instance.sh
@@ -10,7 +10,7 @@
#
# Examples: scripts/buildhost/start_ec2_instance.sh <AWS_REGION> <AWS_AMI> <AWS_INSTANCE_TYPE>
#
-# Upon successfull execution, the following files would be created:
+# Upon successful execution, the following files would be created:
# sgid - contain the security group identifier
# key.pem - contains the certificate required to log on to the server
# instance - contains the address of the created instance
diff --git a/scripts/release/test/deb/testDebian.exp b/scripts/release/test/deb/testDebian.exp
index 80e94fe80..f8139ddd8 100644
--- a/scripts/release/test/deb/testDebian.exp
+++ b/scripts/release/test/deb/testDebian.exp
@@ -33,7 +33,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set TEST_ROOT_DIR_LS_OUTPUT [ eval exec ls $TEST_ROOT_DIR ]
puts "TEST_ROOT_DIR_LS_OUTPUT ls output: $TEST_ROOT_DIR_LS_OUTPUT"
@@ -93,7 +93,7 @@ if { [catch {
::AlgorandGoal::DeleteAccount $WALLET_1_NAME $WALLET_1_PASSWORD $ACCOUNT_1_ADDRESS $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Basic Goal Test Successful"
diff --git a/test/README.md b/test/README.md
index 096e18071..0061fb0f1 100644
--- a/test/README.md
+++ b/test/README.md
@@ -33,16 +33,17 @@ These tests are shell scripts which all run in series against a single private n
Each script is provided with a wallet which contains a large supply of algos to use during the test.
```
-usage: e2e_client_runner.py [-h] [--keep-temps] [--timeout TIMEOUT] [--verbose] [scripts [scripts ...]]
+usage: e2e_client_runner.py [-h] [--keep-temps] [--timeout TIMEOUT] [--verbose] [--version Future|vXX] [scripts [scripts ...]]
positional arguments:
scripts scripts to run
optional arguments:
- -h, --help show this help message and exit
- --keep-temps if set, keep all the test files
- --timeout TIMEOUT integer seconds to wait for the scripts to run
+ -h, --help show this help message and exit
+ --keep-temps if set, keep all the test files
+ --timeout TIMEOUT integer seconds to wait for the scripts to run
--verbose
+ --version Future|vXX selects the network template file
```
To run a specific test:
diff --git a/test/e2e-go/cli/goal/expect/basicGoalTest.exp b/test/e2e-go/cli/goal/expect/basicGoalTest.exp
index 319c1cf1a..b9ca99e56 100644
--- a/test/e2e-go/cli/goal/expect/basicGoalTest.exp
+++ b/test/e2e-go/cli/goal/expect/basicGoalTest.exp
@@ -21,7 +21,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -96,7 +96,7 @@ if { [catch {
::AlgorandGoal::DeleteAccount $WALLET_1_NAME $WALLET_1_PASSWORD $ACCOUNT_1_ADDRESS $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Basic Goal Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/corsTest.exp b/test/e2e-go/cli/goal/expect/corsTest.exp
index 8d18bbce4..7691b740f 100644
--- a/test/e2e-go/cli/goal/expect/corsTest.exp
+++ b/test/e2e-go/cli/goal/expect/corsTest.exp
@@ -21,7 +21,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
# Set Primary Wallet Name
set PRIMARY_WALLET_NAME unencrypted-default-wallet
@@ -37,7 +37,7 @@ if { [catch {
::AlgorandGoal::CheckNetworkAddressForCors $KMD_NET_ADDRESS
exec goal kmd stop -d $TEST_PRIMARY_NODE_DIR
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Basic Cors Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/createWalletTest.exp b/test/e2e-go/cli/goal/expect/createWalletTest.exp
index c6c2bef15..96dfaec8c 100644
--- a/test/e2e-go/cli/goal/expect/createWalletTest.exp
+++ b/test/e2e-go/cli/goal/expect/createWalletTest.exp
@@ -26,7 +26,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
# Set Primary Wallet Name
set PRIMARY_WALLET_NAME unencrypted-default-wallet
@@ -171,7 +171,7 @@ if { [catch {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
exit 0
diff --git a/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp b/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp
index ee8c27f47..c88ada3c7 100644
--- a/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp
+++ b/test/e2e-go/cli/goal/expect/doubleSpendingTest.exp
@@ -27,7 +27,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
# Create a new wallet
set WALLET_1_NAME Wallet_1_$TIME_STAMP
@@ -146,7 +146,7 @@ if { [catch {
puts "SUCCESS: Double spending test successful"
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
exit 0
diff --git a/test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp b/test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp
index 3bba5b877..063651a9a 100644
--- a/test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp
@@ -23,7 +23,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -148,7 +148,7 @@ Opted In Apps:
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Goal Account Info Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/goalAccountTest.exp b/test/e2e-go/cli/goal/expect/goalAccountTest.exp
index deb9f6e0a..0b5999cc4 100644
--- a/test/e2e-go/cli/goal/expect/goalAccountTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalAccountTest.exp
@@ -22,7 +22,7 @@ if { [catch {
# Create network
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -133,7 +133,7 @@ if { [catch {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
exit 0
} EXCEPTION ] } {
diff --git a/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp b/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp
index f9c0b8752..cddb56f36 100644
--- a/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp
@@ -38,7 +38,7 @@ proc goalAppAccountAddress { TEST_ALGO_DIR TEST_DATA_DIR} {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -164,7 +164,7 @@ proc goalAppAccountAddress { TEST_ALGO_DIR TEST_DATA_DIR} {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Goal Stateful Teal test Successful"
}
diff --git a/test/e2e-go/cli/goal/expect/goalAssetTest.exp b/test/e2e-go/cli/goal/expect/goalAssetTest.exp
index 47c1a62e1..23975811e 100644
--- a/test/e2e-go/cli/goal/expect/goalAssetTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalAssetTest.exp
@@ -23,7 +23,7 @@ if { [catch {
# Create network
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -63,7 +63,7 @@ if { [catch {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
} EXCEPTION] } {
puts "ERROR in goalAssetTest: $EXCEPTION"
exit 1
diff --git a/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp b/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp
index 9c80b9c1f..7a86effbd 100644
--- a/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalClerkGroupTest.exp
@@ -21,7 +21,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_WALLET_NAME unencrypted-default-wallet
@@ -57,7 +57,7 @@ if { [catch {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "goal clerk group test Successful"
diff --git a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp
index e2ee04da1..886b57cd8 100644
--- a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp
@@ -64,7 +64,7 @@ if { [catch {
exec rm $TEST_ROOT_DIR/Primary/config.json
exec mv $TEST_ROOT_DIR/Primary/config.json.new $TEST_ROOT_DIR/Primary/config.json
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -152,7 +152,7 @@ if { [catch {
TestGoalDryrun $DRREQ_FILE_OPTIN $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
exit 0
} EXCEPTION ] } {
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index 5920c9df3..7a14e84f6 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -51,7 +51,7 @@ proc ::AlgorandGoal::Abort { ERROR } {
puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR"
puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR"
puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME"
- ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR
}
exit 1
}
@@ -199,9 +199,8 @@ proc ::AlgorandGoal::CreateNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ALGO_DIR
}
# Start the network
-proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ALGO_DIR TEST_ROOT_DIR } {
+proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ROOT_DIR } {
set timeout 120
- set ::GLOBAL_TEST_ALGO_DIR $TEST_ALGO_DIR
set ::GLOBAL_TEST_ROOT_DIR $TEST_ROOT_DIR
set ::GLOBAL_NETWORK_NAME $NETWORK_NAME
@@ -235,16 +234,15 @@ proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ALGO_DIR
}
# Stop the network
-proc ::AlgorandGoal::StopNetwork { NETWORK_NAME TEST_ALGO_DIR TEST_ROOT_DIR } {
+proc ::AlgorandGoal::StopNetwork { NETWORK_NAME TEST_ROOT_DIR } {
set timeout 60
set NETWORK_STOP_MESSAGE ""
puts "Stopping network: $NETWORK_NAME"
- spawn goal network stop -d $TEST_ALGO_DIR -r $TEST_ROOT_DIR
+ spawn goal network stop -r $TEST_ROOT_DIR
expect {
timeout {
close
puts "Timed out shutting down network"
- puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR"
puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR"
puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME"
exit 1
diff --git a/test/e2e-go/cli/goal/expect/goalNodeStatusTest.exp b/test/e2e-go/cli/goal/expect/goalNodeStatusTest.exp
index 9a7ccfdf8..ea35ec737 100644
--- a/test/e2e-go/cli/goal/expect/goalNodeStatusTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalNodeStatusTest.exp
@@ -21,7 +21,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
exec sleep 5
@@ -29,7 +29,7 @@ if { [catch {
::AlgorandGoal::WaitForRound 3 $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Basic Goal Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/goalNodeTest.exp b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
index ca284ba23..6a2f94406 100644
--- a/test/e2e-go/cli/goal/expect/goalNodeTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
@@ -23,7 +23,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
exec sleep 5
@@ -79,7 +79,7 @@ if { [catch {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Basic Goal Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp b/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp
index 4c791d2e3..37acb80ce 100644
--- a/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp
@@ -49,7 +49,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
# use goal node status command to wait for round 0
::AlgorandGoal::WaitForRound 0 $TEST_PRIMARY_NODE_DIR
@@ -88,7 +88,7 @@ if { [catch {
TestLastValidInTx "goal account changeonlinestatus --validrounds 1000 --firstvalid 2 --address $PRIMARY_ACCOUNT_ADDRESS --online -d $TEST_PRIMARY_NODE_DIR -t $TX_FILE" $TX_FILE $LV_EXPECTED
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
exit 0
} EXCEPTION ] } {
diff --git a/test/e2e-go/cli/goal/expect/ledgerTest.exp b/test/e2e-go/cli/goal/expect/ledgerTest.exp
index dd279c1ba..f53b3bb0d 100644
--- a/test/e2e-go/cli/goal/expect/ledgerTest.exp
+++ b/test/e2e-go/cli/goal/expect/ledgerTest.exp
@@ -25,7 +25,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -43,7 +43,7 @@ if { [catch {
::AlgorandGoal::GetLedgerSupply $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Goal Ledger Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/limitOrderTest.exp b/test/e2e-go/cli/goal/expect/limitOrderTest.exp
index 5c97633b8..26d899f69 100644
--- a/test/e2e-go/cli/goal/expect/limitOrderTest.exp
+++ b/test/e2e-go/cli/goal/expect/limitOrderTest.exp
@@ -21,7 +21,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -182,7 +182,7 @@ if { [catch {
puts "send limit order transaction in $RAW_TRANSACTION_ID"
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Limit Order Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/listExpiredParticipationKeyTest.exp b/test/e2e-go/cli/goal/expect/listExpiredParticipationKeyTest.exp
index 8cbcbd6bb..4bd942c77 100644
--- a/test/e2e-go/cli/goal/expect/listExpiredParticipationKeyTest.exp
+++ b/test/e2e-go/cli/goal/expect/listExpiredParticipationKeyTest.exp
@@ -26,7 +26,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_WALLET_NAME unencrypted-default-wallet
@@ -48,7 +48,7 @@ if { [catch {
::AlgorandGoal::ListParticipationKeys $TEST_PRIMARY_NODE_DIR
# Clean up
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
exit 0
diff --git a/test/e2e-go/cli/goal/expect/multisigCreationDeletionTest.exp b/test/e2e-go/cli/goal/expect/multisigCreationDeletionTest.exp
index de199864f..4cae0e2a4 100644
--- a/test/e2e-go/cli/goal/expect/multisigCreationDeletionTest.exp
+++ b/test/e2e-go/cli/goal/expect/multisigCreationDeletionTest.exp
@@ -26,7 +26,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
# Set Wallet Name and verify it
set WALLET_NAME unencrypted-default-wallet
@@ -47,7 +47,7 @@ if { [catch {
::AlgorandGoal::DeleteMultisigAccount $MULTISIG_ADDRESS $TEST_PRIMARY_NODE_DIR
# Clean up
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
exit 0
diff --git a/test/e2e-go/cli/goal/expect/pingpongTest.exp b/test/e2e-go/cli/goal/expect/pingpongTest.exp
index 72b212f41..5a03ee685 100644
--- a/test/e2e-go/cli/goal/expect/pingpongTest.exp
+++ b/test/e2e-go/cli/goal/expect/pingpongTest.exp
@@ -24,7 +24,7 @@ proc pingpongTest { TEST_ALGO_DIR TEST_DATA_DIR} {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -82,7 +82,7 @@ proc pingpongTest { TEST_ALGO_DIR TEST_DATA_DIR} {
# Shutdown the network
#----------------------
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Pinpong Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/reportTest.exp b/test/e2e-go/cli/goal/expect/reportTest.exp
index f01bf43f8..e8112960e 100644
--- a/test/e2e-go/cli/goal/expect/reportTest.exp
+++ b/test/e2e-go/cli/goal/expect/reportTest.exp
@@ -25,7 +25,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -34,7 +34,7 @@ if { [catch {
::AlgorandGoal::Report $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Goal Report Test Successful"
diff --git a/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp b/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp
index d5fd16955..eb4f5ed03 100644
--- a/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp
+++ b/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp
@@ -24,7 +24,7 @@ proc statefulTealAppInfoTest { TEST_ALGO_DIR TEST_DATA_DIR} {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -135,7 +135,7 @@ proc statefulTealAppInfoTest { TEST_ALGO_DIR TEST_DATA_DIR} {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Goal statefulTealAppInfoTest Successful"
diff --git a/test/e2e-go/cli/goal/expect/statefulTealAppReadTest.exp b/test/e2e-go/cli/goal/expect/statefulTealAppReadTest.exp
index 04a7d36dd..274573ae0 100644
--- a/test/e2e-go/cli/goal/expect/statefulTealAppReadTest.exp
+++ b/test/e2e-go/cli/goal/expect/statefulTealAppReadTest.exp
@@ -24,7 +24,7 @@ proc statefulTealAppReadTest { TEST_ALGO_DIR TEST_DATA_DIR} {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -122,7 +122,7 @@ proc statefulTealAppReadTest { TEST_ALGO_DIR TEST_DATA_DIR} {
}
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Goal statefulTealAppReadTest Successful"
diff --git a/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp b/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp
index a6aceb9d4..9bd0ff253 100644
--- a/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp
+++ b/test/e2e-go/cli/goal/expect/statefulTealCreateAppTest.exp
@@ -25,7 +25,7 @@ proc statefulTealTest { TEST_ALGO_DIR TEST_DATA_DIR TEAL_PROGRAM} {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -115,7 +115,7 @@ proc statefulTealTest { TEST_ALGO_DIR TEST_DATA_DIR TEAL_PROGRAM} {
::AlgorandGoal::AppDelete $APP_ID $WALLET_1_NAME $WALLET_1_PASSWORD $ACCOUNT_1_ADDRESS "str:hello" $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Goal Stateful Teal test Successful"
}
diff --git a/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp b/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp
index ab7e511e9..a32e2b401 100644
--- a/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp
+++ b/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp
@@ -21,7 +21,7 @@ if { [catch {
::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
# Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
@@ -152,7 +152,7 @@ if { [catch {
::AlgorandGoal::RawSend signout.tx $TEST_PRIMARY_NODE_DIR
# Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
puts "Mixed Teal Test Successful"
diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp
index 57b0fb405..7be2f9066 100644
--- a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp
+++ b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp
@@ -11,15 +11,79 @@ if { [catch {
exec mkdir -p $TEST_DIR
set TEAL_PROG_FILE "$TEST_DIR/trivial.teal"
+
+ # this is ConsensusV25
+ set PROTOCOL_VERSION_2 "https://github.com/algorandfoundation/specs/tree/bea19289bf41217d2c0af30522fa222ef1366466"
+
+ # this is ConsensusV26
+ set PROTOCOL_VERSION_3 "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff"
+
+ # run the test using version 2:
+
exec printf "#pragma version 2\nint 1\ndup\n+\n" > $TEAL_PROG_FILE
set URL ""
set PASSED 0
- spawn tealdbg debug -v $TEAL_PROG_FILE
+ spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_2
+ expect_background {
+ timeout { puts "tealdbg debug timed out"; exit 1 }
+ -re {CDT debugger listening on: (ws://[.a-z0-9:/]+)} { set URL $expect_out(1,string); }
+ eof {
+ catch wait result
+ if { [lindex $result 3] != 0 } {
+ puts "returned error code is [lindex $result 3]"
+ exit 1
+ }
+ }
+ }
+ set tealdbg_spawn_id $spawn_id
+
+ # wait until URL is set or timeout
+ set it 0
+ while { $it < 10 && $URL == "" } {
+ set it [expr {$it + 1}]
+ sleep 1
+ }
+ if { $URL == "" } {
+ puts "ERROR: URL is not set after timeout"
+ exit 1
+ }
+
+ spawn cdtmock $URL
+ expect {
+ timeout { puts "cdt-mock debug timed out"; exit 1 }
+ -re {Debugger.paused} { set PASSED 1; }
+ eof { catch wait result; if { [lindex $result 3] == 0 } { puts "Expected non-zero exit code"; exit [lindex $result 3] } }
+ }
+
+ if { $PASSED == 0 } {
+ puts "ERROR: have not found 'Debugger.paused' in cdtmock output"
+ exit 1
+ }
+
+ puts "Shutting down tealdbg"
+ close -i $tealdbg_spawn_id
+ exec rm $TEAL_PROG_FILE
+
+ # run the test using version 3:
+
+ exec printf "#pragma version 3\nint 1\ndup\n+\n" > $TEAL_PROG_FILE
+
+ set URL ""
+ set PASSED 0
+ spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_3
expect_background {
timeout { puts "tealdbg debug timed out"; exit 1 }
-re {CDT debugger listening on: (ws://[.a-z0-9:/]+)} { set URL $expect_out(1,string); }
+ eof {
+ catch wait result
+ if { [lindex $result 3] != 0 } {
+ puts "returned error code is [lindex $result 3]"
+ exit 1
+ }
+ }
}
+ set tealdbg_spawn_id $spawn_id
# wait until URL is set or timeout
set it 0
@@ -44,6 +108,9 @@ if { [catch {
exit 1
}
+ puts "Shutting down tealdbg"
+ close -i $tealdbg_spawn_id
+
} EXCEPTION ] } {
puts "ERROR in teadbgTest: $EXCEPTION"
exit 1
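
The rewritten tealdbgTest.exp above now runs the same scenario twice, once per protocol version: it spawns tealdbg with an explicit -p version, scrapes the "CDT debugger listening on" URL from its output in the background, polls for up to ten seconds until that URL appears, and only then attaches cdtmock and looks for "Debugger.paused". A minimal Go sketch of that bounded-poll step, with a hypothetical getURL stand-in for the expect_background scraping, might look like:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForURL polls getURL until it returns a non-empty value or the attempt
// budget runs out, mirroring the script's "wait until URL is set or timeout"
// loop (10 attempts, one second apart).
func waitForURL(getURL func() string, attempts int, delay time.Duration) (string, error) {
	for i := 0; i < attempts; i++ {
		if url := getURL(); url != "" {
			return url, nil
		}
		time.Sleep(delay)
	}
	return "", errors.New("URL is not set after timeout")
}

func main() {
	// Hypothetical source of the debugger URL; in the real test it is filled
	// in asynchronously by expect_background matching tealdbg's stdout.
	fakeSource := func() string { return "ws://127.0.0.1:9392/..." }
	url, err := waitForURL(fakeSource, 10, time.Second)
	fmt.Println(url, err)
}
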
diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go
index bef417d17..7eb7a9b0c 100644
--- a/test/e2e-go/features/catchup/basicCatchup_test.go
+++ b/test/e2e-go/features/catchup/basicCatchup_test.go
@@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
@@ -76,19 +77,24 @@ func TestBasicCatchup(t *testing.T) {
// The current versions are the original v1 and the upgraded v2.1
func TestCatchupOverGossip(t *testing.T) {
t.Parallel()
+
+ supportedVersions := network.SupportedProtocolVersions
+ require.LessOrEqual(t, len(supportedVersions), 3)
+
// ledger node upgraded version, fetcher node upgraded version
- runCatchupOverGossip(t, false, false)
- // ledger node older version, fetcher node upgraded version
- runCatchupOverGossip(t, true, false)
- // ledger node upgraded older version, fetcher node older version
- runCatchupOverGossip(t, false, true)
- // ledger node older version, fetcher node older version
- runCatchupOverGossip(t, true, true)
+ // Run with the default values. Instead of "", pass the default value
+ // to exercise loading it from the config file.
+ runCatchupOverGossip(t, supportedVersions[0], supportedVersions[0])
+ for i := 1; i < len(supportedVersions); i++ {
+ runCatchupOverGossip(t, supportedVersions[i], "")
+ runCatchupOverGossip(t, "", supportedVersions[i])
+ runCatchupOverGossip(t, supportedVersions[i], supportedVersions[i])
+ }
}
func runCatchupOverGossip(t *testing.T,
- ledgerNodeDowngrade,
- fetcherNodeDowngrade bool) {
+ ledgerNodeDowngradeTo,
+ fetcherNodeDowngradeTo string) {
if testing.Short() {
t.Skip()
@@ -105,22 +111,24 @@ func runCatchupOverGossip(t *testing.T,
// distribution for catchup so this is fine.
fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100Second.json"))
- if ledgerNodeDowngrade {
+	if ledgerNodeDowngradeTo != "" {
// Force the node to only support v1
dir, err := fixture.GetNodeDir("Node")
a.NoError(err)
cfg, err := config.LoadConfigFromDisk(dir)
a.NoError(err)
- cfg.NetworkProtocolVersion = "1"
+ require.Empty(t, cfg.NetworkProtocolVersion)
+ cfg.NetworkProtocolVersion = ledgerNodeDowngradeTo
cfg.SaveToDisk(dir)
}
- if fetcherNodeDowngrade {
+ if fetcherNodeDowngradeTo != "" {
// Force the node to only support v1
dir := fixture.PrimaryDataDir()
cfg, err := config.LoadConfigFromDisk(dir)
a.NoError(err)
- cfg.NetworkProtocolVersion = "1"
+ require.Empty(t, cfg.NetworkProtocolVersion)
+ cfg.NetworkProtocolVersion = fetcherNodeDowngradeTo
cfg.SaveToDisk(dir)
}
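
With the switch from boolean downgrade flags to version strings, the test matrix is now derived from network.SupportedProtocolVersions, and an empty string means "leave NetworkProtocolVersion at its config-file default". A rough sketch of the (ledger, fetcher) pairs the loop above produces, assuming a hypothetical two-entry version list, could be:

package main

import "fmt"

func main() {
	// Hypothetical stand-in for network.SupportedProtocolVersions.
	supported := []string{"2.1", "1"}

	// First run: the newest version on both sides (instead of "", to exercise
	// loading the value back from the config file).
	fmt.Printf("ledger=%q fetcher=%q\n", supported[0], supported[0])

	// Then every older version paired with the default and with itself.
	for i := 1; i < len(supported); i++ {
		fmt.Printf("ledger=%q fetcher=default\n", supported[i])
		fmt.Printf("ledger=default fetcher=%q\n", supported[i])
		fmt.Printf("ledger=%q fetcher=%q\n", supported[i], supported[i])
	}
}
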
diff --git a/test/e2e-go/features/transactions/proof_test.go b/test/e2e-go/features/transactions/proof_test.go
new file mode 100644
index 000000000..3409191df
--- /dev/null
+++ b/test/e2e-go/features/transactions/proof_test.go
@@ -0,0 +1,105 @@
+// Copyright (C) 2019-2021 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package transactions
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+)
+
+func TestTxnMerkleProof(t *testing.T) {
+ t.Parallel()
+ a := require.New(t)
+
+ var fixture fixtures.RestClientFixture
+ fixture.Setup(t, filepath.Join("nettemplates", "OneNodeFuture.json"))
+ defer fixture.Shutdown()
+ client := fixture.LibGoalClient
+
+ // We will create three new accounts, transfer some amount of money into
+ // the first account, and then transfer a smaller amount to the second
+ // account while closing out the rest into the third.
+
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+ baseAcct := accountList[0].Address
+
+ walletHandle, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ acct0, err := client.GenerateAddress(walletHandle)
+ a.NoError(err)
+
+ status, err := client.Status()
+ a.NoError(err)
+
+ // Transfer some money to acct0, as well as other random accounts to
+ // fill up the Merkle tree with more than one element.
+ for i := 0; i < 10; i++ {
+ accti, err := client.GenerateAddress(walletHandle)
+ a.NoError(err)
+
+ _, err = client.SendPaymentFromUnencryptedWallet(baseAcct, accti, 1000, 10000000, nil)
+ a.NoError(err)
+ }
+
+ tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, 1000, 10000000, nil)
+ a.NoError(err)
+
+ for i := 0; i < 10; i++ {
+ accti, err := client.GenerateAddress(walletHandle)
+ a.NoError(err)
+
+ _, err = client.SendPaymentFromUnencryptedWallet(baseAcct, accti, 1000, 10000000, nil)
+ a.NoError(err)
+ }
+
+ txid := tx.ID()
+ confirmedTx, err := fixture.WaitForConfirmedTxn(status.LastRound+10, baseAcct, txid.String())
+ a.NoError(err)
+
+ proofresp, err := client.TxnProof(txid.String(), confirmedTx.ConfirmedRound)
+ a.NoError(err)
+
+ var proof []crypto.Digest
+ proofconcat := []byte(proofresp.Proof)
+ for len(proofconcat) > 0 {
+ var d crypto.Digest
+ copy(d[:], proofconcat)
+ proof = append(proof, d)
+ proofconcat = proofconcat[len(d):]
+ }
+
+ blk, err := client.BookkeepingBlock(confirmedTx.ConfirmedRound)
+ a.NoError(err)
+
+ merkleNode := []byte(protocol.TxnMerkleLeaf)
+ merkleNode = append(merkleNode, txid[:]...)
+ merkleNode = append(merkleNode, proofresp.Stibhash...)
+
+ elems := make(map[uint64]crypto.Digest)
+ elems[proofresp.Idx] = crypto.Hash(merkleNode)
+ err = merklearray.Verify(blk.TxnRoot, elems, proof)
+ a.NoError(err)
+}
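
The new proof_test.go rebuilds the transaction's Merkle leaf by hand: the proof bytes returned by the REST call are split into fixed-size digests, and the leaf is the hash of the protocol.TxnMerkleLeaf domain-separation prefix followed by the transaction ID and the returned Stibhash. A standalone sketch of just that reassembly, assuming 32-byte digests, a proof whose length is a multiple of 32, and "TL" as the leaf prefix (crypto.Hash in go-algorand is SHA-512/256), might be:

package main

import (
	"crypto/sha512"
	"fmt"
)

// splitProof cuts a concatenated proof response into fixed-size digests,
// the same loop the test runs over proofresp.Proof.
func splitProof(concat []byte, digestLen int) [][]byte {
	var out [][]byte
	for len(concat) > 0 {
		d := make([]byte, digestLen)
		copy(d, concat)
		out = append(out, d)
		concat = concat[digestLen:] // assumes len(concat) is a multiple of digestLen
	}
	return out
}

// leafHash mirrors the merkleNode construction above:
// Hash(prefix || txid || stibhash), with SHA-512/256 standing in for crypto.Hash.
func leafHash(prefix string, txid, stibhash []byte) [32]byte {
	buf := append([]byte(prefix), txid...)
	buf = append(buf, stibhash...)
	return sha512.Sum512_256(buf)
}

func main() {
	proof := make([]byte, 64) // hypothetical two-digest proof
	fmt.Println(len(splitProof(proof, 32)))
	fmt.Println(leafHash("TL", make([]byte, 32), make([]byte, 32)))
}

The digests recovered by splitProof and the map from the leaf's index to leafHash's output are what the test hands to merklearray.Verify against the block's TxnRoot.
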
diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go
index 68fffac21..8514c54fd 100644
--- a/test/e2e-go/upgrades/application_support_test.go
+++ b/test/e2e-go/upgrades/application_support_test.go
@@ -36,27 +36,31 @@ import (
// test-fast-upgrade-future
const consensusTestUnupgradedProtocol = protocol.ConsensusVersion("test-unupgraded-protocol")
+// given that consensus versions are constant and only grow forward, we can safely refer to them here:
+const lastProtocolBeforeApplicationSupport = protocol.ConsensusV23
+const firstProtocolWithApplicationSupport = protocol.ConsensusV24
+
func makeApplicationUpgradeConsensus(t *testing.T) (appConsensus config.ConsensusProtocols) {
appConsensus = generateFastUpgradeConsensus()
// make sure that the "current" version does not support applications and that the "future" version *does* support applications.
- currentProtocolParams, ok := appConsensus[consensusTestFastUpgrade(protocol.ConsensusCurrentVersion)]
+ currentProtocolParams, ok := appConsensus[consensusTestFastUpgrade(lastProtocolBeforeApplicationSupport)]
require.True(t, ok)
- futureProtocolParams, ok := appConsensus[consensusTestFastUpgrade(protocol.ConsensusFuture)]
+ futureProtocolParams, ok := appConsensus[consensusTestFastUpgrade(firstProtocolWithApplicationSupport)]
require.True(t, ok)
// ensure it's disabled.
- currentProtocolParams.Application = false
- currentProtocolParams.SupportRekeying = false
+ require.False(t, currentProtocolParams.Application)
+ require.False(t, currentProtocolParams.SupportRekeying)
// verify that the future protocol supports applications.
require.True(t, futureProtocolParams.Application)
// add an upgrade path from current to future.
currentProtocolParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
- currentProtocolParams.ApprovedUpgrades[consensusTestFastUpgrade(protocol.ConsensusFuture)] = 0
+ currentProtocolParams.ApprovedUpgrades[consensusTestFastUpgrade(firstProtocolWithApplicationSupport)] = 0
appConsensus[consensusTestUnupgradedProtocol] = currentProtocolParams
- appConsensus[consensusTestFastUpgrade(protocol.ConsensusFuture)] = futureProtocolParams
+ appConsensus[consensusTestFastUpgrade(firstProtocolWithApplicationSupport)] = futureProtocolParams
return
}
@@ -286,9 +290,9 @@ func TestApplicationsUpgradeOverGossip(t *testing.T) {
fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100SecondTestUnupgradedProtocol.json"))
// for the primary node, we want to have a different consensus which always enables applications.
- primaryNodeUnupgradedProtocol := consensus[consensusTestFastUpgrade(protocol.ConsensusFuture)]
+ primaryNodeUnupgradedProtocol := consensus[consensusTestFastUpgrade(firstProtocolWithApplicationSupport)]
primaryNodeUnupgradedProtocol.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
- primaryNodeUnupgradedProtocol.ApprovedUpgrades[consensusTestFastUpgrade(protocol.ConsensusFuture)] = 0
+ primaryNodeUnupgradedProtocol.ApprovedUpgrades[consensusTestFastUpgrade(firstProtocolWithApplicationSupport)] = 0
consensus[consensusTestUnupgradedProtocol] = primaryNodeUnupgradedProtocol
client := fixture.GetLibGoalClientForNamedNode("Primary")
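
Pinning lastProtocolBeforeApplicationSupport / firstProtocolWithApplicationSupport instead of ConsensusCurrentVersion / ConsensusFuture keeps the fixture stable as new consensus versions land: the upgrade path is expressed as an ApprovedUpgrades entry from the unupgraded test protocol to the first application-capable one. A minimal, self-contained sketch of that shape, using placeholder version names and a stripped-down params struct rather than the real config.ConsensusParams, might look like:

package main

import "fmt"

// params is a stripped-down stand-in for config.ConsensusParams.
type params struct {
	Application      bool
	ApprovedUpgrades map[string]uint64 // target version -> upgrade delay in rounds
}

func main() {
	protocols := map[string]params{}

	current := params{Application: false} // last version without applications
	future := params{Application: true}   // first version with applications

	// Add an upgrade path from current to future with zero delay, the same
	// move the test makes via currentProtocolParams.ApprovedUpgrades[...] = 0.
	current.ApprovedUpgrades = map[string]uint64{"test-future-protocol": 0}

	protocols["test-unupgraded-protocol"] = current
	protocols["test-future-protocol"] = future

	fmt.Println(protocols["test-unupgraded-protocol"].ApprovedUpgrades)
}
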
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index bd7616b19..60bd6fc96 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -102,7 +102,7 @@ func (f *LibGoalFixture) setup(test TestingT, testName string, templateFile stri
}
}
-// nodeExitWithError is a callback from the network indicating that the node exit with an error after a successfull startup.
+// nodeExitWithError is a callback from the network indicating that the node exited with an error after a successful startup.
// i.e. node terminated, and not due to shutdown.. this is likely to be a crash/panic.
func (f *LibGoalFixture) nodeExitWithError(nc *nodecontrol.NodeController, err error) {
if err == nil {
diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml
index 3b147875c..dcab28bae 100644
--- a/test/muleCI/mule.yaml
+++ b/test/muleCI/mule.yaml
@@ -8,6 +8,7 @@ agents:
- TRAVIS_BRANCH=${GIT_BRANCH}
- NETWORK=$NETWORK
- VERSION=$VERSION
+ - BUILD_NUMBER=$BUILD_NUMBER
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
@@ -20,6 +21,7 @@ agents:
- TRAVIS_BRANCH=${GIT_BRANCH}
- NETWORK=$NETWORK
- VERSION=$VERSION
+ - BUILD_NUMBER=$BUILD_NUMBER
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
@@ -32,6 +34,7 @@ agents:
- TRAVIS_BRANCH=${GIT_BRANCH}
- NETWORK=$NETWORK
- VERSION=$VERSION
+ - BUILD_NUMBER=$BUILD_NUMBER
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=arm64v8
@@ -44,6 +47,7 @@ agents:
- TRAVIS_BRANCH=${GIT_BRANCH}
- NETWORK=$NETWORK
- VERSION=$VERSION
+ - BUILD_NUMBER=$BUILD_NUMBER
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=arm32v6
@@ -55,10 +59,25 @@ agents:
- TRAVIS_BRANCH=${GIT_BRANCH}
- NETWORK=$NETWORK
- VERSION=$VERSION
+ - BUILD_NUMBER=$BUILD_NUMBER
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
volumes:
- /var/run/docker.sock:/var/run/docker.sock
+ - name: docker-ubuntu-signer
+ dockerFilePath: docker/build/docker.ubuntu.Dockerfile
+ image: algorand/go-algorand-docker-linux-ubuntu
+ version: scripts/configure_dev-deps.sh
+ env:
+ - TRAVIS_BRANCH=${GIT_BRANCH}
+ - NETWORK=$NETWORK
+ - VERSION=$VERSION
+ - BUILD_NUMBER=$BUILD_NUMBER
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ volumes:
+ - $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent
+ - $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
tasks:
- task: docker.Make
@@ -94,7 +113,7 @@ tasks:
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-amd64
globSpecs:
- - tmp/node_pkgs/**
+ - tmp/node_pkgs/**/*
- gen/devnet/genesis.json
- gen/testnet/genesis.json
- gen/mainnet/genesis.json
@@ -103,7 +122,7 @@ tasks:
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64
globSpecs:
- - tmp/node_pkgs/**
+ - tmp/node_pkgs/**/*
- gen/devnet/genesis.json
- gen/testnet/genesis.json
- gen/mainnet/genesis.json
@@ -112,7 +131,7 @@ tasks:
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm64
globSpecs:
- - tmp/node_pkgs/**
+ - tmp/node_pkgs/**/*
- gen/devnet/genesis.json
- gen/testnet/genesis.json
- gen/mainnet/genesis.json
@@ -121,7 +140,7 @@ tasks:
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm
globSpecs:
- - tmp/node_pkgs/**
+ - tmp/node_pkgs/**/*
- gen/devnet/genesis.json
- gen/testnet/genesis.json
- gen/mainnet/genesis.json
@@ -130,7 +149,10 @@ tasks:
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/packages
globSpecs:
- - tmp/node_pkgs/**
+ - tmp/node_pkgs/**/*
+ - gen/devnet/genesis.json
+ - gen/testnet/genesis.json
+ - gen/mainnet/genesis.json
# Unstash tasks
- task: stash.Unstash
@@ -159,6 +181,11 @@ tasks:
agent: docker-ubuntu
target: mule-package-docker
+ - task: docker.Make
+ name: docker-sign
+ agent: docker-ubuntu-signer
+ target: mule-sign
+
jobs:
build-linux-amd64:
tasks:
@@ -172,7 +199,6 @@ jobs:
tasks:
- docker.Make.build.arm
- stash.Stash.linux-arm
-
package-linux-amd64:
tasks:
- stash.Unstash.linux-amd64
@@ -185,7 +211,11 @@ jobs:
tasks:
- stash.Unstash.packages
- docker.Make.archive.amd64
-
package-docker:
tasks:
- docker.Make.docker-image
+ sign-signer:
+ tasks:
+ - stash.Unstash.packages
+ - docker.Make.docker-sign
+ - stash.Stash.packages
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index 50662a93d..2ec92c2b8 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -47,7 +47,6 @@ echo "Test output can be found in ${TEMPDIR}"
export BINDIR=${TEMPDIR}/bin
export DATADIR=${TEMPDIR}/data
-RUNNING_COUNT=0
function reset_dirs() {
rm -rf ${BINDIR}
@@ -61,7 +60,7 @@ function reset_dirs() {
reset_dirs
echo Killing all instances and installing current build
-pkill -u $(whoami) -x algod || true
+pkill -u "$(whoami)" -x algod || true
if ! ${NO_BUILD} ; then
./scripts/local_install.sh -c ${CHANNEL} -p ${BINDIR} -d ${DATADIR}
@@ -82,11 +81,14 @@ cd "${SCRIPT_PATH}"
./timeout 200 ./e2e_basic_start_stop.sh
-python3 -m venv ${TEMPDIR}/ve
-. ${TEMPDIR}/ve/bin/activate
-${TEMPDIR}/ve/bin/pip3 install --upgrade pip
-${TEMPDIR}/ve/bin/pip3 install --upgrade py-algorand-sdk cryptography
-${TEMPDIR}/ve/bin/python3 e2e_client_runner.py e2e_subs/*.sh
+python3 -m venv "${TEMPDIR}/ve"
+. "${TEMPDIR}/ve/bin/activate"
+"${TEMPDIR}/ve/bin/pip3" install --upgrade pip
+"${TEMPDIR}/ve/bin/pip3" install --upgrade py-algorand-sdk cryptography
+"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py "$SRCROOT"/test/scripts/e2e_subs/*.sh
+for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do
+ "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py --version "$(basename "$vdir")" "$vdir"/*.sh
+done
deactivate
# Export our root temp folder as 'TESTDIR' for tests to use as their root test folder
@@ -97,7 +99,7 @@ export SRCROOT=${SRCROOT}
./e2e_go_tests.sh ${GO_TEST_ARGS}
-rm -rf ${TEMPDIR}
+rm -rf "${TEMPDIR}"
if ! ${NO_BUILD} ; then
rm -rf ${PKG_ROOT}
diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py
index c15d24399..7b1d2d069 100755
--- a/test/scripts/e2e_client_runner.py
+++ b/test/scripts/e2e_client_runner.py
@@ -3,10 +3,11 @@
# Create a local private network and run functional tests on it in parallel.
#
# Each test is run as `ftest.sh wallet_name` for a wallet with a
-# million Algos. A test should carefully specify that wallet (or
-# wallets created for the test) for all actions. Tests are expected to
-# not be CPU intensive, merely setting up a handful of transactions
-# and executing them against the network, exercising aspects of the
+# million Algos, with the current directory set to the top of the
+# repo. A test should carefully specify that wallet (or wallets
+# created for the test) for all actions. Tests are expected to not be
+# CPU intensive, merely setting up a handful of transactions and
+# executing them against the network, exercising aspects of the
# network and the goal tools.
#
# Usage:
@@ -34,6 +35,9 @@ import algosdk
logger = logging.getLogger(__name__)
+scriptdir = os.path.dirname(os.path.realpath(__file__))
+repodir = os.path.join(scriptdir, "..", "..")
+
# less than 16kB of log we show the whole thing, otherwise the last 16kB
LOG_WHOLE_CUTOFF = 1024 * 16
@@ -104,7 +108,7 @@ def _script_thread_inner(runset, scriptname):
runset.done(scriptname, False, time.time() - start)
return
logger.info('starting %s', scriptname)
- p = subprocess.Popen([scriptname, walletname], env=env, stdout=cmdlog, stderr=subprocess.STDOUT)
+ p = subprocess.Popen([scriptname, walletname], env=env, cwd=repodir, stdout=cmdlog, stderr=subprocess.STDOUT)
cmdlog.close()
runset.running(scriptname, p)
timeout = read_script_for_timeout(scriptname)
@@ -362,6 +366,7 @@ def main():
ap.add_argument('--keep-temps', default=False, action='store_true', help='if set, keep all the test files')
ap.add_argument('--timeout', default=500, type=int, help='integer seconds to wait for the scripts to run')
ap.add_argument('--verbose', default=False, action='store_true')
+ ap.add_argument('--version', default="Future")
args = ap.parse_args()
if args.verbose:
@@ -387,13 +392,9 @@ def main():
netdir = os.path.join(tempdir, 'net')
env['NETDIR'] = netdir
- gopath = os.getenv('GOPATH')
- if not gopath:
- logger.error('$GOPATH not set')
- sys.exit(1)
-
retcode = 0
- xrun(['goal', 'network', 'create', '-r', netdir, '-n', 'tbd', '-t', os.path.join(gopath, 'src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json')], timeout=90)
+ capv = args.version.capitalize()
+ xrun(['goal', 'network', 'create', '-r', netdir, '-n', 'tbd', '-t', os.path.join(repodir, f'test/testdata/nettemplates/TwoNodes50Each{capv}.json')], timeout=90)
xrun(['goal', 'network', 'start', '-r', netdir], timeout=90)
atexit.register(goal_network_stop, netdir, env)
diff --git a/test/scripts/e2e_subs/asset-misc.sh b/test/scripts/e2e_subs/asset-misc.sh
index d9e7d7eee..839f33ec8 100755
--- a/test/scripts/e2e_subs/asset-misc.sh
+++ b/test/scripts/e2e_subs/asset-misc.sh
@@ -16,7 +16,9 @@ ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTD=$(${gcmd} account new|awk '{ print $6 }')
-${gcmd} asset create --creator ${ACCOUNT} --name asset-misc --unitname amisc --total 1000000000000
+ASSET_NAME='Birlot : décollage vs. ࠶🦪'
+
+${gcmd} asset create --creator ${ACCOUNT} --name "${ASSET_NAME}" --unitname amisc --total 1000000000000
ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --unitname amisc|grep 'Asset ID'|awk '{ print $3 }')
@@ -35,7 +37,7 @@ ${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNT} -t ${ACCOUNTB} -a 1000
# asset send some and close the rest
${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNTB} -t ${ACCOUNTC} -a 100 --close-to ${ACCOUNTD}
-if ${gcmd} account info -a ${ACCOUNTC} |grep asset-misc|grep -c -q 'balance 100 '; then
+if ${gcmd} account info -a ${ACCOUNTC} |grep "${ASSET_NAME}"|grep -c -q 'balance 100 '; then
echo ok
else
date '+asset-misc asset balance error %Y%m%d_%H%M%S'
diff --git a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
index 62b8a5956..7978de676 100755
--- a/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
+++ b/test/scripts/e2e_subs/dynamic-fee-teal-test.sh
@@ -22,7 +22,7 @@ LEASE=uImiLf+mqOqs0BFsqIUHBh436N/z964X50e3P9Ii4ac=
${gcmd} clerk send -a 100000000 -f ${ACCOUNT} -t ${ACCOUNTB}
# Generate the template
-algotmpl -d ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/ dynamic-fee --amt=1000000 --cls=${ACCOUNTC} --to=${ACCOUNTD} --fv=1 --lv=1001 --lease=${LEASE} > ${TEMPDIR}/dynamic.teal
+algotmpl -d tools/teal/templates/ dynamic-fee --amt=1000000 --cls=${ACCOUNTC} --to=${ACCOUNTD} --fv=1 --lv=1001 --lease=${LEASE} > ${TEMPDIR}/dynamic.teal
#
# Fee will come from ACCOUNT in the first transaction
diff --git a/test/scripts/e2e_subs/e2e-teal.sh b/test/scripts/e2e_subs/e2e-teal.sh
index 367e4186d..a71c78a10 100755
--- a/test/scripts/e2e_subs/e2e-teal.sh
+++ b/test/scripts/e2e_subs/e2e-teal.sh
@@ -21,7 +21,7 @@ ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }')
TIMEOUT_ROUND=$((${ROUND} + 14))
# timeout after 14 rounds
-python ${GOPATH}/src/github.com/algorand/go-algorand/data/transactions/logic/tlhc.py --from ${ACCOUNT} --to ${ACCOUNTB} --timeout-round ${TIMEOUT_ROUND} > ${TEMPDIR}/tlhc.teal 2> ${TEMPDIR}/tlhc.teal.secret
+python data/transactions/logic/tlhc.py --from ${ACCOUNT} --to ${ACCOUNTB} --timeout-round ${TIMEOUT_ROUND} > ${TEMPDIR}/tlhc.teal 2> ${TEMPDIR}/tlhc.teal.secret
cat ${TEMPDIR}/tlhc.teal
@@ -117,4 +117,45 @@ ${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNTM}
${gcmd} clerk send --amount 200000 --from ${ACCOUNTM} --to ${ACCOUNTC} -L ${TEMPDIR}/mtrue.lsig
+echo "#pragma version 1" | ${gcmd} clerk compile -
+echo "#pragma version 2" | ${gcmd} clerk compile -
+
+set +o pipefail
+# The compile will fail, but this tests against a regression in which compile SEGV'd
+echo "#pragma version 100" | ${gcmd} clerk compile - 2>&1 | grep "unsupported version"
+set -o pipefail
+
+
+# Compile a v3 version of same program, fund it, use it to lsig.
+cat >${TEMPDIR}/true3.teal<<EOF
+#pragma version 3
+int 1
+assert
+int 1
+EOF
+
+ACCOUNT_TRUE=$(${gcmd} clerk compile -n ${TEMPDIR}/true3.teal|awk '{ print $2 }')
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+${gcmd} clerk send --amount 10 --from-program ${TEMPDIR}/true3.teal --to ${ACCOUNTB}
+
+
+# However, ensure it fails if marked v2. We have to be tricky here,
+# since the compiler won't let us compile this, we rewrite the first
+# byte to 2, then compute the new account, and try to use it. But since it
+# uses assert in a v2 program, it fails.
+
+${gcmd} clerk compile ${TEMPDIR}/true3.teal -o ${TEMPDIR}/true3.lsig
+cp ${TEMPDIR}/true3.lsig ${TEMPDIR}/true2.lsig
+printf '\x02' | dd of=${TEMPDIR}/true2.lsig bs=1 seek=0 count=1 conv=notrunc
+
+# compute the escrow account for the frankenstein program
+ACCOUNT_TRUE=$(python -c 'import algosdk, sys; print(algosdk.logic.address(sys.stdin.buffer.read()))' < ${TEMPDIR}/true2.lsig)
+# fund that escrow account
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+# try, and fail, to lsig with it
+set +o pipefail
+${gcmd} clerk send --amount 10 --from-program-bytes ${TEMPDIR}/true2.lsig --to ${ACCOUNTB} 2>&1 | grep "illegal opcode"
+set -o pipefail
+
+
date '+e2e_teal OK %Y%m%d_%H%M%S'
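
The appended section of e2e-teal.sh leans on the fact that a logic-sig escrow address is derived purely from the compiled program bytes, so patching the leading version byte from 3 to 2 yields a different program and therefore a different escrow address, and the v2 interpreter then rejects the assert opcode as illegal. A hedged Go sketch of that address derivation (SHA-512/256 over a "Program" domain-separation prefix plus the bytecode, then the usual base32-plus-checksum address encoding, which is what the python algosdk.logic.address call above computes) could be:

package main

import (
	"crypto/sha512"
	"encoding/base32"
	"fmt"
)

// escrowAddress derives a logic-sig escrow address from compiled TEAL:
// digest = SHA-512/256("Program" || bytecode); address = base32(digest ||
// last 4 bytes of SHA-512/256(digest)), without padding.
func escrowAddress(program []byte) string {
	digest := sha512.Sum512_256(append([]byte("Program"), program...))
	checksum := sha512.Sum512_256(digest[:])
	addr := append(digest[:], checksum[28:]...)
	return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(addr)
}

func main() {
	// Hypothetical compiled bytes: version byte followed by the program body.
	v3 := []byte{0x03, 0x20, 0x01, 0x01, 0x22}
	v2 := append([]byte{0x02}, v3[1:]...) // same body, patched version byte

	// Different bytecode means a different escrow account, which is why the
	// frankenstein program has to be funded separately before the failing send.
	fmt.Println(escrowAddress(v3) != escrowAddress(v2))
}
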
diff --git a/test/scripts/e2e_subs/htlc-teal-test.sh b/test/scripts/e2e_subs/htlc-teal-test.sh
index e450a7fcf..9dfd62a57 100755
--- a/test/scripts/e2e_subs/htlc-teal-test.sh
+++ b/test/scripts/e2e_subs/htlc-teal-test.sh
@@ -17,7 +17,7 @@ ZERO_ADDRESS=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ
LEASE=YmxhaCBibGFoIGxlYXNlIHdoYXRldmVyIGJsYWghISE=
# Generate the template
-algotmpl -d ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/ htlc --fee=2000 --hashfn="sha256" --hashimg="9S+9MrKzuG/4jvbEkGKChfSCrxXdyylUH5S89Saj9sc=" --own=${ACCOUNT} --rcv=${ACCOUNTB} --timeout=100000 > ${TEMPDIR}/atomic.teal
+algotmpl -d tools/teal/templates/ htlc --fee=2000 --hashfn="sha256" --hashimg="9S+9MrKzuG/4jvbEkGKChfSCrxXdyylUH5S89Saj9sc=" --own=${ACCOUNT} --rcv=${ACCOUNTB} --timeout=100000 > ${TEMPDIR}/atomic.teal
# Compile the template
CONTRACT=$(${gcmd} clerk compile ${TEMPDIR}/atomic.teal | awk '{ print $2 }')
diff --git a/test/scripts/e2e_subs/keyreg-teal-test.sh b/test/scripts/e2e_subs/keyreg-teal-test.sh
index acf8196c8..806640483 100755
--- a/test/scripts/e2e_subs/keyreg-teal-test.sh
+++ b/test/scripts/e2e_subs/keyreg-teal-test.sh
@@ -24,7 +24,7 @@ FEE=100000
echo "generating new delegate and participation keys for newly-funded account ${ACCOUNTA}"
${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTA} -a 1000000
DELKEY=$(algokey generate -f ${TEMPDIR}/delegate.keyregkey | grep "Public key" | awk '{ print $3 }')
-algotmpl -d ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/ delegate-key-registration --fee ${FEE} --dur ${DUR} --period ${PERIOD} --expire ${EXPIRE} --auth ${DELKEY} --lease ${LEASE} > ${TEMPDIR}/delegate.teal
+algotmpl -d tools/teal/templates/ delegate-key-registration --fee ${FEE} --dur ${DUR} --period ${PERIOD} --expire ${EXPIRE} --auth ${DELKEY} --lease ${LEASE} > ${TEMPDIR}/delegate.teal
${gcmd} clerk compile -a ${ACCOUNTA} -s -o ${TEMPDIR}/kr.lsig ${TEMPDIR}/delegate.teal
RES=$(${gcmd} account addpartkey -a ${ACCOUNTA} --roundFirstValid 0 --roundLastValid 100)
@@ -115,7 +115,7 @@ FEE=100000
${gcmd} clerk send --from ${ACCOUNT} --to ${ACCOUNTB} -a 1000000
DELKEY=$(algokey generate -f ${TEMPDIR}/delegate.keyregkey | grep "Public key" | awk '{ print $3 }')
-algotmpl -d ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/ delegate-key-registration --fee ${FEE} --dur ${DUR} --period ${PERIOD} --expire ${EXPIRE} --auth ${DELKEY} --lease ${LEASE} > ${TEMPDIR}/delegate.teal
+algotmpl -d tools/teal/templates/ delegate-key-registration --fee ${FEE} --dur ${DUR} --period ${PERIOD} --expire ${EXPIRE} --auth ${DELKEY} --lease ${LEASE} > ${TEMPDIR}/delegate.teal
${gcmd} clerk compile -a ${ACCOUNTB} -s -o ${TEMPDIR}/kr.lsig ${TEMPDIR}/delegate.teal
RES=$(${gcmd} account addpartkey -a ${ACCOUNTB} --roundFirstValid 0 --roundLastValid 100)
diff --git a/test/scripts/e2e_subs/limit-swap-test.sh b/test/scripts/e2e_subs/limit-swap-test.sh
index dd3d6fef9..17fa337d5 100755
--- a/test/scripts/e2e_subs/limit-swap-test.sh
+++ b/test/scripts/e2e_subs/limit-swap-test.sh
@@ -26,7 +26,7 @@ echo "closeout part a, Algo trader"
ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }')
TIMEOUT_ROUND=$((${ROUND} + 2))
-sed s/TMPL_ASSET/${ASSET_ID}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/limit-order-a.teal.tmpl | sed s/TMPL_SWAPN/31337/g | sed s/TMPL_SWAPD/137/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-a.teal
+sed s/TMPL_ASSET/${ASSET_ID}/g < tools/teal/templates/limit-order-a.teal.tmpl | sed s/TMPL_SWAPN/31337/g | sed s/TMPL_SWAPD/137/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-a.teal
ACCOUNT_ALGO_TRADER=$(${gcmd} clerk compile ${TEMPDIR}/limit-order-a.teal -o ${TEMPDIR}/limit-order-a.tealc|awk '{ print $2 }')
@@ -45,7 +45,7 @@ ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }')
SETUP_ROUND=$((${ROUND} + 10))
TIMEOUT_ROUND=$((${SETUP_ROUND} + 1))
-sed s/TMPL_ASSET/${ASSET_ID}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/limit-order-b.teal.tmpl | sed s/TMPL_SWAPN/137/g | sed s/TMPL_SWAPD/31337/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-b.teal
+sed s/TMPL_ASSET/${ASSET_ID}/g < tools/teal/templates/limit-order-b.teal.tmpl | sed s/TMPL_SWAPN/137/g | sed s/TMPL_SWAPD/31337/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-b.teal
ACCOUNT_ASSET_TRADER=$(${gcmd} clerk compile ${TEMPDIR}/limit-order-b.teal -o ${TEMPDIR}/limit-order-b.tealc|awk '{ print $2 }')
@@ -94,7 +94,7 @@ ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }')
SETUP_ROUND=$((${ROUND} + 199))
TIMEOUT_ROUND=$((${SETUP_ROUND} + 1))
-sed s/TMPL_ASSET/${ASSET_ID}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/limit-order-b.teal.tmpl | sed s/TMPL_SWAPN/137/g | sed s/TMPL_SWAPD/31337/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-b.teal
+sed s/TMPL_ASSET/${ASSET_ID}/g < tools/teal/templates/limit-order-b.teal.tmpl | sed s/TMPL_SWAPN/137/g | sed s/TMPL_SWAPD/31337/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-b.teal
ACCOUNT_ASSET_TRADER=$(${gcmd} clerk compile ${TEMPDIR}/limit-order-b.teal -o ${TEMPDIR}/limit-order-b.tealc|awk '{ print $2 }')
@@ -115,7 +115,7 @@ ${gcmd} asset send --assetid ${ASSET_ID} -f ${ACCOUNT} -t ${ACCOUNT_ASSET_TRADER
echo "make Algo trader"
-sed s/TMPL_ASSET/${ASSET_ID}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/limit-order-a.teal.tmpl | sed s/TMPL_SWAPN/31337/g | sed s/TMPL_SWAPD/137/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-a.teal
+sed s/TMPL_ASSET/${ASSET_ID}/g < tools/teal/templates/limit-order-a.teal.tmpl | sed s/TMPL_SWAPN/31337/g | sed s/TMPL_SWAPD/137/g | sed s/TMPL_TIMEOUT/${TIMEOUT_ROUND}/g | sed s/TMPL_OWN/${ACCOUNT}/g | sed s/TMPL_FEE/100000/g | sed s/TMPL_MINTRD/10000/g > ${TEMPDIR}/limit-order-a.teal
ACCOUNT_ALGO_TRADER=$(${gcmd} clerk compile ${TEMPDIR}/limit-order-a.teal -o ${TEMPDIR}/limit-order-a.tealc|awk '{ print $2 }')
diff --git a/test/scripts/e2e_subs/periodic-teal-test.sh b/test/scripts/e2e_subs/periodic-teal-test.sh
index 8880af143..7ec6512ac 100755
--- a/test/scripts/e2e_subs/periodic-teal-test.sh
+++ b/test/scripts/e2e_subs/periodic-teal-test.sh
@@ -16,7 +16,7 @@ ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
ZERO_ADDRESS=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ
LEASE=YmxhaCBibGFoIGxlYXNlIHdoYXRldmVyIGJsYWghISE=
-sed s/TMPL_RCV/${ACCOUNTB}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/periodic-payment-escrow.teal.tmpl | sed s/TMPL_PERIOD/5/g | sed s/TMPL_DUR/2/g | sed s/TMPL_AMT/100000/g | sed s/TMPL_LEASE/${LEASE}/g | sed s/TMPL_TIMEOUT/16/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/periodic.teal
+sed s/TMPL_RCV/${ACCOUNTB}/g < tools/teal/templates/periodic-payment-escrow.teal.tmpl | sed s/TMPL_PERIOD/5/g | sed s/TMPL_DUR/2/g | sed s/TMPL_AMT/100000/g | sed s/TMPL_LEASE/${LEASE}/g | sed s/TMPL_TIMEOUT/16/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/periodic.teal
ACCOUNT_PERIODIC=$(${gcmd} clerk compile ${TEMPDIR}/periodic.teal -o ${TEMPDIR}/periodic.tealc|awk '{ print $2 }')
diff --git a/test/scripts/e2e_subs/teal-split-test.sh b/test/scripts/e2e_subs/teal-split-test.sh
index 2377574fe..6a364ff4d 100755
--- a/test/scripts/e2e_subs/teal-split-test.sh
+++ b/test/scripts/e2e_subs/teal-split-test.sh
@@ -16,7 +16,7 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }')
-sed s/TMPL_RCV1/${ACCOUNTB}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/split.teal.tmpl | sed s/TMPL_RCV2/${ACCOUNTC}/g | sed s/TMPL_RAT1/60/g | sed s/TMPL_RAT2/40/g | sed s/TMPL_MINPAY/100000/g | sed s/TMPL_TIMEOUT/4/g | sed s/TMPL_OWN/${ACCOUNTB}/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/split.teal
+sed s/TMPL_RCV1/${ACCOUNTB}/g < tools/teal/templates/split.teal.tmpl | sed s/TMPL_RCV2/${ACCOUNTC}/g | sed s/TMPL_RAT1/60/g | sed s/TMPL_RAT2/40/g | sed s/TMPL_MINPAY/100000/g | sed s/TMPL_TIMEOUT/4/g | sed s/TMPL_OWN/${ACCOUNTB}/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/split.teal
ACCOUNT_SPLIT=$(${gcmd} clerk compile ${TEMPDIR}/split.teal -o ${TEMPDIR}/split.tealc|awk '{ print $2 }')
diff --git a/test/scripts/e2e_subs/v24/teal-v2-only.sh b/test/scripts/e2e_subs/v24/teal-v2-only.sh
new file mode 100755
index 000000000..4b4c87d4c
--- /dev/null
+++ b/test/scripts/e2e_subs/v24/teal-v2-only.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+date '+teal-v2-only start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+# prints:
+# Created new account with address UCTHHNBEAUWHDQWQI5DGQCTB7AR4CSVNU5YNPROAYQIT3Y3LKVDFAA5M6Q
+ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
+
+cat >${TEMPDIR}/true.teal<<EOF
+#pragma version 2
+int 1
+EOF
+
+${gcmd} clerk compile -o ${TEMPDIR}/true.lsig -s -a ${ACCOUNT} ${TEMPDIR}/true.teal
+
+${gcmd} clerk send -f ${ACCOUNT} -t ${ACCOUNTB} -a 1000000 -L ${TEMPDIR}/true.lsig
+
+${gcmd} clerk send -f ${ACCOUNT} -t ${ACCOUNTB} -a 1000000 -o ${TEMPDIR}/one.tx
+
+${gcmd} clerk sign -L ${TEMPDIR}/true.lsig -i ${TEMPDIR}/one.tx -o ${TEMPDIR}/one.stx
+
+${gcmd} clerk rawsend -f ${TEMPDIR}/one.stx
+
+${gcmd} clerk dryrun -t ${TEMPDIR}/one.stx
+
+ACCOUNT_TRUE=$(${gcmd} clerk compile -n ${TEMPDIR}/true.teal|awk '{ print $2 }')
+
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+
+${gcmd} clerk send --amount 10 --from-program ${TEMPDIR}/true.teal --to ${ACCOUNTB}
+
+${gcmd} clerk send --amount 10 --from ${ACCOUNT_TRUE} --to ${ACCOUNTB} -o ${TEMPDIR}/true.tx
+
+${gcmd} clerk sign -i ${TEMPDIR}/true.tx -o ${TEMPDIR}/true.stx --program ${TEMPDIR}/true.teal
+
+${gcmd} clerk rawsend -f ${TEMPDIR}/true.stx
+
+${gcmd} clerk inspect ${TEMPDIR}/true.stx
+
+${gcmd} clerk compile -D ${TEMPDIR}/true.lsig
+
+echo "#pragma version 1" | ${gcmd} clerk compile -
+echo "#pragma version 2" | ${gcmd} clerk compile -
+
+
+
+set +o pipefail
+# v3 opcodes with v2 pragma fails
+printf "#pragma version 2\nint 1\nassert" | ${gcmd} clerk compile - 2>&1 | grep "assert opcode was introduced"
+set -o pipefail
+
+# Although we are in an earlier version, v3 can be compiled; it just can't be used.
+cat >${TEMPDIR}/true3.teal<<EOF
+#pragma version 3
+int 1
+EOF
+
+
+ACCOUNT_TRUE=$(${gcmd} clerk compile -n ${TEMPDIR}/true3.teal|awk '{ print $2 }')
+
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+
+set +o pipefail
+${gcmd} clerk send --amount 10 --from-program ${TEMPDIR}/true3.teal --to ${ACCOUNTB} 2>&1 | grep "LogicSig.Logic version too new"
+set -o pipefail
+
+
+# Now, ensure it still fails, even if using the v2 program, if the
+# assert opcode is added. (That is, failure based on opcode choice,
+# not just on the version marker.)
+
+${gcmd} clerk compile ${TEMPDIR}/true.teal -o ${TEMPDIR}/true.lsig
+# append "assert" opcode to the true program
+(cat ${TEMPDIR}/true.lsig; printf '\x72') > ${TEMPDIR}/assert.lsig
+# compute the escrow account for the asserting program
+ACCOUNT_TRUE=$(python -c 'import algosdk, sys; print(algosdk.logic.address(sys.stdin.buffer.read()))' < ${TEMPDIR}/assert.lsig)
+# fund that escrow account
+${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE}
+# try, and fail, to lsig with the assert program
+set +o pipefail
+${gcmd} clerk send --amount 10 --from-program-bytes ${TEMPDIR}/assert.lsig --to ${ACCOUNTB} 2>&1 | grep "illegal opcode"
+set -o pipefail
+
+
+
+date '+teal-v2-only OK %Y%m%d_%H%M%S'
diff --git a/test/testdata/nettemplates/TwoNodes50EachV24.json b/test/testdata/nettemplates/TwoNodes50EachV24.json
new file mode 100644
index 000000000..b181da21c
--- /dev/null
+++ b/test/testdata/nettemplates/TwoNodes50EachV24.json
@@ -0,0 +1,29 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "https://github.com/algorandfoundation/specs/tree/3a83c4c743f8b17adfd73944b4319c25722a6782",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 50,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 50,
+ "Online": true
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [{ "Name": "Wallet1", "ParticipationOnly": false }]
+ },
+ {
+ "Name": "Node",
+ "Wallets": [{ "Name": "Wallet2", "ParticipationOnly": false }]
+ }
+ ]
+}