author    John Lee <64482439+algojohnlee@users.noreply.github.com>  2022-07-02 10:24:59 -0400
committer GitHub <noreply@github.com>  2022-07-02 10:24:59 -0400
commit    d867a09450efd95c186da572ed55b3698864a3d9 (patch)
tree      200a4ea932ea9eeda15d77c805637587c4c186f6
parent    e2f78420b7751cf838de3effe20a195029401e0d (diff)
parent    641a1f1316904f78895fb9080532f04fdb0bd5fd (diff)
Merge pull request #4189 from Algo-devops-service/relstable3.8.0 (tag: v3.8.0-stable)
go-algorand 3.8.0-stable Release PR
-rw-r--r--  .circleci/config.yml | 22
-rw-r--r--  buildnumber.dat | 2
-rw-r--r--  cmd/buildtools/genesis.go | 21
-rw-r--r--  cmd/catchpointdump/net.go | 142
-rw-r--r--  cmd/goal/account.go | 293
-rw-r--r--  cmd/pingpong/runCmd.go | 18
-rw-r--r--  config/consensus.go | 3
-rw-r--r--  config/version.go | 2
-rw-r--r--  daemon/algod/server.go | 8
-rw-r--r--  data/bookkeeping/genesis.go | 45
-rw-r--r--  data/bookkeeping/genesis_test.go | 157
-rw-r--r--  data/transactions/logic/README.md | 2
-rw-r--r--  data/transactions/logic/TEAL_opcodes.md | 21
-rw-r--r--  data/transactions/logic/assembler.go | 120
-rw-r--r--  data/transactions/logic/assembler_test.go | 55
-rw-r--r--  data/transactions/logic/doc.go | 8
-rw-r--r--  data/transactions/logic/eval.go | 142
-rw-r--r--  data/transactions/logic/evalStateful_test.go | 2
-rw-r--r--  data/transactions/logic/eval_test.go | 234
-rw-r--r--  data/transactions/logic/jsonspec.md | 6
-rw-r--r--  data/transactions/logic/jsonspec_test.go | 4
-rw-r--r--  data/transactions/logic/langspec.json | 25
-rw-r--r--  data/transactions/logic/opcodes.go | 44
-rw-r--r--  data/transactions/logic/pairing_test.go | 29
-rw-r--r--  data/transactions/logic/teal.tmLanguage.json | 2
-rw-r--r--  data/transactions/transaction.go | 6
-rw-r--r--  ledger/.gitignore | 1
-rw-r--r--  ledger/accountdb_test.go | 4
-rw-r--r--  ledger/acctupdates_test.go | 10
-rw-r--r--  ledger/catchpointtracker_test.go | 4
-rw-r--r--  ledger/internal/apptxn_test.go | 6
-rw-r--r--  ledger/internal/eval_blackbox_test.go | 12
-rw-r--r--  ledger/ledgercore/error.go | 2
-rw-r--r--  ledger/testing/randomAccounts.go | 96
-rw-r--r--  libgoal/libgoal.go | 26
-rw-r--r--  libgoal/participation.go | 208
-rw-r--r--  libgoal/transactions.go | 109
-rw-r--r--  logging/log.go | 6
-rw-r--r--  logging/telemetryConfig.go | 10
-rw-r--r--  logging/telemetryhook.go | 2
-rw-r--r--  logging/telemetryspec/event.go | 22
-rw-r--r--  netdeploy/remote/deployedNetwork.go | 2
-rw-r--r--  network/requestTracker.go | 10
-rw-r--r--  network/wsNetwork.go | 50
-rw-r--r--  node/node.go | 81
-rwxr-xr-x  scripts/release/mule/sign/sign.sh | 75
-rw-r--r--  shared/pingpong/accounts.go | 40
-rw-r--r--  shared/pingpong/config.go | 4
-rw-r--r--  shared/pingpong/pingpong.go | 57
-rw-r--r--  test/e2e-go/cli/goal/expect/pingpongTest.exp | 44
-rw-r--r--  test/e2e-go/features/participation/accountParticipationTransitions_test.go | 4
-rw-r--r--  test/e2e-go/features/participation/onlineOfflineParticipation_test.go | 12
-rw-r--r--  test/e2e-go/features/participation/overlappingParticipationKeys_test.go | 69
-rw-r--r--  test/e2e-go/features/participation/participationExpiration_test.go | 8
-rw-r--r--  test/e2e-go/features/transactions/onlineStatusChange_test.go | 19
-rw-r--r--  test/e2e-go/stress/transactions/createManyAndGoOnline_test.go | 4
-rw-r--r--  test/heapwatch/bwstart.sh | 4
-rwxr-xr-x  test/heapwatch/start.sh | 4
-rw-r--r--  test/muleCI/mule.yaml | 42
-rwxr-xr-x  test/scripts/e2e_subs/goal-partkey-commands.sh | 100
-rwxr-xr-x  test/scripts/e2e_subs/goal-partkey-information.sh | 47
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/Makefile | 15
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py | 32
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/genesis.json | 64
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/net.json | 232
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/node.json | 10
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json | 5
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/recipe.json | 7
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/relay.json | 11
-rw-r--r--  test/testdata/deployednettemplates/recipes/alphanet/topology.json | 32
70 files changed, 2088 insertions, 927 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index b911cafed..56b1c9d80 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -217,10 +217,12 @@ commands:
- restore_libsodium
- restore_cache:
keys:
- - 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ - 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
- restore_cache:
keys:
- - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
+ - 'go-cache-v3-{{ arch }}-'
- run:
name: scripts/travis/build.sh --make_debug
command: |
@@ -233,11 +235,11 @@ commands:
scripts/travis/build.sh --make_debug
- cache_libsodium
- save_cache:
- key: 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ key: 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
paths:
- << parameters.build_dir >>/go/pkg/mod
- save_cache:
- key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
+ key: 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
paths:
- tmp/go-cache
- persist_to_workspace:
@@ -257,7 +259,7 @@ commands:
mkdir -p tmp
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- save_cache:
- key: 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
+ key: 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
paths:
- crypto/libs
@@ -271,7 +273,7 @@ commands:
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- restore_cache:
keys:
- - 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}'
+ - 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
generic_test:
description: Run build tests from build workspace, for re-use by different architectures
@@ -301,7 +303,9 @@ commands:
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json
- restore_cache:
keys:
- - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
+ - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
+ - 'go-cache-v3-{{ arch }}-'
- run:
name: Run build tests
no_output_timeout: << parameters.no_output_timeout >>
@@ -333,10 +337,6 @@ commands:
root: << parameters.result_path >>
paths:
- << parameters.result_subdir >>
- - save_cache:
- key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}'
- paths:
- - tmp/go-cache
upload_coverage:
description: Collect coverage reports and upload them
diff --git a/buildnumber.dat b/buildnumber.dat
index 0cfbf0888..573541ac9 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-2
+0
diff --git a/cmd/buildtools/genesis.go b/cmd/buildtools/genesis.go
index 079c76d61..98cb60ca6 100644
--- a/cmd/buildtools/genesis.go
+++ b/cmd/buildtools/genesis.go
@@ -152,8 +152,7 @@ var dumpGenesisHashCmd = &cobra.Command{
os.Exit(1)
}
- hash := crypto.HashObj(genesis)
- fmt.Print(hash.String())
+ fmt.Print(genesis.Hash().String())
},
}
@@ -237,7 +236,7 @@ func ensureReleaseGenesis(src bookkeeping.Genesis, releaseFile string) (err erro
return fmt.Errorf("error saving file: %v", err)
}
- hash := crypto.HashObj(releaseGenesis)
+ hash := releaseGenesis.Hash()
err = ioutil.WriteFile(releaseFileHash, []byte(hash.String()), 0666)
if err != nil {
return fmt.Errorf("error saving hash file '%s': %v", releaseFileHash, err)
@@ -261,8 +260,20 @@ func verifyReleaseGenesis(src bookkeeping.Genesis, releaseFile string) (updateGe
func verifyGenesisHashes(src, release bookkeeping.Genesis, hashFile string) (err error) {
src.Timestamp = release.Timestamp
- srcHash := crypto.HashObj(src)
- releaseHash := crypto.HashObj(release)
+ srcHash := src.Hash()
+ releaseHash := release.Hash()
+
+ srcHashCrypto := crypto.HashObj(src)
+ releaseHashCrypto := crypto.HashObj(release)
+
+ if srcHash != srcHashCrypto {
+ return fmt.Errorf("source hashes differ - our genesis.json hashing function isn't consistent")
+ }
+
+ if releaseHash != releaseHashCrypto {
+ return fmt.Errorf("release hashes differ - our genesis.json hashing function isn't consistent")
+ }
+
if srcHash != releaseHash {
return fmt.Errorf("source and release hashes differ - genesis.json may have diverge from released version")
}
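
A note on the guard added above: it pins down the refactor from crypto.HashObj to the new Genesis.Hash() method by asserting that both produce the same digest before source and release are compared. A minimal standalone sketch of that invariant, assuming go-algorand's bookkeeping and crypto packages:

// checkHashConsistency mirrors the guard above: Genesis.Hash() and a direct
// crypto.HashObj must agree for the same Genesis value.
func checkHashConsistency(g bookkeeping.Genesis) error {
	if g.Hash() != crypto.HashObj(g) {
		return fmt.Errorf("genesis hashing is inconsistent for %s", g.ID())
	}
	return nil
}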
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index e31be81fd..113ab7899 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -18,6 +18,7 @@ package main
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -191,7 +192,46 @@ func getRemoteDataStream(url string, hint string) (result io.ReadCloser, ctxCanc
return
}
-func downloadCatchpoint(addr string, round int) (tarName string, err error) {
+func doDownloadCatchpoint(url string, wdReader util.WatchdogStreamReader, out io.Writer) error {
+ writeChunkSize := 64 * 1024
+
+ var totalBytes int
+ tempBytes := make([]byte, writeChunkSize)
+ lastProgressUpdate := time.Now()
+ progress := -25
+ printDownloadProgressLine(progress, 50, url, 0)
+
+ for {
+ n, err := wdReader.Read(tempBytes)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ totalBytes += n
+ _, err = out.Write(tempBytes[:n])
+ if err != nil {
+ return err
+ }
+
+ err = wdReader.Reset()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ if time.Since(lastProgressUpdate) > 50*time.Millisecond {
+ lastProgressUpdate = time.Now()
+ printDownloadProgressLine(progress, 50, url, int64(totalBytes))
+ progress++
+ }
+ }
+}
+
+// downloadCatchpoint downloads the catchpoint tar file for the given round and returns the path to the tar file.
+func downloadCatchpoint(addr string, round int) (string, error) {
genesisID := strings.Split(networkName, ".")[0] + "-v1.0"
urlTemplate := "http://" + addr + "/v1/" + genesisID + "/%s/" + strconv.FormatUint(uint64(round), 36)
catchpointURL := fmt.Sprintf(urlTemplate, "ledger")
@@ -199,7 +239,7 @@ func downloadCatchpoint(addr string, round int) (tarName string, err error) {
catchpointStream, catchpointCtxCancel, err := getRemoteDataStream(catchpointURL, "catchpoint")
defer catchpointCtxCancel()
if err != nil {
- return
+ return "", err
}
defer catchpointStream.Close()
@@ -207,69 +247,71 @@ func downloadCatchpoint(addr string, round int) (tarName string, err error) {
os.RemoveAll(dirName)
err = os.MkdirAll(dirName, 0777)
if err != nil && !os.IsExist(err) {
- return
+ return "", err
}
- tarName = dirName + "/" + strconv.FormatUint(uint64(round), 10) + ".tar"
- file, err2 := os.Create(tarName) // will create a file with 0666 permission.
- if err2 != nil {
- return tarName, err2
+ tarName := dirName + "/" + strconv.FormatUint(uint64(round), 10) + ".tar"
+ file, err := os.Create(tarName) // will create a file with 0666 permission.
+ if err != nil {
+ return "", err
}
- defer func() {
- err = file.Close()
- }()
- writeChunkSize := 64 * 1024
+ defer file.Close()
- wdReader := util.MakeWatchdogStreamReader(catchpointStream, 4096, 4096, 2*time.Second)
- var totalBytes int
- tempBytes := make([]byte, writeChunkSize)
- lastProgressUpdate := time.Now()
- progress := -25
- printDownloadProgressLine(progress, 50, catchpointURL, 0)
- defer printDownloadProgressLine(0, 0, catchpointURL, 0)
- var n int
- for {
- n, err = wdReader.Read(tempBytes)
- if err != nil && err != io.EOF {
- return
- }
- totalBytes += n
- writtenBytes, err2 := file.Write(tempBytes[:n])
- if err2 != nil || n != writtenBytes {
- return tarName, err2
- }
+ wdReader := util.MakeWatchdogStreamReader(catchpointStream, 4096, 4096, 5*time.Second)
+ defer wdReader.Close()
- err = wdReader.Reset()
- if err != nil {
- if err == io.EOF {
- return tarName, nil
- }
- return
+ err = doDownloadCatchpoint(catchpointURL, wdReader, file)
+ if err != nil {
+ return "", err
+ }
+
+ printDownloadProgressLine(0, 0, catchpointURL, 0)
+
+ err = file.Close()
+ if err != nil {
+ return "", err
+ }
+
+ err = catchpointStream.Close()
+ if err != nil {
+ return "", err
+ }
+
+ return tarName, nil
+}
+
+func deleteLedgerFiles(deleteTracker bool) error {
+ paths := []string{
+ "./ledger.block.sqlite",
+ "./ledger.block.sqlite-shm",
+ "./ledger.block.sqlite-wal",
+ }
+ if deleteTracker {
+ trackerPaths := []string{
+ "./ledger.tracker.sqlite",
+ "./ledger.tracker.sqlite-shm",
+ "./ledger.tracker.sqlite-wal",
}
- if time.Since(lastProgressUpdate) > 50*time.Millisecond {
- lastProgressUpdate = time.Now()
- printDownloadProgressLine(progress, 50, catchpointURL, int64(totalBytes))
- progress++
+ paths = append(paths, trackerPaths...)
+ }
+
+ for _, path := range paths {
+ err := os.Remove(path)
+ if (err != nil) && !errors.Is(err, os.ErrNotExist) {
+ return err
}
}
+
+ return nil
}
func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitState) error {
- deleteLedgerFiles := func(deleteTracker bool) {
- os.Remove("./ledger.block.sqlite")
- os.Remove("./ledger.block.sqlite-shm")
- os.Remove("./ledger.block.sqlite-wal")
- if deleteTracker {
- os.Remove("./ledger.tracker.sqlite")
- os.Remove("./ledger.tracker.sqlite-shm")
- os.Remove("./ledger.tracker.sqlite-wal")
- }
- }
// delete current ledger files.
deleteLedgerFiles(true)
cfg := config.GetDefaultLocal()
l, err := ledger.OpenLedger(logging.Base(), "./ledger", false, genesisInitState, cfg)
if err != nil {
reportErrorf("Unable to open ledger : %v", err)
+ return err
}
defer deleteLedgerFiles(!loadOnly)
@@ -279,6 +321,7 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
err = catchupAccessor.ResetStagingBalances(context.Background(), true)
if err != nil {
reportErrorf("Unable to initialize catchup database : %v", err)
+ return err
}
stats, err := os.Stat(tarFile)
@@ -297,6 +340,7 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
fileHeader, err = loadCatchpointIntoDatabase(context.Background(), catchupAccessor, reader, tarSize)
if err != nil {
reportErrorf("Unable to load catchpoint file into in-memory database : %v", err)
+ return err
}
if !loadOnly {
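
The extracted doDownloadCatchpoint follows a watchdog-reader pattern: read a chunk, write it out, then Reset() the watchdog so the deadline only fires when the stream truly stalls. A condensed sketch of that loop shape, assuming the util.WatchdogStreamReader type used in the diff (an illustration, not the code above):

// copyWithWatchdog drains r into w, resetting the watchdog after each chunk
// so a stalled peer aborts the transfer instead of hanging it forever.
func copyWithWatchdog(w io.Writer, r util.WatchdogStreamReader) error {
	buf := make([]byte, 64*1024)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			if _, werr := w.Write(buf[:n]); werr != nil {
				return werr
			}
		}
		if err == io.EOF {
			return nil // stream finished cleanly
		}
		if err != nil {
			return err
		}
		// Reset extends the watchdog deadline; EOF here also means done.
		if err := r.Reset(); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}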
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index f6e609a5a..c78ed0872 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -25,6 +25,7 @@ import (
"path/filepath"
"sort"
"strings"
+ "time"
"github.com/spf13/cobra"
@@ -825,7 +826,7 @@ var changeOnlineCmd = &cobra.Command{
reportErrorf(err.Error())
}
err = changeAccountOnlineStatus(
- accountAddress, part, online, statusChangeTxFile, walletName,
+ accountAddress, online, statusChangeTxFile, walletName,
firstTxRound, lastTxRound, transactionFee, scLeaseBytes(cmd), dataDir, client,
)
if err != nil {
@@ -834,12 +835,16 @@ var changeOnlineCmd = &cobra.Command{
},
}
-func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnline bool, txFile string, wallet string, firstTxRound, lastTxRound, fee uint64, leaseBytes [32]byte, dataDir string, client libgoal.Client) error {
+func changeAccountOnlineStatus(
+ acct string, goOnline bool, txFile string, wallet string,
+ firstTxRound, lastTxRound, fee uint64, leaseBytes [32]byte,
+ dataDir string, client libgoal.Client,
+) error {
// Generate an unsigned online/offline tx
var utx transactions.Transaction
var err error
if goOnline {
- utx, err = client.MakeUnsignedGoOnlineTx(acct, part, firstTxRound, lastTxRound, fee, leaseBytes)
+ utx, err = client.MakeUnsignedGoOnlineTx(acct, firstTxRound, lastTxRound, fee, leaseBytes)
} else {
utx, err = client.MakeUnsignedGoOfflineTx(acct, firstTxRound, lastTxRound, fee, leaseBytes)
}
@@ -870,8 +875,8 @@ func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnl
var addParticipationKeyCmd = &cobra.Command{
Use: "addpartkey",
- Short: "Generate a participation key for the specified account",
- Long: `Generate a participation key for the specified account. This participation key can then be used for going online and participating in consensus.`,
+ Short: "Generate and install participation key for the specified account",
+ Long: `Generate and install participation key for the specified account. This participation key can then be used for going online and participating in consensus.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
dataDir := ensureSingleDataDir()
@@ -886,8 +891,9 @@ var addParticipationKeyCmd = &cobra.Command{
reportInfof("Please stand by while generating keys. This might take a few minutes...")
var err error
+ var part algodAcct.Participation
participationGen := func() {
- _, _, err = client.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir)
+ part, _, err = client.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir)
}
util.RunFuncWithSpinningCursor(participationGen)
@@ -895,7 +901,7 @@ var addParticipationKeyCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- reportInfof("Participation key generation successful")
+ reportInfof("Participation key generation successful. Participation ID: %s\n", part.ID())
},
}
@@ -922,11 +928,22 @@ No --delete-input flag specified, exiting without installing key.`)
dataDir := ensureSingleDataDir()
client := ensureAlgodClient(dataDir)
- _, _, err := client.InstallParticipationKeys(partKeyFile)
+ addResponse, err := client.AddParticipationKey(partKeyFile)
if err != nil {
reportErrorf(errorRequestFail, err)
}
- fmt.Println("Participation key installed successfully")
+ // In an abundance of caution, check for ourselves that the key has been installed.
+ if err := client.VerifyParticipationKey(time.Minute, addResponse.PartId); err != nil {
+ err = fmt.Errorf("unable to verify key installation. Verify key installation with 'goal account partkeyinfo' and delete '%s', or retry the command. Error: %w", partKeyFile, err)
+ reportErrorf(errorRequestFail, err)
+ }
+
+ reportInfof("Participation key installed successfully, Participation ID: %s\n", addResponse.PartId)
+
+ // Delete the partkey file now that it has been installed with algod
+ if err := os.Remove(partKeyFile); err != nil {
+ reportErrorf("An error occurred while removing the partkey file, please delete it manually: %v", err)
+ }
},
}
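
The "abundance of caution" step above polls algod until the freshly added key is visible, rather than trusting the add response alone. A sketch of that poll-until-timeout shape, using a hypothetical helper (libgoal's VerifyParticipationKey wraps the real logic):

// pollUntil retries check once per second until it reports success, returns
// an error, or the timeout elapses. Hypothetical helper, for illustration.
func pollUntil(timeout time.Duration, check func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		ok, err := check()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v", timeout)
		}
		time.Sleep(time.Second)
	}
}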
@@ -957,14 +974,14 @@ var renewParticipationKeyCmd = &cobra.Command{
txRoundLastValid := currentRound + proto.MaxTxnLife
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
- parts, err := client.ListParticipationKeyFiles()
+ parts, err := client.ListParticipationKeys()
if err != nil {
reportErrorf(errorRequestFail, err)
}
for _, part := range parts {
- if part.Address().String() == accountAddress {
- if part.LastValid >= basics.Round(roundLastValid) {
- reportErrorf(errExistingPartKey, roundLastValid, part.LastValid)
+ if part.Address == accountAddress {
+ if part.Key.VoteLastValid >= roundLastValid {
+ reportErrorf(errExistingPartKey, roundLastValid, part.Key.VoteLastValid)
}
}
}
@@ -982,7 +999,7 @@ func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound,
var keyPath string
var err error
genFunc := func() {
- part, keyPath, err = client.GenParticipationKeysTo(address, currentRound, keyLastValidRound, dilution, "")
+ part, keyPath, err = client.GenParticipationKeys(address, currentRound, keyLastValidRound, dilution)
if err != nil {
err = fmt.Errorf(errorRequestFail, err)
}
@@ -997,12 +1014,13 @@ func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound,
// Now register it as our new online participation key
goOnline := true
txFile := ""
- err = changeAccountOnlineStatus(address, &part, goOnline, txFile, wallet, currentRound, txLastValidRound, fee, leaseBytes, dataDir, client)
+ err = changeAccountOnlineStatus(address, goOnline, txFile, wallet, currentRound, txLastValidRound, fee, leaseBytes, dataDir, client)
if err != nil {
os.Remove(keyPath)
fmt.Fprintf(os.Stderr, " Error registering keys - deleting newly-generated key file: %s\n", keyPath)
+ return err
}
- return err
+ fmt.Printf("Participation key installed successfully, Participation ID: %s\n", part.ID())
+ return nil
}
var renewAllParticipationKeyCmd = &cobra.Command{
@@ -1025,19 +1043,19 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
client := ensureAlgodClient(dataDir)
// Build list of accounts to renew from all accounts with part keys present
- parts, err := client.ListParticipationKeyFiles()
+ parts, err := client.ListParticipationKeys()
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
- renewAccounts := make(map[basics.Address]algodAcct.Participation)
+ renewAccounts := make(map[string]generatedV2.ParticipationKey)
for _, part := range parts {
- if existing, has := renewAccounts[part.Address()]; has {
- if existing.LastValid >= part.LastValid {
+ if existing, has := renewAccounts[part.Address]; has {
+ if existing.Key.VoteLastValid >= part.Key.VoteLastValid {
// We already saw a partkey that expires later
continue
}
}
- renewAccounts[part.Address()] = part
+ renewAccounts[part.Address] = part
}
currentRound, err := client.CurrentRound()
@@ -1062,18 +1080,18 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease
// at least through lastValidRound, generate a new key and register it.
// Make sure we don't already have a partkey valid for (or after) specified roundLastValid
for _, renewPart := range renewAccounts {
- if renewPart.LastValid >= basics.Round(lastValidRound) {
- fmt.Printf(" Skipping account %s: Already has a part key valid beyond %d (currently %d)\n", renewPart.Address(), lastValidRound, renewPart.LastValid)
+ if renewPart.Key.VoteLastValid >= lastValidRound {
+ fmt.Printf(" Skipping account %s: Already has a part key valid beyond %d (currently %d)\n", renewPart.Address, lastValidRound, renewPart.Key.VoteLastValid)
continue
}
// If the account's latest partkey expired before the current round, don't automatically renew and instead instruct the user to explicitly renew it.
- if renewPart.LastValid < basics.Round(lastValidRound) {
- fmt.Printf(" Skipping account %s: This account has part keys that have expired. Please renew this account explicitly using 'renewpartkey'\n", renewPart.Address())
+ if renewPart.Key.VoteLastValid < lastValidRound {
+ fmt.Printf(" Skipping account %s: This account has part keys that have expired. Please renew this account explicitly using 'renewpartkey'\n", renewPart.Address)
continue
}
- address := renewPart.Address().String()
+ address := renewPart.Address
err = generateAndRegisterPartKey(address, currentRound, lastValidRound, txLastValidRound, fee, leaseBytes, dilution, wallet, dataDir, client)
if err != nil {
fmt.Fprintf(os.Stderr, " Error renewing part key for account %s: %v\n", address, err)
@@ -1097,53 +1115,6 @@ func uintToStr(number uint64) string {
return fmt.Sprintf("%d", number)
}
-// legacyListParticipationKeysCommand prints key information in the same
-// format as earlier versions of goal. Some users are using this information
-// in scripts and need some extra time to migrate to the REST API.
-// DEPRECATED
-func legacyListParticipationKeysCommand() {
- dataDir := ensureSingleDataDir()
-
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
- parts, err := client.ListParticipationKeyFiles()
- if err != nil {
- reportErrorf(errorRequestFail, err)
- }
-
- var filenames []string
- for fn := range parts {
- filenames = append(filenames, fn)
- }
- sort.Strings(filenames)
-
- rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
- fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
- for _, fn := range filenames {
- onlineInfoStr := "unknown"
- onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
- if err == nil {
- votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
- vrfBytes := parts[fn].VRF.PK
- if onlineAccountInfo.Participation != nil &&
- (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
- (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
- (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
- (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
- (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
- onlineInfoStr = "yes"
- } else {
- onlineInfoStr = "no"
- }
- }
- // it's okay to proceed without algod info
- first, last := parts[fn].ValidInterval()
- fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
- fmt.Sprintf("%d", first),
- fmt.Sprintf("%d", last),
- fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
- }
-}
-
var listParticipationKeysCmd = &cobra.Command{
Use: "listpartkeys",
Short: "List participation keys summary",
@@ -1396,47 +1367,6 @@ func strOrNA(value *uint64) string {
return uintToStr(*value)
}
-// legacyPartkeyInfoCommand prints key information in the same
-// format as earlier versions of goal. Some users are using this information
-// in scripts and need some extra time to migrate to alternatives.
-// DEPRECATED
-func legacyPartkeyInfoCommand() {
- type partkeyInfo struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Address string `codec:"acct"`
- FirstValid basics.Round `codec:"first"`
- LastValid basics.Round `codec:"last"`
- VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
- SelectionID crypto.VRFVerifier `codec:"sel"`
- VoteKeyDilution uint64 `codec:"voteKD"`
- }
-
- onDataDirs(func(dataDir string) {
- fmt.Printf("Dumping participation key info from %s...\n", dataDir)
- client := ensureGoalClient(dataDir, libgoal.DynamicClient)
-
- // Make sure we don't already have a partkey valid for (or after) specified roundLastValid
- parts, err := client.ListParticipationKeyFiles()
- if err != nil {
- reportErrorf(errorRequestFail, err)
- }
-
- for filename, part := range parts {
- fmt.Println("------------------------------------------------------------------")
- info := partkeyInfo{
- Address: part.Address().String(),
- FirstValid: part.FirstValid,
- LastValid: part.LastValid,
- VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
- SelectionID: part.VRFSecrets().PK,
- VoteKeyDilution: part.KeyDilution,
- }
- infoString := protocol.EncodeJSON(&info)
- fmt.Printf("File: %s\n%s\n", filename, string(infoString))
- }
- })
-}
-
var partkeyInfoCmd = &cobra.Command{
Use: "partkeyinfo",
Short: "Output details about all available part keys",
@@ -1528,3 +1458,138 @@ var markNonparticipatingCmd = &cobra.Command{
}
},
}
+
+// listParticipationKeyFiles returns the available participation keys,
+// as a map from database filename to Participation key object.
+// DEPRECATED
+func listParticipationKeyFiles(c *libgoal.Client) (partKeyFiles map[string]algodAcct.Participation, err error) {
+ genID, err := c.GenesisID()
+ if err != nil {
+ return
+ }
+
+ // Get a list of files in the participation keys directory
+ keyDir := filepath.Join(c.DataDir(), genID)
+ files, err := ioutil.ReadDir(keyDir)
+ if err != nil {
+ return
+ }
+
+ partKeyFiles = make(map[string]algodAcct.Participation)
+ for _, file := range files {
+ // If it can't be a participation key database, skip it
+ if !config.IsPartKeyFilename(file.Name()) {
+ continue
+ }
+
+ filename := file.Name()
+
+ // Fetch a handle to this database
+ handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename))
+ if err != nil {
+ // Couldn't open it, skip it
+ continue
+ }
+
+ // Fetch an account.Participation from the database
+ part, err := algodAcct.RestoreParticipation(handle)
+ if err != nil {
+ // Couldn't read it, skip it
+ handle.Close()
+ continue
+ }
+
+ partKeyFiles[filename] = part.Participation
+ part.Close()
+ }
+
+ return
+}
+
+// legacyListParticipationKeysCommand prints key information in the same
+// format as earlier versions of goal. Some users are using this information
+// in scripts and need some extra time to migrate to the REST API.
+// DEPRECATED
+func legacyListParticipationKeysCommand() {
+ dataDir := ensureSingleDataDir()
+
+ client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+ parts, err := listParticipationKeyFiles(&client)
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+
+ var filenames []string
+ for fn := range parts {
+ filenames = append(filenames, fn)
+ }
+ sort.Strings(filenames)
+
+ rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n"
+ fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key")
+ for _, fn := range filenames {
+ onlineInfoStr := "unknown"
+ onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress())
+ if err == nil {
+ votingBytes := parts[fn].Voting.OneTimeSignatureVerifier
+ vrfBytes := parts[fn].VRF.PK
+ if onlineAccountInfo.Participation != nil &&
+ (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) &&
+ (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) &&
+ (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) &&
+ (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) &&
+ (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) {
+ onlineInfoStr = "yes"
+ } else {
+ onlineInfoStr = "no"
+ }
+ }
+ // it's okay to proceed without algod info
+ first, last := parts[fn].ValidInterval()
+ fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(),
+ fmt.Sprintf("%d", first),
+ fmt.Sprintf("%d", last),
+ fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset))
+ }
+}
+
+// legacyPartkeyInfoCommand prints key information in the same
+// format as earlier versions of goal. Some users are using this information
+// in scripts and need some extra time to migrate to alternatives.
+// DEPRECATED
+func legacyPartkeyInfoCommand() {
+ type partkeyInfo struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Address string `codec:"acct"`
+ FirstValid basics.Round `codec:"first"`
+ LastValid basics.Round `codec:"last"`
+ VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
+ SelectionID crypto.VRFVerifier `codec:"sel"`
+ VoteKeyDilution uint64 `codec:"voteKD"`
+ }
+
+ onDataDirs(func(dataDir string) {
+ fmt.Printf("Dumping participation key info from %s...\n", dataDir)
+ client := ensureGoalClient(dataDir, libgoal.DynamicClient)
+
+ // Make sure we don't already have a partkey valid for (or after) specified roundLastValid
+ parts, err := listParticipationKeyFiles(&client)
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+
+ for filename, part := range parts {
+ fmt.Println("------------------------------------------------------------------")
+ info := partkeyInfo{
+ Address: part.Address().String(),
+ FirstValid: part.FirstValid,
+ LastValid: part.LastValid,
+ VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
+ SelectionID: part.VRFSecrets().PK,
+ VoteKeyDilution: part.KeyDilution,
+ }
+ infoString := protocol.EncodeJSON(&info)
+ fmt.Printf("File: %s\n%s\n", filename, string(infoString))
+ }
+ })
+}
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index cb6284036..21bd8c1aa 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -43,9 +43,7 @@ var minFee uint64
var randomFee, noRandomFee bool
var randomAmount, noRandomAmount bool
var randomDst bool
-var delayBetween string
var runTime string
-var restTime string
var refreshTime string
var saveConfig bool
var useDefault bool
@@ -84,9 +82,7 @@ func init() {
runCmd.Flags().BoolVar(&randomFee, "rf", false, "Set to enable random fees (between minf and mf)")
runCmd.Flags().BoolVar(&noRandomFee, "nrf", false, "Set to disable random fees")
runCmd.Flags().BoolVar(&randomDst, "rd", false, "Send money to randomly-generated addresses")
- runCmd.Flags().StringVar(&delayBetween, "delay", "", "Delay (ms) between every transaction (0 means none)")
runCmd.Flags().StringVar(&runTime, "run", "", "Duration of time (seconds) to run transfers before resting (0 means non-stop)")
- runCmd.Flags().StringVar(&restTime, "rest", "", "Duration of time (seconds) to rest between transfer periods (0 means no rest)")
runCmd.Flags().StringVar(&refreshTime, "refresh", "", "Duration of time (seconds) between refilling accounts with money (0 means no refresh)")
runCmd.Flags().StringVar(&logicProg, "program", "", "File containing the compiled program to include as a logic sig")
runCmd.Flags().BoolVar(&saveConfig, "save", false, "Save the effective configuration to disk")
@@ -187,13 +183,6 @@ var runCmd = &cobra.Command{
}
cfg.RandomizeDst = randomDst
cfg.Quiet = quietish
- if delayBetween != "" {
- val, err := strconv.ParseUint(delayBetween, 10, 32)
- if err != nil {
- reportErrorf("Invalid value specified for --delay: %v\n", err)
- }
- cfg.DelayBetweenTxn = time.Duration(uint32(val)) * time.Millisecond
- }
if runTime != "" {
val, err := strconv.ParseUint(runTime, 10, 32)
if err != nil {
@@ -201,13 +190,6 @@ var runCmd = &cobra.Command{
}
cfg.RunTime = time.Duration(uint32(val)) * time.Second
}
- if restTime != "" {
- val, err := strconv.ParseUint(restTime, 10, 32)
- if err != nil {
- reportErrorf("Invalid value specified for --rest: %v\n", err)
- }
- cfg.RestTime = time.Duration(uint32(val)) * time.Second
- }
if refreshTime != "" {
val, err := strconv.ParseUint(refreshTime, 10, 32)
if err != nil {
diff --git a/config/consensus.go b/config/consensus.go
index 61ca55a2f..42e196b1b 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -1146,7 +1146,7 @@ func initConsensusProtocols() {
vFuture.CompactCertWeightThreshold = (1 << 32) * 30 / 100
vFuture.CompactCertSecKQ = 128
- vFuture.LogicSigVersion = 7
+ vFuture.LogicSigVersion = 7 // When moving this to a release, put a new higher LogicSigVersion here
vFuture.MinInnerApplVersion = 4
vFuture.UnifyInnerTxIDs = true
@@ -1178,4 +1178,5 @@ func init() {
for _, p := range Consensus {
checkSetAllocBounds(p)
}
+
}
diff --git a/config/version.go b/config/version.go
index fb07796ec..c85775d55 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 7
+const VersionMinor = 8
// Version is the type holding our full version information.
type Version struct {
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index 07e206a81..c423e8de2 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -147,7 +147,7 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
fmt.Fprintln(logWriter, "Logging Starting")
if s.log.GetTelemetryUploadingEnabled() {
// May or may not be logging to node.log
- fmt.Fprintf(logWriter, "Telemetry Enabled: %s\n", s.log.GetTelemetryHostName())
+ fmt.Fprintf(logWriter, "Telemetry Enabled: %s\n", s.log.GetTelemetryGUID())
fmt.Fprintf(logWriter, "Session: %s\n", s.log.GetTelemetrySession())
} else {
// May or may not be logging to node.log
@@ -158,6 +158,12 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
metricLabels := map[string]string{}
if s.log.GetTelemetryEnabled() {
metricLabels["telemetry_session"] = s.log.GetTelemetrySession()
+ if h := s.log.GetTelemetryGUID(); h != "" {
+ metricLabels["telemetry_host"] = h
+ }
+ if i := s.log.GetInstanceName(); i != "" {
+ metricLabels["telemetry_instance"] = i
+ }
}
s.metricCollector = metrics.MakeMetricService(
&metrics.ServiceConfig{
diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go
index d763203fe..114bb37f8 100644
--- a/data/bookkeeping/genesis.go
+++ b/data/bookkeeping/genesis.go
@@ -101,6 +101,51 @@ func (genesis Genesis) ID() string {
return string(genesis.Network) + "-" + genesis.SchemaID
}
+// Hash is the genesis hash.
+func (genesis Genesis) Hash() crypto.Digest {
+ return crypto.HashObj(genesis)
+}
+
+// Balances returns the genesis account balances.
+func (genesis Genesis) Balances() (GenesisBalances, error) {
+ genalloc := make(map[basics.Address]basics.AccountData)
+ for _, entry := range genesis.Allocation {
+ addr, err := basics.UnmarshalChecksumAddress(entry.Address)
+ if err != nil {
+ return GenesisBalances{}, fmt.Errorf("cannot parse genesis addr %s: %w", entry.Address, err)
+ }
+
+ _, present := genalloc[addr]
+ if present {
+ return GenesisBalances{}, fmt.Errorf("repeated allocation to %s", entry.Address)
+ }
+
+ genalloc[addr] = entry.State
+ }
+
+ feeSink, err := basics.UnmarshalChecksumAddress(genesis.FeeSink)
+ if err != nil {
+ return GenesisBalances{}, fmt.Errorf("cannot parse fee sink addr %s: %w", genesis.FeeSink, err)
+ }
+
+ rewardsPool, err := basics.UnmarshalChecksumAddress(genesis.RewardsPool)
+ if err != nil {
+ return GenesisBalances{}, fmt.Errorf("cannot parse rewards pool addr %s: %w", genesis.RewardsPool, err)
+ }
+
+ return MakeTimestampedGenesisBalances(genalloc, feeSink, rewardsPool, genesis.Timestamp), nil
+}
+
+// Block computes the genesis block.
+func (genesis Genesis) Block() (Block, error) {
+ genBal, err := genesis.Balances()
+ if err != nil {
+ return Block{}, err
+ }
+
+ return MakeGenesisBlock(genesis.Proto, genBal, genesis.ID(), genesis.Hash())
+}
+
// A GenesisAllocation object represents an allocation of algos to
// an address in the genesis block. Address is the checksummed
// short address. Comment is a note about what this address is
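
Taken together, the new Hash, Balances, and Block methods let a caller go from a parsed genesis.json straight to the genesis block. A short usage sketch, assuming go-algorand's protocol JSON codec:

// genesisBlockFromFile decodes a genesis.json file and derives its genesis
// block via the new Genesis helpers.
func genesisBlockFromFile(path string) (bookkeeping.Block, error) {
	text, err := os.ReadFile(path)
	if err != nil {
		return bookkeeping.Block{}, err
	}
	var genesis bookkeeping.Genesis
	if err := protocol.DecodeJSON(text, &genesis); err != nil {
		return bookkeeping.Block{}, err
	}
	return genesis.Block()
}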
diff --git a/data/bookkeeping/genesis_test.go b/data/bookkeeping/genesis_test.go
new file mode 100644
index 000000000..9ca60bd5e
--- /dev/null
+++ b/data/bookkeeping/genesis_test.go
@@ -0,0 +1,157 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package bookkeeping
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestGenesis_Balances(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ containsErrorFunc := func(str string) assert.ErrorAssertionFunc {
+ return func(_ assert.TestingT, err error, i ...interface{}) bool {
+ require.ErrorContains(t, err, str)
+ return true
+ }
+ }
+ mustAddr := func(addr string) basics.Address {
+ address, err := basics.UnmarshalChecksumAddress(addr)
+ require.NoError(t, err)
+ return address
+ }
+ makeAddr := func(addr uint64) basics.Address {
+ var address basics.Address
+ address[0] = byte(addr)
+ return address
+ }
+ acctWith := func(algos uint64, addr string) GenesisAllocation {
+ return GenesisAllocation{
+ _struct: struct{}{},
+ Address: addr,
+ Comment: "",
+ State: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: algos},
+ },
+ }
+ }
+ goodAddr := makeAddr(100)
+ allocation1 := acctWith(1000, makeAddr(1).String())
+ allocation2 := acctWith(2000, makeAddr(2).String())
+ badAllocation := acctWith(1234, "El Toro Loco")
+ type fields struct {
+ Allocation []GenesisAllocation
+ FeeSink string
+ RewardsPool string
+ }
+ tests := []struct {
+ name string
+ fields fields
+ want GenesisBalances
+ wantErr assert.ErrorAssertionFunc
+ }{
+ {
+ name: "basic test",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ want: GenesisBalances{
+ Balances: map[basics.Address]basics.AccountData{
+ mustAddr(allocation1.Address): allocation1.State,
+ },
+ FeeSink: goodAddr,
+ RewardsPool: goodAddr,
+ Timestamp: 0,
+ },
+ wantErr: assert.NoError,
+ },
+ {
+ name: "two test",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation2},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ want: GenesisBalances{
+ Balances: map[basics.Address]basics.AccountData{
+ mustAddr(allocation1.Address): allocation1.State,
+ mustAddr(allocation2.Address): allocation2.State,
+ },
+ FeeSink: goodAddr,
+ RewardsPool: goodAddr,
+ Timestamp: 0,
+ },
+ wantErr: assert.NoError,
+ },
+ {
+ name: "bad fee sink",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation2},
+ RewardsPool: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("cannot parse fee sink addr"),
+ },
+ {
+ name: "bad rewards pool",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation2},
+ FeeSink: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("cannot parse rewards pool addr"),
+ },
+ {
+ name: "bad genesis addr",
+ fields: fields{
+ Allocation: []GenesisAllocation{badAllocation},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("cannot parse genesis addr"),
+ },
+ {
+ name: "repeat address",
+ fields: fields{
+ Allocation: []GenesisAllocation{allocation1, allocation1},
+ FeeSink: goodAddr.String(),
+ RewardsPool: goodAddr.String(),
+ },
+ wantErr: containsErrorFunc("repeated allocation to"),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ genesis := Genesis{
+ Allocation: tt.fields.Allocation,
+ FeeSink: tt.fields.FeeSink,
+ RewardsPool: tt.fields.RewardsPool,
+ }
+ got, err := genesis.Balances()
+ if tt.wantErr(t, err, "Balances()") {
+ return
+ }
+ assert.Equalf(t, tt.want, got, "Balances()")
+ })
+ }
+}
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 44d68bd1e..597b47763 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -327,6 +327,8 @@ return stack matches the name of the input value.
| `extract_uint16` | A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails |
| `extract_uint32` | A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails |
| `extract_uint64` | A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails |
+| `replace2 s` | Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A) |
+| `replace3` | Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A) |
| `base64_decode e` | decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E |
| `json_ref r` | return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A |
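
In Go terms, the two new replace opcodes are a bounds-checked copy-and-overwrite. The sketch below illustrates the semantics from the table above; it is not the eval.go implementation:

// replaceBytes returns a copy of a with the bytes starting at start
// overwritten by b, failing if the replacement would run past the end of a.
func replaceBytes(a []byte, start uint64, b []byte) ([]byte, error) {
	if start+uint64(len(b)) > uint64(len(a)) {
		return nil, fmt.Errorf("replacement end %d exceeds length %d", start+uint64(len(b)), len(a))
	}
	out := make([]byte, len(a))
	copy(out, a)
	copy(out[start:], b)
	return out, nil
}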
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index de7c8d8bd..cd7a9a952 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -746,12 +746,26 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
- A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails
- Availability: v5
+## replace2 s
+
+- Opcode: 0x5c {uint8 start position}
+- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
+- Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)
+- Availability: v7
+
+## replace3
+
+- Opcode: 0x5d
+- Stack: ..., A: []byte, B: uint64, C: []byte &rarr; ..., []byte
+- Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)
+- Availability: v7
+
## base64_decode e
-- Opcode: 0x5c {uint8 encoding index}
+- Opcode: 0x5e {uint8 encoding index}
- Stack: ..., A: []byte &rarr; ..., []byte
- decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E
-- **Cost**: 1 + 1 per 16 bytes
+- **Cost**: 1 + 1 per 16 bytes of A
- Availability: v7
`base64` Encodings:
@@ -766,9 +780,10 @@ Decodes A using the base64 encoding E. Specify the encoding with an immediate ar
## json_ref r
-- Opcode: 0x5d {string return type}
+- Opcode: 0x5f {string return type}
- Stack: ..., A: []byte, B: []byte &rarr; ..., any
- return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A
+- **Cost**: 25 + 2 per 7 bytes of A
- Availability: v7
`json_ref` Types:
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 8c9d5955e..6a05596b2 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -252,12 +252,18 @@ type OpStream struct {
// newOpStream constructs OpStream instances ready to invoke assemble. A new
// OpStream must be used for each call to assemble().
func newOpStream(version uint64) OpStream {
- return OpStream{
+ o := OpStream{
labels: make(map[string]int),
OffsetToLine: make(map[int]int),
typeTracking: true,
Version: version,
}
+
+ for i := range o.known.scratchSpace {
+ o.known.scratchSpace[i] = StackUint64
+ }
+
+ return o
}
// ProgramKnowledge tracks statically known information as we assemble
@@ -279,6 +285,8 @@ type ProgramKnowledge struct {
// deadcode indicates that the program is in deadcode, so no type checking
// errors should be reported.
deadcode bool
+
+ scratchSpace [256]StackType
}
func (pgm *ProgramKnowledge) pop() StackType {
@@ -312,6 +320,9 @@ func (pgm *ProgramKnowledge) reset() {
pgm.stack = nil
pgm.bottom = StackAny
pgm.deadcode = false
+ for i := range pgm.scratchSpace {
+ pgm.scratchSpace[i] = StackAny
+ }
}
// createLabel inserts a label to point to the next instruction, reporting an
@@ -334,7 +345,7 @@ func (ops *OpStream) referToLabel(pc int, label string) {
ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label})
}
-type refineFunc func(pgm ProgramKnowledge, immediates []string) (StackTypes, StackTypes)
+type refineFunc func(pgm *ProgramKnowledge, immediates []string) (StackTypes, StackTypes)
// returns allows opcodes like `txn` to be specific about their return value
// types, based on the field requested, rather than use Any as specified by
@@ -969,7 +980,19 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func typeSwap(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+// getByteImm interprets the arg at index argIndex as a byte-sized immediate
+func getByteImm(args []string, argIndex int) (byte, bool) {
+ if len(args) <= argIndex {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(args[argIndex], 0, 8)
+ if err != nil {
+ return 0, false
+ }
+ return byte(n), true
+}
+
+func typeSwap(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
topTwo := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
@@ -982,12 +1005,9 @@ func typeSwap(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, reversed
}
-func typeDig(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- if len(args) == 0 {
- return nil, nil
- }
- n, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
+func typeDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ n, ok := getByteImm(args, 0)
+ if !ok {
return nil, nil
}
depth := int(n) + 1
@@ -1008,7 +1028,7 @@ func typeDig(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return anys, returns
}
-func typeEquals(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeEquals(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 0 {
//Require arg0 and arg1 to have same type
@@ -1017,7 +1037,7 @@ func typeEquals(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeDup(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeDup(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 0 {
return StackTypes{pgm.stack[top]}, StackTypes{pgm.stack[top], pgm.stack[top]}
@@ -1025,7 +1045,7 @@ func typeDup(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeDupTwo(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeDupTwo(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
topTwo := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
@@ -1037,7 +1057,7 @@ func typeDupTwo(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, append(topTwo, topTwo...)
}
-func typeSelect(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeSelect(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 2 {
if pgm.stack[top-1] == pgm.stack[top-2] {
@@ -1047,7 +1067,7 @@ func typeSelect(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeSetBit(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeSetBit(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
top := len(pgm.stack) - 1
if top >= 2 {
return nil, StackTypes{pgm.stack[top-2]}
@@ -1055,12 +1075,9 @@ func typeSetBit(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return nil, nil
}
-func typeCover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- if len(args) == 0 {
- return nil, nil
- }
- n, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
+func typeCover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ n, ok := getByteImm(args, 0)
+ if !ok {
return nil, nil
}
depth := int(n) + 1
@@ -1086,12 +1103,9 @@ func typeCover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return anys, returns
}
-func typeUncover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
- if len(args) == 0 {
- return nil, nil
- }
- n, err := strconv.ParseUint(args[0], 0, 64)
- if err != nil {
+func typeUncover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ n, ok := getByteImm(args, 0)
+ if !ok {
return nil, nil
}
depth := int(n) + 1
@@ -1114,7 +1128,7 @@ func typeUncover(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return anys, returns
}
-func typeTxField(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+func typeTxField(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
if len(args) != 1 {
return nil, nil
}
@@ -1125,6 +1139,51 @@ func typeTxField(pgm ProgramKnowledge, args []string) (StackTypes, StackTypes) {
return StackTypes{fs.ftype}, nil
}
+func typeStore(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ scratchIndex, ok := getByteImm(args, 0)
+ if !ok {
+ return nil, nil
+ }
+ top := len(pgm.stack) - 1
+ if top >= 0 {
+ pgm.scratchSpace[scratchIndex] = pgm.stack[top]
+ }
+ return nil, nil
+}
+
+func typeStores(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ top := len(pgm.stack) - 1
+ if top < 0 {
+ return nil, nil
+ }
+ for i := range pgm.scratchSpace {
+ // We can't know what slot stacktop is being stored in, but we can at least keep the slots that are the same type as stacktop
+ if pgm.scratchSpace[i] != pgm.stack[top] {
+ pgm.scratchSpace[i] = StackAny
+ }
+ }
+ return nil, nil
+}
+
+func typeLoad(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ scratchIndex, ok := getByteImm(args, 0)
+ if !ok {
+ return nil, nil
+ }
+ return nil, StackTypes{pgm.scratchSpace[scratchIndex]}
+}
+
+func typeLoads(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes) {
+ scratchType := pgm.scratchSpace[0]
+ for _, item := range pgm.scratchSpace {
+ // If all the scratch slots are one type, then we can say we are loading that type
+ if item != scratchType {
+ return nil, nil
+ }
+ }
+ return nil, StackTypes{scratchType}
+}
+
// keywords or "pseudo-ops" handle parsing and assembling special asm language
// constructs like 'addr' We use an OpSpec here, but it's somewhat degenerate,
// since they don't have opcodes or eval functions. But it does need a lot of
@@ -1353,10 +1412,11 @@ func (ops *OpStream) assemble(text string) error {
// enough to report follow-on errors. Of course, we still have to
// bail out on the assembly as a whole.
spec, ok = OpsByName[AssemblerMaxVersion][opstring]
- if !ok {
+ if ok {
+ ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
+ } else {
spec, ok = keywords[opstring]
}
- ops.errorf("%s opcode was introduced in TEAL v%d", opstring, spec.Version)
}
if ok {
ops.trace("%3d: %s\t", ops.sourceLine, opstring)
@@ -1366,7 +1426,7 @@ func (ops *OpStream) assemble(text string) error {
}
args, returns := spec.Arg.Types, spec.Return.Types
if spec.OpDetails.refine != nil {
- nargs, nreturns := spec.OpDetails.refine(ops.known, fields[1:])
+ nargs, nreturns := spec.OpDetails.refine(&ops.known, fields[1:])
if nargs != nil {
args = nargs
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 8c719bbfe..478eb7c21 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -25,6 +25,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -376,18 +378,17 @@ pushbytes 0x012345
dup
dup
ed25519verify_bare
-pushbytes 0x012345
-dup
-bn256_add
-dup
-bn256_scalar_mul
-dup
-bn256_pairing
-`
+pushbytes 0x4321
+pushbytes 0x77
+replace2 2
+pushbytes 0x88
+pushint 1
+replace3
+` + pairingNonsense
const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a"
-const v7Compiled = v6Compiled + "5c005d018120af060180070123456789abcd4949050198800301234549498480030123454999499a499b"
+const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984800243218001775c0280018881015d" + pairingCompiled
var nonsense = map[uint64]string{
1: v1Nonsense,
@@ -455,6 +456,19 @@ func TestAssemble(t *testing.T) {
}
}
+var experiments = []uint64{fidoVersion, pairingVersion}
+
+// TestExperimental forces a conscious choice to promote "experimental" opcode
+// groups. This will fail when we increment vFuture's LogicSigVersion. If we had
+// intended to release the opcodes, they should have been removed from
+// `experiments`.
+func TestExperimental(t *testing.T) {
+ futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
+ for _, v := range experiments {
+ require.Equal(t, futureV, v)
+ }
+}
+
func TestAssembleAlias(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -2400,6 +2414,29 @@ func TestSetBitTypeCheck(t *testing.T) {
testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
}
+func TestScratchTypeCheck(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ // All scratch slots should start as uint64
+ testProg(t, "load 0; int 1; +", AssemblerMaxVersion)
+ // Check load and store accurately using the scratch space
+ testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, Expect{5, "+ arg 0..."})
+ // Loads should know the type it's loading if all the slots are the same type
+ testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, Expect{3, "btoi arg 0..."})
+ // Loads doesn't know the type when slot types vary
+ testProg(t, "byte 0x01; store 0; int 1; loads; btoi", AssemblerMaxVersion)
+ // Stores should only set slots to StackAny if they are not the same type as what is being stored
+ testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, Expect{8, "+ arg 0..."})
+ // Scratch space should reset after hitting a label in dead code
+ testProg(t, "byte 0x01; store 0; b label1; label1:; load 0; int 1; +", AssemblerMaxVersion)
+ // But it should reset to StackAny, not uint64
+ testProg(t, "int 1; store 0; b label1; label1:; load 0; btoi", AssemblerMaxVersion)
+ // Callsubs should also reset the scratch space
+ testProg(t, "callsub A; load 0; btoi; return; A: byte 0x01; store 0; retsub", AssemblerMaxVersion)
+ // But the scratch space should still be tracked after the callsub
+ testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, Expect{5, "btoi arg 0..."})
+}
+
func TestCoverAsm(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index a8badee03..16dabf605 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -144,6 +144,8 @@ var opDocByName = map[string]string{
"extract_uint16": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
"extract_uint32": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
"extract_uint64": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
+ "replace2": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)",
+ "replace3": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)",
"base64_decode": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
"balance": "get balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.",
@@ -229,6 +231,7 @@ var opcodeImmediateNotes = map[string]string{
"substring": "{uint8 start position} {uint8 end position}",
"extract": "{uint8 start position} {uint8 length}",
+ "replace2": "{uint8 start position}",
"dig": "{uint8 depth}",
"cover": "{uint8 depth}",
"uncover": "{uint8 depth}",
@@ -326,7 +329,7 @@ func OpDocExtra(opName string) string {
// opcodes consecutively, even if their opcode values are not.
var OpGroups = map[string][]string{
"Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "bn256_add", "bn256_scalar_mul", "bn256_pairing", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"},
- "Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "base64_decode", "json_ref"},
+ "Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"},
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
@@ -356,7 +359,8 @@ func OpAllCosts(opName string) []VerCost {
if !ok {
continue
}
- cost := spec.OpDetails.docCost()
+ argLen := len(spec.Arg.Types)
+ cost := spec.OpDetails.docCost(argLen)
if costs == nil || cost != costs[len(costs)-1].Cost {
costs = append(costs, VerCost{v, v, cost})
} else {
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index f55b61bc9..dae291b6e 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -1033,13 +1033,13 @@ func (cx *EvalContext) step() error {
return nil
}
-// oneBlank is a boring stack provided to deets.Cost during checkStep. It is
+// blankStack is a boring stack provided to deets.Cost during checkStep. It is
// good enough to allow Cost() to not crash. It would be incorrect to provide
// this stack if there were linear cost opcodes before backBranchEnabledVersion,
// because the static cost would be wrong. But then again, a static cost model
// wouldn't work before backBranchEnabledVersion, so such an opcode is already
// unacceptable. TestLinearOpcodes ensures this.
-var oneBlank = []stackValue{{Bytes: []byte{}}}
+var blankStack = make([]stackValue, 5)
func (cx *EvalContext) checkStep() (int, error) {
cx.instructionStarts[cx.pc] = true
@@ -1055,7 +1055,7 @@ func (cx *EvalContext) checkStep() (int, error) {
if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) {
return 0, fmt.Errorf("%s program ends short of immediate values", spec.Name)
}
- opcost := deets.Cost(cx.program, cx.pc, oneBlank)
+ opcost := deets.Cost(cx.program, cx.pc, blankStack)
if opcost <= 0 {
return 0, fmt.Errorf("%s reported non-positive cost", spec.Name)
}
@@ -3486,6 +3486,63 @@ func opExtract3(cx *EvalContext) error {
return err
}
+func replaceCarefully(original []byte, replacement []byte, start uint64) ([]byte, error) {
+ if start > uint64(len(original)) {
+ return nil, fmt.Errorf("replacement start %d beyond length: %d", start, len(original))
+ }
+ end := start + uint64(len(replacement))
+ if end < start { // impossible because it is the sum of two AVM value lengths
+ return nil, fmt.Errorf("replacement end exceeds uint64")
+ }
+
+ if end > uint64(len(original)) {
+ return nil, fmt.Errorf("replacement end %d beyond original length: %d", end, len(original))
+ }
+
+ // Do NOT use the append trick to make a copy here.
+ // append(nil, []byte{}...) would return a nil slice, which means "not a bytearray" to the AVM.
+ clone := make([]byte, len(original))
+ copy(clone[:start], original)
+ copy(clone[start:end], replacement)
+ copy(clone[end:], original[end:])
+ return clone, nil
+}
+
+func opReplace2(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // replacement
+ prev := last - 1 // original
+
+ replacement := cx.stack[last].Bytes
+ start := uint64(cx.program[cx.pc+1])
+ original := cx.stack[prev].Bytes
+
+ bytes, err := replaceCarefully(original, replacement, start)
+ if err != nil {
+ return err
+ }
+ cx.stack[prev].Bytes = bytes
+ cx.stack = cx.stack[:last]
+ return err
+}
+
+func opReplace3(cx *EvalContext) error {
+ last := len(cx.stack) - 1 // replacement
+ prev := last - 1 // start
+ pprev := prev - 1 // original
+
+ replacement := cx.stack[last].Bytes
+ start := cx.stack[prev].Uint
+ original := cx.stack[pprev].Bytes
+
+ bytes, err := replaceCarefully(original, replacement, start)
+ if err != nil {
+ return err
+ }
+ cx.stack[pprev].Bytes = bytes
+ cx.stack = cx.stack[:prev]
+ return err
+}
+
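A note on the "append trick" comment above: in Go, appending zero elements to a nil slice yields nil, and a nil Bytes value reads as "not a bytearray" to the AVM. A minimal standalone sketch of the gotcha that replaceCarefully avoids (illustrative only, not part of the patch):

    package main

    import "fmt"

    func main() {
        // Appending an empty slice to nil returns nil unchanged.
        bad := append([]byte(nil), []byte{}...)
        fmt.Println(bad == nil) // true

        // make + copy, as replaceCarefully does, always yields a
        // non-nil (possibly zero-length) slice.
        good := make([]byte, 0)
        fmt.Println(good == nil) // false
    }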
// We convert the bytes manually here because we need to accept "short" byte arrays.
// A single byte is a legal uint64 decoded this way.
func convertBytesToInt(x []byte) uint64 {
@@ -4764,56 +4821,52 @@ func opBase64Decode(cx *EvalContext) error {
cx.stack[last].Bytes = bytes
return nil
}
-func hasDuplicateKeys(jsonText []byte) (bool, map[string]json.RawMessage, error) {
+
+func isPrimitiveJSON(jsonText []byte) (bool, error) {
dec := json.NewDecoder(bytes.NewReader(jsonText))
- parsed := make(map[string]json.RawMessage)
t, err := dec.Token()
if err != nil {
- return false, nil, err
+ return false, err
}
t, ok := t.(json.Delim)
if !ok || t.(json.Delim).String() != "{" {
- return false, nil, fmt.Errorf("only json object is allowed")
- }
- for dec.More() {
- var value json.RawMessage
- // get JSON key
- key, err := dec.Token()
- if err != nil {
- return false, nil, err
- }
- // end of json
- if key == '}' {
- break
- }
- // decode value
- err = dec.Decode(&value)
- if err != nil {
- return false, nil, err
- }
- // check for duplicates
- if _, ok := parsed[key.(string)]; ok {
- return true, nil, nil
- }
- parsed[key.(string)] = value
+ return true, nil
}
- return false, parsed, nil
+ return false, nil
}
func parseJSON(jsonText []byte) (map[string]json.RawMessage, error) {
- if !json.Valid(jsonText) {
+ // parse JSON with Algorand's standard JSON library
+ var parsed map[interface{}]json.RawMessage
+ err := protocol.DecodeJSON(jsonText, &parsed)
+
+ if err != nil {
+ // if the error was caused by duplicate keys
+ if strings.Contains(err.Error(), "cannot decode into a non-pointer value") {
+ return nil, fmt.Errorf("invalid json text, duplicate keys not allowed")
+ }
+
+ // if the error was caused by input that is not a JSON object
+ if strings.Contains(err.Error(), "read map - expect char '{' but got char") {
+ return nil, fmt.Errorf("invalid json text, only json object is allowed")
+ }
+
return nil, fmt.Errorf("invalid json text")
}
- // parse json text and check for duplicate keys
- hasDuplicates, parsed, err := hasDuplicateKeys(jsonText)
- if hasDuplicates {
- return nil, fmt.Errorf("invalid json text, duplicate keys not allowed")
- }
- if err != nil {
- return nil, fmt.Errorf("invalid json text, %v", err)
+
+ // check whether any keys are not strings
+ stringMap := make(map[string]json.RawMessage)
+ for k, v := range parsed {
+ key, ok := k.(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid json text")
+ }
+ stringMap[key] = v
}
- return parsed, nil
+
+ return stringMap, nil
}
+
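The rewritten parseJSON infers the failure mode by matching protocol.DecodeJSON's error text; TestProtocolParseDuplicateErrMsg below pins the duplicate-key string. A sketch of the two mappings, assuming only the error strings quoted in the code above:

    // illustrative only; imports: fmt, encoding/json, protocol
    func demoParseJSONErrors() {
        var parsed map[interface{}]json.RawMessage
        // Duplicate keys: the codec reports "cannot decode into a non-pointer
        // value", which parseJSON maps to "duplicate keys not allowed".
        err := protocol.DecodeJSON([]byte(`{"k": 1, "k": 2}`), &parsed)
        fmt.Println(err)
        // Non-object input: the codec reports "read map - expect char '{' but
        // got char ...", mapped to "only json object is allowed".
        err = protocol.DecodeJSON([]byte(`[1, 2, 3]`), &parsed)
        fmt.Println(err)
    }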
func opJSONRef(cx *EvalContext) error {
// get json key
last := len(cx.stack) - 1
@@ -4837,6 +4890,17 @@ func opJSONRef(cx *EvalContext) error {
var stval stackValue
_, ok = parsed[key]
if !ok {
+ // if the key is not found, first check whether the JSON text is the null value
+ // by checking whether it is a primitive JSON value. Any other primitive
+ // (or array) would have thrown an error previously during `parseJSON`.
+ isPrimitive, err := isPrimitiveJSON(cx.stack[last].Bytes)
+ if err == nil && isPrimitive {
+ err = fmt.Errorf("invalid json text, only json object is allowed")
+ }
+ if err != nil {
+ return fmt.Errorf("error while parsing JSON text, %v", err)
+ }
+
return fmt.Errorf("key %s not found in JSON text", key)
}
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index e861883a3..2a9f329c2 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -2345,6 +2345,8 @@ func TestReturnTypes(t *testing.T) {
"substring": "substring 0 2",
"extract_uint32": ": byte 0x0102030405; int 1; extract_uint32",
"extract_uint64": ": byte 0x010203040506070809; int 1; extract_uint64",
+ "replace2": ": byte 0x0102030405; byte 0x0809; replace2 2",
+ "replace3": ": byte 0x0102030405; int 2; byte 0x0809; replace3",
"asset_params_get": "asset_params_get AssetUnitName",
"asset_holding_get": "asset_holding_get AssetBalance",
"gtxns": "gtxns Sender",
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 9e54e7b29..acc3d7808 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -20,6 +20,7 @@ import (
"encoding/base64"
"encoding/binary"
"encoding/hex"
+ "encoding/json"
"fmt"
"strconv"
"strings"
@@ -114,7 +115,7 @@ func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams {
ep := defaultEvalParamsWithVersion(txn, LogicVersion)
ep.Trace = nil // Tracing would slow down benchmarks
clone := *ep.Proto
- bigBudget := 1000 * 1000 // Allow long run times
+ bigBudget := 2 * 1000 * 1000 // Allow long run times
clone.LogicSigMaxCost = uint64(bigBudget)
clone.MaxAppProgramCost = bigBudget
ep.Proto = &clone
@@ -260,6 +261,22 @@ func TestWrongProtoVersion(t *testing.T) {
}
}
+func TestBlankStackSufficient(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ t.Parallel()
+ for v := 0; v <= LogicVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ spec := opsByOpcode[v][i]
+ argLen := len(spec.Arg.Types)
+ blankStackLen := len(blankStack)
+ require.GreaterOrEqual(t, blankStackLen, argLen)
+ }
+ })
+ }
+}
+
func TestSimpleMath(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -2311,6 +2328,35 @@ func TestExtractFlop(t *testing.T) {
require.Contains(t, err.Error(), "extract range beyond length of string")
}
+func TestReplace(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testAccepts(t, `byte 0x11111111; byte 0x2222; replace2 0; byte 0x22221111; ==`, 7)
+ testAccepts(t, `byte 0x11111111; byte 0x2222; replace2 1; byte 0x11222211; ==`, 7)
+ testAccepts(t, `byte 0x11111111; byte 0x2222; replace2 2; byte 0x11112222; ==`, 7)
+ testPanics(t, `byte 0x11111111; byte 0x2222; replace2 3; byte 0x11112222; ==`, 7)
+
+ testAccepts(t, `byte 0x11111111; int 0; byte 0x2222; replace3; byte 0x22221111; ==`, 7)
+ testAccepts(t, `byte 0x11111111; int 1; byte 0x2222; replace3; byte 0x11222211; ==`, 7)
+ testAccepts(t, `byte 0x11111111; int 2; byte 0x2222; replace3; byte 0x11112222; ==`, 7)
+ testPanics(t, `byte 0x11111111; int 3; byte 0x2222; replace3; byte 0x11112222; ==`, 7)
+
+ testAccepts(t, `byte 0x11111111; int 0; byte 0x; replace3; byte 0x11111111; ==`, 7)
+ testAccepts(t, `byte 0x11111111; int 1; byte 0x; replace3; byte 0x11111111; ==`, 7)
+ testAccepts(t, `byte 0x11111111; int 2; byte 0x; replace3; byte 0x11111111; ==`, 7)
+ testAccepts(t, `byte 0x11111111; int 3; byte 0x; replace3; byte 0x11111111; ==`, 7)
+ // unusual, perhaps, but legal: inserts 0 bytes at len(A)
+ testAccepts(t, `byte 0x11111111; int 4; byte 0x; replace3; byte 0x11111111; ==`, 7)
+ // but can't replace a byte there
+ testPanics(t, `byte 0x11111111; int 4; byte 0x22; replace3; len`, 7)
+ // even a zero byte replace fails after len(A)
+ testPanics(t, `byte 0x11111111; int 5; byte 0x; replace3; len`, 7)
+
+ testAccepts(t, `byte 0x; byte 0x; replace2 0; byte 0x; ==`, 7)
+ testAccepts(t, `byte 0x; int 0; byte 0x; replace3; byte 0x; ==`, 7)
+}
+
func TestLoadStore(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -3564,9 +3610,89 @@ func BenchmarkCheckx5(b *testing.B) {
}
}
+func makeNestedKeys(depth int) string {
+ if depth <= 0 {
+ return `{\"key0\":\"value0\"}`
+ }
+ return fmt.Sprintf(`{\"key0\":%s}`, makeNestedKeys(depth-1))
+}
+
+func BenchmarkJsonRef(b *testing.B) {
+ // base case
+ oneKey := `{\"key0\":\"value0\"}`
+
+ // many keys
+ sb := &strings.Builder{}
+ sb.WriteString(`{`)
+ for i := 0; i < 100; i++ {
+ sb.WriteString(fmt.Sprintf(`\"key%d\":\"value%d\",`, i, i))
+ }
+ sb.WriteString(`\"key100\":\"value100\"}`) // so there is no trailing comma
+ manyKeys := sb.String()
+
+ lenOfManyKeys := len(manyKeys)
+ longTextLen := lenOfManyKeys - 36 // subtract the JSON scaffolding so the samples match manyKeys in length
+ mediumText := strings.Repeat("a", longTextLen/2)
+ longText := strings.Repeat("a", longTextLen)
+
+ // medium key
+ mediumKey := fmt.Sprintf(`{\"%s\":\"value\",\"key1\":\"value2\"}`, mediumText)
+
+ // long key
+ longKey := fmt.Sprintf(`{\"%s\":\"value\",\"key1\":\"value2\"}`, longText)
+
+ // long value
+ longValue := fmt.Sprintf(`{\"key0\":\"%s\",\"key1\":\"value2\"}`, longText)
+
+ // nested keys
+ nestedKeys := makeNestedKeys(200)
+
+ jsonLabels := []string{"one key", "many keys", "medium key", "long key", "long val", "nested keys"}
+ jsonSamples := []string{oneKey, manyKeys, mediumKey, longKey, longValue, nestedKeys}
+ keys := [][]string{
+ {"key0"},
+ {"key0", "key100"},
+ {mediumText, "key1"},
+ {longText, "key1"},
+ {"key0", "key1"},
+ {"key0"},
+ }
+ valueFmt := [][]string{
+ {"JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONString", "JSONString"},
+ {"JSONObject"},
+ }
+ benches := [][]string{}
+ for i, label := range jsonLabels {
+ for j, key := range keys[i] {
+ prog := fmt.Sprintf(`byte "%s"; byte "%s"; json_ref %s; pop;`, jsonSamples[i], key, valueFmt[i][j])
+
+ // indicate long key
+ keyLabel := key
+ if len(key) > 50 {
+ keyLabel = fmt.Sprintf("long_key_%d", len(key))
+ }
+
+ benches = append(benches, []string{
+ fmt.Sprintf("%s_%s", label, keyLabel), // label
+ "", // prefix
+ prog, // operation
+ "int 1", // suffix
+ })
+ }
+ }
+ for _, bench := range benches {
+ b.Run(bench[0], func(b *testing.B) {
+ benchmarkOperation(b, bench[1], bench[2], bench[3])
+ })
+ }
+}
+
func TestEvalVersions(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
text := `intcblock 1
@@ -4534,6 +4660,8 @@ func TestLog(t *testing.T) {
source string
runMode runMode
errContains string
+ // For cases where assembly errors, we manually put in the bytes
+ assembledBytes []byte
}{
{
source: fmt.Sprintf(`byte "%s"; log; int 1`, strings.Repeat("a", maxLogSize+1)),
@@ -4561,9 +4689,10 @@ func TestLog(t *testing.T) {
runMode: modeApp,
},
{
- source: `load 0; log`,
- errContains: "log arg 0 wanted []byte but got uint64",
- runMode: modeApp,
+ source: `load 0; log`,
+ errContains: "log arg 0 wanted []byte but got uint64",
+ runMode: modeApp,
+ assembledBytes: []byte{byte(ep.Proto.LogicSigVersion), 0x34, 0x00, 0xb0},
},
{
source: `byte "a logging message"; log; int 1`,
@@ -4575,7 +4704,11 @@ func TestLog(t *testing.T) {
for _, c := range failCases {
switch c.runMode {
case modeApp:
- testApp(t, c.source, ep, c.errContains)
+ if c.assembledBytes == nil {
+ testApp(t, c.source, ep, c.errContains)
+ } else {
+ testAppBytes(t, c.assembledBytes, ep, c.errContains)
+ }
default:
testLogic(t, c.source, AssemblerMaxVersion, ep, c.errContains, c.errContains)
}
@@ -4780,42 +4913,55 @@ int ` + fmt.Sprintf("%d", 20_000-3-6) + ` // base64_decode cost = 6 (68 bytes ->
testAccepts(t, source, fidoVersion)
}
-func TestHasDuplicateKeys(t *testing.T) {
+func TestIsPrimitive(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
testCases := []struct {
text []byte
}{
{
- text: []byte(`{"key0": "1","key0": "2", "key1":1}`),
+ text: []byte(`null`),
+ },
+ {
+ text: []byte(`[1, 2, 3]`),
},
{
- text: []byte(`{"key0": "1","key1": [1], "key0":{"key2": "a"}}`),
+ text: []byte(`2`),
},
}
for _, s := range testCases {
- hasDuplicates, _, err := hasDuplicateKeys(s.text)
+ isPrimitive, err := isPrimitiveJSON(s.text)
require.Nil(t, err)
- require.True(t, hasDuplicates)
+ require.True(t, isPrimitive)
}
- noDuplicates := []struct {
+ notPrimitive := []struct {
text []byte
}{
{
text: []byte(`{"key0": "1","key1": "2", "key2":3}`),
},
{
- text: []byte(`{"key0": "1","key1": [{"key0":1,"key0":2},{"key0":1,"key0":2}], "key2":{"key5": "a","key5": "b"}}`),
+ text: []byte(`{}`),
},
}
- for _, s := range noDuplicates {
- hasDuplicates, _, err := hasDuplicateKeys(s.text)
+ for _, s := range notPrimitive {
+ primitive, err := isPrimitiveJSON(s.text)
require.Nil(t, err)
- require.False(t, hasDuplicates)
+ require.False(t, primitive)
}
}
+func TestProtocolParseDuplicateErrMsg(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ text := `{"key0": "algo", "key0": "algo"}`
+ var parsed map[string]json.RawMessage
+ err := protocol.DecodeJSON([]byte(text), &parsed)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "cannot decode into a non-pointer value")
+}
+
func TestOpJSONRef(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -4953,6 +5099,33 @@ func TestOpJSONRef(t *testing.T) {
==`,
previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
},
+ // JavaScript MAX_SAFE_INTEGER
+ {
+ source: `byte "{\"maxSafeInt\": 9007199254740991}";
+ byte "maxSafeInt";
+ json_ref JSONUint64;
+ int 9007199254740991;
+ ==`,
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
+ // maximum uint64
+ {
+ source: `byte "{\"maxUint64\": 18446744073709551615}";
+ byte "maxUint64";
+ json_ref JSONUint64;
+ int 18446744073709551615;
+ ==`,
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
+ // larger-than-uint64s are allowed if not requested
+ {
+ source: `byte "{\"maxUint64\": 18446744073709551616, \"smallUint64\": 0}";
+ byte "smallUint64";
+ json_ref JSONUint64;
+ int 0;
+ ==`,
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
}
for _, s := range testCases {
@@ -4978,6 +5151,9 @@ func TestOpJSONRef(t *testing.T) {
pass, _, err := EvalContract(ops.Program, 0, 888, ep)
require.NoError(t, err)
require.True(t, pass)
+
+ // reset pooled budget for new "app call"
+ *ep.PooledApplicationBudget = ep.Proto.MaxAppProgramCost
}
failedCases := []struct {
@@ -5096,11 +5272,11 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "key4";
json_ref JSONObject;
- byte "key40"
+ byte "key40";
json_ref JSONString
`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {12, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {13, "unknown opcode: json_ref"}},
},
{
source: `byte "[1,2,3]";
@@ -5142,6 +5318,25 @@ func TestOpJSONRef(t *testing.T) {
error: "error while parsing JSON text, invalid json text, only json object is allowed",
previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
},
+ {
+ source: `byte "{noquotes: \"shouldn't work\"}";
+ byte "noquotes";
+ json_ref JSONString;
+ byte "shouldn't work";
+ ==`,
+ error: "error while parsing JSON text, invalid json text",
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
+ // max uint64 + 1 should fail
+ {
+ source: `byte "{\"tooBig\": 18446744073709551616}";
+ byte "tooBig";
+ json_ref JSONUint64;
+ int 1;
+ return`,
+ error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
+ previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ },
}
for _, s := range failedCases {
@@ -5170,6 +5365,9 @@ func TestOpJSONRef(t *testing.T) {
require.False(t, pass)
require.Error(t, err)
require.EqualError(t, err, s.error)
+
+ // reset pooled budget for new "app call"
+ *ep.PooledApplicationBudget = ep.Proto.MaxAppProgramCost
}
}
diff --git a/data/transactions/logic/jsonspec.md b/data/transactions/logic/jsonspec.md
index e747e40f5..817c01ece 100644
--- a/data/transactions/logic/jsonspec.md
+++ b/data/transactions/logic/jsonspec.md
@@ -12,7 +12,7 @@ Additional specifications used by **json_ref** that are extensions to the RFC715
- The byte order mark (BOM), "\uFEFF", is not allowed at the beginning of a JSON text
- Raw non-unicode characters not accepted
-## Invalid JSON text
+### Invalid JSON text
```json
\uFEFF{"key0": 1}
@@ -105,10 +105,6 @@ Comment blocks are not accepted.
```
```json
-{"key0": "algo"}/*comment*/
-```
-
-```json
{"key0": [1,/*comment*/,3]}
```
diff --git a/data/transactions/logic/jsonspec_test.go b/data/transactions/logic/jsonspec_test.go
index 7ab173a0f..3ebe131e8 100644
--- a/data/transactions/logic/jsonspec_test.go
+++ b/data/transactions/logic/jsonspec_test.go
@@ -58,9 +58,6 @@ func TestParseComments(t *testing.T) {
text := `{"key0": /*comment*/"algo"}`
_, err := parseJSON([]byte(text))
require.Error(t, err)
- text = `{"key0": "algo"}/*comment*/`
- _, err = parseJSON([]byte(text))
- require.Error(t, err)
text = `{"key0": [1,/*comment*/,3]}`
_, err = parseJSON([]byte(text))
require.Error(t, err)
@@ -210,7 +207,6 @@ func TestParseKeys(t *testing.T) {
text = `{1: 1}`
_, err = parseJSON([]byte(text))
require.Error(t, err)
-
}
func TestParseFileEncoding(t *testing.T) {
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index 1d978db8e..13df5e0e6 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1240,6 +1240,29 @@
},
{
"Opcode": 92,
+ "Name": "replace2",
+ "Args": "BB",
+ "Returns": "B",
+ "Size": 2,
+ "Doc": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)",
+ "ImmediateNote": "{uint8 start position}",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 93,
+ "Name": "replace3",
+ "Args": "BUB",
+ "Returns": "B",
+ "Size": 1,
+ "Doc": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)",
+ "Groups": [
+ "Byte Array Manipulation"
+ ]
+ },
+ {
+ "Opcode": 94,
"Name": "base64_decode",
"Args": "B",
"Returns": "B",
@@ -1252,7 +1275,7 @@
]
},
{
- "Opcode": 93,
+ "Opcode": 95,
"Name": "json_ref",
"Args": "BB",
"Returns": ".",
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 1d26d437a..af554fb22 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -62,9 +62,9 @@ const createdResourcesVersion = 6
// field.
const appAddressAvailableVersion = 7
-// EXPERIMENTAL. These should be revisited whenever a new LogiSigVersion is
+// EXPERIMENTAL. These should be revisited whenever a new LogicSigVersion is
// moved from vFuture to a new consensus version. If they remain unready, bump
-// their version.
+// their version, and fixup TestAssemble() in assembler_test.go.
const fidoVersion = 7 // base64, json, secp256r1
const pairingVersion = 7 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
@@ -72,6 +72,7 @@ type linearCost struct {
baseCost int
chunkCost int
chunkSize int
+ depth int
}
// divideCeilUnsafely provides `math.Ceil` semantics using integer division. The technique avoids slower floating point operations as suggested in https://stackoverflow.com/a/2745086.
@@ -84,22 +85,24 @@ func (lc *linearCost) compute(stack []stackValue) int {
cost := lc.baseCost
if lc.chunkCost != 0 && lc.chunkSize != 0 {
// Uses divideCeilUnsafely rather than (len/size) to match how Ethereum discretizes hashing costs.
- cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1].Bytes), lc.chunkSize)
+ cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1-lc.depth].Bytes), lc.chunkSize)
}
return cost
}
-func (lc *linearCost) docCost() string {
+func (lc *linearCost) docCost(argLen int) string {
if *lc == (linearCost{}) {
return ""
}
if lc.chunkCost == 0 {
return strconv.Itoa(lc.baseCost)
}
+ idxFromStart := argLen - lc.depth - 1
+ stackArg := rune(int('A') + idxFromStart)
if lc.chunkSize == 1 {
- return fmt.Sprintf("%d + %d per byte", lc.baseCost, lc.chunkCost)
+ return fmt.Sprintf("%d + %d per byte of %c", lc.baseCost, lc.chunkCost, stackArg)
}
- return fmt.Sprintf("%d + %d per %d bytes", lc.baseCost, lc.chunkCost, lc.chunkSize)
+ return fmt.Sprintf("%d + %d per %d bytes of %c", lc.baseCost, lc.chunkCost, lc.chunkSize, stackArg)
}
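With the new depth field, a chunked cost can be charged against an argument below the top of stack. json_ref (registered later in this patch with costByLength(25, 2, 7, 1)) charges per byte of the JSON text, which sits one below the key: for its "bb:a" prototype, argLen is 2 and depth is 1, so idxFromStart is 0 and the doc renders as "25 + 2 per 7 bytes of A". A sketch of the run-time arithmetic, mirroring compute above:

    // illustrative: static cost of json_ref on an n-byte JSON text
    func jsonRefCost(n int) int {
        base, chunkCost, chunkSize := 25, 2, 7
        return base + (chunkCost*n+chunkSize-1)/chunkSize // divideCeilUnsafely
    }
    // jsonRefCost(70) == 25 + ceil(140/7) == 45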
// OpDetails records details such as non-standard costs, immediate arguments, or
@@ -118,8 +121,8 @@ type OpDetails struct {
Immediates []immediate // details of each immediate arg to opcode
}
-func (d *OpDetails) docCost() string {
- cost := d.FullCost.docCost()
+func (d *OpDetails) docCost(argLen int) string {
+ cost := d.FullCost.docCost(argLen)
if cost != "" {
return cost
}
@@ -147,7 +150,7 @@ func (d *OpDetails) docCost() string {
// both static (the program, which can be used to find the immediate values
// supplied), and dynamic (the stack, which can be used to find the run-time
// arguments supplied). Cost is used at run-time. docCost returns similar
-// information in human-reable form.
+// information in human-readable form.
func (d *OpDetails) Cost(program []byte, pc int, stack []stackValue) int {
cost := d.FullCost.compute(stack)
if cost != 0 {
@@ -214,9 +217,9 @@ func (d OpDetails) only(m runMode) OpDetails {
return clone
}
-func (d OpDetails) costByLength(initial, perChunk, chunkSize int) OpDetails {
+func (d OpDetails) costByLength(initial, perChunk, chunkSize, depth int) OpDetails {
clone := d
- clone.FullCost = costByLength(initial, perChunk, chunkSize).FullCost
+ clone.FullCost = costByLength(initial, perChunk, chunkSize, depth).FullCost
return clone
}
@@ -263,12 +266,12 @@ func costByField(immediate string, group *FieldGroup, costs []int) OpDetails {
return opd
}
-func costByLength(initial int, perChunk int, chunkSize int) OpDetails {
+func costByLength(initial, perChunk, chunkSize, depth int) OpDetails {
if initial < 1 || perChunk <= 0 || chunkSize < 1 || chunkSize > maxStringSize {
panic("bad cost configuration")
}
d := opDefault()
- d.FullCost = linearCost{initial, perChunk, chunkSize}
+ d.FullCost = linearCost{initial, perChunk, chunkSize, depth}
return d
}
@@ -443,8 +446,8 @@ var OpSpecs = []OpSpec{
{0x32, "global", opGlobal, proto(":a"), 1, field("f", &GlobalFields)},
{0x33, "gtxn", opGtxn, proto(":a"), 1, immediates("t", "f").field("f", &TxnScalarFields)},
{0x33, "gtxn", opGtxn, proto(":a"), 2, immediates("t", "f").field("f", &TxnFields).assembler(asmGtxn2)},
- {0x34, "load", opLoad, proto(":a"), 1, immediates("i")},
- {0x35, "store", opStore, proto("a:"), 1, immediates("i")},
+ {0x34, "load", opLoad, proto(":a"), 1, stacky(typeLoad, "i")},
+ {0x35, "store", opStore, proto("a:"), 1, stacky(typeStore, "i")},
{0x36, "txna", opTxna, proto(":a"), 2, immediates("f", "i").field("f", &TxnArrayFields)},
{0x37, "gtxna", opGtxna, proto(":a"), 2, immediates("t", "f", "i").field("f", &TxnArrayFields)},
// Like gtxn, but gets txn index from stack, rather than immediate arg
@@ -458,8 +461,8 @@ var OpSpecs = []OpSpec{
{0x3d, "gaids", opGaids, proto("i:i"), 4, only(modeApp)},
// Like load/store, but scratch slot taken from TOS instead of immediate
- {0x3e, "loads", opLoads, proto("i:a"), 5, opDefault()},
- {0x3f, "stores", opStores, proto("ia:"), 5, opDefault()},
+ {0x3e, "loads", opLoads, proto("i:a"), 5, stacky(typeLoads)},
+ {0x3f, "stores", opStores, proto("ia:"), 5, stacky(typeStores)},
{0x40, "bnz", opBnz, proto("i:"), 1, opBranch()},
{0x41, "bz", opBz, proto("i:"), 2, opBranch()},
@@ -490,8 +493,11 @@ var OpSpecs = []OpSpec{
{0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, opDefault()},
{0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, opDefault()},
{0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, opDefault()},
- {0x5c, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16)},
- {0x5d, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes)},
+ {0x5c, "replace2", opReplace2, proto("bb:b"), 7, immediates("s")},
+ {0x5d, "replace3", opReplace3, proto("bib:b"), 7, opDefault()},
+
+ {0x5e, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16, 0)},
+ {0x5f, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes).costByLength(25, 2, 7, 1)},
{0x60, "balance", opBalance, proto("i:i"), 2, only(modeApp)},
{0x60, "balance", opBalance, proto("a:i"), directRefEnabledVersion, only(modeApp)},
diff --git a/data/transactions/logic/pairing_test.go b/data/transactions/logic/pairing_test.go
new file mode 100644
index 000000000..75f6e2bc2
--- /dev/null
+++ b/data/transactions/logic/pairing_test.go
@@ -0,0 +1,29 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+const pairingNonsense = `
+ pushbytes 0x012345
+ dup
+ bn256_add
+ dup
+ bn256_scalar_mul
+ dup
+ bn256_pairing
+`
+
+const pairingCompiled = "80030123454999499a499b"
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index 5219dbcd4..31edc4418 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -76,7 +76,7 @@
},
{
"name": "keyword.operator.teal",
- "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|bn256_add|bn256_pairing|bn256_scalar_mul|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
+ "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|bn256_add|bn256_pairing|bn256_scalar_mul|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|replace2|replace3|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
}
]
},
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 5c67f64dd..1c63c0c8a 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -726,9 +726,9 @@ func ProgramVersion(bytecode []byte) (version uint64, length int, err error) {
// matching versions between approval and clearstate.
const syncProgramsVersion = 6
-// CheckContractVersions ensures that for v6 and higher two programs are version
-// matched, and that they are not a downgrade. If proto.AllowV4InnerAppls, then
-// no downgrades are allowed, regardless of version.
+// CheckContractVersions ensures that for syncProgramsVersion and higher, two programs are version
+// matched, and that they are not a downgrade. If either program version is
+// >= proto.MinInnerApplVersion, downgrade of that program is not allowed.
func CheckContractVersions(approval []byte, clear []byte, previous basics.AppParams, proto *config.ConsensusParams) error {
av, _, err := ProgramVersion(approval)
if err != nil {
diff --git a/ledger/.gitignore b/ledger/.gitignore
new file mode 100644
index 000000000..5def1ed41
--- /dev/null
+++ b/ledger/.gitignore
@@ -0,0 +1 @@
+catchpoints
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index b771d1532..ca4432b6a 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -237,7 +237,7 @@ func TestAccountDBRound(t *testing.T) {
numElementsPerSegment := 10
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
ctbsList, randomCtbs := randomCreatables(numElementsPerSegment)
expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
var baseAccounts lruAccounts
@@ -247,7 +247,7 @@ func TestAccountDBRound(t *testing.T) {
baseResources.init(nil, 100, 80)
for i := 1; i < 10; i++ {
var updates ledgercore.AccountDeltas
- updates, newacctsTotals, _, lastCreatableID = ledgertesting.RandomDeltasFull(20, accts, 0, lastCreatableID)
+ updates, newacctsTotals, _ = ledgertesting.RandomDeltasFull(20, accts, 0, &lastCreatableID)
totals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals)
accts = applyPartialDeltas(accts, updates)
ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs,
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index d2faf6ec7..d597dfdbc 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -441,7 +441,7 @@ func TestAcctUpdates(t *testing.T) {
checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto)
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
knownCreatables := make(map[basics.CreatableIndex]bool)
start := basics.Round(10)
@@ -452,7 +452,8 @@ func TestAcctUpdates(t *testing.T) {
var updates ledgercore.AccountDeltas
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals = ledgertesting.RandomDeltasBalancedFull(
+ 1, base, rewardLevel, &lastCreatableID)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, i-1, prevRound)
require.NoError(t, err)
@@ -2221,7 +2222,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto)
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
knownCreatables := make(map[basics.CreatableIndex]bool)
for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ {
@@ -2230,7 +2231,8 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
var updates ledgercore.AccountDeltas
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals = ledgertesting.RandomDeltasBalancedFull(
+ 1, base, rewardLevel, &lastCreatableID)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, i-1, prevRound)
require.NoError(t, err)
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index eb812937f..025c7ebfe 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -433,7 +433,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
const testCatchpointLabelsCount = 5
// lastCreatableID stores asset or app max used index to get rid of conflicts
- lastCreatableID := crypto.RandUint64() % 512
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
knownCreatables := make(map[basics.CreatableIndex]bool)
catchpointLabels := make(map[basics.Round]string)
ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
@@ -444,7 +444,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
var updates ledgercore.AccountDeltas
var totals map[basics.Address]ledgercore.AccountData
base := accts[i-1]
- updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID)
+ updates, totals = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, &lastCreatableID)
prevRound, prevTotals, err := au.LatestTotals()
require.Equal(t, i-1, prevRound)
require.NoError(t, err)
diff --git a/ledger/internal/apptxn_test.go b/ledger/internal/apptxn_test.go
index 9194055c1..94f72b0e2 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/internal/apptxn_test.go
@@ -1890,7 +1890,7 @@ func TestInnerAppVersionCalling(t *testing.T) {
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // 31 allowed inner appls. vFuture enables proto.AllowV4InnerAppls (presumed v33, below)
+ // 31 allowed inner appls. v33 lowered proto.MinInnerApplVersion
testConsensusRange(t, 31, 0, func(t *testing.T, ver int) {
dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver])
defer dl.Close()
@@ -1994,7 +1994,7 @@ itxn_submit`,
createAndOptin.ApplicationArgs = [][]byte{six.Program, six.Program}
dl.txn(&createAndOptin, "overspend") // passed the checks, but is an overspend
} else {
- // after 32 proto.AllowV4InnerAppls should be in effect, so calls and optins to v5 are ok
+ // after 32 proto.MinInnerApplVersion is lowered to 4, so calls and optins to v5 are ok
dl.txn(&call, "overspend") // it tried to execute, but test doesn't bother funding
dl.txn(&optin, "overspend") // it tried to execute, but test doesn't bother funding
optin.ForeignApps[0] = v5withv3csp // but we can't optin to a v5 if it has an old csp
@@ -2150,7 +2150,7 @@ func TestAppDowngrade(t *testing.T) {
update.ClearStateProgram = five.Program
dl.fullBlock(&update)
- // Downgrade (allowed for pre 6 programs until AllowV4InnerAppls)
+ // Downgrade (allowed for pre 6 programs until MinInnerApplVersion was lowered)
update.ClearStateProgram = four.Program
if ver <= 32 {
dl.fullBlock(update.Noted("actually a repeat of first upgrade"))
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
index 79d22d189..21240467c 100644
--- a/ledger/internal/eval_blackbox_test.go
+++ b/ledger/internal/eval_blackbox_test.go
@@ -882,7 +882,10 @@ var consensusByNumber = []protocol.ConsensusVersion{
protocol.ConsensusFuture,
}
-func TestContainsLatestVersion(t *testing.T) {
+// TestReleasedVersion ensures that the necessary tidying is done when a new
+// protocol release happens. The new version must be added to
+// consensusByNumber, and a new LogicSigVersion must be added to vFuture.
+func TestReleasedVersion(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -891,6 +894,13 @@ func TestContainsLatestVersion(t *testing.T) {
require.Len(t, config.Consensus[consensusByNumber[len(consensusByNumber)-2]].ApprovedUpgrades, 0)
// And no funny business with vFuture
require.Equal(t, protocol.ConsensusFuture, consensusByNumber[len(consensusByNumber)-1])
+
+ // Ensure that vFuture gets a new LogicSigVersion when we promote the
+ // existing one. That allows TestExperimental in the logic package to
+ // prevent unintended releases of experimental opcodes.
+ relV := config.Consensus[consensusByNumber[len(consensusByNumber)-2]].LogicSigVersion
+ futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
+ require.Equal(t, relV+1, futureV)
}
// testConsensusRange allows for running tests against a range of consensus
diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go
index 60ec1a6aa..78da7526d 100644
--- a/ledger/ledgercore/error.go
+++ b/ledger/ledgercore/error.go
@@ -37,7 +37,7 @@ func (tile TransactionInLedgerError) Error() string {
return fmt.Sprintf("transaction already in ledger: %v", tile.Txid)
}
-// LeaseInLedgerError is returned when a transaction cannot be added because it has a lease that already being used in the relavant rounds
+// LeaseInLedgerError is returned when a transaction cannot be added because it has a lease that is already being used in the relevant rounds
type LeaseInLedgerError struct {
txid transactions.Txid
lease Txlease
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 6cbc7cbce..860031519 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -187,7 +187,7 @@ func RandomAppLocalState() basics.AppLocalState {
}
// RandomFullAccountData generates a random AccountData
-func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.CreatableIndex]basics.CreatableType, lastCreatableID uint64) (basics.AccountData, map[basics.CreatableIndex]basics.CreatableType, uint64) {
+func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.CreatableIndex, assets map[basics.AssetIndex]struct{}, apps map[basics.AppIndex]struct{}) basics.AccountData {
data := RandomAccountData(rewardsLevel)
crypto.RandBytes(data.VoteID[:])
@@ -202,28 +202,26 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat
data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, createdAssetsCount)
for i := uint64(0); i < createdAssetsCount; i++ {
ap := RandomAssetParams()
- lastCreatableID++
- data.AssetParams[basics.AssetIndex(lastCreatableID)] = ap
- knownCreatables[basics.CreatableIndex(lastCreatableID)] = basics.AssetCreatable
+ *lastCreatableID++
+ data.AssetParams[basics.AssetIndex(*lastCreatableID)] = ap
+ assets[basics.AssetIndex(*lastCreatableID)] = struct{}{}
}
}
- if (crypto.RandUint64()%2) == 1 && lastCreatableID > 0 {
+ if (crypto.RandUint64()%2 == 1) && (len(assets) > 0) {
// if account owns assets
ownedAssetsCount := crypto.RandUint64()%20 + 1
data.Assets = make(map[basics.AssetIndex]basics.AssetHolding, ownedAssetsCount)
for i := uint64(0); i < ownedAssetsCount; i++ {
ah := RandomAssetHolding(false)
- aidx := crypto.RandUint64() % lastCreatableID
+ var aidx basics.AssetIndex
for {
- ctype, ok := knownCreatables[basics.CreatableIndex(aidx)]
- if !ok || ctype == basics.AssetCreatable {
+ aidx = basics.AssetIndex(crypto.RandUint64()%uint64(*lastCreatableID) + 1)
+ if _, ok := assets[aidx]; ok {
break
}
- aidx = crypto.RandUint64() % lastCreatableID
}
- data.Assets[basics.AssetIndex(aidx)] = ah
- knownCreatables[basics.CreatableIndex(aidx)] = basics.AssetCreatable
+ data.Assets[aidx] = ah
}
}
if (crypto.RandUint64() % 5) == 1 {
@@ -235,26 +233,24 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat
data.AppParams = make(map[basics.AppIndex]basics.AppParams, appParamsCount)
for i := uint64(0); i < appParamsCount; i++ {
ap := RandomAppParams()
- lastCreatableID++
- data.AppParams[basics.AppIndex(lastCreatableID)] = ap
- knownCreatables[basics.CreatableIndex(lastCreatableID)] = basics.AppCreatable
+ *lastCreatableID++
+ data.AppParams[basics.AppIndex(*lastCreatableID)] = ap
+ apps[basics.AppIndex(*lastCreatableID)] = struct{}{}
}
}
- if (crypto.RandUint64()%3) == 1 && lastCreatableID > 0 {
+ if (crypto.RandUint64()%3 == 1) && (len(apps) > 0) {
appStatesCount := crypto.RandUint64()%20 + 1
data.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState, appStatesCount)
for i := uint64(0); i < appStatesCount; i++ {
ap := RandomAppLocalState()
- aidx := crypto.RandUint64() % lastCreatableID
+ var aidx basics.AppIndex
for {
- ctype, ok := knownCreatables[basics.CreatableIndex(aidx)]
- if !ok || ctype == basics.AppCreatable {
+ aidx = basics.AppIndex(crypto.RandUint64()%uint64(*lastCreatableID) + 1)
+ if _, ok := apps[aidx]; ok {
break
}
- aidx = crypto.RandUint64() % lastCreatableID
}
data.AppLocalStates[basics.AppIndex(aidx)] = ap
- knownCreatables[basics.CreatableIndex(aidx)] = basics.AppCreatable
}
}
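The rewritten loops rejection-sample ids in [1, *lastCreatableID] until one lands in the relevant set; the +1 matters because creatable ids start at 1. The pattern, as a hypothetical helper (terminates assuming the set is non-empty and every id in it lies in [1, last], as when ids are allocated by incrementing last):

    // pickExisting retries random draws until it hits an existing id.
    func pickExisting(last uint64, exists map[uint64]struct{}) uint64 {
        for {
            id := crypto.RandUint64()%last + 1
            if _, ok := exists[id]; ok {
                return id
            }
        }
    }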
@@ -264,7 +260,8 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat
NumByteSlice: crypto.RandUint64() % 50,
}
}
- return data, knownCreatables, lastCreatableID
+
+ return data
}
// RandomAccounts generates a random set of accounts map
@@ -275,10 +272,11 @@ func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.Ac
res[RandomAddress()] = RandomAccountData(0)
}
} else {
- lastCreatableID := crypto.RandUint64() % 512
- knownCreatables := make(map[basics.CreatableIndex]basics.CreatableType)
+ lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512)
+ assets := make(map[basics.AssetIndex]struct{})
+ apps := make(map[basics.AppIndex]struct{})
for i := 0; i < niter; i++ {
- res[RandomAddress()], knownCreatables, lastCreatableID = RandomFullAccountData(0, knownCreatables, lastCreatableID)
+ res[RandomAddress()] = RandomFullAccountData(0, &lastCreatableID, assets, apps)
}
}
return res
@@ -286,18 +284,20 @@ func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.Ac
// RandomDeltas generates a random set of accounts delta
func RandomDeltas(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) {
- updates, totals, imbalance, _ = RandomDeltasImpl(niter, base, rewardsLevel, true, 0)
+ var lastCreatableID basics.CreatableIndex
+ updates, totals, imbalance =
+ RandomDeltasImpl(niter, base, rewardsLevel, true, &lastCreatableID)
return
}
// RandomDeltasFull generates a random set of accounts delta
-func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64, lastCreatableID uint64) {
- updates, totals, imbalance, lastCreatableID = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) {
+ updates, totals, imbalance = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableID)
return
}
// RandomDeltasImpl generates a random set of accounts delta
-func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64, lastCreatableID uint64) {
+func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
totals = make(map[basics.Address]ledgercore.AccountData)
@@ -309,30 +309,21 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
}
// if making a full delta then need to determine max asset/app id to get rid of conflicts
- lastCreatableID = lastCreatableIDIn
- knownCreatables := make(map[basics.CreatableIndex]basics.CreatableType)
+ assets := make(map[basics.AssetIndex]struct{})
+ apps := make(map[basics.AppIndex]struct{})
if !simple {
for _, ad := range base {
for aid := range ad.AssetParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- knownCreatables[basics.CreatableIndex(aid)] = basics.AssetCreatable
+ assets[aid] = struct{}{}
}
for aid := range ad.Assets {
- // do not check lastCreatableID since lastCreatableID is only incremented for new params
- knownCreatables[basics.CreatableIndex(aid)] = basics.AssetCreatable
+ assets[aid] = struct{}{}
}
-
for aid := range ad.AppParams {
- if uint64(aid) > lastCreatableID {
- lastCreatableID = uint64(aid)
- }
- knownCreatables[basics.CreatableIndex(aid)] = basics.AppCreatable
+ apps[aid] = struct{}{}
}
for aid := range ad.AppLocalStates {
- // do not check lastCreatableID since lastCreatableID is only incremented for new params
- knownCreatables[basics.CreatableIndex(aid)] = basics.AppCreatable
+ apps[aid] = struct{}{}
}
}
}
@@ -357,7 +348,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
} else {
- data, knownCreatables, lastCreatableID = RandomFullAccountData(rewardsLevel, knownCreatables, lastCreatableID)
+ data = RandomFullAccountData(rewardsLevel, lastCreatableID, assets, apps)
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
appResources := make(map[basics.AppIndex]ledgercore.AppResourceRecord)
@@ -442,7 +433,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
} else {
- data, knownCreatables, lastCreatableID = RandomFullAccountData(rewardsLevel, knownCreatables, lastCreatableID)
+ data = RandomFullAccountData(rewardsLevel, lastCreatableID, assets, apps)
new = ledgercore.ToAccountData(data)
updates.Upsert(addr, new)
appResources := make(map[basics.AppIndex]ledgercore.AppResourceRecord)
@@ -489,23 +480,26 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
// RandomDeltasBalanced generates a random set of accounts delta
func RandomDeltasBalanced(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) {
- updates, totals, _ = RandomDeltasBalancedImpl(niter, base, rewardsLevel, true, 0)
+ var lastCreatableID basics.CreatableIndex
+ updates, totals = RandomDeltasBalancedImpl(
+ niter, base, rewardsLevel, true, &lastCreatableID)
return
}
// RandomDeltasBalancedFull generates a random set of accounts delta
-func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, lastCreatableID uint64) {
- updates, totals, lastCreatableID = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableIDIn)
+func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) {
+ updates, totals = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableID)
return
}
// RandomDeltasBalancedImpl generates a random set of accounts delta
-func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, lastCreatableID uint64) {
+func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) {
var imbalance int64
if simple {
updates, totals, imbalance = RandomDeltas(niter, base, rewardsLevel)
} else {
- updates, totals, imbalance, lastCreatableID = RandomDeltasFull(niter, base, rewardsLevel, lastCreatableIDIn)
+ updates, totals, imbalance =
+ RandomDeltasFull(niter, base, rewardsLevel, lastCreatableID)
}
oldPool := base[testPoolAddr]
@@ -516,5 +510,5 @@ func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountD
updates.Upsert(testPoolAddr, newPool)
totals[testPoolAddr] = newPool
- return updates, totals, lastCreatableID
+ return updates, totals
}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index f766363f2..ea9954528 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -18,10 +18,12 @@ package libgoal
import (
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
+ "time"
algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
@@ -938,6 +940,30 @@ func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (r
return
}
+// VerifyParticipationKey checks in a loop whether a given participationID is installed, until the timeout has elapsed.
+func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID string) error {
+ start := time.Now()
+
+ for {
+ keysResp, err := c.GetParticipationKeys()
+ if err != nil {
+ return err
+ }
+ for _, key := range keysResp {
+ if key.Id == participationID {
+ // Installation successful.
+ return nil
+ }
+ }
+
+ if time.Since(start) > timeout {
+ return errors.New("timeout waiting for key to appear")
+ }
+
+ time.Sleep(1 * time.Second)
+ }
+}
+
// AddParticipationKey takes a participation key file and sends it to the node.
// The key will be loaded into the system when the function returns successfully.
func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) {
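VerifyParticipationKey above gives callers a way to block until the node reports a newly installed key. A hypothetical call site (the PartId field name is assumed from the generated REST model):

    resp, err := client.AddParticipationKey(keyfile)
    if err != nil {
        return err
    }
    // Poll for up to two minutes before declaring the install failed.
    if err := client.VerifyParticipationKey(2*time.Minute, resp.PartId); err != nil {
        return fmt.Errorf("key %s never appeared: %w", resp.PartId, err)
    }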
diff --git a/libgoal/participation.go b/libgoal/participation.go
index 2dbbbde98..88a1151a7 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -18,7 +18,6 @@ package libgoal
import (
"fmt"
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -27,76 +26,34 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
// chooseParticipation chooses which participation keys to use for going online
// based on the address, round number, and available participation databases
-func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part account.Participation, err error) {
- genID, err := c.GenesisID()
+func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part generated.ParticipationKey, err error) {
+ parts, err := c.ListParticipationKeys()
if err != nil {
return
}
- // Get a list of files in the participation keys directory
- keyDir := filepath.Join(c.DataDir(), genID)
- files, err := ioutil.ReadDir(keyDir)
- if err != nil {
- return
- }
- // This lambda will be used for finding the desired file.
- checkIfFileIsDesiredKey := func(file os.FileInfo, expiresAfter basics.Round) (part account.Participation, err error) {
- var handle db.Accessor
- var partCandidate account.PersistedParticipation
-
- // If it can't be a participation key database, skip it
- if !config.IsPartKeyFilename(file.Name()) {
- return
- }
-
- filename := file.Name()
-
- // Fetch a handle to this database
- handle, err = db.MakeErasableAccessor(filepath.Join(keyDir, filename))
- if err != nil {
- // Couldn't open it, skip it
- return
- }
-
- // Fetch an account.Participation from the database
- partCandidate, err = account.RestoreParticipation(handle)
- if err != nil {
- // Couldn't read it, skip it
- handle.Close()
- return
- }
- defer partCandidate.Close()
-
- // Return the Participation valid for this round that relates to the passed address
+ // Loop through each of the participation keys; pick the one that expires farthest in the future.
+ var expiry uint64 = 0
+ for _, info := range parts {
+ // Choose the Participation valid for this round that relates to the passed address
// that expires farthest in the future.
	// Note that algod will sign votes with all possible Participations, so any should work
// in the short-term.
// In the future we should allow the user to specify exactly which partkeys to register.
- if partCandidate.FirstValid <= round && round <= partCandidate.LastValid && partCandidate.Parent == address && partCandidate.LastValid > expiresAfter {
- part = partCandidate.Participation
+ if info.Key.VoteFirstValid <= uint64(round) && uint64(round) <= info.Key.VoteLastValid && info.Address == address.String() && info.Key.VoteLastValid > expiry {
+ part = info
+ expiry = part.Key.VoteLastValid
}
- return
- }
- // Loop through each of the files; pick the one that expires farthest in the future.
- var expiry basics.Round
- for _, info := range files {
- // Use above lambda so the deferred handle closure happens each loop
- partCandidate, err := checkIfFileIsDesiredKey(info, expiry)
- if err == nil && (!partCandidate.Parent.IsZero()) {
- part = partCandidate
- expiry = part.LastValid
- }
}
- if part.Parent.IsZero() {
+ if part.Address == "" {
// Couldn't find one
- err = fmt.Errorf("Couldn't find a participation key database for address %v valid at round %v in directory %v", address.GetUserAddress(), round, keyDir)
+ err = fmt.Errorf("couldn't find a participation key database for address %v valid at round %v in participation registry", address.GetUserAddress(), round)
return
}
return
@@ -117,8 +74,12 @@ func (c *Client) GenParticipationKeys(address string, firstValid, lastValid, key
}
// GenParticipationKeysTo creates a .partkey database for a given address, fills
-// it with keys, and saves it in the specified output directory.
+// it with keys, and saves it in the specified output directory. If the output
+// directory is empty, the key will be installed to the node.
func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string) (part account.Participation, filePath string, err error) {
+
+ install := outDir == ""
+
// Parse the address
parsedAddr, err := basics.UnmarshalChecksumAddress(address)
if err != nil {
@@ -127,16 +88,9 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid)
- // If output directory wasn't specified, store it in the current ledger directory.
- if outDir == "" {
- // Get the GenesisID for use in the participation key path
- var genID string
- genID, err = c.GenesisID()
- if err != nil {
- return
- }
-
- outDir = filepath.Join(c.DataDir(), genID)
+ // If we are installing, generate in the temp dir
+ if install {
+ outDir = os.TempDir()
}
// Connect to the database
partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstRound, lastRound)
@@ -152,6 +106,14 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
return
}
+	// If the key is being installed, remove the temporary partkey file afterwards.
+ if install {
+ // Explicitly ignore any errors
+ defer func(name string) {
+ _ = os.Remove(name)
+ }(partKeyPath)
+ }
+
partdb, err := db.MakeErasableAccessor(partKeyPath)
if err != nil {
return
@@ -165,79 +127,15 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution)
part = newPart.Participation
partdb.Close()
- return part, partKeyPath, err
-}
-
-// InstallParticipationKeys creates a .partkey database for a given address,
-// based on an existing database from inputfile. On successful install, it
-// deletes the input file.
-func (c *Client) InstallParticipationKeys(inputfile string) (part account.Participation, filePath string, err error) {
- proto, ok := c.consensus[protocol.ConsensusCurrentVersion]
- if !ok {
- err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion)
- return
- }
-
- // Get the GenesisID for use in the participation key path
- var genID string
- genID, err = c.GenesisID()
- if err != nil {
- return
- }
-
- outDir := filepath.Join(c.DataDir(), genID)
-
- inputdb, err := db.MakeErasableAccessor(inputfile)
- if err != nil {
- return
- }
- defer inputdb.Close()
-
- partkey, err := account.RestoreParticipationWithSecrets(inputdb)
- if err != nil {
- return
- }
-
- if partkey.Parent == (basics.Address{}) {
- err = fmt.Errorf("Cannot install partkey with missing (zero) parent address")
- return
- }
-
- newdbpath, err := participationKeysPath(outDir, partkey.Parent, partkey.FirstValid, partkey.LastValid)
- if err != nil {
- return
- }
- newdb, err := db.MakeErasableAccessor(newdbpath)
if err != nil {
return
}
- newpartkey := partkey
- newpartkey.Store = newdb
- err = newpartkey.PersistWithSecrets()
- if err != nil {
- newpartkey.Close()
- return
+ if install {
+ _, err = c.AddParticipationKey(partKeyPath)
}
-
- // After successful install, remove the input copy of the
- // partkey so that old keys cannot be recovered after they
- // are used by algod. We try to delete the data inside
- // sqlite first, so the key material is zeroed out from
- // disk blocks, but regardless of whether that works, we
- // delete the input file. The consensus protocol version
- // is irrelevant for the maxuint64 round number we pass in.
- errCh := partkey.DeleteOldKeys(basics.Round(math.MaxUint64), proto)
- err = <-errCh
- if err != nil {
- newpartkey.Close()
- return
- }
- os.Remove(inputfile)
- part = newpartkey.Participation
- newpartkey.Close()
- return part, newdbpath, nil
+ return part, partKeyPath, err
}
// ListParticipationKeys returns the available participation keys,
@@ -249,49 +147,3 @@ func (c *Client) ListParticipationKeys() (partKeyFiles generated.ParticipationKe
}
return
}
-
-// ListParticipationKeyFiles returns the available participation keys,
-// as a map from database filename to Participation key object.
-func (c *Client) ListParticipationKeyFiles() (partKeyFiles map[string]account.Participation, err error) {
- genID, err := c.GenesisID()
- if err != nil {
- return
- }
-
- // Get a list of files in the participation keys directory
- keyDir := filepath.Join(c.DataDir(), genID)
- files, err := ioutil.ReadDir(keyDir)
- if err != nil {
- return
- }
-
- partKeyFiles = make(map[string]account.Participation)
- for _, file := range files {
- // If it can't be a participation key database, skip it
- if !config.IsPartKeyFilename(file.Name()) {
- continue
- }
-
- filename := file.Name()
-
- // Fetch a handle to this database
- handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename))
- if err != nil {
- // Couldn't open it, skip it
- continue
- }
-
- // Fetch an account.Participation from the database
- part, err := account.RestoreParticipation(handle)
- if err != nil {
- // Couldn't read it, skip it
- handle.Close()
- continue
- }
-
- partKeyFiles[filename] = part.Participation
- part.Close()
- }
-
- return
-}
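
For reference, the selection above reduces to a pure function over the listed keys: among the keys whose address matches and whose vote validity window contains the target round, take the one with the greatest VoteLastValid. A self-contained sketch with local stand-in types (illustrative only; the real fields live on the generated REST model):

package main

import "fmt"

type voteKey struct{ VoteFirstValid, VoteLastValid uint64 }

type partKey struct {
	Address string
	Key     voteKey
}

// choose mirrors the chooseParticipation loop above.
func choose(parts []partKey, addr string, round uint64) (best partKey, ok bool) {
	var expiry uint64
	for _, p := range parts {
		if p.Key.VoteFirstValid <= round && round <= p.Key.VoteLastValid &&
			p.Address == addr && p.Key.VoteLastValid > expiry {
			best, expiry = p, p.Key.VoteLastValid
		}
	}
	return best, best.Address != ""
}

func main() {
	parts := []partKey{
		{Address: "A", Key: voteKey{VoteFirstValid: 1, VoteLastValid: 100}},
		{Address: "A", Key: voteKey{VoteFirstValid: 50, VoteLastValid: 500}},
	}
	best, ok := choose(parts, "A", 60)
	fmt.Println(ok, best.Key.VoteLastValid) // true 500
}
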
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index bf704cc9e..a03a9d551 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -20,7 +20,10 @@ import (
"errors"
"fmt"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -191,8 +194,98 @@ func (c *Client) SignAndBroadcastTransaction(walletHandle, pw []byte, utx transa
return c.BroadcastTransaction(stx)
}
+// generateRegistrationTransaction returns a transaction object for registering a Participation with its parent;
+// it is similar to account.Participation.GenerateRegistrationTransaction.
+func generateRegistrationTransaction(part generated.ParticipationKey, fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte) (transactions.Transaction, error) {
+ addr, err := basics.UnmarshalChecksumAddress(part.Address)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ if len(part.Key.VoteParticipationKey) != 32 {
+ return transactions.Transaction{}, fmt.Errorf("voting key is the wrong size, should be 32 but it is %d", len(part.Key.VoteParticipationKey))
+ }
+
+ var votePk [32]byte
+ copy(votePk[:], part.Key.VoteParticipationKey[:])
+
+ if len(part.Key.SelectionParticipationKey) != 32 {
+		return transactions.Transaction{}, fmt.Errorf("selection key is the wrong size, should be 32 but it is %d", len(part.Key.SelectionParticipationKey))
+ }
+
+ var selectionPk [32]byte
+ copy(selectionPk[:], part.Key.SelectionParticipationKey[:])
+
+ if part.Key.StateProofKey == nil {
+ return transactions.Transaction{}, fmt.Errorf("state proof key pointer is nil")
+ }
+
+ if len(*part.Key.StateProofKey) != len(merklesignature.Verifier{}) {
+ return transactions.Transaction{}, fmt.Errorf("state proof key is the wrong size, should be %d but it is %d", len(merklesignature.Verifier{}), len(*part.Key.StateProofKey))
+ }
+
+ var stateProofPk merklesignature.Verifier
+ copy(stateProofPk[:], (*part.Key.StateProofKey)[:])
+
+ t := transactions.Transaction{
+ Type: protocol.KeyRegistrationTx,
+ Header: transactions.Header{
+ Sender: addr,
+ Fee: fee,
+ FirstValid: txnFirstValid,
+ LastValid: txnLastValid,
+ Lease: leaseBytes,
+ },
+ KeyregTxnFields: transactions.KeyregTxnFields{
+ VotePK: votePk,
+ SelectionPK: selectionPk,
+ StateProofPK: stateProofPk,
+ },
+ }
+ t.KeyregTxnFields.VoteFirst = basics.Round(part.Key.VoteFirstValid)
+ t.KeyregTxnFields.VoteLast = basics.Round(part.Key.VoteLastValid)
+ t.KeyregTxnFields.VoteKeyDilution = part.Key.VoteKeyDilution
+
+ return t, nil
+}
+
+// MakeRegistrationTransactionWithGenesisID generates a registration transaction with the genesis ID set from the client's suggested parameters.
+func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participation, fee, txnFirstValid, txnLastValid uint64, leaseBytes [32]byte, includeStateProofKeys bool) (transactions.Transaction, error) {
+
+ // Get current round, protocol, genesis ID
+ params, err := c.SuggestedParams()
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ if !ok {
+ return transactions.Transaction{}, errors.New("unknown consensus version")
+ }
+
+ txnFirstValid, txnLastValid, err = computeValidityRounds(txnFirstValid, txnLastValid, 0, params.LastRound, cparams.MaxTxnLife)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+
+ goOnlineTx := part.GenerateRegistrationTransaction(
+ basics.MicroAlgos{Raw: fee},
+ basics.Round(txnFirstValid),
+ basics.Round(txnLastValid),
+ leaseBytes, includeStateProofKeys)
+
+ goOnlineTx.Header.GenesisID = params.GenesisID
+
+ // Check if the protocol supports genesis hash
+ if config.Consensus[protocol.ConsensusFuture].SupportGenesisHash {
+ copy(goOnlineTx.Header.GenesisHash[:], params.GenesisHash)
+ }
+
+ return goOnlineTx, nil
+}
+
// MakeUnsignedGoOnlineTx creates a transaction that will bring an address online using available participation keys
-func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participation, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) {
+func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) {
// Parse the address
parsedAddr, err := basics.UnmarshalChecksumAddress(address)
if err != nil {
@@ -217,19 +310,19 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participat
// Choose which participation keys to go online with;
// need to do this after filling in the round number.
- if part == nil {
- bestPart, err := c.chooseParticipation(parsedAddr, basics.Round(firstValid))
- if err != nil {
- return transactions.Transaction{}, err
- }
- part = &bestPart
+ part, err := c.chooseParticipation(parsedAddr, basics.Round(firstValid))
+ if err != nil {
+ return transactions.Transaction{}, err
}
parsedFrstValid := basics.Round(firstValid)
parsedLastValid := basics.Round(lastValid)
parsedFee := basics.MicroAlgos{Raw: fee}
- goOnlineTransaction := part.GenerateRegistrationTransaction(parsedFee, parsedFrstValid, parsedLastValid, leaseBytes, cparams.EnableStateProofKeyregCheck)
+ goOnlineTransaction, err := generateRegistrationTransaction(part, parsedFee, parsedFrstValid, parsedLastValid, leaseBytes)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
if cparams.SupportGenesisHash {
var genHash crypto.Digest
copy(genHash[:], params.GenesisHash)
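
generateRegistrationTransaction guards each fixed-size key copy with an explicit length check, so a malformed REST response fails loudly instead of being silently truncated or zero-padded. A runnable sketch of that validate-then-copy pattern:

package main

import "fmt"

// copyKey copies src into a fixed-size 32-byte array only after checking its
// length, the pattern used above for the vote and selection keys.
func copyKey(src []byte) ([32]byte, error) {
	var dst [32]byte
	if len(src) != len(dst) {
		return dst, fmt.Errorf("key is the wrong size, should be %d but it is %d", len(dst), len(src))
	}
	copy(dst[:], src)
	return dst, nil
}

func main() {
	if _, err := copyKey(make([]byte, 31)); err != nil {
		fmt.Println(err) // key is the wrong size, should be 32 but it is 31
	}
	k, _ := copyKey(make([]byte, 32))
	fmt.Println(len(k)) // 32
}
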
diff --git a/logging/log.go b/logging/log.go
index 527d6decb..d0384d0a8 100644
--- a/logging/log.go
+++ b/logging/log.go
@@ -157,7 +157,7 @@ type Logger interface {
EventWithDetails(category telemetryspec.Category, identifier telemetryspec.Event, details interface{})
StartOperation(category telemetryspec.Category, identifier telemetryspec.Operation) TelemetryOperation
GetTelemetrySession() string
- GetTelemetryHostName() string
+ GetTelemetryGUID() string
GetInstanceName() string
GetTelemetryURI() string
CloseTelemetry()
@@ -401,11 +401,11 @@ func (l logger) GetTelemetryVersion() string {
return l.loggerState.telemetry.telemetryConfig.Version
}
-func (l logger) GetTelemetryHostName() string {
+func (l logger) GetTelemetryGUID() string {
if !l.GetTelemetryEnabled() {
return ""
}
- return l.loggerState.telemetry.telemetryConfig.getHostName()
+ return l.loggerState.telemetry.telemetryConfig.getHostGUID()
}
func (l logger) GetInstanceName() string {
diff --git a/logging/telemetryConfig.go b/logging/telemetryConfig.go
index 0ef98e450..452202f91 100644
--- a/logging/telemetryConfig.go
+++ b/logging/telemetryConfig.go
@@ -105,13 +105,13 @@ func (cfg TelemetryConfig) Save(configPath string) error {
return err
}
-// getHostName returns the HostName for telemetry (GUID:Name -- :Name is optional if blank)
-func (cfg TelemetryConfig) getHostName() string {
- hostName := cfg.GUID
+// getHostGUID returns the Host GUID for telemetry (GUID:Name -- :Name is optional if blank)
+func (cfg TelemetryConfig) getHostGUID() string {
+ ret := cfg.GUID
if cfg.Enable && len(cfg.Name) > 0 {
- hostName += ":" + cfg.Name
+ ret += ":" + cfg.Name
}
- return hostName
+ return ret
}
// getInstanceName allows us to distinguish between multiple instances running on the same node.
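
The rename from getHostName to getHostGUID does not change the output format: the telemetry GUID, optionally suffixed with ":Name" when telemetry is enabled and a name is configured. A runnable sketch with illustrative values:

package main

import "fmt"

// hostGUID mirrors getHostGUID above.
func hostGUID(guid, name string, enabled bool) string {
	ret := guid
	if enabled && len(name) > 0 {
		ret += ":" + name
	}
	return ret
}

func main() {
	fmt.Println(hostGUID("4d3f0e47", "relay-1", true)) // 4d3f0e47:relay-1
	fmt.Println(hostGUID("4d3f0e47", "", true))        // 4d3f0e47
}
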
diff --git a/logging/telemetryhook.go b/logging/telemetryhook.go
index 1a8c29729..b74d8a447 100644
--- a/logging/telemetryhook.go
+++ b/logging/telemetryhook.go
@@ -242,7 +242,7 @@ func createElasticHook(cfg TelemetryConfig) (hook logrus.Hook, err error) {
err = fmt.Errorf("Unable to create new elastic client on '%s' using '%s:%s' : %w", cfg.URI, cfg.UserName, cfg.Password, err)
return nil, err
}
- hostName := cfg.getHostName()
+ hostName := cfg.getHostGUID()
hook, err = elogrus.NewElasticHook(client, hostName, cfg.MinLogLevel, cfg.ChainID)
if err != nil {
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index dcd3d231c..81d228324 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -191,10 +191,10 @@ const ConnectPeerEvent Event = "ConnectPeer"
// PeerEventDetails contains details for the ConnectPeerEvent
type PeerEventDetails struct {
- Address string
- HostName string
- Incoming bool
- InstanceName string
+ Address string
+ TelemetryGUID string `json:"HostName"`
+ Incoming bool
+ InstanceName string
// Endpoint is the dialed-to address, for an outgoing connection. Not being used for incoming connection.
Endpoint string `json:",omitempty"`
	// MessageDelay is the average relative message delay. Not being used for incoming connection.
@@ -206,11 +206,11 @@ const ConnectPeerFailEvent Event = "ConnectPeerFail"
// ConnectPeerFailEventDetails contains details for the ConnectPeerFailEvent
type ConnectPeerFailEventDetails struct {
- Address string
- HostName string
- Incoming bool
- InstanceName string
- Reason string
+ Address string
+ TelemetryGUID string `json:"HostName"`
+ Incoming bool
+ InstanceName string
+ Reason string
}
// DisconnectPeerEvent event
@@ -282,8 +282,8 @@ type PeersConnectionDetails struct {
type PeerConnectionDetails struct {
// Address is the IP address of the remote connected socket
Address string
- // The HostName is the TelemetryGUID passed via the X-Algorand-TelId header during the http connection handshake.
- HostName string
+	// TelemetryGUID is the telemetry GUID passed via the X-Algorand-TelId header during the http connection handshake.
+ TelemetryGUID string `json:"HostName"`
// InstanceName is the node-specific hashed instance name that was passed via X-Algorand-InstanceName header during the http connection handshake.
InstanceName string
// ConnectionDuration is the duration of the connection, in seconds.
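
Throughout these event structs the Go field is renamed to TelemetryGUID while the `json:"HostName"` tag pins the serialized name, so downstream telemetry consumers keep receiving HostName unchanged. A runnable sketch of the rename-with-wire-compatibility pattern (struct and values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// peerEventDetails stands in for the structs above: new Go field name,
// old JSON name preserved by the struct tag.
type peerEventDetails struct {
	Address       string
	TelemetryGUID string `json:"HostName"`
}

func main() {
	b, _ := json.Marshal(peerEventDetails{Address: "10.0.0.1", TelemetryGUID: "guid:node-1"})
	fmt.Println(string(b)) // {"Address":"10.0.0.1","HostName":"guid:node-1"}
}
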
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index dcad2d390..3382edee0 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -394,7 +394,7 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
roundTxnCnt: fileCfgs.RoundTransactionsCount,
round: basics.Round(0),
genesisID: genesis.ID(),
- genesisHash: crypto.HashObj(genesis),
+ genesisHash: genesis.Hash(),
poolAddr: poolAddr,
sinkAddr: sinkAddr,
}
diff --git a/network/requestTracker.go b/network/requestTracker.go
index 13cb2f205..fd78dadca 100644
--- a/network/requestTracker.go
+++ b/network/requestTracker.go
@@ -482,11 +482,11 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http.
rt.log.With("connection", "http").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate")
rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: trackedRequest.remoteHost,
- HostName: trackedRequest.otherTelemetryGUID,
- Incoming: true,
- InstanceName: trackedRequest.otherInstanceName,
- Reason: "Remote IP Connection Rate Limit",
+ Address: trackedRequest.remoteHost,
+ TelemetryGUID: trackedRequest.otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: trackedRequest.otherInstanceName,
+ Reason: "Remote IP Connection Rate Limit",
})
response.Header().Add(TooManyRequestsRetryAfterHeader, fmt.Sprintf("%d", rt.config.ConnectionsRateLimitingWindowSeconds))
response.WriteHeader(http.StatusTooManyRequests)
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 1a4070fd1..eefd3b032 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -917,7 +917,7 @@ func (wn *WebsocketNetwork) ClearHandlers() {
}
func (wn *WebsocketNetwork) setHeaders(header http.Header) {
- localTelemetryGUID := wn.log.GetTelemetryHostName()
+ localTelemetryGUID := wn.log.GetTelemetryGUID()
localInstanceName := wn.log.GetInstanceName()
header.Set(TelemetryIDHeader, localTelemetryGUID)
header.Set(InstanceNameHeader, localInstanceName)
@@ -970,11 +970,11 @@ func (wn *WebsocketNetwork) checkIncomingConnectionLimits(response http.Response
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: remoteHost,
- HostName: otherTelemetryGUID,
- Incoming: true,
- InstanceName: otherInstanceName,
- Reason: "Connection Limit",
+ Address: remoteHost,
+ TelemetryGUID: otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: otherInstanceName,
+ Reason: "Connection Limit",
})
response.WriteHeader(http.StatusServiceUnavailable)
return http.StatusServiceUnavailable
@@ -985,11 +985,11 @@ func (wn *WebsocketNetwork) checkIncomingConnectionLimits(response http.Response
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: remoteHost,
- HostName: otherTelemetryGUID,
- Incoming: true,
- InstanceName: otherInstanceName,
- Reason: "Remote IP Connection Limit",
+ Address: remoteHost,
+ TelemetryGUID: otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: otherInstanceName,
+ Reason: "Remote IP Connection Limit",
})
response.WriteHeader(http.StatusServiceUnavailable)
return http.StatusServiceUnavailable
@@ -1154,10 +1154,10 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
wn.log.With("event", "ConnectedIn").With("remote", trackedRequest.otherPublicAddr).With("local", localAddr).Infof("Accepted incoming connection from peer %s", trackedRequest.otherPublicAddr)
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent,
telemetryspec.PeerEventDetails{
- Address: trackedRequest.remoteHost,
- HostName: trackedRequest.otherTelemetryGUID,
- Incoming: true,
- InstanceName: trackedRequest.otherInstanceName,
+ Address: trackedRequest.remoteHost,
+ TelemetryGUID: trackedRequest.otherTelemetryGUID,
+ Incoming: true,
+ InstanceName: trackedRequest.otherInstanceName,
})
wn.maybeSendMessagesOfInterest(peer, nil)
@@ -1750,7 +1750,7 @@ func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() {
for _, peer := range peers {
connDetail := telemetryspec.PeerConnectionDetails{
ConnectionDuration: uint(now.Sub(peer.createTime).Seconds()),
- HostName: peer.TelemetryGUID,
+ TelemetryGUID: peer.TelemetryGUID,
InstanceName: peer.InstanceName,
}
if peer.outgoing {
@@ -2094,11 +2094,11 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
wn.log.With("event", "ConnectedOut").With("remote", addr).With("local", localAddr).Infof("Made outgoing connection to peer %v", addr)
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent,
telemetryspec.PeerEventDetails{
- Address: justHost(conn.RemoteAddr().String()),
- HostName: peer.TelemetryGUID,
- Incoming: false,
- InstanceName: peer.InstanceName,
- Endpoint: peer.GetAddress(),
+ Address: justHost(conn.RemoteAddr().String()),
+ TelemetryGUID: peer.TelemetryGUID,
+ Incoming: false,
+ InstanceName: peer.InstanceName,
+ Endpoint: peer.GetAddress(),
})
wn.maybeSendMessagesOfInterest(peer, nil)
@@ -2202,10 +2202,10 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
}
}
eventDetails := telemetryspec.PeerEventDetails{
- Address: peerAddr,
- HostName: peer.TelemetryGUID,
- Incoming: !peer.outgoing,
- InstanceName: peer.InstanceName,
+ Address: peerAddr,
+ TelemetryGUID: peer.TelemetryGUID,
+ Incoming: !peer.outgoing,
+ InstanceName: peer.InstanceName,
}
if peer.outgoing {
eventDetails.Endpoint = peer.GetAddress()
diff --git a/node/node.go b/node/node.go
index 156f8fe33..55668c1b0 100644
--- a/node/node.go
+++ b/node/node.go
@@ -166,7 +166,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.rootDir = rootDir
node.log = log.With("name", cfg.NetAddress)
node.genesisID = genesis.ID()
- node.genesisHash = crypto.HashObj(genesis)
+ node.genesisHash = genesis.Hash()
node.devMode = genesis.DevMode
if node.devMode {
@@ -195,8 +195,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
log.Errorf("Unable to create genesis directory: %v", err)
return nil, err
}
- var genalloc bookkeeping.GenesisBalances
- genalloc, err = bootstrapData(genesis, log)
+ genalloc, err := genesis.Balances()
if err != nil {
log.Errorf("Cannot load genesis allocation: %v", err)
return nil, err
@@ -313,40 +312,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
return node, err
}
-func bootstrapData(genesis bookkeeping.Genesis, log logging.Logger) (bookkeeping.GenesisBalances, error) {
- genalloc := make(map[basics.Address]basics.AccountData)
- for _, entry := range genesis.Allocation {
- addr, err := basics.UnmarshalChecksumAddress(entry.Address)
- if err != nil {
- log.Errorf("Cannot parse genesis addr %s: %v", entry.Address, err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- _, present := genalloc[addr]
- if present {
- err = fmt.Errorf("repeated allocation to %s", entry.Address)
- log.Error(err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- genalloc[addr] = entry.State
- }
-
- feeSink, err := basics.UnmarshalChecksumAddress(genesis.FeeSink)
- if err != nil {
- log.Errorf("Cannot parse fee sink addr %s: %v", genesis.FeeSink, err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- rewardsPool, err := basics.UnmarshalChecksumAddress(genesis.RewardsPool)
- if err != nil {
- log.Errorf("Cannot parse rewards pool addr %s: %v", genesis.RewardsPool, err)
- return bookkeeping.GenesisBalances{}, err
- }
-
- return bookkeeping.MakeTimestampedGenesisBalances(genalloc, feeSink, rewardsPool, genesis.Timestamp), nil
-}
-
// Config returns a copy of the node's Local configuration
func (node *AlgorandFullNode) Config() config.Local {
return node.config
@@ -403,12 +368,7 @@ func (node *AlgorandFullNode) Start() {
// startMonitoringRoutines starts the internal monitoring routines used by the node.
func (node *AlgorandFullNode) startMonitoringRoutines() {
- node.monitoringRoutinesWaitGroup.Add(3)
-
- // PKI TODO: Remove this with #2596
- // Periodically check for new participation keys
- go node.checkForParticipationKeys(node.ctx.Done())
-
+ node.monitoringRoutinesWaitGroup.Add(2)
go node.txPoolGaugeThread(node.ctx.Done())
// Delete old participation keys
go node.oldKeyDeletionThread(node.ctx.Done())
@@ -781,24 +741,6 @@ func ensureParticipationDB(genesisDir string, log logging.Logger) (account.Parti
return account.MakeParticipationRegistry(accessor, log)
}
-// Reload participation keys from disk periodically
-func (node *AlgorandFullNode) checkForParticipationKeys(done <-chan struct{}) {
- defer node.monitoringRoutinesWaitGroup.Done()
- ticker := time.NewTicker(node.config.ParticipationKeysRefreshInterval)
- for {
- select {
- case <-ticker.C:
- err := node.loadParticipationKeys()
- if err != nil {
- node.log.Errorf("Could not refresh participation keys: %v", err)
- }
- case <-done:
- ticker.Stop()
- return
- }
- }
-}
-
// ListParticipationKeys returns all participation keys currently installed on the node
func (node *AlgorandFullNode) ListParticipationKeys() (partKeys []account.ParticipationRecord, err error) {
return node.accountManager.Registry().GetAll(), nil
@@ -916,7 +858,7 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc
}
defer inputdb.Close()
- partkey, err := account.RestoreParticipation(inputdb)
+ partkey, err := account.RestoreParticipationWithSecrets(inputdb)
if err != nil {
return account.ParticipationID{}, err
}
@@ -927,20 +869,19 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc
}
// Tell the AccountManager about the Participation (dupes don't matter) so we ignore the return value
- _ = node.accountManager.AddParticipation(partkey)
+ added := node.accountManager.AddParticipation(partkey)
+ if !added {
+ return account.ParticipationID{}, fmt.Errorf("ParticipationRegistry: cannot register duplicate participation key")
+ }
- err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration)
+ err = insertStateProofToRegistry(partkey, node)
if err != nil {
return account.ParticipationID{}, err
}
- newFilename := config.PartKeyFilename(partkey.ID().String(), uint64(partkey.FirstValid), uint64(partkey.LastValid))
- newFullyQualifiedFilename := filepath.Join(outDir, filepath.Base(newFilename))
-
- err = os.Rename(fullyQualifiedTempFile, newFullyQualifiedFilename)
-
+ err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration)
if err != nil {
- return account.ParticipationID{}, nil
+ return account.ParticipationID{}, err
}
return partkey.ID(), nil
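
InstallParticipationKey now treats a duplicate install as an error instead of discarding the AddParticipation result. A minimal sketch of that guard using a hypothetical in-memory registry (the real AccountManager similarly returns false for duplicates):

package main

import (
	"errors"
	"fmt"
)

// registry is a hypothetical stand-in for the participation registry.
type registry struct{ seen map[string]bool }

// add reports whether id was newly registered; false means a duplicate.
func (r *registry) add(id string) bool {
	if r.seen[id] {
		return false
	}
	r.seen[id] = true
	return true
}

func install(r *registry, id string) (string, error) {
	if !r.add(id) {
		return "", errors.New("cannot register duplicate participation key")
	}
	return id, nil
}

func main() {
	r := &registry{seen: map[string]bool{}}
	fmt.Println(install(r, "PKI1")) // PKI1 <nil>
	fmt.Println(install(r, "PKI1")) // duplicate error
}
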
diff --git a/scripts/release/mule/sign/sign.sh b/scripts/release/mule/sign/sign.sh
index cb0cbf42d..e08e2d52d 100755
--- a/scripts/release/mule/sign/sign.sh
+++ b/scripts/release/mule/sign/sign.sh
@@ -68,48 +68,45 @@ cd "$PKG_DIR"
# Note the surrounding parens turn the string created by `find` into an array.
OS_TYPES=($(find . -mindepth 1 -maxdepth 1 -type d -printf '%f\n'))
for os in "${OS_TYPES[@]}"; do
- if [ "$os" = linux ]
- then
- for arch in "${ARCHS[@]}"; do
- if [ -d "$os/$arch" ]
+ for arch in "${ARCHS[@]}"; do
+ if [ -d "$os/$arch" ]
+ then
+ # Only do the subsequent operations in a subshell if the directory is not empty.
+ if stat -t "$os/$arch/"* > /dev/null 2>&1
then
- # Only do the subsequent operations in a subshell if the directory is not empty.
- if stat -t "$os/$arch/"* > /dev/null 2>&1
- then
- (
- cd "$os/$arch"
-
- # Clean package directory of any previous operations.
- rm -rf hashes* *.sig *.asc *.asc.gz
-
- for file in *.tar.gz *.deb
- do
- gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$file"
- done
-
- for file in *.rpm
- do
- gpg -u rpm@algorand.com --detach-sign "$file"
- done
-
- HASHFILE="hashes_${CHANNEL}_${os}_${arch}_${VERSION}"
- md5sum *.tar.gz *.deb *.rpm >> "$HASHFILE"
- shasum -a 256 *.tar.gz *.deb *.rpm >> "$HASHFILE"
- shasum -a 512 *.tar.gz *.deb *.rpm >> "$HASHFILE"
-
- gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$HASHFILE"
- gpg -u "$SIGNING_KEY_ADDR" --clearsign "$HASHFILE"
-
- STATUSFILE="build_status_${CHANNEL}_${os}-${arch}_${VERSION}"
- if [[ -f "$STATUSFILE" ]]; then
- gpg -u "$SIGNING_KEY_ADDR" --clearsign "$STATUSFILE"
- gzip -c "$STATUSFILE.asc" > "$STATUSFILE.asc.gz"
- fi
- )
+ (
+ cd "$os/$arch"
+
+ # Clean package directory of any previous operations.
+ rm -rf hashes* *.sig *.asc *.asc.gz
+
+ for file in *.tar.gz *.deb
+ do
+ gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$file"
+ done
+
+ for file in *.rpm
+ do
+ gpg -u rpm@algorand.com --detach-sign "$file"
+ done
+
+ HASHFILE="hashes_${CHANNEL}_${os}_${arch}_${VERSION}"
+ md5sum *.tar.gz *.deb *.rpm >> "$HASHFILE"
+ shasum -a 256 *.tar.gz *.deb *.rpm >> "$HASHFILE"
+ shasum -a 512 *.tar.gz *.deb *.rpm >> "$HASHFILE"
+
+ gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$HASHFILE"
+ gpg -u "$SIGNING_KEY_ADDR" --clearsign "$HASHFILE"
+
+ STATUSFILE="build_status_${CHANNEL}_${os}-${arch}_${VERSION}"
+ if [[ -f "$STATUSFILE" ]]; then
+ gpg -u "$SIGNING_KEY_ADDR" --clearsign "$STATUSFILE"
+ gzip -c "$STATUSFILE.asc" > "$STATUSFILE.asc.gz"
fi
+ )
fi
- done
- fi
+ fi
+ done
done
echo
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 7cf8de405..fa8cfecdf 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -19,7 +19,6 @@ package pingpong
import (
"fmt"
"io/ioutil"
- "math"
"math/rand"
"os"
"path/filepath"
@@ -36,7 +35,6 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/db"
)
@@ -132,17 +130,6 @@ func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (acc
return
}
-// throttle transaction rate
-func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64) {
- localTimeDelta := time.Since(startTime)
- currentTps := float64(totalSent) / localTimeDelta.Seconds()
- if currentTps > float64(cfg.TxnPerSec) {
- sleepSec := float64(totalSent)/float64(cfg.TxnPerSec) - localTimeDelta.Seconds()
- sleepTime := time.Duration(int64(math.Round(sleepSec*1000))) * time.Millisecond
- util.NanoSleep(sleepTime)
- }
-}
-
// Prepare assets for asset transaction testing
// Step 1) Create X assets for each of the participant accounts
// Step 2) For each participant account, opt-in to assets of all other participant accounts
@@ -153,13 +140,14 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
return
}
- var startTime = time.Now()
- var totalSent uint64 = 0
resultAssetMaps = make(map[uint64]v1.AssetParams)
// optIns contains own and explicitly opted-in assets
optIns = make(map[uint64][]string)
numCreatedAssetsByAddr := make(map[string]int, len(accounts))
+
+ nextSendTime := time.Now()
+
// 1) Create X assets for each of the participant accounts
for addr := range accounts {
if addr == pps.cfg.SrcAccount {
@@ -179,6 +167,7 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
fmt.Printf("cfg.NumAsset %v, addrAccount.AssetParams %v\n", pps.cfg.NumAsset, addrAccount.AssetParams)
totalSupply := pps.cfg.MinAccountAsset * uint64(pps.cfg.NumPartAccounts) * 9 * uint64(pps.cfg.GroupSize) * uint64(pps.cfg.RefreshTime.Seconds()) / pps.cfg.TxnPerSec
+
// create assets in participant account
for i := 0; i < toCreate; i++ {
var metaLen = 32
@@ -205,14 +194,12 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
return
}
tx.Note = pps.makeNextUniqueNoteField()
+ schedule(pps.cfg.TxnPerSec, &nextSendTime)
_, err = signAndBroadcastTransaction(accounts[addr], tx, client)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err)
return
}
-
- totalSent++
- throttleTransactionRate(startTime, pps.cfg, totalSent)
}
}
@@ -255,10 +242,6 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
	// optInsByAddr tracks only explicitly opted-in assets
optInsByAddr := make(map[string]map[uint64]bool)
- // reset rate-control
- startTime = time.Now()
- totalSent = 0
-
// 2) For each participant account, opt-in up to proto.MaxAssetsPerAccount assets of all other participant accounts
for addr := range accounts {
if addr == pps.cfg.SrcAccount {
@@ -308,17 +291,14 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
}
tx.Note = pps.makeNextUniqueNoteField()
+ schedule(pps.cfg.TxnPerSec, &nextSendTime)
_, err = signAndBroadcastTransaction(accounts[addr], tx, client)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset optin failed with error %v\n", err)
return
}
- totalSent++
-
optIns[k] = append(optIns[k], addr)
optInsByAddr[addr][k] = true
-
- throttleTransactionRate(startTime, pps.cfg, totalSent)
}
}
@@ -354,10 +334,6 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
}
}
- // reset rate-control
- startTime = time.Now()
- totalSent = 0
-
// Step 3) Evenly distribute the assets across all opted-in accounts
for k, creator := range allAssets {
if !pps.cfg.Quiet {
@@ -403,14 +379,12 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie
}
}
+ schedule(pps.cfg.TxnPerSec, &nextSendTime)
_, err = signAndBroadcastTransaction(accounts[creator], tx, client)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset distribution failed with error %v\n", err)
return
}
-
- totalSent++
- throttleTransactionRate(startTime, pps.cfg, totalSent)
}
// append the asset to the result assets
resultAssetMaps[k] = assetParams[k]
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index 73ad4e4ec..db6cbb4ed 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -31,7 +31,6 @@ const ConfigFilename = "ppconfig.json"
// PpConfig defines configuration structure for
type PpConfig struct {
SrcAccount string
- DelayBetweenTxn time.Duration
RandomizeFee bool
RandomizeAmt bool
RandomizeDst bool
@@ -41,7 +40,6 @@ type PpConfig struct {
TxnPerSec uint64
NumPartAccounts uint32
RunTime time.Duration
- RestTime time.Duration
RefreshTime time.Duration
MinAccountFunds uint64
Quiet bool
@@ -71,7 +69,6 @@ type PpConfig struct {
// DefaultConfig object for Ping Pong
var DefaultConfig = PpConfig{
SrcAccount: "",
- DelayBetweenTxn: 100,
RandomizeFee: false,
RandomizeAmt: false,
RandomizeDst: false,
@@ -81,7 +78,6 @@ var DefaultConfig = PpConfig{
TxnPerSec: 200,
NumPartAccounts: 10,
RunTime: 10 * time.Second,
- RestTime: 1 * time.Hour, // Long default rest to avoid accidental DoS
RefreshTime: 10 * time.Second,
MinAccountFunds: 100000,
GroupSize: 1,
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 71d787f79..5fcde0373 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -262,6 +262,16 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequi
return
}
+// schedule waits until *nextSendTime, then advances it by one 1/tps interval.
+func schedule(tps uint64, nextSendTime *time.Time) {
+ dur := time.Until(*nextSendTime)
+ if dur > 0 {
+ time.Sleep(dur)
+ }
+
+ *nextSendTime = nextSendTime.Add(time.Second / time.Duration(tps))
+}
+
func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error {
var srcFunds, minFund uint64
var err error
@@ -272,7 +282,6 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
return err
}
- startTime := time.Now()
var totalSent uint64
	// A fee of 0 will cause the function to use the fee suggested by the network
@@ -282,12 +291,12 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
if err != nil {
return err
}
-
fmt.Printf("adjusting account balance to %d\n", minFund)
+
+ nextSendTime := time.Now()
for {
accountsAdjusted := 0
for addr, acct := range accounts {
-
if addr == pps.cfg.SrcAccount {
continue
}
@@ -307,6 +316,7 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
fmt.Printf("adjusting balance of account %v by %d\n ", addr, toSend)
}
+ schedule(cfg.TxnPerSec, &nextSendTime)
tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend)
if err != nil {
if strings.Contains(err.Error(), "broadcast queue full") {
@@ -323,7 +333,6 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
}
totalSent++
- throttleTransactionRate(startTime, cfg, totalSent)
}
accounts[cfg.SrcAccount].setBalance(srcFunds)
// wait until all the above transactions are sent, or that we have no more transactions
@@ -462,7 +471,6 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
if cfg.MaxRuntime > 0 {
endTime = time.Now().Add(cfg.MaxRuntime)
}
- restTime := cfg.RestTime
refreshTime := time.Now().Add(cfg.RefreshTime)
var nftThrottler *throttler
@@ -473,6 +481,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
lastLog := time.Now()
nextLog := lastLog.Add(logPeriod)
+ nextSendTime := time.Now()
for {
if ctx.Err() != nil {
_, _ = fmt.Fprintf(os.Stderr, "error bad context in RunPingPong: %v\n", ctx.Err())
@@ -520,7 +529,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
}
toList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
- sent, succeeded, err := pps.sendFromTo(fromList, toList, ac)
+ sent, succeeded, err := pps.sendFromTo(fromList, toList, ac, &nextSendTime)
totalSent += sent
totalSucceeded += succeeded
if err != nil {
@@ -535,16 +544,10 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
refreshTime = refreshTime.Add(cfg.RefreshTime)
}
-
- throttleTransactionRate(startTime, cfg, totalSent)
}
timeDelta := time.Since(startTime)
_, _ = fmt.Fprintf(os.Stdout, "Sent %d transactions (%d attempted) in %d seconds\n", totalSucceeded, totalSent, int(math.Round(timeDelta.Seconds())))
- if cfg.RestTime > 0 {
- _, _ = fmt.Fprintf(os.Stdout, "Pausing %d seconds before sending more transactions\n", int(math.Round(cfg.RestTime.Seconds())))
- time.Sleep(restTime)
- }
}
}
@@ -672,7 +675,7 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64,
func (pps *WorkerState) sendFromTo(
fromList, toList []string,
- client libgoal.Client,
+ client libgoal.Client, nextSendTime *time.Time,
) (sentCount, successCount uint64, err error) {
accounts := pps.accounts
cinfo := pps.cinfo
@@ -693,8 +696,6 @@ func (pps *WorkerState) sendFromTo(
*ap = p
assetsByCreator[c] = append(assetsByCreator[c], ap)
}
- lastTransactionTime := time.Now()
- timeCredit := time.Duration(0)
for i := 0; i < len(fromList); i = (i + 1) % len(fromList) {
from := fromList[i]
@@ -770,6 +771,7 @@ func (pps *WorkerState) sendFromTo(
return
}
+ schedule(cfg.TxnPerSec, nextSendTime)
sentCount++
_, sendErr = client.BroadcastTransaction(stxn)
} else {
@@ -856,6 +858,7 @@ func (pps *WorkerState) sendFromTo(
}
}
+ schedule(cfg.TxnPerSec, nextSendTime)
sentCount++
sendErr = client.BroadcastTransactionGroup(stxGroup)
}
@@ -871,30 +874,6 @@ func (pps *WorkerState) sendFromTo(
accounts[from].addBalance(fromBalanceChange)
// avoid updating the "to" account.
- // the logic here would sleep for the remaining of time to match the desired cfg.DelayBetweenTxn
- if cfg.DelayBetweenTxn > 0 {
- time.Sleep(cfg.DelayBetweenTxn)
- }
- if cfg.TxnPerSec > 0 {
- timeCredit += time.Second / time.Duration(cfg.TxnPerSec)
-
- now := time.Now()
- took := now.Sub(lastTransactionTime)
- timeCredit -= took
- if timeCredit > 0 {
- time.Sleep(timeCredit)
- timeCredit -= time.Since(now)
- } else if timeCredit < -1000*time.Millisecond {
- // cap the "time debt" to 1000 ms.
- timeCredit = -1000 * time.Millisecond
- }
- lastTransactionTime = time.Now()
-
- // since we just slept enough here, we can take it off the counters
- sentCount--
- successCount--
- // fmt.Printf("itration took %v\n", took)
- }
}
return
}
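
The schedule helper replaces the removed credit/debt throttle with a fixed cadence: sleep until the next send slot, then advance the slot by one 1/tps interval. A runnable sketch of the pacing (assumes tps > 0, which the callers guarantee):

package main

import (
	"fmt"
	"time"
)

// schedule mirrors the pingpong helper above: wait for *next, then advance it.
func schedule(tps uint64, next *time.Time) {
	if d := time.Until(*next); d > 0 {
		time.Sleep(d)
	}
	*next = next.Add(time.Second / time.Duration(tps))
}

func main() {
	next := time.Now()
	start := time.Now()
	for i := 0; i < 10; i++ {
		schedule(100, &next) // pace sends at ~100 TPS
	}
	fmt.Printf("10 sends paced in %v (expect just under 100ms)\n", time.Since(start))
}
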
diff --git a/test/e2e-go/cli/goal/expect/pingpongTest.exp b/test/e2e-go/cli/goal/expect/pingpongTest.exp
index 40aec03c8..99fb9a3ee 100644
--- a/test/e2e-go/cli/goal/expect/pingpongTest.exp
+++ b/test/e2e-go/cli/goal/expect/pingpongTest.exp
@@ -51,28 +51,28 @@ proc pingpongTest { TEST_ALGO_DIR TEST_DATA_DIR} {
set pingpong_duration 5
- set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 5 --minaccount 100000000"
- set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(5_mediumops_mediumhash) "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(8_bigops_mediumhash) "--appprogops 300 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
-
- set pingpongArray(10_payment_transaction) "--tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --rest 0 --refresh 10 --numaccounts 50"
- set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000"
- set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000"
- set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000"
- set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --rest 0 --refresh 10 --numaccounts 50"
+ set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 5 --minaccount 100000000"
+ set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(5_mediumops_mediumhash) "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(8_bigops_mediumhash) "--appprogops 300 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+
+ set pingpongArray(10_payment_transaction) "--tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --refresh 10 --numaccounts 50"
+ set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --numaccounts 10 --refresh 10 --mf=1000"
+ set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --numaccounts 10 --refresh 10 --mf=1000"
+ set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000"
+ set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --refresh 10 --numaccounts 50"
foreach index [array names pingpongArray] {
diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
index a151ec2e2..9cf58310a 100644
--- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go
+++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
@@ -60,8 +60,8 @@ func registerParticipationAndWait(t *testing.T, client libgoal.Client, part acco
sAccount := part.Address().String()
sWH, err := client.GetUnencryptedWalletHandle()
require.NoError(t, err)
- goOnlineTx, err := client.MakeUnsignedGoOnlineTx(sAccount, &part, txParams.LastRound+1, txParams.LastRound+1, txParams.Fee, [32]byte{})
- require.NoError(t, err)
+ goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, txParams.Fee, txParams.LastRound+1, txParams.LastRound+1, [32]byte{}, true)
+ assert.NoError(t, err)
require.Equal(t, sAccount, goOnlineTx.Src().String())
onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx)
require.NoError(t, err)
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index c89ca262f..a09b566a7 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -177,11 +177,18 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
partKeyFirstValid := uint64(0)
partKeyValidityPeriod := uint64(10000)
partKeyLastValid := partKeyFirstValid + partKeyValidityPeriod
+
+ maxTxnLife := consensus[protocol.ConsensusVersion("shortpartkeysprotocol")].MaxTxnLife
+
+ if partKeyLastValid > maxTxnLife {
+ partKeyLastValid = maxTxnLife
+ }
+
partkeyResponse, _, err := client.GenParticipationKeys(newAccount, partKeyFirstValid, partKeyLastValid, 0)
a.NoError(err, "rest client should be able to add participation key to new account")
a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account")
// account uses part key to go online
- goOnlineTx, err := client.MakeUnsignedGoOnlineTx(newAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{})
+ goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, partKeyFirstValid, partKeyLastValid, [32]byte{}, true)
a.NoError(err, "should be able to make go online tx")
a.Equal(newAccount, goOnlineTx.Src().String(), "go online response should echo queried account")
onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, goOnlineTx)
@@ -290,7 +297,8 @@ func TestAccountGoesOnlineForShortPeriod(t *testing.T) {
a.NoError(err, "rest client should be able to add participation key to new account")
a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account")
// account uses part key to go online
- goOnlineTx, err := client.MakeUnsignedGoOnlineTx(newAccount, &partkeyResponse, partKeyFirstValid, partKeyLastValid, transactionFee, [32]byte{})
+ goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, partKeyFirstValid, partKeyLastValid, [32]byte{}, true)
+ a.NoError(err)
a.Equal(goOnlineTx.KeyregTxnFields.StateProofPK.IsEmpty(), false, "stateproof key should not be zero")
a.NoError(err, "should be able to make go online tx")
a.Equal(newAccount, goOnlineTx.Src().String(), "go online response should echo queried account")
diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
index 18bdec369..45ac6c97c 100644
--- a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
+++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
@@ -39,6 +39,17 @@ import (
"github.com/algorand/go-algorand/util/db"
)
+// TestOverlappingParticipationKeys is a test that "overlaps" participation keys across
+// various nodes. Keys are installed in a rotating fashion across the nodes, where
+// nodeIdx = ((Network Round - 1) Mod 10) selects a node from an
+// "array" of nodes similar to {Node1, Node2, Node3}, etc. The Mod 10 simply pulls the
+// last "digit" from the number:
+// Round: 13 -> 13 - 1 = 12 -> 12 Mod 10 -> 2 -> Node3 with nodeIdx == 2
+//
+// The keys are overlapped in the sense that a key registered to a node
+// "overlaps" with other installed keys that are also valid, meaning there might be
+// PKI 1 (valid 3-15), PKI 2 (valid 13-25), and PKI 3 (valid 23-35) all installed
+// on the same node.
func TestOverlappingParticipationKeys(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
@@ -50,6 +61,7 @@ func TestOverlappingParticipationKeys(t *testing.T) {
shortPartKeysProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
shortPartKeysProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
// keys round = current - 2 * (2 * 1) (see selector.go)
+ // --> return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback))
// new keys must exist at least 4 rounds prior use
shortPartKeysProtocol.SeedLookback = 2
shortPartKeysProtocol.SeedRefreshInterval = 1
@@ -68,17 +80,10 @@ func TestOverlappingParticipationKeys(t *testing.T) {
defer fixture.Shutdown()
accountsNum := len(fixture.NodeDataDirs())
- for _, dataDir := range fixture.NodeDataDirs() {
- cfg, err := config.LoadConfigFromDisk(dataDir)
- a.NoError(err)
- cfg.ParticipationKeysRefreshInterval = 500 * time.Millisecond
- err = cfg.SaveToDisk(dataDir)
- a.NoError(err)
- }
genesis, err := bookkeeping.LoadGenesisFromFile(filepath.Join(fixture.PrimaryDataDir(), "genesis.json"))
a.NoError(err)
- genesisHash := crypto.HashObj(genesis)
+ genesisHash := genesis.Hash()
rootKeys := make(map[int]*account.Root)
regTransactions := make(map[int]transactions.SignedTxn)
lastRound := uint64(39) // check 3 rounds of keys rotations
@@ -89,10 +94,23 @@ func TestOverlappingParticipationKeys(t *testing.T) {
continue
}
acctIdx := (round - 1) % 10
+
+	// Prepare the registration keys ahead of time. Note that the + 10 below is there because keys rotate across nodes Mod 10.
+
+	// These variables control the rounds during which the transaction will be valid.
+	// They also determine the name of the file produced, EXCEPT that
+	// prepareParticipationKey() will add 2 to txStartRound for the filename,
+	// so the file for round 1 will be 3.15.
+	// For round 11 (the next round that Mod 10 will index to 1), the filename will be
+	// 13.25, which results in a 2-round overlap.
txStartRound := round
txEndRound := txStartRound + 10 + 4
+	// The registration variables here control the rounds during which the participation key will actually be valid.
+	// For round 1, that means rounds 1-16 (one round of overlap).
+	// For round 11 (the next round that Mod 10 will index to 1), that means rounds 11-26.
regStartRound := round
regEndRound := regStartRound + 11 + 4
+
err = prepareParticipationKey(a, &fixture, acctIdx, txStartRound, txEndRound, regStartRound, regEndRound, genesisHash, rootKeys, regTransactions, config.Consensus[protocol.ConsensusCurrentVersion])
a.NoError(err)
}
@@ -100,17 +118,39 @@ func TestOverlappingParticipationKeys(t *testing.T) {
fixture.Start()
currentRound := uint64(0)
fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.NC)
+
+ // ******** IMPORTANT ********
+ // It is CRITICAL that this for loop NOT BLOCK.
+ // The loop assumes that it stays current with the round of the network.
+ // Remember: this test runs while the network is advancing rounds in parallel.
+ // If the test blocks for more than a couple of seconds, the network round count will advance
+ // past the "currentRound" variable. The "addParticipationKey" function will then
+ // NOT install the participation key in time for the shortened SeedLookback, resulting
+ // in a network stall and a test failure.
for {
err := fixture.WaitForRoundWithTimeout(currentRound + 1)
a.NoError(err)
+
+ // A sanity check that the round of the network matches our
+ // currentRound variable
+ sts, err := fixture.GetAlgodClientForController(fixture.NC).Status()
+ a.NoError(err, "the network stalled, see the test comments and review node.log in each node's data directory for details.")
+ a.Equal(sts.LastRound, currentRound+1)
+
currentRound++
if (currentRound-1)%10 < uint64(accountsNum) {
acctIdx := (currentRound - 1) % 10
+
+ // Add two here because the filenames were stored with a +2 offset (see above)
startRound := currentRound + 2 // +2 and -2 below to balance, start/end must match in part key file name
endRound := startRound + 10 + 4 - 2
+
regStartRound := currentRound
regEndRound := regStartRound + 11 + 4
+ // This cannot block! (See above.)
+ // We pull the files from disk according to their start/end round filenames,
+ // install them, and send out a registration transaction
pk, err := addParticipationKey(a, &fixture, acctIdx, startRound, endRound, regTransactions)
a.NoError(err)
t.Logf("[.] Round %d, Added reg key for node %d range [%d..%d] %s\n", currentRound, acctIdx, regStartRound, regEndRound, hex.EncodeToString(pk[:8]))
@@ -128,17 +168,20 @@ func TestOverlappingParticipationKeys(t *testing.T) {
func addParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, startRound, endRound uint64, regTransactions map[int]transactions.SignedTxn) (crypto.OneTimeSignatureVerifier, error) {
dataDir := fixture.NodeDataDirs()[acctNum]
nc := fixture.GetNodeControllerForDataDir(dataDir)
- genesisDir, err := nc.GetGenesisDir()
partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", startRound, endRound))
- partKeyNameTarget := filepath.Join(genesisDir, config.PartKeyFilename("Wallet", startRound, endRound))
- // make the rename in the background to ensure it won't take too long. We have ~4 rounds to complete this.
- go os.Rename(partKeyName, partKeyNameTarget)
+ // Adding a key can take more than a couple of seconds; this function must not
+ // block, so we wrap the call in a goroutine
+ go func() {
+ clientController := fixture.GetLibGoalClientFromNodeController(nc)
+ _, err := clientController.AddParticipationKey(partKeyName)
+ a.NoError(err)
+ }()
signedTxn := regTransactions[int(startRound-2)]
a.NotEmpty(signedTxn.Sig)
- _, err = fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn)
+ _, err := fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn)
a.NoError(err)
return signedTxn.Txn.KeyregTxnFields.VotePK, err
}
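
For reference, the rotation arithmetic described in the comments above can be sketched as a standalone Go program. This is a minimal illustration, not part of the change: only the (round - 1) Mod 10 indexing, the two validity windows, and the +2 filename offset come from the test; the helper name rotationWindows is hypothetical.

package main

import "fmt"

// rotationWindows reproduces the arithmetic in TestOverlappingParticipationKeys:
// (round - 1) Mod 10 selects the node, the keyreg transaction is valid over
// [round, round+14], the participation key over [round, round+15], and the
// part key file name carries a +2 offset on the start round.
func rotationWindows(round uint64) (nodeIdx, txStart, txEnd, regStart, regEnd, fileStart, fileEnd uint64) {
	nodeIdx = (round - 1) % 10
	txStart, txEnd = round, round+10+4    // validity window of the keyreg transaction
	regStart, regEnd = round, round+11+4  // validity window of the participation key
	fileStart, fileEnd = txStart+2, txEnd // prepareParticipationKey adds 2 for the filename
	return
}

func main() {
	for _, r := range []uint64{1, 11, 13} {
		n, ts, te, rs, re, fs, fe := rotationWindows(r)
		fmt.Printf("round %2d -> node %d, tx [%d..%d], reg [%d..%d], file %d.%d\n",
			r, n, ts, te, rs, re, fs, fe)
	}
}

Round 1 produces file 3.15 with registration window 1-16; round 11 produces file 13.25, giving the 2 round overlap the comments describe.
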
diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go
index 126b5acf0..06b392856 100644
--- a/test/e2e-go/features/participation/participationExpiration_test.go
+++ b/test/e2e-go/features/participation/participationExpiration_test.go
@@ -31,7 +31,7 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
-func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string) {
+func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string, includeStateProofs bool) {
a := require.New(fixtures.SynchronizedTest(t))
pClient := fixture.GetLibGoalClientForNamedNode("Primary")
@@ -84,7 +84,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f
a.Equal(sAccount, partkeyResponse.Parent.String())
// account uses part key to go online
- goOnlineTx, err := sClient.MakeUnsignedGoOnlineTx(sAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{})
+ goOnlineTx, err := sClient.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, 0, 0, [32]byte{}, includeStateProofs)
a.NoError(err)
a.Equal(sAccount, goOnlineTx.Src().String())
@@ -191,7 +191,7 @@ func TestParticipationAccountsExpirationFuture(t *testing.T) {
fixture.Start()
defer fixture.Shutdown()
- testExpirationAccounts(t, &fixture, basics.Offline, "future")
+ testExpirationAccounts(t, &fixture, basics.Offline, "future", true)
}
// TestParticipationAccountsExpirationNonFuture tests that sending a transaction to an account with
@@ -214,5 +214,5 @@ func TestParticipationAccountsExpirationNonFuture(t *testing.T) {
fixture.Start()
defer fixture.Shutdown()
- testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29))
+ testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29), false)
}
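
As context for the change above, a hedged sketch of how the new includeStateProofs flag is threaded through testExpirationAccounts. The registerFn type is a hypothetical stand-in for the client call (MakeRegistrationTransactionWithGenesisID); only the boolean flag and the two protocol/flag pairings come from the diff.

package expiration

import "fmt"

// registerFn stands in for the libgoal registration call; the real method
// returns an unsigned transaction rather than just an error.
type registerFn func(includeStateProofKeys bool) error

// run mirrors the two updated call sites: the "future" consensus test
// registers with state proof keys, while the ConsensusV29 test does not.
func run(register registerFn) error {
	cases := []struct {
		protocolCheck         string
		includeStateProofKeys bool
	}{
		{"future", true},
		{"ConsensusV29", false}, // stand-in for string(protocol.ConsensusV29)
	}
	for _, tc := range cases {
		if err := register(tc.includeStateProofKeys); err != nil {
			return fmt.Errorf("%s: %w", tc.protocolCheck, err)
		}
	}
	return nil
}
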
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index d12715beb..be2ff60ff 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -18,6 +18,8 @@ package transactions
import (
"fmt"
+ "io/ioutil"
+ "os"
"path/filepath"
"testing"
@@ -86,7 +88,7 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
a.NoError(err, "should be no errors when creating partkeys")
a.Equal(initiallyOffline, partkeyResponse.Address().String(), "successful partkey creation should echo account")
- goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(initiallyOffline, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
+ goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(initiallyOffline, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
a.NoError(err, "should be able to make go online tx")
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err, "should be able to get unencrypted wallet handle")
@@ -168,13 +170,20 @@ func TestCloseOnError(t *testing.T) {
// get the current round for partkey creation
_, curRound := fixture.GetBalanceAndRound(initiallyOnline)
+ tempDir, err := ioutil.TempDir(os.TempDir(), "test-close-on-error")
+ require.NoError(t, err)
+ defer os.RemoveAll(tempDir)
+
+ var partkeyFile string
+ _, partkeyFile, err = client.GenParticipationKeysTo(initiallyOffline, 0, curRound+1000, 0, tempDir)
+
// make a participation key for initiallyOffline
- _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
+ _, err = client.AddParticipationKey(partkeyFile)
a.NoError(err)
// check duplicate keys does not crash
- _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0)
- errMsg := fmt.Sprintf("ParticipationKeys exist for the range 0 to %d", curRound+1000)
- a.Equal(errMsg, err.Error())
+ _, err = client.AddParticipationKey(partkeyFile)
+ a.Error(err)
+ a.Contains(err.Error(), "cannot register duplicate participation key")
// check lastValid < firstValid does not crash
_, _, err = client.GenParticipationKeys(initiallyOffline, curRound+1001, curRound+1000, 0)
expected := fmt.Sprintf("FillDBWithParticipationKeys: firstValid %d is after lastValid %d", int(curRound+1001), int(curRound+1000))
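
The duplicate-key handling above replaces key regeneration with an explicit generate-then-install flow. Below is a minimal sketch of that flow, assuming a client exposing the GenParticipationKeysTo and AddParticipationKey methods seen in the diff; partKeyClient and installFreshKey are hypothetical names, and the real libgoal methods return richer types than this interface suggests.

package partkeyflow

import (
	"fmt"
	"os"
	"strings"
)

// partKeyClient is a hypothetical stand-in for the subset of the libgoal
// client used by the test.
type partKeyClient interface {
	GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string) (id string, file string, err error)
	AddParticipationKey(file string) (id string, err error)
}

// installFreshKey mirrors the updated test: generate a key file into a
// temporary directory, install it, then verify that a second install of the
// same file is rejected cleanly rather than crashing the node.
func installFreshKey(c partKeyClient, address string, lastValid uint64) error {
	tempDir, err := os.MkdirTemp("", "partkeys")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	_, file, err := c.GenParticipationKeysTo(address, 0, lastValid, 0, tempDir)
	if err != nil {
		return err
	}
	if _, err := c.AddParticipationKey(file); err != nil {
		return err
	}
	// The second install should fail with the duplicate error the test
	// asserts on, not crash.
	if _, err := c.AddParticipationKey(file); err == nil ||
		!strings.Contains(err.Error(), "cannot register duplicate participation key") {
		return fmt.Errorf("expected duplicate participation key error, got %v", err)
	}
	return nil
}
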
diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
index 77c11ccb4..064a61596 100644
--- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
+++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go
@@ -128,7 +128,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) {
partkeyResponse, _, err := client.GenParticipationKeys(account, curRound-10, curRound+1000, 0)
a.NoError(err, "should be no errors when creating many partkeys, creation number %v", i)
a.Equal(account, partkeyResponse.Address, "successful partkey creation should echo account")
- goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
+ goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
a.NoError(err, "should be able to make go online tx %v", i)
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err, "should be able to get unencrypted wallet handle")
@@ -149,7 +149,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) {
a.NoError(err, "should be no errors when creating many partkeys, creation number %v", i)
a.Equal(account, partkeyResponse.Address, "successful partkey creation should echo account")
- goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
+ goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{})
a.NoError(err, "should be able to make go online tx %v", i)
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err, "should be able to get unencrypted wallet handle")
diff --git a/test/heapwatch/bwstart.sh b/test/heapwatch/bwstart.sh
index 3770136f7..a2fa8ef28 100644
--- a/test/heapwatch/bwstart.sh
+++ b/test/heapwatch/bwstart.sh
@@ -35,10 +35,10 @@ python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --no-he
echo "$!" > .heapWatch.pid
# TODO: other pingpong modes
-pingpong run -d "${TESTDIR}/node1" --tps 20 --rest 0 --run 0 &
+pingpong run -d "${TESTDIR}/node1" --tps 20 --run 0 &
echo "$!" > .pingpong1.pid
-pingpong run -d "${TESTDIR}/node2" --tps 20 --rest 0 --run 0 &
+pingpong run -d "${TESTDIR}/node2" --tps 20 --run 0 &
echo "$!" > .pingpong2.pid
diff --git a/test/heapwatch/start.sh b/test/heapwatch/start.sh
index cb4b37eca..82560f118 100755
--- a/test/heapwatch/start.sh
+++ b/test/heapwatch/start.sh
@@ -25,10 +25,10 @@ python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --perio
echo "$!" > .heapWatch.pid
# TODO: other pingpong modes
-pingpong run -d "${TESTDIR}/Node1" --tps 10 --rest 0 --run 0 --nftasapersecond 200 &
+pingpong run -d "${TESTDIR}/Node1" --tps 10 --run 0 --nftasapersecond 200 &
echo "$!" > .pingpong1.pid
-pingpong run -d "${TESTDIR}/Node2" --tps 10 --rest 0 --run 0 --nftasapersecond 200 &
+pingpong run -d "${TESTDIR}/Node2" --tps 10 --run 0 --nftasapersecond 200 &
echo "$!" > .pingpong2.pid
diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml
index eb585a3c9..8f5183503 100644
--- a/test/muleCI/mule.yaml
+++ b/test/muleCI/mule.yaml
@@ -10,6 +10,7 @@ agents:
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
- GOHOSTARCH=amd64
+ - FULLVERSION=${FULLVERSION}
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
@@ -53,6 +54,7 @@ agents:
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
- GOHOSTARCH=arm64
+ - FULLVERSION=${FULLVERSION}
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=arm64v8
@@ -68,6 +70,7 @@ agents:
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
- GOHOSTARCH=arm
+ - FULLVERSION=${FULLVERSION}
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=arm32v7
@@ -103,6 +106,12 @@ agents:
- $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
tasks:
+ - task: shell.Make
+ name: build.darwin-arm64
+ target: ci-build
+ - task: shell.Make
+ name: build.darwin-amd64
+ target: ci-build
- task: docker.Make
name: build.amd64
agent: cicd.ubuntu.amd64
@@ -117,7 +126,7 @@ tasks:
target: ci-build
- task: docker.Make
- name: archive.amd64
+ name: archive
agent: cicd.centos8.amd64
target: archive
@@ -133,6 +142,15 @@ tasks:
# Stash tasks
- task: stash.Stash
+ name: darwin-arm64
+ bucketName: go-algorand-ci-cache
+ stashId: ${JENKINS_JOB_CACHE_ID}/darwin-arm64
+ globSpecs:
+ - tmp/node_pkgs/**/*
+ - installer/genesis/devnet/genesis.json
+ - installer/genesis/testnet/genesis.json
+ - installer/genesis/mainnet/genesis.json
+ - task: stash.Stash
name: linux-amd64
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-amd64
@@ -196,6 +214,10 @@ tasks:
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm
- task: stash.Unstash
+ name: darwin-arm64
+ bucketName: go-algorand-ci-cache
+ stashId: ${JENKINS_JOB_CACHE_ID}/darwin-arm64
+ - task: stash.Unstash
name: packages
bucketName: go-algorand-ci-cache
stashId: ${JENKINS_JOB_CACHE_ID}/packages
@@ -211,6 +233,14 @@ tasks:
target: mule-sign
jobs:
+ build-darwin-arm64:
+ tasks:
+ - shell.Make.build.darwin-arm64
+ - stash.Stash.darwin-arm64
+ build-darwin-amd64:
+ tasks:
+ - shell.Make.build.darwin-amd64
+ - stash.Stash.darwin-amd64
build-linux-amd64:
tasks:
- docker.Make.build.amd64
@@ -223,22 +253,24 @@ jobs:
tasks:
- docker.Make.build.arm
- stash.Stash.linux-arm
- package-linux-amd64:
+ package-all:
tasks:
- stash.Unstash.linux-amd64
- stash.Unstash.linux-arm64
- stash.Unstash.linux-arm
+ - stash.Unstash.darwin-arm64
+ - stash.Unstash.darwin-amd64
- docker.Make.deb.amd64
- docker.Make.rpm.amd64
- stash.Stash.packages
- archive-linux-amd64:
+ archive-all:
tasks:
- stash.Unstash.packages
- - docker.Make.archive.amd64
+ - docker.Make.archive
package-docker:
tasks:
- docker.Make.docker-image
- sign-signer:
+ sign-all:
tasks:
- stash.Unstash.packages
- docker.Make.docker-sign
diff --git a/test/scripts/e2e_subs/goal-partkey-commands.sh b/test/scripts/e2e_subs/goal-partkey-commands.sh
new file mode 100755
index 000000000..94c831c86
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-partkey-commands.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+# errors are handled manually, so no -e
+set -x
+
+date "+$0 start %Y%m%d_%H%M%S"
+
+# Registered Account ParticipationID Last Used First round Last round
+# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000
+OUTPUT=$(goal account listpartkeys)
+# In case there are multiple keys, make sure we are checking the correct one.
+OUTPUT=$(echo "$OUTPUT"|grep "yes.*3000"|tr -s ' ')
+if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi
+if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000 ]]; then echo "Last round should be 3000 but wasn't."; exit 1; fi
+
+#Dumping participation key info from /tmp/tmpwtomya9x/net/Node...
+#
+#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ
+#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ
+#Last vote round: 3
+#Last block proposal round: 4
+#Effective first round: 0
+#Effective last round: 3000
+#First round: 0
+#Last round: 3000
+#Key dilution: 10000
+#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo=
+#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4=
+OUTPUT=$(goal account partkeyinfo)
+if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000'; then echo "Last round should have been 3000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000'; then echo "Effective last round should have been 3000."; exit 1; fi
+# 100 or 10000 due to arm64 bug
+if ! echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi
+if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi
+
+# Test multiple data directory supported
+NUM_OUTPUT_1=$(echo "$OUTPUT"|grep -c 'Participation ID')
+OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2")
+NUM_OUTPUT_2=$(echo "$OUTPUT"|grep -c 'Participation ID')
+if (( "$NUM_OUTPUT_2" <= "$NUM_OUTPUT_1" )); then echo "Should have found more participation keys when checking both data directories."; exit 1; fi
+
+# get stderr from this one
+OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
+EXPECTED_ERR="Only one data directory can be specified for this command."
+if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi
+
+create_and_fund_account () {
+ local TEMP_ACCT=$(${gcmd} account new|awk '{ print $6 }')
+ ${gcmd} clerk send -f "$INITIAL_ACCOUNT" -t "$TEMP_ACCT" -a 1000000 > /dev/null
+ echo "$TEMP_ACCT"
+}
+
+# given key should be installed and have the expected yes/no state
+# $1 - yes or no
+# $2 - a participation id
+# $3 - error message
+verify_registered_state () {
+ # look for participation ID anywhere in the partkeyinfo output
+ if ! goal account partkeyinfo | grep -q "$2"; then
+ fail_test "Key was not installed properly: $3"
+ fi
+
+ # looking for yes/no and the 8-character prefix of the participation ID in this line:
+ # yes LFMT...RHJQ 4UPT6AQC... 4 0 3000
+ if ! goal account listpartkeys | grep -q "$1.*$(echo "$2" | cut -c1-8)\.\.\."; then
+ fail_test "Unexpected key state: $3"
+ fi
+}
+
+# goal account installpartkey
+# install manually generated participation keys (do not register)
+NEW_ACCOUNT_1=$(create_and_fund_account)
+algokey part generate --keyfile test_partkey --first 0 --last 3000 --parent "$NEW_ACCOUNT_1"
+PARTICIPATION_ID_1=$(goal account installpartkey --delete-input --partkey test_partkey|awk '{ print $7 }')
+verify_registered_state "no" "$PARTICIPATION_ID_1" "goal account installpartkey"
+
+# goal account addpartkey
+# generate and install participation keys (do not register)
+NEW_ACCOUNT_2=$(create_and_fund_account)
+PARTICIPATION_ID_2=$(goal account addpartkey -a "$NEW_ACCOUNT_2" --roundFirstValid 0 --roundLastValid 3000|awk '{ print $7 }')
+verify_registered_state "no" "$PARTICIPATION_ID_2" "goal account addpartkey"
+
+# goal account renewpartkeys
+# generate, install, and register
+NEW_ACCOUNT_3=$(create_and_fund_account)
+PARTICIPATION_ID_3=$(${gcmd} account renewpartkey --roundLastValid 3000 -a "$NEW_ACCOUNT_3"|tail -n 1|awk '{ print $7 }')
+verify_registered_state "yes" "$PARTICIPATION_ID_3" "goal account renewpartkey"
+
+# goal account changeonlinestatus (--account)
+verify_registered_state "no" "$PARTICIPATION_ID_1" "goal account installpartkey (before)"
+${gcmd} account changeonlinestatus -a "$NEW_ACCOUNT_1"
+verify_registered_state "yes" "$PARTICIPATION_ID_1" "goal account installpartkey (after)"
+
+# goal account renewallpartkeys
+# goal account changeonlinestatus (--partkey)
+# These did not work as expected; whether they work at all is unclear, and we should consider removing them.
diff --git a/test/scripts/e2e_subs/goal-partkey-information.sh b/test/scripts/e2e_subs/goal-partkey-information.sh
deleted file mode 100755
index 6d5069c55..000000000
--- a/test/scripts/e2e_subs/goal-partkey-information.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env bash
-# TIMEOUT=300
-
-# errors are handled manually, so no -e
-set -x
-
-date "+$0 start %Y%m%d_%H%M%S"
-
-# Registered Account ParticipationID Last Used First round Last round
-# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000
-OUTPUT=$(goal account listpartkeys)
-OUTPUT=$(echo "$OUTPUT"|tail -n 1|tr -s ' ')
-if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi
-if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi
-if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi
-if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000 ]]; then echo "Last round should be 3000 but wasn't."; exit 1; fi
-
-#Dumping participation key info from /tmp/tmpwtomya9x/net/Node...
-#
-#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ
-#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ
-#Last vote round: 3
-#Last block proposal round: 4
-#Effective first round: 0
-#Effective last round: 3000000
-#First round: 0
-#Last round: 3000000
-#Key dilution: 10000
-#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo=
-#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4=
-OUTPUT=$(goal account partkeyinfo)
-if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi
-if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000'; then echo "Last round should have been 3000."; exit 1; fi
-if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000'; then echo "Effective last round should have been 3000."; exit 1; fi
-# 100 or 10000 due to arm64 bug
-if ! echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi
-if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi
-
-# Test multiple data directory supported
-OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2")
-OUTPUT=$(echo "$OUTPUT"|grep -c 'Participation ID')
-if [[ "$OUTPUT" != "2" ]]; then echo "Two Participation IDs should have been found."; exit 1; fi
-
-# get stderr from this one
-OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
-EXPECTED_ERR="Only one data directory can be specified for this command."
-if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/Makefile b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
new file mode 100644
index 000000000..13130934d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
@@ -0,0 +1,15 @@
+PARAMS=-w 8 -R 1 -N 4 -n 8 -H 2 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: topology.json net.json genesis.json
+
+topology.json: gen_topology.py
+ python gen_topology.py
+
+net.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json topology.json
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
new file mode 100644
index 000000000..7298256d8
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
@@ -0,0 +1,32 @@
+import json
+import os
+
+node_types = {"R":1, "N":4, "NPN":2}
+node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"}
+regions = [
+ "AWS-US-EAST-2",
+ "AWS-US-WEST-2",
+ "AWS-EU-CENTRAL-1",
+ "AWS-EU-WEST-2",
+ "AWS-AP-SOUTHEAST-1",
+ "AWS-AP-SOUTHEAST-2"
+]
+
+network = "alphanet"
+
+host_elements = []
+region_count = len(regions)
+for node_type in node_types.keys():
+ node_count = node_types[node_type]
+ region_size = node_size[node_type]
+ for i in range(node_count):
+ host = {}
+ node_name = node_type + str(i + 1) + "-" + network
+ region = regions[i % region_count]
+ host["Name"] = node_name
+ host["Template"] = region + region_size
+ host_elements.append(host)
+
+ec2_hosts = {"Hosts": host_elements}
+with open("topology.json", "w") as f:
+ f.write(json.dumps(ec2_hosts, indent = 2) + os.linesep)
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
new file mode 100644
index 000000000..1d78dd782
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
@@ -0,0 +1,64 @@
+{
+ "NetworkName": "alphanet",
+ "VersionModifier": "",
+ "ConsensusProtocol": "alpha1",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 3000000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 6.25,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 25,
+ "Online": false
+ }
+ ],
+ "FeeSink": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/net.json b/test/testdata/deployednettemplates/recipes/alphanet/net.json
new file mode 100644
index 000000000..e75a91d29
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/net.json
@@ -0,0 +1,232 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ },
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/node.json b/test/testdata/deployednettemplates/recipes/alphanet/node.json
new file mode 100644
index 000000000..d3b429ee3
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/node.json
@@ -0,0 +1,10 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json b/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json
new file mode 100644
index 000000000..5b0a52d9d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/recipe.json b/test/testdata/deployednettemplates/recipes/alphanet/recipe.json
new file mode 100644
index 000000000..a2f88f63b
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/relay.json b/test/testdata/deployednettemplates/recipes/alphanet/relay.json
new file mode 100644
index 000000000..db8fb939d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/topology.json b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
new file mode 100644
index 000000000..8760eae20
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
@@ -0,0 +1,32 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N1-alphanet",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "N2-alphanet",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "N3-alphanet",
+ "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ },
+ {
+ "Name": "N4-alphanet",
+ "Template": "AWS-EU-WEST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN1-alphanet",
+ "Template": "AWS-US-EAST-2-m5d.4xl"
+ },
+ {
+ "Name": "NPN2-alphanet",
+ "Template": "AWS-US-WEST-2-m5d.4xl"
+ }
+ ]
+}