summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2022-10-21 15:51:12 -0400
committerGitHub <noreply@github.com>2022-10-21 15:51:12 -0400
commit99b37ac002ac53cc2ffa1814d3a515e72d3ddf7e (patch)
tree51148d1da953576931e0ebf6f92ce1dc06537ef9
parente1b890a49cf4951f2d819043630364317d9b1ccb (diff)
parentb42c4637dd8706158c6f51c973faa5f85cabd487 (diff)
Merge pull request #4683 from Algo-devops-service/relstable3.11.2v3.11.2-stable
-rw-r--r--buildnumber.dat2
-rw-r--r--cmd/goal/commands.go4
-rw-r--r--cmd/pingpong/runCmd.go17
-rw-r--r--config/localTemplate.go4
-rw-r--r--config/local_defaults.go4
-rw-r--r--crypto/batchverifier.go5
-rw-r--r--crypto/batchverifier_test.go27
-rw-r--r--daemon/algod/api/client/restClient.go74
-rw-r--r--daemon/kmd/client/client.go8
-rw-r--r--data/basics/fields_test.go165
-rw-r--r--installer/config.json.example4
-rw-r--r--ledger/accountdb.go22
-rw-r--r--ledger/accountdb_test.go177
-rw-r--r--ledger/catchpointwriter_test.go6
-rw-r--r--ledger/catchupaccessor.go121
-rw-r--r--ledger/catchupaccessor_test.go144
-rw-r--r--ledger/ledgercore/accountdata_test.go87
-rw-r--r--ledger/testing/randomAccounts.go137
-rw-r--r--ledger/testing/randomAccounts_test.go143
-rw-r--r--libgoal/libgoal.go22
-rw-r--r--shared/pingpong/accounts.go20
-rw-r--r--shared/pingpong/config.go9
-rw-r--r--test/e2e-go/features/catchup/catchpointCatchup_test.go2
-rw-r--r--test/e2e-go/restAPI/restClient_test.go6
-rw-r--r--test/reflectionhelpers/helpers.go253
-rw-r--r--test/testdata/configs/config-v24.json106
-rw-r--r--tools/debug/determaccount/main.go45
27 files changed, 1273 insertions, 341 deletions
diff --git a/buildnumber.dat b/buildnumber.dat
index 573541ac9..0cfbf0888 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-0
+2
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index c6103d259..8fb4550c0 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -29,9 +29,6 @@ import (
"github.com/spf13/cobra/doc"
"golang.org/x/crypto/ssh/terminal"
- algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
- kmdclient "github.com/algorand/go-algorand/daemon/kmd/client"
-
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -381,7 +378,6 @@ func getGoalClient(dataDir string, clientType libgoal.ClientType) (client libgoa
if err != nil {
return
}
- client.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
return
}
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index 9df0052ac..d49cea843 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -72,7 +72,8 @@ var pidFile string
var cpuprofile string
var randSeed int64
var deterministicKeys bool
-var generatedAccountsCount uint32
+var generatedAccountsCount uint64
+var generatedAccountsOffset uint64
var generatedAccountSampleMethod string
var configPath string
@@ -118,8 +119,9 @@ func init() {
runCmd.Flags().StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
runCmd.Flags().Int64Var(&randSeed, "seed", 0, "input to math/rand.Seed(), defaults to time.Now().UnixNano()")
runCmd.Flags().BoolVar(&deterministicKeys, "deterministicKeys", false, "Draw from set of netgoal-created accounts using deterministic keys")
- runCmd.Flags().Uint32Var(&generatedAccountsCount, "genaccounts", 0, "The total number of accounts pre-generated by netgoal")
- runCmd.Flags().StringVar(&generatedAccountSampleMethod, "gensamplemethod", "random", "The method of sampling from the total # of pre-generated accounts")
+ runCmd.Flags().Uint64Var(&generatedAccountsCount, "genaccounts", 0, "The total number of accounts pre-generated by netgoal")
+ runCmd.Flags().Uint64Var(&generatedAccountsOffset, "genaccountsoffset", 0, "The initial offset for sampling from the total # of pre-generated accounts")
+ runCmd.Flags().StringVar(&generatedAccountSampleMethod, "gensamplemethod", "", "The method of sampling from the total # of pre-generated accounts")
}
var runCmd = &cobra.Command{
@@ -378,16 +380,23 @@ var runCmd = &cobra.Command{
if !deterministicKeys && generatedAccountsCount > 0 {
reportErrorf("generatedAccountsCount requires deterministicKeys=true")
}
- if deterministicKeys && numAccounts > generatedAccountsCount {
+ if deterministicKeys && uint64(numAccounts) > generatedAccountsCount {
reportErrorf("numAccounts must be <= generatedAccountsCount")
}
cfg.DeterministicKeys = deterministicKeys || cfg.DeterministicKeys
if generatedAccountsCount != 0 {
cfg.GeneratedAccountsCount = generatedAccountsCount
}
+ if generatedAccountsOffset != 0 {
+ cfg.GeneratedAccountsOffset = generatedAccountsOffset
+ }
if generatedAccountSampleMethod != "" {
cfg.GeneratedAccountSampleMethod = generatedAccountSampleMethod
}
+ // check if numAccounts is greater than the length of the mnemonic list, if provided
+ if cfg.DeterministicKeys && cfg.NumPartAccounts > uint32(len(cfg.GeneratedAccountsMnemonics)) {
+ reportErrorf("numAccounts is greater than number of account mnemonics provided")
+ }
cfg.SetDefaultWeights()
err = cfg.Check()
diff --git a/config/localTemplate.go b/config/localTemplate.go
index c5535e793..ed6eb4493 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -41,7 +41,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitely, otherwise, only the most recent blocks
@@ -71,7 +71,7 @@ type Local struct {
// Logging
BaseLoggerDebugLevel uint32 `version[0]:"1" version[1]:"4"`
// if this is 0, do not produce agreement.cadaver
- CadaverSizeTarget uint64 `version[0]:"1073741824"`
+ CadaverSizeTarget uint64 `version[0]:"1073741824" version[24]:"0"`
// IncomingConnectionsLimit specifies the max number of long-lived incoming
// connections. 0 means no connections allowed. Must be non-negative.
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 4b6017050..2aa46eef1 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 23,
+ Version: 24,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 7,
@@ -31,7 +31,7 @@ var defaultLocal = Local{
BaseLoggerDebugLevel: 4,
BlockServiceCustomFallbackEndpoints: "",
BroadcastConnectionsLimit: -1,
- CadaverSizeTarget: 1073741824,
+ CadaverSizeTarget: 0,
CatchpointFileHistoryLength: 365,
CatchpointInterval: 10000,
CatchpointTracking: 0,
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
index 9a46aac4b..96cf5e850 100644
--- a/crypto/batchverifier.go
+++ b/crypto/batchverifier.go
@@ -38,6 +38,7 @@ package crypto
import "C"
import (
"errors"
+ "runtime"
"unsafe"
)
@@ -172,6 +173,10 @@ func batchVerificationImpl(messages [][]byte, publicKeys []SignatureVerifier, si
C.size_t(len(messages)),
(*C.int)(unsafe.Pointer(valid)))
+ runtime.KeepAlive(messages)
+ runtime.KeepAlive(publicKeys)
+ runtime.KeepAlive(signatures)
+
failed = make([]bool, numberOfSignatures)
for i := 0; i < numberOfSignatures; i++ {
cint := *(*C.int)(unsafe.Pointer(uintptr(valid) + uintptr(i*C.sizeof_int)))
diff --git a/crypto/batchverifier_test.go b/crypto/batchverifier_test.go
index b0dc2f0fb..7c5455703 100644
--- a/crypto/batchverifier_test.go
+++ b/crypto/batchverifier_test.go
@@ -18,6 +18,7 @@ package crypto
import (
"math/rand"
+ "runtime"
"testing"
"github.com/stretchr/testify/require"
@@ -193,3 +194,29 @@ func TestBatchVerifierIndividualResultsAllValid(t *testing.T) {
}
}
}
+
+func TestBatchVerifierGC(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const n = 128
+ for i := 0; i < 100; i++ {
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+
+ bv := MakeBatchVerifierWithHint(n)
+ var s Seed
+
+ for i := 0; i < n; i++ {
+ msg := randString()
+ RandBytes(s[:])
+ sigSecrets := GenerateSignatureSecrets(s)
+ sig := sigSecrets.Sign(msg)
+ bv.EnqueueSignature(sigSecrets.SignatureVerifier, msg, sig)
+ }
+ require.NoError(t, bv.Verify())
+
+ runtime.GC()
+ })
+ }
+
+}
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index b5d25816d..2bece32a6 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -46,16 +46,6 @@ const (
maxRawResponseBytes = 50e6
)
-// APIVersion is used to define which server side API version would be used when making http requests to the server
-type APIVersion string
-
-const (
- // APIVersionV1 suggests that the RestClient would use v1 calls whenever it's available for the given request.
- APIVersionV1 APIVersion = "v1"
- // APIVersionV2 suggests that the RestClient would use v2 calls whenever it's available for the given request.
- APIVersionV2 APIVersion = "v2"
-)
-
// rawRequestPaths is a set of paths where the body should not be urlencoded
var rawRequestPaths = map[string]bool{
"/v1/transactions": true,
@@ -91,27 +81,18 @@ func (e HTTPError) Error() string {
// RestClient manages the REST interface for a calling user.
type RestClient struct {
- serverURL url.URL
- apiToken string
- versionAffinity APIVersion
+ serverURL url.URL
+ apiToken string
}
// MakeRestClient is the factory for constructing a RestClient for a given endpoint
func MakeRestClient(url url.URL, apiToken string) RestClient {
return RestClient{
- serverURL: url,
- apiToken: apiToken,
- versionAffinity: APIVersionV1,
+ serverURL: url,
+ apiToken: apiToken,
}
}
-// SetAPIVersionAffinity sets the client affinity to use a specific version of the API
-func (client *RestClient) SetAPIVersionAffinity(affinity APIVersion) (previousAffinity APIVersion) {
- previousAffinity = client.versionAffinity
- client.versionAffinity = affinity
- return
-}
-
// filterASCII filter out the non-ascii printable characters out of the given input string.
// It's used as a security qualifier before adding network provided data into an error message.
// The function allows only characters in the range of [32..126], which excludes all the
@@ -251,31 +232,13 @@ func (client RestClient) post(response interface{}, path string, request interfa
// the StatusResponse includes data like the consensus version and current round
// Not supported
func (client RestClient) Status() (response generatedV2.NodeStatusResponse, err error) {
- switch client.versionAffinity {
- case APIVersionV2:
- err = client.get(&response, "/v2/status", nil)
- default:
- var nodeStatus v1.NodeStatus
- err = client.get(&nodeStatus, "/v1/status", nil)
- if err == nil {
- response = fillNodeStatusResponse(nodeStatus)
- }
- }
+ err = client.get(&response, "/v2/status", nil)
return
}
// WaitForBlock returns the node status after waiting for the given round.
func (client RestClient) WaitForBlock(round basics.Round) (response generatedV2.NodeStatusResponse, err error) {
- switch client.versionAffinity {
- case APIVersionV2:
- err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d/", round), nil)
- default:
- var nodeStatus v1.NodeStatus
- err = client.get(&nodeStatus, fmt.Sprintf("/v1/status/wait-for-block-after/%d/", round), nil)
- if err == nil {
- response = fillNodeStatusResponse(nodeStatus)
- }
- }
+ err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d/", round), nil)
return
}
@@ -302,17 +265,7 @@ func fillNodeStatusResponse(nodeStatus v1.NodeStatus) generatedV2.NodeStatusResp
// blocks on the node end
// Not supported
func (client RestClient) StatusAfterBlock(blockNum uint64) (response generatedV2.NodeStatusResponse, err error) {
- switch client.versionAffinity {
- case APIVersionV2:
- err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d", blockNum), nil)
- default:
- var nodeStatus v1.NodeStatus
- err = client.get(&nodeStatus, fmt.Sprintf("/v1/status/wait-for-block-after/%d", blockNum), nil)
- if err == nil {
- response = fillNodeStatusResponse(nodeStatus)
- }
- }
-
+ err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d", blockNum), nil)
return
}
@@ -546,16 +499,9 @@ func (client RestClient) Block(round uint64) (response v1.Block, err error) {
// RawBlock gets the encoded, raw msgpack block for the given round
func (client RestClient) RawBlock(round uint64) (response []byte, err error) {
- switch client.versionAffinity {
- case APIVersionV2:
- var blob Blob
- err = client.getRaw(&blob, fmt.Sprintf("/v2/blocks/%d", round), rawFormat{Format: "msgpack"})
- response = blob
- default:
- var raw v1.RawBlock
- err = client.getRaw(&raw, fmt.Sprintf("/v1/block/%d", round), rawblockParams{1})
- response = raw
- }
+ var blob Blob
+ err = client.getRaw(&blob, fmt.Sprintf("/v2/blocks/%d", round), rawFormat{Format: "msgpack"})
+ response = blob
return
}
diff --git a/daemon/kmd/client/client.go b/daemon/kmd/client/client.go
index ee0c5f131..413e3e8d7 100644
--- a/daemon/kmd/client/client.go
+++ b/daemon/kmd/client/client.go
@@ -25,14 +25,6 @@ const (
timeoutSecs = 120
)
-// APIVersion is used to define which server side API version would be used when making http requests to the server
-type APIVersion string
-
-const (
- // APIVersionV1 suggests that the RestClient would use v1 calls whenever it's available for the given request.
- APIVersionV1 APIVersion = "v1"
-)
-
// KMDClient is the client used to interact with the kmd API over its socket
type KMDClient struct {
httpClient http.Client
diff --git a/data/basics/fields_test.go b/data/basics/fields_test.go
index 2fcabbd4d..c3cf25284 100644
--- a/data/basics/fields_test.go
+++ b/data/basics/fields_test.go
@@ -17,133 +17,38 @@
package basics_test
import (
- "fmt"
"reflect"
- "strings"
"testing"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/test/reflectionhelpers"
)
-type typePath []string
-
-func (p typePath) addMapKey() typePath {
- return append(p, "map_key")
-}
-
-func (p typePath) addValue() typePath {
- return append(p, "value")
-}
-
-func (p typePath) addField(fieldName string) typePath {
- return append(p, "field "+fieldName)
-}
-
-func (p typePath) validatePathFrom(t reflect.Type) error {
- if len(p) == 0 {
- // path is empty, so it's vacuously valid
- return nil
- }
-
- value := p[0]
- switch {
- case value == "map_key":
- return p[1:].validatePathFrom(t.Key())
- case value == "value":
- return p[1:].validatePathFrom(t.Elem())
- case strings.HasPrefix(value, "field "):
- fieldName := value[len("field "):]
- fieldType, ok := t.FieldByName(fieldName)
- if !ok {
- return fmt.Errorf("Type '%s' does not have the field '%s'", t.Name(), fieldName)
- }
- return p[1:].validatePathFrom(fieldType.Type)
- default:
- return fmt.Errorf("Unexpected item in path: %s", value)
- }
-}
-
-func (p typePath) Equals(other typePath) bool {
- if len(p) != len(other) {
- return false
- }
- for i := range p {
- if p[i] != other[i] {
- return false
- }
- }
- return true
-}
-
-func (p typePath) String() string {
- return strings.Join(p, "->")
-}
-
-func checkReferencedTypes(seen map[reflect.Type]bool, path typePath, typeStack []reflect.Type, check func(path typePath, stack []reflect.Type) bool) {
- currentType := typeStack[len(typeStack)-1]
-
- if _, seenType := seen[currentType]; seenType {
- return
- }
-
- if !check(path, typeStack) {
- // if currentType is not ok, don't visit its children
- return
- }
-
- // add currentType to seen set, to avoid infinite recursion if currentType references itself
- seen[currentType] = true
-
- // after currentType's children are visited, "forget" the type, so we can examine it again if needed
- // if this didn't happen, only 1 error per invalid type would get reported
- defer delete(seen, currentType)
-
- switch currentType.Kind() {
- case reflect.Map:
- newPath := path.addMapKey()
- newStack := append(typeStack, currentType.Key())
- checkReferencedTypes(seen, newPath, newStack, check)
- fallthrough
- case reflect.Array, reflect.Slice, reflect.Ptr:
- newPath := path.addValue()
- newStack := append(typeStack, currentType.Elem())
- checkReferencedTypes(seen, newPath, newStack, check)
- case reflect.Struct:
- for i := 0; i < currentType.NumField(); i++ {
- field := currentType.Field(i)
- newPath := path.addField(field.Name)
- newStack := append(typeStack, field.Type)
- checkReferencedTypes(seen, newPath, newStack, check)
- }
- }
-}
-
-func makeTypeCheckFunction(t *testing.T, exceptions []typePath, startType reflect.Type) func(path typePath, stack []reflect.Type) bool {
+func makeTypeCheckFunction(t *testing.T, exceptions []reflectionhelpers.TypePath, startType reflect.Type) reflectionhelpers.ReferencedTypesIterationAction {
for _, exception := range exceptions {
- err := exception.validatePathFrom(startType)
- require.NoError(t, err)
+ // ensure all exceptions can resolve without panicking
+ exception.ResolveType(startType)
}
- return func(path typePath, stack []reflect.Type) bool {
+ return func(path reflectionhelpers.TypePath, stack []reflect.Type) bool {
currentType := stack[len(stack)-1]
for _, exception := range exceptions {
if path.Equals(exception) {
- t.Logf("Skipping exception for path: %s", path.String())
+ t.Logf("Skipping exception for path: %s", path)
return true
}
}
switch currentType.Kind() {
case reflect.String:
- t.Errorf("Invalid string type referenced from %s. Use []byte instead. Full path: %s", startType.Name(), path.String())
+ t.Errorf("Invalid string type referenced from %v. Use []byte instead. Full path: %s", startType, path)
return false
case reflect.Chan, reflect.Func, reflect.Interface, reflect.UnsafePointer:
// raise an error if one of these strange types is referenced too
- t.Errorf("Invalid type %s referenced from %s. Full path: %s", currentType.Name(), startType.Name(), path.String())
+ t.Errorf("Invalid type %v referenced from %v. Full path: %s", currentType, startType, path)
return false
default:
return true
@@ -157,26 +62,24 @@ func TestBlockFields(t *testing.T) {
typeToCheck := reflect.TypeOf(bookkeeping.Block{})
// These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string.
- exceptions := []typePath{
- typePath{}.addField("BlockHeader").addField("GenesisID"),
- typePath{}.addField("BlockHeader").addField("UpgradeState").addField("CurrentProtocol"),
- typePath{}.addField("BlockHeader").addField("UpgradeState").addField("NextProtocol"),
- typePath{}.addField("BlockHeader").addField("UpgradeVote").addField("UpgradePropose"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("Type"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("Header").addField("GenesisID"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("UnitName"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("AssetName"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("SignedTxn").addField("Txn").addField("AssetConfigTxnFields").addField("AssetParams").addField("URL"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("GlobalDelta").addMapKey(),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("GlobalDelta").addValue().addField("Bytes"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("LocalDeltas").addValue().addMapKey(),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("LocalDeltas").addValue().addValue().addField("Bytes"),
- typePath{}.addField("Payset").addValue().addField("SignedTxnWithAD").addField("ApplyData").addField("EvalDelta").addField("Logs").addValue(),
+ exceptions := []reflectionhelpers.TypePath{
+ reflectionhelpers.TypePath{}.AddField("BlockHeader").AddField("GenesisID"),
+ reflectionhelpers.TypePath{}.AddField("BlockHeader").AddField("UpgradeState").AddField("CurrentProtocol"),
+ reflectionhelpers.TypePath{}.AddField("BlockHeader").AddField("UpgradeState").AddField("NextProtocol"),
+ reflectionhelpers.TypePath{}.AddField("BlockHeader").AddField("UpgradeVote").AddField("UpgradePropose"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("SignedTxn").AddField("Txn").AddField("Type"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("SignedTxn").AddField("Txn").AddField("Header").AddField("GenesisID"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("SignedTxn").AddField("Txn").AddField("AssetConfigTxnFields").AddField("AssetParams").AddField("UnitName"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("SignedTxn").AddField("Txn").AddField("AssetConfigTxnFields").AddField("AssetParams").AddField("AssetName"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("SignedTxn").AddField("Txn").AddField("AssetConfigTxnFields").AddField("AssetParams").AddField("URL"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("ApplyData").AddField("EvalDelta").AddField("GlobalDelta").AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("ApplyData").AddField("EvalDelta").AddField("GlobalDelta").AddValue().AddField("Bytes"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("ApplyData").AddField("EvalDelta").AddField("LocalDeltas").AddValue().AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("ApplyData").AddField("EvalDelta").AddField("LocalDeltas").AddValue().AddValue().AddField("Bytes"),
+ reflectionhelpers.TypePath{}.AddField("Payset").AddValue().AddField("SignedTxnWithAD").AddField("ApplyData").AddField("EvalDelta").AddField("Logs").AddValue(),
}
- seen := make(map[reflect.Type]bool)
-
- checkReferencedTypes(seen, nil, []reflect.Type{typeToCheck}, makeTypeCheckFunction(t, exceptions, typeToCheck))
+ reflectionhelpers.IterateReferencedTypes(typeToCheck, makeTypeCheckFunction(t, exceptions, typeToCheck))
}
func TestAccountDataFields(t *testing.T) {
@@ -185,17 +88,15 @@ func TestAccountDataFields(t *testing.T) {
typeToCheck := reflect.TypeOf(basics.AccountData{})
// These exceptions are for pre-existing usages of string. Only add to this list if you really need to use string.
- exceptions := []typePath{
- typePath{}.addField("AssetParams").addValue().addField("UnitName"),
- typePath{}.addField("AssetParams").addValue().addField("AssetName"),
- typePath{}.addField("AssetParams").addValue().addField("URL"),
- typePath{}.addField("AppLocalStates").addValue().addField("KeyValue").addMapKey(),
- typePath{}.addField("AppLocalStates").addValue().addField("KeyValue").addValue().addField("Bytes"),
- typePath{}.addField("AppParams").addValue().addField("GlobalState").addMapKey(),
- typePath{}.addField("AppParams").addValue().addField("GlobalState").addValue().addField("Bytes"),
+ exceptions := []reflectionhelpers.TypePath{
+ reflectionhelpers.TypePath{}.AddField("AssetParams").AddValue().AddField("UnitName"),
+ reflectionhelpers.TypePath{}.AddField("AssetParams").AddValue().AddField("AssetName"),
+ reflectionhelpers.TypePath{}.AddField("AssetParams").AddValue().AddField("URL"),
+ reflectionhelpers.TypePath{}.AddField("AppLocalStates").AddValue().AddField("KeyValue").AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("AppLocalStates").AddValue().AddField("KeyValue").AddValue().AddField("Bytes"),
+ reflectionhelpers.TypePath{}.AddField("AppParams").AddValue().AddField("GlobalState").AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("AppParams").AddValue().AddField("GlobalState").AddValue().AddField("Bytes"),
}
- seen := make(map[reflect.Type]bool)
-
- checkReferencedTypes(seen, nil, []reflect.Type{typeToCheck}, makeTypeCheckFunction(t, exceptions, typeToCheck))
+ reflectionhelpers.IterateReferencedTypes(typeToCheck, makeTypeCheckFunction(t, exceptions, typeToCheck))
}
diff --git a/installer/config.json.example b/installer/config.json.example
index 76b1700a7..91b9413d9 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 23,
+ "Version": 24,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 7,
@@ -10,7 +10,7 @@
"BaseLoggerDebugLevel": 4,
"BlockServiceCustomFallbackEndpoints": "",
"BroadcastConnectionsLimit": -1,
- "CadaverSizeTarget": 1073741824,
+ "CadaverSizeTarget": 0,
"CatchpointFileHistoryLength": 365,
"CatchpointInterval": 10000,
"CatchpointTracking": 0,
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 5a92ab919..264687b20 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -371,6 +371,8 @@ type normalizedAccountBalance struct {
normalizedBalance uint64
// encodedResources provides the encoded form of the resources
encodedResources map[basics.CreatableIndex][]byte
+ // partial balance indicates that the original account balance was split into multiple parts in catchpoint creation time
+ partialBalance bool
}
// prepareNormalizedBalancesV5 converts an array of encodedBalanceRecordV5 into an equal size array of normalizedAccountBalances.
@@ -427,12 +429,22 @@ func prepareNormalizedBalancesV6(bals []encodedBalanceRecordV6, proto config.Con
normalizedAccountBalances[i].accountData.MicroAlgos,
proto)
normalizedAccountBalances[i].encodedAccountData = balance.AccountData
- normalizedAccountBalances[i].accountHashes = make([][]byte, 1+len(balance.Resources))
- normalizedAccountBalances[i].accountHashes[0] = accountHashBuilderV6(balance.Address, &normalizedAccountBalances[i].accountData, balance.AccountData)
+ curHashIdx := 0
+ if balance.ExpectingMoreEntries {
+ // There is a single chunk in the catchpoint file with ExpectingMoreEntries
+ // set to false for this account. There may be multiple chunks with
+ // ExpectingMoreEntries set to true. In this case, we do not have to add the
+ // account's own hash to accountHashes.
+ normalizedAccountBalances[i].accountHashes = make([][]byte, len(balance.Resources))
+ normalizedAccountBalances[i].partialBalance = true
+ } else {
+ normalizedAccountBalances[i].accountHashes = make([][]byte, 1+len(balance.Resources))
+ normalizedAccountBalances[i].accountHashes[0] = accountHashBuilderV6(balance.Address, &normalizedAccountBalances[i].accountData, balance.AccountData)
+ curHashIdx++
+ }
if len(balance.Resources) > 0 {
normalizedAccountBalances[i].resources = make(map[basics.CreatableIndex]resourcesData, len(balance.Resources))
normalizedAccountBalances[i].encodedResources = make(map[basics.CreatableIndex][]byte, len(balance.Resources))
- resIdx := 0
for cidx, res := range balance.Resources {
var resData resourcesData
err = protocol.Decode(res, &resData)
@@ -447,10 +459,10 @@ func prepareNormalizedBalancesV6(bals []encodedBalanceRecordV6, proto config.Con
} else {
err = fmt.Errorf("unknown creatable for addr %s, aidx %d, data %v", balance.Address.String(), cidx, resData)
}
- normalizedAccountBalances[i].accountHashes[resIdx+1] = resourcesHashBuilderV6(balance.Address, basics.CreatableIndex(cidx), ctype, resData.UpdateRound, res)
+ normalizedAccountBalances[i].accountHashes[curHashIdx] = resourcesHashBuilderV6(balance.Address, basics.CreatableIndex(cidx), ctype, resData.UpdateRound, res)
normalizedAccountBalances[i].resources[basics.CreatableIndex(cidx)] = resData
normalizedAccountBalances[i].encodedResources[basics.CreatableIndex(cidx)] = res
- resIdx++
+ curHashIdx++
}
}
}
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index a13547294..5491cb21d 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -2131,6 +2131,183 @@ func TestResourcesDataSetData(t *testing.T) {
}
}
+// TestResourceDataRoundtripConversion ensures that basics.AppLocalState, basics.AppParams,
+// basics.AssetHolding, and basics.AssetParams can be converted to resourcesData and back without
+// losing any data. It uses reflection to be sure that no new fields are omitted.
+//
+// In other words, this test makes sure any new fields in basics.AppLocalState, basics.AppParams,
+// basics.AssetHolding, or basics.AssetParam also get added to resourcesData.
+func TestResourceDataRoundtripConversion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ t.Run("basics.AppLocalState", func(t *testing.T) {
+ t.Parallel()
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&basics.AppLocalState{})
+ basicsAppLocalState := *randObj.(*basics.AppLocalState)
+
+ var data resourcesData
+ data.SetAppLocalState(basicsAppLocalState)
+ roundTripAppLocalState := data.GetAppLocalState()
+
+ require.Equal(t, basicsAppLocalState, roundTripAppLocalState)
+ }
+ })
+
+ t.Run("basics.AppParams", func(t *testing.T) {
+ t.Parallel()
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&basics.AppParams{})
+ basicsAppParams := *randObj.(*basics.AppParams)
+
+ for _, haveHoldings := range []bool{true, false} {
+ var data resourcesData
+ data.SetAppParams(basicsAppParams, haveHoldings)
+ roundTripAppParams := data.GetAppParams()
+
+ require.Equal(t, basicsAppParams, roundTripAppParams)
+ }
+ }
+ })
+
+ t.Run("basics.AssetHolding", func(t *testing.T) {
+ t.Parallel()
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&basics.AssetHolding{})
+ basicsAssetHolding := *randObj.(*basics.AssetHolding)
+
+ var data resourcesData
+ data.SetAssetHolding(basicsAssetHolding)
+ roundTripAssetHolding := data.GetAssetHolding()
+
+ require.Equal(t, basicsAssetHolding, roundTripAssetHolding)
+ }
+ })
+
+ t.Run("basics.AssetParams", func(t *testing.T) {
+ t.Parallel()
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&basics.AssetParams{})
+ basicsAssetParams := *randObj.(*basics.AssetParams)
+
+ for _, haveHoldings := range []bool{true, false} {
+ var data resourcesData
+ data.SetAssetParams(basicsAssetParams, haveHoldings)
+ roundTripAssetParams := data.GetAssetParams()
+
+ require.Equal(t, basicsAssetParams, roundTripAssetParams)
+ }
+ }
+ })
+}
+
+// TestBaseAccountDataRoundtripConversion ensures that baseAccountData can be converted to
+// ledgercore.AccountData and basics.AccountData and back without losing any data. It uses
+// reflection to be sure that no new fields are omitted.
+//
+// In other words, this test makes sure any new fields in baseAccountData also get added to
+// ledgercore.AccountData and basics.AccountData. You should add a manual override in this test if
+// the field really only belongs in baseAccountData.
+func TestBaseAccountDataRoundtripConversion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ t.Run("ledgercore.AccountData", func(t *testing.T) {
+ t.Parallel()
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&baseAccountData{})
+ baseAccount := *randObj.(*baseAccountData)
+
+ ledgercoreAccount := baseAccount.GetLedgerCoreAccountData()
+ var roundTripAccount baseAccountData
+ roundTripAccount.SetCoreAccountData(&ledgercoreAccount)
+
+ // Manually set UpdateRound, since it is lost in GetLedgerCoreAccountData
+ roundTripAccount.UpdateRound = baseAccount.UpdateRound
+
+ require.Equal(t, baseAccount, roundTripAccount)
+ }
+ })
+
+ t.Run("basics.AccountData", func(t *testing.T) {
+ t.Parallel()
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&baseAccountData{})
+ baseAccount := *randObj.(*baseAccountData)
+
+ basicsAccount := baseAccount.GetAccountData()
+ var roundTripAccount baseAccountData
+ roundTripAccount.SetAccountData(&basicsAccount)
+
+ // Manually set UpdateRound, since it is lost in GetAccountData
+ roundTripAccount.UpdateRound = baseAccount.UpdateRound
+
+ // Manually set resources, since resource information is lost in GetAccountData
+ roundTripAccount.TotalAssetParams = baseAccount.TotalAssetParams
+ roundTripAccount.TotalAssets = baseAccount.TotalAssets
+ roundTripAccount.TotalAppLocalStates = baseAccount.TotalAppLocalStates
+ roundTripAccount.TotalAppParams = baseAccount.TotalAppParams
+
+ require.Equal(t, baseAccount, roundTripAccount)
+ }
+ })
+}
+
+// TestBasicsAccountDataRoundtripConversion ensures that basics.AccountData can be converted to
+// baseAccountData and back without losing any data. It uses reflection to be sure that this test is
+// always up-to-date with new fields.
+//
+// In other words, this test makes sure any new fields in basics.AccountData also get added to
+// baseAccountData.
+func TestBasicsAccountDataRoundtripConversion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&basics.AccountData{})
+ basicsAccount := *randObj.(*basics.AccountData)
+
+ var baseAccount baseAccountData
+ baseAccount.SetAccountData(&basicsAccount)
+ roundTripAccount := baseAccount.GetAccountData()
+
+ // Manually set resources, since GetAccountData doesn't attempt to restore them
+ roundTripAccount.AssetParams = basicsAccount.AssetParams
+ roundTripAccount.Assets = basicsAccount.Assets
+ roundTripAccount.AppLocalStates = basicsAccount.AppLocalStates
+ roundTripAccount.AppParams = basicsAccount.AppParams
+
+ require.Equal(t, basicsAccount, roundTripAccount)
+ require.Equal(t, uint64(len(roundTripAccount.AssetParams)), baseAccount.TotalAssetParams)
+ require.Equal(t, uint64(len(roundTripAccount.Assets)), baseAccount.TotalAssets)
+ require.Equal(t, uint64(len(roundTripAccount.AppLocalStates)), baseAccount.TotalAppLocalStates)
+ require.Equal(t, uint64(len(roundTripAccount.AppParams)), baseAccount.TotalAppParams)
+ }
+}
+
+// TestLedgercoreAccountDataRoundtripConversion ensures that ledgercore.AccountData can be converted
+// to baseAccountData and back without losing any data. It uses reflection to be sure that no new
+// fields are omitted.
+//
+// In other words, this test makes sure any new fields in ledgercore.AccountData also get added to
+// baseAccountData.
+func TestLedgercoreAccountDataRoundtripConversion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&ledgercore.AccountData{})
+ ledgercoreAccount := *randObj.(*ledgercore.AccountData)
+
+ var baseAccount baseAccountData
+ baseAccount.SetCoreAccountData(&ledgercoreAccount)
+ roundTripAccount := baseAccount.GetLedgerCoreAccountData()
+
+ require.Equal(t, ledgercoreAccount, roundTripAccount)
+ }
+}
+
func TestBaseAccountDataIsEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
positiveTesting := func(t *testing.T) {
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index e5765e506..8c8493372 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -394,6 +394,9 @@ func TestFullCatchpointWriter(t *testing.T) {
require.NoError(t, err)
}
+ err = accessor.BuildMerkleTrie(context.Background(), nil)
+ require.NoError(t, err)
+
err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
err := applyCatchpointStagingBalances(ctx, tx, 0, 0)
return err
@@ -701,6 +704,9 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
require.NoError(t, err)
}
+ err = accessor.BuildMerkleTrie(context.Background(), nil)
+ require.NoError(t, err)
+
err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
err := applyCatchpointStagingBalances(ctx, tx, 0, 0)
return err
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 3c2f6acee..2a12377d3 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -91,10 +91,46 @@ type CatchpointCatchupAccessor interface {
Ledger() (l CatchupAccessorClientLedger)
}
-// CatchpointCatchupAccessorImpl is the concrete implementation of the CatchpointCatchupAccessor interface
-type CatchpointCatchupAccessorImpl struct {
+type stagingWriter interface {
+ writeBalances(context.Context, []normalizedAccountBalance) error
+ writeCreatables(context.Context, []normalizedAccountBalance) error
+ writeHashes(context.Context, []normalizedAccountBalance) error
+ isShared() bool
+}
+
+type stagingWriterImpl struct {
+ wdb db.Accessor
+}
+
+func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []normalizedAccountBalance) error {
+ return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ return writeCatchpointStagingBalances(ctx, tx, balances)
+ })
+}
+
+func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []normalizedAccountBalance) error {
+ return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return writeCatchpointStagingCreatable(ctx, tx, balances)
+ })
+}
+
+func (w *stagingWriterImpl) writeHashes(ctx context.Context, balances []normalizedAccountBalance) error {
+ return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err := writeCatchpointStagingHashes(ctx, tx, balances)
+ return err
+ })
+}
+
+func (w *stagingWriterImpl) isShared() bool {
+ return w.wdb.IsSharedCacheConnection()
+}
+
+// catchpointCatchupAccessorImpl is the concrete implementation of the CatchpointCatchupAccessor interface
+type catchpointCatchupAccessorImpl struct {
ledger *Ledger
+ stagingWriter stagingWriter
+
// log copied from ledger
log logging.Logger
@@ -135,14 +171,15 @@ type CatchupAccessorClientLedger interface {
// MakeCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger
func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor {
- return &CatchpointCatchupAccessorImpl{
- ledger: ledger,
- log: log,
+ return &catchpointCatchupAccessorImpl{
+ ledger: ledger,
+ stagingWriter: &stagingWriterImpl{wdb: ledger.trackerDB().Wdb},
+ log: log,
}
}
// GetState returns the current state of the catchpoint catchup
-func (c *CatchpointCatchupAccessorImpl) GetState(ctx context.Context) (state CatchpointCatchupState, err error) {
+func (c *catchpointCatchupAccessorImpl) GetState(ctx context.Context) (state CatchpointCatchupState, err error) {
var istate uint64
istate, err = readCatchpointStateUint64(ctx, c.ledger.trackerDB().Rdb.Handle, catchpointStateCatchupState)
if err != nil {
@@ -153,7 +190,7 @@ func (c *CatchpointCatchupAccessorImpl) GetState(ctx context.Context) (state Cat
}
// SetState set the state of the catchpoint catchup
-func (c *CatchpointCatchupAccessorImpl) SetState(ctx context.Context, state CatchpointCatchupState) (err error) {
+func (c *catchpointCatchupAccessorImpl) SetState(ctx context.Context, state CatchpointCatchupState) (err error) {
if state < CatchpointCatchupStateInactive || state > catchpointCatchupStateLast {
return fmt.Errorf("invalid catchpoint catchup state provided : %d", state)
}
@@ -165,7 +202,7 @@ func (c *CatchpointCatchupAccessorImpl) SetState(ctx context.Context, state Catc
}
// GetLabel returns the current catchpoint catchup label
-func (c *CatchpointCatchupAccessorImpl) GetLabel(ctx context.Context) (label string, err error) {
+func (c *catchpointCatchupAccessorImpl) GetLabel(ctx context.Context) (label string, err error) {
label, err = readCatchpointStateString(ctx, c.ledger.trackerDB().Rdb.Handle, catchpointStateCatchupLabel)
if err != nil {
return "", fmt.Errorf("unable to read catchpoint catchup state '%s': %v", catchpointStateCatchupLabel, err)
@@ -174,7 +211,7 @@ func (c *CatchpointCatchupAccessorImpl) GetLabel(ctx context.Context) (label str
}
// SetLabel set the catchpoint catchup label
-func (c *CatchpointCatchupAccessorImpl) SetLabel(ctx context.Context, label string) (err error) {
+func (c *catchpointCatchupAccessorImpl) SetLabel(ctx context.Context, label string) (err error) {
// verify it's parsable :
_, _, err = ledgercore.ParseCatchpointLabel(label)
if err != nil {
@@ -188,7 +225,7 @@ func (c *CatchpointCatchupAccessorImpl) SetLabel(ctx context.Context, label stri
}
// ResetStagingBalances resets the current staging balances, preparing for a new set of balances to be added
-func (c *CatchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context, newCatchup bool) (err error) {
+func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context, newCatchup bool) (err error) {
wdb := c.ledger.trackerDB().Wdb
if !newCatchup {
c.ledger.setSynchronousMode(ctx, c.ledger.synchronousMode)
@@ -246,7 +283,7 @@ type CatchpointCatchupAccessorProgress struct {
}
// ProgressStagingBalances deserialize the given bytes as a temporary staging balances
-func (c *CatchpointCatchupAccessorImpl) ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
+func (c *catchpointCatchupAccessorImpl) ProgressStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
if sectionName == "content.msgpack" {
return c.processStagingContent(ctx, bytes, progress)
}
@@ -259,7 +296,7 @@ func (c *CatchpointCatchupAccessorImpl) ProgressStagingBalances(ctx context.Cont
}
// processStagingContent deserialize the given bytes as a temporary staging balances content
-func (c *CatchpointCatchupAccessorImpl) processStagingContent(ctx context.Context, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
+func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Context, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
if progress.SeenHeader {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: content chunk already seen")
}
@@ -307,12 +344,11 @@ func (c *CatchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
}
// processStagingBalances deserialize the given bytes as a temporary staging balances
-func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Context, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
+func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Context, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
if !progress.SeenHeader {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingBalances: content chunk was missing")
}
- wdb := c.ledger.trackerDB().Wdb
start := time.Now()
ledgerProcessstagingbalancesCount.Inc(nil)
@@ -440,16 +476,13 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
wg.Add(1)
go func() {
defer wg.Done()
- errBalances = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- start := time.Now()
- err = writeCatchpointStagingBalances(ctx, tx, normalizedAccountBalances)
- durBalances = time.Since(start)
- return err
- })
+ start := time.Now()
+ errBalances = c.stagingWriter.writeBalances(ctx, normalizedAccountBalances)
+ durBalances = time.Since(start)
}()
// on a in-memory database, wait for the writer to finish before starting the new writer
- if wdb.IsSharedCacheConnection() {
+ if c.stagingWriter.isShared() {
wg.Wait()
}
@@ -467,17 +500,14 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
}
}
if hasCreatables {
- errCreatables = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- start := time.Now()
- err := writeCatchpointStagingCreatable(ctx, tx, normalizedAccountBalances)
- durCreatables = time.Since(start)
- return err
- })
+ start := time.Now()
+ errCreatables = c.stagingWriter.writeCreatables(ctx, normalizedAccountBalances)
+ durCreatables = time.Since(start)
}
}()
// on a in-memory database, wait for the writer to finish before starting the new writer
- if wdb.IsSharedCacheConnection() {
+ if c.stagingWriter.isShared() {
wg.Wait()
}
@@ -485,12 +515,9 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
wg.Add(1)
go func() {
defer wg.Done()
- errHashes = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- start := time.Now()
- err := writeCatchpointStagingHashes(ctx, tx, normalizedAccountBalances)
- durHashes = time.Since(start)
- return err
- })
+ start := time.Now()
+ errHashes = c.stagingWriter.writeHashes(ctx, normalizedAccountBalances)
+ durHashes = time.Since(start)
}()
wg.Wait()
@@ -510,10 +537,12 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
progress.HashesWriteDuration += durHashes
ledgerProcessstagingbalancesMicros.AddMicrosecondsSince(start, nil)
- progress.ProcessedAccounts += uint64(len(normalizedAccountBalances))
progress.ProcessedBytes += uint64(len(bytes))
for _, acctBal := range normalizedAccountBalances {
progress.TotalAccountHashes += uint64(len(acctBal.accountHashes))
+ if !acctBal.partialBalance {
+ progress.ProcessedAccounts++
+ }
}
// not strictly required, but clean up the pointer when we're done.
@@ -529,7 +558,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
}
// BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie
-func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64)) (err error) {
+func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64)) (err error) {
wdb := c.ledger.trackerDB().Wdb
rdb := c.ledger.trackerDB().Rdb
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
@@ -708,7 +737,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
}
// GetCatchupBlockRound returns the latest block round matching the current catchpoint
-func (c *CatchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) {
+func (c *catchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) {
var iRound uint64
iRound, err = readCatchpointStateUint64(ctx, c.ledger.trackerDB().Rdb.Handle, catchpointStateCatchupBlockRound)
if err != nil {
@@ -718,7 +747,7 @@ func (c *CatchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context
}
// VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label.
-func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) {
rdb := c.ledger.trackerDB().Rdb
var balancesHash crypto.Digest
var blockRound basics.Round
@@ -780,7 +809,7 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
// StoreBalancesRound calculates the balances round based on the first block and the associated consensus parameters, and
// store that to the database
-func (c *CatchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (c *catchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, blk *bookkeeping.Block) (err error) {
// calculate the balances round and store it. It *should* be identical to the one in the catchpoint file header, but we don't want to
// trust the one in the catchpoint file header, so we'll calculate it ourselves.
catchpointLookback := config.Consensus[blk.CurrentProtocol].CatchpointLookback
@@ -803,7 +832,7 @@ func (c *CatchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context,
}
// StoreFirstBlock stores a single block to the blocks database.
-func (c *CatchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (c *catchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
blockDbs := c.ledger.blockDB()
start := time.Now()
ledgerStorefirstblockCount.Inc(nil)
@@ -818,7 +847,7 @@ func (c *CatchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk
}
// StoreBlock stores a single block to the blocks database.
-func (c *CatchpointCatchupAccessorImpl) StoreBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (c *catchpointCatchupAccessorImpl) StoreBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
blockDbs := c.ledger.blockDB()
start := time.Now()
ledgerCatchpointStoreblockCount.Inc(nil)
@@ -833,7 +862,7 @@ func (c *CatchpointCatchupAccessorImpl) StoreBlock(ctx context.Context, blk *boo
}
// FinishBlocks concludes the catchup of the blocks database.
-func (c *CatchpointCatchupAccessorImpl) FinishBlocks(ctx context.Context, applyChanges bool) (err error) {
+func (c *catchpointCatchupAccessorImpl) FinishBlocks(ctx context.Context, applyChanges bool) (err error) {
blockDbs := c.ledger.blockDB()
start := time.Now()
ledgerCatchpointFinishblocksCount.Inc(nil)
@@ -852,7 +881,7 @@ func (c *CatchpointCatchupAccessorImpl) FinishBlocks(ctx context.Context, applyC
}
// EnsureFirstBlock ensure that we have a single block in the staging block table, and returns that block
-func (c *CatchpointCatchupAccessorImpl) EnsureFirstBlock(ctx context.Context) (blk bookkeeping.Block, err error) {
+func (c *catchpointCatchupAccessorImpl) EnsureFirstBlock(ctx context.Context) (blk bookkeeping.Block, err error) {
blockDbs := c.ledger.blockDB()
start := time.Now()
ledgerCatchpointEnsureblock1Count.Inc(nil)
@@ -869,7 +898,7 @@ func (c *CatchpointCatchupAccessorImpl) EnsureFirstBlock(ctx context.Context) (b
// CompleteCatchup completes the catchpoint catchup process by switching the databases tables around
// and reloading the ledger.
-func (c *CatchpointCatchupAccessorImpl) CompleteCatchup(ctx context.Context) (err error) {
+func (c *catchpointCatchupAccessorImpl) CompleteCatchup(ctx context.Context) (err error) {
err = c.FinishBlocks(ctx, true)
if err != nil {
return err
@@ -883,7 +912,7 @@ func (c *CatchpointCatchupAccessorImpl) CompleteCatchup(ctx context.Context) (er
}
// finishBalances concludes the catchup of the balances(tracker) database.
-func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err error) {
+func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err error) {
wdb := c.ledger.trackerDB().Wdb
start := time.Now()
ledgerCatchpointFinishBalsCount.Inc(nil)
@@ -986,7 +1015,7 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
}
// Ledger returns ledger instance as CatchupAccessorClientLedger interface
-func (c *CatchpointCatchupAccessorImpl) Ledger() (l CatchupAccessorClientLedger) {
+func (c *catchpointCatchupAccessorImpl) Ledger() (l CatchupAccessorClientLedger) {
return c.ledger
}
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 50a8d9b57..1394726bd 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -20,6 +20,7 @@ import (
"context"
"encoding/binary"
"fmt"
+ "math/rand"
"os"
"strings"
"testing"
@@ -36,6 +37,8 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/msgp/msgp"
)
func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]byte, last64KIndex int) {
@@ -419,6 +422,145 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
encodedAccounts := protocol.Encode(&balances)
// expect error since there is a resource count mismatch
- err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ err = catchpointAccessor.ProgressStagingBalances(ctx, "balances.XX.msgpack", encodedAccounts, &progress)
require.Error(t, err)
}
+
+type testStagingWriter struct {
+ t *testing.T
+ hashes map[[4 + crypto.DigestSize]byte]int
+}
+
+func (w *testStagingWriter) writeBalances(ctx context.Context, balances []normalizedAccountBalance) error {
+ return nil
+}
+
+func (w *testStagingWriter) writeCreatables(ctx context.Context, balances []normalizedAccountBalance) error {
+ return nil
+}
+
+func (w *testStagingWriter) writeHashes(ctx context.Context, balances []normalizedAccountBalance) error {
+ for _, bal := range balances {
+ for _, hash := range bal.accountHashes {
+ var key [4 + crypto.DigestSize]byte
+ require.Len(w.t, hash, 4+crypto.DigestSize)
+ copy(key[:], hash)
+ w.hashes[key] = w.hashes[key] + 1
+ }
+ }
+ return nil
+}
+
+func (w *testStagingWriter) isShared() bool {
+ return false
+}
+
+// makeTestCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger
+func makeTestCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger, writer stagingWriter) *catchpointCatchupAccessorImpl {
+ return &catchpointCatchupAccessorImpl{
+ ledger: ledger,
+ stagingWriter: writer,
+ log: log,
+ }
+}
+
+func TestCatchupAccessorProcessStagingBalances(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ log := logging.TestingLog(t)
+ writer := &testStagingWriter{t: t, hashes: make(map[[4 + crypto.DigestSize]byte]int)}
+ l := Ledger{
+ log: log,
+ genesisProto: config.Consensus[protocol.ConsensusCurrentVersion],
+ synchronousMode: db.SynchronousMode(100), // non-existing in order to skip the underlying db call in ledger.setSynchronousMode
+ }
+ catchpointAccessor := makeTestCatchpointCatchupAccessor(&l, log, writer)
+
+ randomSimpleBaseAcct := func() baseAccountData {
+ accountData := baseAccountData{
+ RewardsBase: crypto.RandUint63(),
+ MicroAlgos: basics.MicroAlgos{Raw: crypto.RandUint63()},
+ AuthAddr: ledgertesting.RandomAddress(),
+ }
+ return accountData
+ }
+
+ encodedBalanceRecordFromBase := func(addr basics.Address, base baseAccountData, resources map[uint64]msgp.Raw, more bool) encodedBalanceRecordV6 {
+ ebr := encodedBalanceRecordV6{
+ Address: addr,
+ AccountData: protocol.Encode(&base),
+ Resources: resources,
+ ExpectingMoreEntries: more,
+ }
+ return ebr
+ }
+
+ const numAccounts = 5
+ const acctXNumRes = 13
+ const expectHashes = numAccounts + acctXNumRes
+ progress := CatchpointCatchupAccessorProgress{
+ TotalAccounts: numAccounts,
+ TotalChunks: 2,
+ SeenHeader: true,
+ Version: CatchpointFileVersionV6,
+ }
+
+ // create some walking gentlemen
+ acctA := randomSimpleBaseAcct()
+ acctB := randomSimpleBaseAcct()
+ acctC := randomSimpleBaseAcct()
+ acctD := randomSimpleBaseAcct()
+
+ // prepare chunked account
+ addrX := ledgertesting.RandomAddress()
+ acctX := randomSimpleBaseAcct()
+ acctX.TotalAssets = acctXNumRes
+ acctXRes1 := make(map[uint64]msgp.Raw, acctXNumRes/2+1)
+ acctXRes2 := make(map[uint64]msgp.Raw, acctXNumRes/2)
+ emptyRes := resourcesData{ResourceFlags: resourceFlagsEmptyAsset}
+ emptyResEnc := protocol.Encode(&emptyRes)
+ for i := 0; i < acctXNumRes; i++ {
+ if i <= acctXNumRes/2 {
+ acctXRes1[rand.Uint64()] = emptyResEnc
+ } else {
+ acctXRes2[rand.Uint64()] = emptyResEnc
+ }
+ }
+
+ // make chunks
+ chunks := []catchpointFileBalancesChunkV6{
+ {
+ Balances: []encodedBalanceRecordV6{
+ encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctA, nil, false),
+ encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctB, nil, false),
+ encodedBalanceRecordFromBase(addrX, acctX, acctXRes1, true),
+ },
+ },
+ {
+ Balances: []encodedBalanceRecordV6{
+ encodedBalanceRecordFromBase(addrX, acctX, acctXRes2, false),
+ encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctC, nil, false),
+ encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctD, nil, false),
+ },
+ },
+ }
+
+ // process chunks
+ ctx := context.Background()
+ progress.SeenHeader = true
+ for _, chunk := range chunks {
+ blob := protocol.Encode(&chunk)
+ err := catchpointAccessor.processStagingBalances(ctx, blob, &progress)
+ require.NoError(t, err)
+ }
+
+ // compare account counts and hashes
+ require.Equal(t, progress.TotalAccounts, progress.ProcessedAccounts)
+
+ // ensure no duplicate hashes
+ require.Equal(t, uint64(expectHashes), progress.TotalAccountHashes)
+ require.Equal(t, expectHashes, len(writer.hashes))
+ for _, count := range writer.hashes {
+ require.Equal(t, 1, count)
+ }
+}
diff --git a/ledger/ledgercore/accountdata_test.go b/ledger/ledgercore/accountdata_test.go
new file mode 100644
index 000000000..a22a6b5b0
--- /dev/null
+++ b/ledger/ledgercore/accountdata_test.go
@@ -0,0 +1,87 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// TestBasicsAccountDataRoundtripConversion ensures that basics.AccountData can be converted to
+// ledgercore.AccountData and back without losing any data. It uses reflection to be sure that this
+// test is always up-to-date with new fields.
+//
+// In other words, this test makes sure any new fields in basics.AccountData also get added to
+// ledgercore.AccountData.
+func TestBasicsAccountDataRoundtripConversion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&basics.AccountData{})
+ basicsAccount := *randObj.(*basics.AccountData)
+
+ ledgercoreAccount := ToAccountData(basicsAccount)
+ var roundTripAccount basics.AccountData
+ AssignAccountData(&roundTripAccount, ledgercoreAccount)
+
+ // Manually set resources, since AssignAccountData doesn't attempt to restore them
+ roundTripAccount.AssetParams = basicsAccount.AssetParams
+ roundTripAccount.Assets = basicsAccount.Assets
+ roundTripAccount.AppLocalStates = basicsAccount.AppLocalStates
+ roundTripAccount.AppParams = basicsAccount.AppParams
+
+ require.Equal(t, basicsAccount, roundTripAccount)
+ require.Equal(t, uint64(len(roundTripAccount.AssetParams)), ledgercoreAccount.TotalAssetParams)
+ require.Equal(t, uint64(len(roundTripAccount.Assets)), ledgercoreAccount.TotalAssets)
+ require.Equal(t, uint64(len(roundTripAccount.AppLocalStates)), ledgercoreAccount.TotalAppLocalStates)
+ require.Equal(t, uint64(len(roundTripAccount.AppParams)), ledgercoreAccount.TotalAppParams)
+ }
+}
+
+// TestLedgercoreAccountDataRoundtripConversion ensures that ledgercore.AccountData can be converted
+// to basics.AccountData and back without losing any data. It uses reflection to be sure that no
+// new fields are omitted.
+//
+// In other words, this test makes sure any new fields in ledgercore.AccountData also get added to
+// basics.AccountData. You should add a manual override in this test if the field really only
+// belongs in ledgercore.AccountData.
+func TestLedgercoreAccountDataRoundtripConversion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for i := 0; i < 1000; i++ {
+ randObj, _ := protocol.RandomizeObject(&AccountData{})
+ ledgercoreAccount := *randObj.(*AccountData)
+
+ var basicsAccount basics.AccountData
+ AssignAccountData(&basicsAccount, ledgercoreAccount)
+ roundTripAccount := ToAccountData(basicsAccount)
+
+ // Manually set resources, since resource information is lost in AssignAccountData
+ roundTripAccount.TotalAssetParams = ledgercoreAccount.TotalAssetParams
+ roundTripAccount.TotalAssets = ledgercoreAccount.TotalAssets
+ roundTripAccount.TotalAppLocalStates = ledgercoreAccount.TotalAppLocalStates
+ roundTripAccount.TotalAppParams = ledgercoreAccount.TotalAppParams
+
+ require.Equal(t, ledgercoreAccount, roundTripAccount)
+ }
+}
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index cea52d434..947ddc1b4 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -88,15 +88,31 @@ func RandomAssetParams() basics.AssetParams {
Total: crypto.RandUint64(),
Decimals: uint32(crypto.RandUint64() % 20),
DefaultFrozen: crypto.RandUint64()%2 == 0,
- UnitName: fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffffff)),
- AssetName: fmt.Sprintf("an%x", uint32(crypto.RandUint64()%0x7fffffff)),
- URL: fmt.Sprintf("url%x", uint32(crypto.RandUint64()%0x7fffffff)),
- }
- crypto.RandBytes(ap.MetadataHash[:])
- crypto.RandBytes(ap.Manager[:])
- crypto.RandBytes(ap.Reserve[:])
- crypto.RandBytes(ap.Freeze[:])
- crypto.RandBytes(ap.Clawback[:])
+ }
+ if crypto.RandUint64()%5 != 0 {
+ ap.UnitName = fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffffff))
+ }
+ if crypto.RandUint64()%5 != 0 {
+ ap.AssetName = fmt.Sprintf("an%x", uint32(crypto.RandUint64()%0x7fffffff))
+ }
+ if crypto.RandUint64()%5 != 0 {
+ ap.URL = fmt.Sprintf("url%x", uint32(crypto.RandUint64()%0x7fffffff))
+ }
+ if crypto.RandUint64()%5 != 0 {
+ crypto.RandBytes(ap.MetadataHash[:])
+ }
+ if crypto.RandUint64()%5 != 0 {
+ crypto.RandBytes(ap.Manager[:])
+ }
+ if crypto.RandUint64()%5 != 0 {
+ crypto.RandBytes(ap.Reserve[:])
+ }
+ if crypto.RandUint64()%5 != 0 {
+ crypto.RandBytes(ap.Freeze[:])
+ }
+ if crypto.RandUint64()%5 != 0 {
+ crypto.RandBytes(ap.Clawback[:])
+ }
return ap
}
@@ -108,8 +124,13 @@ func RandomAssetHolding(forceFrozen bool) basics.AssetHolding {
frozen = true
}
+ var amount uint64
+ if crypto.RandUint64()%5 != 0 {
+ amount = crypto.RandUint64()
+ }
+
ah := basics.AssetHolding{
- Amount: crypto.RandUint64(),
+ Amount: amount,
Frozen: frozen,
}
return ah
@@ -117,20 +138,26 @@ func RandomAssetHolding(forceFrozen bool) basics.AssetHolding {
// RandomAppParams creates a random basics.AppParams
func RandomAppParams() basics.AppParams {
- ap := basics.AppParams{
- ApprovalProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
- ClearStateProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
- GlobalState: make(basics.TealKeyValue),
- StateSchemas: basics.StateSchemas{
+ var schemas basics.StateSchemas
+ if crypto.RandUint64()%10 != 0 {
+ schemas = basics.StateSchemas{
LocalStateSchema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
+ NumUint: crypto.RandUint64() % 5,
NumByteSlice: crypto.RandUint64() % 5,
},
GlobalStateSchema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
+ NumUint: crypto.RandUint64() % 5,
NumByteSlice: crypto.RandUint64() % 5,
},
- },
+ }
+ }
+
+ ap := basics.AppParams{
+ ApprovalProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
+ ClearStateProgram: make([]byte, int(crypto.RandUint63())%config.MaxAppProgramLen),
+ GlobalState: make(basics.TealKeyValue),
+ StateSchemas: schemas,
+ ExtraProgramPages: uint32(crypto.RandUint64() % 4),
}
if len(ap.ApprovalProgram) > 0 {
crypto.RandBytes(ap.ApprovalProgram[:])
@@ -143,22 +170,36 @@ func RandomAppParams() basics.AppParams {
ap.ClearStateProgram = nil
}
- for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumUint+ap.StateSchemas.GlobalStateSchema.NumUint; i++ {
- appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
- ap.GlobalState[appName] = basics.TealValue{
+ for i := uint64(0); i < ap.StateSchemas.GlobalStateSchema.NumUint; i++ {
+ var keyName string
+ if crypto.RandUint64()%5 != 0 {
+ keyName = fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ }
+ var value uint64
+ if crypto.RandUint64()%5 != 0 {
+ value = crypto.RandUint64()
+ }
+ ap.GlobalState[keyName] = basics.TealValue{
Type: basics.TealUintType,
- Uint: crypto.RandUint64(),
+ Uint: value,
}
}
- for i := uint64(0); i < ap.StateSchemas.LocalStateSchema.NumByteSlice+ap.StateSchemas.GlobalStateSchema.NumByteSlice; i++ {
- appName := fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
- tv := basics.TealValue{
- Type: basics.TealBytesType,
+ for i := uint64(0); i < ap.StateSchemas.GlobalStateSchema.NumByteSlice; i++ {
+ var keyName string
+ if crypto.RandUint64()%5 != 0 {
+ keyName = fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ }
+
+ var bytes []byte
+ if crypto.RandUint64()%5 != 0 {
+ bytes = make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(keyName)))
+ crypto.RandBytes(bytes[:])
+ }
+
+ ap.GlobalState[keyName] = basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: string(bytes),
}
- bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen))
- crypto.RandBytes(bytes[:])
- tv.Bytes = string(bytes)
- ap.GlobalState[appName] = tv
}
if len(ap.GlobalState) == 0 {
ap.GlobalState = nil
@@ -170,28 +211,41 @@ func RandomAppParams() basics.AppParams {
func RandomAppLocalState() basics.AppLocalState {
ls := basics.AppLocalState{
Schema: basics.StateSchema{
- NumUint: crypto.RandUint64()%5 + 1,
+ NumUint: crypto.RandUint64() % 5,
NumByteSlice: crypto.RandUint64() % 5,
},
KeyValue: make(map[string]basics.TealValue),
}
for i := uint64(0); i < ls.Schema.NumUint; i++ {
- appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
- ls.KeyValue[appName] = basics.TealValue{
+ var keyName string
+ if crypto.RandUint64()%5 != 0 {
+ keyName = fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ }
+ var value uint64
+ if crypto.RandUint64()%5 != 0 {
+ value = crypto.RandUint64()
+ }
+ ls.KeyValue[keyName] = basics.TealValue{
Type: basics.TealUintType,
- Uint: crypto.RandUint64(),
+ Uint: value,
}
}
for i := uint64(0); i < ls.Schema.NumByteSlice; i++ {
- appName := fmt.Sprintf("lapp%x-%x", crypto.RandUint64(), i)
- tv := basics.TealValue{
- Type: basics.TealBytesType,
+ var keyName string
+ if crypto.RandUint64()%5 != 0 {
+ keyName = fmt.Sprintf("tapp%x-%x", crypto.RandUint64(), i)
+ }
+ var bytes []byte
+ if crypto.RandUint64()%5 != 0 {
+ bytes = make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(keyName)))
+ crypto.RandBytes(bytes[:])
+ }
+
+ ls.KeyValue[keyName] = basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: string(bytes),
}
- bytes := make([]byte, crypto.RandUint64()%uint64(config.MaxBytesKeyValueLen-len(appName)))
- crypto.RandBytes(bytes[:])
- tv.Bytes = string(bytes)
- ls.KeyValue[appName] = tv
}
if len(ls.KeyValue) == 0 {
ls.KeyValue = nil
@@ -282,6 +336,7 @@ func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.Creatabl
NumUint: crypto.RandUint64() % 50,
NumByteSlice: crypto.RandUint64() % 50,
}
+ data.TotalExtraAppPages = uint32(crypto.RandUint64() % 50)
}
return data
diff --git a/ledger/testing/randomAccounts_test.go b/ledger/testing/randomAccounts_test.go
new file mode 100644
index 000000000..97744f497
--- /dev/null
+++ b/ledger/testing/randomAccounts_test.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package testing
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/test/reflectionhelpers"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ accountDataType := reflect.TypeOf(basics.AccountData{})
+
+ referencedAccountTypes := make([]reflectionhelpers.TypePath, 0)
+ reflectionhelpers.IterateReferencedTypes(accountDataType, func(path reflectionhelpers.TypePath, stack []reflect.Type) bool {
+ if len(path) == 0 {
+ // Ignore the top-level basics.AccountData type
+ return true
+ }
+ stackTop := stack[len(stack)-1]
+ if path[len(path)-1].FieldName == "_struct" && stackTop == reflect.TypeOf(struct{}{}) {
+ // Ignore the informational _struct field
+ return true
+ }
+ if stackTop.Kind() == reflect.Struct && stackTop.NumField() != 0 {
+ // If this is a struct, whether it's a zero value or not will depend on whether its
+ // fields are zero values or not. To avoid redundancy, ignore the containing struct type
+ return true
+ }
+ referencedAccountTypes = append(referencedAccountTypes, path.Clone())
+ return true
+ })
+
+ // If this test becomes flaky, increase niter
+ niter := 1000
+
+ accountFieldSeenZero := make([]bool, len(referencedAccountTypes))
+ accountFieldSeenNonzero := make([]bool, len(referencedAccountTypes))
+
+ accounts := RandomAccounts(niter, false)
+ for _, account := range accounts {
+ accountValue := reflect.ValueOf(account)
+ for i, typePath := range referencedAccountTypes {
+ values := typePath.ResolveValues(accountValue)
+
+ for _, value := range values {
+ isZero := value.IsZero()
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ fieldLen := value.Len()
+ isZero = fieldLen == 0
+ }
+ if !accountFieldSeenZero[i] && isZero {
+ accountFieldSeenZero[i] = true
+ }
+ if !accountFieldSeenNonzero[i] && !isZero {
+ accountFieldSeenNonzero[i] = true
+ }
+ }
+ }
+ }
+
+ // It's ok for these fields to never be the zero value. The intuition here is that it would be
+ // invalid to write an account to our DB that has the zero value for one of these fields. This
+ // could be because the field is non-optional, or the zero value of the field is an unachievable
+ // or invalid value.
+ zeroValueExceptions := []reflectionhelpers.TypePath{
+ reflectionhelpers.TypePath{}.AddField("MicroAlgos").AddField("Raw"),
+ reflectionhelpers.TypePath{}.AddField("AssetParams").AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("AssetParams").AddValue().AddField("Total"),
+ reflectionhelpers.TypePath{}.AddField("Assets").AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("AppLocalStates").AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("AppLocalStates").AddValue().AddField("KeyValue").AddValue().AddField("Type"),
+ reflectionhelpers.TypePath{}.AddField("AppParams").AddMapKey(),
+ reflectionhelpers.TypePath{}.AddField("AppParams").AddValue().AddField("ApprovalProgram"),
+ reflectionhelpers.TypePath{}.AddField("AppParams").AddValue().AddField("ClearStateProgram"),
+ reflectionhelpers.TypePath{}.AddField("AppParams").AddValue().AddField("GlobalState").AddValue().AddField("Type"),
+ }
+
+ for _, exception := range zeroValueExceptions {
+ // ensure all exceptions can resolve without panicking
+ exception.ResolveType(accountDataType)
+ }
+
+ // It's ok for these fields to always be the zero value
+ nonzeroValueExceptions := []reflectionhelpers.TypePath{
+ // It would be great to have these fields NOT always be zero, but ledger/accountdb_test.go
+ // currently depends on this.
+ reflectionhelpers.TypePath{}.AddField("RewardsBase"),
+ reflectionhelpers.TypePath{}.AddField("RewardedMicroAlgos").AddField("Raw"),
+ }
+
+ for _, exception := range nonzeroValueExceptions {
+ // ensure all exceptions can resolve without panicking
+ exception.ResolveType(accountDataType)
+ }
+
+ for i, typePath := range referencedAccountTypes {
+ skipZeroValueCheck := false
+ for _, exception := range zeroValueExceptions {
+ if exception.Equals(typePath) {
+ skipZeroValueCheck = true
+ break
+ }
+ }
+
+ skipNonZeroValueCheck := false
+ for _, exception := range nonzeroValueExceptions {
+ if exception.Equals(typePath) {
+ skipNonZeroValueCheck = true
+ break
+ }
+ }
+
+ referencedType := typePath.ResolveType(accountDataType)
+ if !skipZeroValueCheck {
+ assert.Truef(t, accountFieldSeenZero[i], "Path '%s' (type %v) was never seen with a zero value", typePath, referencedType)
+ }
+ if !skipNonZeroValueCheck {
+ assert.Truef(t, accountFieldSeenNonzero[i], "Path '%s' (type %v) was always seen with a zero value", typePath, referencedType)
+ }
+ }
+}
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index 3ec8cd45c..a7cdde4ea 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -55,13 +55,11 @@ const DefaultKMDDataDir = nodecontrol.DefaultKMDDataDir
// Client represents the entry point for all libgoal functions
type Client struct {
- nc nodecontrol.NodeController
- kmdStartArgs nodecontrol.KMDStartArgs
- dataDir string
- cacheDir string
- consensus config.ConsensusProtocols
- algodVersionAffinity algodclient.APIVersion
- kmdVersionAffinity kmdclient.APIVersion
+ nc nodecontrol.NodeController
+ kmdStartArgs nodecontrol.KMDStartArgs
+ dataDir string
+ cacheDir string
+ consensus config.ConsensusProtocols
suggestedParamsCache v1.TransactionParams
suggestedParamsExpire time.Time
@@ -148,8 +146,6 @@ func (c *Client) init(config ClientConfig, clientType ClientType) error {
}
c.dataDir = dataDir
c.cacheDir = config.CacheDir
- c.algodVersionAffinity = algodclient.APIVersionV1
- c.kmdVersionAffinity = kmdclient.APIVersionV1
// Get node controller
nc, err := getNodeController(config.BinDir, config.AlgodDataDir)
@@ -204,7 +200,6 @@ func (c *Client) ensureAlgodClient() (*algodclient.RestClient, error) {
if err != nil {
return nil, err
}
- algod.SetAPIVersionAffinity(c.algodVersionAffinity)
return &algod, err
}
@@ -1053,12 +1048,6 @@ func (c *Client) ConsensusParams(round uint64) (consensus config.ConsensusParams
return params, nil
}
-// SetAPIVersionAffinity sets the desired client API version affinity of the algod and kmd clients.
-func (c *Client) SetAPIVersionAffinity(algodVersionAffinity algodclient.APIVersion, kmdVersionAffinity kmdclient.APIVersion) {
- c.algodVersionAffinity = algodVersionAffinity
- c.kmdVersionAffinity = kmdVersionAffinity
-}
-
// AbortCatchup aborts the currently running catchup
func (c *Client) AbortCatchup() error {
algod, err := c.ensureAlgodClient()
@@ -1066,7 +1055,6 @@ func (c *Client) AbortCatchup() error {
return err
}
// we need to ensure we're using the v2 status so that we would get the catchpoint information.
- algod.SetAPIVersionAffinity(algodclient.APIVersionV2)
resp, err := algod.Status()
if err != nil {
return err
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 23bb810fc..610a0e9b5 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/passphrase"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
algodAcct "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -44,6 +45,8 @@ func deterministicAccounts(initCfg PpConfig) <-chan *crypto.SignatureSecrets {
go randomDeterministicAccounts(initCfg, out)
} else if initCfg.GeneratedAccountSampleMethod == "sequential" {
go sequentialDeterministicAccounts(initCfg, out)
+ } else if initCfg.GeneratedAccountSampleMethod == "mnemonic" {
+ go mnemonicDeterministicAccounts(initCfg, out)
}
return out
}
@@ -51,7 +54,7 @@ func deterministicAccounts(initCfg PpConfig) <-chan *crypto.SignatureSecrets {
func randomDeterministicAccounts(initCfg PpConfig, out chan *crypto.SignatureSecrets) {
numAccounts := initCfg.NumPartAccounts
totalAccounts := initCfg.GeneratedAccountsCount
- if totalAccounts < numAccounts*4 {
+ if totalAccounts < uint64(numAccounts)*4 {
// simpler rand strategy for smaller totalAccounts
order := rand.Perm(int(totalAccounts))[:numAccounts]
for _, acct := range order {
@@ -86,6 +89,21 @@ func sequentialDeterministicAccounts(initCfg PpConfig, out chan *crypto.Signatur
binary.LittleEndian.PutUint64(seed[:], uint64(acct))
out <- crypto.GenerateSignatureSecrets(seed)
}
+ close(out)
+}
+
+func mnemonicDeterministicAccounts(initCfg PpConfig, out chan *crypto.SignatureSecrets) {
+ for _, mnemonic := range initCfg.GeneratedAccountsMnemonics {
+ seedbytes, err := passphrase.MnemonicToKey(mnemonic)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot recover key seed from mnemonic: %v\n", err)
+ os.Exit(1)
+ }
+ var seed crypto.Seed
+ copy(seed[:], seedbytes)
+ out <- crypto.GenerateSignatureSecrets(seed)
+ }
+ close(out)
}
// load accounts from ${ALGORAND_DATA}/${netname}-${version}/*.rootkey
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index 5b9224c01..8e406d255 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -77,9 +77,10 @@ type PpConfig struct {
// configuration related to using bootstrapped ledgers built by netgoal
// TODO: support generatedAssetsCount, generatedApplicationCount
DeterministicKeys bool
- GeneratedAccountsCount uint32
+ GeneratedAccountsCount uint64
GeneratedAccountSampleMethod string
- GeneratedAccountsOffset uint32
+ GeneratedAccountsOffset uint64
+ GeneratedAccountsMnemonics []string
WeightPayment float64
WeightAsset float64
@@ -176,6 +177,7 @@ var accountSampleMethods = []string{
"",
"random",
"sequential",
+ "mnemonic",
}
// Check returns an error if config is invalid.
@@ -190,8 +192,9 @@ func (cfg *PpConfig) Check() error {
if !sampleOk {
return fmt.Errorf("unknown GeneratedAccountSampleMethod: %s", cfg.GeneratedAccountSampleMethod)
}
- if cfg.DeterministicKeys && (cfg.GeneratedAccountsOffset+cfg.NumPartAccounts > cfg.GeneratedAccountsCount) {
+ if cfg.DeterministicKeys && (cfg.GeneratedAccountsOffset+uint64(cfg.NumPartAccounts) > cfg.GeneratedAccountsCount) {
return fmt.Errorf("(GeneratedAccountsOffset %d) + (NumPartAccounts %d) > (GeneratedAccountsCount %d)", cfg.GeneratedAccountsOffset, cfg.NumPartAccounts, cfg.GeneratedAccountsCount)
}
+
return nil
}
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index 99910942d..83b01b8c0 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -198,7 +198,6 @@ func TestBasicCatchpointCatchup(t *testing.T) {
targetCatchpointRound := (basics.Round(expectedBlocksToDownload+minRound)/catchpointInterval + 1) * catchpointInterval
targetRound := uint64(targetCatchpointRound) + 1
primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
- primaryNodeRestClient.SetAPIVersionAffinity(algodclient.APIVersionV2)
log.Infof("Building ledger history..")
for {
err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second)
@@ -378,7 +377,6 @@ func TestCatchpointLabelGeneration(t *testing.T) {
currentRound := uint64(1)
targetRound := uint64(21)
primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
- primaryNodeRestClient.SetAPIVersionAffinity(algodclient.APIVersionV2)
log.Infof("Building ledger history..")
for {
err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second)
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 3506b8b66..1c9c94216 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -36,9 +36,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
- algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
- kmdclient "github.com/algorand/go-algorand/daemon/kmd/client"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
@@ -201,7 +199,6 @@ func TestClientCanGetStatus(t *testing.T) {
statusResponse, err := testClient.Status()
a.NoError(err)
a.NotEmpty(statusResponse)
- testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
statusResponse2, err := testClient.Status()
a.NoError(err)
a.NotEmpty(statusResponse2)
@@ -218,7 +215,6 @@ func TestClientCanGetStatusAfterBlock(t *testing.T) {
statusResponse, err := testClient.WaitForRound(1)
a.NoError(err)
a.NotEmpty(statusResponse)
- testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
statusResponse, err = testClient.WaitForRound(statusResponse.LastRound + 1)
a.NoError(err)
a.NotEmpty(statusResponse)
@@ -955,8 +951,6 @@ func TestPendingTransactionInfoInnerTxnAssetCreate(t *testing.T) {
testClient.WaitForRound(1)
- testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1)
-
wh, err := testClient.GetUnencryptedWalletHandle()
a.NoError(err)
addresses, err := testClient.ListAddresses(wh)
diff --git a/test/reflectionhelpers/helpers.go b/test/reflectionhelpers/helpers.go
new file mode 100644
index 000000000..ad551aa0f
--- /dev/null
+++ b/test/reflectionhelpers/helpers.go
@@ -0,0 +1,253 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package reflectionhelpers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// TypeSegmentKind is an enum for the types of TypeSegment
+type TypeSegmentKind int
+
+const (
+ // FieldSegmentKind represents a referenced field type on a Go type
+ FieldSegmentKind TypeSegmentKind = iota
+ // MapKeySegmentKind represents the key type of a Go map
+ MapKeySegmentKind
+ // ValueSegmentKind represents the value type of a Go map, array, or slice
+ ValueSegmentKind
+)
+
+// TypeSegment represents a single segment in a TypePath. This segment is a single reference from
+// one reflect.Type to another reflect.Type.
+type TypeSegment struct {
+ Kind TypeSegmentKind
+ // If Kind is FieldSegmentKind, then FieldName contains the name of the referenced field.
+ FieldName string
+}
+
+func (s TypeSegment) String() string {
+ switch s.Kind {
+ case FieldSegmentKind:
+ return "field " + s.FieldName
+ case MapKeySegmentKind:
+ return "map_key"
+ case ValueSegmentKind:
+ return "value"
+ default:
+ panic(fmt.Sprintf("Unknown TypeSegmentKind: %v", s.Kind))
+ }
+}
+
+// TypePath represents a path of referenced types starting from an origin reflect.Type. Note the
+// origin reflect.Type is not contained in TypePath.
+type TypePath []TypeSegment
+
+// Clone creates a deep copy of a TypePath
+func (p TypePath) Clone() TypePath {
+ cloned := make(TypePath, len(p))
+ copy(cloned, p)
+ return cloned
+}
+
+// AddMapKey adds a map key segment to a TypePath. The modification is done using append, so this
+// action may mutate the input TypePath.
+//
+// NOTE: There is no guarantee that this constructed TypePath is valid. Use ResolveType to verify
+// that after construction.
+func (p TypePath) AddMapKey() TypePath {
+ return append(p, TypeSegment{Kind: MapKeySegmentKind})
+}
+
+// AddValue adds a map, array, or slice value segment to a TypePath. The modification is done using
+// append, so this action may mutate the input TypePath.
+//
+// NOTE: There is no guarantee that this constructed TypePath is valid. Use ResolveType to verify
+// that after construction.
+func (p TypePath) AddValue() TypePath {
+ return append(p, TypeSegment{Kind: ValueSegmentKind})
+}
+
+// AddField adds a named field segment to a TypePath. The modification is done using append, so this
+// action may mutate the input TypePath.
+//
+// NOTE: There is no guarantee that this constructed TypePath is valid. Use ResolveType to verify
+// that after construction.
+func (p TypePath) AddField(fieldName string) TypePath {
+ return append(p, TypeSegment{Kind: FieldSegmentKind, FieldName: fieldName})
+}
+
+// ResolveType follows the TypePath to its end and returns the reflect.Type of the last referenced
+// type. The initial type, base, must be provided, since TypePath is a relative path. If the
+// TypePath represents a chain of type references that is not valid, this will panic.
+func (p TypePath) ResolveType(base reflect.Type) reflect.Type {
+ resolved := base
+ for _, segment := range p {
+ switch segment.Kind {
+ case MapKeySegmentKind:
+ resolved = resolved.Key()
+ case ValueSegmentKind:
+ resolved = resolved.Elem()
+ case FieldSegmentKind:
+ fieldType, ok := resolved.FieldByName(segment.FieldName)
+ if !ok {
+ panic(fmt.Errorf("Type '%v' does not have the field '%s'", resolved, segment.FieldName))
+ }
+ resolved = fieldType.Type
+ default:
+ panic(fmt.Errorf("Unexpected segment kind: %v", segment.Kind))
+ }
+ }
+ return resolved
+}
+
+// ResolveValues follows the TypePath to its end and returns a slice of all the values at that
+// location. The initial value, base, must have the type of the origin reflect.Type this TypePath
+// was made for. If the TypePath represents a chain of type references that is not valid, this will
+// panic.
+//
+// This function returns a slice of values because some segments may map to many values. Field
+// segments always map to a single value, but map key and (map, slice, or array) value segments may
+// map to zero or more values, depending on the value of the input argument.
+func (p TypePath) ResolveValues(base reflect.Value) []reflect.Value {
+ if len(p) == 0 {
+ return nil
+ }
+
+ var resolved []reflect.Value
+
+ segment := p[0]
+ switch segment.Kind {
+ case MapKeySegmentKind:
+ resolved = base.MapKeys()
+ case ValueSegmentKind:
+ switch base.Kind() {
+ case reflect.Map:
+ iter := base.MapRange()
+ for iter.Next() {
+ resolved = append(resolved, iter.Value())
+ }
+ case reflect.Array, reflect.Slice:
+ for i := 0; i < base.Len(); i++ {
+ resolved = append(resolved, base.Index(i))
+ }
+ default:
+ panic(fmt.Errorf("Unexpected kind %v", base.Kind()))
+ }
+ case FieldSegmentKind:
+ _, ok := base.Type().FieldByName(segment.FieldName)
+ if !ok {
+ panic(fmt.Errorf("Type '%v' does not have the field '%s'", base.Type(), segment.FieldName))
+ }
+ resolved = []reflect.Value{base.FieldByName(segment.FieldName)}
+ default:
+ panic(fmt.Errorf("Unexpected segment kind: %v", segment.Kind))
+ }
+
+ if len(p) > 1 {
+ rest := p[1:]
+ intermediateResolved := resolved
+ resolved = nil
+
+ for _, ir := range intermediateResolved {
+ resolvedToEnd := rest.ResolveValues(ir)
+ resolved = append(resolved, resolvedToEnd...)
+ }
+ }
+
+ return resolved
+}
+
+// Equals returns true if and only if the input TypePath has the exact same segments as this
+// TypePath.
+func (p TypePath) Equals(other TypePath) bool {
+ if len(p) != len(other) {
+ return false
+ }
+ for i := range p {
+ if p[i] != other[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (p TypePath) String() string {
+ segments := make([]string, len(p))
+ for i, s := range p {
+ segments[i] = s.String()
+ }
+ return strings.Join(segments, "->")
+}
+
+// ReferencedTypesIterationAction represents an action to be taken on each iteration of
+// IterateReferencedTypes. This function should return true to go deeper into the current type's
+// referenced types, or false to look no deeper at the current type's referenced types.
+//
+// NOTE: The TypePath argument this function receives is passed by reference. If you intend to save
+// this value for use after this function returns, you MUST call the Clone() method to keep a copy
+// of the TypePath as you currently see it.
+type ReferencedTypesIterationAction func(path TypePath, stack []reflect.Type) bool
+
+// IterateReferencedTypes recursively iterates over all referenced types from an initial
+// reflect.Type. The ReferencedTypesIterationAction argument is called for each referenced type.
+// This argument can also control whether the iteration goes deeper into a type or not.
+func IterateReferencedTypes(start reflect.Type, action ReferencedTypesIterationAction) {
+ seen := make(map[reflect.Type]bool)
+ iterateReferencedTypes(seen, nil, []reflect.Type{start}, action)
+}
+
+func iterateReferencedTypes(seen map[reflect.Type]bool, path TypePath, typeStack []reflect.Type, action ReferencedTypesIterationAction) {
+ currentType := typeStack[len(typeStack)-1]
+
+ if _, seenType := seen[currentType]; seenType {
+ return
+ }
+
+ if !action(path, typeStack) {
+ // if action returns false, don't visit its children
+ return
+ }
+
+ // add currentType to seen set, to avoid infinite recursion if currentType references itself
+ seen[currentType] = true
+
+ // after currentType's children are visited, "forget" the type, so we can examine it again if needed
+ // if this didn't happen, we would ignore any additional occurrences of this type
+ defer delete(seen, currentType)
+
+ switch currentType.Kind() {
+ case reflect.Map:
+ newPath := path.AddMapKey()
+ newStack := append(typeStack, currentType.Key())
+ iterateReferencedTypes(seen, newPath, newStack, action)
+ fallthrough
+ case reflect.Array, reflect.Slice, reflect.Ptr:
+ newPath := path.AddValue()
+ newStack := append(typeStack, currentType.Elem())
+ iterateReferencedTypes(seen, newPath, newStack, action)
+ case reflect.Struct:
+ for i := 0; i < currentType.NumField(); i++ {
+ field := currentType.Field(i)
+ newPath := path.AddField(field.Name)
+ newStack := append(typeStack, field.Type)
+ iterateReferencedTypes(seen, newPath, newStack, action)
+ }
+ }
+}
diff --git a/test/testdata/configs/config-v24.json b/test/testdata/configs/config-v24.json
new file mode 100644
index 000000000..54bd0f9f2
--- /dev/null
+++ b/test/testdata/configs/config-v24.json
@@ -0,0 +1,106 @@
+{
+ "Version": 24,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 7,
+ "AgreementIncomingProposalsQueueLength": 25,
+ "AgreementIncomingVotesQueueLength": 10000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 0,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 800,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxAcctLookback": 4,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxConnectionsPerIP": 30,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}
diff --git a/tools/debug/determaccount/main.go b/tools/debug/determaccount/main.go
new file mode 100644
index 000000000..84dfbf73f
--- /dev/null
+++ b/tools/debug/determaccount/main.go
@@ -0,0 +1,45 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "encoding/binary"
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+)
+
+var numAccounts = flag.Uint64("numaccounts", 0, "Use this many accounts")
+var offset = flag.Uint64("offset", 0, "Start at this offset")
+
+func main() {
+ flag.Parse()
+ if *numAccounts == 0 {
+ flag.Usage()
+ os.Exit(1)
+ }
+ for i := uint64(0); i < *numAccounts; i++ {
+ acct := i + *offset
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], uint64(acct))
+ secrets := crypto.GenerateSignatureSecrets(seed)
+ fmt.Println(i, acct, basics.Address(secrets.SignatureVerifier).String())
+ }
+}