author    John Lee <64482439+algojohnlee@users.noreply.github.com>  2023-12-05 12:37:59 -0500
committer GitHub <noreply@github.com>  2023-12-05 12:37:59 -0500
commit    6a6a15de0180181079a610c12aaf583f8e6d7c0c (patch)
tree      f61dacb4e16815d7b0da5411e78c6d14bb71dc1a
parent    7037cb3b4ee63d722c9d96eebcf97b9039e18e1c (diff)
parent    14c0d8d0811e8a83a0f55f6692e233873b80199b (diff)

Merge pull request #5852 from Algo-devops-service/relstable3.20.1 (tag: v3.20.1-stable)
-rw-r--r--  README.md | 2
-rw-r--r--  buildnumber.dat | 2
-rw-r--r--  catchup/catchpointService.go | 42
-rw-r--r--  catchup/catchpointService_test.go | 70
-rw-r--r--  catchup/peerSelector.go | 50
-rw-r--r--  catchup/peerSelector_test.go | 133
-rw-r--r--  catchup/service.go | 165
-rw-r--r--  catchup/service_test.go | 140
-rw-r--r--  catchup/universalFetcher.go | 21
-rw-r--r--  catchup/universalFetcher_test.go | 10
-rw-r--r--  cmd/algocfg/profileCommand.go | 20
-rw-r--r--  cmd/algocfg/resetCommand.go | 2
-rw-r--r--  cmd/algocfg/setCommand.go | 2
-rw-r--r--  cmd/algokey/part.go | 3
-rw-r--r--  cmd/goal/account.go | 7
-rw-r--r--  cmd/goal/application.go | 8
-rw-r--r--  cmd/goal/interact.go | 4
-rw-r--r--  cmd/goal/tealsign.go | 2
-rw-r--r--  cmd/opdoc/opdoc.go | 2
-rw-r--r--  cmd/opdoc/tmLanguage.go | 2
-rw-r--r--  cmd/tealdbg/cdtSession.go | 8
-rw-r--r--  cmd/tealdbg/cdtSession_test.go | 6
-rw-r--r--  cmd/tealdbg/util.go | 18
-rw-r--r--  components/mocks/mockCatchpointCatchupAccessor.go | 5
-rw-r--r--  config/config_test.go | 124
-rw-r--r--  config/consensus.go | 32
-rw-r--r--  config/localTemplate.go | 85
-rw-r--r--  config/local_defaults.go | 7
-rw-r--r--  config/version.go | 2
-rw-r--r--  crypto/batchverifier.c | 20
-rw-r--r--  crypto/batchverifier.go | 81
-rw-r--r--  crypto/curve25519.go | 25
-rw-r--r--  crypto/hashes.go | 2
-rw-r--r--  crypto/hashes_test.go | 1
-rw-r--r--  crypto/merklearray/layer.go | 6
-rw-r--r--  crypto/merklearray/merkle_test.go | 5
-rw-r--r--  crypto/merklearray/partial.go | 2
-rw-r--r--  crypto/merklearray/worker.go | 6
-rw-r--r--  crypto/onetimesig.go | 19
-rw-r--r--  crypto/statetrie/nibbles/nibbles.go | 161
-rw-r--r--  crypto/statetrie/nibbles/nibbles_test.go | 218
-rw-r--r--  crypto/util.go | 12
-rw-r--r--  crypto/util_test.go | 31
-rw-r--r--  daemon/algod/api/algod.oas2.json | 80
-rw-r--r--  daemon/algod/api/algod.oas3.yml | 106
-rw-r--r--  daemon/algod/api/server/router.go | 8
-rw-r--r--  daemon/algod/api/server/v2/generated/data/routes.go | 7
-rw-r--r--  daemon/algod/api/server/v2/generated/experimental/routes.go | 408
-rw-r--r--  daemon/algod/api/server/v2/generated/model/types.go | 12
-rw-r--r--  daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go | 413
-rw-r--r--  daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go | 81
-rw-r--r--  daemon/algod/api/server/v2/generated/participating/private/routes.go | 462
-rw-r--r--  daemon/algod/api/server/v2/generated/participating/public/routes.go | 25
-rw-r--r--  daemon/algod/api/server/v2/handlers.go | 54
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_resources_test.go | 6
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_test.go | 65
-rw-r--r--  daemon/algod/api/server/v2/test/helpers.go | 5
-rw-r--r--  data/account/participation.go | 10
-rw-r--r--  data/account/participationRegistry_test.go | 3
-rw-r--r--  data/account/participation_test.go | 10
-rw-r--r--  data/appRateLimiter.go | 322
-rw-r--r--  data/appRateLimiter_test.go | 526
-rw-r--r--  data/bookkeeping/genesis_test.go | 28
-rw-r--r--  data/bookkeeping/lightBlockHeader.go | 20
-rw-r--r--  data/bookkeeping/msgp_gen.go | 45
-rw-r--r--  data/committee/common_test.go | 18
-rw-r--r--  data/committee/credential_test.go | 37
-rw-r--r--  data/pools/transactionPool.go | 8
-rw-r--r--  data/transactions/logic/Makefile | 2
-rw-r--r--  data/transactions/logic/README.md | 81
-rw-r--r--  data/transactions/logic/README_in.md | 14
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v1.md | 2
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v10.md | 10
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v2.md | 2
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v3.md | 2
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v4.md | 2
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v5.md | 6
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v6.md | 6
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v7.md | 10
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v8.md | 10
-rw-r--r--  data/transactions/logic/TEAL_opcodes_v9.md | 10
-rw-r--r--  data/transactions/logic/assembler.go | 4
-rw-r--r--  data/transactions/logic/assembler_test.go | 25
-rw-r--r--  data/transactions/logic/crypto.go | 360
-rw-r--r--  data/transactions/logic/crypto_test.go (renamed from data/transactions/logic/evalCrypto_test.go) | 60
-rw-r--r--  data/transactions/logic/doc.go | 5
-rw-r--r--  data/transactions/logic/doc_test.go | 11
-rw-r--r--  data/transactions/logic/eval.go | 342
-rw-r--r--  data/transactions/logic/evalStateful_test.go | 98
-rw-r--r--  data/transactions/logic/eval_test.go | 2
-rw-r--r--  data/transactions/logic/jsonspec.md | 2
-rw-r--r--  data/transactions/logic/langspec_v1.json | 112
-rw-r--r--  data/transactions/logic/langspec_v10.json | 162
-rw-r--r--  data/transactions/logic/langspec_v2.json | 114
-rw-r--r--  data/transactions/logic/langspec_v3.json | 122
-rw-r--r--  data/transactions/logic/langspec_v4.json | 122
-rw-r--r--  data/transactions/logic/langspec_v5.json | 136
-rw-r--r--  data/transactions/logic/langspec_v6.json | 136
-rw-r--r--  data/transactions/logic/langspec_v7.json | 150
-rw-r--r--  data/transactions/logic/langspec_v8.json | 150
-rw-r--r--  data/transactions/logic/langspec_v9.json | 150
-rw-r--r--  data/transactions/logic/opcodes.go | 24
-rw-r--r--  data/transactions/logic/program.go | 2
-rw-r--r--  data/transactions/logic/teal.tmLanguage.json | 2
-rw-r--r--  data/txHandler.go | 78
-rw-r--r--  data/txHandler_test.go | 178
-rw-r--r--  docs/follower_node.md | 4
-rw-r--r--  go.mod | 4
-rw-r--r--  go.sum | 4
-rw-r--r--  installer/config.json.example | 7
-rw-r--r--  ledger/bulletin.go | 48
-rw-r--r--  ledger/bulletin_test.go | 108
-rw-r--r--  ledger/catchpointtracker.go | 115
-rw-r--r--  ledger/catchpointtracker_test.go | 108
-rw-r--r--  ledger/catchupaccessor.go | 15
-rw-r--r--  ledger/catchupaccessor_test.go | 8
-rw-r--r--  ledger/eval/eval.go | 2
-rw-r--r--  ledger/eval/prefetcher/prefetcher.go | 42
-rw-r--r--  ledger/ledger.go | 16
-rw-r--r--  ledger/store/blockdb/blockdb.go | 10
-rw-r--r--  ledger/tracker.go | 81
-rw-r--r--  ledger/tracker_test.go | 220
-rw-r--r--  libgoal/participation.go | 84
-rw-r--r--  libgoal/participation/participation.go | 104
-rw-r--r--  libgoal/participation/participation_test.go | 113
-rw-r--r--  network/netidentity.go | 5
-rw-r--r--  network/netidentity_test.go | 3
-rw-r--r--  network/netprio.go | 3
-rw-r--r--  network/p2pNetwork.go | 16
-rw-r--r--  network/p2pNetwork_test.go | 8
-rw-r--r--  network/p2pPeer.go | 10
-rw-r--r--  network/wsNetwork.go | 89
-rw-r--r--  network/wsNetwork_test.go | 95
-rw-r--r--  network/wsPeer.go | 119
-rw-r--r--  network/wsPeer_test.go | 73
-rw-r--r--  node/follower_node.go | 8
-rw-r--r--  node/node.go | 2
-rw-r--r--  node/node_test.go | 6
-rw-r--r--  rpcs/blockService.go | 21
-rw-r--r--  rpcs/blockService_test.go | 6
-rw-r--r--  rpcs/ledgerService.go | 8
-rw-r--r--  rpcs/ledgerService_test.go | 6
-rw-r--r--  rpcs/txService_test.go | 3
-rw-r--r--  rpcs/txSyncer_test.go | 30
-rw-r--r--  scripts/windows/instructions.md | 2
-rw-r--r--  shared/pingpong/accounts.go | 8
-rw-r--r--  shared/pingpong/pingpong.go | 24
-rw-r--r--  test/e2e-go/features/catchup/basicCatchup_test.go | 18
-rw-r--r--  test/e2e-go/features/catchup/catchpointCatchup_test.go | 4
-rw-r--r--  test/e2e-go/features/participation/accountParticipationTransitions_test.go | 7
-rw-r--r--  test/e2e-go/features/stateproofs/stateproofs_test.go | 9
-rw-r--r--  test/e2e-go/features/transactions/onlineStatusChange_test.go | 7
-rw-r--r--  test/framework/fixtures/fixture.go | 2
-rw-r--r--  test/heapwatch/metrics_viz.py | 25
-rwxr-xr-x  test/heapwatch/plot_crr_csv.py | 7
-rw-r--r--  test/heapwatch/requirements.txt | 1
-rw-r--r--  test/testdata/configs/config-v32.json | 139
-rw-r--r--  test/testdata/configs/config-v33.json | 140
-rw-r--r--  tools/block-generator/go.mod | 2
-rw-r--r--  tools/block-generator/go.sum | 4
-rw-r--r--  util/codecs/json.go | 67
-rw-r--r--  util/codecs/json_test.go | 121
-rw-r--r--  util/condvar/timedwait.go | 6
-rw-r--r--  util/metrics/counter.go | 9
-rw-r--r--  util/metrics/counterCommon.go | 5
-rw-r--r--  util/metrics/gauge.go | 10
-rw-r--r--  util/metrics/metrics.go | 2
167 files changed, 6570 insertions(+), 2679 deletions(-)
diff --git a/README.md b/README.md
index 682d88436..f1ebcecea 100644
--- a/README.md
+++ b/README.md
@@ -96,7 +96,7 @@ Please refer to our [CONTRIBUTING](CONTRIBUTING.md) document.
## Project Layout
-`go-algorand` is split into various subsystems containing varius packages.
+`go-algorand` is split into various subsystems containing various packages.
### Core
diff --git a/buildnumber.dat b/buildnumber.dat
index 573541ac9..d00491fd7 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-0
+1
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index 7ad45305f..a0a22c5e3 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -24,6 +24,7 @@ import (
"github.com/algorand/go-deadlock"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -370,9 +371,11 @@ func (cs *CatchpointCatchupService) processStageLatestBlockDownload() (err error
attemptsCount := 0
var blk *bookkeeping.Block
+ var cert *agreement.Certificate
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
- if ledgerBlock, err := cs.ledger.Block(blockRound); err == nil {
+ if ledgerBlock, ledgerCert, err0 := cs.ledger.BlockCert(blockRound); err0 == nil {
blk = &ledgerBlock
+ cert = &ledgerCert
}
var protoParams config.ConsensusParams
var ok bool
@@ -384,7 +387,7 @@ func (cs *CatchpointCatchupService) processStageLatestBlockDownload() (err error
blockDownloadDuration := time.Duration(0)
if blk == nil {
var stop bool
- blk, blockDownloadDuration, psp, stop, err = cs.fetchBlock(blockRound, uint64(attemptsCount))
+ blk, cert, blockDownloadDuration, psp, stop, err = cs.fetchBlock(blockRound, uint64(attemptsCount))
if stop {
return err
} else if blk == nil {
@@ -462,7 +465,7 @@ func (cs *CatchpointCatchupService) processStageLatestBlockDownload() (err error
return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed when calling StoreBalancesRound : %v", err))
}
- err = cs.ledgerAccessor.StoreFirstBlock(cs.ctx, blk)
+ err = cs.ledgerAccessor.StoreFirstBlock(cs.ctx, blk, cert)
if err != nil {
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
@@ -542,21 +545,24 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
prevBlock := &topBlock
blocksFetched := uint64(1) // we already got the first block in the previous step.
var blk *bookkeeping.Block
+ var cert *agreement.Certificate
for retryCount := uint64(1); blocksFetched <= lookback; {
if err := cs.ctx.Err(); err != nil {
return cs.stopOrAbort()
}
blk = nil
+ cert = nil
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
- if ledgerBlock, err := cs.ledger.Block(topBlock.Round() - basics.Round(blocksFetched)); err == nil {
+ if ledgerBlock, ledgerCert, err0 := cs.ledger.BlockCert(topBlock.Round() - basics.Round(blocksFetched)); err0 == nil {
blk = &ledgerBlock
+ cert = &ledgerCert
} else {
- switch err.(type) {
+ switch err0.(type) {
case ledgercore.ErrNoEntry:
// this is expected, ignore this one.
default:
- cs.log.Warnf("processStageBlocksDownload encountered the following error when attempting to retrieve the block for round %d : %v", topBlock.Round()-basics.Round(blocksFetched), err)
+ cs.log.Warnf("processStageBlocksDownload encountered the following error when attempting to retrieve the block for round %d : %v", topBlock.Round()-basics.Round(blocksFetched), err0)
}
}
@@ -564,7 +570,7 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
blockDownloadDuration := time.Duration(0)
if blk == nil {
var stop bool
- blk, blockDownloadDuration, psp, stop, err = cs.fetchBlock(topBlock.Round()-basics.Round(blocksFetched), retryCount)
+ blk, cert, blockDownloadDuration, psp, stop, err = cs.fetchBlock(topBlock.Round()-basics.Round(blocksFetched), retryCount)
if stop {
return err
} else if blk == nil {
@@ -624,7 +630,7 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
}
// all good, persist and move on.
- err = cs.ledgerAccessor.StoreBlock(cs.ctx, blk)
+ err = cs.ledgerAccessor.StoreBlock(cs.ctx, blk, cert)
if err != nil {
cs.log.Warnf("processStageBlocksDownload failed to store downloaded staging block for round %d", blk.Round())
cs.updateBlockRetrievalStatistics(-1, -1)
@@ -649,17 +655,17 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
// fetchBlock uses the internal peer selector blocksDownloadPeerSelector to pick a peer and then attempt to fetch the block requested from that peer.
// The method returns stop=true if the caller should exit the current operation.
// If the method returns a nil block, the caller is expected to retry the operation, increasing the retry counter as needed.
-func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount uint64) (blk *bookkeeping.Block, downloadDuration time.Duration, psp *peerSelectorPeer, stop bool, err error) {
+func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount uint64) (blk *bookkeeping.Block, cert *agreement.Certificate, downloadDuration time.Duration, psp *peerSelectorPeer, stop bool, err error) {
psp, err = cs.blocksDownloadPeerSelector.getNextPeer()
if err != nil {
if err == errPeerSelectorNoPeerPoolsAvailable {
cs.log.Infof("fetchBlock: unable to obtain a list of peers to retrieve the latest block from; will retry shortly.")
// this is possible on startup, since the network package might not yet have retrieved the list of peers.
time.Sleep(noPeersAvailableSleepInterval)
- return nil, time.Duration(0), psp, false, nil
+ return nil, nil, time.Duration(0), psp, false, nil
}
err = fmt.Errorf("fetchBlock: unable to obtain a list of peers to retrieve the latest block from : %w", err)
- return nil, time.Duration(0), psp, true, cs.abort(err)
+ return nil, nil, time.Duration(0), psp, true, cs.abort(err)
}
peer := psp.Peer
@@ -669,26 +675,26 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
if retryCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
- return nil, time.Duration(0), psp, false, nil
+ return nil, nil, time.Duration(0), psp, false, nil
}
- return nil, time.Duration(0), psp, true, cs.abort(fmt.Errorf("fetchBlock: recurring non-HTTP peer was provided by the peer selector"))
+ return nil, nil, time.Duration(0), psp, true, cs.abort(fmt.Errorf("fetchBlock: recurring non-HTTP peer was provided by the peer selector"))
}
fetcher := makeUniversalBlockFetcher(cs.log, cs.net, cs.config)
- blk, _, downloadDuration, err = fetcher.fetchBlock(cs.ctx, round, httpPeer)
+ blk, cert, downloadDuration, err = fetcher.fetchBlock(cs.ctx, round, httpPeer)
if err != nil {
if cs.ctx.Err() != nil {
- return nil, time.Duration(0), psp, true, cs.stopOrAbort()
+ return nil, nil, time.Duration(0), psp, true, cs.stopOrAbort()
}
if retryCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
cs.log.Infof("Failed to download block %d on attempt %d out of %d. %v", round, retryCount, cs.config.CatchupBlockDownloadRetryAttempts, err)
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankDownloadFailed)
- return nil, time.Duration(0), psp, false, nil
+ return nil, nil, time.Duration(0), psp, false, nil
}
- return nil, time.Duration(0), psp, true, cs.abort(fmt.Errorf("fetchBlock failed after multiple blocks download attempts"))
+ return nil, nil, time.Duration(0), psp, true, cs.abort(fmt.Errorf("fetchBlock failed after multiple blocks download attempts"))
}
// success
- return blk, downloadDuration, psp, false, nil
+ return blk, cert, downloadDuration, psp, false, nil
}
// processStageLedgerDownload is the fifth catchpoint catchup stage. It completes the catchup process, swaps in the new tables, and restarts the node functionality.
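
The recurring change in this file threads the agreement certificate through every path that produces a block, so StoreFirstBlock/StoreBlock can persist both together. A minimal sketch of that fetch-or-reuse invariant, using hypothetical stand-in types rather than the repo's real bookkeeping.Block and agreement.Certificate:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for bookkeeping.Block and agreement.Certificate,
// used only to illustrate the flow; they are not the repo's real types.
type block struct{ round uint64 }
type certificate struct{ round uint64 }

// fetchOrReuse condenses the fetch-or-reuse step shown above: a block reused
// from the local ledger now carries its certificate (via BlockCert), so the
// later StoreBlock call never receives a block without one.
func fetchOrReuse(
	local func(uint64) (block, certificate, error),
	fetch func(uint64) (block, certificate, error),
	rnd uint64,
) (*block, *certificate, error) {
	if b, c, err := local(rnd); err == nil {
		return &b, &c, nil // ledger already has block and cert
	}
	b, c, err := fetch(rnd) // otherwise download both together
	if err != nil {
		return nil, nil, err
	}
	return &b, &c, nil
}

func main() {
	local := func(uint64) (block, certificate, error) {
		return block{}, certificate{}, errors.New("no entry")
	}
	fetch := func(rnd uint64) (block, certificate, error) {
		return block{round: rnd}, certificate{round: rnd}, nil
	}
	b, c, _ := fetchOrReuse(local, fetch, 7)
	fmt.Println(b.round, c.round) // 7 7
}
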
diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go
index 48cea110d..34f1adf0f 100644
--- a/catchup/catchpointService_test.go
+++ b/catchup/catchpointService_test.go
@@ -22,12 +22,14 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -35,7 +37,7 @@ import (
type catchpointCatchupLedger struct {
}
-func (l *catchpointCatchupLedger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
+func (l *catchpointCatchupLedger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
blk = bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{
@@ -43,13 +45,14 @@ func (l *catchpointCatchupLedger) Block(rnd basics.Round) (blk bookkeeping.Block
},
},
}
+ cert = agreement.Certificate{}
commitments, err := blk.PaysetCommit()
if err != nil {
- return blk, err
+ return blk, cert, err
}
blk.TxnCommitments = commitments
- return blk, nil
+ return blk, cert, nil
}
func (l *catchpointCatchupLedger) GenesisHash() (d crypto.Digest) {
@@ -95,3 +98,64 @@ func TestCatchpointServicePeerRank(t *testing.T) {
err := cs.processStageLatestBlockDownload()
require.NoError(t, err)
}
+
+type catchpointAccessorMock struct {
+ mocks.MockCatchpointCatchupAccessor
+ t *testing.T
+ topBlk bookkeeping.Block
+}
+
+func (m *catchpointAccessorMock) EnsureFirstBlock(ctx context.Context) (blk bookkeeping.Block, err error) {
+ return m.topBlk, nil
+}
+
+func (m *catchpointAccessorMock) StoreBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error) {
+ require.NotNil(m.t, blk)
+ require.NotNil(m.t, cert)
+ return nil
+}
+
+type catchpointCatchupLedger2 struct {
+ catchpointCatchupLedger
+ blk bookkeeping.Block
+}
+
+func (l *catchpointCatchupLedger2) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
+ return l.blk, agreement.Certificate{}, nil
+}
+
+// TestProcessStageBlocksDownloadNilCert ensures StoreBlock does not receive a nil certificate when the ledger already has the block.
+// It uses two mocks, catchpointAccessorMock and catchpointCatchupLedger2, plus pre-crafted blocks to drive a single iteration of processStageBlocksDownload.
+func TestProcessStageBlocksDownloadNilCert(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var err error
+ blk1 := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: 1,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ },
+ }
+ blk1.TxnCommitments, err = blk1.PaysetCommit()
+ require.NoError(t, err)
+
+ blk2 := blk1
+ blk2.BlockHeader.Round = 2
+ blk2.BlockHeader.Branch = blk1.Hash()
+ blk2.TxnCommitments, err = blk2.PaysetCommit()
+ require.NoError(t, err)
+
+ ctx, cf := context.WithCancel(context.Background())
+ cs := CatchpointCatchupService{
+ ctx: ctx,
+ cancelCtxFunc: cf,
+ ledgerAccessor: &catchpointAccessorMock{topBlk: blk2, t: t},
+ ledger: &catchpointCatchupLedger2{blk: blk1},
+ log: logging.TestingLog(t),
+ }
+
+ err = cs.processStageBlocksDownload()
+ require.NoError(t, err)
+}
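
catchpointAccessorMock and catchpointCatchupLedger2 above rely on Go struct embedding: the outer type inherits the embedded implementation and overrides only the method under test. A tiny sketch of the idiom, with hypothetical interfaces standing in for the test's real types:

package main

import "fmt"

type ledger interface {
	BlockCert(rnd uint64) (string, string, error)
	GenesisHash() string
}

// baseLedger is a stand-in for catchpointCatchupLedger.
type baseLedger struct{}

func (baseLedger) BlockCert(rnd uint64) (string, string, error) { return "base-blk", "cert", nil }
func (baseLedger) GenesisHash() string                          { return "genesis" }

// ledger2 embeds baseLedger and overrides only BlockCert, the same way
// catchpointCatchupLedger2 overrides it to return a pre-crafted block.
type ledger2 struct {
	baseLedger
	blk string
}

func (l ledger2) BlockCert(rnd uint64) (string, string, error) { return l.blk, "cert", nil }

func main() {
	var l ledger = ledger2{blk: "crafted-blk"}
	fmt.Println(l.BlockCert(1))  // crafted-blk cert <nil>
	fmt.Println(l.GenesisHash()) // genesis: inherited from the embedded type
}
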
diff --git a/catchup/peerSelector.go b/catchup/peerSelector.go
index a75e7c61f..e9f256e1b 100644
--- a/catchup/peerSelector.go
+++ b/catchup/peerSelector.go
@@ -51,6 +51,10 @@ const (
peerRank4LowBlockTime = 801
peerRank4HighBlockTime = 999
+ // peerRankNoBlockForRound is used for responses that failed because the peer had no block for the requested round.
+ // This indicates the peer is either behind, the block has not been produced yet, or the peer does not retain blocks that old.
+ peerRankNoBlockForRound = 2000
+
// peerRankDownloadFailed is used for responses which could be temporary, such as missing files, or such that we don't
// have clear resolution
peerRankDownloadFailed = 10000
@@ -110,11 +114,13 @@ type peerPool struct {
// client to provide feedback regarding the peer's performance, and to have the subsequent
// query(s) take advantage of that intel.
type peerSelector struct {
- mu deadlock.Mutex
- net peersRetriever
+ mu deadlock.Mutex
+ net peersRetriever
+ // peerClasses is the list of peer classes we want to have in the peerSelector.
peerClasses []peerClass
- pools []peerPool
- counter uint64
+ // pools is the list of peer pools, each pool contains a list of peers with the same rank.
+ pools []peerPool
+ counter uint64
}
// historicStats stores the past windowSize ranks for the peer passed
@@ -141,7 +147,7 @@ func makeHistoricStatus(windowSize int, class peerClass) *historicStats {
// that will determine the rank of the peer.
hs := historicStats{
windowSize: windowSize,
- rankSamples: make([]int, windowSize, windowSize),
+ rankSamples: make([]int, windowSize),
requestGaps: make([]uint64, 0, windowSize),
rankSum: uint64(class.initialRank) * uint64(windowSize),
gapSum: 0.0}
@@ -227,18 +233,24 @@ func (hs *historicStats) push(value int, counter uint64, class peerClass) (avera
// Download may fail for various reasons. Give it additional tries
// and see if it recovers/improves.
- if value == peerRankDownloadFailed {
+ factor := float64(1.0)
+ switch value {
+ // - Set the rank to the class upper bound multiplied
+ // by the number of downloadFailures.
+ // - Each downloadFailure increments the counter, and
+ // each non-failure decrements it, until it gets to 0.
+ // - When the peer is consistently failing to
+ // download, the value added to rankSum will
+ // increase at an increasing rate to evict the peer
+ // from the class sooner.
+ case peerRankNoBlockForRound:
+ // for no-block errors, apply a much smoother rank increase
+ factor = 0.1
+ fallthrough
+ case peerRankDownloadFailed:
hs.downloadFailures++
- // - Set the rank to the class upper bound multiplied
- // by the number of downloadFailures.
- // - Each downloadFailure increments the counter, and
- // each non-failure decrements it, until it gets to 0.
- // - When the peer is consistently failing to
- // download, the value added to rankSum will
- // increase at an increasing rate to evict the peer
- // from the class sooner.
- value = upperBound(class) * int(math.Exp2(float64(hs.downloadFailures)))
- } else {
+ value = upperBound(class) * int(math.Exp2(float64(hs.downloadFailures)*factor))
+ default:
if hs.downloadFailures > 0 {
hs.downloadFailures--
}
@@ -250,12 +262,12 @@ func (hs *historicStats) push(value int, counter uint64, class peerClass) (avera
// The average performance of the peer
average := float64(hs.rankSum) / float64(len(hs.rankSamples))
- if int(average) > upperBound(class) && initialRank == peerRankDownloadFailed {
+ if int(average) > upperBound(class) && (initialRank == peerRankDownloadFailed || initialRank == peerRankNoBlockForRound) {
// peerRankDownloadFailed will be delayed, to give the peer
// additional time to improve. If it does not improve over time,
// the average will exceed the class limit. At this point,
// it will be pushed down to download failed class.
- return peerRankDownloadFailed
+ return initialRank
}
// A penalty is added relative to how frequently the peer is used
@@ -468,7 +480,7 @@ func (ps *peerSelector) refreshAvailablePeers() {
for peerIdx := len(pool.peers) - 1; peerIdx >= 0; peerIdx-- {
peer := pool.peers[peerIdx].peer
if peerAddress := peerAddress(peer); peerAddress != "" {
- if toRemove, _ := existingPeers[pool.peers[peerIdx].class.peerClass][peerAddress]; toRemove {
+ if toRemove := existingPeers[pool.peers[peerIdx].class.peerClass][peerAddress]; toRemove {
// need to be removed.
pool.peers = append(pool.peers[:peerIdx], pool.peers[peerIdx+1:]...)
}
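
The factor introduced above controls how quickly a peer's rank degrades: hard download failures double the penalty on every occurrence, while no-block-for-round responses barely move it. A worked example of the exponential in historicStats.push, assuming a class upper bound of 999 (peerRank4HighBlockTime):

package main

import (
	"fmt"
	"math"
)

// penalty mirrors the computation in historicStats.push above:
// value = upperBound(class) * 2^(downloadFailures*factor), with factor 1.0
// for hard download failures and 0.1 for no-block-for-round responses.
// upperBound here is an illustrative constant standing in for the class bound.
func penalty(upperBound, failures int, factor float64) int {
	return upperBound * int(math.Exp2(float64(failures)*factor))
}

func main() {
	const bound = 999 // e.g. peerRank4HighBlockTime
	for _, failures := range []int{1, 5, 10} {
		fmt.Printf("failures=%2d  downloadFailed=%8d  noBlockForRound=%4d\n",
			failures,
			penalty(bound, failures, 1.0), // doubles with every failure
			penalty(bound, failures, 0.1)) // stays flat until ~10 failures
	}
}
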
diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go
index 4991143ef..680b65cf3 100644
--- a/catchup/peerSelector_test.go
+++ b/catchup/peerSelector_test.go
@@ -21,9 +21,11 @@ import (
"context"
"encoding/binary"
"net/http"
+ "reflect"
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
@@ -68,8 +70,9 @@ func (d *mockUnicastPeer) GetConnectionLatency() time.Duration {
return time.Duration(0)
}
-func TestPeerAddress(t *testing.T) {
+func TestPeerSelector_PeerAddress(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
httpPeer := &mockHTTPPeer{address: "12345"}
require.Equal(t, "12345", peerAddress(httpPeer))
@@ -81,8 +84,9 @@ func TestPeerAddress(t *testing.T) {
require.Equal(t, "", peerAddress(t))
}
-func TestDownloadDurationToRank(t *testing.T) {
+func TestPeerSelector_DownloadDurationToRank(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
// verify mid value
require.Equal(t, 1500, downloadDurationToRank(50*time.Millisecond, 0*time.Millisecond, 100*time.Millisecond, 1000, 2000))
@@ -121,8 +125,9 @@ func makePeersRetrieverStub(fnc func(options ...network.PeerOption) []network.Pe
getPeersStub: fnc,
}
}
-func TestPeerSelector(t *testing.T) {
+func TestPeerSelector_RankPeer(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers := []network.Peer{&mockHTTPPeer{address: "12345"}}
@@ -175,14 +180,13 @@ func TestPeerSelector(t *testing.T) {
r1, r2 = peerSelector.rankPeer(nil, 10)
require.False(t, r1 != r2)
- r2, r2 = peerSelector.rankPeer(&peerSelectorPeer{&mockHTTPPeer{address: "abc123"}, 1}, 10)
+ r1, r2 = peerSelector.rankPeer(&peerSelectorPeer{&mockHTTPPeer{address: "abc123"}, 1}, 10)
require.False(t, r1 != r2)
-
- return
}
-func TestPeerDownloadRanking(t *testing.T) {
+func TestPeerSelector_PeerDownloadRanking(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "1234"}, &mockHTTPPeer{address: "5678"}}
peers2 := []network.Peer{&mockHTTPPeer{address: "abcd"}, &mockHTTPPeer{address: "efgh"}}
@@ -232,8 +236,9 @@ func TestPeerDownloadRanking(t *testing.T) {
require.Equal(t, peerRankInvalidDownload, peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{mockHTTPPeer{address: "abc123"}, 0}, time.Millisecond))
}
-func TestFindMissingPeer(t *testing.T) {
+func TestPeerSelector_FindMissingPeer(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) []network.Peer {
@@ -246,8 +251,9 @@ func TestFindMissingPeer(t *testing.T) {
require.Equal(t, -1, peerIdx)
}
-func TestHistoricData(t *testing.T) {
+func TestPeerSelector_HistoricData(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}}
peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
@@ -319,8 +325,9 @@ func peerSelectorTestRandVal(t *testing.T, seed int) float64 {
randVal = randVal + 1
return randVal
}
-func TestPeersDownloadFailed(t *testing.T) {
+func TestPeerSelector_PeersDownloadFailed(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}}
peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
@@ -392,10 +399,11 @@ func TestPeersDownloadFailed(t *testing.T) {
}
-// TestPenalty tests that the penalty is calculated correctly and one peer
+// TestPeerSelector_Penalty tests that the penalty is calculated correctly and one peer
// is not dominating all the selection.
-func TestPenalty(t *testing.T) {
+func TestPeerSelector_Penalty(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}}
peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
@@ -451,9 +459,10 @@ func TestPenalty(t *testing.T) {
require.Equal(t, counters[4], 0)
}
-// TestPeerDownloadDurationToRank tests all the cases handled by peerDownloadDurationToRank
-func TestPeerDownloadDurationToRank(t *testing.T) {
+// TestPeerSelector_PeerDownloadDurationToRank tests all the cases handled by peerDownloadDurationToRank
+func TestPeerSelector_PeerDownloadDurationToRank(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}}
peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
@@ -500,8 +509,9 @@ func TestPeerDownloadDurationToRank(t *testing.T) {
}
-func TestLowerUpperBounds(t *testing.T) {
+func TestPeerSelector_LowerUpperBounds(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
classes := []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
@@ -522,8 +532,9 @@ func TestLowerUpperBounds(t *testing.T) {
require.Equal(t, peerRank4HighBlockTime, upperBound(classes[4]))
}
-func TestFullResetRequestPenalty(t *testing.T) {
+func TestPeerSelector_FullResetRequestPenalty(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
class := peerClass{initialRank: 0, peerClass: network.PeersPhonebookArchivers}
hs := makeHistoricStatus(10, class)
@@ -534,10 +545,11 @@ func TestFullResetRequestPenalty(t *testing.T) {
require.Equal(t, 0, len(hs.requestGaps))
}
-// TesPenaltyBounds makes sure that the penalty does not demote the peer to a lower class,
+// TestPeerSelector_PenaltyBounds makes sure that the penalty does not demote the peer to a lower class,
// and resetting the penalty of a demoted peer does not promote it back
-func TestPenaltyBounds(t *testing.T) {
+func TestPeerSelector_PenaltyBounds(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
class := peerClass{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}
hs := makeHistoricStatus(peerHistoryWindowSize, class)
@@ -558,11 +570,12 @@ func TestPenaltyBounds(t *testing.T) {
require.Equal(t, peerRankDownloadFailed, r3)
}
-// TestClassUpperBound makes sure the peer rank does not exceed the class upper bound
+// TestPeerSelector_ClassUpperBound makes sure the peer rank does not exceed the class upper bound
// This was a bug where the resetRequestPenalty was not bounding the returned rank, and was having download failures.
// Initializing rankSamples to 0 makes this work, since the dropped value subtracts 0 from rankSum.
-func TestClassUpperBound(t *testing.T) {
+func TestPeerSelector_ClassUpperBound(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}}
pClass := peerClass{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}
@@ -592,11 +605,12 @@ func TestClassUpperBound(t *testing.T) {
}
}
-// TestClassLowerBound makes sure the peer rank does not go under the class lower bound
+// TestPeerSelector_ClassLowerBound makes sure the peer rank does not go under the class lower bound
// This was a bug where the resetRequestPenalty was not bounding the returned rank, and the rankSum was not
// initialized to give the average of class.initialRank
-func TestClassLowerBound(t *testing.T) {
+func TestPeerSelector_ClassLowerBound(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}}
pClass := peerClass{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}
@@ -623,8 +637,8 @@ func TestClassLowerBound(t *testing.T) {
}
}
-// TestEviction tests that the peer is evicted after several download failures, and it handles same address for different peer classes
-func TestEvictionAndUpgrade(t *testing.T) {
+// TestPeerSelector_Eviction tests that the peer is evicted after several download failures, and it handles same address for different peer classes
+func TestPeerSelector_EvictionAndUpgrade(t *testing.T) {
partitiontest.PartitionTest(t)
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}}
@@ -656,5 +670,76 @@ func TestEvictionAndUpgrade(t *testing.T) {
peerSelector.rankPeer(psp, peerRankDownloadFailed)
}
psp, err := peerSelector.getNextPeer()
+ require.NoError(t, err)
require.Equal(t, psp.peerClass, network.PeersPhonebookRelays)
}
+
+// TestPeerSelector_RefreshAvailablePeers tests addition/removal of peers from the pool
+func TestPeerSelector_RefreshAvailablePeers(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // check new peers added to the pool
+ p1 := mockHTTPPeer{address: "p1"}
+ p2 := mockHTTPPeer{address: "p2"}
+ ps := peerSelector{
+ peerClasses: []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ },
+ pools: []peerPool{
+ {
+ rank: peerRankInitialFirstPriority,
+ peers: []peerPoolEntry{
+ {
+ peer: &p1,
+ class: peerClass{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+ },
+ },
+ },
+ },
+ }
+
+ ps.net = makePeersRetrieverStub(func(options ...network.PeerOption) []network.Peer {
+ return []network.Peer{&p1, &p2}
+ })
+
+ ps.refreshAvailablePeers()
+
+ peerComparer := func(x, y peerPoolEntry) bool {
+ return reflect.DeepEqual(x.peer, y.peer)
+ }
+
+ require.Equal(t, 2, len(ps.pools))
+ require.Equal(t, 2, len(ps.pools[0].peers))
+ require.Equal(t, 2, len(ps.pools[1].peers))
+
+ require.True(t, cmp.Equal(
+ ps.pools[0].peers,
+ []peerPoolEntry{{peer: &p1}, {peer: &p2}},
+ cmp.Comparer(peerComparer),
+ ))
+ require.True(t, cmp.Equal(
+ ps.pools[1].peers,
+ []peerPoolEntry{{peer: &p1}, {peer: &p2}},
+ cmp.Comparer(peerComparer),
+ ))
+
+ // ensure removal peers from a pool and pools themselves
+ // when returning only p1 for the first class and empty for the second
+ ps.net = makePeersRetrieverStub(func(options ...network.PeerOption) []network.Peer {
+ if options[0] == network.PeersConnectedOut {
+ return []network.Peer{&p1}
+ }
+ return []network.Peer{}
+ })
+
+ ps.refreshAvailablePeers()
+ require.Equal(t, 1, len(ps.pools))
+ require.Equal(t, 1, len(ps.pools[0].peers))
+ require.True(t, cmp.Equal(
+ ps.pools[0].peers,
+ []peerPoolEntry{{peer: &p1}},
+ cmp.Comparer(peerComparer),
+ ))
+}
diff --git a/catchup/service.go b/catchup/service.go
index bc23b3d73..bcf204b13 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -41,9 +41,16 @@ import (
const catchupPeersForSync = 10
const blockQueryPeerLimit = 10
+// uncapParallelDownloadRate is a simple threshold to detect whether or not the node is caught up.
+// If a block is downloaded in less than this duration, the node is assumed not to be caught up,
+// and the block downloader is allowed to start N=parallelBlocks concurrent fetches.
+const uncapParallelDownloadRate = time.Second
+
// this should be at least the number of relays
const catchupRetryLimit = 500
+const followLatestBackoff = 100 * time.Millisecond
+
// ErrSyncRoundInvalid is returned when the sync round requested is behind the current ledger round
var ErrSyncRoundInvalid = errors.New("requested sync round cannot be less than the latest round")
@@ -63,6 +70,7 @@ type Ledger interface {
Block(basics.Round) (bookkeeping.Block, error)
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
IsWritingCatchpointDataFile() bool
+ IsBehindCommittingDeltas() bool
Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error)
AddValidatedBlock(vb ledgercore.ValidatedBlock, cert agreement.Certificate) error
WaitMem(r basics.Round) chan struct{}
@@ -70,10 +78,10 @@ type Ledger interface {
// Service represents the catchup service. Once started and until it is stopped, it ensures that the ledger is up to date with network.
type Service struct {
- syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops
// disableSyncRound, provided externally, is the first round we will _not_ fetch from the network
// any round >= disableSyncRound will not be fetched. If set to 0, it will be disregarded.
- disableSyncRound uint64
+ disableSyncRound atomic.Uint64
+ syncStartNS atomic.Int64
cfg config.Local
ledger Ledger
ctx context.Context
@@ -84,17 +92,24 @@ type Service struct {
auth BlockAuthenticator
parallelBlocks uint64
deadlineTimeout time.Duration
+ prevBlockFetchTime time.Time
blockValidationPool execpool.BacklogPool
- // suspendForCatchpointWriting defines whether we've run into a state where the ledger is currently busy writing the
- // catchpoint file. If so, we want to suspend the catchup process until the catchpoint file writing is complete,
+ // followLatest is set to true if this is a follower node, meaning there is no
+ // agreement service to follow the latest round, so catchup continuously runs,
+ // polling for new blocks as they appear. This enables a different behavior
+ // to avoid aborting the catchup service once you get to the tip of the chain.
+ followLatest bool
+
+ // suspendForLedgerOps defines whether we've run into a state where the ledger is currently busy writing the
+ // catchpoint file or flushing accounts. If so, we want to suspend the catchup process until the ledger operation is complete,
// and resume from there without stopping the catchup timer.
- suspendForCatchpointWriting bool
+ suspendForLedgerOps bool
// The channel gets closed when the initial sync is complete. This allows for other services to avoid
// the overhead of starting prematurely (before this node is caught-up and can validate messages for example).
InitialSyncDone chan struct{}
- initialSyncNotified uint32
+ initialSyncNotified atomic.Uint32
protocolErrorLogged bool
unmatchedPendingCertificates <-chan PendingUnmatchedCertificate
// This channel signals periodSync to attempt catchup immediately. This allows us to start fetching rounds from
@@ -124,6 +139,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode
s = &Service{}
s.cfg = config
+ s.followLatest = s.cfg.EnableFollowMode
s.ledger = ledger
s.net = net
s.auth = auth
@@ -140,7 +156,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode
// Start the catchup service
func (s *Service) Start() {
s.ctx, s.cancel = context.WithCancel(context.Background())
- atomic.StoreUint32(&s.initialSyncNotified, 0)
+ s.initialSyncNotified.Store(0)
s.InitialSyncDone = make(chan struct{})
s.workers.Add(1)
go s.periodicSync()
@@ -150,7 +166,7 @@ func (s *Service) Start() {
func (s *Service) Stop() {
s.cancel()
s.workers.Wait()
- if atomic.CompareAndSwapUint32(&s.initialSyncNotified, 0, 1) {
+ if s.initialSyncNotified.CompareAndSwap(0, 1) {
close(s.InitialSyncDone)
}
}
@@ -159,8 +175,8 @@ func (s *Service) Stop() {
// or attempting to catchup after too-long waiting for next block.
// Also returns a 2nd bool indicating if this is our initial sync
func (s *Service) IsSynchronizing() (synchronizing bool, initialSync bool) {
- synchronizing = atomic.LoadInt64(&s.syncStartNS) != 0
- initialSync = atomic.LoadUint32(&s.initialSyncNotified) == 0
+ synchronizing = s.syncStartNS.Load() != 0
+ initialSync = s.initialSyncNotified.Load() == 0
return
}
@@ -180,25 +196,25 @@ func (s *Service) SetDisableSyncRound(rnd uint64) error {
if basics.Round(rnd) < s.ledger.LastRound() {
return ErrSyncRoundInvalid
}
- atomic.StoreUint64(&s.disableSyncRound, rnd)
+ s.disableSyncRound.Store(rnd)
s.triggerSync()
return nil
}
// UnsetDisableSyncRound removes any previously set disabled sync round
func (s *Service) UnsetDisableSyncRound() {
- atomic.StoreUint64(&s.disableSyncRound, 0)
+ s.disableSyncRound.Store(0)
s.triggerSync()
}
// GetDisableSyncRound returns the disabled sync round
func (s *Service) GetDisableSyncRound() uint64 {
- return atomic.LoadUint64(&s.disableSyncRound)
+ return s.disableSyncRound.Load()
}
// SynchronizingTime returns the time we've been performing a catchup operation (0 if not currently catching up)
func (s *Service) SynchronizingTime() time.Duration {
- startNS := atomic.LoadInt64(&s.syncStartNS)
+ startNS := s.syncStartNS.Load()
if startNS == 0 {
return time.Duration(0)
}
@@ -242,6 +258,8 @@ func (s *Service) innerFetch(ctx context.Context, r basics.Round, peer network.P
return
}
+const errNoBlockForRoundThreshold = 5
+
// fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary.
// Returns false if we should stop trying to catch up. This may occur for several reasons:
// - If the context is canceled (e.g. if the node is shutting down)
@@ -253,6 +271,11 @@ func (s *Service) fetchAndWrite(ctx context.Context, r basics.Round, prevFetchCo
if dontSyncRound := s.GetDisableSyncRound(); dontSyncRound != 0 && r >= basics.Round(dontSyncRound) {
return false
}
+
+ // peerErrors tracks occurrences of errNoBlockForRound in order to quit earlier without making
+ // repeated requests for a block that most likely does not exist yet
+ peerErrors := map[network.Peer]int{}
+
i := 0
for {
i++
@@ -301,8 +324,27 @@ func (s *Service) fetchAndWrite(ctx context.Context, r basics.Round, prevFetchCo
s.log.Infof("fetchAndWrite(%d): the block is already in the ledger. The catchup is complete", r)
return false
}
+ failureRank := peerRankDownloadFailed
+ var nbfe noBlockForRoundError
+ if errors.As(err, &nbfe) {
+ failureRank = peerRankNoBlockForRound
+ // remote peer doesn't have the block, try another peer
+ // quit if the same peer encountered errNoBlockForRound more than errNoBlockForRoundThreshold times
+ if s.followLatest {
+ // back off between retries to allow time for the next block to appear;
+ // this will provide 50s (catchupRetryLimit * followLatestBackoff) of
+ // polling when continuously running catchup instead of agreement.
+ time.Sleep(followLatestBackoff)
+ } else {
+ if count := peerErrors[peer]; count > errNoBlockForRoundThreshold {
+ s.log.Infof("fetchAndWrite(%d): remote peers do not have the block. Quitting", r)
+ return false
+ }
+ peerErrors[peer]++
+ }
+ }
s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i)
- peerSelector.rankPeer(psp, peerRankDownloadFailed)
+ peerSelector.rankPeer(psp, failureRank)
// we've just failed to retrieve a block; wait until the previous block is fetched before trying again
// to avoid the usecase where the first block doesn't exist, and we're making many requests down the chain
@@ -428,9 +470,16 @@ func (s *Service) fetchAndWrite(ctx context.Context, r basics.Round, prevFetchCo
// TODO the following code does not handle the following case: seedLookback upgrades during fetch
func (s *Service) pipelinedFetch(seedLookback uint64) {
- parallelRequests := s.parallelBlocks
- if parallelRequests < seedLookback {
- parallelRequests = seedLookback
+ maxParallelRequests := s.parallelBlocks
+ if maxParallelRequests < seedLookback {
+ maxParallelRequests = seedLookback
+ }
+ minParallelRequests := seedLookback
+
+ // Start the limited requests at max(1, 'seedLookback')
+ limitedParallelRequests := uint64(1)
+ if limitedParallelRequests < seedLookback {
+ limitedParallelRequests = seedLookback
}
completed := make(map[basics.Round]chan bool)
@@ -460,7 +509,8 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
nextRound := firstRound
for {
- for nextRound < firstRound+basics.Round(parallelRequests) {
+ // launch N=parallelRequests block download go routines.
+ for nextRound < firstRound+basics.Round(limitedParallelRequests) {
if s.roundIsNotSupported(nextRound) {
// Break out of the loop to avoid fetching
// blocks that we don't support. If there
@@ -484,6 +534,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
nextRound++
}
+ // wait for the first round to complete before starting the next download.
select {
case completedOK := <-completed[firstRound]:
delete(completed, firstRound)
@@ -494,11 +545,35 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
return
}
+ fetchTime := time.Now()
+ fetchDur := fetchTime.Sub(s.prevBlockFetchTime)
+ s.prevBlockFetchTime = fetchTime
+ if fetchDur < uncapParallelDownloadRate {
+ limitedParallelRequests = maxParallelRequests
+ } else {
+ limitedParallelRequests = minParallelRequests
+ }
+
+ // if the ledger is busy, pause for some time to let the fetchAndWrite goroutines finish fetching in-flight blocks.
+ start := time.Now()
+ for (s.ledger.IsWritingCatchpointDataFile() || s.ledger.IsBehindCommittingDeltas()) && time.Since(start) < s.deadlineTimeout {
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // if the ledger is still busy after s.deadlineTimeout, abort the current pipelinedFetch invocation.
+
// if we're writing a catchpoint file, stop catching up to reduce the memory pressure. Once we finish writing the file we
// could resume with the catchup.
if s.ledger.IsWritingCatchpointDataFile() {
s.log.Info("Catchup is stopping due to catchpoint file being written")
- s.suspendForCatchpointWriting = true
+ s.suspendForLedgerOps = true
+ return
+ }
+
+ // if the ledger has too many non-flushed account changes, stop catching up to reduce the memory pressure.
+ if s.ledger.IsBehindCommittingDeltas() {
+ s.log.Info("Catchup is stopping due to too many non-flushed account changes")
+ s.suspendForLedgerOps = true
return
}
@@ -555,10 +630,10 @@ func (s *Service) periodicSync() {
sleepDuration = time.Duration(crypto.RandUint63()) % s.deadlineTimeout
continue
case <-s.syncNow:
- if s.parallelBlocks == 0 || s.ledger.IsWritingCatchpointDataFile() {
+ if s.parallelBlocks == 0 || s.ledger.IsWritingCatchpointDataFile() || s.ledger.IsBehindCommittingDeltas() {
continue
}
- s.suspendForCatchpointWriting = false
+ s.suspendForLedgerOps = false
s.log.Info("Immediate resync triggered; resyncing")
s.sync()
case <-time.After(sleepDuration):
@@ -575,7 +650,12 @@ func (s *Service) periodicSync() {
// keep the existing sleep duration and try again later.
continue
}
- s.suspendForCatchpointWriting = false
+ // if the ledger has too many non-flushed account changes, skip this resync attempt
+ if s.ledger.IsBehindCommittingDeltas() {
+ continue
+ }
+
+ s.suspendForLedgerOps = false
s.log.Info("It's been too long since our ledger advanced; resyncing")
s.sync()
case cert := <-s.unmatchedPendingCertificates:
@@ -608,8 +688,8 @@ func (s *Service) sync() {
start := time.Now()
timeInNS := start.UnixNano()
- if !atomic.CompareAndSwapInt64(&s.syncStartNS, 0, timeInNS) {
- s.log.Infof("resuming previous sync from %d (now=%d)", atomic.LoadInt64(&s.syncStartNS), timeInNS)
+ if !s.syncStartNS.CompareAndSwap(0, timeInNS) {
+ s.log.Infof("resuming previous sync from %d (now=%d)", s.syncStartNS.Load(), timeInNS)
}
pr := s.ledger.LastRound()
@@ -630,18 +710,18 @@ func (s *Service) sync() {
initSync := false
// if the catchupWriting flag is set, it means that we aborted the sync due to the ledger writing the catchup file.
- if !s.suspendForCatchpointWriting {
+ if !s.suspendForLedgerOps {
// in that case, don't change the timer so that the "timer" would keep running.
- atomic.StoreInt64(&s.syncStartNS, 0)
+ s.syncStartNS.Store(0)
// close the initial sync channel if not already close
- if atomic.CompareAndSwapUint32(&s.initialSyncNotified, 0, 1) {
+ if s.initialSyncNotified.CompareAndSwap(0, 1) {
close(s.InitialSyncDone)
initSync = true
}
}
- elapsedTime := time.Now().Sub(start)
+ elapsedTime := time.Since(start)
s.log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.CatchupStopEvent, telemetryspec.CatchupStopEventDetails{
StartRound: uint64(pr),
EndRound: uint64(s.ledger.LastRound()),
@@ -668,6 +748,8 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
return
}
+ peerErrors := map[network.Peer]int{}
+
blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest
peerSelector := createPeerSelector(s.net, s.cfg, false)
for s.ledger.LastRound() < cert.Round {
@@ -689,8 +771,31 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
return
default:
}
+ failureRank := peerRankDownloadFailed
+ var nbfe noBlockForRoundError
+ if errors.As(err, &nbfe) {
+ failureRank = peerRankNoBlockForRound
+ // If a peer does not have the block after a few attempts, it probably has not persisted the block yet.
+ // Give it some time to persist the block and try again.
+ // Note: there is no exit condition on too many retries, as per the function contract.
+ if count, ok := peerErrors[peer]; ok {
+ if count > errNoBlockForRoundThreshold {
+ time.Sleep(50 * time.Millisecond)
+ }
+ if count > errNoBlockForRoundThreshold*10 {
+ // for a low number of connected peers (like 2) the following scenario is possible:
+ // - both peers do not have the block
+ // - peer selector punishes one of the peers more than the other
+ // - the punished peer gets the block, and the less punished peer gets stuck.
+ // In this case reset the peer selector to let it re-learn priorities.
+ peerSelector = createPeerSelector(s.net, s.cfg, false)
+ }
+ }
+ peerErrors[peer]++
+ }
+ // remote peer doesn't have the block, try another peer
logging.Base().Warnf("fetchRound could not acquire block, fetcher errored out: %v", err)
- peerSelector.rankPeer(psp, peerRankDownloadFailed)
+ peerSelector.rankPeer(psp, failureRank)
continue
}
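
Two mechanisms above interact: the download window widens to CatchupParallelBlocks only while blocks arrive faster than uncapParallelDownloadRate (one second), and the follower path sleeps followLatestBackoff (100ms) per retry, so catchupRetryLimit x followLatestBackoff yields roughly 50s of polling at the tip. A minimal sketch of the window decision, with illustrative constants rather than real config values:

package main

import (
	"fmt"
	"time"
)

// nextWindow mirrors the throttle pipelinedFetch now applies: a block that
// arrived in under uncapParallelDownloadRate suggests the node is still far
// behind, so the full parallel window (CatchupParallelBlocks) is used;
// otherwise the node is presumed near the tip and the window shrinks back
// to the protocol's seed lookback.
func nextWindow(fetchDur time.Duration, maxParallel, minParallel uint64) uint64 {
	const uncapParallelDownloadRate = time.Second
	if fetchDur < uncapParallelDownloadRate {
		return maxParallel
	}
	return minParallel
}

func main() {
	fmt.Println(nextWindow(20*time.Millisecond, 16, 2)) // 16: far behind, go wide
	fmt.Println(nextWindow(4*time.Second, 16, 2))       // 2: near the tip, stay narrow
}
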
diff --git a/catchup/service_test.go b/catchup/service_test.go
index 6807b1119..fc0ae38e1 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -20,6 +20,7 @@ import (
"context"
"errors"
"math/rand"
+ "strings"
"sync"
"sync/atomic"
"testing"
@@ -181,6 +182,27 @@ func (cl *periodicSyncLogger) Warnf(s string, args ...interface{}) {
cl.Logger.Warnf(s, args...)
}
+type periodicSyncDebugLogger struct {
+ periodicSyncLogger
+ debugMsgFilter []string
+ debugMsgs atomic.Uint32
+}
+
+func (cl *periodicSyncDebugLogger) Debugf(s string, args ...interface{}) {
+ // save debug messages for later inspection.
+ if len(cl.debugMsgFilter) > 0 {
+ for _, filter := range cl.debugMsgFilter {
+ if strings.Contains(s, filter) {
+ cl.debugMsgs.Add(1)
+ break
+ }
+ }
+ } else {
+ cl.debugMsgs.Add(1)
+ }
+ cl.Logger.Debugf(s, args...)
+}
+
func TestSyncRound(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -209,7 +231,7 @@ func TestSyncRound(t *testing.T) {
auth := &mockedAuthenticator{fail: true}
initialLocalRound := local.LastRound()
- require.True(t, 0 == initialLocalRound)
+ require.Zero(t, initialLocalRound)
// Make Service
localCfg := config.GetDefaultLocal()
@@ -254,7 +276,7 @@ func TestSyncRound(t *testing.T) {
s.UnsetDisableSyncRound()
// wait until the catchup is done
waitStart = time.Now()
- for time.Now().Sub(waitStart) < 8*s.deadlineTimeout {
+ for time.Since(waitStart) < 8*s.deadlineTimeout {
if remote.LastRound() == local.LastRound() {
break
}
@@ -299,7 +321,7 @@ func TestPeriodicSync(t *testing.T) {
auth := &mockedAuthenticator{fail: true}
initialLocalRound := local.LastRound()
- require.True(t, 0 == initialLocalRound)
+ require.Zero(t, initialLocalRound)
// Make Service
s := MakeService(logging.Base(), defaultConfig, net, local, auth, nil, nil)
@@ -316,7 +338,7 @@ func TestPeriodicSync(t *testing.T) {
// wait until the catchup is done. Since we've might have missed the sleep window, we need to wait
// until the synchronization is complete.
waitStart := time.Now()
- for time.Now().Sub(waitStart) < 10*s.deadlineTimeout {
+ for time.Since(waitStart) < 10*s.deadlineTimeout {
if remote.LastRound() == local.LastRound() {
break
}
@@ -507,7 +529,6 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) {
localBlock, err := local.Block(i)
require.NoError(t, err)
require.Equal(t, *blk, localBlock)
- return
}
}
@@ -830,6 +851,18 @@ func (m *mockedLedger) IsWritingCatchpointDataFile() bool {
return false
}
+func (m *mockedLedger) IsBehindCommittingDeltas() bool {
+ return false
+}
+
+type mockedBehindDeltasLedger struct {
+ mockedLedger
+}
+
+func (m *mockedBehindDeltasLedger) IsBehindCommittingDeltas() bool {
+ return true
+}
+
func testingenvWithUpgrade(
t testing.TB,
numBlocks,
@@ -1085,7 +1118,7 @@ func TestSynchronizingTime(t *testing.T) {
s := MakeService(logging.Base(), cfg, &httpTestPeerSource{}, ledger, &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
require.Equal(t, time.Duration(0), s.SynchronizingTime())
- atomic.StoreInt64(&s.syncStartNS, 1000000)
+ s.syncStartNS.Store(1000000)
require.NotEqual(t, time.Duration(0), s.SynchronizingTime())
}
@@ -1127,3 +1160,98 @@ func TestDownloadBlocksToSupportStateProofs(t *testing.T) {
lookback = lookbackForStateproofsSupport(&topBlk)
assert.Equal(t, uint64(0), lookback)
}
+
+// TestServiceLedgerUnavailable checks that a local ledger that is unavailable cannot catch up to the remote round
+func TestServiceLedgerUnavailable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // Make Ledger
+ local := new(mockedBehindDeltasLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
+
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ numBlocks := 10
+ addBlocks(t, remote, blk, numBlocks)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
+
+ require.Equal(t, basics.Round(0), local.LastRound())
+ require.Equal(t, basics.Round(numBlocks+1), remote.LastRound())
+
+ // Make Service
+ auth := &mockedAuthenticator{fail: false}
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupParallelBlocks = 2
+ s := MakeService(logging.Base(), cfg, net, local, auth, nil, nil)
+ s.log = &periodicSyncLogger{Logger: logging.Base()}
+ s.deadlineTimeout = 2 * time.Second
+
+ s.testStart()
+ defer s.Stop()
+ s.sync()
+ require.Greater(t, local.LastRound(), basics.Round(0))
+ require.Less(t, local.LastRound(), remote.LastRound())
+}
+
+// TestServiceNoBlockForRound checks that fetchAndWrite does not retry 500 times when a block is not available
+func TestServiceNoBlockForRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // Make Ledger
+ local := new(mockedLedger)
+ local.blocks = append(local.blocks, bookkeeping.Block{})
+
+ remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ numBlocks := 10
+ addBlocks(t, remote, blk, numBlocks)
+
+ // Create a network and block service
+ blockServiceConfig := config.GetDefaultLocal()
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+ net.addPeer(rootURL)
+
+ require.Equal(t, basics.Round(0), local.LastRound())
+ require.Equal(t, basics.Round(numBlocks+1), remote.LastRound())
+
+ // Make Service
+ auth := &mockedAuthenticator{fail: false}
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupParallelBlocks = 8
+ s := MakeService(logging.Base(), cfg, net, local, auth, nil, nil)
+ pl := &periodicSyncDebugLogger{periodicSyncLogger: periodicSyncLogger{Logger: logging.Base()}}
+ s.log = pl
+ s.deadlineTimeout = 1 * time.Second
+
+ s.testStart()
+ defer s.Stop()
+ s.sync()
+
+ // without the fix there are about 2k messages (4x catchupRetryLimit)
+ // with the fix expect less than catchupRetryLimit
+ require.Less(t, int(pl.debugMsgs.Load()), catchupRetryLimit)
+}
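
The periodicSyncDebugLogger above counts filtered debug lines so the test can assert an upper bound on retry chatter. A stripped-down sketch of the same counting-logger pattern, with a hypothetical Debugf method rather than the repo's logging package:

package main

import (
	"fmt"
	"strings"
	"sync/atomic"
)

// countingLogger counts debug messages containing any of the given
// substrings; with no filters, every debug message is counted.
type countingLogger struct {
	filters []string
	n       atomic.Uint32
}

func (l *countingLogger) Debugf(format string, args ...interface{}) {
	if len(l.filters) == 0 {
		l.n.Add(1)
		return
	}
	for _, f := range l.filters {
		if strings.Contains(format, f) {
			l.n.Add(1)
			break
		}
	}
}

func main() {
	l := &countingLogger{filters: []string{"Could not fetch"}}
	l.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", 5, "err", 1)
	l.Debugf("unrelated message")
	fmt.Println(l.n.Load()) // 1
}
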
diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go
index c8dd8b9f9..6d9fcce8d 100644
--- a/catchup/universalFetcher.go
+++ b/catchup/universalFetcher.go
@@ -19,9 +19,9 @@ package catchup
import (
"context"
"encoding/binary"
- "errors"
"fmt"
"net/http"
+ "strconv"
"time"
"github.com/algorand/go-deadlock"
@@ -132,7 +132,7 @@ func (w *wsFetcherClient) getBlockBytes(ctx context.Context, r basics.Round) ([]
defer func() {
cancelFunc()
// note that we don't need to have additional Unlock here since
- // we already have a defered Unlock above ( which executes in reversed order )
+ // we already have a deferred Unlock above ( which executes in reversed order )
w.mu.Lock()
}()
@@ -173,6 +173,9 @@ func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round)
}
if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found {
+ if latest, lfound := resp.Topics.GetValue(rpcs.LatestRoundKey); lfound {
+ return nil, noBlockForRoundError{round: round, latest: basics.Round(binary.BigEndian.Uint64(latest))}
+ }
return nil, makeErrWsFetcherRequestFailed(round, w.target.GetAddress(), string(errMsg))
}
@@ -195,7 +198,11 @@ func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round)
// set max fetcher size to 10MB; this is enough to fit the block and certificate
const fetcherMaxBlockBytes = 10 << 20
-var errNoBlockForRound = errors.New("No block available for given round")
+type noBlockForRoundError struct {
+ latest, round basics.Round
+}
+
+func (noBlockForRoundError) Error() string { return "no block available for given round" }
// HTTPFetcher implements FetcherClient doing an HTTP GET of the block
type HTTPFetcher struct {
@@ -239,7 +246,13 @@ func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data
case http.StatusOK:
case http.StatusNotFound: // server could not find a block with that round number.
response.Body.Close()
- return nil, errNoBlockForRound
+ noBlockErr := noBlockForRoundError{round: r}
+ if latestBytes := response.Header.Get(rpcs.BlockResponseLatestRoundHeader); latestBytes != "" {
+ if latest, pErr := strconv.ParseUint(latestBytes, 10, 64); pErr == nil {
+ noBlockErr.latest = basics.Round(latest)
+ }
+ }
+ return nil, noBlockErr
default:
bodyBytes, err := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes)
hf.log.Warnf("HTTPFetcher.getBlockBytes: response status code %d from '%s'. Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes))
diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go
index 836360139..c8dcbd984 100644
--- a/catchup/universalFetcher_test.go
+++ b/catchup/universalFetcher_test.go
@@ -74,7 +74,9 @@ func TestUGetBlockWs(t *testing.T) {
block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, up)
require.Error(t, err)
- require.Contains(t, err.Error(), "requested block is not available")
+	require.IsType(t, noBlockForRoundError{}, err)
+ require.Equal(t, next+1, err.(noBlockForRoundError).round)
+ require.Equal(t, next, err.(noBlockForRoundError).latest)
require.Nil(t, block)
require.Nil(t, cert)
require.Equal(t, int64(duration), int64(0))
@@ -118,8 +120,10 @@ func TestUGetBlockHTTP(t *testing.T) {
block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, net.GetPeers()[0])
- require.Error(t, errNoBlockForRound, err)
- require.Contains(t, err.Error(), "No block available for given round")
+	require.IsType(t, noBlockForRoundError{}, err)
+ require.Equal(t, next+1, err.(noBlockForRoundError).round)
+ require.Equal(t, next, err.(noBlockForRoundError).latest)
+ require.Contains(t, err.Error(), "no block available for given round")
require.Nil(t, block)
require.Nil(t, cert)
require.Equal(t, int64(duration), int64(0))
diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go
index e629f2578..0a368fc97 100644
--- a/cmd/algocfg/profileCommand.go
+++ b/cmd/algocfg/profileCommand.go
@@ -92,6 +92,7 @@ func init() {
rootCmd.AddCommand(profileCmd)
profileCmd.AddCommand(setProfileCmd)
setProfileCmd.Flags().BoolVarP(&forceUpdate, "yes", "y", false, "Force updates to be written")
+ profileCmd.AddCommand(printProfileCmd)
profileCmd.AddCommand(listProfileCmd)
}
@@ -133,6 +134,23 @@ var listProfileCmd = &cobra.Command{
},
}
+var printProfileCmd = &cobra.Command{
+ Use: "print",
+ Short: "Print config.json to stdout.",
+ Args: cobra.ExactArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ cfg, err := getConfigForArg(args[0])
+ if err != nil {
+ reportErrorf("%v", err)
+ }
+ err = codecs.WriteNonDefaultValues(os.Stdout, cfg, config.GetDefaultLocal(), nil)
+ if err != nil {
+ reportErrorf("Error writing config file to stdout: %s", err)
+ }
+ fmt.Fprintf(os.Stdout, "\n")
+ },
+}
+
var setProfileCmd = &cobra.Command{
Use: "set",
Short: "Set config.json file from a profile.",
@@ -157,7 +175,7 @@ var setProfileCmd = &cobra.Command{
return
}
}
- err = codecs.SaveNonDefaultValuesToFile(file, cfg, config.GetDefaultLocal(), nil, true)
+ err = codecs.SaveNonDefaultValuesToFile(file, cfg, config.GetDefaultLocal(), nil)
if err != nil {
reportErrorf("Error saving updated config file '%s' - %s", file, err)
}
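
The new print subcommand builds on codecs.WriteNonDefaultValues, which, as used above, writes only the fields that differ from the supplied defaults. A hedged standalone sketch mirroring that call (the overridden field is illustrative):

    cfg := config.GetDefaultLocal()
    cfg.EnableLedgerService = true // any non-default value
    // emits a JSON object containing only the overridden field(s)
    if err := codecs.WriteNonDefaultValues(os.Stdout, cfg, config.GetDefaultLocal(), nil); err != nil {
        reportErrorf("%v", err)
    }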
diff --git a/cmd/algocfg/resetCommand.go b/cmd/algocfg/resetCommand.go
index 24f9cf1da..2ec8c55aa 100644
--- a/cmd/algocfg/resetCommand.go
+++ b/cmd/algocfg/resetCommand.go
@@ -63,7 +63,7 @@ var resetCmd = &cobra.Command{
}
file := filepath.Join(dataDir, config.ConfigFilename)
- err = codecs.SaveNonDefaultValuesToFile(file, cfg, defaults, nil, true)
+ err = codecs.SaveNonDefaultValuesToFile(file, cfg, defaults, nil)
if err != nil {
reportWarnf("Error saving updated config file '%s' - %s", file, err)
anyError = true
diff --git a/cmd/algocfg/setCommand.go b/cmd/algocfg/setCommand.go
index 836785759..58f7ee796 100644
--- a/cmd/algocfg/setCommand.go
+++ b/cmd/algocfg/setCommand.go
@@ -66,7 +66,7 @@ var setCmd = &cobra.Command{
}
file := filepath.Join(dataDir, config.ConfigFilename)
- err = codecs.SaveNonDefaultValuesToFile(file, cfg, config.GetDefaultLocal(), nil, true)
+ err = codecs.SaveNonDefaultValuesToFile(file, cfg, config.GetDefaultLocal(), nil)
if err != nil {
reportWarnf("Error saving updated config file '%s' - %s", file, err)
anyError = true
diff --git a/cmd/algokey/part.go b/cmd/algokey/part.go
index 034b9821a..76148ae1f 100644
--- a/cmd/algokey/part.go
+++ b/cmd/algokey/part.go
@@ -19,7 +19,6 @@ package main
import (
"encoding/base64"
"fmt"
- "math"
"os"
"github.com/spf13/cobra"
@@ -58,7 +57,7 @@ var partGenerateCmd = &cobra.Command{
}
if partKeyDilution == 0 {
- partKeyDilution = 1 + uint64(math.Sqrt(float64(partLastRound-partFirstRound)))
+ partKeyDilution = account.DefaultKeyDilution(basics.Round(partFirstRound), basics.Round(partLastRound))
}
var err error
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index fcf4fbe09..60b8ce72d 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -41,6 +41,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/libgoal/participation"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/db"
@@ -912,7 +913,11 @@ var addParticipationKeyCmd = &cobra.Command{
var err error
var part algodAcct.Participation
participationGen := func() {
- part, _, err = client.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir)
+ installFunc := func(keyPath string) error {
+ _, installErr := client.AddParticipationKey(keyPath)
+ return installErr
+ }
+ part, _, err = participation.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir, installFunc)
}
util.RunFuncWithSpinningCursor(participationGen)
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 8d442bf5c..2aebe89de 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -488,7 +488,7 @@ var createAppCmd = &cobra.Command{
reportErrorf(errorBroadcastingTX, err2)
}
- reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
@@ -563,7 +563,7 @@ var updateAppCmd = &cobra.Command{
reportErrorf(errorBroadcastingTX, err2)
}
- reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
@@ -1455,9 +1455,9 @@ var methodAppCmd = &cobra.Command{
// Report tx details to user
if methodCreatesApp {
- reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
} else if onCompletionEnum == transactions.UpdateApplicationOC {
- reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
}
reportInfof("Issued %d transaction(s):", len(signedTxnGroup))
diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go
index 825d74388..ca79daf0d 100644
--- a/cmd/goal/interact.go
+++ b/cmd/goal/interact.go
@@ -625,9 +625,9 @@ var appExecuteCmd = &cobra.Command{
}
if appIdx == 0 {
- reportInfof("Attempting to create app (global ints %d, global blobs %d, local ints %d, local blobs %d, approval size %d, hash %v; clear size %d, hash %v)", globalSchema.NumUint, globalSchema.NumByteSlice, localSchema.NumUint, localSchema.NumByteSlice, len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to create app (global ints %d, global blobs %d, local ints %d, local blobs %d, approval size %d, hash %v; clear size %d, hash %v)", globalSchema.NumUint, globalSchema.NumByteSlice, localSchema.NumUint, localSchema.NumByteSlice, len(approvalProg), logic.HashProgram(approvalProg), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
} else if onCompletion == transactions.UpdateApplicationOC {
- reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
}
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
diff --git a/cmd/goal/tealsign.go b/cmd/goal/tealsign.go
index 9d9f144da..45fbd3a98 100644
--- a/cmd/goal/tealsign.go
+++ b/cmd/goal/tealsign.go
@@ -139,7 +139,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
reportErrorf(tealsignEmptyLogic)
}
- progHash = crypto.HashObj(logic.Program(stxn.Lsig.Logic))
+ progHash = logic.HashProgram(stxn.Lsig.Logic)
} else {
// Otherwise, the contract address is the logic hash
parsedAddr, err := basics.UnmarshalChecksumAddress(contractAddr)
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 58d32705b..aaa55c5d2 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -466,7 +466,7 @@ func main() {
AVMType: t.AVMType.String(),
})
}
- sort.Slice(named, func(i, j int) bool { return named[i].Name > named[j].Name })
+ sort.Slice(named, func(i, j int) bool { return named[i].Name < named[j].Name })
constants := create("named_integer_constants.md")
integerConstantsTableMarkdown(constants)
diff --git a/cmd/opdoc/tmLanguage.go b/cmd/opdoc/tmLanguage.go
index fb1438101..63c558ea3 100644
--- a/cmd/opdoc/tmLanguage.go
+++ b/cmd/opdoc/tmLanguage.go
@@ -205,7 +205,7 @@ func buildSyntaxHighlight(version uint64) *tmLanguage {
// and only add to keyword.Patterns later, when all
// have been collected.
case "Arithmetic", "Byte Array Manipulation", "Byte Array Arithmetic",
- "Byte Array Logic", "Inner Transactions":
+ "Byte Array Logic", "Cryptography", "Inner Transactions":
escape := map[rune]bool{
'*': true,
'+': true,
diff --git a/cmd/tealdbg/cdtSession.go b/cmd/tealdbg/cdtSession.go
index f7b74eb9e..5d0bdd08b 100644
--- a/cmd/tealdbg/cdtSession.go
+++ b/cmd/tealdbg/cdtSession.go
@@ -51,8 +51,8 @@ type cdtSession struct {
verbose bool
}
-var contextCounter int32 = 0
-var scriptCounter int32 = 0
+var contextCounter atomic.Int32
+var scriptCounter atomic.Int32
func makeCdtSession(uuid string, debugger Control, ch chan Notification) *cdtSession {
s := new(cdtSession)
@@ -60,8 +60,8 @@ func makeCdtSession(uuid string, debugger Control, ch chan Notification) *cdtSes
s.debugger = debugger
s.notifications = ch
s.done = make(chan struct{})
- s.contextID = int(atomic.AddInt32(&contextCounter, 1))
- s.scriptID = strconv.Itoa(int(atomic.AddInt32(&scriptCounter, 1)))
+ s.contextID = int(contextCounter.Add(1))
+ s.scriptID = strconv.Itoa(int(scriptCounter.Add(1)))
return s
}
diff --git a/cmd/tealdbg/cdtSession_test.go b/cmd/tealdbg/cdtSession_test.go
index e4cae925c..186e0d7df 100644
--- a/cmd/tealdbg/cdtSession_test.go
+++ b/cmd/tealdbg/cdtSession_test.go
@@ -521,9 +521,7 @@ func TestCdtSessionGetObjects(t *testing.T) {
{Type: basics.TealUintType, Uint: 1},
{Type: basics.TealBytesType, Bytes: "\x01\x02"},
},
- pc: atomicInt{1},
- line: atomicInt{1},
- err: e,
+ err: e,
AppState: AppState{
appIdx: basics.AppIndex(1),
schemas: basics.StateSchemas{
@@ -582,6 +580,8 @@ func TestCdtSessionGetObjects(t *testing.T) {
},
},
}
+ state.pc.Store(1)
+ state.line.Store(1)
req.Method = "Runtime.getProperties"
req.Params = map[string]interface{}{}
diff --git a/cmd/tealdbg/util.go b/cmd/tealdbg/util.go
index d91220e71..971a08223 100644
--- a/cmd/tealdbg/util.go
+++ b/cmd/tealdbg/util.go
@@ -44,35 +44,31 @@ func (s *atomicString) Length() int {
}
type atomicBool struct {
- value uint32
+ value atomic.Bool
}
func (b *atomicBool) SetTo(other bool) {
- var converted uint32 = 0
- if other {
- converted = 1
- }
- atomic.StoreUint32(&b.value, converted)
+ b.value.Store(other)
}
func (b *atomicBool) IsSet() bool {
- return atomic.LoadUint32(&b.value) != 0
+ return b.value.Load()
}
type atomicInt struct {
- value int32
+ value atomic.Int32
}
func (i *atomicInt) Store(other int) {
- atomic.StoreInt32(&i.value, int32(other))
+ i.value.Store(int32(other))
}
func (i *atomicInt) Load() int {
- return int(atomic.LoadInt32(&i.value))
+ return int(i.value.Load())
}
func (i *atomicInt) Add(other int) int {
- return int(atomic.AddInt32(&i.value, int32(other)))
+ return int(i.value.Add(int32(other)))
}
// IsText checks if the input has all printable characters with strconv.IsPrint
diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go
index f488879e7..d095b703a 100644
--- a/components/mocks/mockCatchpointCatchupAccessor.go
+++ b/components/mocks/mockCatchpointCatchupAccessor.go
@@ -19,6 +19,7 @@ package mocks
import (
"context"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -86,12 +87,12 @@ func (m *MockCatchpointCatchupAccessor) StoreBalancesRound(ctx context.Context,
}
// StoreFirstBlock stores a single block to the blocks database.
-func (m *MockCatchpointCatchupAccessor) StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (m *MockCatchpointCatchupAccessor) StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error) {
return nil
}
// StoreBlock stores a single block to the blocks database.
-func (m *MockCatchpointCatchupAccessor) StoreBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (m *MockCatchpointCatchupAccessor) StoreBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error) {
return nil
}
diff --git a/config/config_test.go b/config/config_test.go
index 87b4cc4d4..ef58bffb9 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -675,6 +675,12 @@ func TestEnsureAbsDir(t *testing.T) {
require.Equal(t, testDirectory+"/myGenesisID", t2Abs)
}
+type tLogger struct{ t *testing.T }
+
+func (l tLogger) Infof(fmts string, args ...interface{}) {
+ l.t.Logf(fmts, args...)
+}
+
// TestEnsureAndResolveGenesisDirs confirms that paths provided in the config are resolved to absolute paths and are created if relevant
func TestEnsureAndResolveGenesisDirs(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -689,7 +695,7 @@ func TestEnsureAndResolveGenesisDirs(t *testing.T) {
cfg.StateproofDir = filepath.Join(testDirectory, "/RELATIVEPATHS/../RELATIVE/../custom_stateproof")
cfg.CatchpointDir = filepath.Join(testDirectory, "custom_catchpoint")
- paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID")
+ paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID", tLogger{t: t})
require.NoError(t, err)
// confirm that the paths are absolute, and contain the genesisID
@@ -711,7 +717,7 @@ func TestEnsureAndResolveGenesisDirs_hierarchy(t *testing.T) {
cfg := GetDefaultLocal()
testDirectory := t.TempDir()
- paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID")
+ paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID", tLogger{t: t})
require.NoError(t, err)
// confirm that if only the root is specified, it is used for all directories
require.Equal(t, testDirectory+"/myGenesisID", paths.TrackerGenesisDir)
@@ -731,21 +737,125 @@ func TestEnsureAndResolveGenesisDirs_hierarchy(t *testing.T) {
cold := filepath.Join(testDirectory, "cold")
cfg.HotDataDir = hot
cfg.ColdDataDir = cold
- paths, err = cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID")
+ paths, err = cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID", tLogger{t: t})
require.NoError(t, err)
// confirm that if hot/cold are specified, hot/cold are used for appropriate directories
require.Equal(t, hot+"/myGenesisID", paths.TrackerGenesisDir)
require.DirExists(t, paths.TrackerGenesisDir)
require.Equal(t, cold+"/myGenesisID", paths.BlockGenesisDir)
require.DirExists(t, paths.BlockGenesisDir)
- require.Equal(t, cold+"/myGenesisID", paths.CrashGenesisDir)
+ require.Equal(t, hot+"/myGenesisID", paths.CrashGenesisDir)
require.DirExists(t, paths.CrashGenesisDir)
- require.Equal(t, cold+"/myGenesisID", paths.StateproofGenesisDir)
+ require.Equal(t, hot+"/myGenesisID", paths.StateproofGenesisDir)
require.DirExists(t, paths.StateproofGenesisDir)
require.Equal(t, cold+"/myGenesisID", paths.CatchpointGenesisDir)
require.DirExists(t, paths.CatchpointGenesisDir)
}
+func TestEnsureAndResolveGenesisDirs_migrate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ cfg := GetDefaultLocal()
+ testDirectory := t.TempDir()
+ cfg.HotDataDir = filepath.Join(testDirectory, "hot")
+ cfg.ColdDataDir = filepath.Join(testDirectory, "cold")
+ coldDir := filepath.Join(cfg.ColdDataDir, "myGenesisID")
+ hotDir := filepath.Join(cfg.HotDataDir, "myGenesisID")
+ err := os.MkdirAll(coldDir, 0755)
+ require.NoError(t, err)
+ // put a crash.sqlite file in the ColdDataDir
+ err = os.WriteFile(filepath.Join(coldDir, "crash.sqlite"), []byte("test"), 0644)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(coldDir, "crash.sqlite-shm"), []byte("test"), 0644)
+ require.NoError(t, err)
+ // put a stateproof.sqlite file in the ColdDataDir
+ err = os.WriteFile(filepath.Join(coldDir, "stateproof.sqlite"), []byte("test"), 0644)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(coldDir, "stateproof.sqlite-wal"), []byte("test"), 0644)
+ require.NoError(t, err)
+ // Resolve
+ paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID", tLogger{t: t})
+ require.NoError(t, err)
+ // Confirm that crash.sqlite was moved to HotDataDir
+ require.DirExists(t, paths.CrashGenesisDir)
+ require.Equal(t, hotDir, paths.CrashGenesisDir)
+ require.NoFileExists(t, filepath.Join(coldDir, "crash.sqlite"))
+ require.NoFileExists(t, filepath.Join(coldDir, "crash.sqlite-shm"))
+ require.FileExists(t, filepath.Join(hotDir, "crash.sqlite"))
+ require.FileExists(t, filepath.Join(hotDir, "crash.sqlite-shm"))
+ // Confirm that stateproof.sqlite was moved to HotDataDir
+ require.DirExists(t, paths.StateproofGenesisDir)
+ require.Equal(t, hotDir, paths.StateproofGenesisDir)
+ require.NoFileExists(t, filepath.Join(coldDir, "stateproof.sqlite"))
+ require.NoFileExists(t, filepath.Join(coldDir, "stateproof.sqlite-wal"))
+ require.FileExists(t, filepath.Join(hotDir, "stateproof.sqlite"))
+ require.FileExists(t, filepath.Join(hotDir, "stateproof.sqlite-wal"))
+}
+
+func TestEnsureAndResolveGenesisDirs_migrateCrashFail(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ cfg := GetDefaultLocal()
+ testDirectory := t.TempDir()
+ cfg.HotDataDir = filepath.Join(testDirectory, "hot")
+ cfg.ColdDataDir = filepath.Join(testDirectory, "cold")
+ coldDir := filepath.Join(cfg.ColdDataDir, "myGenesisID")
+ hotDir := filepath.Join(cfg.HotDataDir, "myGenesisID")
+ err := os.MkdirAll(coldDir, 0755)
+ require.NoError(t, err)
+ err = os.MkdirAll(hotDir, 0755)
+ require.NoError(t, err)
+ // put a crash.sqlite file in the ColdDataDir
+ err = os.WriteFile(filepath.Join(coldDir, "crash.sqlite"), []byte("test"), 0644)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(coldDir, "crash.sqlite-shm"), []byte("test"), 0644)
+ require.NoError(t, err)
+ // also put a crash.sqlite file in the HotDataDir
+ err = os.WriteFile(filepath.Join(hotDir, "crash.sqlite"), []byte("test"), 0644)
+ require.NoError(t, err)
+ // Resolve
+ paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID", tLogger{t: t})
+ require.Error(t, err)
+ require.Empty(t, paths)
+ // Confirm that crash.sqlite was not moved to HotDataDir
+ require.FileExists(t, filepath.Join(coldDir, "crash.sqlite"))
+ require.FileExists(t, filepath.Join(coldDir, "crash.sqlite-shm"))
+ require.FileExists(t, filepath.Join(hotDir, "crash.sqlite"))
+ require.NoFileExists(t, filepath.Join(hotDir, "crash.sqlite-shm"))
+}
+
+func TestEnsureAndResolveGenesisDirs_migrateSPFail(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ cfg := GetDefaultLocal()
+ testDirectory := t.TempDir()
+ cfg.HotDataDir = filepath.Join(testDirectory, "hot")
+ cfg.ColdDataDir = filepath.Join(testDirectory, "cold")
+ coldDir := filepath.Join(cfg.ColdDataDir, "myGenesisID")
+ hotDir := filepath.Join(cfg.HotDataDir, "myGenesisID")
+ err := os.MkdirAll(coldDir, 0755)
+ require.NoError(t, err)
+ err = os.MkdirAll(hotDir, 0755)
+ require.NoError(t, err)
+ // put a stateproof.sqlite file in the ColdDataDir
+ err = os.WriteFile(filepath.Join(coldDir, "stateproof.sqlite"), []byte("test"), 0644)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(coldDir, "stateproof.sqlite-wal"), []byte("test"), 0644)
+ require.NoError(t, err)
+ // also put a stateproof.sqlite-wal file in the HotDataDir
+ err = os.WriteFile(filepath.Join(hotDir, "stateproof.sqlite-wal"), []byte("test"), 0644)
+ require.NoError(t, err)
+ // Resolve
+ paths, err := cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID", tLogger{t: t})
+ require.Error(t, err)
+ require.Empty(t, paths)
+ // Confirm that stateproof.sqlite was not moved to HotDataDir
+ require.FileExists(t, filepath.Join(coldDir, "stateproof.sqlite"))
+ require.FileExists(t, filepath.Join(coldDir, "stateproof.sqlite-wal"))
+ require.NoFileExists(t, filepath.Join(hotDir, "stateproof.sqlite"))
+ require.FileExists(t, filepath.Join(hotDir, "stateproof.sqlite-wal"))
+}
+
// TestEnsureAndResolveGenesisDirsError confirms that if a path can't be created, an error is returned
func TestEnsureAndResolveGenesisDirsError(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -761,7 +871,7 @@ func TestEnsureAndResolveGenesisDirsError(t *testing.T) {
cfg.CatchpointDir = filepath.Join(testDirectory, "custom_catchpoint")
// first try an error with an empty root dir
- paths, err := cfg.EnsureAndResolveGenesisDirs("", "myGenesisID")
+ paths, err := cfg.EnsureAndResolveGenesisDirs("", "myGenesisID", tLogger{t: t})
require.Empty(t, paths)
require.Error(t, err)
require.Contains(t, err.Error(), "rootDir is required")
@@ -769,7 +879,7 @@ func TestEnsureAndResolveGenesisDirsError(t *testing.T) {
require.NoError(t, os.Chmod(testDirectory, 0200))
// now try an error with a root dir that can't be written to
- paths, err = cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID")
+ paths, err = cfg.EnsureAndResolveGenesisDirs(testDirectory, "myGenesisID", tLogger{t: t})
require.Empty(t, paths)
require.Error(t, err)
require.Contains(t, err.Error(), "permission denied")
diff --git a/config/consensus.go b/config/consensus.go
index 190e5ce16..a2f28b97d 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -419,6 +419,11 @@ type ConsensusParams struct {
// their account balances.
StateProofExcludeTotalWeightWithRewards bool
+ // StateProofBlockHashInLightHeader specifies that the LightBlockHeader
+ // committed to by state proofs should contain the BlockHash of each
+ // block, instead of the seed.
+ StateProofBlockHashInLightHeader bool
+
// EnableAssetCloseAmount adds an extra field to the ApplyData. The field contains the amount of the remaining
// asset that were sent to the close-to address.
EnableAssetCloseAmount bool
@@ -633,6 +638,9 @@ var StateProofTopVoters int
// in a block must not exceed MaxTxnBytesPerBlock.
var MaxTxnBytesPerBlock int
+// MaxAppTxnForeignApps is the max number of foreign apps per txn across all consensus versions
+var MaxAppTxnForeignApps int
+
func checkSetMax(value int, curMax *int) {
if value > *curMax {
*curMax = value
@@ -681,6 +689,8 @@ func checkSetAllocBounds(p ConsensusParams) {
checkSetMax(p.MaxAppKeyLen, &MaxAppBytesKeyLen)
checkSetMax(int(p.StateProofTopVoters), &StateProofTopVoters)
checkSetMax(p.MaxTxnBytesPerBlock, &MaxTxnBytesPerBlock)
+
+ checkSetMax(p.MaxAppTxnForeignApps, &MaxAppTxnForeignApps)
}
// SaveConfigurableConsensus saves the configurable protocols file to the provided data directory.
@@ -754,15 +764,22 @@ func LoadConfigurableConsensusProtocols(dataDirectory string) error {
return err
}
if newConsensus != nil {
- Consensus = newConsensus
- // Set allocation limits
- for _, p := range Consensus {
- checkSetAllocBounds(p)
- }
+ SetConfigurableConsensusProtocols(newConsensus)
}
return nil
}
+// SetConfigurableConsensusProtocols sets the configurable protocols.
+func SetConfigurableConsensusProtocols(newConsensus ConsensusProtocols) ConsensusProtocols {
+ oldConsensus := Consensus
+ Consensus = newConsensus
+ // Set allocation limits
+ for _, p := range Consensus {
+ checkSetAllocBounds(p)
+ }
+ return oldConsensus
+}
+
// PreloadConfigurableConsensusProtocols loads the configurable protocols from the data directory
// and merge it with a copy of the Consensus map. Then, it returns it to the caller.
func PreloadConfigurableConsensusProtocols(dataDirectory string) (ConsensusProtocols, error) {
@@ -788,6 +805,9 @@ func PreloadConfigurableConsensusProtocols(dataDirectory string) (ConsensusProto
return Consensus.Merge(configurableConsensus), nil
}
+// initConsensusProtocols defines the consensus protocol values and how values change across different versions of the protocol.
+//
+// These are the only valid and tested consensus values and transitions. Other settings are not tested and may lead to unexpected behavior.
func initConsensusProtocols() {
// WARNING: copying a ConsensusParams by value into a new variable
// does not copy the ApprovedUpgrades map. Make sure that each new
@@ -1369,6 +1389,8 @@ func initConsensusProtocols() {
vFuture.LogicSigVersion = 10 // When moving this to a release, put a new higher LogicSigVersion here
vFuture.EnableLogicSigCostPooling = true
+ vFuture.StateProofBlockHashInLightHeader = true
+
// Setting DynamicFilterTimeout in vFuture will impact e2e test performance
// by reducing round time. Hence, it is commented out for now.
// vFuture.DynamicFilterTimeout = true
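
Because SetConfigurableConsensusProtocols returns the map it replaced, temporary overrides are straightforward to undo. A sketch of the swap-and-restore pattern this enables, e.g. in tests (tweakedParams is hypothetical; DeepCopy is assumed from the existing ConsensusProtocols helpers):

    custom := config.Consensus.DeepCopy()
    custom[protocol.ConsensusFuture] = tweakedParams // hypothetical override
    old := config.SetConfigurableConsensusProtocols(custom)
    defer config.SetConfigurableConsensusProtocols(old)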
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 8a5dc0a5d..61c2381fa 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -42,7 +42,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32" version[33]:"33"`
// Archival nodes retain a full copy of the block history. Non-Archival nodes will delete old blocks and only retain what's needed to properly validate blockchain messages (the precise number of recent blocks depends on the consensus parameters. Currently the last 1321 blocks are required). This means that non-Archival nodes require significantly less storage than Archival nodes. Relays (nodes with a valid NetAddress) are always Archival, regardless of this setting. This may change in the future. If setting this to true for the first time, the existing ledger may need to be deleted to get the historical values stored as the setting only affects current blocks forward. To do this, shut down the node and delete all .sqlite files within the data/testnet-version directory, except the crash.sqlite file. Restart the node and wait for the node to sync.
Archival bool `version[0]:"false"`
@@ -111,13 +111,13 @@ type Local struct {
// For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network.
// If not specified, the node will use the ColdDataDir.
CatchpointDir string `version[31]:""`
- // StateproofDir is an optional directory to store stateproof data.
+ // StateproofDir is an optional directory to persist state about observed and issued state proof messages.
// For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network.
- // If not specified, the node will use the ColdDataDir.
+ // If not specified, the node will use the HotDataDir.
StateproofDir string `version[31]:""`
- // CrashDBDir is an optional directory to store the crash database.
+ // CrashDBDir is an optional directory to persist agreement's consensus participation state.
// For isolation, the node will create a subdirectory in this location, named by the genesis-id of the network.
- // If not specified, the node will use the ColdDataDir.
+	// If not specified, the node will use the HotDataDir.
CrashDBDir string `version[31]:""`
// LogFileDir is an optional directory to store the log, node.log
@@ -128,8 +128,8 @@ type Local struct {
// If not specified, the node will use the ColdDataDir.
LogArchiveDir string `version[31]:""`
- // IncomingConnectionsLimit specifies the max number of long-lived incoming
- // connections. 0 means no connections allowed. Must be non-negative.
+ // IncomingConnectionsLimit specifies the max number of incoming connections
+ // for the port configured in NetAddress. 0 means no connections allowed. Must be non-negative.
// Estimating 1.5MB per incoming connection, 1.5MB*2400 = 3.6GB
IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800" version[27]:"2400"`
@@ -231,7 +231,21 @@ type Local struct {
// TxBacklogReservedCapacityPerPeer determines how much dedicated serving capacity the TxBacklog gives each peer
TxBacklogReservedCapacityPerPeer int `version[27]:"20"`
- // EnableTxBacklogRateLimiting controls if a rate limiter and congestion manager shouild be attached to the tx backlog enqueue process
+	// TxBacklogAppTxRateLimiterMaxSize denotes the max size of the app tx rate limiter,
+	// sized for roughly a thousand apps on a network of thousands of peers
+ TxBacklogAppTxRateLimiterMaxSize int `version[32]:"1048576"`
+
+ // TxBacklogAppTxPerSecondRate determines a target app per second rate for the app tx rate limiter
+ TxBacklogAppTxPerSecondRate int `version[32]:"100"`
+
+	// TxBacklogRateLimitingCongestionPct determines the backlog fill threshold percentage at which the app rate limiter kicks in
+	// or the tx backlog rate limiter kicks off.
+ TxBacklogRateLimitingCongestionPct int `version[32]:"50"`
+
+ // EnableTxBacklogAppRateLimiting controls if an app rate limiter should be attached to the tx backlog enqueue process
+ EnableTxBacklogAppRateLimiting bool `version[32]:"true"`
+
+ // EnableTxBacklogRateLimiting controls if a rate limiter and congestion manager should be attached to the tx backlog enqueue process
// if enabled, the over-all TXBacklog Size will be larger by MAX_PEERS*TxBacklogReservedCapacityPerPeer
EnableTxBacklogRateLimiting bool `version[27]:"false" version[30]:"true"`
@@ -361,6 +375,10 @@ type Local struct {
// 0 means don't store any, -1 mean unlimited and positive number suggest the maximum number of most recent catchpoint files to store.
CatchpointFileHistoryLength int `version[7]:"365"`
+ // EnableGossipService enables the gossip network HTTP websockets endpoint. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for serving gossip traffic.
+ EnableGossipService bool `version[33]:"true"`
+
// EnableLedgerService enables the ledger serving service. The functionality of this depends on NetAddress, which must also be provided.
// This functionality is required for the catchpoint catchup.
EnableLedgerService bool `version[7]:"false"`
@@ -652,7 +670,7 @@ func (cfg Local) SaveAllToDisk(root string) error {
func (cfg Local) SaveToFile(filename string) error {
var alwaysInclude []string
alwaysInclude = append(alwaysInclude, "Version")
- return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true)
+ return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude)
}
// DNSSecuritySRVEnforced returns true if SRV response verification enforced
@@ -771,9 +789,13 @@ func (cfg *Local) ResolveLogPaths(rootDir string) (liveLog, archive string) {
return liveLog, archive
}
+type logger interface {
+ Infof(format string, args ...interface{})
+}
+
// EnsureAndResolveGenesisDirs will resolve the supplied config paths to absolute paths, and will create the genesis directories of each
// returns a ResolvedGenesisDirs struct with the resolved paths for use during runtime
-func (cfg *Local) EnsureAndResolveGenesisDirs(rootDir, genesisID string) (ResolvedGenesisDirs, error) {
+func (cfg *Local) EnsureAndResolveGenesisDirs(rootDir, genesisID string, logger logger) (ResolvedGenesisDirs, error) {
var resolved ResolvedGenesisDirs
var err error
if rootDir != "" {
@@ -829,27 +851,62 @@ func (cfg *Local) EnsureAndResolveGenesisDirs(rootDir, genesisID string) (Resolv
} else {
resolved.CatchpointGenesisDir = resolved.ColdGenesisDir
}
- // if StateproofDir is not set, use ColdDataDir
+ // if StateproofDir is not set, use HotDataDir
if cfg.StateproofDir != "" {
resolved.StateproofGenesisDir, err = ensureAbsGenesisDir(cfg.StateproofDir, genesisID)
if err != nil {
return ResolvedGenesisDirs{}, err
}
} else {
- resolved.StateproofGenesisDir = resolved.ColdGenesisDir
+ resolved.StateproofGenesisDir = resolved.HotGenesisDir
+		// if separate HotDataDir and ColdDataDir were configured, but StateproofDir was not
+ if resolved.ColdGenesisDir != resolved.HotGenesisDir {
+ // move existing stateproof DB files from ColdDataDir to HotDataDir
+ moveErr := moveDirIfExists(logger, resolved.ColdGenesisDir, resolved.HotGenesisDir, StateProofFileName, StateProofFileName+"-shm", StateProofFileName+"-wal")
+ if moveErr != nil {
+ return ResolvedGenesisDirs{}, fmt.Errorf("error moving stateproof DB files from ColdDataDir %s to HotDataDir %s: %v", resolved.ColdGenesisDir, resolved.HotGenesisDir, moveErr)
+ }
+ }
}
- // if CrashDBDir is not set, use ColdDataDir
+ // if CrashDBDir is not set, use HotDataDir
if cfg.CrashDBDir != "" {
resolved.CrashGenesisDir, err = ensureAbsGenesisDir(cfg.CrashDBDir, genesisID)
if err != nil {
return ResolvedGenesisDirs{}, err
}
} else {
- resolved.CrashGenesisDir = resolved.ColdGenesisDir
+ resolved.CrashGenesisDir = resolved.HotGenesisDir
+		// if separate HotDataDir and ColdDataDir were configured, but CrashDBDir was not
+ if resolved.ColdGenesisDir != resolved.HotGenesisDir {
+ // move existing crash DB files from ColdDataDir to HotDataDir
+ moveErr := moveDirIfExists(logger, resolved.ColdGenesisDir, resolved.HotGenesisDir, CrashFilename, CrashFilename+"-shm", CrashFilename+"-wal")
+ if moveErr != nil {
+ return ResolvedGenesisDirs{}, fmt.Errorf("error moving crash DB files from ColdDataDir %s to HotDataDir %s: %v", resolved.ColdGenesisDir, resolved.HotGenesisDir, moveErr)
+ }
+ }
}
return resolved, nil
}
+func moveDirIfExists(logger logger, srcdir, dstdir string, files ...string) error {
+ // first, check if any files already exist in dstdir, and quit if so
+ for _, file := range files {
+ if _, err := os.Stat(filepath.Join(dstdir, file)); err == nil {
+ return fmt.Errorf("destination file %s already exists, not overwriting", filepath.Join(dstdir, file))
+ }
+ }
+ // then, check if any files exist in srcdir, and move them to dstdir
+ for _, file := range files {
+ if _, err := os.Stat(filepath.Join(srcdir, file)); err == nil {
+ if err := os.Rename(filepath.Join(srcdir, file), filepath.Join(dstdir, file)); err != nil {
+ return fmt.Errorf("failed to move file %s from %s to %s: %v", file, srcdir, dstdir, err)
+ }
+ logger.Infof("Moved DB file %s from ColdDataDir %s to HotDataDir %s", file, srcdir, dstdir)
+ }
+ }
+ return nil
+}
+
// AdjustConnectionLimits updates RestConnectionsSoftLimit, RestConnectionsHardLimit, IncomingConnectionsLimit
// if requiredFDs greater than maxFDs
func (cfg *Local) AdjustConnectionLimits(requiredFDs, maxFDs uint64) bool {
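
Anything with an Infof method satisfies the new logger parameter, and resolving the genesis dirs now also migrates the hot-path databases out of a previously shared cold directory. A sketch of a caller (paths are hypothetical; logging.Base() satisfies the interface):

    cfg := config.GetDefaultLocal()
    cfg.HotDataDir = "/fast-ssd/algod"  // hypothetical
    cfg.ColdDataDir = "/bulk-hdd/algod" // hypothetical
    // crash.sqlite* and stateproof.sqlite* found under the cold genesis dir
    // are moved into the hot genesis dir before the paths are returned;
    // a file already present at the destination aborts with an error
    dirs, err := cfg.EnsureAndResolveGenesisDirs("/node/data", "myGenesisID", logging.Base())
    if err != nil {
        return err
    }
    _ = dirs.CrashGenesisDir // now resolves under HotDataDir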
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 12689e4e9..3df773a76 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 31,
+ Version: 33,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 15,
@@ -70,6 +70,7 @@ var defaultLocal = Local{
EnableExperimentalAPI: false,
EnableFollowMode: false,
EnableGossipBlockService: true,
+ EnableGossipService: true,
EnableIncomingMessageFilter: false,
EnableLedgerService: false,
EnableMetricReporting: false,
@@ -81,6 +82,7 @@ var defaultLocal = Local{
EnableRequestLogger: false,
EnableRuntimeMetrics: false,
EnableTopAccountsReporting: false,
+ EnableTxBacklogAppRateLimiting: true,
EnableTxBacklogRateLimiting: true,
EnableTxnEvalTracer: false,
EnableUsageLog: false,
@@ -141,6 +143,9 @@ var defaultLocal = Local{
TrackerDBDir: "",
TransactionSyncDataExchangeRate: 0,
TransactionSyncSignificantMessageThreshold: 0,
+ TxBacklogAppTxPerSecondRate: 100,
+ TxBacklogAppTxRateLimiterMaxSize: 1048576,
+ TxBacklogRateLimitingCongestionPct: 50,
TxBacklogReservedCapacityPerPeer: 20,
TxBacklogServiceRateWindowSeconds: 10,
TxBacklogSize: 26000,
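
The three new backlog knobs are ordinary Local fields, so they can be overridden before the config is saved; the values below are illustrative, not recommendations:

    cfg := config.GetDefaultLocal()
    cfg.EnableTxBacklogAppRateLimiting = true
    cfg.TxBacklogAppTxPerSecondRate = 200          // default 100
    cfg.TxBacklogAppTxRateLimiterMaxSize = 2097152 // default 1048576
    cfg.TxBacklogRateLimitingCongestionPct = 75    // default 50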
diff --git a/config/version.go b/config/version.go
index 1c3cb549f..17bd00a9b 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 19
+const VersionMinor = 20
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/batchverifier.c b/crypto/batchverifier.c
new file mode 100644
index 000000000..118542aa7
--- /dev/null
+++ b/crypto/batchverifier.c
@@ -0,0 +1,20 @@
+#include "sodium.h"
+int ed25519_batch_wrapper(const unsigned char **messages2D,
+ const unsigned char **publicKeys2D,
+ const unsigned char **signatures2D,
+ const unsigned char *messages1D,
+ const unsigned long long *mlen,
+ const unsigned char *publicKeys1D,
+ const unsigned char *signatures1D,
+ size_t num,
+ int *valid) {
+ // fill 2-D arrays for messages, pks, sigs from provided 1-D arrays
+ unsigned long long mpos = 0;
+ for (size_t i = 0; i < num; i++) {
+ messages2D[i] = &messages1D[mpos];
+ mpos += mlen[i];
+ publicKeys2D[i] = &publicKeys1D[i*crypto_sign_ed25519_PUBLICKEYBYTES];
+ signatures2D[i] = &signatures1D[i*crypto_sign_ed25519_BYTES];
+ }
+ return crypto_sign_ed25519_open_batch(messages2D, mlen, publicKeys2D, signatures2D, num, valid);
+}
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
index 9c14771ba..af7a677ac 100644
--- a/crypto/batchverifier.go
+++ b/crypto/batchverifier.go
@@ -30,15 +30,22 @@ package crypto
// #cgo windows,amd64 CFLAGS: -I${SRCDIR}/libs/windows/amd64/include
// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/libs/windows/amd64/lib/libsodium.a
// #include <stdint.h>
-// #include "sodium.h"
// enum {
// sizeofPtr = sizeof(void*),
// sizeofULongLong = sizeof(unsigned long long),
// };
+// int ed25519_batch_wrapper(const unsigned char **messages2D,
+// const unsigned char **publicKeys2D,
+// const unsigned char **signatures2D,
+// const unsigned char *messages1D,
+// const unsigned long long *mlen,
+// const unsigned char *publicKeys1D,
+// const unsigned char *signatures1D,
+// size_t num,
+// int *valid_p);
import "C"
import (
"errors"
- "runtime"
"unsafe"
)
@@ -120,14 +127,21 @@ func (b *BatchVerifier) Verify() error {
// if some signatures are invalid, true will be set in failed at the corresponding indexes, and
// ErrBatchVerificationFailed for err
func (b *BatchVerifier) VerifyWithFeedback() (failed []bool, err error) {
- if b.GetNumberOfEnqueuedSignatures() == 0 {
+ if len(b.messages) == 0 {
return nil, nil
}
- var messages = make([][]byte, b.GetNumberOfEnqueuedSignatures())
+
+ const estimatedMessageSize = 64
+ msgLengths := make([]uint64, 0, len(b.messages))
+ var messages = make([]byte, 0, len(b.messages)*estimatedMessageSize)
+
+ lenWas := 0
for i := range b.messages {
- messages[i] = HashRep(b.messages[i])
+ messages = HashRepToBuff(b.messages[i], messages)
+ msgLengths = append(msgLengths, uint64(len(messages)-lenWas))
+ lenWas = len(messages)
}
- allValid, failed := batchVerificationImpl(messages, b.publicKeys, b.signatures)
+ allValid, failed := batchVerificationImpl(messages, msgLengths, b.publicKeys, b.signatures)
if allValid {
return failed, nil
}
@@ -137,50 +151,27 @@ func (b *BatchVerifier) VerifyWithFeedback() (failed []bool, err error) {
// batchVerificationImpl invokes the ed25519 batch verification algorithm.
// it returns true if all the signatures were authentically signed by the owners
// otherwise, returns false, and sets the indexes of the failed sigs in failed
-func batchVerificationImpl(messages [][]byte, publicKeys []SignatureVerifier, signatures []Signature) (allSigsValid bool, failed []bool) {
-
- numberOfSignatures := len(messages)
-
- messagesAllocation := C.malloc(C.size_t(C.sizeofPtr * numberOfSignatures))
- messagesLenAllocation := C.malloc(C.size_t(C.sizeofULongLong * numberOfSignatures))
- publicKeysAllocation := C.malloc(C.size_t(C.sizeofPtr * numberOfSignatures))
- signaturesAllocation := C.malloc(C.size_t(C.sizeofPtr * numberOfSignatures))
- valid := C.malloc(C.size_t(C.sizeof_int * numberOfSignatures))
-
- defer func() {
- // release staging memory
- C.free(messagesAllocation)
- C.free(messagesLenAllocation)
- C.free(publicKeysAllocation)
- C.free(signaturesAllocation)
- C.free(valid)
- }()
-
- // load all the data pointers into the array pointers.
- for i := 0; i < numberOfSignatures; i++ {
- *(*uintptr)(unsafe.Pointer(uintptr(messagesAllocation) + uintptr(i*C.sizeofPtr))) = uintptr(unsafe.Pointer(&messages[i][0]))
- *(*C.ulonglong)(unsafe.Pointer(uintptr(messagesLenAllocation) + uintptr(i*C.sizeofULongLong))) = C.ulonglong(len(messages[i]))
- *(*uintptr)(unsafe.Pointer(uintptr(publicKeysAllocation) + uintptr(i*C.sizeofPtr))) = uintptr(unsafe.Pointer(&publicKeys[i][0]))
- *(*uintptr)(unsafe.Pointer(uintptr(signaturesAllocation) + uintptr(i*C.sizeofPtr))) = uintptr(unsafe.Pointer(&signatures[i][0]))
- }
+func batchVerificationImpl(messages []byte, msgLengths []uint64, publicKeys []SignatureVerifier, signatures []Signature) (allSigsValid bool, failed []bool) {
+
+ numberOfSignatures := len(msgLengths)
+ valid := make([]C.int, numberOfSignatures)
+ messages2D := make([]*C.uchar, numberOfSignatures)
+ publicKeys2D := make([]*C.uchar, numberOfSignatures)
+ signatures2D := make([]*C.uchar, numberOfSignatures)
// call the batch verifier
- allValid := C.crypto_sign_ed25519_open_batch(
- (**C.uchar)(unsafe.Pointer(messagesAllocation)),
- (*C.ulonglong)(unsafe.Pointer(messagesLenAllocation)),
- (**C.uchar)(unsafe.Pointer(publicKeysAllocation)),
- (**C.uchar)(unsafe.Pointer(signaturesAllocation)),
- C.size_t(len(messages)),
- (*C.int)(unsafe.Pointer(valid)))
-
- runtime.KeepAlive(messages)
- runtime.KeepAlive(publicKeys)
- runtime.KeepAlive(signatures)
+ allValid := C.ed25519_batch_wrapper(
+ &messages2D[0], &publicKeys2D[0], &signatures2D[0],
+ (*C.uchar)(&messages[0]),
+ (*C.ulonglong)(&msgLengths[0]),
+ (*C.uchar)(&publicKeys[0][0]),
+ (*C.uchar)(&signatures[0][0]),
+ C.size_t(numberOfSignatures),
+ (*C.int)(&valid[0]))
failed = make([]bool, numberOfSignatures)
for i := 0; i < numberOfSignatures; i++ {
- cint := *(*C.int)(unsafe.Pointer(uintptr(valid) + uintptr(i*C.sizeof_int)))
- failed[i] = (cint == 0)
+ failed[i] = (valid[i] == 0)
}
return allValid == 0, failed
}
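
The rewritten path flattens every message into a single contiguous buffer plus a parallel array of lengths, so only a handful of stable base pointers cross the cgo boundary and the old per-call C.malloc staging (with its unsafe pointer arithmetic) disappears; the C wrapper rebuilds the 2-D views on its side. The packing convention, restated as a standalone sketch (HashRepToBuff appends HashRep(m) to the buffer, as used above):

    msgLengths := make([]uint64, 0, len(b.messages))
    flat := make([]byte, 0, len(b.messages)*64) // 64 = estimated message size
    prev := 0
    for _, m := range b.messages {
        flat = HashRepToBuff(m, flat)
        msgLengths = append(msgLengths, uint64(len(flat)-prev))
        prev = len(flat)
    }
    // flat and msgLengths feed batchVerificationImpl directly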
diff --git a/crypto/curve25519.go b/crypto/curve25519.go
index 58950a3de..a8637399d 100644
--- a/crypto/curve25519.go
+++ b/crypto/curve25519.go
@@ -35,6 +35,7 @@ import "C"
import (
"fmt"
+ "unsafe"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/util/metrics"
@@ -64,6 +65,30 @@ func init() {
_ = [C.crypto_sign_ed25519_PUBLICKEYBYTES]byte(ed25519PublicKey{})
_ = [C.crypto_sign_ed25519_SECRETKEYBYTES]byte(ed25519PrivateKey{})
_ = [C.crypto_sign_ed25519_SEEDBYTES]byte(ed25519Seed{})
+
+ // Check that this platform makes slices []Signature and []SignatureVerifier that use a backing
+ // array of contiguously allocated 64- and 32-byte segments, respectively, with no padding.
+ // These slice's backing arrays are passed to C.ed25519_batch_wrapper. In practice, this check
+ // should always succeed, but to be careful we can double-check, since the Go specification does
+ // not explicitly define platform-specific alignment sizes and slice allocation behavior.
+ length := 1024
+ sigs := make([]Signature, length) // same as [][64]byte
+ pks := make([]SignatureVerifier, length) // same as [][32]byte
+
+ for i := 1; i < length; i++ {
+ if uintptr(unsafe.Pointer(&sigs[i]))-uintptr(unsafe.Pointer(&sigs[0])) != uintptr(i)*C.crypto_sign_ed25519_BYTES {
+ panic("Unexpected alignment for a slice of signatures")
+ }
+ if uintptr(unsafe.Pointer(&pks[i]))-uintptr(unsafe.Pointer(&pks[0])) != uintptr(i)*C.crypto_sign_ed25519_PUBLICKEYBYTES {
+ panic("Unexpected alignment for a slice of public keys")
+ }
+ }
+ if uintptr(unsafe.Pointer(&sigs[length-1]))-uintptr(unsafe.Pointer(&sigs[0])) != uintptr(length-1)*C.crypto_sign_ed25519_BYTES {
+ panic("Unexpected total size for a backing array of signatures")
+ }
+ if uintptr(unsafe.Pointer(&pks[length-1]))-uintptr(unsafe.Pointer(&pks[0])) != uintptr(length-1)*C.crypto_sign_ed25519_PUBLICKEYBYTES {
+ panic("Unexpected total size for a backing array of public keys")
+ }
}
// A Seed holds the entropy needed to generate cryptographic keys.
diff --git a/crypto/hashes.go b/crypto/hashes.go
index 8933717e4..04db757f3 100644
--- a/crypto/hashes.go
+++ b/crypto/hashes.go
@@ -118,7 +118,7 @@ func (z *HashFactory) Validate() error {
}
// GenericHashObj Makes it easier to sum using hash interface and Hashable interface
-func GenericHashObj(hsh hash.Hash, h Hashable) []byte {
+func GenericHashObj[H Hashable](hsh hash.Hash, h H) []byte {
rep := HashRep(h)
return hashBytes(hsh, rep)
}
diff --git a/crypto/hashes_test.go b/crypto/hashes_test.go
index dd4b8c3bd..9f8b57fe3 100644
--- a/crypto/hashes_test.go
+++ b/crypto/hashes_test.go
@@ -53,7 +53,6 @@ func TestHashSum(t *testing.T) {
dgst := HashObj(TestingHashable{})
a.Equal(GenericHashObj(h, TestingHashable{}), dgst[:])
-
}
func TestEmptyHash(t *testing.T) {
diff --git a/crypto/merklearray/layer.go b/crypto/merklearray/layer.go
index 5018ae074..88eed6ffe 100644
--- a/crypto/merklearray/layer.go
+++ b/crypto/merklearray/layer.go
@@ -37,14 +37,14 @@ type pair struct {
hashDigestSize int
}
-func (p *pair) ToBeHashed() (protocol.HashID, []byte) {
+func (p pair) ToBeHashed() (protocol.HashID, []byte) {
// hashing of internal node will always be fixed length.
// If one of the children is missing we use [0...0].
// The size of the slice is based on the relevant hash function output size
buf := make([]byte, 2*p.hashDigestSize)
copy(buf[:], p.l[:])
copy(buf[len(p.l):], p.r[:])
- return protocol.MerkleArrayNode, buf[:]
+ return protocol.MerkleArrayNode, buf
}
func upWorker(ws *workerState, in Layer, out Layer, h hash.Hash) {
@@ -69,7 +69,7 @@ func upWorker(ws *workerState, in Layer, out Layer, h hash.Hash) {
p.r = in[i+1]
}
- out[i/2] = crypto.GenericHashObj(h, &p)
+ out[i/2] = crypto.GenericHashObj(h, p)
}
batchSize += 2
diff --git a/crypto/merklearray/merkle_test.go b/crypto/merklearray/merkle_test.go
index 9a1a5c0fd..0d392dcef 100644
--- a/crypto/merklearray/merkle_test.go
+++ b/crypto/merklearray/merkle_test.go
@@ -1172,12 +1172,13 @@ func merkleCommitBench(b *testing.B, hashType crypto.HashType) {
msg := make(TestBuf, sz)
crypto.RandBytes(msg[:])
- for cnt := 10; cnt <= 10000000; cnt *= 10 {
+ for cnt := 10; cnt <= 100000; cnt *= 10 {
var a TestRepeatingArray
a.item = msg
a.count = uint64(cnt)
b.Run(fmt.Sprintf("Item%d/Count%d", sz, cnt), func(b *testing.B) {
+ b.ReportAllocs()
for i := 0; i < b.N; i++ {
tree, err := Build(a, crypto.HashFactory{HashType: hashType})
require.NoError(b, err)
@@ -1205,6 +1206,7 @@ func benchmarkMerkleProve1M(b *testing.B, hashType crypto.HashType) {
require.NoError(b, err)
b.ResetTimer()
+ b.ReportAllocs()
for i := uint64(0); i < uint64(b.N); i++ {
_, err := tree.Prove([]uint64{i % a.count})
@@ -1238,6 +1240,7 @@ func benchmarkMerkleVerify1M(b *testing.B, hashType crypto.HashType) {
}
b.ResetTimer()
+ b.ReportAllocs()
for i := uint64(0); i < uint64(b.N); i++ {
err := Verify(root, map[uint64]crypto.Hashable{i % a.count: msg}, proofs[i])
diff --git a/crypto/merklearray/partial.go b/crypto/merklearray/partial.go
index 4baf777f3..b1aa07c52 100644
--- a/crypto/merklearray/partial.go
+++ b/crypto/merklearray/partial.go
@@ -118,7 +118,7 @@ func (pl partialLayer) up(s *siblings, l uint64, doHash bool, hsh hash.Hash) (pa
p.l = siblingHash
p.r = posHash
}
- nextLayerHash = crypto.GenericHashObj(hsh, &p)
+ nextLayerHash = crypto.GenericHashObj(hsh, p)
}
res = append(res, layerItem{
diff --git a/crypto/merklearray/worker.go b/crypto/merklearray/worker.go
index 2a8059336..b6d273e37 100644
--- a/crypto/merklearray/worker.go
+++ b/crypto/merklearray/worker.go
@@ -28,7 +28,7 @@ type workerState struct {
// maxidx is the total number of elements to process, and nextidx
// is the next element that a worker should process.
maxidx uint64
- nextidx uint64
+ nextidx atomic.Uint64
// nworkers is the number of workers that can be started.
// This field gets decremented once workers are launched,
@@ -65,7 +65,7 @@ func newWorkerState(max uint64) *workerState {
// by delta. This implicitly means that the worker that calls next
// is promising to process delta elements at the returned position.
func (ws *workerState) next(delta uint64) uint64 {
- return atomic.AddUint64(&ws.nextidx, delta) - delta
+ return ws.nextidx.Add(delta) - delta
}
// wait waits for all of the workers to finish.
@@ -82,7 +82,7 @@ func (ws *workerState) nextWorker() bool {
_ = <-ws.starting
- curidx := atomic.LoadUint64(&ws.nextidx)
+ curidx := ws.nextidx.Load()
if curidx >= ws.maxidx {
return false
}
diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go
index 344fd33f7..a2db211a7 100644
--- a/crypto/onetimesig.go
+++ b/crypto/onetimesig.go
@@ -35,8 +35,8 @@ import (
// of a secret-key compromise.
type OneTimeSignature struct {
// Unfortunately we forgot to mark this struct as omitempty at
- // one point, and now it's hard to recover from that if we want
- // to preserve encodings..
+ // one point, and now it's hard to change if we want to preserve
+ // encodings.
_struct struct{} `codec:""`
// Sig is a signature of msg under the key PK.
@@ -319,8 +319,21 @@ func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, message
Batch: id.Batch,
}
+	// serialize encoded batchID, offsetID, message into a contiguous memory buffer with the layout
+ // hashRep(batchID)... hashRep(offsetID)... hashRep(message)...
+ const estimatedSize = 256
+ messageBuffer := make([]byte, 0, estimatedSize)
+
+ messageBuffer = HashRepToBuff(batchID, messageBuffer)
+ batchIDLen := uint64(len(messageBuffer))
+ messageBuffer = HashRepToBuff(offsetID, messageBuffer)
+ offsetIDLen := uint64(len(messageBuffer)) - batchIDLen
+ messageBuffer = HashRepToBuff(message, messageBuffer)
+ messageLen := uint64(len(messageBuffer)) - offsetIDLen - batchIDLen
+ msgLengths := []uint64{batchIDLen, offsetIDLen, messageLen}
allValid, _ := batchVerificationImpl(
- [][]byte{HashRep(batchID), HashRep(offsetID), HashRep(message)},
+ messageBuffer,
+ msgLengths,
[]PublicKey{PublicKey(v), PublicKey(batchID.SubKeyPK), PublicKey(offsetID.SubKeyPK)},
[]Signature{Signature(sig.PK2Sig), Signature(sig.PK1Sig), Signature(sig.Sig)},
)
diff --git a/crypto/statetrie/nibbles/nibbles.go b/crypto/statetrie/nibbles/nibbles.go
new file mode 100644
index 000000000..8a8409b6b
--- /dev/null
+++ b/crypto/statetrie/nibbles/nibbles.go
@@ -0,0 +1,161 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package nibbles
+
+import (
+ "bytes"
+ "errors"
+)
+
+// Nibbles are 4-bit values stored in 8-bit byte arrays.
+type Nibbles []byte
+
+const (
+ // oddIndicator for serialization when the last nibble in a byte array
+ // is not part of the nibble array.
+ oddIndicator = 0x01
+ // evenIndicator for when it is.
+ evenIndicator = 0x03
+)
+
+// Pack packs the nibble array into a byte array.
+// It returns the byte array and a bool indicating whether the last byte is a full
+// byte or whether only its high 4 bits are part of the encoding;
+// the last four bits of an odd-length encoding will always be zero.
+// Allocates a new byte slice.
+//
+// [0x1, 0x2, 0x3] -> [0x12, 0x30], true
+// [0x1, 0x2, 0x3, 0x4] -> [0x12, 0x34], false
+// [0x1] -> [0x10], true
+// [] -> [], false
+func Pack(nyb Nibbles) ([]byte, bool) {
+ length := len(nyb)
+ data := make([]byte, length/2+length%2, length/2+length%2+1)
+ for i := 0; i < length; i++ {
+ if i%2 == 0 {
+ data[i/2] = nyb[i] << 4
+ } else {
+ data[i/2] = data[i/2] | nyb[i]
+ }
+ }
+
+ return data, length%2 != 0
+}
+
+// Equal returns true if the two nibble arrays are equal
+// [0x1, 0x2, 0x3], [0x1, 0x2, 0x3] -> true
+// [0x1, 0x2, 0x3], [0x1, 0x2, 0x4] -> false
+// [0x1, 0x2, 0x3], [0x1] -> false
+// [0x1, 0x2, 0x3], [0x1, 0x2, 0x3, 0x4] -> false
+// [], [] -> true
+// [], [0x1] -> false
+func Equal(nyb1 Nibbles, nyb2 Nibbles) bool {
+ return bytes.Equal(nyb1, nyb2)
+}
+
+// ShiftLeft returns a slice of nyb1 containing the Nibbles that remain after
+// dropping the first numNibbles.
+func ShiftLeft(nyb1 Nibbles, numNibbles int) Nibbles {
+ if numNibbles <= 0 {
+ return nyb1
+ }
+ if numNibbles > len(nyb1) {
+ return nyb1[:0]
+ }
+
+ return nyb1[numNibbles:]
+}
+
+// SharedPrefix returns a slice from nyb1 that contains the shared prefix
+// between nyb1 and nyb2
+func SharedPrefix(nyb1 Nibbles, nyb2 Nibbles) Nibbles {
+ minLength := len(nyb1)
+ if len(nyb2) < minLength {
+ minLength = len(nyb2)
+ }
+ for i := 0; i < minLength; i++ {
+ if nyb1[i] != nyb2[i] {
+ return nyb1[:i]
+ }
+ }
+ return nyb1[:minLength]
+}
+
+// Serialize returns a byte array that represents the Nibbles, terminated by an
+// indicator byte. An empty nibble array serializes to the single byte 0x03
+// (evenIndicator), as an empty array contains no partial byte.
+//
+// [0x1, 0x2, 0x3] -> [0x12, 0x30, 0x01]
+// [0x1, 0x2, 0x3, 0x4] -> [0x12, 0x34, 0x03]
+// [] -> [0x03]
+func Serialize(nyb Nibbles) (data []byte) {
+ p, h := Pack(nyb)
+ if h {
+ // 0x01 is the odd length indicator
+ return append(p, oddIndicator)
+ }
+ // 0x03 is the even length indicator
+ return append(p, evenIndicator)
+}
+
+// Deserialize returns a nibble array from the byte array.
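+// The final byte must be oddIndicator or evenIndicator; an empty input or any
+// other trailing byte is rejected as an invalid encoding.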
+func Deserialize(encoding []byte) (Nibbles, error) {
+ var ns Nibbles
+ length := len(encoding)
+ if length == 0 {
+ return nil, errors.New("invalid encoding")
+ }
+ if encoding[length-1] == oddIndicator {
+ if length == 1 {
+ return nil, errors.New("invalid encoding")
+ }
+ ns = makeNibbles(encoding[:length-1], true)
+ } else if encoding[length-1] == evenIndicator {
+ ns = makeNibbles(encoding[:length-1], false)
+ } else {
+ return nil, errors.New("invalid encoding")
+ }
+ return ns, nil
+}
+
+// makeNibbles returns a nibble array from the byte array. If oddLength is true,
+// the last 4 bits of the last byte of the array are ignored.
+//
+// [0x12, 0x30], true -> [0x1, 0x2, 0x3]
+// [0x12, 0x34], false -> [0x1, 0x2, 0x3, 0x4]
+// [0x12, 0x34], true -> [0x1, 0x2, 0x3] <-- low 4 bits of the last byte ignored
+// [], false -> []
+// never to be called with [], true
+// Allocates a new byte slice.
+func makeNibbles(data []byte, oddLength bool) Nibbles {
+ length := len(data) * 2
+ if oddLength {
+ length = length - 1
+ }
+ ns := make([]byte, length)
+
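+ // j indexes the source byte; each byte yields its high nibble first, then its low nibble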
+ j := 0
+ for i := 0; i < length; i++ {
+ if i%2 == 0 {
+ ns[i] = data[j] >> 4
+ } else {
+ ns[i] = data[j] & 0x0f
+ j++
+ }
+ }
+ return ns
+}
diff --git a/crypto/statetrie/nibbles/nibbles_test.go b/crypto/statetrie/nibbles/nibbles_test.go
new file mode 100644
index 000000000..c088f1dd8
--- /dev/null
+++ b/crypto/statetrie/nibbles/nibbles_test.go
@@ -0,0 +1,218 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package nibbles
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestNibblesRandom(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ seed := time.Now().UnixNano()
+ localRand := rand.New(rand.NewSource(seed))
+ defer func() {
+ if t.Failed() {
+ t.Logf("The seed was %d", seed)
+ }
+ }()
+
+ for i := 0; i < 1_000; i++ {
+ length := localRand.Intn(8192) + 1
+ data := make([]byte, length)
+ localRand.Read(data)
+ half := localRand.Intn(2) == 0 // half of the time, we have an odd number of nibbles
+ if half && localRand.Intn(2) == 0 {
+ data[len(data)-1] &= 0xf0 // sometimes clear the last nibble, sometimes do not
+ }
+ nibbles := makeNibbles(data, half)
+
+ data2 := Serialize(nibbles)
+ nibbles2, err := Deserialize(data2)
+ require.NoError(t, err)
+ require.Equal(t, nibbles, nibbles2)
+
+ if half {
+ data[len(data)-1] &= 0xf0 // clear last nibble
+ }
+ packed, odd := Pack(nibbles)
+ require.Equal(t, odd, half)
+ require.Equal(t, packed, data)
+ unpacked := makeNibbles(packed, odd)
+ require.Equal(t, nibbles, unpacked)
+
+ packed, odd = Pack(nibbles2)
+ require.Equal(t, odd, half)
+ require.Equal(t, packed, data)
+ unpacked = makeNibbles(packed, odd)
+ require.Equal(t, nibbles2, unpacked)
+ }
+}
+
+func TestNibblesDeserialize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ enc := []byte{0x01}
+ _, err := Deserialize(enc)
+ require.Error(t, err, "should return invalid encoding error")
+}
+
+func TestNibbles(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ sampleNibbles := []Nibbles{
+ {0x0, 0x1, 0x2, 0x3, 0x4},
+ {0x4, 0x1, 0x2, 0x3, 0x4},
+ {0x0, 0x0, 0x2, 0x3, 0x5},
+ {0x0, 0x1, 0x2, 0x3, 0x4, 0x5},
+ {},
+ {0x1},
+ }
+
+ sampleNibblesPacked := [][]byte{
+ {0x01, 0x23, 0x40},
+ {0x41, 0x23, 0x40},
+ {0x00, 0x23, 0x50},
+ {0x01, 0x23, 0x45},
+ {},
+ {0x10},
+ }
+
+ sampleNibblesShifted1 := []Nibbles{
+ {0x1, 0x2, 0x3, 0x4},
+ {0x1, 0x2, 0x3, 0x4},
+ {0x0, 0x2, 0x3, 0x5},
+ {0x1, 0x2, 0x3, 0x4, 0x5},
+ {},
+ {},
+ }
+
+ sampleNibblesShifted2 := []Nibbles{
+ {0x2, 0x3, 0x4},
+ {0x2, 0x3, 0x4},
+ {0x2, 0x3, 0x5},
+ {0x2, 0x3, 0x4, 0x5},
+ {},
+ {},
+ }
+
+ for i, n := range sampleNibbles {
+ b, oddLength := Pack(n)
+ if oddLength {
+ // require that packing an odd-length nibble array returns a byte slice whose last nibble is 0x0
+ require.Equal(t, b[len(b)-1]&0x0f == 0x00, true)
+ }
+
+ require.Equal(t, len(n)%2 == 1, oddLength)
+ require.Equal(t, bytes.Equal(b, sampleNibblesPacked[i]), true)
+
+ unp := makeNibbles(b, oddLength)
+ require.Equal(t, bytes.Equal(unp, n), true)
+
+ }
+ for i, n := range sampleNibbles {
+ require.Equal(t, bytes.Equal(ShiftLeft(n, -2), sampleNibbles[i]), true)
+ require.Equal(t, bytes.Equal(ShiftLeft(n, -1), sampleNibbles[i]), true)
+ require.Equal(t, bytes.Equal(ShiftLeft(n, 0), sampleNibbles[i]), true)
+ require.Equal(t, bytes.Equal(ShiftLeft(n, 1), sampleNibblesShifted1[i]), true)
+ require.Equal(t, bytes.Equal(ShiftLeft(n, 2), sampleNibblesShifted2[i]), true)
+ }
+
+ sampleSharedNibbles := [][]Nibbles{
+ {{0x0, 0x1, 0x2, 0x9, 0x2}, {0x0, 0x1, 0x2}},
+ {{0x4, 0x1}, {0x4, 0x1}},
+ {{0x9, 0x2, 0x3}, {}},
+ {{0x0}, {0x0}},
+ {{}, {}},
+ }
+ for i, n := range sampleSharedNibbles {
+ shared := SharedPrefix(n[0], sampleNibbles[i])
+ require.Equal(t, bytes.Equal(shared, n[1]), true)
+ shared = SharedPrefix(sampleNibbles[i], n[0])
+ require.Equal(t, bytes.Equal(shared, n[1]), true)
+ }
+
+ sampleSerialization := []Nibbles{
+ {0x0, 0x1, 0x2, 0x9, 0x2},
+ {0x4, 0x1},
+ {0x4, 0x1, 0x4, 0xf},
+ {0x4, 0x1, 0x4, 0xf, 0x0},
+ {0x9, 0x2, 0x3},
+ {},
+ {0x05},
+ {},
+ }
+
+ for _, n := range sampleSerialization {
+ nbytes := Serialize(n)
+ n2, err := Deserialize(nbytes)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(n, n2))
+ require.Equal(t, len(nbytes), len(n)/2+len(n)%2+1, fmt.Sprintf("nbytes: %v, n: %v", nbytes, n))
+ if len(n)%2 == 0 {
+ require.Equal(t, nbytes[len(nbytes)-1], uint8(evenIndicator))
+ } else {
+ require.Equal(t, nbytes[len(nbytes)-1], uint8(oddIndicator))
+ require.Equal(t, nbytes[len(nbytes)-2]&0x0F, uint8(0))
+ }
+ }
+
+ makeNibblesTestExpected := Nibbles{0x0, 0x1, 0x2, 0x9, 0x2}
+ makeNibblesTestData := []byte{0x01, 0x29, 0x20}
+ mntr := makeNibbles(makeNibblesTestData, true)
+ require.Equal(t, bytes.Equal(mntr, makeNibblesTestExpected), true)
+ makeNibblesTestExpectedFW := Nibbles{0x0, 0x1, 0x2, 0x9, 0x2, 0x0}
+ mntr2 := makeNibbles(makeNibblesTestData, false)
+ require.Equal(t, bytes.Equal(mntr2, makeNibblesTestExpectedFW), true)
+
+ sampleEqualFalse := [][]Nibbles{
+ {{0x0, 0x1, 0x2, 0x9, 0x2}, {0x0, 0x1, 0x2, 0x9}},
+ {{0x0, 0x1, 0x2, 0x9}, {0x0, 0x1, 0x2, 0x9, 0x2}},
+ {{0x0, 0x1, 0x2, 0x9, 0x2}, {}},
+ {{}, {0x0, 0x1, 0x2, 0x9, 0x2}},
+ {{0x0}, {}},
+ {{}, {0x0}},
+ {{}, {0x1}},
+ }
+ for _, n := range sampleEqualFalse {
+ ds := Serialize(n[0])
+ us, e := Deserialize(ds)
+ require.NoError(t, e)
+ require.Equal(t, Equal(n[0], us), true)
+ require.Equal(t, Equal(n[0], n[0]), true)
+ require.Equal(t, Equal(us, n[0]), true)
+ require.Equal(t, Equal(n[0], n[1]), false)
+ require.Equal(t, Equal(us, n[1]), false)
+ require.Equal(t, Equal(n[1], n[0]), false)
+ require.Equal(t, Equal(n[1], us), false)
+ }
+
+ _, e := Deserialize([]byte{})
+ require.Error(t, e)
+ _, e = Deserialize([]byte{0x02})
+ require.Error(t, e)
+}
diff --git a/crypto/util.go b/crypto/util.go
index 60bb12aef..078d52c0c 100644
--- a/crypto/util.go
+++ b/crypto/util.go
@@ -35,11 +35,19 @@ type Hashable interface {
}
// HashRep appends the correct hashid before the message to be hashed.
-func HashRep(h Hashable) []byte {
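+// Using a type parameter instead of an interface argument lets the compiler
+// instantiate HashRep per concrete type, which can avoid an interface
+// conversion at call sites (a likely motivation for this change).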
+func HashRep[H Hashable](h H) []byte {
hashid, data := h.ToBeHashed()
return append([]byte(hashid), data...)
}
+// HashRepToBuff appends the correct hashid and the message to be hashed onto the provided buffer.
+func HashRepToBuff(h Hashable, buffer []byte) []byte {
+ hashid, data := h.ToBeHashed()
+ buffer = append(buffer, hashid...)
+ buffer = append(buffer, data...)
+ return buffer
+}
+
// DigestSize is the number of bytes in the preferred hash Digest used here.
const DigestSize = sha512.Size256
@@ -86,7 +94,7 @@ func Hash(data []byte) Digest {
}
// HashObj computes a hash of a Hashable object and its type
-func HashObj(h Hashable) Digest {
+func HashObj[H Hashable](h H) Digest {
return Hash(HashRep(h))
}
diff --git a/crypto/util_test.go b/crypto/util_test.go
index 667da0bcd..2e0828bcc 100644
--- a/crypto/util_test.go
+++ b/crypto/util_test.go
@@ -17,8 +17,10 @@
package crypto
import (
+ "fmt"
"testing"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -46,3 +48,32 @@ func TestDigest_IsZero(t *testing.T) {
require.NotZero(t, d2)
}
+
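+// testToBeHashed is a minimal Hashable whose encoding is i bytes, each equal to byte(i).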
+type testToBeHashed struct {
+ i int
+}
+
+func (tbh *testToBeHashed) ToBeHashed() (protocol.HashID, []byte) {
+ data := make([]byte, tbh.i)
+ for x := 0; x < tbh.i; x++ {
+ data[x] = byte(tbh.i)
+ }
+ return protocol.HashID(fmt.Sprintf("ID%d", tbh.i)), data
+}
+
+func TestHashRepToBuff(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ values := []int{32, 64, 512, 1024}
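+ // start with a capacity smaller than the combined encodings so HashRepToBuff must grow the buffer through append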
+ buffer := make([]byte, 0, 128)
+ for _, val := range values {
+ tbh := &testToBeHashed{i: val}
+ buffer = HashRepToBuff(tbh, buffer)
+ }
+ pos := 0
+ for _, val := range values {
+ tbh := &testToBeHashed{i: val}
+ data := HashRep(tbh)
+ require.Equal(t, data, buffer[pos:pos+len(data)])
+ pos = pos + len(data)
+ }
+}
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index b28e35aba..b37f2001c 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -934,6 +934,86 @@
}
}
},
+ "/v2/participation/generate/{address}": {
+ "post": {
+ "tags": [
+ "private",
+ "participating"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Generate and install participation keys to the node.",
+ "operationId": "GenerateParticipationKeys",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "An account public key",
+ "name": "address",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "Key dilution for two-level participation keys (defaults to sqrt of validity window).",
+ "name": "dilution",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "First round for participation key.",
+ "name": "first",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "Last round for participation key.",
+ "name": "last",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An empty JSON object is returned if the generation process was started. Currently no status is available.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "503": {
+ "description": "Service Temporarily Unavailable",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
+ },
"/v2/participation/{participation-id}": {
"delete": {
"tags": [
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index ecb205869..c4fb9394c 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -5379,6 +5379,109 @@
"x-codegen-request-body-name": "participationkey"
}
},
+ "/v2/participation/generate/{address}": {
+ "post": {
+ "operationId": "GenerateParticipationKeys",
+ "parameters": [
+ {
+ "description": "An account public key",
+ "in": "path",
+ "name": "address",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "description": "Key dilution for two-level participation keys (defaults to sqrt of validity window).",
+ "in": "query",
+ "name": "dilution",
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "description": "First round for participation key.",
+ "in": "query",
+ "name": "first",
+ "required": true,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "description": "Last round for participation key.",
+ "in": "query",
+ "name": "last",
+ "required": true,
+ "schema": {
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "string"
+ }
+ }
+ },
+ "description": "An empty JSON object is returned if the generation process was started. Currently no status is available."
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Bad Request"
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "503": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Service Temporarily Unavailable"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Generate and install participation keys to the node.",
+ "tags": [
+ "private",
+ "participating"
+ ]
+ }
+ },
"/v2/participation/{participation-id}": {
"delete": {
"description": "Delete a given participation key by ID",
@@ -7089,5 +7192,6 @@
{
"name": "private"
}
- ]
+ ],
+ "x-original-swagger-version": "2.0"
}
\ No newline at end of file
diff --git a/daemon/algod/api/server/router.go b/daemon/algod/api/server/router.go
index b599194ab..e380cb026 100644
--- a/daemon/algod/api/server/router.go
+++ b/daemon/algod/api/server/router.go
@@ -19,6 +19,7 @@ package server
import (
"fmt"
+ "golang.org/x/sync/semaphore"
"net"
"net/http"
@@ -119,9 +120,10 @@ func NewRouter(logger logging.Logger, node APINodeInterface, shutdown <-chan str
// Registering v2 routes
v2Handler := v2.Handlers{
- Node: node,
- Log: logger,
- Shutdown: shutdown,
+ Node: node,
+ Log: logger,
+ Shutdown: shutdown,
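+ // KeygenLimiter allows at most one participation key generation request to run at a time (semaphore weight 1).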
+ KeygenLimiter: semaphore.NewWeighted(1),
}
nppublic.RegisterHandlers(e, &v2Handler, publicMiddleware...)
npprivate.RegisterHandlers(e, &v2Handler, adminMiddleware...)
diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go
index a17b5f1ca..1ffe766db 100644
--- a/daemon/algod/api/server/v2/generated/data/routes.go
+++ b/daemon/algod/api/server/v2/generated/data/routes.go
@@ -314,9 +314,10 @@ var swaggerSpec = []string{
"4ru9Z2L8LnQF0x21KUbBuSdr2Q4/lKqH++v3vu9Rs1Pdi23Q5F+M4F+M4IiMQNeSJ49ocH9hgSWoXG5d",
"TvMl7OIHw9syuOAnlYhlkF/sYBaujUGKV1x0eUUbqTV5/m5csybnfrCW5QIUc63yUaswInMr9MuGI/kz",
"j9FPwV7v6kP78f0f4n5/Qbk/z50dtzU+qCwZyIYKKB92lvgXF/hvwwVsixxq93VKNJSlCs++Fnj2rSvG",
- "1c3j1kU2kg90yhy2wnTn59MPnT+7CpFa1roQN8G3aFC33qCh7mAe1qr/9+kNZTqbC+lq5mHH7OHHGmh5",
- "6hpk9H5ta1IPnmCh7eDHMDst+uspdUpE7Fnle8pHH/YV2dhTp8glXvKhof5xa9QKjUTIPRvz0Lv3hndh",
- "K1zHWFubx/PTU8wVWAqlTycfpx969pDw4fuGXHwHt0kl2RpLlL//+P8DAAD//wKdD2Ya9wAA",
+ "1c3j1kU2kg90yhy2wnTn51NvQIjpkN03P3T+7KpOalnrQtwEs6Dp3fqNhlqGeVir/t+nN5TpbC6kq66H",
+ "vbWHH2ug5alrpdH7ta1ePXiCJbmDH8M8tuivp9SpG7Fnle8+H33YV3ljT53Kl3jJB5H6x635KzQnIZ9t",
+ "DEnv3hsuh01zHQturSPPT08xq2AplD6dfJx+6FlOwofvG8Lyvd4mlWRrLGb+fjrZZEKyBeO0zJxVou0H",
+ "NHly8mjy8f8HAAD//3CL32ln9wAA",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go
index 77f2fccbe..3fdcd1341 100644
--- a/daemon/algod/api/server/v2/generated/experimental/routes.go
+++ b/daemon/algod/api/server/v2/generated/experimental/routes.go
@@ -90,210 +90,210 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+y9e3MbN7Yg/lVQvLfKjx9b8iu5Y/1q6q5iJxmt7dhlKZm91/ImYPchiVET6AHQFBmv",
- "v/sWDoBudDdANiXFzlTtX7bYeBwcHADnfT5NcrGqBAeu1eTk06Sikq5Ag8S/aJ6LmuuMFeavAlQuWaWZ",
- "4JMT/40oLRlfTKYTZn6tqF5OphNOV9C2Mf2nEwn/rJmEYnKiZQ3TicqXsKJmYL2tTOtmpE22EJkb4tQO",
- "cfZy8nnHB1oUEpQaQvmWl1vCeF7WBRAtKVc0N58UuWZ6SfSSKeI6E8aJ4EDEnOhlpzGZMygLdeQX+c8a",
- "5DZYpZs8vaTPLYiZFCUM4XwhVjPGwUMFDVDNhhAtSAFzbLSkmpgZDKy+oRZEAZX5ksyF3AOqBSKEF3i9",
- "mpx8mCjgBUjcrRzYGv87lwC/Q6apXICefJzGFjfXIDPNVpGlnTnsS1B1qRXBtrjGBVsDJ6bXEXlTK01m",
- "QCgn7394QZ4+ffrcLGRFtYbCEVlyVe3s4Zps98nJpKAa/OchrdFyISTlRda0f//DC5z/3C1wbCuqFMQP",
- "y6n5Qs5ephbgO0ZIiHENC9yHDvWbHpFD0f48g7mQMHJPbOM73ZRw/q+6KznV+bISjOvIvhD8Suzn6B0W",
- "dN91hzUAdNpXBlPSDPrhUfb846fH08ePPv/bh9Psv92f3zz9PHL5L5px92Ag2jCvpQSeb7OFBIqnZUn5",
- "EB/vHT2opajLgizpGjefrvCqd32J6WuvzjUta0MnLJfitFwIRagjowLmtC418ROTmpfmmjKjOWonTJFK",
- "ijUroJia2/d6yfIlyamyQ2A7cs3K0tBgraBI0Vp8dTsO0+cQJQauG+EDF/TnRUa7rj2YgA3eBlleCgWZ",
- "FnueJ//iUF6Q8EFp3yp12GNFLpZAcHLzwT62iDtuaLost0TjvhaEKkKJf5qmhM3JVtTkGjenZFfY363G",
- "YG1FDNJwczrvqDm8KfQNkBFB3kyIEihH5PlzN0QZn7NFLUGR6yXopXvzJKhKcAVEzP4BuTbb/j/P3/5E",
- "hCRvQCm6gHc0vyLAc1FAcUTO5oQLHZCGoyXEoemZWoeDK/bI/0MJQxMrtahofhV/0Uu2YpFVvaEbtqpX",
- "hNerGUizpf4J0YJI0LXkKYDsiHtIcUU3w0kvZM1z3P922g4vZ6iNqaqkW0TYim7++mjqwFGEliWpgBeM",
- "L4je8CQfZ+beD14mRc2LEWyONnsaPKyqgpzNGRSkGWUHJG6affAwfhg8LfMVgOMHSYLTzLIHHA6bCM2Y",
- "022+kIouICCZI/Kzu9zwqxZXwBtCJ7MtfqokrJmoVdMpASNOvZsD50JDVkmYswiNnTt0mAvGtnE38Mrx",
- "QLngmjIOhbmcEWihwV5WSZiCCXfLO8NXfEYVfPss9ca3X0fu/lz0d33njo/abWyU2SMZeTrNV3dg45xV",
- "p/8I+TCcW7FFZn8ebCRbXJjXZs5KfIn+YfbPo6FWeAl0EOHfJsUWnOpawsklf2j+Ihk515QXVBbml5X9",
- "6U1danbOFuan0v70WixYfs4WCWQ2sEYFLuy2sv+Y8eLXsd5E5YrXQlzVVbigvCO4zrbk7GVqk+2YhxLm",
- "aSPthoLHxcYLI4f20JtmIxNAJnFXUdPwCrYSDLQ0n+M/mznSE53L380/VVWa3rqax1Br6Ng9yag+cGqF",
- "06oqWU4NEt+7z+aruQTAChK0bXGMD+rJpwDESooKpGZ2UFpVWSlyWmZKU40j/buE+eRk8m/Hrf7l2HZX",
- "x8Hkr02vc+xkWFbLBmW0qg4Y451hfdSOy8Jc0PgJrwl77SHTxLjdRENKzFzBJawp10etyNK5D5oD/MHN",
- "1OLbcjsW3z0RLIlwYhvOQFkO2Da8p0iAeoJoJYhWZEgXpZg1P9w/raoWg/j9tKosPpB7BIaMGWyY0uoB",
- "Lp+2Jymc5+zlEfkxHBtZccHLrXkcLKth3oa5e7XcK9boltwa2hHvKYLbKeSR2RqPBsPm3wXFoVixFKXh",
- "evbSimn8N9c2JDPz+6jO/xokFuI2TVwoaDnMWRkHfwmEm/s9yhkSjlP3HJHTft+bkY0ZJU4wN6KVnftp",
- "x92BxwaF15JWFkD3xb6ljKOQZhtZWG95m4686KIwB2c4oDWE6sZnbe95iEKCpNCD4btS5Fd/o2p5B2d+",
- "5scaHj+chiyBFiDJkqrl0STGZYTHqx1tzBEzDVHAJ7NgqqNmiXe1vD1LK6imwdIcvHG2xKIe++GlBzIi",
- "u7zF/9CSmM/mbJur3w57RC7wAlP2ODsjQ2GkfSsg2JlMA9RCCLKyAj4xUvdBUL5oJ4/v06g9+t7qFNwO",
- "uUU0O3SxYYW6q23CwVJ7FTKoZy+tRKdhpSJSW7MqKiXdxtdu5xqDgAtRkRLWUPZBsFcWjmYRIjZ3fi98",
- "JzYxmL4Tm8GdIDZwJzthxkG+2mN3D3wvHWRC7sc8jj0G6WaBhpdXeD3wkAUys7Ta6tOZkDe7jnv3LCet",
- "Dp5QM2rwGk17SMKmdZW5sxnR49kGvYFas+fuW7Q/fAxjHSyca/oHYEGZUe8CC92B7hoLYlWxEu6A9JfR",
- "V3BGFTx9Qs7/dvrN4ye/PvnmW0OSlRQLSVdkttWgyH0nrBKltyU8GK4MxcW61PHRv33mNbfdcWPjKFHL",
- "HFa0Gg5lNcKWJ7TNiGk3xFoXzbjqBsBRNyKYp82inVhjhwHtJVOG5VzN7mQzUggr2lkK4iApYC8xHbq8",
- "dpptuES5lfVdyPYgpZDRp6uSQotclNkapGIiYl5651oQ18Lz+1X/dwstuaaKmLlRF15z5LAilKU3fPy9",
- "b4e+2PAWNztvfrveyOrcvGP2pYt8r1pVpAKZ6Q0nBczqRUc0nEuxIpQU2BHf6B9BW76FreBc01X1dj6/",
- "G9lZ4EARGZatQJmZiG1huAYFueDWNWSPuOpGHYOePmK8zlKnAXAYOd/yHBWvd3Fs05L8inG0AqktzwOx",
- "3sBYQrHokOXtxfcUOuxU91QEHIOO1/gZNT8vodT0ByEvWrbvRynq6s6ZvP6cY5dD3WKcbqkwfb1SgfFF",
- "2XVHWhjYj2Jr/CoLeuGPr1sDQo8U+ZotljqQs95JIeZ3D2Nslhig+MFKqaXpM5RVfxKFuUx0re6ABWsH",
- "a284Q7fhvUZnotaEEi4KwM2vVZw5SziwoOUcDf465Pf00gqeMzDUldParLauCJqzB+9F2zGjuT2hGaJG",
- "JYx5jRXWtrLTWeeIUgIttmQGwImYOYuZs+XhIina4rVnbxxrGLkvOnBVUuSgFBSZ09TtBc23s0+H3oEn",
- "BBwBbmYhSpA5lbcG9mq9F84r2GboOaLI/Ve/qAdfAV4tNC33IBbbxNDb6D2cWXQI9bjpdxFcf/KQ7KgE",
- "4t8VogVysyVoSKHwIJwk968P0WAXb4+WNUg0UP6hFO8nuR0BNaD+wfR+W2jrKuEP6cRbw+GZDeOUC89Y",
- "xQYrqdLZvmvZNOrI4GYFwU0Yu4lx4ATj9ZoqbY3qjBeoC7TPCc5jmTAzRRrgpBhiRv7FSyDDsXPzDnJV",
- "q0YcUXVVCamhiK2Bw2bHXD/BpplLzIOxG5lHC1Ir2DdyCkvB+A5ZdiUWQVQ3tifndTJcHFpozDu/jaKy",
- "A0SLiF2AnPtWAXZDn7AEIEy1iLaEw1SPchpHtOlEaVFV5rbQWc2bfik0ndvWp/rntu2QuKhu3+1CgEJX",
- "NNfeQX5tMWu9AZdUEQcHWdErw3ugGsRa/4cwm8OYKcZzyHZRPop4plV4BPYe0rpaSFpAVkBJt8NBf7af",
- "if28awDc8VbcFRoy69YV3/SWkr0XzY6hBY6nYswjwS8kN0fQiAItgbjee0YuAMeOXU6Oju41Q+Fc0S3y",
- "4+Gy7VZHRsTXcC202XFHDwiyu9HHAJzAQzP0zVGBnbNW9uxP8V+g3AQNH3H4JFtQqSW04x+0gIQO1XnM",
- "B+eld733buDotZm8xvbcI6kjm1DovqNSs5xVKOu8gu2di379CaJ2V1KApqyEggQfrBhYhf2JdUjqj3kz",
- "UXCU7m0I/kD5FllOyRSyPF3gr2CLMvc76+kaqDruQpaNjGreJ8oJAur95wwLHjaBDc11uTWMml7CllyD",
- "BKLq2YppbT3Yu6KuFlUWDhC1a+yY0Vk1ozbFnWbWcxwqWN5wK6YTKxPshu+iJxh00OFkgUqIcoSGbICM",
- "KASjHGBIJcyuM+dM792pPSV1gHSXNpq0m+f/nuqgGVdA/kvUJKccRa5aQ8PTCImMAjKQZgbDgjVzOleX",
- "FkNQwgqsJIlfHj7sL/zhQ7fnTJE5XPsIFNOwj46HD1GP804o3Tlcd6APNcftLPJ8oMHHPHxOCunfKftd",
- "LdzIY3byXW/wxkpkzpRSjnDN8m99AfRO5mbM2kMaGedmguOOsuV0TPbDdeO+n7NVXVJ9F1YrWNMyE2uQ",
- "khWw9yZ3EzPBv1/T8m3TDaNrIDc0mkOWY0zIyLHgwvSxYSRmHMaZOcDWhXQsQHBme53bTntEzNZLj61W",
- "UDCqodySSkIONnrCcI6qWeoRsX6V+ZLyBQoMUtQL59hnx8ELv1ZWNSNrPhgiylTpDc9QyR17AJwztw+g",
- "MewUUCPS9TXkVoC5ps18LmZqzMsc7EHfYhA1kk0nSYnXIHXdSrwWOd0ooBGPQYffC/DTTjzSlIKoM7zP",
- "EF/htpjDZDb3j1HZt0PHoBxOHLgath9T3oZG3C63d8D02IGIhEqCwicqVFMp+1XMw4g/94aprdKwGmry",
- "bddfE8fvfVJeFLxkHLKV4LCNBrkzDm/wY/Q44TOZ6IwMS6pvXwbpwN8DqzvPGGq8LX5xt/sntG+xUj8I",
- "eVcmUTvgaPZ+hAVyr7ndTXlTOykty4hp0cUD9S8ANW3yDzBJqFIiZ8iznRVqag+as0a64KEu+t81Xs53",
- "cPb64/ZsaGGoKeqIoawIJXnJUIMsuNKyzvUlp6ijCpYacX7ywnhaa/nCN4mrSSNaTDfUJafo+NZorqIO",
- "G3OIqGl+APDKS1UvFqB0T9aZA1xy14pxUnOmca6VOS6ZPS8VSPRAOrItV3RL5oYmtCC/gxRkVusu94/h",
- "bkqzsnQGPTMNEfNLTjUpgSpN3jB+scHhvNHfH1kO+lrIqwYL8dd9ARwUU1ncSetH+xUdit3yl865GNMT",
- "2M/eWbONv52YZXZC7v/3/f88+XCa/TfNfn+UPf//jj9+evb5wcPBj08+//Wv/6f709PPf33wn/8e2ykP",
- "eywYy0F+9tJJxmcvUfxpbUAD2L+Y/n/FeBYlstCbo0db5D4GHjsCetBVjuklXHK94YaQ1rRkhblbbkIO",
- "/RdmcBbt6ehRTWcjesowv9YDhYpb3DIkcsn0rsYbc1FDv8Z42CMaJV0kI56Xec3tVnru20b1eP8yMZ82",
- "oa02680JwbjHJfXOke7PJ998O5m28YrN98l04r5+jFAyKzaxqNQCNjFZ0R0QPBj3FKnoVoGO3x4Ie9SV",
- "zvp2hMOuYDUDqZas+vI3hdJsFr/hfKyE0zlt+Bm3jvHm/KCJc+ssJ2L+5eHWEqCASi9j2TA6jBq2ancT",
- "oOd2UkmxBj4l7AiO+jqfwsiLzqmvBDrHrAwofYox0lBzDiyheaoIsB4uZJRiJUY/vbAA9/irOxeH3MAx",
- "uPpzNvZM/7cW5N6P31+QY3dhqns2QNoOHYS0RkRpF7XVcUgyt5nNAWSZvEt+yV/CHLUPgp9c8oJqejyj",
- "iuXquFYgv6Ml5TkcLQQ58YFgL6mml3zAaSXTdAUheKSqZyXLyVUokLTkaVOvDEe4vPxAy4W4vPw48M0Y",
- "ig9uquj9YifIDCMsap25xBGZhGsqY7Yv1SQOwJFtZphds1omW9RWQeoTU7jx43cerSrVDyAeLr+qSrP8",
- "gAyVC481W0aUFtLzIoZBsdDg/v4k3MMg6bXXq9QKFPltRasPjOuPJLusHz16CqQTUfube/INTW4rGK1d",
- "SQY495UquHArVsJGS5pVdBEzsV1eftBAK9x95JdXqOMoS4LdOpG83jEfh2oX4PGR3gALx8FRibi4c9vL",
- "JwmLLwE/4RZiG8NutIb/m+5XENt74+3qxQcPdqnWy8yc7eiqlCFxvzNN7qCFYbK8N4ZiC5RWXZqlGZB8",
- "CfmVy38Dq0pvp53u3uHHMZr+6mDKZkaykXmYmwMNFDMgdVVQx4pTvu0nSVCgtXcrfg9XsL0QbWqPQ7Ii",
- "dIP0VeqgIqUG3KUh1vDYujH6m++8ylCwryof645Bj54sThq68H3SB9myvHdwiGNE0QkiTyGCyggiLPEn",
- "UHCDhZrxbkX6seUZKWNmX75IliR/9xPXpBWenANYuBrUutvvK8A0a+JakRk1fLtwGcJsIHpwi9WKLiDB",
- "IYc2opHh3h27Eg6y792LvnRi3n/QBu9NFGTbODNrjlIKmC+GVFCY6bn9+ZmsGdJZJjDxp0PYrEQ2qfGP",
- "tJcOlR1bnc1kmAItTsAgectweDC6GAk5myVVPnkZ5njzZ3kUD/AHJlbYlU7nLPBYCxK5Ncly/J3bP6cD",
- "6dIl1fGZdHz6nFC0HJEKx3D46CQf2w7BkQEqoISFXbht7AmlTfLQbpCB4+18XjIOJIs5vwVq0OCZcXOA",
- "4Y8fEmI18GT0CDEyDsBG8zoOTH4S4dnki0OA5C5JBfVjo2E++Bvi4WPWHdywPKIyVzhLWLVyfwNQ5zHZ",
- "vF89v10chjA+JeaaW9PSXHNO4msHGWR1Qba1l8PFOXg8SLGzOwwg9mE5aE32KbrJakKeyQMdZ+h2QDwT",
- "m8zGj0Y53tlmZug96iGP0ayxg2nz59xTZCY26DSET4v1yN4DSxoOD0Yg4W+YQnrFfqnX3AKza9rd3FSM",
- "ChWSjFPnNeSSYifGTJ3gYFLkcj9IiXMjAHrKjja/tBN+9wqpXfZk+Ji3r9q0TfXmg49ixz91hKK7lMDf",
- "UAvTJLF51+dYonqKru9LN39PwELGiN5cE0MjzdAUpKAEFAqyDhOVXcUsp0a2AXxxzn23QHmBWYIo3z4I",
- "HKokLJjS0CrRvZ/E11BPUkxOKMQ8vTpdyblZ33shmmfKmhGxY2eZX3wF6JE8Z1LpDC0Q0SWYRj8oFKp/",
- "ME3jvFLXZcum8mVF/G7Aaa9gmxWsrOP06uZ99dJM+1NzJap6hvct49ZhZYapp6OOnDumtr6+Oxf82i74",
- "Nb2z9Y47DaapmVgacunO8S9yLno3767rIEKAMeIY7loSpTsuyCAAd3g7BnxTYOM/2qV9HRymwo+912vH",
- "hwGn3ig7UnQtgcJg5yoYmokMW8J0kLl5GBmbOAO0qlix6elC7ahJiZkepPDw+e56WMDddYPtwUDXLy/q",
- "5tzJFei8/5zO5xgZ5GPDwll3QOfrBhKlHBsTWtQSlWodZ7thYsqGsRu59le/nGsh6QKcYjSzIN1qCFzO",
- "IWgI0j4qopm1cBZsPodQIahuoszqANdX+0SLO4wgsrjWsGZcf/ssRkZ7qKeFcT/K4hQToYWUmehiqHj1",
- "bFUgdzaVS4KtuYH2NBpB+gq22S9GQiEVZVK1HmNOE9q9/w7Y9fXqFWxx5L2OWAawPbuCYup7QBqMqQWb",
- "TzZwohGBwhymmPShs4UH7NRpfJfuaGtc1tk08bdu2Z2srN2l3OZgtHY7A8uY3TiPm8vM6YEu4vukvG8T",
- "WEIZF5JjwHKFUzHla/QMn6ImPHof7V4ALT3x4nImn6eT2xmnYq+ZG3EPrt81D2gUz+j8ZI0VHVvzgSin",
- "VSXFmpaZM+GlHn8p1u7xx+be4veFmck4ZV98f/r6nQP/83SSl0Bl1ghjyVVhu+pfZlU2T+3upwQ5Fq8V",
- "scJ6sPlNcs3Q7He9BFdMIZD3B1mfW5NucBSdGXAe98Hce/c567Nd4g4rNFSNEbo1kFgbdNfuTNeUld4y",
- "4aFN+Evi4salDo/eCuEAt7ZfB24I2Z1eN4PTHT8dLXXtuZNwrreYLS0ucXCXSw2vImePpnfOPf0gZOfy",
- "d8EyUXv2H8dWGSbb4jHhPugL9PSZqSNiGa/fFr+Z0/jwYXjUHj6ckt9K9yEAEH+fud9Rvnj4MGpqiGoS",
- "zCWBigJOV/CgcfxNbsSXVTtxuB73QJ+uVw1nKdJk2FCoNUx7dF877F1L5vBZuF8KKMH8tD+2rrfpFt0h",
- "MGNO0HkqOKbxe1rZmkCKCN5388O4LENaeNmvKGY9t5ab4RHi9QqtHZkqWR63A/OZMtcrt/49pjHBxgmF",
- "mRmxZgl3MV6zYCzTbEwavx6QwRxRZKpoJsEWdzPhjnfN2T9rIKwwUs2cgcR3rffUeeEARx0wpEb0HM7l",
- "BrZeBO3wt9GDhBn/+zwjArFbCRJ6Ew3Afdmo9f1CG6tZKzMd6pQYzji4uHc4FDr6cNRsAyyWXa+gcXLM",
- "mNqQ/qJzpQcSc0RrPTKVzaX4HeK6aFThR2KzfY0Dhp64v0MonoUVzjpXSmOBaktWtrPv2+7xsnFq428t",
- "C/tFN2UVbvKYxk/1YRt5E6FXxTOIOiSnhLDQHNn1Vk1cLXi8Av8szGjvXRUot+fJBiZ3gh7ipzIMLzq2",
- "47en0sE8CMkq6fWMxtL9G1nIwBRsb8epQgviO/sNUE3YrZ2dBE6FTVtmkxtVINvcFMNEiTeUa+y0oyWa",
- "VoBBigpFl6l1BCuViAxT82vKbZlE08/eV663AmsFNb2uhcTUZCru/1FAzlZRdezl5YciH9r6C7ZgtgJg",
- "rSAoMecGstVVLRW5Mn1NMLlDzdmcPJoGdS7dbhRszRSblYAtHtsWM6rwuWwskk0Xszzgeqmw+ZMRzZc1",
- "LyQUeqksYpUgjeyJTF7jxTQDfQ3AySNs9/g5uY/+W4qt4YHBomOCJiePn6P13f7xKPbKugqOu67sAu/s",
- "v7s7O07H6MBmxzCXpBv1KJrFyZZwTr8OO06T7TrmLGFL96DsP0sryukC4i7Dqz0w2b64m2hR7eGFW2sA",
- "KC3FljAdnx80NfdTIgzRXH8WDJKL1YrplfPyUWJl6KmtH2cn9cPZYqau9IeHy39EZ7nK+wr1dF1fWIyh",
- "q0QYAbo0/kRX0EXrlFCbj65krRurL0hEzny6S6yF0pRAsbgxc5mlIy+JXq1zUknGNeo/aj3P/mLEYklz",
- "c/0dpcDNZt8+i9QU6abd54cB/sXxLkGBXMdRLxNk73kW15fc54JnK3OjFA/asN/gVCa9+uL+Wyknst1D",
- "j+V8zShZktzqDrnR4Ka+FeHxHQPekhSb9RxEjwev7ItTZi3j5EFrs0M/v3/tuIyVkLEc1u1xdxyHBC0Z",
- "rDGII75JZsxb7oUsR+3CbaD/ui4onuUM2DJ/lqOCQGDR3BW/abj4X960yXjRsGqDY3o6QCEj2k6nt/vC",
- "Dl+Had369lvrs4PfEpgbjTZb6X2AlYSrrvXFbfp84XDeqLrX7nlH4fj4NyKNDI58/MOHCPTDh1PHBv/2",
- "pPvZXu8PH8ZzYkZVbubXFgu3kYixb2wPvxMRBZgvQNU4FLmQ3YgCMvVImQ/mEpy5oaakW+zny3MRdxMM",
- "Enf4i5+Cy8sP+MXjAf/oI+IrX5a4ga1Lc/qwd4udRUmmaL4HrsaUfCc2Ywmn9wZ54vkToCiBkpHqOVzJ",
- "oJhb1Fy/118koFEz6gxKYYTMsE5FqM//18GzWfx0B7ZrVha/tOmGeg+JpDxfRh01Z6bjr23R9WaJ9qqM",
- "pr5fUs6hjA5nZdtfvQwckdL/IcbOs2J8ZNt+MUG73N7iWsC7YHqg/IQGvUyXZoIQq91MLk2kcLkQBcF5",
- "2jzr7eU4rMoZlAr7Zw1Kx44GfrDRSmjsMpevrVRFgBeo/ToiP2JOBQNLJ4kuap18esJuqq66KgUtppg2",
- "8eL709fEzmr72NLBtlLWApUu3VVEteTjU5c1VYDjMfnjx9kdJGxWrXTWFLaKZT0yLdrSW6znOoHqmBA7",
- "R+Sl1YQpr2exkxBMvilXUAR1tKwshjRh/qM1zZeoYuo8ZGmSH1/izVNlq4AP6kU3dRXw3Bm4XZU3W+Rt",
- "SoRegrxmCjAKE9bQTbTUZB1zKk6feKm7PFlzbinl6ACeoqmicCjaPXCWIfG24ShkPcQfqGCwFRIPrXh3",
- "jr2iaZ775fN6xluftqepA/zG6YhzygVnOSZZjjFEmBRmnLVpRD7quJlITdwJjRyuaNG+Jv7LYTFZxs9f",
- "hA5xQ8tt8NVsqqUO+6eGjSvmsgCt3M0GxdTXnnR2DcYVuDoZhojCe1LIiG9K1J+9sYMfSEaY7yGhqPrB",
- "fPvJqTExEPqKcVRYOLQ5NttaHkrF0MDICdNkIUC59XSTXqkPps8R5n8qYPPx6LVYsPycLXAM6w1llm1d",
- "/4ZDnXpHQOd4Z9q+MG1dVt7m545Xj530tKrcpOnKpPFyzBueRHDM/cT7AwTIbcYPR9tBbjs9ePE9NYQG",
- "a3Q+ggrf4QFhNFU6eyWxjYhgKQpbEBubFE3Nx3gEjNeMe0tY/IHIo08Cbgye10Q/lUuqLQs46k67AFom",
- "/Ngx1s+aUm87VD8nsUEJrtHPkd7GtsBo4uJoGrSMG+Vb4g+Foe6AmXhBy8YDNlIuFLkqx0QVGCPSKyAa",
- "uzjMxe1LFHcfgD1Vyadtd8zzfehLlMp+NKuLBeiMFkWsbMl3+JXgVx/rAxvI66a8RVWRHJN9drOfDqnN",
- "TZQLrurVjrl8g1tOF1TkjVBDWBXY7zBmV5ht8d9D6sU3vq8Hx7d5R9fisJS/w3i9GNdraDpTbJGNxwS+",
- "KbdHRzv1zQi97X+nlF6KRReQr6EkTdxy4R7F7rfvzcMRpgQcuBnbp6XJ2IcuvQK/+yQXTa6p7q2ET9mg",
- "ggkar5s67bvVEOmK61N8/BIxpaHK276vVg2ciizNk4HQVLuULJqSnVdQMs2FdfnsKdGHlqCUm6f18rw7",
- "5bNb606Epk0wrzoGF+vq014WSUPLzWwh7QYfagx5tU4FG/sM4Pi9X5H5ClyetkrCmonaO9F4V1YvEtpf",
- "O/WNm3Dv6PqjDuJfW/mcVJVfuMp4dplOJn/1izWmEeBabv8EivPBpg9qPQ+5XaueapuQpqjSqCJLnVdx",
- "THb8WCJ2xxt2qk3vqZU9IKuXY9iBYe3r6eSsOOjBjCXzn9hRYscuXsk6neu4zW+MR6wSirW1zWIlrkf6",
- "jF9gleogV/NwLO9LuIZcY0G71kdKAhySudlM5nX3/y/ncVqcblzrXarjXfmNh1Xs9rzxgxQkQRodWwHs",
- "aHw239PGE9YG8lxThbnvJeq4u6GvowPw5nPINVvvSfny9yXwIJ3I1OtlEJZ5kAGGNeEomDH0cK1jC9Cu",
- "jCw74Qky998anFQ48hVs7ynSoYZoSbImFusmySIRA3g7ZIZEhIp5mllFsnP+YaqhDMSC9+y03aFNu52s",
- "ZhwkMLrhXJ4kzcPRJjXaMWW8nOqouUzXg1J9YWRFKivMsBpjWv54icUvlfNzok2yyVBKJ2fDlPzXLlkl",
- "JuhpbCc+bSUo/5vPxmVnKdkVhPWW0VJ1TWXhW0RVL16rk+14jwapXHwlwT7Q82Zm1vrhD23VkSTPGNKS",
- "l8KwEVkqLqjr+t74jd1T1sGvzcOCcM1Burr0yP+WQkGmhffb3wXHLlRYL8YbIUElCytY4JLpTt+3+Vyx",
- "wAzF9KbUOS+GCyQSVtRAJ4Osq+k5dyH7hf3uY6l9gZG9GqaGXvdXuvMRGEwNkBhS/Zy413J/jPZNlE2M",
- "c5CZtzz1U7BykF1rSCVFUef2gQ4PRqOQG50CZcdVEtXT5MNV9mSEINb5CrbHVgjyJQL9DoZAW87Jgh6k",
- "7utt8p2q31QM7sWdgPc1NVfTSSVEmSWMHWfDvLF9ir9i+RUUxLwU3lM5Uf2V3Ecde2PNvl5ufZ7UqgIO",
- "xYMjQk65jQ3xhu1u4aLe5Pye3jX/BmctapvK2SnVji553MkekyzLW95mfpjdd5gCc9Xdcio7yJ6spJtE",
- "zlpJryO1kI/GSuVDU3O/Pm1LVBaKGE9ybi1WL/CgxxRHGMkepFxAQyYlztJFVCliLpk3ibY3Q8UxFU6G",
- "AGngY4K+Gyjc4FEERCuuRk6hzWDmcpeJOZHQGpFvmsRtWBw2JtH3Z25m6d53cyGhU+bV9Bay8CwPU209",
- "ZipnTEsqtzdJtTYoTjvQniSxvNcdq/HEahfSemMNcViW4jrDyyprcpvHRFvTTnUfY1/Ope1nTvUMAr8u",
- "qhyjtiVLWpBcSAl52CMetmehWgkJWSnQzStmgZ5rw3evMFaHk1IsiKhyUYCtERCnoNRcNecU2SYIvGqi",
- "KLC0g0Gftk9AxyOnvKvKyDY5j110Zm2ZCcdTUC4Zj8OQbTyEd0dV4YOy85/NUSPE0NelG3ttuc+wtjIc",
- "WFqZlaVXGKSqK5OfVY3uSBh4Y6Z4RlZCaSfZ2ZFUM1Tr4nU/F1xLUZZdJZBliRdOs/2Gbk7zXL8W4mpG",
- "86sHKEdyoZuVFlMfltp3xmtnkr2MTCPLQF8sI3penMWfuoNrPbub4+ASrQGYH/ffWPt13KexUtbddfVr",
- "s/NE7kwtViyP0/C/lndb0ictdiVEUz3ZKkk2OB+b4UUdPg6NMwNeSUM0AzcEG9svd6c5oy5eHua/yPH2",
- "xyVzcI9E4mEa3pOOa8nyJG/VAwAhtRGjupa2tFLI+TS3iljYCHM0SfcBHXmLo+fP7WAzI9w5UBpuBdTA",
- "27AB8L4V9qc2JZf1XJyJjf/+oM3ZdSPgP++m8lg5+sgpbkjLVcv3+T0SN0I8M/BO/yMsHO5f0P1eSE0Z",
- "vJEvagBA2i+pA8Mo76RDwZhTVkKRUZ143FEnNA0kWxfR0i9uypS7yXNqH+wlEDN2LcHlm7Asda8YekUN",
- "KYmm+VBzywvYgMJkELaiM1XWzuDtHVDaslI94VtUWQlr6LhruSQYNbJ2bA2+r2o6kwKgQutfXycV80MK",
- "3/KeosKtPQs8WcZgN6q5sIi1O0X2qCWiSpQNz+wxUWOPkoFozYqadvCnDmU5umo3c5QjqBrw5JmX28ZO",
- "87Md4b0f4NT3j7EyHhMfx91DB19BcdTtuoD2+iXWKnXqedwtMczw0hg0cLaiMXxaEm/vDVXRa55WAA5J",
- "vhVvRu4TEzxA7PcbyJGr6frd3R4nBAcjqpe9KcmCy2aHb65I/io0vJOEk+PFRA0FeMHu1NR4unAMOzbA",
- "cpbcsL2Ga8YSUu7+d/ffFCvw24GMXG0rWoUS3EvwFjtMKN0YKxxDy5oHzfsXTl0+wb5QzgLP6hXdEiHx",
- "HyOv/bOmJZtv8YRa8H03opbUkJAzEVrbtfNXNBPvZkymHjCvFxB+KrtuNnbMYLitGSUA2jyBTjmFmYGu",
- "INwGNMvbmyfX5spR9WzFlMLHrredQyy4xfucECtahDIyZqbrlhL1uUpN7/+/jdoKp/IJpaqS5r5+GRBF",
- "Vz2FuK1R6IlLL2G1O6xvKB57EmjqHrZEK304b3ED5d6BnhsxX/lUvYcO2IN6cINSF7daxiEFitvI6B0B",
- "kaOWcte7MNY/ZAA0Gpl9Vq894NtsjD4D2JfAfzRpZGoZY8D/s+A9UUYvhNdWzPsCWO6E/EdgtXrVmdhk",
- "EuZqnyuEVawaQVi2yQK8cpLxXAJV1jfk7K0T2dqciIwbEdJ6LzbWt2aUAuaMt5cl41WtIxIApkbk2wBh",
- "oXoa0Zow9qS4BMOGrWn5dg1SsiK1ceZ02DJeYU56r5J3fSPCf/OmDgdgqpV+MJIQ2ki1oJl5wG3VG+tY",
- "qDTlBZVF2JxxkoM07z65plt1c9uHgVbWhr/YY/2gATfTjW8P7CBI2haQcuvMl7e0TDQA0js0UYwwLaAH",
- "a8SsYJUiWiQsCUMY4mkV6CYrxQLjyxIE6JJPou3HCiuCo8LW8kOHzaPY77B7Gsy77Q6+FjjrmCl2n7O3",
- "iDoUeH7mTO88aVab1g/4sx6Z9iB4+ueL1i3cbs6Q/mMxmhcYxNCJ0+wXnfd7bd1D7HyQsGR0NbiJXUQD",
- "uQvwDdW14+sZdW3wsUhQK8NmKNuqHY7foFonZ5o7x52h0mcgFFukTF0c7YE6IatJ9u9AAjxbqdadre60",
- "jTOFGeeQIlC7I2ezSlRZPsYb0KbmL5xC20HahTFBH4G6OrHuxnFCNcUqOolNOlUrDq2Dlayasc8uU+W7",
- "hOyUQiNxg3aV5WKOdxkeYavGwRiPRnkx7UcfdRU2zSVBKJGQ1xIVmtd0u7+uUCIl7PnfTr95/OTXJ998",
- "S0wDUrAFqDatcK8uT+sxxnhfz/JlfcQGy9PxTfBx6RZx3lLmw22aTXFnzd62qs0ZOKhKdIgmNPIARI5j",
- "pB7MjfYKx2mdvv9c2xVb5J3vWAwFf8yeOc/W+AJOuZNfxJzsvjO6Nf90/L4wzH/kkfJbe4MFpvSx6bjo",
- "m9Bjq5D901BhJND7zmivWe4fQXFRLvNm5XNHgTYM+o2QBwKQiObrxGGF1bXbfJXS6nZRC+wNZv1H7E1r",
- "SNvrdo6Q+A57wAvD89p2jae0A+crJ3580yAlWMrHFCV0lr8v4s8tsLU8BlvkRF2tQdlrSQyZiyCcU71o",
- "oiQTvO0gmBJLaRv5piwjQZhW+sYzFRKOYSzlmpZf/tbAGuuniA8o3qdDL8JIvBDJFpXqZnnAXtNRcwdR",
- "d3c3NX+HgZ9/B7NH0XfODeWMjoPXDHUnWNh44V8FG0tKrnFM61Ty+FsycznZKwk5U31jprU4BV6Ba5Bs",
- "7hz4YKP3RLrtW+cvQt+CjOfe84D8FBglBCp/WgjbI/qVL5XEyY1SeYz6BmQRwV/sjgprOO55Lm6Zv/tm",
- "aSWCBFEHppUYVqccuzybOsE8OrWC4TpHv9Yd3EYe6nZtY3OijE4Dfnn5Qc/GpDKJp+w23TGXyp3k7j4o",
- "c/cfkEXF4siN4eaNUcwvqbyaNndkIoVrbz9qVu51M+gk5P08nSyAg2IKU87+6koMfNm31ENgI7uHR9XC",
- "ept0FBYxkbV2Jg+mClLtjsiy67pFcupi1FReS6a3WF7Sq2HYr9F8Lz82uQNc7onGAuLePi2uoCnx22Ya",
- "qJV/XX8UtMT3yBpmuHmFRHlEvt/QVVU6pSL5673Zf8DTvzwrHj19/B+zvzz65lEOz755/ugRff6MPn7+",
- "9DE8+cs3zx7B4/m3z2dPiifPnsyePXn27TfP86fPHs+effv8P+6Ze8iAbAH1GaBPJv8rOy0XIjt9d5Zd",
- "GGBbnNCKvQKzNygrzwWWPzNIzfEkwoqycnLif/of/oQd5WLVDu9/nbgyHpOl1pU6OT6+vr4+CrscLzC0",
- "ONOizpfHfh4sStXhV96dNT7J1nsCd7TVQeKmOlI4xW/vvz+/IKfvzo5agpmcTB4dPTp67Cqgclqxycnk",
- "Kf6Ep2eJ+37siG1y8unzdHK8BFpiJg7zxwq0ZLn/JIEWW/d/dU0XC5BH6HZuf1o/OfZsxfEnF2L9ede3",
- "49Awf/ypE4le7OmJRuXjT74O4u7WnRp4zp8n6DASil3NjmdY+2BsU1BB4/RSUNhQx5+QXU7+fux0HvGP",
- "KLbY83Ds0zXEW3aw9ElvDKx7emxYEawkpzpf1tXxJ/wPUm8AtE3ld6w3/Bjtb8efOmt1nwdr7f7edg9b",
- "rFeiAA+cmM9tfchdn48/2X+DiWBTgWSGLbTpM5ytsTl0Z8XkZPJ90OjFEvKrCdaUQs8vPE1PHj2K5DkN",
- "ehF7uOmshMKczGePno3owIUOO7mwnmHHn/kVF9ecYFY8e9PXqxWVW+SgdC25Im9fETYn0J+CKT8D3i50",
- "odDCUM9Klk+mkw56Pn52SLNZoI6xitK2xaX/ecvz6I/Dbe5kwEn8fPyp82f3rKhlrQtxHfRFWcsqCobz",
- "mY+16v99fE2ZNtyTS6eCxRSHnTXQ8tjlTu792qYrHHzBHIzBj6HjcvTXY+oQOKmEihDje3odKEhPsbFl",
- "MUDp7wTe1RNXbqWX6uN4k80YR7r4NGnryLcslv04lNEGb5WRONEi7bVUw1BojMeUgha5kf218GnIJyE/",
- "pGUNn6OHCQ/Jox1rcW/QZFw9/G7CyMiKvqMF8WGsGXlDS4MVKMipe8g7S7NH+PGXg+6MW6dKc2QtL/N5",
- "OvnmS+LnjBu2m5b+kjHTP/1y05+DXLMcyAWsKiGpZOWW/Mwbv9AbX48/IHFKml8hy9UQrHVikPS662oq",
- "42GC3Sz7PmoUiN6QJeVF6QKrRI0FOg1loVZZBNYx86z4KhOVkAiATd8Dhc27oI7I+dKrmjC21Do1Y7Gc",
- "NZSiQrUPJqWzk1COaeBxNeH13r3VjQxpDvECeOaukWwmiq2veS3ptd7YGKnBXdUUL49+7PNcsa+O50g0",
- "8l5M/nMrf4XyzOTkQyDJfPj4+aP5JtfobvHhU8Cenxwfo1vrUih9PPk8/dRj3cOPHxuE+WJDk0qyNWbT",
- "/fj5/wYAAP//423gG8XxAAA=",
+ "H4sIAAAAAAAC/+y9e3MbN7Yg/lVQvLfKjx9bkh/JnehXU3cVO8loY8cuS8nsvZY3AbsPSYyaQA+Apsh4",
+ "/d23cAB0o7sBsikpdqZq/7LFxuPg4AA47/NxkotVJThwrSanHycVlXQFGiT+RfNc1FxnrDB/FaByySrN",
+ "BJ+c+m9Eacn4YjKdMPNrRfVyMp1wuoK2jek/nUj4Z80kFJNTLWuYTlS+hBU1A+ttZVo3I22yhcjcEGd2",
+ "iPOXk087PtCikKDUEMo3vNwSxvOyLoBoSbmiufmkyA3TS6KXTBHXmTBOBAci5kQvO43JnEFZqCO/yH/W",
+ "ILfBKt3k6SV9akHMpChhCOcLsZoxDh4qaIBqNoRoQQqYY6Ml1cTMYGD1DbUgCqjMl2Qu5B5QLRAhvMDr",
+ "1eT0/UQBL0DibuXA1vjfuQT4HTJN5QL05MM0tri5Bplptoos7dxhX4KqS60ItsU1LtgaODG9jsjrWmky",
+ "A0I5eff9C/Ls2bNvzEJWVGsoHJElV9XOHq7Jdp+cTgqqwX8e0hotF0JSXmRN+3ffv8D5L9wCx7aiSkH8",
+ "sJyZL+T8ZWoBvmOEhBjXsMB96FC/6RE5FO3PM5gLCSP3xDa+100J5/+iu5JTnS8rwbiO7AvBr8R+jt5h",
+ "Qfddd1gDQKd9ZTAlzaDvT7JvPnx8Mn1y8unf3p9l/+3+/OrZp5HLf9GMuwcD0YZ5LSXwfJstJFA8LUvK",
+ "h/h45+hBLUVdFmRJ17j5dIVXvetLTF97da5pWRs6YbkUZ+VCKEIdGRUwp3WpiZ+Y1Lw015QZzVE7YYpU",
+ "UqxZAcXU3L43S5YvSU6VHQLbkRtWloYGawVFitbiq9txmD6FKDFw3QofuKA/LzLade3BBGzwNsjyUijI",
+ "tNjzPPkXh/KChA9K+1apwx4rcrkEgpObD/axRdxxQ9NluSUa97UgVBFK/NM0JWxOtqImN7g5JbvG/m41",
+ "BmsrYpCGm9N5R83hTaFvgIwI8mZClEA5Is+fuyHK+JwtagmK3CxBL92bJ0FVgisgYvYPyLXZ9v958eYn",
+ "IiR5DUrRBbyl+TUBnosCiiNyPidc6IA0HC0hDk3P1DocXLFH/h9KGJpYqUVF8+v4i16yFYus6jXdsFW9",
+ "IrxezUCaLfVPiBZEgq4lTwFkR9xDiiu6GU56KWue4/6303Z4OUNtTFUl3SLCVnTz15OpA0cRWpakAl4w",
+ "viB6w5N8nJl7P3iZFDUvRrA52uxp8LCqCnI2Z1CQZpQdkLhp9sHD+GHwtMxXAI4fJAlOM8secDhsIjRj",
+ "Trf5Qiq6gIBkjsjP7nLDr1pcA28Incy2+KmSsGaiVk2nBIw49W4OnAsNWSVhziI0duHQYS4Y28bdwCvH",
+ "A+WCa8o4FOZyRqCFBntZJWEKJtwt7wxf8RlV8PXz1Bvffh25+3PR3/WdOz5qt7FRZo9k5Ok0X92BjXNW",
+ "nf4j5MNwbsUWmf15sJFscWlemzkr8SX6h9k/j4Za4SXQQYR/mxRbcKprCadX/LH5i2TkQlNeUFmYX1b2",
+ "p9d1qdkFW5ifSvvTK7Fg+QVbJJDZwBoVuLDbyv5jxotfx3oTlSteCXFdV+GC8o7gOtuS85epTbZjHkqY",
+ "Z420GwoelxsvjBzaQ2+ajUwAmcRdRU3Da9hKMNDSfI7/bOZIT3Qufzf/VFVpeutqHkOtoWP3JKP6wKkV",
+ "zqqqZDk1SHznPpuv5hIAK0jQtsUxPqinHwMQKykqkJrZQWlVZaXIaZkpTTWO9O8S5pPTyb8dt/qXY9td",
+ "HQeTvzK9LrCTYVktG5TRqjpgjLeG9VE7LgtzQeMnvCbstYdME+N2Ew0pMXMFl7CmXB+1IkvnPmgO8Hs3",
+ "U4tvy+1YfPdEsCTCiW04A2U5YNvwgSIB6gmilSBakSFdlGLW/PDwrKpaDOL3s6qy+EDuERgyZrBhSqtH",
+ "uHzanqRwnvOXR+SHcGxkxQUvt+ZxsKyGeRvm7tVyr1ijW3JraEd8oAhup5BHZms8Ggybfx8Uh2LFUpSG",
+ "69lLK6bx31zbkMzM76M6/2uQWIjbNHGhoOUwZ2Uc/CUQbh72KGdIOE7dc0TO+n1vRzZmlDjB3IpWdu6n",
+ "HXcHHhsU3khaWQDdF/uWMo5Cmm1kYb3jbTryoovCHJzhgNYQqluftb3nIQoJkkIPhm9LkV//jarlPZz5",
+ "mR9rePxwGrIEWoAkS6qWR5MYlxEer3a0MUfMNEQBn8yCqY6aJd7X8vYsraCaBktz8MbZEot67IeXHsiI",
+ "7PIG/0NLYj6bs22ufjvsEbnEC0zZ4+yMDIWR9q2AYGcyDVALIcjKCvjESN0HQfminTy+T6P26DurU3A7",
+ "5BbR7NDlhhXqvrYJB0vtVcignr+0Ep2GlYpIbc2qqJR0G1+7nWsMAi5FRUpYQ9kHwV5ZOJpFiNjc+73w",
+ "rdjEYPpWbAZ3gtjAveyEGQf5ao/dPfC9dJAJuR/zOPYYpJsFGl5e4fXAQxbIzNJqq89mQt7uOu7ds5y0",
+ "OnhCzajBazTtIQmb1lXmzmZEj2cb9AZqzZ67b9H+8DGMdbBwoekfgAVlRr0PLHQHum8siFXFSrgH0l9G",
+ "X8EZVfDsKbn429lXT57++vSrrw1JVlIsJF2R2VaDIg+dsEqU3pbwaLgyFBfrUsdH//q519x2x42No0Qt",
+ "c1jRajiU1QhbntA2I6bdEGtdNOOqGwBH3YhgnjaLdmKNHQa0l0wZlnM1u5fNSCGsaGcpiIOkgL3EdOjy",
+ "2mm24RLlVtb3IduDlEJGn65KCi1yUWZrkIqJiHnprWtBXAvP71f93y205IYqYuZGXXjNkcOKUJbe8PH3",
+ "vh36csNb3Oy8+e16I6tz847Zly7yvWpVkQpkpjecFDCrFx3RcC7FilBSYEd8o38AbfkWtoILTVfVm/n8",
+ "fmRngQNFZFi2AmVmIraF4RoU5IJb15A94qobdQx6+ojxOkudBsBh5GLLc1S83sexTUvyK8bRCqS2PA/E",
+ "egNjCcWiQ5Z3F99T6LBTPVARcAw6XuFn1Py8hFLT74W8bNm+H6Soq3tn8vpzjl0OdYtxuqXC9PVKBcYX",
+ "ZdcdaWFgP4qt8Yss6IU/vm4NCD1S5Cu2WOpAznorhZjfP4yxWWKA4gcrpZamz1BW/UkU5jLRtboHFqwd",
+ "rL3hDN2G9xqdiVoTSrgoADe/VnHmLOHAgpZzNPjrkN/TSyt4zsBQV05rs9q6ImjOHrwXbceM5vaEZoga",
+ "lTDmNVZY28pOZ50jSgm02JIZACdi5ixmzpaHi6Roi9eevXGsYeS+6MBVSZGDUlBkTlO3FzTfzj4degee",
+ "EHAEuJmFKEHmVN4Z2Ov1XjivYZuh54giD3/8RT36AvBqoWm5B7HYJobeRu/hzKJDqMdNv4vg+pOHZEcl",
+ "EP+uEC2Qmy1BQwqFB+EkuX99iAa7eHe0rEGigfIPpXg/yd0IqAH1D6b3u0JbVwl/SCfeGg7PbBinXHjG",
+ "KjZYSZXO9l3LplFHBjcrCG7C2E2MAycYr1dUaWtUZ7xAXaB9TnAey4SZKdIAJ8UQM/IvXgIZjp2bd5Cr",
+ "WjXiiKqrSkgNRWwNHDY75voJNs1cYh6M3cg8WpBawb6RU1gKxnfIsiuxCKK6sT05r5Ph4tBCY975bRSV",
+ "HSBaROwC5MK3CrAb+oQlAGGqRbQlHKZ6lNM4ok0nSouqMreFzmre9Euh6cK2PtM/t22HxEV1+24XAhS6",
+ "orn2DvIbi1nrDbikijg4yIpeG94D1SDW+j+E2RzGTDGeQ7aL8lHEM63CI7D3kNbVQtICsgJKuh0O+rP9",
+ "TOznXQPgjrfirtCQWbeu+Ka3lOy9aHYMLXA8FWMeCX4huTmCRhRoCcT13jNyATh27HJydPSgGQrnim6R",
+ "Hw+Xbbc6MiK+hmuhzY47ekCQ3Y0+BuAEHpqhb48K7Jy1smd/iv8C5SZo+IjDJ9mCSi2hHf+gBSR0qM5j",
+ "Pjgvveu9dwNHr83kNbbnHkkd2YRC9y2VmuWsQlnnR9jeu+jXnyBqdyUFaMpKKEjwwYqBVdifWIek/pi3",
+ "EwVH6d6G4A+Ub5HllEwhy9MF/hq2KHO/tZ6ugarjPmTZyKjmfaKcIKDef86w4GET2NBcl1vDqOklbMkN",
+ "SCCqnq2Y1taDvSvqalFl4QBRu8aOGZ1VM2pT3GlmvcChguUNt2I6sTLBbvgue4JBBx1OFqiEKEdoyAbI",
+ "iEIwygGGVMLsOnPO9N6d2lNSB0h3aaNJu3n+H6gOmnEF5L9ETXLKUeSqNTQ8jZDIKCADaWYwLFgzp3N1",
+ "aTEEJazASpL45fHj/sIfP3Z7zhSZw42PQDEN++h4/Bj1OG+F0p3DdQ/6UHPcziPPBxp8zMPnpJD+nbLf",
+ "1cKNPGYn3/YGb6xE5kwp5QjXLP/OF0DvZG7GrD2kkXFuJjjuKFtOx2Q/XDfu+wVb1SXV92G1gjUtM7EG",
+ "KVkBe29yNzET/Ls1Ld803TC6BnJDozlkOcaEjBwLLk0fG0ZixmGcmQNsXUjHAgTntteF7bRHxGy99Nhq",
+ "BQWjGsotqSTkYKMnDOeomqUeEetXmS8pX6DAIEW9cI59dhy88GtlVTOy5oMhokyV3vAMldyxB8A5c/sA",
+ "GsNOATUiXV9DbgWYG9rM52KmxrzMwR70LQZRI9l0kpR4DVLXrcRrkdONAhrxGHT4vQA/7cQjTSmIOsP7",
+ "DPEVbos5TGZz/xiVfTt0DMrhxIGrYfsx5W1oxO1yew9Mjx2ISKgkKHyiQjWVsl/FPIz4c2+Y2ioNq6Em",
+ "33b9NXH83iXlRcFLxiFbCQ7baJA74/AaP0aPEz6Tic7IsKT69mWQDvw9sLrzjKHGu+IXd7t/QvsWK/W9",
+ "kPdlErUDjmbvR1gg95rb3ZS3tZPSsoyYFl08UP8CUNMm/wCThColcoY823mhpvagOWukCx7qov9t4+V8",
+ "D2evP27PhhaGmqKOGMqKUJKXDDXIgist61xfcYo6qmCpEecnL4yntZYvfJO4mjSixXRDXXGKjm+N5irq",
+ "sDGHiJrmewCvvFT1YgFK92SdOcAVd60YJzVnGudameOS2fNSgUQPpCPbckW3ZG5oQgvyO0hBZrXucv8Y",
+ "7qY0K0tn0DPTEDG/4lSTEqjS5DXjlxsczhv9/ZHloG+EvG6wEH/dF8BBMZXFnbR+sF/Rodgtf+mcizE9",
+ "gf3snTXb+NuJWWYn5P5/P/zP0/dn2X/T7PeT7Jv/7/jDx+efHj0e/Pj001//+n+6Pz379NdH//nvsZ3y",
+ "sMeCsRzk5y+dZHz+EsWf1gY0gP2z6f9XjGdRIgu9OXq0RR5i4LEjoEdd5ZhewhXXG24IaU1LVpi75Tbk",
+ "0H9hBmfRno4e1XQ2oqcM82s9UKi4wy1DIpdM72q8NRc19GuMhz2iUdJFMuJ5mdfcbqXnvm1Uj/cvE/Np",
+ "E9pqs96cEox7XFLvHOn+fPrV15NpG6/YfJ9MJ+7rhwgls2ITi0otYBOTFd0BwYPxQJGKbhXo+O2BsEdd",
+ "6axvRzjsClYzkGrJqs9/UyjNZvEbzsdKOJ3Thp9z6xhvzg+aOLfOciLmnx9uLQEKqPQylg2jw6hhq3Y3",
+ "AXpuJ5UUa+BTwo7gqK/zKYy86Jz6SqBzzMqA0qcYIw0158ASmqeKAOvhQkYpVmL00wsLcI+/undxyA0c",
+ "g6s/Z2PP9H9rQR788N0lOXYXpnpgA6Tt0EFIa0SUdlFbHYckc5vZHECWybviV/wlzFH7IPjpFS+opscz",
+ "qliujmsF8ltaUp7D0UKQUx8I9pJqesUHnFYyTVcQgkeqelaynFyHAklLnjb1ynCEq6v3tFyIq6sPA9+M",
+ "ofjgporeL3aCzDDCotaZSxyRSbihMmb7Uk3iABzZZobZNatlskVtFaQ+MYUbP37n0apS/QDi4fKrqjTL",
+ "D8hQufBYs2VEaSE9L2IYFAsN7u9Pwj0Mkt54vUqtQJHfVrR6z7j+QLKr+uTkGZBORO1v7sk3NLmtYLR2",
+ "JRng3Feq4MKtWAkbLWlW0UXMxHZ19V4DrXD3kV9eoY6jLAl260Tyesd8HKpdgMdHegMsHAdHJeLiLmwv",
+ "nyQsvgT8hFuIbQy70Rr+b7tfQWzvrberFx882KVaLzNztqOrUobE/c40uYMWhsny3hiKLVBadWmWZkDy",
+ "JeTXLv8NrCq9nXa6e4cfx2j6q4MpmxnJRuZhbg40UMyA1FVBHStO+bafJEGB1t6t+B1cw/ZStKk9DsmK",
+ "0A3SV6mDipQacJeGWMNj68bob77zKkPBvqp8rDsGPXqyOG3owvdJH2TL8t7DIY4RRSeIPIUIKiOIsMSf",
+ "QMEtFmrGuxPpx5ZnpIyZffkiWZL83U9ck1Z4cg5g4WpQ626/rwDTrIkbRWbU8O3CZQizgejBLVYruoAE",
+ "hxzaiEaGe3fsSjjIvncv+tKJef9BG7w3UZBt48ysOUopYL4YUkFhpuf252eyZkhnmcDEnw5hsxLZpMY/",
+ "0l46VHZsdTaTYQq0OAGD5C3D4cHoYiTkbJZU+eRlmOPNn+VRPMAfmFhhVzqd88BjLUjk1iTL8Xdu/5wO",
+ "pEuXVMdn0vHpc0LRckQqHMPho5N8bDsERwaogBIWduG2sSeUNslDu0EGjjfzeck4kCzm/BaoQYNnxs0B",
+ "hj9+TIjVwJPRI8TIOAAbzes4MPlJhGeTLw4BkrskFdSPjYb54G+Ih49Zd3DD8ojKXOEsYdXK/Q1Ancdk",
+ "8371/HZxGML4lJhrbk1Lc805ia8dZJDVBdnWXg4X5+DxKMXO7jCA2IfloDXZp+g2qwl5Jg90nKHbAfFM",
+ "bDIbPxrleGebmaH3qIc8RrPGDqbNn/NAkZnYoNMQPi3WI3sPLGk4PBiBhL9hCukV+6VecwvMrml3c1Mx",
+ "KlRIMk6d15BLip0YM3WCg0mRy8MgJc6tAOgpO9r80k743SukdtmT4WPevmrTNtWbDz6KHf/UEYruUgJ/",
+ "Qy1Mk8TmbZ9jieopur4v3fw9AQsZI3pzTQyNNENTkIISUCjIOkxUdh2znBrZBvDFufDdAuUFZgmifPso",
+ "cKiSsGBKQ6tE934SX0I9STE5oRDz9Op0Jedmfe+EaJ4pa0bEjp1lfvYVoEfynEmlM7RARJdgGn2vUKj+",
+ "3jSN80pdly2bypcV8bsBp72GbVawso7Tq5v3x5dm2p+aK1HVM7xvGbcOKzNMPR115NwxtfX13bngV3bB",
+ "r+i9rXfcaTBNzcTSkEt3jn+Rc9G7eXddBxECjBHHcNeSKN1xQQYBuMPbMeCbAhv/0S7t6+AwFX7svV47",
+ "Pgw49UbZkaJrCRQGO1fB0Exk2BKmg8zNw8jYxBmgVcWKTU8XakdNSsz0IIWHz3fXwwLurhtsDwa6fnlR",
+ "N+dOrkDn/ed0PsfIIB8bFs66AzpfN5Ao5diY0KKWqFTrONsNE1M2jN3Itf/4y4UWki7AKUYzC9KdhsDl",
+ "HIKGIO2jIppZC2fB5nMIFYLqNsqsDnB9tU+0uMMIIotrDWvG9dfPY2S0h3paGPejLE4xEVpImYkuh4pX",
+ "z1YFcmdTuSTYmltoT6MRpD/CNvvFSCikokyq1mPMaUK7998Bu75e/QhbHHmvI5YBbM+uoJj6DpAGY2rB",
+ "5pMNnGhEoDCHKSZ96GzhATt1Ft+le9oal3U2TfytW3YnK2t3KXc5GK3dzsAyZjcu4uYyc3qgi/g+Ke/b",
+ "BJZQxoXkGLBc4VRM+Ro9w6eoCY/eR7uXQEtPvLicyafp5G7Gqdhr5kbcg+u3zQMaxTM6P1ljRcfWfCDK",
+ "aVVJsaZl5kx4qcdfirV7/LG5t/h9ZmYyTtmX3529euvA/zSd5CVQmTXCWHJV2K76l1mVzVO7+ylBjsVr",
+ "RaywHmx+k1wzNPvdLMEVUwjk/UHW59akGxxFZwacx30w9959zvpsl7jDCg1VY4RuDSTWBt21O9M1ZaW3",
+ "THhoE/6SuLhxqcOjt0I4wJ3t14EbQnav183gdMdPR0tde+4knOsNZkuLSxzc5VLDq8jZo+m9c0/fC9m5",
+ "/F2wTNSe/cexVYbJtnhMuA/6Aj19ZuqIWMbrt8Vv5jQ+fhwetcePp+S30n0IAMTfZ+53lC8eP46aGqKa",
+ "BHNJoKKA0xU8ahx/kxvxedVOHG7GPdBn61XDWYo0GTYUag3THt03Dns3kjl8Fu6XAkowP+2PrettukV3",
+ "CMyYE3SRCo5p/J5WtiaQIoL33fwwLsuQFl72K4pZz63lZniEeL1Ca0emSpbH7cB8psz1yq1/j2lMsHFC",
+ "YWZGrFnCXYzXLBjLNBuTxq8HZDBHFJkqmkmwxd1MuONdc/bPGggrjFQzZyDxXes9dV44wFEHDKkRPYdz",
+ "uYGtF0E7/F30IGHG/z7PiEDsVoKE3kQDcF82an2/0MZq1spMhzolhjMOLu4dDoWOPhw12wCLZdcraJwc",
+ "M6Y2pL/oXOmBxBzRWo9MZXMpfoe4LhpV+JHYbF/jgKEn7u8QimdhhbPOldJYoNqSle3s+7Z7vGyc2vg7",
+ "y8J+0U1Zhds8pvFTfdhG3kboVfEMog7JKSEsNEd2vVUTVwser8A/CzPae1cFyu15soHJnaCH+KkMw4uO",
+ "7fjtqXQwD0KySnozo7F0/0YWMjAF29txqtCC+M5+A1QTdmtnJ4FTYdOW2eRGFcg2N8UwUeIt5Ro77WiJ",
+ "phVgkKJC0WVqHcFKJSLD1PyGclsm0fSz95XrrcBaQU2vGyExNZmK+38UkLNVVB17dfW+yIe2/oItmK0A",
+ "WCsISsy5gWx1VUtFrkxfE0zuUHM+JyfToM6l242CrZlisxKwxRPbYkYVPpeNRbLpYpYHXC8VNn86ovmy",
+ "5oWEQi+VRawSpJE9kclrvJhmoG8AODnBdk++IQ/Rf0uxNTwyWHRM0OT0yTdofbd/nMReWVfBcdeVXeCd",
+ "/Xd3Z8fpGB3Y7BjmknSjHkWzONkSzunXYcdpsl3HnCVs6R6U/WdpRTldQNxleLUHJtsXdxMtqj28cGsN",
+ "AKWl2BKm4/ODpuZ+SoQhmuvPgkFysVoxvXJePkqsDD219ePspH44W8zUlf7wcPmP6CxXeV+hnq7rM4sx",
+ "dJUII0CXxp/oCrponRJq89GVrHVj9QWJyLlPd4m1UJoSKBY3Zi6zdOQl0at1TirJuEb9R63n2V+MWCxp",
+ "bq6/oxS42ezr55GaIt20+/wwwD873iUokOs46mWC7D3P4vqSh1zwbGVulOJRG/YbnMqkV1/cfyvlRLZ7",
+ "6LGcrxklS5Jb3SE3GtzUdyI8vmPAO5Jis56D6PHglX12yqxlnDxobXbo53evHJexEjKWw7o97o7jkKAl",
+ "gzUGccQ3yYx5x72Q5ahduAv0X9YFxbOcAVvmz3JUEAgsmrviNw0X/8vrNhkvGlZtcExPByhkRNvp9Haf",
+ "2eHrMK1b335rfXbwWwJzo9FmK70PsJJw1bW+uE2fzxzOG1X32j3vKByf/EakkcGRj3/8GIF+/Hjq2ODf",
+ "nnY/2+v98eN4Tsyoys382mLhLhIx9o3t4bciogDzBagahyIXshtRQKYeKfPBXIIzN9SUdIv9fH4u4n6C",
+ "QeIOf/FTcHX1Hr94POAffUR84csSN7B1aU4f9m6xsyjJFM33wNWYkm/FZizh9N4gTzx/AhQlUDJSPYcr",
+ "GRRzi5rr9/qLBDRqRp1BKYyQGdapCPX5/zp4Nouf7sB2zcrilzbdUO8hkZTny6ij5sx0/LUtut4s0V6V",
+ "0dT3S8o5lNHhrGz7q5eBI1L6P8TYeVaMj2zbLyZol9tbXAt4F0wPlJ/QoJfp0kwQYrWbyaWJFC4XoiA4",
+ "T5tnvb0ch1U5g1Jh/6xB6djRwA82WgmNXebytZWqCPACtV9H5AfMqWBg6STRRa2TT0/YTdVVV6WgxRTT",
+ "Jl5+d/aK2FltH1s62FbKWqDSpbuKqJZ8fOqypgpwPCZ//Di7g4TNqpXOmsJWsaxHpkVbeov1XCdQHRNi",
+ "54i8tJow5fUsdhKCyTflCoqgjpaVxZAmzH+0pvkSVUydhyxN8uNLvHmqbBXwQb3opq4CnjsDt6vyZou8",
+ "TYnQS5A3TAFGYcIauomWmqxjTsXpEy91lydrzi2lHB3AUzRVFA5FuwfOMiTeNhyFrIf4AxUMtkLioRXv",
+ "LrBXNM1zv3xez3jr0/Y0dYBfOx1xTrngLMckyzGGCJPCjLM2jchHHTcTqYk7oZHDFS3a18R/OSwmy/j5",
+ "i9Ahbmi5Db6aTbXUYf/UsHHFXBaglbvZoJj62pPOrsG4AlcnwxBReE8KGfFNifqzN3bwA8kI8z0kFFXf",
+ "m28/OTUmBkJfM44KC4c2x2Zby0OpGBoYOWGaLAQot55u0iv13vQ5wvxPBWw+HL0SC5ZfsAWOYb2hzLKt",
+ "699wqDPvCOgc70zbF6aty8rb/Nzx6rGTnlWVmzRdmTRejnnDkwiOuZ94f4AAuc344Wg7yG2nBy++p4bQ",
+ "YI3OR1DhOzwgjKZKZ68kthERLEVhC2Jjk6Kp+RiPgPGKcW8Jiz8QefRJwI3B85rop3JJtWUBR91pl0DL",
+ "hB87xvpZU+pdh+rnJDYowTX6OdLb2BYYTVwcTYOWcaN8S/yhMNQdMBMvaNl4wEbKhSJX5ZioAmNEegVE",
+ "YxeHubh9ieLuA7CnKvm07Y55vg99iVLZj2Z1sQCd0aKIlS35Fr8S/OpjfWADed2Ut6gqkmOyz2720yG1",
+ "uYlywVW92jGXb3DH6YKKvBFqCKsC+x3G7AqzLf57SL34xvf14Pg27+haHJbydxivF+N6DU1nii2y8ZjA",
+ "N+Xu6Ginvh2ht/3vldJLsegC8iWUpIlbLtyj2P32nXk4wpSAAzdj+7Q0GfvQpVfgd5/kosk11b2V8Ckb",
+ "VDBB43VTp323GiJdcX2Kj18ipjRUedv31aqBU5GleTIQmmqXkkVTsvMKSqa5sC6fPSX60BKUcvO0Xp73",
+ "p3x2a92J0LQJ5seOwcW6+rSXRdLQcjtbSLvBhxpDflyngo19BnD83q/IfA0uT1slYc1E7Z1ovCurFwnt",
+ "r536xk24d3T9UQfxL618TqrKL11lPLtMJ5P/+Is1phHgWm7/BIrzwaYPaj0PuV2rnmqbkKao0qgiS51X",
+ "cUx2/FgidscbdqpN76mVPSCrl2PYgWHt6+nkvDjowYwl85/YUWLHLl7JOp3ruM1vjEesEoq1tc1iJa5H",
+ "+oxfYpXqIFfzcCzvS7iGXGNBu9ZHSgIckrnZTOZ19/8v53FanG5c612q4135jYdV7Pa88YMUJEEaHVsB",
+ "7Gh8Nt+zxhPWBvLcUIW57yXquLuhr6MD8OZzyDVb70n58vcl8CCdyNTrZRCWeZABhjXhKJgx9HCtYwvQ",
+ "rowsO+EJMvffGZxUOPI1bB8o0qGGaEmyJhbrNskiEQN4O2SGRISKeZpZRbJz/mGqoQzEgvfstN2hTbud",
+ "rGYcJDC65VyeJM3D0SY12jFlvJzqqLlM14NSfWFkRSorzLAaY1r+eInFL5Xzc6JNsslQSifnw5T8Ny5Z",
+ "JSboaWwnPm0lKP+bz8ZlZynZNYT1ltFSdUNl4VtEVS9eq5PteI8GqVx8JcE+0PNmZtb64Q9t1ZEkzxjS",
+ "kpfCsBFZKi6o6/re+I09UNbBr83DgnDNQbq69Mj/lkJBpoX3298Fxy5UWC/GWyFBJQsrWOCS6U7ftflc",
+ "scAMxfSm1DkvhgskElbUQCeDrKvpOXch+4X97mOpfYGRvRqmhl73V7rzERhMDZAYUv2cuNdyf4z2bZRN",
+ "jHOQmbc89VOwcpBda0glRVHn9oEOD0ajkBudAmXHVRLV0+TDVfZkhCDW+Rq2x1YI8iUC/Q6GQFvOyYIe",
+ "pO7rbfK9qt9UDO7FvYD3JTVX00klRJkljB3nw7yxfYq/Zvk1FMS8FN5TOVH9lTxEHXtjzb5Zbn2e1KoC",
+ "DsWjI0LOuI0N8YbtbuGi3uT8gd41/wZnLWqbytkp1Y6ueNzJHpMsyzveZn6Y3XeYAnPV3XEqO8ierKSb",
+ "RM5aSW8itZCPxkrlQ1Nzvz5tS1QWihhPcmEtVi/woMcURxjJHqRcQEMmJc7SRVQpYi6Zt4m2N0PFMRVO",
+ "hgBp4GOCvhso3OBRBEQrrkZOoc1g5nKXiTmR0BqRb5vEbVgcNibR92duZuned3MhoVPm1fQWsvAsD1Nt",
+ "PWYqZ0xLKre3SbU2KE470J4ksbzXHavxxGoX0npjDXFYluImw8sqa3Kbx0Rb0051H2NfzqXtZ071DAK/",
+ "Lqoco7YlS1qQXEgJedgjHrZnoVoJCVkp0M0rZoGea8N3rzBWh5NSLIioclGArREQp6DUXDXnFNkmCLxq",
+ "oiiwtINBn7ZPQMcjp7yvysg2OY9ddGZtmQnHU1AuGY/DkG08hHdHVeGDsvOfz1EjxNDXpRt7bbnPsLYy",
+ "HFhamZWlVxikqiuTn1WN7kgYeGOmeE5WQmkn2dmRVDNU6+L1MBdcS1GWXSWQZYkXTrP9mm7O8ly/EuJ6",
+ "RvPrRyhHcqGblRZTH5bad8ZrZ5K9jEwjy0BfLiN6XpzFn7qDaz27m+PgEq0BmB/231j7ddxnsVLW3XX1",
+ "a7PzRO5MLVYsj9Pwv5Z3W9InLXYlRFM92SpJNjgfm+FFHT4OjTMDXklDNAM3BBvbL3enOaMuXh7mv8jx",
+ "9sclc3CPROJhGt6TjmvJ8iRv1QMAIbURo7qWtrRSyPk0t4pY2AhzNEn3AR15i6Pnz91gMyPcO1Aa7gTU",
+ "wNuwAfChFfanNiWX9VyciY3//qjN2XUr4D/tpvJYOfrIKW5Iy1XL9/k9EjdCPDPwTv8jLBzuX9D9XkhN",
+ "GbyRL2oAQNovqQPDKO+kQ8GYU1ZCkVGdeNxRJzQNJFsX0dIvbsqUu8lzah/sJRAzdi3B5ZuwLHWvGHpF",
+ "DSmJpvlQc8sL2IDCZBC2ojNV1s7g7R1Q2rJSPeFbVFkJa+i4a7kkGDWydmwNvq9qOpMCoELrX18nFfND",
+ "Ct/ynqLCrT0LPFnGYDequbCItTtF9qglokqUDc/sMVFjj5KBaM2Kmnbwpw5lObpqN3OUI6ga8OSZl9vG",
+ "TvOzHeGdH+DM94+xMh4TH8bdQwdfQXHU7bqA9vol1ip16nncLTHM8NIYNHC2ojF8WhJv7w1V0RueVgAO",
+ "Sb4Vb0buExM8QOx3G8iRq+n63d0dJwQHI6qXvSnJgstmh2+vSP4iNLyThJPjxUQNBXjB7tTUeLpwDDs2",
+ "wHKW3LC9hmvGElLu/nf33xQr8NuBjFxtK1qFEtxL8BY7TCjdGCscQ8uaB837F05dPsG+UM4Cz+oV3RIh",
+ "8R8jr/2zpiWbb/GEWvB9N6KW1JCQMxFa27XzVzQT72ZMph4wrxcQfiq7bjZ2zGC4rRklANo8gU45hZmB",
+ "riHcBjTL25sn1+bKUfVsxZTCx663nUMsuMX7nBArWoQyMmam65YS9blKTe//v43aCqfyCaWqkua+fhkQ",
+ "RVc9hbitUeiJSy9htTusbygeexJo6h62RCt9OG9xC+XegZ4bMV/5VL2HDtiDenCDUhd3WsYhBYrbyOgd",
+ "AZGjlnLfuzDWP2QANBqZfVavPeDbbIw+A9jnwH80aWRqGWPA/7PgPVFGL4TXVsz7DFjuhPxHYLV61ZnY",
+ "ZBLmap8rhFWsGkFYtskCvHKS8VwCVdY35PyNE9nanIiMGxHSei821rdmlALmjLeXJeNVrSMSAKZG5NsA",
+ "YaF6GtGaMPakuATDhq1p+WYNUrIitXHmdNgyXmFOeq+Sd30jwn/zpg4HYKqVfjCSENpItaCZecBt1Rvr",
+ "WKg05QWVRdiccZKDNO8+uaFbdXvbh4FW1oa/2GP9oAE3041vD+wgSNoWkHLrzJd3tEw0ANJ7NFGMMC2g",
+ "B2vErGCVIlokLAlDGOJpFegmK8UC48sSBOiST6LtxworgqPC1vJDh82j2O+wexrMu+0OvhY465gpdp+z",
+ "N4g6FHh+5kzvPGlWm9YP+LMemfYgePrni9Yt3G7OkP5jMZqXGMTQidPsF533e23dQ+x8kLBkdDW4iV1E",
+ "A7kL8A3VtePrGXVt8LFIUCvDZijbqh2O36BaJ2eaO8edodJnIBRbpExdHO2BOiGrSfbvQAI8W6nWna3u",
+ "tI0zhRnnkCJQuyNns0pUWT7GG9Cm5i+cQttB2oUxQR+Bujqx7sZxQjXFKjqJTTpVKw6tg5WsmrHPLlPl",
+ "u4TslEIjcYN2leVijncZHmGrxsEYj0Z5Me1HH3UVNs0lQSiRkNcSFZo3dLu/rlAiJezF386+evL016df",
+ "fU1MA1KwBag2rXCvLk/rMcZ4X8/yeX3EBsvT8U3wcekWcd5S5sNtmk1xZ83etqrNGTioSnSIJjTyAESO",
+ "Y6QezK32Csdpnb7/XNsVW+S971gMBX/MnjnP1vgCzriTX8Sc7L4zujX/dPy+MMx/5JHyW3uLBab0sem4",
+ "6NvQY6uQ/dNQYSTQ+95or1nuH0FxUS7zduVzR4E2DPqNkAcCkIjm68RhhdW123yV0up2UQvsDWb9R+x1",
+ "a0jb63aOkPgOe8ALw/Pado2ntAPnCyd+fN0gJVjKhxQldJa/L+LPLbC1PAZb5ERdrUHZa0kMmYsgnFO9",
+ "aKIkE7ztIJgSS2kb+aYsI0GYVvrGMxUSjmEs5ZqWn//WwBrrZ4gPKN6lQy/CSLwQyRaV6nZ5wF7RUXMH",
+ "UXf3NzV/i4GffwezR9F3zg3ljI6D1wx1J1jYeOFfBRtLSm5wTOtU8uRrMnM52SsJOVN9Y6a1OAVegWuQ",
+ "bO4c+GCj90S67VvnL0LfgYzn3vOA/BQYJQQqf1oI2yP6hS+VxMmNUnmM+gZkEcFf7I4KazjueS7umL/7",
+ "dmklggRRB6aVGFanHLs8mzrBPDq1guE6R7/WHdxGHup2bWNzooxOA3519V7PxqQyiafsNt0xl8q95O4+",
+ "KHP3H5BFxeLIjeHmjVHML6m8mjZ3ZCKFa28/albudTPoJOT9NJ0sgINiClPO/upKDHzet9RDYCO7h0fV",
+ "wnqXdBQWMZG1diYPpgpS7Y7Isuu6RXLqYtRUXkumt1he0qth2K/RfC8/NLkDXO6JxgLi3j4trqEp8dtm",
+ "GqiVf11/ELTE98gaZrh5hUR5RL7b0FVVOqUi+euD2X/As788L06ePfmP2V9OvjrJ4flX35yc0G+e0yff",
+ "PHsCT//y1fMTeDL/+pvZ0+Lp86ez50+ff/3VN/mz509mz7/+5j8emHvIgGwB9RmgTyf/KzsrFyI7e3ue",
+ "XRpgW5zQiv0IZm9QVp4LLH9mkJrjSYQVZeXk1P/0P/wJO8rFqh3e/zpxZTwmS60rdXp8fHNzcxR2OV5g",
+ "aHGmRZ0vj/08WJSqw6+8PW98kq33BO5oq4PETXWkcIbf3n13cUnO3p4ftQQzOZ2cHJ0cPXEVUDmt2OR0",
+ "8gx/wtOzxH0/dsQ2Of34aTo5XgItMROH+WMFWrLcf5JAi637v7qhiwXII3Q7tz+tnx57tuL4owux/rTr",
+ "23FomD/+2IlEL/b0RKPy8UdfB3F3604NPOfPE3QYCcWuZsczrH0wtimooHF6KShsqOOPyC4nfz92Oo/4",
+ "RxRb7Hk49uka4i07WPqoNwbWPT02rAhWklOdL+vq+CP+B6k3ANqm8jvWG36M9rfjj521us+DtXZ/b7uH",
+ "LdYrUYAHTszntj7krs/HH+2/wUSwqUAywxba9BnO1tgcuvNicjr5Lmj0Ygn59QRrSqHnF56mpycnkTyn",
+ "QS9iDzedlVCYk/n85PmIDlzosJML6xl2/Jlfc3HDCWbFszd9vVpRuUUOSteSK/LmR8LmBPpTMOVnwNuF",
+ "LhRaGOpZyfLJdNJBz4dPDmk2C9QxVlHatrj0P295Hv1xuM2dDDiJn4/92xK7XrotP3b+7J4qtax1IW6C",
+ "WVAqsyqFIWTmY636fx/fUKYNn+USr2DZxWFnDbQ8dlmWe7+2iQ0HXzBbY/Bj6OIc/fWYOlRPKqEiZPuO",
+ "3gSq1DNsbJkRUPpbgbf6xBVm6SUFOd5kM8aRgj5O2orzLTNmPw6lucGrZmRTtF17fdYwaBojN6WgRU4V",
+ "lvtzCcsnIeekZQ2foscOj9PJjrW412oyrnJ+N7VkZEXf0oL4gNeMvKalwQoU5Mw9+Z2l2cP+5PNBd86t",
+ "+6U53Jbr+TSdfPU58XPODYNOS38dmemffb7pL0CuWQ7kElaVkFSyckt+5o0H6a0v0u+ROCXNr5E5awjW",
+ "ujtIetN1SpXxgMJuPn4fXwpEb8iS8qJ0IViixlKehrJQ/ywCO5p5gHw9ikpIBMAm+oHCZmhQR+Ri6ZVS",
+ "GIVq3Z+xrM4aSlGhggjT19lJKMeE8bia8CHo3v9G2jSHeAE8c9dINhPF1lfHlvRGb2w01eCuasqcRz/2",
+ "ubPYV8edJBp5fyf/uZXUQslncvo+kHnef/j0wXyTa3TMeP8xYORPj4/RAXYplD6efJp+7DH54ccPDcJ8",
+ "WaJJJdka8+4i0oRkC8ZpmTkGui1dMXl6dDL59H8DAAD//y3ahYgS8gAA",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go
index 4be4676f9..c30caa6c9 100644
--- a/daemon/algod/api/server/v2/generated/model/types.go
+++ b/daemon/algod/api/server/v2/generated/model/types.go
@@ -1485,6 +1485,18 @@ type GetTransactionGroupLedgerStateDeltasForRoundParams struct {
// GetTransactionGroupLedgerStateDeltasForRoundParamsFormat defines parameters for GetTransactionGroupLedgerStateDeltasForRound.
type GetTransactionGroupLedgerStateDeltasForRoundParamsFormat string
+// GenerateParticipationKeysParams defines parameters for GenerateParticipationKeys.
+type GenerateParticipationKeysParams struct {
+ // Dilution Key dilution for two-level participation keys (defaults to sqrt of validity window).
+ Dilution *uint64 `form:"dilution,omitempty" json:"dilution,omitempty"`
+
+ // First First round for participation key.
+ First uint64 `form:"first" json:"first"`
+
+ // Last Last round for participation key.
+ Last uint64 `form:"last" json:"last"`
+}
+
// ShutdownNodeParams defines parameters for ShutdownNode.
type ShutdownNodeParams struct {
Timeout *uint64 `form:"timeout,omitempty" json:"timeout,omitempty"`
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
index 04a123651..0e0d78aa4 100644
--- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
@@ -139,212 +139,213 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XMbN7Lgv4Livip/HEfyZ3atq613ip1kdXESl6Vk7z3Ll4AzTRKrITABMBIZn//3",
- "KzSAGcwMQA4lxk7qvZ9scfDRaDQa/YXuD5NcrCrBgWs1OfkwqaikK9Ag8S+a56LmOmOF+asAlUtWaSb4",
- "5MR/I0pLxheT6YSZXyuql5PphNMVtG1M/+lEwq81k1BMTrSsYTpR+RJW1AysN5Vp3Yy0zhYic0Oc2iHO",
- "Xk0+bvlAi0KCUkMof+DlhjCel3UBREvKFc3NJ0VumF4SvWSKuM6EcSI4EDEnetlpTOYMykId+UX+WoPc",
- "BKt0k6eX9LEFMZOihCGcL8Vqxjh4qKABqtkQogUpYI6NllQTM4OB1TfUgiigMl+SuZA7QLVAhPACr1eT",
- "k3cTBbwAibuVA7vG/84lwG+QaSoXoCfvp7HFzTXITLNVZGlnDvsSVF1qRbAtrnHBroET0+uIfFcrTWZA",
- "KCdvv35Jnj59+sIsZEW1hsIRWXJV7ezhmmz3ycmkoBr85yGt0XIhJOVF1rR/+/VLnP/cLXBsK6oUxA/L",
- "qflCzl6lFuA7RkiIcQ0L3IcO9ZsekUPR/jyDuZAwck9s44NuSjj/Z92VnOp8WQnGdWRfCH4l9nOUhwXd",
- "t/GwBoBO+8pgSppB3z3KXrz/8Hj6+NHHv7w7zf7T/fn86ceRy3/ZjLsDA9GGeS0l8HyTLSRQPC1Lyof4",
- "eOvoQS1FXRZkSa9x8+kKWb3rS0xfyzqvaVkbOmG5FKflQihCHRkVMKd1qYmfmNS8NGzKjOaonTBFKimu",
- "WQHF1HDfmyXLlySnyg6B7cgNK0tDg7WCIkVr8dVtOUwfQ5QYuG6FD1zQHxcZ7bp2YALWyA2yvBQKMi12",
- "XE/+xqG8IOGF0t5Var/LilwsgeDk5oO9bBF33NB0WW6Ixn0tCFWEEn81TQmbk42oyQ1uTsmusL9bjcHa",
- "ihik4eZ07lFzeFPoGyAjgryZECVQjsjz526IMj5ni1qCIjdL0Et350lQleAKiJj9C3Jttv1/n//wPRGS",
- "fAdK0QW8ofkVAZ6LAoojcjYnXOiANBwtIQ5Nz9Q6HFyxS/5fShiaWKlFRfOr+I1eshWLrOo7umarekV4",
- "vZqBNFvqrxAtiARdS54CyI64gxRXdD2c9ELWPMf9b6ftyHKG2piqSrpBhK3o+u+Ppg4cRWhZkgp4wfiC",
- "6DVPynFm7t3gZVLUvBgh5mizp8HFqirI2ZxBQZpRtkDiptkFD+P7wdMKXwE4fpAkOM0sO8DhsI7QjDnd",
- "5gup6AICkjkiPzrmhl+1uALeEDqZbfBTJeGaiVo1nRIw4tTbJXAuNGSVhDmL0Ni5Q4dhMLaN48ArJwPl",
- "gmvKOBSGOSPQQoNlVkmYggm36zvDW3xGFXzxLHXHt19H7v5c9Hd9646P2m1slNkjGbk6zVd3YOOSVaf/",
- "CP0wnFuxRWZ/HmwkW1yY22bOSryJ/mX2z6OhVsgEOojwd5NiC051LeHkkj80f5GMnGvKCyoL88vK/vRd",
- "XWp2zhbmp9L+9FosWH7OFglkNrBGFS7strL/mPHi7Fivo3rFayGu6ipcUN5RXGcbcvYqtcl2zH0J87TR",
- "dkPF42LtlZF9e+h1s5EJIJO4q6hpeAUbCQZams/xn/Uc6YnO5W/mn6oqTW9dzWOoNXTsrmQ0HzizwmlV",
- "lSynBolv3Wfz1TABsIoEbVsc44V68iEAsZKiAqmZHZRWVVaKnJaZ0lTjSP8mYT45mfzluLW/HNvu6jiY",
- "/LXpdY6djMhqxaCMVtUeY7wxoo/awiwMg8ZPyCYs20OhiXG7iYaUmGHBJVxTro9alaXDD5oD/M7N1OLb",
- "SjsW3z0VLIlwYhvOQFkJ2Da8p0iAeoJoJYhWFEgXpZg1P9w/raoWg/j9tKosPlB6BIaCGayZ0uoBLp+2",
- "Jymc5+zVEfkmHBtFccHLjbkcrKhh7oa5u7XcLdbYltwa2hHvKYLbKeSR2RqPBiPmH4LiUK1YitJIPTtp",
- "xTT+h2sbkpn5fVTnPweJhbhNExcqWg5zVsfBXwLl5n6PcoaE48w9R+S03/d2ZGNGiRPMrWhl637acbfg",
- "sUHhjaSVBdB9sXcp46ik2UYW1jty05GMLgpzcIYDWkOobn3Wdp6HKCRICj0YvixFfvUPqpYHOPMzP9bw",
- "+OE0ZAm0AEmWVC2PJjEpIzxe7WhjjphpiAo+mQVTHTVLPNTydiytoJoGS3PwxsUSi3rsh0wPZER3+QH/",
- "Q0tiPpuzbVi/HfaIXCADU/Y4OydDYbR9qyDYmUwDtEIIsrIKPjFa915Qvmwnj+/TqD36ytoU3A65RTQ7",
- "dLFmhTrUNuFgqb0KBdSzV1aj07BSEa2tWRWVkm7ia7dzjUHAhahICddQ9kGwLAtHswgR64PzhS/FOgbT",
- "l2I94AliDQfZCTMOytUeuzvge+UgE3I35nHsMUg3CzSyvEL2wEMRyMzSWqtPZ0Lejh33+CwnrQ2eUDNq",
- "cBtNe0jCpnWVubMZsePZBr2BWrfndi7aHz6GsQ4WzjX9HbCgzKiHwEJ3oENjQawqVsIBSH8ZvQVnVMHT",
- "J+T8H6fPHz/5+cnzLwxJVlIsJF2R2UaDIvedskqU3pTwYLgyVBfrUsdH/+KZt9x2x42No0Qtc1jRajiU",
- "tQhbmdA2I6bdEGtdNOOqGwBHcUQwV5tFO7HODgPaK6aMyLmaHWQzUggr2lkK4iApYCcx7bu8dppNuES5",
- "kfUhdHuQUsjo1VVJoUUuyuwapGIi4l5641oQ18LL+1X/dwstuaGKmLnRFl5zlLAilKXXfDzft0NfrHmL",
- "m62c3643sjo375h96SLfm1YVqUBmes1JAbN60VEN51KsCCUFdsQ7+hvQVm5hKzjXdFX9MJ8fRncWOFBE",
- "h2UrUGYmYlsYqUFBLrgNDdmhrrpRx6Cnjxhvs9RpABxGzjc8R8PrIY5tWpNfMY5eILXheaDWGxhLKBYd",
- "sry7+p5Ch53qnoqAY9DxGj+j5ecVlJp+LeRFK/Z9I0VdHVzI6885djnULcbZlgrT1xsVGF+U3XCkhYH9",
- "KLbGz7Kgl/74ujUg9EiRr9liqQM9640UYn54GGOzxADFD1ZLLU2foa76vSgMM9G1OoAI1g7WcjhDtyFf",
- "ozNRa0IJFwXg5tcqLpwlAljQc44Ofx3Ke3ppFc8ZGOrKaW1WW1cE3dmD+6LtmNHcntAMUaMSzrzGC2tb",
- "2elscEQpgRYbMgPgRMycx8z58nCRFH3x2os3TjSM8IsOXJUUOSgFReYsdTtB8+3s1aG34AkBR4CbWYgS",
- "ZE7lnYG9ut4J5xVsMowcUeT+tz+pB58BXi00LXcgFtvE0NvYPZxbdAj1uOm3EVx/8pDsqATi7xWiBUqz",
- "JWhIoXAvnCT3rw/RYBfvjpZrkOig/F0p3k9yNwJqQP2d6f2u0NZVIh7SqbdGwjMbxikXXrCKDVZSpbNd",
- "bNk06ujgZgUBJ4xxYhw4IXi9pkpbpzrjBdoC7XWC81ghzEyRBjiphpiRf/IayHDs3NyDXNWqUUdUXVVC",
- "aihia+Cw3jLX97Bu5hLzYOxG59GC1Ap2jZzCUjC+Q5ZdiUUQ1Y3vyUWdDBeHHhpzz2+iqOwA0SJiGyDn",
- "vlWA3TAmLAEIUy2iLeEw1aOcJhBtOlFaVJXhFjqredMvhaZz2/pU/9i2HRIX1e29XQhQGIrm2jvIbyxm",
- "bTTgkiri4CAremVkDzSDWO//EGZzGDPFeA7ZNspHFc+0Co/AzkNaVwtJC8gKKOlmOOiP9jOxn7cNgDve",
- "qrtCQ2bDuuKb3lKyj6LZMrTA8VRMeCT4heTmCBpVoCUQ13vHyAXg2DHm5OjoXjMUzhXdIj8eLttudWRE",
- "vA2vhTY77ugBQXYcfQzACTw0Q98eFdg5a3XP/hT/AcpN0MgR+0+yAZVaQjv+XgtI2FBdxHxwXnrsvceB",
- "o2wzycZ28JHUkU0YdN9QqVnOKtR1voXNwVW//gRRvyspQFNWQkGCD1YNrML+xAYk9ce8nSo4yvY2BH9g",
- "fIssp2QKRZ4u8FewQZ37jY10DUwdh9BlI6Oa+4lygoD6+DkjgodNYE1zXW6MoKaXsCE3IIGoerZiWtsI",
- "9q6qq0WVhQNE/RpbZnRezahPcaub9RyHCpY33IrpxOoE2+G76CkGHXQ4XaASohxhIRsgIwrBqAAYUgmz",
- "68wF0/twak9JHSAd00aXdnP931MdNOMKyH+ImuSUo8pVa2hkGiFRUEAB0sxgRLBmThfq0mIISliB1STx",
- "y8OH/YU/fOj2nCkyhxv/AsU07KPj4UO047wRSncO1wHsoea4nUWuD3T4mIvPaSF9nrI71MKNPGYn3/QG",
- "b7xE5kwp5QjXLP/ODKB3Mtdj1h7SyLgwExx3lC+n47Ifrhv3/Zyt6pLqQ3it4JqWmbgGKVkBOzm5m5gJ",
- "/tU1LX9ouuHrGsgNjeaQ5fgmZORYcGH62GckZhzGmTnANoR0LEBwZnud2047VMw2So+tVlAwqqHckEpC",
- "Dvb1hJEcVbPUI2LjKvMl5QtUGKSoFy6wz46DDL9W1jQjaz4YIipU6TXP0MgduwBcMLd/QGPEKaBGpetb",
- "yK0Cc0Ob+dybqTE3c7AHfY9B1Ek2nSQ1XoPU61bjtcjpvgIacRl05L0AP+3EI10piDoj+wzxFW6LOUxm",
- "c38fk307dAzK4cRBqGH7MRVtaNTtcnMAoccORCRUEhReUaGZStmvYh6++HN3mNooDauhJd92/Tlx/N4m",
- "9UXBS8YhWwkOm+gjd8bhO/wYPU54TSY6o8CS6tvXQTrw98DqzjOGGu+KX9zt/gnte6zU10IeyiVqBxwt",
- "3o/wQO50t7spb+snpWUZcS2690B9BqCmTf4BJglVSuQMZbazQk3tQXPeSPd4qIv+N02U8wHOXn/cng8t",
- "fGqKNmIoK0JJXjK0IAuutKxzfckp2qiCpUaCn7wynrZavvRN4mbSiBXTDXXJKQa+NZaraMDGHCJmmq8B",
- "vPFS1YsFKN3TdeYAl9y1YpzUnGmca2WOS2bPSwUSI5CObMsV3ZC5oQktyG8gBZnVuiv943M3pVlZOoee",
- "mYaI+SWnmpRAlSbfMX6xxuG8098fWQ76RsirBgvx230BHBRTWTxI6xv7FQOK3fKXLrgY0xPYzz5Ys31/",
- "OzHL7Dy5/7/3//3k3Wn2nzT77VH24n8cv//w7OODh4Mfn3z8+9//X/enpx///uDf/y22Ux722GMsB/nZ",
- "K6cZn71C9af1AQ1g/2T2/xXjWZTIwmiOHm2R+/jw2BHQg65xTC/hkus1N4R0TUtWGN5yG3Lo3zCDs2hP",
- "R49qOhvRM4b5te6pVNyBy5AIk+mxxltLUcO4xvizR3RKupeMeF7mNbdb6aVv+6rHx5eJ+bR52mqz3pwQ",
- "fPe4pD440v355PkXk2n7XrH5PplO3Nf3EUpmxTr2KrWAdUxXdAcED8Y9RSq6UaDj3ANhj4bS2diOcNgV",
- "rGYg1ZJVn55TKM1mcQ7n30o4m9Oan3EbGG/OD7o4N85zIuafHm4tAQqo9DKWDaMjqGGrdjcBemEnlRTX",
- "wKeEHcFR3+ZTGH3RBfWVQOeYlQG1TzFGG2rOgSU0TxUB1sOFjDKsxOin9yzAXf7q4OqQGzgGV3/Oxp/p",
- "/9aC3Pvmqwty7BimumcfSNuhgyetEVXavdrqBCQZbmZzAFkh75Jf8lcwR+uD4CeXvKCaHs+oYrk6rhXI",
- "L2lJeQ5HC0FO/EOwV1TTSz6QtJJpuoIneKSqZyXLyVWokLTkaVOvDEe4vHxHy4W4vHw/iM0Yqg9uqih/",
- "sRNkRhAWtc5c4ohMwg2VMd+XahIH4Mg2M8y2Wa2QLWprIPWJKdz4cZ5Hq0r1HxAPl19VpVl+QIbKPY81",
- "W0aUFtLLIkZAsdDg/n4v3MUg6Y23q9QKFPllRat3jOv3JLusHz16CqTzovYXd+UbmtxUMNq6knzg3Deq",
- "4MKtWglrLWlW0UXMxXZ5+U4DrXD3UV5eoY2jLAl267zk9YH5OFS7AI+P9AZYOPZ+lYiLO7e9fJKw+BLw",
- "E24htjHiRuv4v+1+BW97b71dvffBg12q9TIzZzu6KmVI3O9MkztoYYQsH42h2AK1VZdmaQYkX0J+5fLf",
- "wKrSm2mnuw/4cYKmZx1M2cxI9mUe5uZAB8UMSF0V1InilG/6SRIUaO3Dit/CFWwuRJvaY5+sCN1H+ip1",
- "UJFSA+nSEGt4bN0Y/c13UWWo2FeVf+uOjx49WZw0dOH7pA+yFXkPcIhjRNF5RJ5CBJURRFjiT6DgFgs1",
- "492J9GPLM1rGzN58kSxJnvcT16RVnlwAWLgatLrb7yvANGviRpEZNXK7cBnC7EP0gIvVii4gISGHPqKR",
- "z707fiUcZNe9F73pxLx/oQ3umyjItnFm1hylFDBfDKmgMtML+/MzWTek80xg4k+HsFmJYlITH2mZDpUd",
- "X53NZJgCLU7AIHkrcHgwuhgJJZslVT55GeZ482d5lAzwOyZW2JZO5yyIWAsSuTXJcjzP7Z/TgXbpkur4",
- "TDo+fU6oWo5IhWMkfAySj22H4CgAFVDCwi7cNvaE0iZ5aDfIwPHDfF4yDiSLBb8FZtDgmnFzgJGPHxJi",
- "LfBk9AgxMg7ARvc6Dky+F+HZ5It9gOQuSQX1Y6NjPvgb4s/HbDi4EXlEZVg4S3i1cs8BqIuYbO6vXtwu",
- "DkMYnxLD5q5padic0/jaQQZZXVBs7eVwcQEeD1Li7BYHiL1Y9lqTvYpus5pQZvJAxwW6LRDPxDqz70ej",
- "Eu9sPTP0Ho2Qx9essYNp8+fcU2Qm1hg0hFeLjcjeAUsaDg9GoOGvmUJ6xX6p29wCs23a7dJUjAoVkowz",
- "5zXkkhInxkydkGBS5HI/SIlzKwB6xo42v7RTfncqqV3xZHiZt7fatE315h8fxY5/6ghFdymBv6EVpkli",
- "86YvsUTtFN3Yl27+nkCEjBG9YRNDJ83QFaSgBFQKso4QlV3FPKdGtwG8cc59t8B4gVmCKN88CAKqJCyY",
- "0tAa0X2cxOcwT1JMTijEPL06Xcm5Wd9bIZpryroRsWNnmZ98BRiRPGdS6Qw9ENElmEZfK1SqvzZN47JS",
- "N2TLpvJlRZw34LRXsMkKVtZxenXzfvvKTPt9wxJVPUN+y7gNWJlh6uloIOeWqW2s79YFv7YLfk0Ptt5x",
- "p8E0NRNLQy7dOf4k56LHebexgwgBxohjuGtJlG5hkMED3CF3DOSmwMd/tM36OjhMhR97Z9SOfwacuqPs",
- "SNG1BAaDratg6CYyYgnTQebm4cvYxBmgVcWKdc8WakdNasx0L4OHz3fXwwLurhtsBwa6cXnRMOdOrkAX",
- "/edsPscoIB8bEc6GA7pYN5Co5dg3oUUt0ajWCbYbJqZsBLuRa//2p3MtJF2AM4xmFqQ7DYHL2QcNQdpH",
- "RTSzHs6CzecQGgTVbYxZHeD6Zp9ocYcRRBa3GtaM6y+exchoB/W0MO5GWZxiIrSQchNdDA2vXqwK9M6m",
- "ckmwNbewnkZfkH4Lm+wno6GQijKp2ogxZwnt8r89dv169S1scOSdgVgGsB27gmrqW0AajJkFm0/24USj",
- "AoU5TDHpQ2cL99ip0/guHWhrXNbZNPG3YdmdrKzdpdzlYLR+OwPLmN04j7vLzOmBLuL7pLxrE1jCGBeS",
- "YyByhVMx5Wv0DK+i5nn0Ltq9AFp64sXlTD5OJ3dzTsVuMzfiDly/aS7QKJ4x+Mk6Kzq+5j1RTqtKimta",
- "Zs6Fl7r8pbh2lz829x6/TyxMxin74qvT128c+B+nk7wEKrNGGUuuCttVf5pV2Ty1268SlFi8VcQq68Hm",
- "N8k1Q7ffzRJcMYVA3x9kfW5dusFRdG7AeTwGcyfvc95nu8QtXmioGid06yCxPuiu35leU1Z6z4SHNhEv",
- "iYsblzo8yhXCAe7svw7CELKDspvB6Y6fjpa6dvAknOsHzJYW1zi4y6WGrMj5o+nBpaevhewwf/dYJurP",
- "/v3EKiNkWzwmwgd9gZ6+MHVErOD1y+IXcxofPgyP2sOHU/JL6T4EAOLvM/c76hcPH0ZdDVFLgmESaCjg",
- "dAUPmsDf5EZ8WrMTh5txF/Tp9aqRLEWaDBsKtY5pj+4bh70byRw+C/dLASWYn3a/rettukV3CMyYE3Se",
- "ehzTxD2tbE0gRQTvh/nhuyxDWsjsVxSznlvPzfAI8XqF3o5MlSyP+4H5TBn2ym18j2lMsHHCYGZGrFki",
- "XIzXLBjLNBuTxq8HZDBHFJkqmkmwxd1MuONdc/ZrDYQVRquZM5B4r/WuOq8c4KgDgdSonsO53MA2iqAd",
- "/i52kDDjf19mRCC2G0HCaKIBuK8as75faOM1a3WmfYMSwxkHjHtLQKGjD0fN9oHFshsVNE6PGVMb0jM6",
- "V3ogMUe01iNT2VyK3yBui0YTfuRttq9xwDAS9zcI1bOwwlmHpTQeqLZkZTv7ru0erxunNv7OurBfdFNW",
- "4TaXafxU77eRt1F6VTyDqENySgkL3ZHdaNUEa8HjFcRnYUZ7H6pAuT1P9mFy59FD/FSGz4uO7fjtqXQw",
- "D55klfRmRmPp/o0uZGAKtrcTVKEF8Z39Bqjm2a2dnQRBhU1bZpMbVSDb3BTDRIm31GvstKM1mlaBQYoK",
- "VZepDQQrlYgMU/Mbym2ZRNPP8ivXW4H1gppeN0JiajIVj/8oIGerqDn28vJdkQ99/QVbMFsBsFYQlJhz",
- "A9nqqpaKXJm+5jG5Q83ZnDyaBnUu3W4U7JopNisBWzy2LWZU4XXZeCSbLmZ5wPVSYfMnI5ova15IKPRS",
- "WcQqQRrdE4W8JoppBvoGgJNH2O7xC3If47cUu4YHBotOCJqcPH6B3nf7x6PYLesqOG5j2QXy7H86nh2n",
- "Ywxgs2MYJulGPYpmcbIlnNO3w5bTZLuOOUvY0l0ou8/SinK6gHjI8GoHTLYv7iZ6VHt44dYbAEpLsSFM",
- "x+cHTQ1/SjxDNOzPgkFysVoxvXJRPkqsDD219ePspH44W8zUlf7wcPmPGCxX+Vihnq3rE6sxdJV4RoAh",
- "jd/TFXTROiXU5qMrWRvG6gsSkTOf7hJroTQlUCxuzFxm6ShLYlTrnFSScY32j1rPs78ZtVjS3LC/oxS4",
- "2eyLZ5GaIt20+3w/wD853iUokNdx1MsE2XuZxfUl97ng2cpwlOJB++w3OJXJqL54/FYqiGz70GMlXzNK",
- "liS3ukNuNODUdyI8vmXAO5Jis5696HHvlX1yyqxlnDxobXbox7evnZSxEjKWw7o97k7ikKAlg2t8xBHf",
- "JDPmHfdClqN24S7Qf94QFC9yBmKZP8tRRSDwaG57v2mk+J++a5PxomPVPo7p2QCFjFg7nd3uEwd87Wd1",
- "6/tvbcwOfktgbjTabKX3AVYSobo2Frfp84mf80bNvXbPOwbHx78QaXRwlOMfPkSgHz6cOjH4lyfdz5a9",
- "P3wYz4kZNbmZX1ss3EUjxr6xPfxSRAxgvgBVE1DknuxGDJCpS8p8MExw5oaakm6xn08vRRzmMUg84C9+",
- "Ci4v3+EXjwf8o4+Iz8wscQPbkOb0Ye8WO4uSTNF8D0KNKflSrMcSTu8O8sTzB0BRAiUjzXO4kkExt6i7",
- "fme8SECjZtQZlMIomWGditCe/+fBs1n8dAu2a1YWP7XphnoXiaQ8X0YDNWem489t0fVmiZZVRlPfLynn",
- "UEaHs7rtz14Hjmjp/xJj51kxPrJtv5igXW5vcS3gXTA9UH5Cg16mSzNBiNVuJpfmpXC5EAXBedo86y1z",
- "HFblDEqF/VqD0rGjgR/sayV0dhnmaytVEeAFWr+OyDeYU8HA0kmii1Ynn56wm6qrrkpBiymmTbz46vQ1",
- "sbPaPrZ0sK2UtUCjS3cVUSv5+NRlTRXg+Jv88eNsfyRsVq101hS2imU9Mi3a0lusFzqB5pgQO0fklbWE",
- "KW9nsZMQTL4pV1AEdbSsLoY0Yf6jNc2XaGLqXGRpkh9f4s1TZWuAD+pFN3UV8NwZuF2VN1vkbUqEXoK8",
- "YQrwFSZcQzfRUpN1zJk4feKl7vJkzbmllKM9ZIqmisK+aPfAWYHE+4ajkPUQv6eBwVZI3Lfi3Tn2iqZ5",
- "7pfP6zlvfdqepg7wd85GnFMuOMsxyXJMIMKkMOO8TSPyUcfdRGriTmjkcEWL9jXvvxwWk2X8PCN0iBt6",
- "boOvZlMtddg/NaxdMZcFaOU4GxRTX3vS+TUYV+DqZBgiCvmkkJHYlGg8e+MH35OMMN9DwlD1tfn2vTNj",
- "4kPoK8bRYOHQ5sRs63koFUMHIydMk4UA5dbTTXql3pk+R5j/qYD1+6PXYsHyc7bAMWw0lFm2Df0bDnXq",
- "AwFd4J1p+9K0dVl5m587UT120tOqcpOmK5PGyzGveRLBsfATHw8QILcZPxxtC7ltjeDF+9QQGlxj8BFU",
- "eA8PCKOp0tkriW1UBEtR2ILYt0nR1HyMR8B4zbj3hMUviDx6JeDG4HlN9FO5pNqKgKN42gXQMhHHjm/9",
- "rCv1rkP1cxIblOAa/RzpbWwLjCYYR9OgFdwo3xB/KAx1B8LES1o2EbCRcqEoVTkhqsA3Ir0CojHGYRi3",
- "L1HcvQB2VCWftt0xz/e+N1Eq+9GsLhagM1oUsbIlX+JXgl/9Wx9YQ1435S2qiuSY7LOb/XRIbW6iXHBV",
- "r7bM5RvccbqgIm+EGsKqwH6HMbvCbIP/7lMvvol93ft9mw90LfZL+Tt8rxeTeg1NZ4otsvGYwDvl7uho",
- "p74dobf9D0rppVh0AfkcRtIElwv3KMbfvjIXR5gScBBmbK+WJmMfhvQK/O6TXDS5prpcCa+yQQUTdF43",
- "ddq3myHSFdenePkl3pSGJm97v1ozcOplaZ58CE21S8miKdnKgpJpLmzIZ8+IPvQEpcI8bZTn4YzPbq1b",
- "EZp2wXzbcbjYUJ+WWSQdLbfzhbQbvK8z5Nvr1GNjnwEcv/crMl+By9NWSbhmovZBND6U1auE9tdOfePm",
- "uXd0/dEA8c9tfE6ayi9cZTy7TKeTf/uTdaYR4Fpu/gCG88GmD2o9D6Vda55qm5CmqNKoIkudW3FMdvxY",
- "InYnG3aqTe+olT0gq1djxIFh7evp5KzY68KMJfOf2FFixy5eyTqd67jNb4xHrBKKtbXNYiWuR8aMX2CV",
- "6iBX83AsH0t4DbnGgnZtjJQE2Cdzs5nM2+7/O+dxWp1uQutdquNt+Y2HVex23PGDFCRBGh1bAexofDbf",
- "0yYS1j7kuaEKc99LtHF3n76OfoA3n0Ou2fWOlC//XAIP0olMvV0GYZkHGWBY8xwFM4bub3VsAdqWkWUr",
- "PEHm/juDk3qOfAWbe4p0qCFakqx5i3WbZJGIAeQOmSERoWKRZtaQ7IJ/mGooA7HgIzttd2jTbierGQcJ",
- "jG45lydJc3G0SY22TBkvpzpqLtN1r1Rf+LIilRVmWI0xrX+8wuKXysU50SbZZKilk7NhSv4bl6wSE/Q0",
- "vhOfthKU/81n47KzlOwKwnrL6Km6obLwLaKmF2/VybbcR4NULr6SYB/oeTMza+Pwh77qSJJnfNKSl8KI",
- "EVnqXVA39L2JG7unbIBfm4cF4ZqDdHXpUf4thYJMCx+3vw2ObaiwUYy3QoJKFlawwCXTnb5t87ligRmK",
- "6U2pC14MF0gkrKiBTgZZV9NzbkP2S/vdv6X2BUZ2Wpgaet1d6c6/wGBqgMSQ6ufE3Za732jfxtjEOAeZ",
- "ec9TPwUrB9n1hlRSFHVuL+jwYDQGudEpULawkqidJh+usqcjBG+dr2BzbJUgXyLQ72AItJWcLOhB6r7e",
- "Jh/U/KZicC8OAt7ntFxNJ5UQZZZwdpwN88b2Kf6K5VdQEHNT+EjlRPVXch9t7I03+2a58XlSqwo4FA+O",
- "CDnl9m2Id2x3Cxf1Juf39Lb51zhrUdtUzs6odnTJ40H2mGRZ3pGb+WG28zAFhtXdcSo7yI6spOtEzlpJ",
- "byK1kI/GauVDV3O/Pm1LVBaKmExybj1WL/GgxwxH+JI9SLmAjkxKnKeLqFLEQjJv89reDBXHVDgZAqSB",
- "j3n03UDhBo8iIFpxNXIKbQYzl7tMzImE1ol82yRuw+KwMY2+P3MzS5ffzYWETplX01vIwos8TLX1mKmc",
- "MS2p3Nwm1dqgOO3AepLE8s5wrCYSq11IG401xGFZipsMmVXW5DaPqbamnepexr6cS9vPnOoZBHFdVDlB",
- "bUOWtCC5kBLysEf82Z6FaiUkZKXAMK+YB3qujdy9wrc6nJRiQUSViwJsjYA4BaXmqjmnKDZBEFUTRYGl",
- "HXz0afsEdDxyykNVRrbJeeyiM+vLTASegnLJeByGbOMhvFuqCu+Vnf9sjhYhhrEu3bfXVvoMayvDnqWV",
- "WVl6g0GqujL5UdUYjoQPb8wUz8hKKO00OzuSaoZqQ7zu54JrKcqyawSyIvHCWba/o+vTPNevhbia0fzq",
- "AeqRXOhmpcXUP0vtB+O1M8leRqaRZaAvlhE7L87iT93etZ4d59i7RGsA5vvdHGu3jfs0Vsq6u65+bXae",
- "yJ2pxYrlcRr+c0W3JWPSYiwhmurJVkmyj/OxGTLq8HJoghmQJQ3RDNwQbGy/HE9zTl1kHua/KPH2xyVz",
- "cJdE4mIa8kkntWR5UrbqAYCQ2hejupa2tFIo+TRcRSzsC3N0SfcBHcnFMfLnbrCZEQ4OlIY7ATWINmwA",
- "vG+V/alNyWUjF2di7b8/aHN23Qr4j9upPFaOPnKKG9Jy1fJ9fo8ER4hnBt4af4SFw/0NujsKqSmDN/JG",
- "DQBIxyV1YBgVnbQvGHPKSigyqhOXO9qEpoFm61609IubMuU4eU7thb0EYsauJbh8E1ak7hVDr6ghJdE0",
- "H1pueQFrUJgMwlZ0psr6Gby/A0pbVqqnfIsqK+EaOuFaLglGjaIduwbfVzWdSQFQofevb5OKxSGFd3nP",
- "UOHWngWRLGOwG7VcWMTanSI7zBJRI8qaZ/aYqLFHyUB0zYqadvCn9hU5umY3c5QjqBrI5JnX28ZO86Md",
- "4a0f4NT3j4kyHhPvx/GhvVlQHHXbGNDOuMRapU49j4clhhleGocGzlY0jk9L4i3fUBW94WkD4JDkW/Vm",
- "5D4xwQPEfrWGHKWabtzd3XFCcDCietmbkiK4bHb49obkz0LDW0k4OV5M1VCADHarpcbThRPYsQGWs+RG",
- "7DVSM5aQcvzf8b8pVuC3Axm92la0CjW4V+A9dphQunFWOIGWNReajy+cunyCfaWcBZHVK7ohQuI/Rl/7",
- "taYlm2/whFrwfTeiltSQkHMRWt+1i1c0E28XTKYeMG8XEH4qu242dsxguI0ZJQDaXIHOOIWZga4g3AZ0",
- "y1vOk2vDclQ9WzGl8LLrbecQC27xPifEihahjoyZ6bqlRH2uUtP7f7avtsKpfEKpqqS5r18GRNFVzyBu",
- "axR64tJLWG1/1jdUjz0JNHUPW6KV/jlvcQvj3p6RG7FY+VS9hw7Yg3pwg1IXd1rGPgWK25fRWx5EjlrK",
- "oXdhbHzIAGh0MvusXjvAt9kYfQawT4H/aNLI1DLGgP9HwXuijF4Ir62Y9wmw3HnyH4HV2lVnYp1JmKtd",
- "oRDWsGoUYdkmC/DGScZzCVTZ2JCzH5zK1uZEZNyokDZ6sfG+NaMUMGe8ZZaMV7WOaACYGpFvAoSF5mlE",
- "a8LZk5ISjBh2TcsfrkFKVqQ2zpwOW8YrzEnvTfKub0T5b+7U4QBMtdoPviSE9qVa0Mxc4LbqjQ0sVJry",
- "gsoibM44yUGae5/c0I26ve/DQCtrI1/s8H7QQJrpvm8P/CBI2haQcuPcl3f0TDQA0gO6KEa4FjCCNeJW",
- "sEYRLRKehCEM8bQKdJ2VYoHvyxIE6JJPou/HKiuCo8HWykP7zaPYb7B9Gsy77Q6+FjjrmCm2n7MfEHWo",
- "8PzImd560qw1rf/gz0Zk2oPg6Z8v2rBwuzlD+o+90bzARwydd5r9ovN+r214iJ0PEp6MrgU3sYvoIHcP",
- "fENz7fh6Rl0ffOwlqNVhM9Rt1ZbAb1BtkDPNXeDO0OgzUIotUqbuHe2eNiFrSfb3QAI8W6nWna3utE0w",
- "hRlnnyJQ21/OZpWosnxMNKBNzV84g7aDtAtjgj4Cc3Vi3U3ghGqKVXQSm3SqVuxbBytZNWOXX6bKtynZ",
- "KYNGgoN2jeVijrwMj7A14+Abj8Z4Me2/PuoabBomQSiRkNcSDZo3dLO7rlAiJez5P06fP37y85PnXxDT",
- "gBRsAapNK9yry9NGjDHet7N82hixwfJ0fBP8u3SLOO8p889tmk1xZ81yW9XmDBxUJdrHEhq5ACLHMVIP",
- "5lZ7heO0Qd9/rO2KLfLgOxZDwe+zZy6yNb6AU+70FzEn23lGt+afjvMLI/xHLim/tbdYYMoem34XfRt6",
- "bA2yfxgqjDz0PhjtNcv9PSguKmXernzuKNCGj34j5IEAJF7zdd5hhdW123yV0tp20QrsHWb9S+y71pG2",
- "M+wcIfEddoAXPs9r2zWR0g6cz5z48bsGKcFS3qcoobP8XS/+3AJbz2OwRU7V1RqUZUtiKFwEzznVy+aV",
- "ZEK2HTymxFLaRr8py8gjTKt945kKCccIlvKalp+ea2CN9VPEBxRv008vwpd4IZItKtXt8oC9pqPmDl7d",
- "HW5q/gYffv4TzB5F7zk3lHM6Dm4ztJ1gYeOFvxXsW1Jyg2PaoJLHX5CZy8leSciZ6jszrccpiAq8Bsnm",
- "LoAP1nrHS7dd6/xJ6DuQ8dxHHpDvA6eEQONPC2F7RD8zU0mc3CiVx6hvQBYR/MV4VFjDccd1ccf83bdL",
- "KxEkiNozrcSwOuXY5dnUCebSqRUM1zn6tu7gNnJRt2sbmxNldBrwy8t3ejYmlUk8ZbfpjrlUDpK7e6/M",
- "3b9DFhWLIzeGmzdGMT+l8mra3JGJFK69/ahZuTPMoJOQ9+N0sgAOiilMOfuzKzHwae9SD4F92T08qhbW",
- "u6SjsIiJrLUzeTBVkGp3RJZd1y2SUxdfTeW1ZHqD5SW9GYb9HM338k2TO8Dlnmg8IO7u0+IKmhK/baaB",
- "Wvnb9RtBS7yPrGOGm1tIlEfkqzVdVaUzKpK/35v9FZ7+7Vnx6Onjv87+9uj5oxyePX/x6BF98Yw+fvH0",
- "MTz52/Nnj+Dx/IsXsyfFk2dPZs+ePPvi+Yv86bPHs2dfvPjrPcOHDMgWUJ8B+mTyf7LTciGy0zdn2YUB",
- "tsUJrdi3YPYGdeW5wPJnBqk5nkRYUVZOTvxP/8ufsKNcrNrh/a8TV8ZjstS6UifHxzc3N0dhl+MFPi3O",
- "tKjz5bGfB4tSdeSVN2dNTLKNnsAdbW2QuKmOFE7x29uvzi/I6Zuzo5ZgJieTR0ePjh67CqicVmxyMnmK",
- "P+HpWeK+Hztim5x8+DidHC+BlpiJw/yxAi1Z7j9JoMXG/V/d0MUC5BGGndufrp8ce7Hi+IN7Yv1x27fj",
- "0DF//KHzEr3Y0ROdyscffB3E7a07NfBcPE/QYSQU25odz7D2wdimoILG6aWgsqGOP6C4nPz92Nk84h9R",
- "bbHn4dina4i37GDpg14bWHf0WLMiWElOdb6sq+MP+B+k3o+WnZQQS91gc3JT0jafEqYJnQmJlfN0vjQc",
- "xJfsYipoGRbSPSvMMTC9XloIfAVU9NJOTt4NA9BxIOJHQp5hDkR7pDsztVwbHZxBnf/mTuq0b2+md4+y",
- "F+8/PJ4+fvTxL+bmcX8+f/px5FuNl8245Ly5VkY2fI/1rjAqDU/6k0ePPHtzykNAmsfuJAeLGyhR7SLt",
- "JjVBb8Nb39FCOsDYbVVvINIgY0ddnt7wQ+EFOfqzPVe81dLUSTSIw/cLIRTEv4vEuR9/urnPuA21MzeH",
- "veE+TifPP+Xqz7gheVoSbBkUWhxu/Y/8iosb7lsacaRerajc+GOsOkyBuM3GS48uFDq+JLumKAVywYPs",
- "SXwxeY/v8GNvUxP8Rml6C35zbnr9N7/pNIwX2rbmD1eUM3DX2sukqUECPqWcD9GkxTXluY8Gb4NMcb+s",
- "wOsIo4ljqhXM69K/O65KNre1ToUo/USqrirDceZUNZTlIluNBGufcTZDk5rngluPOAYR+1yK+BwTH26q",
- "K1Z1urC5oSpXhZMDuJd6uOm/1iA37a6vmBFF2+0dxGz8nizc4vEALLw70IFZ+JM92eiff8X/tS+tZ4/+",
- "9ukg8NkKLtgKRK3/rJfmub3B7nRpOhneJtw+1mt+jFFyxx86Gon7PNBIur+33cMW1ytRgFchxHxuq7hv",
- "+3z8wf4bTATrCiRbAbflVN2v9uY4xmKem+HPG55Hfxyuo5OIMfHz8YfOn12VTS1rXYgbW1UqKoXgpUhL",
- "V0UZ7fONrm9uNzdAm/mR/FA1149LZ0AoVtERtW6NMTbO170TbNxleE+ppfNLLBjHCdDvgbPYcuE0uJYV",
- "mBsPTQw9icdB9r0oYCjxxK43B2PnimsIPFKc+87X3ZCdftyP/NE/Y52LQ+IwH2vV//v4hjJt5CKXghEx",
- "OuysgZbHrt5K79c2xfngC+ZtD34MHztGfz2mXWrvmj5suf/Ex75dJPbV2QUSjXyksf/c2khDmyOSS2Nt",
- "fPfe7DpWVnaU1JrQTo6P8enJUih9jPJl17wWfnzfbLQvCNhs+Mf3H/9/AAAA//9K9iBFafkAAA==",
+ "H4sIAAAAAAAC/+x9/XMbN7Lgv4Livip/HEeSv7JrX229U+wkq4uTuCwle+9ZvgScaZJYDYEJgJHI+Py/",
+ "X6EBzGBmAHIoMXZS7/1ki4OPRqPR6C90f5jkYlUJDlyryYsPk4pKugINEv+ieS5qrjNWmL8KULlklWaC",
+ "T174b0RpyfhiMp0w82tF9XIynXC6graN6T+dSPi1ZhKKyQsta5hOVL6EFTUD601lWjcjrbOFyNwQp3aI",
+ "s1eTj1s+0KKQoNQQyh94uSGM52VdANGSckVz80mRG6aXRC+ZIq4zYZwIDkTMiV52GpM5g7JQR36Rv9Yg",
+ "N8Eq3eTpJX1sQcykKGEI50uxmjEOHipogGo2hGhBCphjoyXVxMxgYPUNtSAKqMyXZC7kDlAtECG8wOvV",
+ "5MW7iQJegMTdyoFd43/nEuA3yDSVC9CT99PY4uYaZKbZKrK0M4d9CaoutSLYFte4YNfAiel1RL6rlSYz",
+ "IJSTt1+/JE+ePHluFrKiWkPhiCy5qnb2cE22++TFpKAa/OchrdFyISTlRda0f/v1S5z/3C1wbCuqFMQP",
+ "y6n5Qs5epRbgO0ZIiHENC9yHDvWbHpFD0f48g7mQMHJPbOODbko4/2fdlZzqfFkJxnVkXwh+JfZzlIcF",
+ "3bfxsAaATvvKYEqaQd+dZM/ff3g0fXTy8S/vTrP/dH8+e/Jx5PJfNuPuwEC0YV5LCTzfZAsJFE/LkvIh",
+ "Pt46elBLUZcFWdJr3Hy6Qlbv+hLT17LOa1rWhk5YLsVpuRCKUEdGBcxpXWriJyY1Lw2bMqM5aidMkUqK",
+ "a1ZAMTXc92bJ8iXJqbJDYDtyw8rS0GCtoEjRWnx1Ww7TxxAlBq5b4QMX9MdFRruuHZiANXKDLC+FgkyL",
+ "HdeTv3EoL0h4obR3ldrvsiIXSyA4uflgL1vEHTc0XZYbonFfC0IVocRfTVPC5mQjanKDm1OyK+zvVmOw",
+ "tiIGabg5nXvUHN4U+gbIiCBvJkQJlCPy/LkboozP2aKWoMjNEvTS3XkSVCW4AiJm/4Jcm23/3+c/fE+E",
+ "JN+BUnQBb2h+RYDnooDiiJzNCRc6IA1HS4hD0zO1DgdX7JL/lxKGJlZqUdH8Kn6jl2zFIqv6jq7Zql4R",
+ "Xq9mIM2W+itECyJB15KnALIj7iDFFV0PJ72QNc9x/9tpO7KcoTamqpJuEGEruv77ydSBowgtS1IBLxhf",
+ "EL3mSTnOzL0bvEyKmhcjxBxt9jS4WFUFOZszKEgzyhZI3DS74GF8P3ha4SsAxw+SBKeZZQc4HNYRmjGn",
+ "23whFV1AQDJH5EfH3PCrFlfAG0Insw1+qiRcM1GrplMCRpx6uwTOhYaskjBnERo7d+gwDMa2cRx45WSg",
+ "XHBNGYfCMGcEWmiwzCoJUzDhdn1neIvPqIIvnqbu+PbryN2fi/6ub93xUbuNjTJ7JCNXp/nqDmxcsur0",
+ "H6EfhnMrtsjsz4ONZIsLc9vMWYk30b/M/nk01AqZQAcR/m5SbMGpriW8uOQPzV8kI+ea8oLKwvyysj99",
+ "V5eanbOF+am0P70WC5afs0UCmQ2sUYULu63sP2a8ODvW66he8VqIq7oKF5R3FNfZhpy9Sm2yHXNfwjxt",
+ "tN1Q8bhYe2Vk3x563WxkAsgk7ipqGl7BRoKBluZz/Gc9R3qic/mb+aeqStNbV/MYag0duysZzQfOrHBa",
+ "VSXLqUHiW/fZfDVMAKwiQdsWx3ihvvgQgFhJUYHUzA5KqyorRU7LTGmqcaR/kzCfvJj85bi1vxzb7uo4",
+ "mPy16XWOnYzIasWgjFbVHmO8MaKP2sIsDIPGT8gmLNtDoYlxu4mGlJhhwSVcU66PWpWlww+aA/zOzdTi",
+ "20o7Ft89FSyJcGIbzkBZCdg2vKdIgHqCaCWIVhRIF6WYNT/cP62qFoP4/bSqLD5QegSGghmsmdLqAS6f",
+ "ticpnOfs1RH5JhwbRXHBy425HKyoYe6Gubu13C3W2JbcGtoR7ymC2ynkkdkajwYj5h+C4lCtWIrSSD07",
+ "acU0/odrG5KZ+X1U5z8HiYW4TRMXKloOc1bHwV8C5eZ+j3KGhOPMPUfktN/3dmRjRokTzK1oZet+2nG3",
+ "4LFB4Y2klQXQfbF3KeOopNlGFtY7ctORjC4Kc3CGA1pDqG591naehygkSAo9GL4sRX71D6qWBzjzMz/W",
+ "8PjhNGQJtABJllQtjyYxKSM8Xu1oY46YaYgKPpkFUx01SzzU8nYsraCaBktz8MbFEot67IdMD2REd/kB",
+ "/0NLYj6bs21Yvx32iFwgA1P2ODsnQ2G0fasg2JlMA7RCCLKyCj4xWvdeUL5sJ4/v06g9+sraFNwOuUU0",
+ "O3SxZoU61DbhYKm9CgXUs1dWo9OwUhGtrVkVlZJu4mu3c41BwIWoSAnXUPZBsCwLR7MIEeuD84UvxToG",
+ "05diPeAJYg0H2QkzDsrVHrs74HvlIBNyN+Zx7DFINws0srxC9sBDEcjM0lqrT2dC3o4d9/gsJ60NnlAz",
+ "anAbTXtIwqZ1lbmzGbHj2Qa9gVq353Yu2h8+hrEOFs41/R2woMyoh8BCd6BDY0GsKlbCAUh/Gb0FZ1TB",
+ "k8fk/B+nzx49/vnxsy8MSVZSLCRdkdlGgyL3nbJKlN6U8GC4MlQX61LHR//iqbfcdseNjaNELXNY0Wo4",
+ "lLUIW5nQNiOm3RBrXTTjqhsAR3FEMFebRTuxzg4D2iumjMi5mh1kM1IIK9pZCuIgKWAnMe27vHaaTbhE",
+ "uZH1IXR7kFLI6NVVSaFFLsrsGqRiIuJeeuNaENfCy/tV/3cLLbmhipi50RZec5SwIpSl13w837dDX6x5",
+ "i5utnN+uN7I6N++Yfeki35tWFalAZnrNSQGzetFRDedSrAglBXbEO/ob0FZuYSs413RV/TCfH0Z3FjhQ",
+ "RIdlK1BmJmJbGKlBQS64DQ3Zoa66Ucegp48Yb7PUaQAcRs43PEfD6yGObVqTXzGOXiC14Xmg1hsYSygW",
+ "HbK8u/qeQoed6p6KgGPQ8Ro/o+XnFZSafi3kRSv2fSNFXR1cyOvPOXY51C3G2ZYK09cbFRhflN1wpIWB",
+ "/Si2xs+yoJf++Lo1IPRIka/ZYqkDPeuNFGJ+eBhjs8QAxQ9WSy1Nn6Gu+r0oDDPRtTqACNYO1nI4Q7ch",
+ "X6MzUWtCCRcF4ObXKi6cJQJY0HOODn8dynt6aRXPGRjqymltVltXBN3Zg/ui7ZjR3J7QDFGjEs68xgtr",
+ "W9npbHBEKYEWGzID4ETMnMfM+fJwkRR98dqLN040jPCLDlyVFDkoBUXmLHU7QfPt7NWht+AJAUeAm1mI",
+ "EmRO5Z2BvbreCecVbDKMHFHk/rc/qQefAV4tNC13IBbbxNDb2D2cW3QI9bjptxFcf/KQ7KgE4u8VogVK",
+ "syVoSKFwL5wk968P0WAX746Wa5DooPxdKd5PcjcCakD9nen9rtDWVSIe0qm3RsIzG8YpF16wig1WUqWz",
+ "XWzZNOro4GYFASeMcWIcOCF4vaZKW6c64wXaAu11gvNYIcxMkQY4qYaYkX/yGshw7Nzcg1zVqlFHVF1V",
+ "QmooYmvgsN4y1/ewbuYS82DsRufRgtQKdo2cwlIwvkOWXYlFENWN78lFnQwXhx4ac89voqjsANEiYhsg",
+ "575VgN0wJiwBCFMtoi3hMNWjnCYQbTpRWlSV4RY6q3nTL4Wmc9v6VP/Yth0SF9XtvV0IUBiK5to7yG8s",
+ "Zm004JIq4uAgK3plZA80g1jv/xBmcxgzxXgO2TbKRxXPtAqPwM5DWlcLSQvICijpZjjoj/YzsZ+3DYA7",
+ "3qq7QkNmw7rim95Sso+i2TK0wPFUTHgk+IXk5ggaVaAlENd7x8gF4Ngx5uTo6F4zFM4V3SI/Hi7bbnVk",
+ "RLwNr4U2O+7oAUF2HH0MwAk8NEPfHhXYOWt1z/4U/wHKTdDIEftPsgGVWkI7/l4LSNhQXcR8cF567L3H",
+ "gaNsM8nGdvCR1JFNGHTfUKlZzirUdb6FzcFVv/4EUb8rKUBTVkJBgg9WDazC/sQGJPXHvJ0qOMr2NgR/",
+ "YHyLLKdkCkWeLvBXsEGd+42NdA1MHYfQZSOjmvuJcoKA+vg5I4KHTWBNc11ujKCml7AhNyCBqHq2Ylrb",
+ "CPauqqtFlYUDRP0aW2Z0Xs2oT3Grm/UchwqWN9yK6cTqBNvhu+gpBh10OF2gEqIcYSEbICMKwagAGFIJ",
+ "s+vMBdP7cGpPSR0gHdNGl3Zz/d9THTTjCsh/iJrklKPKVWtoZBohUVBAAdLMYESwZk4X6tJiCEpYgdUk",
+ "8cvDh/2FP3zo9pwpMocb/wLFNOyj4+FDtOO8EUp3DtcB7KHmuJ1Frg90+JiLz2khfZ6yO9TCjTxmJ9/0",
+ "Bm+8ROZMKeUI1yz/zgygdzLXY9Ye0si4MBMcd5Qvp+OyH64b9/2creqS6kN4reCalpm4BilZATs5uZuY",
+ "Cf7VNS1/aLrh6xrIDY3mkOX4JmTkWHBh+thnJGYcxpk5wDaEdCxAcGZ7ndtOO1TMNkqPrVZQMKqh3JBK",
+ "Qg729YSRHFWz1CNi4yrzJeULVBikqBcusM+Ogwy/VtY0I2s+GCIqVOk1z9DIHbsAXDC3f0BjxCmgRqXr",
+ "W8itAnNDm/ncm6kxN3OwB32PQdRJNp0kNV6D1OtW47XI6b4CGnEZdOS9AD/txCNdKYg6I/sM8RVuizlM",
+ "ZnN/H5N9O3QMyuHEQahh+zEVbWjU7XJzAKHHDkQkVBIUXlGhmUrZr2Ievvhzd5jaKA2roSXfdv05cfze",
+ "JvVFwUvGIVsJDpvoI3fG4Tv8GD1OeE0mOqPAkurb10E68PfA6s4zhhrvil/c7f4J7Xus1NdCHsolagcc",
+ "Ld6P8EDudLe7KW/rJ6VlGXEtuvdAfQagpk3+ASYJVUrkDGW2s0JN7UFz3kj3eKiL/jdNlPMBzl5/3J4P",
+ "LXxqijZiKCtCSV4ytCALrrSsc33JKdqogqVGgp+8Mp62Wr70TeJm0ogV0w11ySkGvjWWq2jAxhwiZpqv",
+ "AbzxUtWLBSjd03XmAJfctWKc1JxpnGtljktmz0sFEiOQjmzLFd2QuaEJLchvIAWZ1bor/eNzN6VZWTqH",
+ "npmGiPklp5qUQJUm3zF+scbhvNPfH1kO+kbIqwYL8dt9ARwUU1k8SOsb+xUDit3yly64GNMT2M8+WLN9",
+ "fzsxy+w8uf+/9//9xbvT7D9p9ttJ9vx/HL//8PTjg4eDHx9//Pvf/1/3pycf//7g3/8ttlMe9thjLAf5",
+ "2SunGZ+9QvWn9QENYP9k9v8V41mUyMJojh5tkfv48NgR0IOucUwv4ZLrNTeEdE1LVhjechty6N8wg7No",
+ "T0ePajob0TOG+bXuqVTcgcuQCJPpscZbS1HDuMb4s0d0SrqXjHhe5jW3W+mlb/uqx8eXifm0edpqs968",
+ "IPjucUl9cKT78/GzLybT9r1i830ynbiv7yOUzIp17FVqAeuYrugOCB6Me4pUdKNAx7kHwh4NpbOxHeGw",
+ "K1jNQKolqz49p1CazeIczr+VcDanNT/jNjDenB90cW6c50TMPz3cWgIUUOllLBtGR1DDVu1uAvTCTiop",
+ "roFPCTuCo77NpzD6ogvqK4HOMSsDap9ijDbUnANLaJ4qAqyHCxllWInRT+9ZgLv81cHVITdwDK7+nI0/",
+ "0/+tBbn3zVcX5NgxTHXPPpC2QwdPWiOqtHu11QlIMtzM5gCyQt4lv+SvYI7WB8FfXPKCano8o4rl6rhW",
+ "IL+kJeU5HC0EeeEfgr2iml7ygaSVTNMVPMEjVT0rWU6uQoWkJU+bemU4wuXlO1ouxOXl+0FsxlB9cFNF",
+ "+YudIDOCsKh15hJHZBJuqIz5vlSTOABHtplhts1qhWxRWwOpT0zhxo/zPFpVqv+AeLj8qirN8gMyVO55",
+ "rNkyorSQXhYxAoqFBvf3e+EuBklvvF2lVqDILytavWNcvyfZZX1y8gRI50XtL+7KNzS5qWC0dSX5wLlv",
+ "VMGFW7US1lrSrKKLmIvt8vKdBlrh7qO8vEIbR1kS7NZ5yesD83GodgEeH+kNsHDs/SoRF3due/kkYfEl",
+ "4CfcQmxjxI3W8X/b/Qre9t56u3rvgwe7VOtlZs52dFXKkLjfmSZ30MIIWT4aQ7EFaqsuzdIMSL6E/Mrl",
+ "v4FVpTfTTncf8OMETc86mLKZkezLPMzNgQ6KGZC6KqgTxSnf9JMkKNDahxW/hSvYXIg2tcc+WRG6j/RV",
+ "6qAipQbSpSHW8Ni6Mfqb76LKULGvKv/WHR89erJ40dCF75M+yFbkPcAhjhFF5xF5ChFURhBhiT+Bglss",
+ "1Ix3J9KPLc9oGTN780WyJHneT1yTVnlyAWDhatDqbr+vANOsiRtFZtTI7cJlCLMP0QMuViu6gISEHPqI",
+ "Rj737viVcJBd9170phPz/oU2uG+iINvGmVlzlFLAfDGkgspML+zPz2TdkM4zgYk/HcJmJYpJTXykZTpU",
+ "dnx1NpNhCrQ4AYPkrcDhwehiJJRsllT55GWY482f5VEywO+YWGFbOp2zIGItSOTWJMvxPLd/TgfapUuq",
+ "4zPp+PQ5oWo5IhWOkfAxSD62HYKjAFRACQu7cNvYE0qb5KHdIAPHD/N5yTiQLBb8FphBg2vGzQFGPn5I",
+ "iLXAk9EjxMg4ABvd6zgw+V6EZ5Mv9gGSuyQV1I+Njvngb4g/H7Ph4EbkEZVh4Szh1co9B6AuYrK5v3px",
+ "uzgMYXxKDJu7pqVhc07jawcZZHVBsbWXw8UFeDxIibNbHCD2YtlrTfYqus1qQpnJAx0X6LZAPBPrzL4f",
+ "jUq8s/XM0Hs0Qh5fs8YOps2fc0+RmVhj0BBeLTYiewcsaTg8GIGGv2YK6RX7pW5zC8y2abdLUzEqVEgy",
+ "zpzXkEtKnBgzdUKCSZHL/SAlzq0A6Bk72vzSTvndqaR2xZPhZd7eatM21Zt/fBQ7/qkjFN2lBP6GVpgm",
+ "ic2bvsQStVN0Y1+6+XsCETJG9IZNDJ00Q1eQghJQKcg6QlR2FfOcGt0G8MY5990C4wVmCaJ88yAIqJKw",
+ "YEpDa0T3cRKfwzxJMTmhEPP06nQl52Z9b4VorinrRsSOnWV+8hVgRPKcSaUz9EBEl2Aafa1Qqf7aNI3L",
+ "St2QLZvKlxVx3oDTXsEmK1hZx+nVzfvtKzPt9w1LVPUM+S3jNmBlhqmno4GcW6a2sb5bF/zaLvg1Pdh6",
+ "x50G09RMLA25dOf4k5yLHufdxg4iBBgjjuGuJVG6hUEGD3CH3DGQmwIf/9E26+vgMBV+7J1RO/4ZcOqO",
+ "siNF1xIYDLaugqGbyIglTAeZm4cvYxNngFYVK9Y9W6gdNakx070MHj7fXQ8LuLtusB0Y6MblRcOcO7kC",
+ "XfSfs/kco4B8bEQ4Gw7oYt1AopZj34QWtUSjWifYbpiYshHsRq7925/OtZB0Ac4wmlmQ7jQELmcfNARp",
+ "HxXRzHo4CzafQ2gQVLcxZnWA65t9osUdRhBZ3GpYM66/eBojox3U08K4G2VxionQQspNdDE0vHqxKtA7",
+ "m8olwdbcwnoafUH6LWyyn4yGQirKpGojxpwltMv/9tj169W3sMGRdwZiGcB27AqqqW8BaTBmFmw+2YcT",
+ "jQoU5jDFpA+dLdxjp07ju3SgrXFZZ9PE34Zld7Kydpdyl4PR+u0MLGN24zzuLjOnB7qI75Pyrk1gCWNc",
+ "SI6ByBVOxZSv0TO8iprn0bto9wJo6YkXlzP5OJ3czTkVu83ciDtw/aa5QKN4xuAn66zo+Jr3RDmtKimu",
+ "aZk5F17q8pfi2l3+2Nx7/D6xMBmn7IuvTl+/ceB/nE7yEqjMGmUsuSpsV/1pVmXz1G6/SlBi8VYRq6wH",
+ "m98k1wzdfjdLcMUUAn1/kPW5dekGR9G5AefxGMydvM95n+0St3ihoWqc0K2DxPqgu35nek1Z6T0THtpE",
+ "vCQublzq8ChXCAe4s/86CEPIDspuBqc7fjpa6trBk3CuHzBbWlzj4C6XGrIi54+mB5eevhayw/zdY5mo",
+ "P/v3E6uMkG3xmAgf9AV6+sLUEbGC1y+LX8xpfPgwPGoPH07JL6X7EACIv8/c76hfPHwYdTVELQmGSaCh",
+ "gNMVPGgCf5Mb8WnNThxuxl3Qp9erRrIUaTJsKNQ6pj26bxz2biRz+CzcLwWUYH7a/baut+kW3SEwY07Q",
+ "eepxTBP3tLI1gRQRvB/mh++yDGkhs19RzHpuPTfDI8TrFXo7MlWyPO4H5jNl2Cu38T2mMcHGCYOZGbFm",
+ "iXAxXrNgLNNsTBq/HpDBHFFkqmgmwRZ3M+GOd83ZrzUQVhitZs5A4r3Wu+q8coCjDgRSo3oO53ID2yiC",
+ "dvi72EHCjP99mRGB2G4ECaOJBuC+asz6fqGN16zVmfYNSgxnHDDuLQGFjj4cNdsHFstuVNA4PWZMbUjP",
+ "6FzpgcQc0VqPTGVzKX6DuC0aTfiRt9m+xgHDSNzfIFTPwgpnHZbSeKDakpXt7Lu2e7xunNr4O+vCftFN",
+ "WYXbXKbxU73fRt5G6VXxDKIOySklLHRHdqNVE6wFj1cQn4UZ7X2oAuX2PNmHyZ1HD/FTGT4vOrbjt6fS",
+ "wTx4klXSmxmNpfs3upCBKdjeTlCFFsR39hugmme3dnYSBBU2bZlNblSBbHNTDBMl3lKvsdOO1mhaBQYp",
+ "KlRdpjYQrFQiMkzNbyi3ZRJNP8uvXG8F1gtqet0IianJVDz+o4CcraLm2MvLd0U+9PUXbMFsBcBaQVBi",
+ "zg1kq6taKnJl+prH5A41Z3NyMg3qXLrdKNg1U2xWArZ4ZFvMqMLrsvFINl3M8oDrpcLmj0c0X9a8kFDo",
+ "pbKIVYI0uicKeU0U0wz0DQAnJ9ju0XNyH+O3FLuGBwaLTgiavHj0HL3v9o+T2C3rKjhuY9kF8ux/Op4d",
+ "p2MMYLNjGCbpRj2KZnGyJZzTt8OW02S7jjlL2NJdKLvP0opyuoB4yPBqB0y2L+4melR7eOHWGwBKS7Eh",
+ "TMfnB00Nf0o8QzTsz4JBcrFaMb1yUT5KrAw9tfXj7KR+OFvM1JX+8HD5jxgsV/lYoZ6t6xOrMXSVeEaA",
+ "IY3f0xV00Tol1OajK1kbxuoLEpEzn+4Sa6E0JVAsbsxcZukoS2JU65xUknGN9o9az7O/GbVY0tywv6MU",
+ "uNnsi6eRmiLdtPt8P8A/Od4lKJDXcdTLBNl7mcX1Jfe54NnKcJTiQfvsNziVyai+ePxWKohs+9BjJV8z",
+ "SpYkt7pDbjTg1HciPL5lwDuSYrOevehx75V9csqsZZw8aG126Me3r52UsRIylsO6Pe5O4pCgJYNrfMQR",
+ "3yQz5h33QpajduEu0H/eEBQvcgZimT/LUUUg8Ghue79ppPifvmuT8aJj1T6O6dkAhYxYO53d7hMHfO1n",
+ "dev7b23MDn5LYG402myl9wFWEqG6Nha36fOJn/NGzb12zzsGx0e/EGl0cJTjHz5EoB8+nDox+JfH3c+W",
+ "vT98GM+JGTW5mV9bLNxFI8a+sT38UkQMYL4AVRNQ5J7sRgyQqUvKfDBMcOaGmpJusZ9PL0Uc5jFIPOAv",
+ "fgouL9/hF48H/KOPiM/MLHED25Dm9GHvFjuLkkzRfA9CjSn5UqzHEk7vDvLE8wdAUQIlI81zuJJBMbeo",
+ "u35nvEhAo2bUGZTCKJlhnYrQnv/nwbNZ/HQLtmtWFj+16YZ6F4mkPF9GAzVnpuPPbdH1ZomWVUZT3y8p",
+ "51BGh7O67c9eB45o6f8SY+dZMT6ybb+YoF1ub3Et4F0wPVB+QoNepkszQYjVbiaX5qVwuRAFwXnaPOst",
+ "cxxW5QxKhf1ag9Kxo4Ef7GsldHYZ5msrVRHgBVq/jsg3mFPBwNJJootWJ5+esJuqq65KQYsppk28+Or0",
+ "NbGz2j62dLCtlLVAo0t3FVEr+fjUZU0V4Pib/PHjbH8kbFatdNYUtoplPTIt2tJbrBc6geaYEDtH5JW1",
+ "hClvZ7GTEEy+KVdQBHW0rC6GNGH+ozXNl2hi6lxkaZIfX+LNU2VrgA/qRTd1FfDcGbhdlTdb5G1KhF6C",
+ "vGEK8BUmXEM30VKTdcyZOH3ipe7yZM25pZSjPWSKporCvmj3wFmBxPuGo5D1EL+ngcFWSNy34t059oqm",
+ "ee6Xz+s5b33anqYO8HfORpxTLjjLMclyTCDCpDDjvE0j8lHH3URq4k5o5HBFi/Y1778cFpNl/DwjdIgb",
+ "em6Dr2ZTLXXYPzWsXTGXBWjlOBsUU1970vk1GFfg6mQYIgr5pJCR2JRoPHvjB9+TjDDfQ8JQ9bX59r0z",
+ "Y+JD6CvG0WDh0ObEbOt5KBVDByMnTJOFAOXW0016pd6ZPkeY/6mA9fuj12LB8nO2wDFsNJRZtg39Gw51",
+ "6gMBXeCdafvStHVZeZufO1E9dtLTqnKTpiuTxssxr3kSwbHwEx8PECC3GT8cbQu5bY3gxfvUEBpcY/AR",
+ "VHgPDwijqdLZK4ltVARLUdiC2LdJ0dR8jEfAeM2494TFL4g8eiXgxuB5TfRTuaTaioCjeNoF0DIRx45v",
+ "/awr9a5D9XMSG5TgGv0c6W1sC4wmGEfToBXcKN8QfygMdQfCxEtaNhGwkXKhKFU5IarANyK9AqIxxmEY",
+ "ty9R3L0AdlQln7bdMc/3vjdRKvvRrC4WoDNaFLGyJV/iV4Jf/VsfWENeN+UtqorkmOyzm/10SG1uolxw",
+ "Va+2zOUb3HG6oCJvhBrCqsB+hzG7wmyD/+5TL76Jfd37fZsPdC32S/k7fK8Xk3oNTWeKLbLxmMA75e7o",
+ "aKe+HaG3/Q9K6aVYdAH5HEbSBJcL9yjG374yF0eYEnAQZmyvliZjH4b0Cvzuk1w0uaa6XAmvskEFE3Re",
+ "N3Xat5sh0hXXp3j5Jd6UhiZve79aM3DqZWmefAhNtUvJoinZyoKSaS5syGfPiD70BKXCPG2U5+GMz26t",
+ "WxGadsF823G42FCfllkkHS2384W0G7yvM+Tb69RjY58BHL/3KzJfgcvTVkm4ZqL2QTQ+lNWrhPbXTn3j",
+ "5rl3dP3RAPHPbXxOmsovXGU8u0ynk3/7k3WmEeBabv4AhvPBpg9qPQ+lXWueapuQpqjSqCJLnVtxTHb8",
+ "WCJ2Jxt2qk3vqJU9IKtXY8SBYe3r6eSs2OvCjCXzn9hRYscuXsk6neu4zW+MR6wSirW1zWIlrkfGjF9g",
+ "leogV/NwLB9LeA25xoJ2bYyUBNgnc7OZzNvu/zvncVqdbkLrXarjbfmNh1XsdtzxgxQkQRodWwHsaHw2",
+ "39MmEtY+5LmhCnPfS7Rxd5++jn6AN59Drtn1jpQv/1wCD9KJTL1dBmGZBxlgWPMcBTOG7m91bAHalpFl",
+ "KzxB5v47g5N6jnwFm3uKdKghWpKseYt1m2SRiAHkDpkhEaFikWbWkOyCf5hqKAOx4CM7bXdo024nqxkH",
+ "CYxuOZcnSXNxtEmNtkwZL6c6ai7Tda9UX/iyIpUVZliNMa1/vMLil8rFOdEm2WSopZOzYUr+G5esEhP0",
+ "NL4Tn7YSlP/NZ+Oys5TsCsJ6y+ipuqGy8C2iphdv1cm23EeDVC6+kmAf6HkzM2vj8Ie+6kiSZ3zSkpfC",
+ "iBFZ6l1QN/S9iRu7p2yAX5uHBeGag3R16VH+LYWCTAsft78Njm2osFGMt0KCShZWsMAl052+bfO5YoEZ",
+ "iulNqQteDBdIJKyogU4GWVfTc25D9kv73b+l9gVGdlqYGnrdXenOv8BgaoDEkOrnxN2Wu99o38bYxDgH",
+ "mXnPUz8FKwfZ9YZUUhR1bi/o8GA0BrnRKVC2sJKonSYfrrKnIwRvna9gc2yVIF8i0O9gCLSVnCzoQeq+",
+ "3iYf1PymYnAvDgLe57RcTSeVEGWWcHacDfPG9in+iuVXUBBzU/hI5UT1V3IfbeyNN/tmufF5UqsKOBQP",
+ "jgg55fZtiHdsdwsX9Sbn9/S2+dc4a1HbVM7OqHZ0yeNB9phkWd6Rm/lhtvMwBYbV3XEqO8iOrKTrRM5a",
+ "SW8itZCPxmrlQ1dzvz5tS1QWiphMcm49Vi/xoMcMR/iSPUi5gI5MSpyni6hSxEIyb/Pa3gwVx1Q4GQKk",
+ "gY959N1A4QaPIiBacTVyCm0GM5e7TMyJhNaJfNskbsPisDGNvj9zM0uX382FhE6ZV9NbyMKLPEy19Zip",
+ "nDEtqdzcJtXaoDjtwHqSxPLOcKwmEqtdSBuNNcRhWYqbDJlV1uQ2j6m2pp3qXsa+nEvbz5zqGQRxXVQ5",
+ "QW1DlrQguZAS8rBH/NmehWolJGSlwDCvmAd6ro3cvcK3OpyUYkFElYsCbI2AOAWl5qo5pyg2QRBVE0WB",
+ "pR189Gn7BHQ8cspDVUa2yXnsojPry0wEnoJyyXgchmzjIbxbqgrvlZ3/bI4WIYaxLt2311b6DGsrw56l",
+ "lVlZeoNBqroy+VHVGI6ED2/MFE/JSijtNDs7kmqGakO87ueCaynKsmsEsiLxwlm2v6Pr0zzXr4W4mtH8",
+ "6gHqkVzoZqXF1D9L7QfjtTPJXkamkWWgL5YROy/O4k/d3rWeHefYu0RrAOb73Rxrt437NFbKuruufm12",
+ "nsidqcWK5XEa/nNFtyVj0mIsIZrqyVZJso/zsRky6vByaIIZkCUN0QzcEGxsvxxPc05dZB7mvyjx9scl",
+ "c3CXROJiGvJJJ7VkeVK26gGAkNoXo7qWtrRSKPk0XEUs7AtzdEn3AR3JxTHy526wmREODpSGOwE1iDZs",
+ "ALxvlf2pTcllIxdnYu2/P2hzdt0K+I/bqTxWjj5yihvSctXyfX6PBEeIZwbeGn+EhcP9Dbo7Cqkpgzfy",
+ "Rg0ASMcldWAYFZ20LxhzykooMqoTlzvahKaBZutetPSLmzLlOHlO7YW9BGLGriW4fBNWpO4VQ6+oISXR",
+ "NB9abnkBa1CYDMJWdKbK+hm8vwNKW1aqp3yLKivhGjrhWi4JRo2iHbsG31c1nUkBUKH3r2+TisUhhXd5",
+ "z1Dh1p4FkSxjsBu1XFjE2p0iO8wSUSPKmmf2mKixR8lAdM2Kmnbwp/YVObpmN3OUI6gayOSZ19vGTvOj",
+ "HeGtH+DU94+JMh4T78fxob1ZUBx12xjQzrjEWqVOPY+HJYYZXhqHBs5WNI5PS+It31AVveFpA+CQ5Fv1",
+ "ZuQ+McEDxH61hhylmm7c3d1xQnAwonrZm5IiuGx2+PaG5M9Cw1tJODleTNVQgAx2q6XG04UT2LEBlrPk",
+ "Ruw1UjOWkHL83/G/KVbgtwMZvdpWtAo1uFfgPXaYULpxVjiBljUXmo8vnLp8gn2lnAWR1Su6IULiP0Zf",
+ "+7WmJZtv8IRa8H03opbUkJBzEVrftYtXNBNvF0ymHjBvFxB+KrtuNnbMYLiNGSUA2lyBzjiFmYGuINwG",
+ "dMtbzpNrw3JUPVsxpfCy623nEAtu8T4nxIoWoY6Mmem6pUR9rlLT+3+2r7bCqXxCqaqkua9fBkTRVc8g",
+ "bmsUeuLSS1htf9Y3VI89CTR1D1uilf45b3EL496ekRuxWPlUvYcO2IN6cINSF3daxj4FituX0VseRI5a",
+ "yqF3YWx8yABodDL7rF47wLfZGH0GsE+B/2jSyNQyxoD/R8F7ooxeCK+tmPcJsNx58h+B1dpVZ2KdSZir",
+ "XaEQ1rBqFGHZJgvwxknGcwlU2diQsx+cytbmRGTcqJA2erHxvjWjFDBnvGWWjFe1jmgAmBqRbwKEheZp",
+ "RGvC2ZOSEowYdk3LH65BSlakNs6cDlvGK8xJ703yrm9E+W/u1OEATLXaD74khPalWtDMXOC26o0NLFSa",
+ "8oLKImzOOMlBmnuf3NCNur3vw0ArayNf7PB+0ECa6b5vD/wgSNoWkHLj3Jd39Ew0ANIDuihGuBYwgjXi",
+ "VrBGES0SnoQhDPG0CnSdlWKB78sSBOiST6LvxyorgqPB1spD+82j2G+wfRrMu+0OvhY465gptp+zHxB1",
+ "qPD8yJneetKsNa3/4M9GZNqD4OmfL9qwcLs5Q/qPvdG8wEcMnXea/aLzfq9teIidDxKejK4FN7GL6CB3",
+ "D3xDc+34ekZdH3zsJajVYTPUbdWWwG9QbZAzzV3gztDoM1CKLVKm7h3tnjYha0n290ACPFup1p2t7rRN",
+ "MIUZZ58iUNtfzmaVqLJ8TDSgTc1fOIO2g7QLY4I+AnN1Yt1N4IRqilV0Ept0qlbsWwcrWTVjl1+myrcp",
+ "2SmDRoKDdo3lYo68DI+wNePgG4/GeDHtvz7qGmwaJkEokZDXEg2aN3Szu65QIiXs+T9Onz16/PPjZ18Q",
+ "04AUbAGqTSvcq8vTRowx3rezfNoYscHydHwT/Lt0izjvKfPPbZpNcWfNclvV5gwcVCXaxxIauQAixzFS",
+ "D+ZWe4XjtEHff6ztii3y4DsWQ8Hvs2cusjW+gFPu9BcxJ9t5Rrfmn47zCyP8Ry4pv7W3WGDKHpt+F30b",
+ "emwNsn8YKow89D4Y7TXL/T0oLipl3q587ijQho9+I+SBACRe83XeYYXVtdt8ldLadtEK7B1m/Uvsu9aR",
+ "tjPsHCHxHXaAFz7Pa9s1kdIOnM+c+PG7BinBUt6nKKGz/F0v/twCW89jsEVO1dUalGVLYihcBM851cvm",
+ "lWRCth08psRS2ka/KcvII0yrfeOZCgnHCJbympafnmtgjfVTxAcUb9NPL8KXeCGSLSrV7fKAvaaj5g5e",
+ "3R1uav4GH37+E8weRe85N5RzOg5uM7SdYGHjhb8V7FtScoNj2qCSR1+QmcvJXknImeo7M63HKYgKvAbJ",
+ "5i6AD9Z6x0u3Xev8Seg7kPHcRx6Q7wOnhEDjTwthe0Q/M1NJnNwolceob0AWEfzFeFRYw3HHdXHH/N23",
+ "SysRJIjaM63EsDrl2OXZ1Anm0qkVDNc5+rbu4DZyUbdrG5sTZXQa8MvLd3o2JpVJPGW36Y65VA6Su3uv",
+ "zN2/QxYViyM3hps3RjE/pfJq2tyRiRSuvf2oWbkzzKCTkPfjdLIADoopTDn7sysx8GnvUg+Bfdk9PKoW",
+ "1ruko7CIiay1M3kwVZBqd0SWXdctklMXX03ltWR6g+UlvRmG/RzN9/JNkzvA5Z5oPCDu7tPiCpoSv22m",
+ "gVr52/UbQUu8j6xjhptbSJRH5Ks1XVWlMyqSv9+b/RWe/O1pcfLk0V9nfzt5dpLD02fPT07o86f00fMn",
+ "j+Dx3549PYFH8y+ezx4Xj58+nj19/PSLZ8/zJ08fzZ5+8fyv9wwfMiBbQH0G6BeT/5OdlguRnb45yy4M",
+ "sC1OaMW+BbM3qCvPBZY/M0jN8STCirJy8sL/9L/8CTvKxaod3v86cWU8JkutK/Xi+Pjm5uYo7HK8wKfF",
+ "mRZ1vjz282BRqo688uasiUm20RO4o60NEjfVkcIpfnv71fkFOX1zdtQSzOTF5OTo5OiRq4DKacUmLyZP",
+ "8Cc8PUvc92NHbJMXHz5OJ8dLoCVm4jB/rEBLlvtPEmixcf9XN3SxAHmEYef2p+vHx16sOP7gnlh/3Pbt",
+ "OHTMH3/ovEQvdvREp/LxB18HcXvrTg08F88TdBgJxbZmxzOsfTC2KaigcXopqGyo4w8oLid/P3Y2j/hH",
+ "VFvseTj26RriLTtY+qDXBtYdPdasCFaSU50v6+r4A/4HqfejZSclxFI32JzclLTNp4RpQmdCYuU8nS8N",
+ "B/Elu5gKWoaFdM8KcwxMr5cWAl8BFb20kxfvhgHoOBDxIyHPMAeiPdKdmVqujQ7OoM5/cyd12rc307uT",
+ "7Pn7D4+mj04+/sXcPO7PZ08+jnyr8bIZl5w318rIhu+x3hVGpeFJf3xy4tmbUx4C0jx2JzlY3ECJahdp",
+ "N6kJehve+o4W0gHGbqt6A5EGGTvq8vSGHwovyNGf7rnirZamTqJBHL5fCKEg/l0kzv3o0819xm2onbk5",
+ "7A33cTp59ilXf8YNydOSYMug0OJw63/kV1zccN/SiCP1akXlxh9j1WEKxG02Xnp0odDxJdk1RSmQCx5k",
+ "T+KLyXt8hx97m5rgN0rTW/Cbc9Prv/lNp2G80LY1f7iinIG71l4mTQ0S8CnlfIgmLa4pz300eBtkivtl",
+ "BV5HGE0cU61gXpf+3XFVsrmtdSpE6SdSdVUZjjOnqqEsF9lqJFj7jLMZmtQ8F9x6xDGI2OdSxOeY+HBT",
+ "XbGq04XNDVW5KpwcwL3Uw03/tQa5aXd9xYwo2m7vIGbj92ThFo8HYOHdgQ7Mwh/vyUb//Cv+r31pPT35",
+ "26eDwGcruGArELX+s16a5/YGu9Ol6WR4m3D7WK/5MUbJHX/oaCTu80Aj6f7edg9bXK9EAV6FEPO5reK+",
+ "7fPxB/tvMBGsK5BsBdyWU3W/2pvjGIt5boY/b3ge/XG4jk4ixsTPx97EEdNyuy0/dP7sKndqWetC3Nj6",
+ "U1F5Ba9PWrp6y2jJb6wC5h50A7Q5IskPVXNRucQHhGK9HVHr1mxjI4Ldi8LGsYY3mlo6D8aCcZwAPSQ4",
+ "iy0sToMLXIG5G9EY0ZONHGTfiwKGslHsInQwdi7D5ihEynjf+WIcMt6P+x0U9ORYN+SQjMzHWvX/Pr6h",
+ "TBsJyiVrRIwOO2ug5bGrzNL7tU2GPviCGd6DH8NnkdFfj2n3XHSNJGbLUh0HFpTYV2dBSDTyMcn+c2tN",
+ "Da2TSC6NXfLde7PrWIPZUVJrbHtxfIyPVJZC6WOURLuGuPDj+2ajfenAZsPNt3UmJFswTsvMGbna8lKT",
+ "x0cnk4//PwAA///7V+betvkAAA==",
}
// GetSwagger returns the content of the embedded swagger specification file
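
The spec above is stored exactly as the preceding comment describes: a base64-encoded, gzipped, JSON-marshaled Swagger object, split into string chunks. A minimal sketch, assuming only that encoding, of recovering the JSON; the decodeSpec helper is illustrative, not the generated GetSwagger implementation.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

func decodeSpec(parts []string) ([]byte, error) {
	// The generated file stores the spec as a slice of base64 chunks;
	// concatenate before decoding.
	raw, err := base64.StdEncoding.DecodeString(strings.Join(parts, ""))
	if err != nil {
		return nil, err
	}
	zr, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return io.ReadAll(zr)
}

func main() {
	spec, err := decodeSpec([]string{ /* base64 chunks as above */ })
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("spec is %d bytes of JSON\n", len(spec))
}
```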
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
index 223956bf3..6d7d2c417 100644
--- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
@@ -964,46 +964,47 @@ var swaggerSpec = []string{
"5NK3mau/sYUBjgnNMnsa602AFcgNUeVUGVmHNwPD76jmedmDYcC6AMnMFU3z2gFv1YQjW95jWxzRqX3j",
"ipdWixfZoiKyGbXob1ZXckTMyEuWSnGcz4XycahqozQsO61C3ae/9xSJ9oaEbsyq4DnjkCwFjzWw/Bmf",
"vsSHsa+xRErfx2fmYd+3rfu2CX8LrOY8Q+7kq+L3Mzn9Vwp0aa1WQiGk0W6ntqm2pf89j5I/NBuedk/S",
- "hqeBU8s9DAYK2102fj760PjTFfdxb6pFqTNxEXyLmr0NUhxS1yNorH8JS1qrQb26XlvadfqQAjzETkz1",
- "NNKqsH7Y363wL5rP5lwuIZFgqHkqViBVSz27TWr7UyW1Dd73vXisbc27i6OV6rASySuRgR232Rk7Vk+e",
- "iwxcB+GuIFIFO8YTgfytVL/XSs1IaTlfaFIWRItYEkj9YUJTy2QTq97EJwwqOFolCKdb0BUQmmNfZjIF",
- "4ERMzaLr+xEXSRXW0PSZJC6kMyoKBXAVUqSgFGSJr5+/C7SqLzMGoOsteELAEeBqFqIEmVF5ZWDPVzvh",
- "PIdNgiquInd/+tUozDcOrxUFtyPWVu6LoLeqDuSkvS7Uw6bfRnDtyUOyoxKIFw0w8U0sixxc6lsEhXvh",
- "pHf/2hB1dvHqaMHcMHbNFO8nuRoBVaBeM71fFdqySMz93QXxmX16xpYoiXHKhbcrxgbLqdLJLrZsXgrX",
- "oswKAk4Y48Q4cI/C+YIq/cZlQWdYMcteJziPlbHNFP0AV534YyP/ah/Gxk7NfchVqYgbwWc2QRZbA4f1",
- "lrlewbqaC9PQ/dhV6pS18O0auQ9LwfgOWUETAUJ14M03w0UWh/ZH6gwUXVQ2gKgRsQ2QU/9WgN3Qjd8D",
- "CFM1oi3hYFHkkHKmQuRAuc1AFUVhuIVOSl5914emU/v2sf6lfrdLXFTX93YmQIVpbQ7yC4tZhQbaBVXE",
- "wUGW9Nxlvs1dU7guzOYwJlixItlG+WiyNW+FR2DnIS2LuaQZJBnkNGJK+cU+JvbxtgFwxz15JiuhIZnC",
- "TEiIb3pNybLXRFQNLXA8FRMeCT4hqTmCRnmuCcR9vWPkDHDsGHNydHSnGgrnim6RHw+Xbbe6xyxlxjA7",
- "7ugBQXYcfQjAPXiohr48KvDjpDYftKf4Jyg3QSVH7D/JBlTfEurx91pA25wXXmCNm6LF3lscOMo2e9nY",
- "Dj7Sd2RjBsQv0tjfjl26xtS5pgE1UAAnl1Fujy4o08lMSCtIJ3SmQe4MiP8HZd4d7pNyhaulQnAEd2+6",
- "cZDJh615HBexIBB3XRgSmZCzBUgwdxglD8mS8VLbJ6LUY1tJVAJNF0ZoDy2rdiRsrujaDUqYU5nl2Hhv",
- "Vt2bQuJlxHTrgkegI1mGTY3frPsHIQfVJ25W4aJMk5Jrlgc9Giq9/fOzXt5aJG4tErcWiVuLxK1F4tYi",
- "cWuRuLVI3Fokbi0StxaJW4vEX9ci8amKHyVe4vB1GLngSTtE8jZC8k9VoLe6qryBBK0TF5Rp13HY1x7o",
- "t1vsYQjSQHPEAcuhP2bbhpKefX/8gihRyhRIaiBknBQ5NaoBrHXV/7LZWdn3fLdNdG3TZqrg8SNy+vdj",
- "X0d04epdNt+9e2wbvBGlNznccx1mgGdWEvWtZoAbpLtOM9RfCb5PpusaynKMd1fke3z7OawgFwVIW6KQ",
- "aFlGGs2fAc2fOdzsMPj8w0zuAmjfm9HejxtGL4e2JS28mO/XShWhNo+SPA8yK9/PaK7gfV9ypR1vSYtY",
- "q8rq4rOmIGQm34ls0zohZteOcAObZ6OuJso4lZtI7aduYkObNLQw7MoRVteW9fHgNW+7RNsls10UFpPW",
- "JajoOd5G5dFir9WGdYay6bezFp2MYpmj7QqnowrAQeX+MPnB7gl5Y7/7tMX9ECJ3xGpm/tlEMTbfrJgG",
- "vmuUCMd6vtQMAY/46OnFsz82hJ2VKRCmFfFlc3dfL+PROjEjzYEnjgElU5Ftkgb7GjVuoYwpqhQsp7tv",
- "opB/uubs7vIxT7bfU5/mGnkeLG4bTw6JZp04BtzDnTcaBvPmCls4omPPAcavm0X3sdEQBOL4U8yo1OJ9",
- "+zK9eprNLeO7ZXzBaWxJBIy7MuNtJjK5RsYnN7Lk/Tzv+zWkpQEuPMl30TqPLjlY64aTNYNpOZ9jk/mO",
- "j84sDXA8JvgnYoV2uUO54H4UZAevGg9fNfW8PVyXuwTZ4Hd9vcV7uB2Ub9CZsSwo33iXLySKLcvc4tD2",
- "5zwso7WVwGOFo2vbX59V+7U3+QW2W3fVNn+3aCEXVBG7v5CRkmcuj6lTsXrNh1cvsUOfrXnNprdWKrHr",
- "jazOzTvkivC73EwgV6QAmeg1tweqcZhcXwJ7cie3zbX/GteGTT+HHgbbrbFfM4QD3R4y4Gt4fQSdlOrE",
- "vEZ/JdpMEmw8Q4tGf4pL2HLJvnnQwJLO8M34ktrc4vynkBeEkjRn6F0VXGlZpvotp+i/CRY26caeeEN1",
- "P+975l+JuxAjHj431FtOMcio8upEeeAMIi6MHwA8i1XlfA7K8NGQgGYAb7l7i3FScqOFiRlZslSKxCbM",
- "mvNlZJeJfXNJN2SGdUoE+QOkIFNz6we7bm3JSrM8d8EuZhoiZm851SQHqjR5yQwHNsP5IglVyBnoCyHP",
- "KyzEO/DMgYNiKokbZn60T7HJjVu+NwCiMdM+rptT3Gx3Gw87y3ohP3mOMWpYYzlnStfxER3Yb8w3vmQ8",
- "iRLZ2QKICxdr0xa5i5XdHAHdazqO9ALecnP7aUGQ41N9OXJoe4A6Z9GejhbVNDai5Sjyax2k/h2Ey5AI",
- "k7l1u/yJUkgDOvCeTdx4WzW/tfd7ulgaVy7wzDztuZDtU9cUseclp0A0jGStsjXujbMGyFv9F19+scjD",
- "65IejQfTJrsDdtlVs+0d4s1v+JjQXPC5rZZotEuB+8R4UWoMAL9OAx6saJ6IFUjJMlADV8oE/35F85+r",
- "zz6OR7CGNNGSppBYi8JQrJ2ZbyydYvtAzjSjeYJa9VCA4MR+dWo/2nEfBz1El0vIGNWQb0ghIYXMlhdj",
- "itT6/MQWaCDpgvI5Xt1SlPOFfc2OcwESqnaLRoVuDxEv77LmiS0114XxmFhbaFiNF2i6iLSDwQvO6Oye",
- "oLJGp6mBe9AoJNqnpI9HvYK2QeqqDp2zyGmymQFSREMeCPBTT3yIyqu3RH9L9F860ccKJSLqZi1rhcVX",
- "uC3XbNa67rKgN2gl+yQ1g28L7//ZC+97DqQIJZI2dJB4xzeqCNPkAssiTYGY+6tE67xro+f0dcy0C466",
- "q5+pXNO9dEEZdzV1qrwGhEO7HvDaN529FsOmZWZo0TTogLSUTG9Qa6EF+/0czP/fGbFfgVx5haaU+ejp",
- "aKF18fToKBcpzRdC6aPRx3H4TLUevqvg/+B1kUKyldGvPr77+H8DAAD//yXqGqTfpgEA",
+ "hqeBU8s9DAYK2102fj7y6QiN5pfRNz80/nRlgNybalHqTFwEs6ANwIYzDqkAErTgv4TNrdXKXl2v1e06",
+ "vU0BHmJnq3oaaWpYP+zva/gXzXxzzpmQSDAoPRUrkKqlyN2mv/2p0t8G7/te3Ng28d3F0Up1WNnllcjA",
+ "jtvsoR2rPM9FBq7XcFdkqcIi4ylD/v6q32slcaS0nC80KQuiRSxdpP4woallsolVhOITBrUerbqE0y3o",
+ "CgjNsYMzmQJwIqZm0fVNioukCqtt+pwTF/wZFZoCuAopUlAKssRX2t8FWtXBGUPV9RY8IeAIcDULUYLM",
+ "qLwysOernXCewyZBZViRuz/9alTrG4fXCo3bEWtr/EXQW9URcnJhF+ph028juPbkIdlRCcSLBpgiJ5ZF",
+ "Di5JLoLCvXDSu39tiDq7eHW0YBYZu2aK95NcjYAqUK+Z3q8KbVkk5v7ugvjMPj1jS5TEOOXCWyBjg+VU",
+ "6WQXWzYvhWtRZgUBJ4xxYhy4RzV9QZV+4/KlM6ytZa8TnMfK2GaKfoCrnv2xkX+1D2Njp+Y+5KpUxI3g",
+ "c6Agi62Bw3rLXK9gXc2FCet+7CrJytoCd43ch6VgfIesoN0AoTrw+5vhIotDSyV1powuKhtA1IjYBsip",
+ "fyvAbujw7wGEqRrRlnCwfHJIOVMhcqDc5qqKojDcQiclr77rQ9OpfftY/1K/2yUuqut7OxOgwgQ4B/mF",
+ "xaxCU+6CKuLgIEt67nLk5q59XBdmcxgTrG2RbKN8NO6at8IjsPOQlsVc0gySDHIaMbr8Yh8T+3jbALjj",
+ "njyTldCQTGEmJMQ3vaZk2WtMqoYWOJ6KCY8En5DUHEGjPNcE4r7eMXIGOHaMOTk6ulMNhXNFt8iPh8u2",
+ "W91jwDJjmB139IAgO44+BOAePFRDXx4V+HFSmw/aU/wTlJugkiP2n2QDqm8J9fh7LaBt+AsvsMZN0WLv",
+ "LQ4cZZu9bGwHH+k7sjFT4xfpFmhHOV1jkl3T1BoogJPLKLdHF5TpZCakFaQTOtMgd4bO/4My7zj36bvC",
+ "VV0hOIK7N904yOTDJj6Oi1gQiLsuDIlMyNkCJJg7jJKHZMl4qe0TUeqxrTkqgaYLI7SHNlg7ErZhdI0J",
+ "JcypzHJs0Ter7k0h8TJiunXBI9CRfMSmxm/W/YOQgyoZN+t1UaZJyTXLg24Old7++Vkvby0StxaJW4vE",
+ "rUXi1iJxa5G4tUjcWiRuLRK3Folbi8StReKva5H4VGWSEi9x+IqNXPCkHUx5G0v5pyrlW11V3kCC1okL",
+ "yrTrTeyrFPTbLfYwBGmgOeKA5dAf3W2DTs++P35BlChlCiQ1EDJOipwa1QDWuuqU2ezB7LvD23a7tr0z",
+ "VfD4ETn9+7GvOLpwlTGb7949tvFqROlNDvdcLxrgmZVEfVMa4AbpricN9VeC76jp+ouyHCPjFfke334O",
+ "K8hFAdIWMyRalpGW9GdA82cONzsMPv8wk7tQ2/dmtPfjhtHLoW1JCy/m+7VSRajNuCTPgxzM9zOaK3jf",
+ "l4Zpx1vSItbUsrr4rCkImcl3Itu0TojZtSPcwObZqOuOMk7lJlIlqpsC0SYNLQy7coTVtWV9PHh13C7R",
+ "dslsF4XFpHUJKnqOt1F5tCxstWGdoWyi7qxFJ6NYjmm7FuqoAnBQYUBMk7B7Qt7Y7z5tGUCEyB2xmpl/",
+ "NlGMzTcrpoHvGiXCsZ4vNZfAIz56evHsjw1hZ2UKhGlFfIHd3dfLeLROzEhz4IljQMlUZJukwb5GjVso",
+ "Y4oqBcvp7pso5J+ujbu7fMyT7ffUp7lGngeL28aTQ6JZJ44B93DnjYbBvLnCFo7o2HOA8etm0X1sNASB",
+ "OP4UMyq1eN++TK+eZnPL+G4ZX3AaWxIB464geZuJTK6R8cmNLHk/z/t+DWlpgAtP8l20zqNLDta64WTN",
+ "YFrO59iOvuOjM0sDHI8J/olYoV3uUC64HwXZwasWxVdNUm8P1+UuQd74XV+Z8R5uB+UbdGYsC8o33uUL",
+ "iWLLMrc4tJ08D8tobc3wWInp2vbXZ9V+7U1+ge3WXbXN3y1ayAVVxO4vZKTkmct46tS2XvPhdU7s0Gdr",
+ "XrPprTVN7Hojq3PzDrki/C43U80VKUAmes3tgWocJtfBwJ7cyW0b7r/GtWET1aGHwXar8dcM4UC3hwz4",
+ "Gl4fQc+lOjGv0YmJNtMJG8/QotGf4hI2Z7JvHjSwpDN8M76kNrc4/ynkBaEkzRl6VwVXWpapfssp+m+C",
+ "hU26sSfeUN3P+575V+IuxIiHzw31llMMMqq8OlEeOIOIC+MHAM9iVTmfgzJ8NCSgGcBb7t5inJTcaGFi",
+ "RpYslSKxqbXmfBnZZWLfXNINmWFFE0H+ACnI1Nz6wa5bW7LSLM9dsIuZhojZW041yYEqTV4yw4HNcL6c",
+ "QhVyBvpCyPMKC/FePXPgoJhK4oaZH+1TbIfjlu8NgGjMtI/rNhY32wfHw86yXshPnmOMGlZjzpnSdXxE",
+ "B/Yb840vGU+iRHa2AOLCxdq0Re5iDThHQPeajiO9gLfc3H5aEOT4VF+OHNoeoM5ZtKejRTWNjWg5ivxa",
+ "B6l/B+EyJMJkbt0uf6IU0oAOvGcTN97W12/t/Z4ulsaVCzwzT3suZPvUtU/seckpEA0jWavAjXvjrAHy",
+ "Vv/Fl19W8vC6pEfjwbTJ7oBddtVskId48xs+JjQXfG7rKhrtUuA+MV6UGgPAr9OAByuaJ2IFUrIM1MCV",
+ "MsG/X9H85+qzj+MRrCFNtKQpJNaiMBRrZ+YbS6fYaJAzzWieoFY9FCA4sV+d2o923MdBt9HlEjJGNeQb",
+ "UkhIIbOFyJgitT4/sQUaSLqgfI5XtxTlfGFfs+NcgISqMaNRodtDxAvBrHlii9J1YTwm1hYa1u0Fmi4i",
+ "jWPwgjM6uyeorNGTauAeNEqO9inp41GvoG2QuqpD5yxymmxmgBTRkAcC/NQTH6JG6y3R3xL9l070sZKK",
+ "iLpZy1ph8RVuyzWbta67gOgNWsk+SXXh2xL9f/YS/Z4DKUKJpA0dJN4bjirCNLnAskhTIOb+KtE67xru",
+ "OX0dM+2Co+4qbSrXni9dUMZdTZ0qrwHh0K5bvPbtaa/FsGmZGVo0DTogLSXTG9RaaMF+Pwfz/3dG7Fcg",
+ "V16hKWU+ejpaaF08PTrKRUrzhVD6aPRxHD5TrYfvKvg/eF2kkGxl9KuPCLaQbM64uXMv6HwOsjYhjh5N",
+ "How+/t8AAAD//0RubYkspwEA",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go
index 49c6517c1..f352c24c7 100644
--- a/daemon/algod/api/server/v2/generated/participating/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go
@@ -27,6 +27,9 @@ type ServerInterface interface {
// Add a participation key to the node
// (POST /v2/participation)
AddParticipationKey(ctx echo.Context) error
+ // Generate and install participation keys to the node.
+ // (POST /v2/participation/generate/{address})
+ GenerateParticipationKeys(ctx echo.Context, address string, params GenerateParticipationKeysParams) error
// Delete a given participation key by ID
// (DELETE /v2/participation/{participation-id})
DeleteParticipationKeyByID(ctx echo.Context, participationId string) error
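
The new interface method only declares the shape; the actual work happens in the handler layer elsewhere in the codebase. A minimal sketch, assuming the echo/v4 framework the generated wrappers use, of what a conforming implementation looks like; nodeImpl and its body are hypothetical stand-ins, not go-algorand's handler.

```go
package v2sketch

import (
	"math"
	"net/http"

	"github.com/labstack/echo/v4"
)

// GenerateParticipationKeysParams mirrors the generated struct shown earlier.
type GenerateParticipationKeysParams struct {
	Dilution *uint64
	First    uint64
	Last     uint64
}

type nodeImpl struct{}

// GenerateParticipationKeys satisfies the new ServerInterface method.
func (n *nodeImpl) GenerateParticipationKeys(ctx echo.Context, address string, params GenerateParticipationKeysParams) error {
	if params.Last <= params.First {
		return echo.NewHTTPError(http.StatusBadRequest, "last must be after first")
	}
	// Apply the documented default when dilution is omitted: sqrt of the
	// validity window (hypothetical arithmetic for illustration).
	dilution := uint64(math.Sqrt(float64(params.Last - params.First)))
	if params.Dilution != nil {
		dilution = *params.Dilution // caller override of the sqrt default
	}
	_ = dilution // ... hand off to the node's key-generation machinery ...
	return ctx.NoContent(http.StatusOK)
}
```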
@@ -65,6 +68,47 @@ func (w *ServerInterfaceWrapper) AddParticipationKey(ctx echo.Context) error {
return err
}
+// GenerateParticipationKeys converts echo context to params.
+func (w *ServerInterfaceWrapper) GenerateParticipationKeys(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "address" -------------
+ var address string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "address", runtime.ParamLocationPath, ctx.Param("address"), &address)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter address: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GenerateParticipationKeysParams
+ // ------------- Optional query parameter "dilution" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "dilution", ctx.QueryParams(), &params.Dilution)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter dilution: %s", err))
+ }
+
+ // ------------- Required query parameter "first" -------------
+
+ err = runtime.BindQueryParameter("form", true, true, "first", ctx.QueryParams(), &params.First)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter first: %s", err))
+ }
+
+ // ------------- Required query parameter "last" -------------
+
+ err = runtime.BindQueryParameter("form", true, true, "last", ctx.QueryParams(), &params.Last)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter last: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GenerateParticipationKeys(ctx, address, params)
+ return err
+}
+
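
The required/optional split in the wrapper above comes straight from the third argument to BindQueryParameter. A small standalone sketch of that behavior, using the same oapi-codegen runtime helper with the same call shapes as the generated code; the import path here is upstream oapi-codegen's, and the generated file's own import may differ.

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/deepmap/oapi-codegen/pkg/runtime"
)

func main() {
	// Simulate the query string of a request that omits "dilution".
	q := url.Values{"first": {"1000"}, "last": {"2000"}}

	var first uint64
	// style="form", explode=true, required=true: a missing "first" returns
	// an error, which the wrapper above turns into an HTTP 400.
	if err := runtime.BindQueryParameter("form", true, true, "first", q, &first); err != nil {
		fmt.Println("bind failed:", err)
		return
	}

	var dilution *uint64
	// required=false: an absent "dilution" simply leaves the pointer nil.
	if err := runtime.BindQueryParameter("form", true, false, "dilution", q, &dilution); err != nil {
		fmt.Println("bind failed:", err)
		return
	}

	fmt.Println(first, dilution == nil) // 1000 true
}
```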
// DeleteParticipationKeyByID converts echo context to params.
func (w *ServerInterfaceWrapper) DeleteParticipationKeyByID(ctx echo.Context) error {
var err error
@@ -149,6 +193,7 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
router.GET(baseURL+"/v2/participation", wrapper.GetParticipationKeys, m...)
router.POST(baseURL+"/v2/participation", wrapper.AddParticipationKey, m...)
+ router.POST(baseURL+"/v2/participation/generate/:address", wrapper.GenerateParticipationKeys, m...)
router.DELETE(baseURL+"/v2/participation/:participation-id", wrapper.DeleteParticipationKeyByID, m...)
router.GET(baseURL+"/v2/participation/:participation-id", wrapper.GetParticipationKeyByID, m...)
router.POST(baseURL+"/v2/participation/:participation-id", wrapper.AppendKeys, m...)
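
To exercise the newly registered route end to end, something like the following would do; the host, port, token value, and address are assumptions, not values from this diff.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Path parameter ":address" plus the two required query parameters.
	url := "http://127.0.0.1:8080/v2/participation/generate/EXAMPLE-ADDRESS?first=1000&last=2000"

	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		panic(err)
	}
	// algod guards its REST API with a token; header name per algod convention.
	req.Header.Set("X-Algo-API-Token", "<contents of algod.token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// Dropping "first" or "last" from the query should instead yield a 400
	// from the generated binder shown earlier.
	fmt.Println(resp.Status, string(body))
}
```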
@@ -158,213 +203,216 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/ZPbNrLgv4LSe1X+OHHGn3kbX229m9hJdi5O4vJMsveex5dAZEvCDgVwAVAjxef/",
- "/QoNgARJQKJmFHtTlZ/sEfHRaDQa/YXuD5NcrCrBgWs1efFhUlFJV6BB4l80z0XNdcYK81cBKpes0kzw",
- "yQv/jSgtGV9MphNmfq2oXk6mE05X0LYx/acTCf+smYRi8kLLGqYTlS9hRc3AeluZ1s1Im2whMjfEmR3i",
- "/NXk444PtCgkKDWE8kdebgnjeVkXQLSkXNHcfFLkhukl0UumiOtMGCeCAxFzopedxmTOoCzUiV/kP2uQ",
- "22CVbvL0kj62IGZSlDCE86VYzRgHDxU0QDUbQrQgBcyx0ZJqYmYwsPqGWhAFVOZLMhdyD6gWiBBe4PVq",
- "8uLdRAEvQOJu5cDW+N+5BPgNMk3lAvTk/TS2uLkGmWm2iizt3GFfgqpLrQi2xTUu2Bo4Mb1OyPe10mQG",
- "hHLy9puX5OnTp1+ahayo1lA4Ikuuqp09XJPtPnkxKagG/3lIa7RcCEl5kTXt337zEue/cAsc24oqBfHD",
- "cma+kPNXqQX4jhESYlzDAvehQ/2mR+RQtD/PYC4kjNwT2/iomxLO/1l3Jac6X1aCcR3ZF4Jfif0c5WFB",
- "9108rAGg074ymJJm0HePsi/ff3g8ffzo47+9O8v+2/35/OnHkct/2Yy7BwPRhnktJfB8my0kUDwtS8qH",
- "+Hjr6EEtRV0WZEnXuPl0haze9SWmr2Wda1rWhk5YLsVZuRCKUEdGBcxpXWriJyY1Lw2bMqM5aidMkUqK",
- "NSugmBrue7Nk+ZLkVNkhsB25YWVpaLBWUKRoLb66HYfpY4gSA9et8IEL+tdFRruuPZiADXKDLC+FgkyL",
- "PdeTv3EoL0h4obR3lTrssiKXSyA4uflgL1vEHTc0XZZbonFfC0IVocRfTVPC5mQranKDm1Oya+zvVmOw",
- "tiIGabg5nXvUHN4U+gbIiCBvJkQJlCPy/LkboozP2aKWoMjNEvTS3XkSVCW4AiJm/4Bcm23/3xc//kCE",
- "JN+DUnQBb2h+TYDnooDihJzPCRc6IA1HS4hD0zO1DgdX7JL/hxKGJlZqUdH8On6jl2zFIqv6nm7Yql4R",
- "Xq9mIM2W+itECyJB15KnALIj7iHFFd0MJ72UNc9x/9tpO7KcoTamqpJuEWEruvnro6kDRxFalqQCXjC+",
- "IHrDk3KcmXs/eJkUNS9GiDna7GlwsaoKcjZnUJBmlB2QuGn2wcP4YfC0wlcAjh8kCU4zyx5wOGwiNGNO",
- "t/lCKrqAgGROyE+OueFXLa6BN4ROZlv8VElYM1GrplMCRpx6twTOhYaskjBnERq7cOgwDMa2cRx45WSg",
- "XHBNGYfCMGcEWmiwzCoJUzDhbn1neIvPqIIvnqXu+PbryN2fi/6u79zxUbuNjTJ7JCNXp/nqDmxcsur0",
- "H6EfhnMrtsjsz4ONZItLc9vMWYk30T/M/nk01AqZQAcR/m5SbMGpriW8uOIPzV8kIxea8oLKwvyysj99",
- "X5eaXbCF+am0P70WC5ZfsEUCmQ2sUYULu63sP2a8ODvWm6he8VqI67oKF5R3FNfZlpy/Sm2yHfNQwjxr",
- "tN1Q8bjceGXk0B5602xkAsgk7ipqGl7DVoKBluZz/GczR3qic/mb+aeqStNbV/MYag0duysZzQfOrHBW",
- "VSXLqUHiW/fZfDVMAKwiQdsWp3ihvvgQgFhJUYHUzA5KqyorRU7LTGmqcaR/lzCfvJj822lrfzm13dVp",
- "MPlr0+sCOxmR1YpBGa2qA8Z4Y0QftYNZGAaNn5BNWLaHQhPjdhMNKTHDgktYU65PWpWlww+aA/zOzdTi",
- "20o7Ft89FSyJcGIbzkBZCdg2vKdIgHqCaCWIVhRIF6WYNT/cP6uqFoP4/ayqLD5QegSGghlsmNLqAS6f",
- "ticpnOf81Qn5NhwbRXHBy625HKyoYe6Gubu13C3W2JbcGtoR7ymC2ynkidkajwYj5h+D4lCtWIrSSD17",
- "acU0/ptrG5KZ+X1U5z8GiYW4TRMXKloOc1bHwV8C5eZ+j3KGhOPMPSfkrN/3dmRjRokTzK1oZed+2nF3",
- "4LFB4Y2klQXQfbF3KeOopNlGFtY7ctORjC4Kc3CGA1pDqG591vaehygkSAo9GL4qRX79N6qWRzjzMz/W",
- "8PjhNGQJtABJllQtTyYxKSM8Xu1oY46YaYgKPpkFU500SzzW8vYsraCaBktz8MbFEot67IdMD2REd/kR",
- "/0NLYj6bs21Yvx32hFwiA1P2ODsnQ2G0fasg2JlMA7RCCLKyCj4xWvdBUL5sJ4/v06g9+traFNwOuUU0",
- "O3S5YYU61jbhYKm9CgXU81dWo9OwUhGtrVkVlZJu42u3c41BwKWoSAlrKPsgWJaFo1mEiM3R+cJXYhOD",
- "6SuxGfAEsYGj7IQZB+Vqj9098L1ykAm5H/M49hikmwUaWV4he+ChCGRmaa3VZzMhb8eOe3yWk9YGT6gZ",
- "NbiNpj0kYdO6ytzZjNjxbIPeQK3bczcX7Q8fw1gHCxea/g5YUGbUY2ChO9CxsSBWFSvhCKS/jN6CM6rg",
- "6RNy8bez54+f/PLk+ReGJCspFpKuyGyrQZH7TlklSm9LeDBcGaqLdanjo3/xzFtuu+PGxlGiljmsaDUc",
- "ylqErUxomxHTboi1Lppx1Q2AozgimKvNop1YZ4cB7RVTRuRczY6yGSmEFe0sBXGQFLCXmA5dXjvNNlyi",
- "3Mr6GLo9SClk9OqqpNAiF2W2BqmYiLiX3rgWxLXw8n7V/91CS26oImZutIXXHCWsCGXpDR/P9+3Qlxve",
- "4mYn57frjazOzTtmX7rI96ZVRSqQmd5wUsCsXnRUw7kUK0JJgR3xjv4WtJVb2AouNF1VP87nx9GdBQ4U",
- "0WHZCpSZidgWRmpQkAtuQ0P2qKtu1DHo6SPG2yx1GgCHkYstz9Hweoxjm9bkV4yjF0hteR6o9QbGEopF",
- "hyzvrr6n0GGnuqci4Bh0vMbPaPl5BaWm3wh52Yp930pRV0cX8vpzjl0OdYtxtqXC9PVGBcYXZTccaWFg",
- "P4mt8bMs6KU/vm4NCD1S5Gu2WOpAz3ojhZgfH8bYLDFA8YPVUkvTZ6ir/iAKw0x0rY4ggrWDtRzO0G3I",
- "1+hM1JpQwkUBuPm1igtniQAW9Jyjw1+H8p5eWsVzBoa6clqb1dYVQXf24L5oO2Y0tyc0Q9SohDOv8cLa",
- "VnY6GxxRSqDFlswAOBEz5zFzvjxcJEVfvPbijRMNI/yiA1clRQ5KQZE5S91e0Hw7e3XoHXhCwBHgZhai",
- "BJlTeWdgr9d74byGbYaRI4rc/+5n9eAzwKuFpuUexGKbGHobu4dziw6hHjf9LoLrTx6SHZVA/L1CtEBp",
- "tgQNKRQehJPk/vUhGuzi3dGyBokOyt+V4v0kdyOgBtTfmd7vCm1dJeIhnXprJDyzYZxy4QWr2GAlVTrb",
- "x5ZNo44OblYQcMIYJ8aBE4LXa6q0daozXqAt0F4nOI8VwswUaYCTaogZ+WevgQzHzs09yFWtGnVE1VUl",
- "pIYitgYOmx1z/QCbZi4xD8ZudB4tSK1g38gpLAXjO2TZlVgEUd34nlzUyXBx6KEx9/w2isoOEC0idgFy",
- "4VsF2A1jwhKAMNUi2hIOUz3KaQLRphOlRVUZbqGzmjf9Umi6sK3P9E9t2yFxUd3e24UAhaForr2D/MZi",
- "1kYDLqkiDg6yotdG9kAziPX+D2E2hzFTjOeQ7aJ8VPFMq/AI7D2kdbWQtICsgJJuh4P+ZD8T+3nXALjj",
- "rborNGQ2rCu+6S0l+yiaHUMLHE/FhEeCX0hujqBRBVoCcb33jFwAjh1jTo6O7jVD4VzRLfLj4bLtVkdG",
- "xNtwLbTZcUcPCLLj6GMATuChGfr2qMDOWat79qf4L1BugkaOOHySLajUEtrxD1pAwobqIuaD89Jj7z0O",
- "HGWbSTa2h4+kjmzCoPuGSs1yVqGu8x1sj6769SeI+l1JAZqyEgoSfLBqYBX2JzYgqT/m7VTBUba3IfgD",
- "41tkOSVTKPJ0gb+GLercb2yka2DqOIYuGxnV3E+UEwTUx88ZETxsAhua63JrBDW9hC25AQlE1bMV09pG",
- "sHdVXS2qLBwg6tfYMaPzakZ9ijvdrBc4VLC84VZMJ1Yn2A3fZU8x6KDD6QKVEOUIC9kAGVEIRgXAkEqY",
- "XWcumN6HU3tK6gDpmDa6tJvr/57qoBlXQP5L1CSnHFWuWkMj0wiJggIKkGYGI4I1c7pQlxZDUMIKrCaJ",
- "Xx4+7C/84UO350yROdz4FyimYR8dDx+iHeeNULpzuI5gDzXH7TxyfaDDx1x8Tgvp85T9oRZu5DE7+aY3",
- "eOMlMmdKKUe4Zvl3ZgC9k7kZs/aQRsaFmeC4o3w5HZf9cN247xdsVZdUH8NrBWtaZmINUrIC9nJyNzET",
- "/Os1LX9suuHrGsgNjeaQ5fgmZORYcGn62GckZhzGmTnANoR0LEBwbntd2E57VMw2So+tVlAwqqHckkpC",
- "Dvb1hJEcVbPUE2LjKvMl5QtUGKSoFy6wz46DDL9W1jQjaz4YIipU6Q3P0MgduwBcMLd/QGPEKaBGpetb",
- "yK0Cc0Ob+dybqTE3c7AHfY9B1Ek2nSQ1XoPUdavxWuR0XwGNuAw68l6An3bika4URJ2RfYb4CrfFHCaz",
- "ub+Pyb4dOgblcOIg1LD9mIo2NOp2uT2C0GMHIhIqCQqvqNBMpexXMQ9f/Lk7TG2VhtXQkm+7/pI4fm+T",
- "+qLgJeOQrQSHbfSRO+PwPX6MHie8JhOdUWBJ9e3rIB34e2B15xlDjXfFL+52/4T2PVbqGyGP5RK1A44W",
- "70d4IPe6292Ut/WT0rKMuBbde6A+A1DTJv8Ak4QqJXKGMtt5oab2oDlvpHs81EX/mybK+Qhnrz9uz4cW",
- "PjVFGzGUFaEkLxlakAVXWta5vuIUbVTBUiPBT14ZT1stX/omcTNpxIrphrriFAPfGstVNGBjDhEzzTcA",
- "3nip6sUClO7pOnOAK+5aMU5qzjTOtTLHJbPnpQKJEUgntuWKbsnc0IQW5DeQgsxq3ZX+8bmb0qwsnUPP",
- "TEPE/IpTTUqgSpPvGb/c4HDe6e+PLAd9I+R1g4X47b4ADoqpLB6k9a39igHFbvlLF1yM6QnsZx+s2b6/",
- "nZhldp7c/9/7//ni3Vn23zT77VH25f84ff/h2ccHDwc/Pvn417/+v+5PTz/+9cF//ntspzzsscdYDvLz",
- "V04zPn+F6k/rAxrA/sns/yvGsyiRhdEcPdoi9/HhsSOgB13jmF7CFdcbbghpTUtWGN5yG3Lo3zCDs2hP",
- "R49qOhvRM4b5tR6oVNyBy5AIk+mxxltLUcO4xvizR3RKupeMeF7mNbdb6aVv+6rHx5eJ+bR52mqz3rwg",
- "+O5xSX1wpPvzyfMvJtP2vWLzfTKduK/vI5TMik3sVWoBm5iu6A4IHox7ilR0q0DHuQfCHg2ls7Ed4bAr",
- "WM1AqiWrPj2nUJrN4hzOv5VwNqcNP+c2MN6cH3Rxbp3nRMw/PdxaAhRQ6WUsG0ZHUMNW7W4C9MJOKinW",
- "wKeEncBJ3+ZTGH3RBfWVQOeYlQG1TzFGG2rOgSU0TxUB1sOFjDKsxOin9yzAXf7q6OqQGzgGV3/Oxp/p",
- "/9aC3Pv260ty6himumcfSNuhgyetEVXavdrqBCQZbmZzAFkh74pf8VcwR+uD4C+ueEE1PZ1RxXJ1WiuQ",
- "X9GS8hxOFoK88A/BXlFNr/hA0kqm6Qqe4JGqnpUsJ9ehQtKSp029Mhzh6uodLRfi6ur9IDZjqD64qaL8",
- "xU6QGUFY1DpziSMyCTdUxnxfqkkcgCPbzDC7ZrVCtqitgdQnpnDjx3kerSrVf0A8XH5VlWb5ARkq9zzW",
- "bBlRWkgvixgBxUKD+/uDcBeDpDferlIrUOTXFa3eMa7fk+yqfvToKZDOi9pf3ZVvaHJbwWjrSvKBc9+o",
- "ggu3aiVstKRZRRcxF9vV1TsNtMLdR3l5hTaOsiTYrfOS1wfm41DtAjw+0htg4Tj4VSIu7sL28knC4kvA",
- "T7iF2MaIG63j/7b7FbztvfV29d4HD3ap1svMnO3oqpQhcb8zTe6ghRGyfDSGYgvUVl2apRmQfAn5tct/",
- "A6tKb6ed7j7gxwmannUwZTMj2Zd5mJsDHRQzIHVVUCeKU77tJ0lQoLUPK34L17C9FG1qj0OyInQf6avU",
- "QUVKDaRLQ6zhsXVj9DffRZWhYl9V/q07Pnr0ZPGioQvfJ32Qrch7hEMcI4rOI/IUIqiMIMISfwIFt1io",
- "Ge9OpB9bntEyZvbmi2RJ8ryfuCat8uQCwMLVoNXdfl8BplkTN4rMqJHbhcsQZh+iB1ysVnQBCQk59BGN",
- "fO7d8SvhIPvuvehNJ+b9C21w30RBto0zs+YopYD5YkgFlZle2J+fybohnWcCE386hM1KFJOa+EjLdKjs",
- "+OpsJsMUaHECBslbgcOD0cVIKNksqfLJyzDHmz/Lo2SA3zGxwq50OudBxFqQyK1JluN5bv+cDrRLl1TH",
- "Z9Lx6XNC1XJEKhwj4WOQfGw7BEcBqIASFnbhtrEnlDbJQ7tBBo4f5/OScSBZLPgtMIMG14ybA4x8/JAQ",
- "a4Eno0eIkXEANrrXcWDygwjPJl8cAiR3SSqoHxsd88HfEH8+ZsPBjcgjKsPCWcKrlXsOQF3EZHN/9eJ2",
- "cRjC+JQYNrempWFzTuNrBxlkdUGxtZfDxQV4PEiJszscIPZiOWhN9iq6zWpCmckDHRfodkA8E5vMvh+N",
- "SryzzczQezRCHl+zxg6mzZ9zT5GZ2GDQEF4tNiJ7DyxpODwYgYa/YQrpFfulbnMLzK5pd0tTMSpUSDLO",
- "nNeQS0qcGDN1QoJJkcv9ICXOrQDoGTva/NJO+d2rpHbFk+Fl3t5q0zbVm398FDv+qSMU3aUE/oZWmCaJ",
- "zZu+xBK1U3RjX7r5ewIRMkb0hk0MnTRDV5CCElApyDpCVHYd85wa3Qbwxrnw3QLjBWYJonz7IAiokrBg",
- "SkNrRPdxEp/DPEkxOaEQ8/TqdCXnZn1vhWiuKetGxI6dZX7yFWBE8pxJpTP0QESXYBp9o1Cp/sY0jctK",
- "3ZAtm8qXFXHegNNewzYrWFnH6dXN+90rM+0PDUtU9Qz5LeM2YGWGqaejgZw7praxvjsX/Nou+DU92nrH",
- "nQbT1EwsDbl05/iDnIse593FDiIEGCOO4a4lUbqDQQYPcIfcMZCbAh//yS7r6+AwFX7svVE7/hlw6o6y",
- "I0XXEhgMdq6CoZvIiCVMB5mbhy9jE2eAVhUrNj1bqB01qTHTgwwePt9dDwu4u26wPRjoxuVFw5w7uQJd",
- "9J+z+ZyigHxqRDgbDuhi3UCilmPfhBa1RKNaJ9humJiyEexGrv27ny+0kHQBzjCaWZDuNAQu5xA0BGkf",
- "FdHMejgLNp9DaBBUtzFmdYDrm32ixR1GEFncalgzrr94FiOjPdTTwrgfZXGKidBCyk10OTS8erEq0Dub",
- "yiXB1tzCehp9QfodbLOfjYZCKsqkaiPGnCW0y/8O2PX16jvY4sh7A7EMYHt2BdXUt4A0GDMLNp/sw4lG",
- "BQpzmGLSh84WHrBTZ/FdOtLWuKyzaeJvw7I7WVm7S7nLwWj9dgaWMbtxEXeXmdMDXcT3SXnfJrCEMS4k",
- "x0DkCqdiytfoGV5FzfPofbR7CbT0xIvLmXycTu7mnIrdZm7EPbh+01ygUTxj8JN1VnR8zQeinFaVFGta",
- "Zs6Fl7r8pVi7yx+be4/fJxYm45R9+fXZ6zcO/I/TSV4ClVmjjCVXhe2qP8yqbJ7a3VcJSizeKmKV9WDz",
- "m+SaodvvZgmumEKg7w+yPrcu3eAoOjfgPB6DuZf3Oe+zXeIOLzRUjRO6dZBYH3TX70zXlJXeM+GhTcRL",
- "4uLGpQ6PcoVwgDv7r4MwhOyo7GZwuuOno6WuPTwJ5/oRs6XFNQ7ucqkhK3L+aHp06ekbITvM3z2Wifqz",
- "fz+xygjZFo+J8EFfoKcvTJ0QK3j9uvjVnMaHD8Oj9vDhlPxaug8BgPj7zP2O+sXDh1FXQ9SSYJgEGgo4",
- "XcGDJvA3uRGf1uzE4WbcBX22XjWSpUiTYUOh1jHt0X3jsHcjmcNn4X4poATz0/63db1Nt+gOgRlzgi5S",
- "j2OauKeVrQmkiOD9MD98l2VIC5n9imLWc+u5GR4hXq/Q25GpkuVxPzCfKcNeuY3vMY0JNk4YzMyINUuE",
- "i/GaBWOZZmPS+PWADOaIIlNFMwm2uJsJd7xrzv5ZA2GF0WrmDCTea72rzisHOOpAIDWq53AuN7CNImiH",
- "v4sdJMz435cZEYjdRpAwmmgA7qvGrO8X2njNWp3p0KDEcMYB494RUOjow1GzfWCx7EYFjdNjxtSG9IzO",
- "lR5IzBGt9chUNpfiN4jbotGEH3mb7WscMIzE/Q1C9SyscNZhKY0Hqi1Z2c6+b7vH68apjb+zLuwX3ZRV",
- "uM1lGj/Vh23kbZReFc8g6pCcUsJCd2Q3WjXBWvB4BfFZmNHehypQbs+TfZjcefQQP5Xh86JTO357Kh3M",
- "gydZJb2Z0Vi6f6MLGZiC7e0EVWhBfGe/Aap5dmtnJ0FQYdOW2eRGFcg2N8UwUeIt9Ro77WiNplVgkKJC",
- "1WVqA8FKJSLD1PyGclsm0fSz/Mr1VmC9oKbXjZCYmkzF4z8KyNkqao69unpX5ENff8EWzFYArBUEJebc",
- "QLa6qqUiV6aveUzuUHM+J4+mQZ1LtxsFWzPFZiVgi8e2xYwqvC4bj2TTxSwPuF4qbP5kRPNlzQsJhV4q",
- "i1glSKN7opDXRDHNQN8AcPII2z3+ktzH+C3F1vDAYNEJQZMXj79E77v941HslnUVHHex7AJ59t8dz47T",
- "MQaw2TEMk3SjnkSzONkSzunbYcdpsl3HnCVs6S6U/WdpRTldQDxkeLUHJtsXdxM9qj28cOsNAKWl2BKm",
- "4/ODpoY/JZ4hGvZnwSC5WK2YXrkoHyVWhp7a+nF2Uj+cLWbqSn94uPxHDJarfKxQz9b1idUYuko8I8CQ",
- "xh/oCrponRJq89GVrA1j9QWJyLlPd4m1UJoSKBY3Zi6zdJQlMap1TirJuEb7R63n2V+MWixpbtjfSQrc",
- "bPbFs0hNkW7afX4Y4J8c7xIUyHUc9TJB9l5mcX3JfS54tjIcpXjQPvsNTmUyqi8ev5UKIts99FjJ14yS",
- "Jcmt7pAbDTj1nQiP7xjwjqTYrOcgejx4ZZ+cMmsZJw9amx366e1rJ2WshIzlsG6Pu5M4JGjJYI2POOKb",
- "ZMa8417IctQu3AX6zxuC4kXOQCzzZzmqCAQezV3vN40U//P3bTJedKzaxzE9G6CQEWuns9t94oCvw6xu",
- "ff+tjdnBbwnMjUabrfQ+wEoiVNfG4jZ9PvFz3qi51+55x+D4+FcijQ6OcvzDhwj0w4dTJwb/+qT72bL3",
- "hw/jOTGjJjfza4uFu2jE2De2h1+JiAHMF6BqAorck92IATJ1SZkPhgnO3FBT0i328+mliOM8BokH/MVP",
- "wdXVO/zi8YB/9BHxmZklbmAb0pw+7N1iZ1GSKZrvQagxJV+JzVjC6d1Bnnj+BVCUQMlI8xyuZFDMLequ",
- "3xsvEtCoGXUGpTBKZlinIrTn/3HwbBY/3YHtmpXFz226od5FIinPl9FAzZnp+EtbdL1ZomWV0dT3S8o5",
- "lNHhrG77i9eBI1r6P8TYeVaMj2zbLyZol9tbXAt4F0wPlJ/QoJfp0kwQYrWbyaV5KVwuREFwnjbPessc",
- "h1U5g1Jh/6xB6djRwA/2tRI6uwzztZWqCPACrV8n5FvMqWBg6STRRauTT0/YTdVVV6WgxRTTJl5+ffaa",
- "2FltH1s62FbKWqDRpbuKqJV8fOqypgpw/E3++HF2PxI2q1Y6awpbxbIemRZt6S3WC51Ac0yInRPyylrC",
- "lLez2EkIJt+UKyiCOlpWF0OaMP/RmuZLNDF1LrI0yY8v8eapsjXAB/Wim7oKeO4M3K7Kmy3yNiVCL0He",
- "MAX4ChPW0E201GQdcyZOn3ipuzxZc24p5eQAmaKponAo2j1wViDxvuEoZD3EH2hgsBUSD614d4G9omme",
- "++Xzes5bn7anqQP8vbMR55QLznJMshwTiDApzDhv04h81HE3kZq4Exo5XNGifc37L4fFZBk/zwgd4oae",
- "2+Cr2VRLHfZPDRtXzGUBWjnOBsXU1550fg3GFbg6GYaIQj4pZCQ2JRrP3vjBDyQjzPeQMFR9Y7794MyY",
- "+BD6mnE0WDi0OTHbeh5KxdDByAnTZCFAufV0k16pd6bPCeZ/KmDz/uS1WLD8gi1wDBsNZZZtQ/+GQ535",
- "QEAXeGfavjRtXVbe5udOVI+d9Kyq3KTpyqTxcswbnkRwLPzExwMEyG3GD0fbQW47I3jxPjWEBmsMPoIK",
- "7+EBYTRVOnslsY2KYCkKWxD7Nimamo/xCBivGfeesPgFkUevBNwYPK+JfiqXVFsRcBRPuwRaJuLY8a2f",
- "daXedah+TmKDElyjnyO9jW2B0QTjaBq0ghvlW+IPhaHuQJh4ScsmAjZSLhSlKidEFfhGpFdANMY4DOP2",
- "JYq7F8CequTTtjvm+T70JkplP5rVxQJ0RosiVrbkK/xK8Kt/6wMbyOumvEVVkRyTfXaznw6pzU2UC67q",
- "1Y65fIM7ThdU5I1QQ1gV2O8wZleYbfHfQ+rFN7GvB79v84GuxWEpf4fv9WJSr6HpTLFFNh4TeKfcHR3t",
- "1Lcj9Lb/USm9FIsuIJ/DSJrgcuEexfjb1+biCFMCDsKM7dXSZOzDkF6B332SiybXVJcr4VU2qGCCzuum",
- "TvtuM0S64voUL7/Em9LQ5G3vV2sGTr0szZMPoal2KVk0JTtZUDLNhQ357BnRh56gVJinjfI8nvHZrXUn",
- "QtMumO86Dhcb6tMyi6Sj5Xa+kHaDD3WGfLdOPTb2GcDxe78i8zW4PG2VhDUTtQ+i8aGsXiW0v3bqGzfP",
- "vaPrjwaIf27jc9JUfukq49llOp38u5+tM40A13L7L2A4H2z6oNbzUNq15qm2CWmKKo0qstS5Fcdkx48l",
- "YneyYafa9J5a2QOyejVGHBjWvp5OzouDLsxYMv+JHSV27OKVrNO5jtv8xnjEKqFYW9ssVuJ6ZMz4JVap",
- "DnI1D8fysYRryDUWtGtjpCTAIZmbzWTedv9nzuO0Ot2E1rtUx7vyGw+r2O254wcpSII0OrYC2Mn4bL5n",
- "TSSsfchzQxXmvpdo4+4+fR39AG8+h1yz9Z6UL39fAg/SiUy9XQZhmQcZYFjzHAUzhh5udWwB2pWRZSc8",
- "Qeb+O4OTeo58Ddt7inSoIVqSrHmLdZtkkYgB5A6ZIRGhYpFm1pDsgn+YaigDseAjO213aNNuJ6sZBwmM",
- "bjmXJ0lzcbRJjXZMGS+nOmou0/WgVF/4siKVFWZYjTGtf7zC4pfKxTnRJtlkqKWT82FK/huXrBIT9DS+",
- "E5+2EpT/zWfjsrOU7BrCesvoqbqhsvAtoqYXb9XJdtxHg1QuvpJgH+h5MzNr4/CHvupIkmd80pKXwogR",
- "WepdUDf0vYkbu6dsgF+bhwXhmoN0delR/i2FgkwLH7e/C45dqLBRjLdCgkoWVrDAJdOdvm3zuWKBGYrp",
- "TakLXgwXSCSsqIFOBllX03PuQvZL+92/pfYFRvZamBp63V/pzr/AYGqAxJDq58TdlvvfaN/G2MQ4B5l5",
- "z1M/BSsH2fWGVFIUdW4v6PBgNAa50SlQdrCSqJ0mH66ypyMEb52vYXtqlSBfItDvYAi0lZws6EHqvt4m",
- "H9X8pmJwL44C3ue0XE0nlRBllnB2nA/zxvYp/prl11AQc1P4SOVE9VdyH23sjTf7Zrn1eVKrCjgUD04I",
- "OeP2bYh3bHcLF/Um5/f0rvk3OGtR21TOzqh2csXjQfaYZFnekZv5YXbzMAWG1d1xKjvInqykm0TOWklv",
- "IrWQT8Zq5UNXc78+bUtUFoqYTHJhPVYv8aDHDEf4kj1IuYCOTEqcp4uoUsRCMm/z2t4MFcdUOBkCpIGP",
- "efTdQOEGjyIgWnE1cgptBjOXu0zMiYTWiXzbJG7D4rAxjb4/czNLl9/NhYROmVfTW8jCizxMtfWYqZwx",
- "Lanc3ibV2qA47cB6ksTy3nCsJhKrXUgbjTXEYVmKmwyZVdbkNo+ptqad6l7GvpxL28+c6hkEcV1UOUFt",
- "S5a0ILmQEvKwR/zZnoVqJSRkpcAwr5gHeq6N3L3CtzqclGJBRJWLAmyNgDgFpeaqOacoNkEQVRNFgaUd",
- "fPRp+wR0PHLKY1VGtsl57KIz68tMBJ6Ccsl4HIZs4yG8O6oKH5Sd/3yOFiGGsS7dt9dW+gxrK8OBpZVZ",
- "WXqDQaq6MvlJ1RiOhA9vzBTPyEoo7TQ7O5JqhmpDvO7ngmspyrJrBLIi8cJZtr+nm7M816+FuJ7R/PoB",
- "6pFc6GalxdQ/S+0H47UzyV5GppFloC+XETsvzuJP3cG1nh3nOLhEawDm+/0ca7+N+yxWyrq7rn5tdp7I",
- "nanFiuVxGv5jRbclY9JiLCGa6slWSbKP87EZMurwcmiCGZAlDdEM3BBsbL8cT3NOXWQe5r8o8fbHJXNw",
- "l0TiYhrySSe1ZHlStuoBgJDaF6O6lra0Uij5NFxFLOwLc3RJ9wEdycUx8udusJkRjg6UhjsBNYg2bAC8",
- "b5X9qU3JZSMXZ2Ljvz9oc3bdCviPu6k8Vo4+coob0nLV8n1+jwRHiGcG3hl/hIXD/Q26PwqpKYM38kYN",
- "AEjHJXVgGBWddCgYc8pKKDKqE5c72oSmgWbrXrT0i5sy5Th5Tu2FvQRixq4luHwTVqTuFUOvqCEl0TQf",
- "Wm55ARtQmAzCVnSmyvoZvL8DSltWqqd8iyorYQ2dcC2XBKNG0Y6twfdVTWdSAFTo/evbpGJxSOFd3jNU",
- "uLVnQSTLGOxGLRcWsXanyB6zRNSIsuGZPSZq7FEyEK1ZUdMO/tShIkfX7GaOcgRVA5k883rb2Gl+siO8",
- "9QOc+f4xUcZj4v04PnQwC4qjbhcD2huXWKvUqefxsMQww0vj0MDZisbxaUm85Ruqojc8bQAcknyr3ozc",
- "JyZ4gNivN5CjVNONu7s7TggORlQve1NSBJfNDt/ekPxZaHgnCSfHi6kaCpDB7rTUeLpwAjs2wHKW3Ii9",
- "RmrGElKO/zv+N8UK/HYgo1fbilahBvcKvMcOE0o3zgon0LLmQvPxhVOXT7CvlLMgsnpFt0RI/Mfoa/+s",
- "acnmWzyhFnzfjaglNSTkXITWd+3iFc3EuwWTqQfM2wWEn8qum40dMxhua0YJgDZXoDNOYWagawi3Ad3y",
- "lvPk2rAcVc9WTCm87HrbOcSCW7zPCbGiRagjY2a6bilRn6vU9P6f7autcCqfUKoqae7rlwFRdNUziNsa",
- "hZ649BJWu5/1DdVjTwJN3cOWaKV/zlvcwrh3YORGLFY+Ve+hA/agHtyg1MWdlnFIgeL2ZfSOB5GjlnLs",
- "XRgbHzIAGp3MPqvXHvBtNkafAexT4D+aNDK1jDHg/6vgPVFGL4TXVsz7BFjuPPmPwGrtqjOxySTM1b5Q",
- "CGtYNYqwbJMFeOMk47kEqmxsyPmPTmVrcyIyblRIG73YeN+aUQqYM94yS8arWkc0AEyNyLcBwkLzNKI1",
- "4exJSQlGDFvT8sc1SMmK1MaZ02HLeIU56b1J3vWNKP/NnTocgKlW+8GXhNC+VAuamQvcVr2xgYVKU15Q",
- "WYTNGSc5SHPvkxu6Vbf3fRhoZW3kiz3eDxpIM9337YEfBEnbAlJunfvyjp6JBkB6RBfFCNcCRrBG3ArW",
- "KKJFwpMwhCGeVoFuslIs8H1ZggBd8kn0/VhlRXA02Fp56LB5FPsNdk+DebfdwdcCZx0zxe5z9iOiDhWe",
- "nzjTO0+atab1H/zZiEx7EDz980UbFm43Z0j/sTeal/iIofNOs1903u+1DQ+x80HCk9G14CZ2ER3k7oFv",
- "aK4dX8+o64OPvQS1OmyGuq3aEfgNqg1yprkL3BkafQZKsUXK1L2jPdAmZC3J/h5IgGcr1bqz1Z22CaYw",
- "4xxSBGr3y9msElWWj4kGtKn5C2fQdpB2YUzQR2CuTqy7CZxQTbGKTmKTTtWKQ+tgJatm7PPLVPkuJTtl",
- "0Ehw0K6xXMyRl+ERtmYcfOPRGC+m/ddHXYNNwyQIJRLyWqJB84Zu99cVSqSEvfjb2fPHT3558vwLYhqQ",
- "gi1AtWmFe3V52ogxxvt2lk8bIzZYno5vgn+XbhHnPWX+uU2zKe6sWW6r2pyBg6pEh1hCIxdA5DhG6sHc",
- "aq9wnDbo+19ru2KLPPqOxVDw++yZi2yNL+CMO/1FzMluntGt+afj/MII/5FLym/tLRaYssem30Xfhh5b",
- "g+y/DBVGHnofjfaa5f4eFBeVMm9XPncUaMNHvxHyQAASr/k677DC6tptvkppbbtoBfYOs/4l9n3rSNsb",
- "do6Q+A57wAuf57XtmkhpB85nTvz4fYOUYCnvU5TQWf6+F39uga3nMdgip+pqDcqyJTEULoLnnOpl80oy",
- "IdsOHlNiKW2j35Rl5BGm1b7xTIWEYwRLuablp+caWGP9DPEBxdv004vwJV6IZItKdbs8YK/pqLmDV3fH",
- "m5q/wYeffwezR9F7zg3lnI6D2wxtJ1jYeOFvBfuWlNzgmDao5PEXZOZyslcScqb6zkzrcQqiAtcg2dwF",
- "8MFG73nptm+dPwt9BzKe+8gD8kPglBBo/GkhbI/oZ2YqiZMbpfIY9Q3IIoK/GI8KazjuuS7umL/7dmkl",
- "ggRRB6aVGFanHLs8mzrBXDq1guE6R9/WHdxGLup2bWNzooxOA3519U7PxqQyiafsNt0xl8pRcncflLn7",
- "d8iiYnHkxnDzxijm51ReTZs7MpHCtbcfNSv3hhl0EvJ+nE4WwEExhSlnf3ElBj7tXeohsC+7h0fVwnqX",
- "dBQWMZG1diYPpgpS7Y7Isuu6RXLq4qupvJZMb7G8pDfDsF+i+V6+bXIHuNwTjQfE3X1aXENT4rfNNFAr",
- "f7t+K2iJ95F1zHBzC4nyhHy9oauqdEZF8td7s/+Ap395Vjx6+vg/Zn959PxRDs+ef/noEf3yGX385dPH",
- "8OQvz589gsfzL76cPSmePHsye/bk2RfPv8yfPns8e/bFl/9xz/AhA7IF1GeAfjH5P9lZuRDZ2Zvz7NIA",
- "2+KEVuw7MHuDuvJcYPkzg9QcTyKsKCsnL/xP/8ufsJNcrNrh/a8TV8ZjstS6Ui9OT29ubk7CLqcLfFqc",
- "aVHny1M/Dxal6sgrb86bmGQbPYE72togcVMdKZzht7dfX1ySszfnJy3BTF5MHp08OnnsKqByWrHJi8lT",
- "/AlPzxL3/dQR2+TFh4/TyekSaImZOMwfK9CS5f6TBFps3f/VDV0sQJ5g2Ln9af3k1IsVpx/cE+uPu76d",
- "ho750w+dl+jFnp7oVD794Osg7m7dqYHn4nmCDiOh2NXsdIa1D8Y2BRU0Ti8FlQ11+gHF5eTvp87mEf+I",
- "aos9D6c+XUO8ZQdLH/TGwLqnx4YVwUpyqvNlXZ1+wP8g9QZA21R+p3rDT9H/dvqhs1b3ebDW7u9t97DF",
- "eiUK8MCJ+dzWh9z1+fSD/TeYCDYVSGbEQkyf4X61aY5OsUzQdvjzlufRH4fr6KR4Mecu6st8a/OKU1Iy",
- "5Z3S3cwwKiwhfF4gf9b9dDOmkQ9Iw0P+5NEjz9mc3hBQ5ak7xJO2oPi4x+v9JDfDG2/I2nat7ON08uxA",
- "QHfahjqpASPAfEUL4l8y4tyPP93c59wGxxleb+8khODZp4Ogs33kO9iSH4Qm36Dy9HE6ef4pd+KcG1GO",
- "lgRbBmUah0fkJ37NxQ33LY0wU69WVG5HHx9NFwq9Z5KtqRMlm2Z8MXmPL/nt69buUTsrigHRW6EOlP5K",
- "4O2YwthKLSqXCLhFWivTMm6WMFSKB6i6tNVKe/mibFYT74LlooBJKG1qWcPHO/KEntueSn0esfGgsRLj",
- "Zee+sGoAajT5Ud+paUce6iP7SLit/duGmf7JU/7kKQ1Pef7o6aeb/gLkmuVALmFVCUklK7fkJ97EL9+a",
- "x50VRTRjXPfo7+Vx08kmy0UBC+CZY2DZTBRbX9+8M8E1WPV1IMicfuj86cTXiY3EiGXDMr8TShZYjmW4",
- "iNmWnL8aSDi2W5/zfrXFpm083uTFuw9W/zPKTaue9UEccMZpsOd93vQ+zjV3kb1ZyELoJh7FLupPRvQn",
- "I7qTcDP68IyRb6Lahy2SRAd39tTXO4pV7qR6CMoYHeWzHt+jbPxQ/4npOzbzHhQk+GAfgvXR/CeL+JNF",
- "3I1FfAuRw4in1jGNCNEdpg+NZRj43rfo+MWxVjwmnbLN65LKIPZ+n5njDEd0xo1PwTU+tVIXxZXV6Sgn",
- "sGE2yiGygcfV8/5keX+yvD8Oyzvbz2i6gsmdNaNr2K5o1ehDalnrQtwEXhCExUYoDe3A5mOt+n+f3lCm",
- "s7mQLo8znWuQw84aaHnqirb1fm3rpAy+YPGX4McwY0L011PaNWx3/SeG9aY6Dpwrsa/OuZBo5J8r+c+t",
- "ozV0XCLbb1yW794blq1Arv2N0PrhXpye4vvVpVD6dPJx+qHnows/vm/I40Nzjzgy+fj+4/8PAAD///89",
- "16auAQEA",
+ "H4sIAAAAAAAC/+y9e5PbtpIo/lVQ2q3y4yfO+Jk98a9O7Z3YSc5snMTlmeTcXY9vApEtCWcogAFAjRRf",
+ "f/dbaAAkSAISNaPY51TlL3tEEmg0Go1+94dJLlaV4MC1mrz4MKmopCvQIPEvmuei5jpjhfmrAJVLVmkm",
+ "+OSFf0aUlowvJtMJM79WVC8n0wmnK2jfMd9PJxJ+q5mEYvJCyxqmE5UvYUXNwHpbmbebkTbZQmRuiDM7",
+ "xPmryccdD2hRSFBqCOWPvNwSxvOyLoBoSbmiuXmkyA3TS6KXTBH3MWGcCA5EzIledl4mcwZloU78In+r",
+ "QW6DVbrJ00v62IKYSVHCEM6XYjVjHDxU0ADVbAjRghQwx5eWVBMzg4HVv6gFUUBlviRzIfeAaoEI4QVe",
+ "ryYv3k0U8AIk7lYObI3/nUuA3yHTVC5AT95PY4uba5CZZqvI0s4d9iWoutSK4Lu4xgVbAyfmqxPyfa00",
+ "mQGhnLz95iV5+vTpl2YhK6o1FI7IkqtqZw/XZD+fvJgUVIN/PKQ1Wi6EpLzImvfffvMS579wCxz7FlUK",
+ "4oflzDwh569SC/AfRkiIcQ0L3IcO9ZsvIoei/XkGcyFh5J7Yl4+6KeH8n3VXcqrzZSUY15F9IfiU2MdR",
+ "HhZ8vouHNQB03q8MpqQZ9N2j7Mv3Hx5PHz/6+G/vzrL/cX8+f/px5PJfNuPuwUD0xbyWEni+zRYSKJ6W",
+ "JeVDfLx19KCWoi4LsqRr3Hy6QlbvviXmW8s617SsDZ2wXIqzciEUoY6MCpjTutTET0xqXho2ZUZz1E6Y",
+ "IpUUa1ZAMTXc92bJ8iXJqbJD4HvkhpWlocFaQZGitfjqdhymjyFKDFy3wgcu6J8XGe269mACNsgNsrwU",
+ "CjIt9lxP/sahvCDhhdLeVeqwy4pcLoHg5OaBvWwRd9zQdFluicZ9LQhVhBJ/NU0Jm5OtqMkNbk7JrvF7",
+ "txqDtRUxSMPN6dyj5vCm0DdARgR5MyFKoByR58/dEGV8zha1BEVulqCX7s6ToCrBFRAx+wfk2mz7f138",
+ "+AMRknwPStEFvKH5NQGeiwKKE3I+J1zogDQcLSEOzZepdTi4Ypf8P5QwNLFSi4rm1/EbvWQrFlnV93TD",
+ "VvWK8Ho1A2m21F8hWhAJupY8BZAdcQ8pruhmOOmlrHmO+99O25HlDLUxVZV0iwhb0c1fH00dOIrQsiQV",
+ "8ILxBdEbnpTjzNz7wcukqHkxQszRZk+Di1VVkLM5g4I0o+yAxE2zDx7GD4OnFb4CcPwgSXCaWfaAw2ET",
+ "oRlzus0TUtEFBCRzQn5yzA2fanENvCF0Mtvio0rCmolaNR8lYMSpd0vgXGjIKglzFqGxC4cOw2DsO44D",
+ "r5wMlAuuKeNQGOaMQAsNllklYQom3K3vDG/xGVXwxbPUHd8+Hbn7c9Hf9Z07Pmq38aXMHsnI1WmeugMb",
+ "l6w634/QD8O5FVtk9ufBRrLFpblt5qzEm+gfZv88GmqFTKCDCH83KbbgVNcSXlzxh+YvkpELTXlBZWF+",
+ "Wdmfvq9LzS7YwvxU2p9eiwXLL9gigcwG1qjChZ+t7D9mvDg71puoXvFaiOu6CheUdxTX2Zacv0ptsh3z",
+ "UMI8a7TdUPG43Hhl5NAv9KbZyASQSdxV1Lx4DVsJBlqaz/GfzRzpic7l7+afqirN17qax1Br6NhdyWg+",
+ "cGaFs6oqWU4NEt+6x+apYQJgFQnavnGKF+qLDwGIlRQVSM3soLSqslLktMyUphpH+ncJ88mLyb+dtvaX",
+ "U/u5Og0mf22+usCPjMhqxaCMVtUBY7wxoo/awSwMg8ZHyCYs20OhiXG7iYaUmGHBJawp1yetytLhB80B",
+ "fudmavFtpR2L754KlkQ4sS/OQFkJ2L54T5EA9QTRShCtKJAuSjFrfrh/VlUtBvH5WVVZfKD0CAwFM9gw",
+ "pdUDXD5tT1I4z/mrE/JtODaK4oKXW3M5WFHD3A1zd2u5W6yxLbk1tCPeUwS3U8gTszUeDUbMPwbFoVqx",
+ "FKWRevbSinn5b+7dkMzM76M+/tcgsRC3aeJCRcthzuo4+Eug3NzvUc6QcJy554Sc9b+9HdmYUeIEcyta",
+ "2bmfdtwdeGxQeCNpZQF0T+xdyjgqafYlC+sduelIRheFOTjDAa0hVLc+a3vPQxQSJIUeDF+VIr/+G1XL",
+ "I5z5mR9rePxwGrIEWoAkS6qWJ5OYlBEer3a0MUfMvIgKPpkFU500SzzW8vYsraCaBktz8MbFEot6/A6Z",
+ "HsiI7vIj/oeWxDw2Z9uwfjvsCblEBqbscXZOhsJo+1ZBsDOZF9AKIcjKKvjEaN0HQfmynTy+T6P26Gtr",
+ "U3A75BbR7NDlhhXqWNuEg6X2KhRQz19ZjU7DSkW0tmZVVEq6ja/dzjUGAZeiIiWsoeyDYFkWjmYRIjZH",
+ "5wtfiU0Mpq/EZsATxAaOshNmHJSrPXb3wPfKQSbkfszj2GOQbhZoZHmF7IGHIpCZpbVWn82EvB077vFZ",
+ "TlobPKFm1OA2mvaQhK/WVebOZsSOZ1/oDdS6PXdz0f7wMYx1sHCh6R+ABWVGPQYWugMdGwtiVbESjkD6",
+ "y+gtOKMKnj4hF387e/74yS9Pnn9hSLKSYiHpisy2GhS575RVovS2hAfDlaG6WJc6PvoXz7zltjtubBwl",
+ "apnDilbDoaxF2MqE9jVi3htirYtmXHUD4CiOCOZqs2gn1tlhQHvFlBE5V7OjbEYKYUU7S0EcJAXsJaZD",
+ "l9dOsw2XKLeyPoZuD1IKGb26Kim0yEWZrUEqJiLupTfuDeLe8PJ+1f/dQktuqCJmbrSF1xwlrAhl6Q0f",
+ "z/ft0Jcb3uJmJ+e3642szs07Zl+6yPemVUUqkJnecFLArF50VMO5FCtCSYEf4h39LWgrt7AVXGi6qn6c",
+ "z4+jOwscKKLDshUoMxOxbxipQUEuuA0N2aOuulHHoKePGG+z1GkAHEYutjxHw+sxjm1ak18xjl4gteV5",
+ "oNYbGEsoFh2yvLv6nkKHneqeioBj0PEaH6Pl5xWUmn4j5GUr9n0rRV0dXcjrzzl2OdQtxtmWCvOtNyow",
+ "vii74UgLA/tJbI2fZUEv/fF1a0DokSJfs8VSB3rWGynE/PgwxmaJAYoPrJZamm+GuuoPojDMRNfqCCJY",
+ "O1jL4QzdhnyNzkStCSVcFICbX6u4cJYIYEHPOTr8dSjv6aVVPGdgqCuntVltXRF0Zw/ui/bDjOb2hGaI",
+ "GpVw5jVeWPuWnc4GR5QSaLElMwBOxMx5zJwvDxdJ0RevvXjjRMMIv+jAVUmRg1JQZM5Stxc0/569OvQO",
+ "PCHgCHAzC1GCzKm8M7DX671wXsM2w8gRRe5/97N68Bng1ULTcg9i8Z0Yehu7h3OLDqEeN/0ugutPHpId",
+ "lUD8vUK0QGm2BA0pFB6Ek+T+9SEa7OLd0bIGiQ7KP5Ti/SR3I6AG1D+Y3u8KbV0l4iGdemskPLNhnHLh",
+ "BavYYCVVOtvHls1LHR3crCDghDFOjAMnBK/XVGnrVGe8QFugvU5wHiuEmSnSACfVEDPyz14DGY6dm3uQ",
+ "q1o16oiqq0pIDUVsDRw2O+b6ATbNXGIejN3oPFqQWsG+kVNYCsZ3yLIrsQiiuvE9uaiT4eLQQ2Pu+W0U",
+ "lR0gWkTsAuTCvxVgN4wJSwDCVItoSzhM9SinCUSbTpQWVWW4hc5q3nyXQtOFfftM/9S+OyQuqtt7uxCg",
+ "MBTNve8gv7GYtdGAS6qIg4Os6LWRPdAMYr3/Q5jNYcwU4zlkuygfVTzzVngE9h7SulpIWkBWQEm3w0F/",
+ "so+JfbxrANzxVt0VGjIb1hXf9JaSfRTNjqEFjqdiwiPBJyQ3R9CoAi2BuK/3jFwAjh1jTo6O7jVD4VzR",
+ "LfLj4bLtVkdGxNtwLbTZcUcPCLLj6GMATuChGfr2qMCPs1b37E/x36DcBI0ccfgkW1CpJbTjH7SAhA3V",
+ "RcwH56XH3nscOMo2k2xsDx9JHdmEQfcNlZrlrEJd5zvYHl31608Q9buSAjRlJRQkeGDVwCr8ntiApP6Y",
+ "t1MFR9nehuAPjG+R5ZRMocjTBf4atqhzv7GRroGp4xi6bGRUcz9RThBQHz9nRPDwFdjQXJdbI6jpJWzJ",
+ "DUggqp6tmNY2gr2r6mpRZeEAUb/GjhmdVzPqU9zpZr3AoYLlDbdiOrE6wW74LnuKQQcdTheohChHWMgG",
+ "yIhCMCoAhlTC7DpzwfQ+nNpTUgdIx7TRpd1c//dUB824AvLfoiY55ahy1RoamUZIFBRQgDQzGBGsmdOF",
+ "urQYghJWYDVJfPLwYX/hDx+6PWeKzOHGZ6CYF/voePgQ7ThvhNKdw3UEe6g5bueR6wMdPubic1pIn6fs",
+ "D7VwI4/ZyTe9wRsvkTlTSjnCNcu/MwPonczNmLWHNDIuzATHHeXL6bjsh+vGfb9gq7qk+hheK1jTMhNr",
+ "kJIVsJeTu4mZ4F+vaflj8xlm10BuaDSHLMeckJFjwaX5xqaRmHEYZ+YA2xDSsQDBuf3qwn60R8Vso/TY",
+ "agUFoxrKLakk5GCzJ4zkqJqlnhAbV5kvKV+gwiBFvXCBfXYcZPi1sqYZWfPBEFGhSm94hkbu2AXggrl9",
+ "Ao0Rp4Aala5vIbcKzA1t5nM5U2Nu5mAP+h6DqJNsOklqvAap61bjtcjpZgGNuAw68l6An3bika4URJ2R",
+ "fYb4CrfFHCazuX+Myb4dOgblcOIg1LB9mIo2NOp2uT2C0GMHIhIqCQqvqNBMpexTMQ8z/twdprZKw2po",
+ "ybef/pI4fm+T+qLgJeOQrQSHbTTJnXH4Hh9GjxNek4mPUWBJfdvXQTrw98DqzjOGGu+KX9zt/gnte6zU",
+ "N0IeyyVqBxwt3o/wQO51t7spb+snpWUZcS26fKA+A1DTpv4Ak4QqJXKGMtt5oab2oDlvpEse6qL/TRPl",
+ "fISz1x+350MLU03RRgxlRSjJS4YWZMGVlnWurzhFG1Ww1Ejwk1fG01bLl/6VuJk0YsV0Q11xioFvjeUq",
+ "GrAxh4iZ5hsAb7xU9WIBSvd0nTnAFXdvMU5qzjTOtTLHJbPnpQKJEUgn9s0V3ZK5oQktyO8gBZnVuiv9",
+ "Y7qb0qwsnUPPTEPE/IpTTUqgSpPvGb/c4HDe6e+PLAd9I+R1g4X47b4ADoqpLB6k9a19igHFbvlLF1yM",
+ "5QnsYx+s2ebfTswyOyn3/+f+f754d5b9D81+f5R9+f+dvv/w7OODh4Mfn3z861//b/enpx//+uA//z22",
+ "Ux72WDKWg/z8ldOMz1+h+tP6gAawfzL7/4rxLEpkYTRHj7bIfUw8dgT0oGsc00u44nrDDSGtackKw1tu",
+ "Qw79G2ZwFu3p6FFNZyN6xjC/1gOVijtwGRJhMj3WeGspahjXGE97RKeky2TE8zKvud1KL33brB4fXybm",
+ "0ya11Va9eUEw73FJfXCk+/PJ8y8m0zZfsXk+mU7c0/cRSmbFJpaVWsAmpiu6A4IH454iFd0q0HHugbBH",
+ "Q+lsbEc47ApWM5BqyapPzymUZrM4h/O5Es7mtOHn3AbGm/ODLs6t85yI+aeHW0uAAiq9jFXD6Ahq+Fa7",
+ "mwC9sJNKijXwKWEncNK3+RRGX3RBfSXQOVZlQO1TjNGGmnNgCc1TRYD1cCGjDCsx+umlBbjLXx1dHXID",
+ "x+Dqz9n4M/3fWpB73359SU4dw1T3bIK0HTpIaY2o0i5rqxOQZLiZrQFkhbwrfsVfwRytD4K/uOIF1fR0",
+ "RhXL1WmtQH5FS8pzOFkI8sIngr2iml7xgaSVLNMVpOCRqp6VLCfXoULSkqctvTIc4erqHS0X4urq/SA2",
+ "Y6g+uKmi/MVOkBlBWNQ6c4UjMgk3VMZ8X6opHIAj28owu2a1QraorYHUF6Zw48d5Hq0q1U8gHi6/qkqz",
+ "/IAMlUuPNVtGlBbSyyJGQLHQ4P7+INzFIOmNt6vUChT5dUWrd4zr9yS7qh89egqkk1H7q7vyDU1uKxht",
+ "XUkmOPeNKrhwq1bCRkuaVXQRc7FdXb3TQCvcfZSXV2jjKEuCn3UyeX1gPg7VLsDjI70BFo6DsxJxcRf2",
+ "K18kLL4EfIRbiO8YcaN1/N92v4Lc3ltvVy8/eLBLtV5m5mxHV6UMifudaWoHLYyQ5aMxFFugturKLM2A",
+ "5EvIr139G1hVejvtfO4Dfpyg6VkHU7Yyks3Mw9oc6KCYAamrgjpRnPJtv0iCAq19WPFbuIbtpWhLexxS",
+ "FaGbpK9SBxUpNZAuDbGGx9aN0d98F1WGin1V+Vx3THr0ZPGioQv/TfogW5H3CIc4RhSdJPIUIqiMIMIS",
+ "fwIFt1ioGe9OpB9bntEyZvbmi1RJ8ryfuFda5ckFgIWrQau7fb4CLLMmbhSZUSO3C1chzCaiB1ysVnQB",
+ "CQk59BGNTPfu+JVwkH33XvSmE/P+hTa4b6Ig25czs+YopYB5YkgFlZle2J+fybohnWcCC386hM1KFJOa",
+ "+EjLdKjs+OpsJcMUaHECBslbgcOD0cVIKNksqfLFy7DGmz/Lo2SAP7Cwwq5yOudBxFpQyK0pluN5bv+c",
+ "DrRLV1THV9Lx5XNC1XJEKRwj4WOQfGw7BEcBqIASFnbh9mVPKG2Rh3aDDBw/zucl40CyWPBbYAYNrhk3",
+ "Bxj5+CEh1gJPRo8QI+MAbHSv48DkBxGeTb44BEjuilRQPzY65oO/IZ4+ZsPBjcgjKsPCWcKrlXsOQF3E",
+ "ZHN/9eJ2cRjC+JQYNrempWFzTuNrBxlUdUGxtVfDxQV4PEiJszscIPZiOWhN9iq6zWpCmckDHRfodkA8",
+ "E5vM5o9GJd7ZZmboPRohj9mssYNp6+fcU2QmNhg0hFeLjcjeA0saDg9GoOFvmEJ6xe9St7kFZte0u6Wp",
+ "GBUqJBlnzmvIJSVOjJk6IcGkyOV+UBLnVgD0jB1tfWmn/O5VUrviyfAyb2+1aVvqzScfxY5/6ghFdymB",
+ "v6EVpili86YvsUTtFN3Yl279nkCEjBG9YRNDJ83QFaSgBFQKso4QlV3HPKdGtwG8cS78Z4HxAqsEUb59",
+ "EARUSVgwpaE1ovs4ic9hnqRYnFCIeXp1upJzs763QjTXlHUj4oedZX7yFWBE8pxJpTP0QESXYF76RqFS",
+ "/Y15NS4rdUO2bClfVsR5A057DdusYGUdp1c373evzLQ/NCxR1TPkt4zbgJUZlp6OBnLumNrG+u5c8Gu7",
+ "4Nf0aOsddxrMq2ZiacilO8e/yLnocd5d7CBCgDHiGO5aEqU7GGSQgDvkjoHcFPj4T3ZZXweHqfBj743a",
+ "8WnAqTvKjhRdS2Aw2LkKhm4iI5YwHVRuHmbGJs4ArSpWbHq2UDtqUmOmBxk8fL27HhZwd91gezDQjcuL",
+ "hjl3agW66D9n8zlFAfnUiHA2HNDFuoFELcfmhBa1RKNaJ9huWJiyEexGrv27ny+0kHQBzjCaWZDuNAQu",
+ "5xA0BGUfFdHMejgLNp9DaBBUtzFmdYDrm32izR1GEFncalgzrr94FiOjPdTTwrgfZXGKidBCyk10OTS8",
+ "erEq0DubziXB1tzCehrNIP0OttnPRkMhFWVStRFjzhLa5X8H7Pp69R1sceS9gVgGsD27gmrqW0AajJkF",
+ "m0c2caJRgcIaplj0obOFB+zUWXyXjrQ1rupsmvjbsOxOVdbuUu5yMFq/nYFlzG5cxN1l5vRAF/F9Ut63",
+ "CSxhjAvJMRC5wqmY8j16hldRkx69j3YvgZaeeHE5k4/Tyd2cU7HbzI24B9dvmgs0imcMfrLOio6v+UCU",
+ "06qSYk3LzLnwUpe/FGt3+ePr3uP3iYXJOGVffn32+o0D/+N0kpdAZdYoY8lV4XvVv8yqbJ3a3VcJSize",
+ "KmKV9WDzm+KaodvvZgmumUKg7w+qPrcu3eAoOjfgPB6DuZf3Oe+zXeIOLzRUjRO6dZBYH3TX70zXlJXe",
+ "M+GhTcRL4uLGlQ6PcoVwgDv7r4MwhOyo7GZwuuOno6WuPTwJ5/oRq6XFNQ7uaqkhK3L+aHp06ekbITvM",
+ "3yXLRP3Zf5xYZYRsi8dE+KBv0NMXpk6IFbx+XfxqTuPDh+FRe/hwSn4t3YMAQPx95n5H/eLhw6irIWpJ",
+ "MEwCDQWcruBBE/ib3IhPa3bicDPugj5brxrJUqTJsKFQ65j26L5x2LuRzOGzcL8UUIL5aX9uXW/TLbpD",
+ "YMacoItUckwT97SyPYEUEbwf5od5WYa0kNmvKFY9t56b4RHi9Qq9HZkqWR73A/OZMuyV2/ge8zLBlxMG",
+ "MzNizRLhYrxmwVjmtTFl/HpABnNEkamilQRb3M2EO941Z7/VQFhhtJo5A4n3Wu+q88oBjjoQSI3qOZzL",
+ "DWyjCNrh72IHCSv+92VGBGK3ESSMJhqA+6ox6/uFNl6zVmc6NCgxnHHAuHcEFDr6cNRsEyyW3aigcXrM",
+ "mN6QntG51gOJOaK9HpnK5lL8DnFbNJrwI7nZvscBw0jc3yFUz8IOZx2W0nig2paV7ez7tnu8bpza+Dvr",
+ "wn7RTVuF21ym8VN92EbeRulV8QqiDskpJSx0R3ajVROsBY9XEJ+FFe19qALl9jzZxORO0kP8VIbpRad2",
+ "/PZUOpgHKVklvZnRWLl/owsZmILt7QRVaEH8x34DVJN2a2cnQVBh8y6zxY0qkG1timGhxFvqNXba0RpN",
+ "q8AgRYWqy9QGgpVKRIap+Q3ltk2i+c7yK/e1AusFNV/dCImlyVQ8/qOAnK2i5tirq3dFPvT1F2zBbAfA",
+ "WkHQYs4NZLurWipybfqaZHKHmvM5eTQN+ly63SjYmik2KwHfeGzfmFGF12XjkWw+McsDrpcKX38y4vVl",
+ "zQsJhV4qi1glSKN7opDXRDHNQN8AcPII33v8JbmP8VuKreGBwaITgiYvHn+J3nf7x6PYLes6OO5i2QXy",
+ "7L87nh2nYwxgs2MYJulGPYlWcbItnNO3w47TZD8dc5bwTXeh7D9LK8rpAuIhw6s9MNlvcTfRo9rDC7fe",
+ "AFBaii1hOj4/aGr4UyIN0bA/CwbJxWrF9MpF+SixMvTU9o+zk/rhbDNT1/rDw+UfYrBc5WOFerauT6zG",
+ "0FUijQBDGn+gK+iidUqorUdXsjaM1TckIue+3CX2QmlaoFjcmLnM0lGWxKjWOakk4xrtH7WeZ38xarGk",
+ "uWF/Jylws9kXzyI9Rbpl9/lhgH9yvEtQINdx1MsE2XuZxX1L7nPBs5XhKMWDNu03OJXJqL54/FYqiGz3",
+ "0GMlXzNKliS3ukNuNODUdyI8vmPAO5Jis56D6PHglX1yyqxlnDxobXbop7evnZSxEjJWw7o97k7ikKAl",
+ "gzUmccQ3yYx5x72Q5ahduAv0nzcExYucgVjmz3JUEQg8mrvyN40U//P3bTFedKza5JieDVDIiLXT2e0+",
+ "ccDXYVa3vv/WxuzgswTmRqPNdnofYCURqmtjcZtvPnE6b9Tca/e8Y3B8/CuRRgdHOf7hQwT64cOpE4N/",
+ "fdJ9bNn7w4fxmphRk5v5tcXCXTRi/Da2h1+JiAHMN6BqAopcym7EAJm6pMwDwwRnbqgp6Tb7+fRSxHGS",
+ "QeIBf/FTcHX1Dp94POAffUR8ZmaJG9iGNKcPe7fZWZRkiuZ5EGpMyVdiM5ZweneQJ55/AhQlUDLSPIcr",
+ "GTRzi7rr98aLBDRqRp1BKYySGfapCO35/zp4Nouf7sB2zcri57bcUO8ikZTny2ig5sx8+EvbdL1ZomWV",
+ "0dL3S8o5lNHhrG77i9eBI1r6P8TYeVaMj3y330zQLre3uBbwLpgeKD+hQS/TpZkgxGq3kkuTKVwuREFw",
+ "nrbOessch105g1Zhv9WgdOxo4AObrYTOLsN8bacqArxA69cJ+RZrKhhYOkV00erkyxN2S3XVVSloMcWy",
+ "iZdfn70mdlb7jW0dbDtlLdDo0l1F1Eo+vnRZ0wU4npM/fpzdScJm1UpnTWOrWNUj80bbeov1QifQHBNi",
+ "54S8spYw5e0sdhKCxTflCoqgj5bVxZAmzH+0pvkSTUydiyxN8uNbvHmqbA3wQb/opq8CnjsDt+vyZpu8",
+ "TYnQS5A3TAFmYcIauoWWmqpjzsTpCy91lydrzi2lnBwgUzRdFA5FuwfOCiTeNxyFrIf4Aw0MtkPioR3v",
+ "LvCraJnnfvu8nvPWl+1p+gB/72zEOeWCsxyLLMcEIiwKM87bNKIeddxNpCbuhEYOV7RpX5P/5bCYbOPn",
+ "GaFD3NBzGzw1m2qpw/6pYeOauSxAK8fZoJj63pPOr8G4AtcnwxBRyCeFjMSmROPZGz/4gWSE9R4Shqpv",
+ "zLMfnBkTE6GvGUeDhUObE7Ot56FUDB2MnDBNFgKUW0+36JV6Z745wfpPBWzen7wWC5ZfsAWOYaOhzLJt",
+ "6N9wqDMfCOgC78y7L827ripv83MnqsdOelZVbtJ0Z9J4O+YNTyI4Fn7i4wEC5Dbjh6PtILedEbx4nxpC",
+ "gzUGH0GF9/CAMJounb2W2EZFsBSFbxCbmxQtzcd4BIzXjHtPWPyCyKNXAm4MntfEdyqXVFsRcBRPuwRa",
+ "JuLYMdfPulLvOlS/JrFBCa7Rz5HexrbBaIJxNC+0ghvlW+IPhaHuQJh4ScsmAjbSLhSlKidEFZgj0msg",
+ "GmMchnH7FsXdC2BPV/Jp+znW+T70JkpVP5rVxQJ0Rosi1rbkK3xK8KnP9YEN5HXT3qKqSI7FPrvVT4fU",
+ "5ibKBVf1asdc/oU7Thd05I1QQ9gV2O8wVleYbfHfQ/rFN7GvB+e3+UDX4rCSv8N8vZjUa2g6U2yRjccE",
+ "3il3R0c79e0Ivf3+qJReikUXkM9hJE1wuXCPYvzta3NxhCUBB2HG9mppKvZhSK/A577IRVNrqsuV8Cob",
+ "dDBB53XTp323GSLdcX2Kl18ipzQ0edv71ZqBU5mleTIRmmpXkkVTspMFJctc2JDPnhF96AlKhXnaKM/j",
+ "GZ/dWnciNO2C+a7jcLGhPi2zSDpabucLaTf4UGfId+tUsrGvAI7P+x2Zr8HVaaskrJmofRCND2X1KqH9",
+ "tdPfuEn3jq4/GiD+uY3PSVP5peuMZ5fpdPLvfrbONAJcy+0/geF8sOmDXs9Dadeap9pXSNNUaVSTpc6t",
+ "OKY6fqwQu5MNO92m9/TKHpDVqzHiwLD39XRyXhx0YcaK+U/sKLFjF+9kna513NY3xiNWCcXa3maxFtcj",
+ "Y8YvsUt1UKt5OJaPJVxDrrGhXRsjJQEOqdxsJvO2+z9rHqfV6Sa03pU63lXfeNjFbs8dPyhBEpTRsR3A",
+ "TsZX8z1rImFtIs8NVVj7XqKNu5v6OjoBbz6HXLP1npIvf18CD8qJTL1dBmGZBxVgWJOOghVDD7c6tgDt",
+ "qsiyE56gcv+dwUmlI1/D9p4iHWqItiRrcrFuUywSMYDcITMkIlQs0swakl3wD1MNZSAWfGSn/RzastvJ",
+ "bsZBAaNbzuVJ0lwcbVGjHVPG26mOmst8elCpL8ysSFWFGXZjTOsfr7D5pXJxTrQpNhlq6eR8WJL/xhWr",
+ "xAI9je/El60E5X/z1bjsLCW7hrDfMnqqbqgs/BtR04u36mQ77qNBKRffSbAP9LyZmbVx+ENfdaTIM6a0",
+ "5KUwYkSWygvqhr43cWP3lA3wa+uwIFxzkK4vPcq/pVCQaeHj9nfBsQsVNorxVkhQycYKFrhkudO3bT1X",
+ "bDBDsbwpdcGL4QKJhBU10Mmg6mp6zl3Ifmmf+1xq32Bkr4Wpodf9ne58BgZTAySGVD8n7rbcn6N9G2MT",
+ "4xxk5j1P/RKsHGTXG1JJUdS5vaDDg9EY5EaXQNnBSqJ2mny4yp6OEOQ6X8P21CpBvkWg38EQaCs5WdCD",
+ "0n29TT6q+U3F4F4cBbzPabmaTiohyizh7Dgf1o3tU/w1y6+hIOam8JHKie6v5D7a2Btv9s1y6+ukVhVw",
+ "KB6cEHLGbW6Id2x3Gxf1Juf39K75NzhrUdtSzs6odnLF40H2WGRZ3pGb+WF28zAFhtXdcSo7yJ6qpJtE",
+ "zVpJbyK9kE/GauVDV3O/P21LVBaKmExyYT1WL/GgxwxHmMkelFxARyYlztNFVCliIZm3ybY3Q8UxFU6G",
+ "AGngY5K+Gyjc4FEERDuuRk6hrWDmapeJOZHQOpFvW8Rt2Bw2ptH3Z25m6fK7uZDQafNqvhay8CIPU20/",
+ "ZipnTEsqt7cptTZoTjuwniSxvDccq4nEahfSRmMNcViW4iZDZpU1tc1jqq15T3UvY9/Opf3OnOoZBHFd",
+ "VDlBbUuWtCC5kBLy8It42p6FaiUkZKXAMK+YB3qujdy9wlwdTkqxIKLKRQG2R0CcglJz1ZxTFJsgiKqJ",
+ "osDSDiZ92m8COh455bE6I9viPHbRmfVlJgJPQbliPA5D9uUhvDu6Ch9Unf98jhYhhrEu3dxrK32GvZXh",
+ "wNbKrCy9wSDVXZn8pGoMR8LEGzPFM7ISSjvNzo6kmqHaEK/7ueBairLsGoGsSLxwlu3v6eYsz/VrIa5n",
+ "NL9+gHokF7pZaTH1aan9YLx2JtmryDSyDfTlMmLnxVn8qTu417PjHAe3aA3AfL+fY+23cZ/FWll319Xv",
+ "zc4TtTO1WLE8TsP/WtFtyZi0GEuIlnqyXZJscj6+how6vByaYAZkSUM0AzcEG9svx9OcUxeZh/kvSrz9",
+ "cckc3CWRuJiGfNJJLVmelK16ACCkNmNU19K2Vgoln4ariIXNMEeXdB/QkVwcI3/uBpsZ4ehAabgTUINo",
+ "wwbA+1bZn9qSXDZycSY2/vmDtmbXrYD/uJvKY+3oI6e4IS3XLd/X90hwhHhl4J3xR9g43N+g+6OQmjZ4",
+ "I2/UAIB0XFIHhlHRSYeCMaeshCKjOnG5o01oGmi2LqOl39yUKcfJc2ov7CUQM3YtwdWbsCJ1rxl6RQ0p",
+ "ieb1oeWWF7ABhcUgbEdnqqyfwfs7oLRtpXrKt6iyEtbQCddyRTBqFO3YGvy3qvmYFAAVev/6NqlYHFJ4",
+ "l/cMFW7tWRDJMga7UcuFRazdKbLHLBE1omx4Zo+JGnuUDERrVtS0gz91qMjRNbuZoxxB1UAmz7zeNnaa",
+ "n+wIb/0AZ/77mCjjMfF+HB86mAXFUbeLAe2NS6xV6tTzeFhiWOGlcWjgbEXj+LQk3vINVdEbnjYADkm+",
+ "VW9G7hMTPEDs1xvIUarpxt3dHScEByOqV70pKYLLZodvb0j+LDS8k4ST48VUDQXIYHdaajxdOIEdX8B2",
+ "ltyIvUZqxhZSjv87/jfFDvx2IKNX245WoQb3CrzHDgtKN84KJ9Cy5kLz8YVTV0+wr5SzILJ6RbdESPzH",
+ "6Gu/1bRk8y2eUAu+/4yoJTUk5FyE1nft4hXNxLsFk6kHzNsFhJ/KrpuNHTMYbmtGCYA2V6AzTmFloGsI",
+ "twHd8pbz5NqwHFXPVkwpvOx62znEglu8rwmxokWoI2Nlum4rUV+r1Hz9/7dZW+FUvqBUVdLc9y8Douiq",
+ "ZxC3PQo9ceklrHan9Q3VY08CTd/DlmilT+ctbmHcOzByIxYrn+r30AF70A9u0OriTss4pEFxmxm9IyFy",
+ "1FKOvQtj40MGQKOT2Vf12gO+rcboK4B9CvxHi0amljEG/H8WvCfa6IXw2o55nwDLnZT/CKzWrjoTm0zC",
+ "XO0LhbCGVaMIy7ZYgDdOMp5LoMrGhpz/6FS2tiYi40aFtNGLjfetGaWAOeMts2S8qnVEA8DSiHwbICw0",
+ "TyNaE86elJRgxLA1LX9cg5SsSG2cOR22jVdYk96b5N23EeW/uVOHAzDVaj+YSQhtplrwmrnAbdcbG1io",
+ "NOUFlUX4OuMkB2nufXJDt+r2vg8DrayNfLHH+0EDaaab3x74QZC0LSDl1rkv7+iZaACkR3RRjHAtYARr",
+ "xK1gjSJaJDwJQxjiZRXoJivFAvPLEgToik+i78cqK4KjwdbKQ4fNo9jvsHsarLvtDr4WOOuYKXafsx8R",
+ "dajw/MSZ3nnSrDWtn/BnIzLtQfD0zxdtWLjdnCH9x3I0LzGJoZOn2W867/fahofY+SDhyehacBO7iA5y",
+ "l+AbmmvH9zPq+uBjmaBWh81Qt1U7Ar9BtUHONHeBO0Ojz0AptkiZujzaA21C1pLs74EEeLZTrTtb3Wmb",
+ "YAozziFNoHZnzmaVqLJ8TDSgLc1fOIO2g7QLY4I+AnN1Yt1N4IRqmlV0Cpt0ulYc2gcr2TVjn1+myncp",
+ "2SmDRoKDdo3lYo68DI+wNeNgjkdjvJj2s4+6BpuGSRBKJOS1RIPmDd3u7yuUKAl78bez54+f/PLk+RfE",
+ "vEAKtgDVlhXu9eVpI8YY79tZPm2M2GB5Or4JPi/dIs57yny6TbMp7qxZbqvamoGDrkSHWEIjF0DkOEb6",
+ "wdxqr3CcNuj7n2u7Yos8+o7FUPDH7JmLbI0v4Iw7/UXMyW6e0e35p+P8wgj/kUvKb+0tFpiyx6bzom9D",
+ "j61B9p+GCiOJ3kejvWa5fwTFRaXM27XPHQXaMOk3Qh4IQCKbr5OHFXbXbutVSmvbRSuwd5j1L7HvW0fa",
+ "3rBzhMR/sAe8MD2vfa+JlHbgfObCj983SAmW8j5FCZ3l78v4cwtsPY/BFjlVV2tQli2JoXARpHOql02W",
+ "ZEK2HSRTYitto9+UZSQJ02rfeKZCwjGCpVzT8tNzDeyxfob4gOJtOvUizMQLkWxRqW5XB+w1HTV3kHV3",
+ "vKn5G0z8/DuYPYrec24o53Qc3GZoO8HGxgt/K9hcUnKDY9qgksdfkJmryV5JyJnqOzOtxymIClyDZHMX",
+ "wAcbvSfTbd86fxb6DmQ895EH5IfAKSHQ+NNC2B7Rz8xUEic3SuUx6huQRQR/MR4V9nDcc13csX737cpK",
+ "BAWiDiwrMexOOXZ5tnSCuXRqBcN1jr6tO7iNXNTt2sbWRBldBvzq6p2ejSllEi/ZbT7HWipHqd19UOXu",
+ "P6CKisWRG8PNG6OYn1N1NW3tyEQJ195+1KzcG2bQKcj7cTpZAAfFFJac/cW1GPi0d6mHwGZ2D4+qhfUu",
+ "5SgsYiJr7UweTBWU2h1RZdd9Fqmpi1lTeS2Z3mJ7SW+GYb9E671829QOcLUnGg+Iu/u0uIamxW9baaBW",
+ "/nb9VtAS7yPrmOHmFhLlCfl6Q1dV6YyK5K/3Zv8BT//yrHj09PF/zP7y6PmjHJ49//LRI/rlM/r4y6eP",
+ "4clfnj97BI/nX3w5e1I8efZk9uzJsy+ef5k/ffZ49uyLL//jnuFDBmQLqK8A/WLyv7OzciGyszfn2aUB",
+ "tsUJrdh3YPYGdeW5wPZnBqk5nkRYUVZOXvif/pc/YSe5WLXD+18nro3HZKl1pV6cnt7c3JyEn5wuMLU4",
+ "06LOl6d+HmxK1ZFX3pw3Mck2egJ3tLVB4qY6UjjDZ2+/vrgkZ2/OT1qCmbyYPDp5dPLYdUDltGKTF5On",
+ "+BOeniXu+6kjtsmLDx+nk9Ml0BIrcZg/VqAly/0jCbTYuv+rG7pYgDzBsHP70/rJqRcrTj+4FOuPu56d",
+ "ho750w+dTPRiz5foVD794Psg7n670wPPxfMEH4yEYtdrpzPsfTD2VVDBy+mloLKhTj+guJz8/dTZPOIP",
+ "UW2x5+HUl2uIv9nB0ge9MbDu+WLDimAlOdX5sq5OP+B/kHoDoG0pv1O94afofzv90FmrezxYa/f39vPw",
+ "jfVKFOCBE/O57Q+56/HpB/tvMBFsKpDMiIVYPsP9asscnWKboO3w5y3Poz8O19Ep8WLOXdSX+dbWFaek",
+ "ZMo7pbuVYVTYQvi8QP6s++VmzEs+IA0P+ZNHjzxnc3pDQJWn7hBP2obi45LX+0VuhjfekLXtWtnH6eTZ",
+ "gYDutA11SgNGgPmKFsRnMuLcjz/d3OfcBscZXm/vJITg2aeDoLN95DvYkh+EJt+g8vRxOnn+KXfinBtR",
+ "jpYE3wzaNA6PyE/8mosb7t80wky9WlG5HX18NF0o9J5JtqZOlGxe44vJe8zkt9mt3aN2VhQDordCHSj9",
+ "lcDbMYWxlVpUrhBwi7RWpmXcLGGoFA9QdWm7lfbqRdmqJt4Fy0UBk1Da1LKGj3fkCT23PZX6PGLjQWMl",
+ "xsvOfWPVANRo8aO+U9OOPNRH9pFw2/u3DTP9k6f8yVManvL80dNPN/0FyDXLgVzCqhKSSlZuyU+8iV++",
+ "NY87K4poxbju0d/L46aTTZaLAhbAM8fAspkotr6/eWeCa7Dq60CQOfXqXkfiT3BPr0jGpJU2qm7y4l3M",
+ "T+macVb1rGQ5saYu1PWMIhOoYk0Jry7zmwbbOmA/kTKxpGBl3aST6hvh0rWGFwq5HyZZq99sn248iExv",
+ "yQ3jhbjBJsUI7m81IJ938PppJhEAg9CtYUeE1oJvAByAlZoPTf9jsLNj8tf0dnOX9NCp39/xytp7mTYl",
+ "cv7r4scfgqQOm4gKhS/w5cgc4z+lwLjGG4qBPlJDcUJeWtNLuSVcoJG/Vp2mLSd/3kN/8v678/5vm5qJ",
+ "tl2Lxj4MQ5YU3AUnowTeKG//0PnTmSYmNsouVunQ/E4oWWCrreEFNduS81cD7dV+1r8Svtriq71bIcLv",
+ "+yAexPgT7GWXSGMWshC6iTW0i/pTyPxTyLyT4jr68IzRXaOWJdsAjw70sanvZRfrykz1EJQx9qfPenyP",
+ "svFD21bMlmWrqkJBggc2ybeP5j9ZxJ8s4m4s4luIHEY8tY5pRIjuMFvXWIaBtRyKTsyTlzr863VJZZBX",
+ "tc+EfYYjxlXBP4RrfGqDXRRX1l5HOYENsxFskQ08rg3vT5b3J8v712F5Z/sZTVcwubPV6xq2K1o1ti61",
+ "rHUhbgIPN8Jio0+HPj6r+Pf/Pr2hTGdzIV2NfjrXIIcfa6DlqWvI2fu17YE1eIKNvYIfw2o40V9Paddp",
+ "2fWNG9ab+nDgOI89dY7jxEs+FdU/boNowqAUZPtNOMq794ZlK5BrfyO0MRYvTk+xNsFSKH06+Tj90Iu/",
+ "CB++b8jjQ3OPODL5iHQhJFswTsvMxTa0XYUnT04eTT7+vwAAAP//tpMWzK0HAQA=",
}
// GetSwagger returns the content of the embedded swagger specification file
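Editor's note: the long string slices above are the OpenAPI specification, gzip-compressed and base64-encoded by the oapi-codegen generator; any regeneration rewrites the blob wholesale, which is why these hunks are so large. A minimal sketch of how such an embedded spec is recovered at runtime (the function name and error wording are illustrative, not the generated code):

package specdecode

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

// decodeEmbeddedSpec joins the chunks, base64-decodes them, and gunzips
// the result to recover the raw JSON specification.
func decodeEmbeddedSpec(chunks []string) ([]byte, error) {
	zipped, err := base64.StdEncoding.DecodeString(strings.Join(chunks, ""))
	if err != nil {
		return nil, fmt.Errorf("base64 decode: %w", err)
	}
	zr, err := gzip.NewReader(bytes.NewReader(zipped))
	if err != nil {
		return nil, fmt.Errorf("gzip reader: %w", err)
	}
	defer zr.Close()
	return io.ReadAll(zr)
}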
diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go
index 3003f8b27..107ad4eba 100644
--- a/daemon/algod/api/server/v2/generated/participating/public/routes.go
+++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go
@@ -380,18 +380,19 @@ var swaggerSpec = []string{
"Xc05yBVLgVxAUQpJJcs35AdeB1kH/XL77O8HfsXFNfeIMFplVRRUbpwQTWueU/Gg+8dW/tOrc9II2shF",
"6UJh3AOKqFam9bXQ+GLy/qPXAUYqFtteO5phO7Oxr4IKXh7WTtB/oI4+oAV88Pcj58aMP0RPhFVxj3wF",
"tvibLcXng14bWHd8sWZZsJKU6nRZlUcf8D+okAZA2+rcR3rNjzCk7uhDa63ucW+t7d+bz8M3VoXIwAMn",
- "5nPb8n3b46MP9t9gIliXIJm5cbAinvvVVi49ws6fm/7PG55Gf+yvo1W1ceDnow+tP9vEoJaVzsR18C36",
- "B6xzqz+feVip7t9H15RpI924EoDYALz/sQaaH7l+H51fmxLbvSdYNzz4sSMPlcLWAGmrom/p9UUrtUza",
- "3PuvBJoPhjjlOpkxjuwjZG+N1c8+7Os2PaZ2sQQbJOkdpxHhUQsyk4JmKVXYV9p1xukptR9vqTh1SwWc",
- "RdxiCCbaCfrV5AwjONzpK8Fxx0iHwb6Qsxd+wiYr5zeXqHoQfUUz4ovGJOQVzc2GQ0ZOndzewsZvLQ19",
- "fvHlM8sbn0xA+MofPkUoVtBqaXYyXoMjaGE1Rhow6p9hAAvgiWNByUxkG9dlaCLptV7blP8uczui7Xug",
- "bUGkkhZq6OEdmBd/3zbFXabEPy14f1rw/rTx/GnB+3N3/7TgjbTg/Wnf+tO+9T/SvrWPUSsmZjqjzrC0",
- "iW2PaWteq/fRprx8zeLbxYiYrmWyVq4fVrJn+pCQC6yHQc0tASuQNCcpVVa6ckWXCgyexJJGkJ1c8qQF",
- "iQ1RNBPfb/5rY0Mvq+Pjx0COH3S/UZrlecib+9+ivIuPbOuvL8nl5HLSG0lCIVaQ2YzFsLyx/WrnsP+r",
- "Hvd1ry46pgZjwRFf+Yioaj5nKbMozwVfELoQTVwz1nfkAp+ANMDZ7jKE6anr4MRcqUjXfLpdhbktufcl",
- "gLNmC3fGAnTIJR4GYAhvzxiAfxsTAPA/Wkq/aYmf2zLSrWP3uOqfXOVTcJXPzlf+6N7VwLT431LMfHL8",
- "5A+7oNAQ/b3Q5BuM2b+dOFY39I812bmpoOWrZ3hzXxP3G8bR4i1aR9C+e28uAgVy5S/YJiz05OgIyykt",
- "hdJHE3P9tUNGw4fva5g/+NuplGyFXVzff/z/AQAA//8OtxrjPRABAA==",
+ "5nPb8n3b46MP9t9gIliXIJm5cbAinvvVVi49ws6fm/7PG55Gf+yvo1W1ceDnI28PianE7Tc/tP5sk41a",
+ "VjoT18Es6EmwbrA+ZOZhpbp/H11Tpo0c5IoFYqvw/scaaH7kOoN0fm2KcfeeYIXx4MeO5FQKWy2krbS+",
+ "pdcXrSQ0abP0vxJoaBjiqetkxjgympARNvZB+7CvBfXY38USbDild7FGxEwtyEwKmqVUYQdq10Onp/5+",
+ "vKWK1S0qcBZxoCGYaFHo150zLONwp1cFxx0jRwb7Qs5e+Amb/J3fXPbqQfQVzYgvL5OQVzQ3Gw4ZOXUS",
+ "fgsbv7Xc9PkFnc8smXwyUeIrf/gUoVhrq6UDyni1jqDZ1Ri5wSiKhgEsgCeOBSUzkW1cP6KJpNd6bYsD",
+ "dJnbEW3fGG1bI5W0UEMP78AQ+fu2Pu4yOv5p6/vT1venNehPW9+fu/unrW+kre9PS9iflrD/kZawfcxf",
+ "MTHTmX+GpU1skExb81q9jzaF6GsW3y5bxHQtk7WyArHmPdOHhFxg5QxqbglYgaQ5Samy0pUrz1RgmCUW",
+ "P4Ls5JInLUhsMKOZ+H7zXxtFelkdHz8Gcvyg+43SLM9D3tz/FuVdfGSbhH1JLieXk95IEgqxgszmNoaF",
+ "kO1XO4f9X/W4r3sV1DGJGEuT+BpJRFXzOUuZRXku+ILQhWgioLESJBf4BKQBzvahIUxPXa8n5opKujbV",
+ "7XrNbcm9LwGcNVu4M2qgQy7xgAFDeHtGC/zbmFCB/9FS+k2LAd2WkW4du8dV/+Qqn4KrfHa+8kf3wwam",
+ "xf+WYuaT4yd/2AWFhujvhSbfYHT/7cSxuvV/rB3PTQUtX2fDm/uaCOEw4hZv0TrW9t17cxEokCt/wTYB",
+ "pCdHR1h4aSmUPpqY668dXBo+fF/D/MHfTqVkK+z3itZNIdmCcZonLnAzaYJEHx0eTz7+/wAAAP//5fUK",
+ "SIoQAQA=",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index bd40224f9..0282594f4 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -25,10 +25,12 @@ import (
"io"
"math"
"net/http"
+ "os"
"strings"
"time"
"github.com/labstack/echo/v4"
+ "golang.org/x/sync/semaphore"
"github.com/algorand/avm-abi/apps"
"github.com/algorand/go-codec/codec"
@@ -48,6 +50,7 @@ import (
"github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/simulation"
+ "github.com/algorand/go-algorand/libgoal/participation"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/node"
"github.com/algorand/go-algorand/protocol"
@@ -74,6 +77,9 @@ type Handlers struct {
Node NodeInterface
Log logging.Logger
Shutdown <-chan struct{}
+
+ // KeygenLimiter is used to limit the number of concurrent key generation requests.
+ KeygenLimiter *semaphore.Weighted
}
// LedgerForAPI describes the Ledger methods used by the v2 API.
@@ -90,6 +96,7 @@ type LedgerForAPI interface {
LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error)
Wait(r basics.Round) chan struct{}
+ WaitWithCancel(r basics.Round) (chan struct{}, func())
GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
EncodedBlockCert(rnd basics.Round) (blk []byte, cert []byte, err error)
Block(rnd basics.Round) (blk bookkeeping.Block, err error)
@@ -240,6 +247,47 @@ func (v2 *Handlers) GetParticipationKeys(ctx echo.Context) error {
return ctx.JSON(http.StatusOK, response)
}
+func (v2 *Handlers) generateKeyHandler(address string, params model.GenerateParticipationKeysParams) error {
+ installFunc := func(path string) error {
+ bytes, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ partKeyBinary := bytes
+
+ if len(partKeyBinary) == 0 {
+ return fmt.Errorf("cannot install partkey '%s' is empty", partKeyBinary)
+ }
+
+ partID, err := v2.Node.InstallParticipationKey(partKeyBinary)
+ v2.Log.Infof("Installed participation key %s", partID)
+ return err
+ }
+ _, _, err := participation.GenParticipationKeysTo(address, params.First, params.Last, nilToZero(params.Dilution), "", installFunc)
+ return err
+}
+
+// GenerateParticipationKeys generates and installs participation keys to the node.
+// (POST /v2/participation/generate/{address})
+func (v2 *Handlers) GenerateParticipationKeys(ctx echo.Context, address string, params model.GenerateParticipationKeysParams) error {
+ if !v2.KeygenLimiter.TryAcquire(1) {
+ err := fmt.Errorf("participation key generation already in progress")
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+
+ // Semaphore was acquired, generate the key.
+ go func() {
+ defer v2.KeygenLimiter.Release(1)
+ err := v2.generateKeyHandler(address, params)
+ if err != nil {
+ v2.Log.Warnf("Error generating participation keys: %v", err)
+ }
+ }()
+
+ // Empty object. In the future we may want to add a field for the participation ID.
+ return ctx.String(http.StatusOK, "{}")
+}
+
// AddParticipationKey Add a participation key to the node
// (POST /v2/participation)
func (v2 *Handlers) AddParticipationKey(ctx echo.Context) error {
@@ -893,11 +941,15 @@ func (v2 *Handlers) WaitForBlock(ctx echo.Context, round uint64) error {
}
// Wait
+ ledgerWaitCh, cancelLedgerWait := ledger.WaitWithCancel(basics.Round(round + 1))
+ defer cancelLedgerWait()
select {
case <-v2.Shutdown:
return internalError(ctx, err, errServiceShuttingDown, v2.Log)
+ case <-ctx.Request().Context().Done():
+ return ctx.NoContent(http.StatusRequestTimeout)
case <-time.After(WaitForBlockTimeout):
- case <-ledger.Wait(basics.Round(round + 1)):
+ case <-ledgerWaitCh:
}
// Return status after the wait
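Editor's note: two behavioral changes land in this hunk. Key generation is guarded by a weighted semaphore acting as a single-flight gate (TryAcquire fails fast instead of queueing), and WaitForBlock now observes both client disconnects and a cancellable ledger wait. A self-contained sketch of the single-flight pattern, with invented names, assuming only golang.org/x/sync/semaphore:

package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

// startJob runs fn in the background unless another job already holds the
// single slot; it mirrors the TryAcquire/Release shape used by
// GenerateParticipationKeys.
func startJob(limiter *semaphore.Weighted, fn func()) error {
	if !limiter.TryAcquire(1) {
		return fmt.Errorf("job already in progress")
	}
	go func() {
		defer limiter.Release(1)
		fn()
	}()
	return nil
}

func main() {
	limiter := semaphore.NewWeighted(1)
	_ = startJob(limiter, func() { time.Sleep(time.Second) })
	// The second call fails fast while the first job is still running.
	fmt.Println(startJob(limiter, func() {}))
}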
diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go
index 1de86ddc1..adf187053 100644
--- a/daemon/algod/api/server/v2/test/handlers_resources_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go
@@ -19,11 +19,12 @@ package test
import (
"encoding/json"
"fmt"
- "github.com/algorand/go-algorand/data/transactions/logic"
"net/http"
"net/http/httptest"
"testing"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+
"github.com/labstack/echo/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -135,6 +136,9 @@ func (l *mockLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error)
func (l *mockLedger) Wait(r basics.Round) chan struct{} {
panic("not implemented")
}
+func (l *mockLedger) WaitWithCancel(r basics.Round) (chan struct{}, func()) {
+ panic("not implemented")
+}
func (l *mockLedger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (c basics.Address, ok bool, err error) {
panic("not implemented")
}
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index afe074eda..aa5a7492b 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -32,6 +32,8 @@ import (
"testing"
"time"
+ "golang.org/x/sync/semaphore"
+
"github.com/algorand/go-algorand/daemon/algod/api/server"
"github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -271,8 +273,7 @@ func addBlockHelper(t *testing.T) (v2.Handlers, echo.Context, *httptest.Response
// make an app call txn with eval delta
lsig := transactions.LogicSig{Logic: retOneProgram} // int 1
- program := logic.Program(lsig.Logic)
- lhash := crypto.HashObj(&program)
+ lhash := logic.HashProgram(lsig.Logic)
var sender basics.Address
copy(sender[:], lhash[:])
stx := transactions.SignedTxn{
@@ -2378,3 +2379,63 @@ func TestRouterRequestBody(t *testing.T) {
e.ServeHTTP(rec, req)
assert.Equal(t, http.StatusRequestEntityTooLarge, rec.Code)
}
+
+func TestGeneratePartkeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ numAccounts := 1
+ numTransactions := 1
+ offlineAccounts := true
+ mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
+ defer releasefunc()
+ dummyShutdownChan := make(chan struct{})
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
+ handler := v2.Handlers{
+ Node: mockNode,
+ Log: logging.Base(),
+ Shutdown: dummyShutdownChan,
+ KeygenLimiter: semaphore.NewWeighted(1),
+ }
+ e := echo.New()
+
+ var addr basics.Address
+ addr[0] = 1
+
+ {
+ require.Len(t, mockNode.PartKeyBinary, 0)
+ req := httptest.NewRequest(http.MethodPost, "/", nil)
+ rec := httptest.NewRecorder()
+ c := e.NewContext(req, rec)
+
+ err := handler.GenerateParticipationKeys(c, addr.String(), model.GenerateParticipationKeysParams{
+ First: 1000,
+ Last: 2000,
+ })
+
+ require.NoError(t, err)
+ assert.Equal(t, http.StatusOK, rec.Code)
+
+ // Wait for keygen to complete
+ err = handler.KeygenLimiter.Acquire(context.Background(), 1)
+ require.NoError(t, err)
+ require.Greater(t, len(mockNode.PartKeyBinary), 0)
+ handler.KeygenLimiter.Release(1)
+ }
+
+ {
+ req := httptest.NewRequest(http.MethodPost, "/", nil)
+ rec := httptest.NewRecorder()
+ c := e.NewContext(req, rec)
+ // Simulate a blocked keygen process (and block until the previous keygen is complete)
+ err := handler.KeygenLimiter.Acquire(context.Background(), 1)
+ require.NoError(t, err)
+ err = handler.GenerateParticipationKeys(c, addr.String(), model.GenerateParticipationKeysParams{
+ First: 1000,
+ Last: 2000,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, http.StatusBadRequest, rec.Code)
+ }
+
+}
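Editor's note: from a client's point of view the new endpoint is plain HTTP; a hedged sketch of invoking it against a local node (the port, token header, and query-parameter spelling are assumptions about a typical algod setup, not taken from this diff):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	addr := "SOME-ALGORAND-ADDRESS" // placeholder account address
	url := "http://localhost:8080/v2/participation/generate/" + addr + "?first=1000&last=2000"
	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Algo-API-Token", "contents-of-algod.token") // assumed auth header
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // expect 200 and "{}" on first call
}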
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index aedcede91..e2b52fc7c 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -99,9 +99,11 @@ type mockNode struct {
status node.StatusReport
devmode bool
timestampOffset *int64
+ PartKeyBinary []byte
}
func (m *mockNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) {
+ m.PartKeyBinary = partKeyBinary
return account.ParticipationID{}, nil
}
@@ -340,8 +342,7 @@ func testingenvWithBalances(t testing.TB, minMoneyAtStart, maxMoneyAtStart, numA
genesis[poolAddr] = basics_testing.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 100000 * uint64(proto.RewardsRateRefreshInterval)})
- program := logic.Program(retOneProgram)
- lhash := crypto.HashObj(&program)
+ lhash := logic.HashProgram(retOneProgram)
var addr basics.Address
copy(addr[:], lhash[:])
ad := basics_testing.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 100000 * uint64(proto.RewardsRateRefreshInterval)})
diff --git a/data/account/participation.go b/data/account/participation.go
index 376e9090f..9493f21f0 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -20,6 +20,7 @@ import (
"context"
"database/sql"
"fmt"
+ "math"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -79,8 +80,8 @@ func (id *ParticipationKeyIdentity) ToBeHashed() (protocol.HashID, []byte) {
}
// ID creates a ParticipationID hash from the identity file.
-func (id ParticipationKeyIdentity) ID() ParticipationID {
- return ParticipationID(crypto.HashObj(&id))
+func (id *ParticipationKeyIdentity) ID() ParticipationID {
+ return ParticipationID(crypto.HashObj(id))
}
// ID computes a ParticipationID.
@@ -215,6 +216,11 @@ func (part PersistedParticipation) PersistNewParent() error {
})
}
+// DefaultKeyDilution computes the default key dilution from the first and last rounds as 1 + the square root of the validity window.
+func DefaultKeyDilution(first, last basics.Round) uint64 {
+ return 1 + uint64(math.Sqrt(float64(last-first)))
+}
+
// FillDBWithParticipationKeys initializes the passed database with participation keys
func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firstValid, lastValid basics.Round, keyDilution uint64) (part PersistedParticipation, err error) {
if lastValid < firstValid {
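Editor's note: the sqrt choice reflects the usual balancing argument for two-level ephemeral keys: with dilution d over a window of n rounds there are roughly n/d first-level keys, each covering d second-level keys, and d ≈ sqrt(n) minimizes their sum. A quick worked example of the new helper's arithmetic (standalone sketch, uint64 in place of basics.Round):

package main

import (
	"fmt"
	"math"
)

func defaultKeyDilution(first, last uint64) uint64 {
	return 1 + uint64(math.Sqrt(float64(last-first)))
}

func main() {
	// A 3,000,000-round validity window yields a dilution of 1733,
	// i.e. roughly sqrt(3e6) keys at each of the two levels.
	fmt.Println(defaultKeyDilution(0, 3_000_000)) // 1733
}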
diff --git a/data/account/participationRegistry_test.go b/data/account/participationRegistry_test.go
index ef77283ce..e0328bb7f 100644
--- a/data/account/participationRegistry_test.go
+++ b/data/account/participationRegistry_test.go
@@ -23,7 +23,6 @@ import (
"encoding/binary"
"errors"
"fmt"
- "math"
"os"
"path/filepath"
"strconv"
@@ -99,7 +98,7 @@ func makeTestParticipationWithLifetime(a *require.Assertions, addrID int, first,
// Generate part keys like in partGenerateCmd and FillDBWithParticipationKeys
if dilution == 0 {
- dilution = 1 + uint64(math.Sqrt(float64(last-first)))
+ dilution = DefaultKeyDilution(first, last)
}
// Compute how many distinct participation keys we should generate
diff --git a/data/account/participation_test.go b/data/account/participation_test.go
index 4a933d72b..ccf300122 100644
--- a/data/account/participation_test.go
+++ b/data/account/participation_test.go
@@ -606,3 +606,13 @@ func BenchmarkParticipationSign(b *testing.B) {
_ = part.Voting.Sign(ephID, msg)
}
}
+
+func BenchmarkID(b *testing.B) {
+ pki := ParticipationKeyIdentity{}
+ b.Run("existing", func(b *testing.B) {
+ b.ReportAllocs() // demonstrate this is a single alloc
+ for i := 0; i < b.N; i++ {
+ pki.ID()
+ }
+ })
+}
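Editor's note: the receiver change on ID() (value to pointer, a few hunks above) is what this benchmark measures: with a value receiver, crypto.HashObj(&id) hashed a fresh copy of the identity struct, and passing that copy's address through the hash interface made it escape to the heap. A toy illustration of the receiver-copy cost, with invented types:

package main

import "fmt"

type payload struct{ data [4096]byte }

// sumByValue receives a copy of the whole 4 KiB struct on every call.
func (p payload) sumByValue() (s int) {
	for _, b := range p.data {
		s += int(b)
	}
	return
}

// sumByPointer operates on the caller's struct with no copy.
func (p *payload) sumByPointer() (s int) {
	for _, b := range p.data {
		s += int(b)
	}
	return
}

func main() {
	var p payload
	fmt.Println(p.sumByValue(), p.sumByPointer()) // same answer, different cost
}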
diff --git a/data/appRateLimiter.go b/data/appRateLimiter.go
new file mode 100644
index 000000000..b7684409a
--- /dev/null
+++ b/data/appRateLimiter.go
@@ -0,0 +1,322 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package data
+
+import (
+ "encoding/binary"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util"
+ "github.com/algorand/go-deadlock"
+ "golang.org/x/crypto/blake2b"
+)
+
+const numBuckets = 128
+
+type keyType [8]byte
+
+// appRateLimiter implements a sliding window counter rate limiter for applications.
+// It is a sharded map with numBuckets maps, each protected by its own mutex.
+// Bucket is selected by hashing the application index with a seed (see memhash64).
+// LRU is used to evict entries from each bucket, and "last use" is updated on each attempt, not admission.
+// This is mostly done to simplify the implementation and does not appear to affect correctness.
+type appRateLimiter struct {
+ maxBucketSize int
+ serviceRatePerWindow uint64
+ serviceRateWindow time.Duration
+
+ // seed for hashing application index to bucket
+ seed uint64
+ // salt for hashing application index + origin address
+ salt [16]byte
+
+ buckets [numBuckets]appRateLimiterBucket
+
+ // evictions
+ // TODO: delete?
+ evictions uint64
+ evictionTime uint64
+}
+
+type appRateLimiterBucket struct {
+ entries map[keyType]*appRateLimiterEntry
+ lru *util.List[keyType]
+ mu deadlock.RWMutex // mutex protects both map and the list access
+}
+
+type appRateLimiterEntry struct {
+ prev atomic.Int64
+ cur atomic.Int64
+ interval int64 // numeric representation of the current interval value
+ lruElement *util.ListNode[keyType]
+}
+
+// makeAppRateLimiter creates a new appRateLimiter from the parameters:
+// maxCacheSize is the maximum number of entries to keep in the cache, keeping it memory bounded
+// maxAppPeerRate is the maximum number of admitted apps per peer per second
+// serviceRateWindow is the service window
+func makeAppRateLimiter(maxCacheSize int, maxAppPeerRate uint64, serviceRateWindow time.Duration) *appRateLimiter {
+ // convert target per app rate to per window service rate
+ serviceRatePerWindow := maxAppPeerRate * uint64(serviceRateWindow/time.Second)
+ maxBucketSize := maxCacheSize / numBuckets
+ if maxBucketSize == 0 {
+		// the max size is less than the number of buckets, use maps of size 1
+ maxBucketSize = 1
+ }
+ r := &appRateLimiter{
+ maxBucketSize: maxBucketSize,
+ serviceRatePerWindow: serviceRatePerWindow,
+ serviceRateWindow: serviceRateWindow,
+ seed: crypto.RandUint64(),
+ }
+ crypto.RandBytes(r.salt[:])
+
+ for i := 0; i < numBuckets; i++ {
+ r.buckets[i] = appRateLimiterBucket{entries: make(map[keyType]*appRateLimiterEntry), lru: util.NewList[keyType]()}
+ }
+ return r
+}
+
+func (r *appRateLimiter) entry(b *appRateLimiterBucket, key keyType, curInt int64) (*appRateLimiterEntry, bool) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if len(b.entries) >= r.maxBucketSize {
+ // evict the oldest entry
+ start := time.Now()
+
+ el := b.lru.Back()
+ delete(b.entries, el.Value)
+ b.lru.Remove(el)
+
+ atomic.AddUint64(&r.evictions, 1)
+ atomic.AddUint64(&r.evictionTime, uint64(time.Since(start)))
+ }
+
+ entry, ok := b.entries[key]
+ if ok {
+ el := entry.lruElement
+ // note, the entry is marked as recently used even before the rate limiting decision
+ // since it does not make sense to evict keys that are actively attempted
+ b.lru.MoveToFront(el)
+
+ // the same logic is applicable to the intervals: if a new interval is started, update the entry
+ // by moving the current value to the previous and resetting the current.
+ // this is done under a lock so that the interval is not updated concurrently.
+		// The rationale: even if this request is going to be dropped, the new interval has already started
+		// and it is OK to roll it over and have it prepared for upcoming requests.
+ var newPrev int64 = 0
+ switch entry.interval {
+ case curInt:
+ // the interval is the same, do nothing
+ case curInt - 1:
+			// these are contiguous intervals, use the current value as the new previous
+ newPrev = entry.cur.Load()
+ fallthrough
+ default:
+ // non-contiguous intervals, reset the entry
+ entry.prev.Store(newPrev)
+ entry.cur.Store(0)
+ entry.interval = curInt
+ }
+ } else {
+ el := b.lru.PushFront(key)
+ entry = &appRateLimiterEntry{interval: curInt, lruElement: el}
+ b.entries[key] = entry
+ }
+ return entry, ok
+}
+
+// interval calculates the interval numeric representation based on the given time
+func (r *appRateLimiter) interval(nowNano int64) int64 {
+ return nowNano / int64(r.serviceRateWindow)
+}
+
+// fraction calculates the fraction of the current interval that has elapsed at the given time
+func (r *appRateLimiter) fraction(nowNano int64) float64 {
+ return float64(nowNano%int64(r.serviceRateWindow)) / float64(r.serviceRateWindow)
+}
+
+// shouldDrop returns true if the given transaction group should be dropped based
+// on the rate for the applications in the group: the entire group is dropped if a single application
+// exceeds the rate.
+func (r *appRateLimiter) shouldDrop(txgroup []transactions.SignedTxn, origin []byte) bool {
+ return r.shouldDropAt(txgroup, origin, time.Now().UnixNano())
+}
+
+// shouldDropAt is the same as shouldDrop but accepts the current time as a parameter
+// in order to make it testable
+func (r *appRateLimiter) shouldDropAt(txgroup []transactions.SignedTxn, origin []byte, nowNano int64) bool {
+ keysBuckets := txgroupToKeys(txgroup, origin, r.seed, r.salt, numBuckets)
+ defer putAppKeyBuf(keysBuckets)
+ if len(keysBuckets.keys) == 0 {
+ return false
+ }
+ return r.shouldDropKeys(keysBuckets.buckets, keysBuckets.keys, nowNano)
+}
+
+func (r *appRateLimiter) shouldDropKeys(buckets []int, keys []keyType, nowNano int64) bool {
+ curInt := r.interval(nowNano)
+ curFraction := r.fraction(nowNano)
+
+ for i, key := range keys {
+ // TODO: reuse last entry for matched keys and buckets?
+ b := buckets[i]
+ entry, has := r.entry(&r.buckets[b], key, curInt)
+ if !has {
+		// new entry, defaults are provided by the entry() function
+ // admit and increment
+ entry.cur.Add(1)
+ continue
+ }
+
+ rate := int64(float64(entry.prev.Load())*(1-curFraction)) + entry.cur.Load() + 1
+ if rate > int64(r.serviceRatePerWindow) {
+ return true
+ }
+ entry.cur.Add(1)
+ }
+
+ return false
+}
+
+func (r *appRateLimiter) len() int {
+ var count int
+ for i := 0; i < numBuckets; i++ {
+ r.buckets[i].mu.RLock()
+ count += len(r.buckets[i].entries)
+ r.buckets[i].mu.RUnlock()
+ }
+ return count
+}
+
+var appKeyPool = sync.Pool{
+ New: func() interface{} {
+ return &appKeyBuf{
+ // max config.MaxTxGroupSize apps per txgroup, each app has up to MaxAppTxnForeignApps extra foreign apps
+			// at the moment of writing, config.MaxTxGroupSize = 16, config.MaxAppTxnForeignApps = 8
+ keys: make([]keyType, 0, config.MaxTxGroupSize*(1+config.MaxAppTxnForeignApps)),
+ buckets: make([]int, 0, config.MaxTxGroupSize*(1+config.MaxAppTxnForeignApps)),
+ }
+ },
+}
+
+// appKeyBuf is a reusable storage for key and bucket slices
+type appKeyBuf struct {
+ keys []keyType
+ buckets []int
+}
+
+func getAppKeyBuf() *appKeyBuf {
+ buf := appKeyPool.Get().(*appKeyBuf)
+ buf.buckets = buf.buckets[:0]
+ buf.keys = buf.keys[:0]
+ return buf
+}
+
+func putAppKeyBuf(buf *appKeyBuf) {
+ appKeyPool.Put(buf)
+}
+
+// txgroupToKeys converts txgroup data to keys
+func txgroupToKeys(txgroup []transactions.SignedTxn, origin []byte, seed uint64, salt [16]byte, numBuckets int) *appKeyBuf {
+ keysBuckets := getAppKeyBuf()
+	// since blake2 is a crypto hash function it seems OK to shrink its 32-byte digest down to 8.
+	// Rationale: we expect thousands of apps sent from thousands of peers,
+	// so millions of unique pairs are required => 8 bytes should be enough.
+ // The 16 bytes salt makes it harder to find collisions if an adversary attempts to censor
+ // some app by finding a collision with some app and flood a network with such transactions:
+ // h(app + relay_ip) = h(app2 + relay_ip).
+
+ // uint64 + 16 bytes of salt + up to 16 bytes of address
+ // salt and origin are fixed so pre-copy them into the buf
+ var buf [8 + 16 + 16]byte
+ copy(buf[8:], salt[:])
+ copied := copy(buf[8+16:], origin)
+ bufLen := 8 + 16 + copied
+
+ txnToDigest := func(appIdx basics.AppIndex) (key keyType) {
+ binary.LittleEndian.PutUint64(buf[:8], uint64(appIdx))
+ h := blake2b.Sum256(buf[:bufLen])
+ copy(key[:], h[:len(keyType{})])
+ return
+ }
+ txnToBucket := func(appIdx basics.AppIndex) int {
+ return int(memhash64(uint64(appIdx), seed) % uint64(numBuckets))
+ }
+ seen := make(map[basics.AppIndex]struct{}, len(txgroup)*(1+config.MaxAppTxnForeignApps))
+ valid := func(appIdx basics.AppIndex) bool {
+ if appIdx != 0 {
+ _, ok := seen[appIdx]
+ return !ok
+ }
+ return false
+ }
+ for i := range txgroup {
+ if txgroup[i].Txn.Type == protocol.ApplicationCallTx {
+ appIdx := txgroup[i].Txn.ApplicationID
+ if valid(appIdx) {
+ keysBuckets.buckets = append(keysBuckets.buckets, txnToBucket(appIdx))
+ keysBuckets.keys = append(keysBuckets.keys, txnToDigest(appIdx))
+ seen[appIdx] = struct{}{}
+ }
+ // hash appIdx into a bucket, do not use modulo without hashing first since it could
+			// assign two vanilla (and presumably popular) apps to the same bucket.
+ if len(txgroup[i].Txn.ForeignApps) > 0 {
+ for _, appIdx := range txgroup[i].Txn.ForeignApps {
+ if valid(appIdx) {
+ keysBuckets.buckets = append(keysBuckets.buckets, txnToBucket(appIdx))
+ keysBuckets.keys = append(keysBuckets.keys, txnToDigest(appIdx))
+ seen[appIdx] = struct{}{}
+ }
+ }
+ }
+ }
+ }
+ return keysBuckets
+}
+
+const (
+ // Constants for multiplication: four random odd 64-bit numbers.
+ m1 = 16877499708836156737
+ m2 = 2820277070424839065
+ m3 = 9497967016996688599
+ m4 = 15839092249703872147
+)
+
+// memhash64 is uint64 hash function from go runtime
+// https://go-review.googlesource.com/c/go/+/59352/4/src/runtime/hash64.go#96
+func memhash64(val uint64, seed uint64) uint64 {
+ h := seed
+ h ^= val
+ h = rotl31(h*m1) * m2
+ h ^= h >> 29
+ h *= m3
+ h ^= h >> 32
+ return h
+}
+
+func rotl31(x uint64) uint64 {
+ return (x << 31) | (x >> (64 - 31))
+}
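Editor's note: the admission test in shouldDropKeys is a sliding window counter approximation. The previous interval's count decays linearly as the current interval progresses, so the estimate prev*(1-fraction) + cur + 1 is compared against serviceRatePerWindow. A worked example with the same numbers the tests use (the helper below is a sketch, not the production code):

package main

import "fmt"

// slidingEstimate mirrors the admission formula in shouldDropKeys: the
// previous interval's count is weighted by how much of it still overlaps
// the sliding window.
func slidingEstimate(prev, cur int64, fraction float64) int64 {
	return int64(float64(prev)*(1-fraction)) + cur + 1
}

func main() {
	const ratePerWindow = 10
	// 8 requests landed in the previous interval; we are 10% into the
	// current one with 3 already admitted: 8*0.9 truncates to 7, and
	// 7+3+1 = 11 > 10, so the next request is dropped.
	fmt.Println(slidingEstimate(8, 3, 0.1) > ratePerWindow) // true
}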
diff --git a/data/appRateLimiter_test.go b/data/appRateLimiter_test.go
new file mode 100644
index 000000000..2da9fddfa
--- /dev/null
+++ b/data/appRateLimiter_test.go
@@ -0,0 +1,526 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package data
+
+import (
+ "encoding/binary"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/blake2b"
+ "golang.org/x/exp/rand"
+)
+
+func TestAppRateLimiter_Make(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rate := uint64(10)
+ window := 1 * time.Second
+ rm := makeAppRateLimiter(10, rate, window)
+
+ require.Equal(t, 1, rm.maxBucketSize)
+ require.NotEmpty(t, rm.seed)
+ require.NotEmpty(t, rm.salt)
+ for i := 0; i < len(rm.buckets); i++ {
+ require.NotNil(t, rm.buckets[i].entries)
+ require.NotNil(t, rm.buckets[i].lru)
+ }
+}
+
+func TestAppRateLimiter_NoApps(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rate := uint64(10)
+ window := 1 * time.Second
+ rm := makeAppRateLimiter(10, rate, window)
+
+ txns := []transactions.SignedTxn{
+ {
+ Txn: transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ },
+ },
+ {
+ Txn: transactions.Transaction{
+ Type: protocol.PaymentTx,
+ },
+ },
+ }
+ drop := rm.shouldDrop(txns, nil)
+ require.False(t, drop)
+}
+
+func getAppTxnGroup(appIdx basics.AppIndex) []transactions.SignedTxn {
+ apptxn := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: appIdx,
+ },
+ }
+
+ return []transactions.SignedTxn{{Txn: apptxn}}
+}
+
+func TestAppRateLimiter_Basics(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rate := uint64(10)
+ window := 1 * time.Second
+ rm := makeAppRateLimiter(512, rate, window)
+
+ txns := getAppTxnGroup(1)
+ now := time.Now().UnixNano()
+ drop := rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+
+ for i := len(txns); i < int(rate); i++ {
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+ }
+
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.True(t, drop)
+
+ require.Equal(t, 1, rm.len())
+
+ // check a single group cannot exceed the rate
+ apptxn2 := txns[0].Txn
+ apptxn2.ApplicationID = 2
+ txns = make([]transactions.SignedTxn, 0, rate+1)
+ for i := 0; i < int(rate+1); i++ {
+ txns = append(txns, transactions.SignedTxn{
+ Txn: apptxn2,
+ })
+ }
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+
+	// check multiple groups can exceed the rate (-1 comes from the previous check)
+ for i := 0; i < int(rate)-1; i++ {
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+ }
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.True(t, drop)
+
+ require.Equal(t, 2, rm.len())
+
+ // check foreign apps in the same group do not trigger the rate limit
+ apptxn3 := txns[0].Txn
+ apptxn3.ApplicationID = 3
+ for i := 0; i < int(rate); i++ {
+ apptxn3.ForeignApps = append(apptxn3.ForeignApps, 3)
+ }
+ txns = []transactions.SignedTxn{{Txn: apptxn3}}
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+
+	// check multiple groups with foreign apps can exceed the rate (-1 comes from the previous check)
+ for i := 0; i < int(rate)-1; i++ {
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+ }
+ drop = rm.shouldDropAt(txns, nil, now)
+ require.True(t, drop)
+
+ require.Equal(t, 3, rm.len())
+}
+
+// TestAppRateLimiter_Interval checks prev + cur rate approximation logic
+func TestAppRateLimiter_Interval(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rate := uint64(10)
+ window := 10 * time.Second
+ perSecondRate := uint64(window) / rate / uint64(time.Second)
+ rm := makeAppRateLimiter(512, perSecondRate, window)
+
+ txns := getAppTxnGroup(1)
+ now := time.Date(2023, 9, 11, 10, 10, 11, 0, time.UTC).UnixNano() // 11 sec => 1 sec into the interval
+
+ // fill 80% of the current interval
+ // switch to the next interval
+ // ensure only 30% of the rate is available (8 * 0.9 = 7.2 => 7)
+ // 0.9 is calculated as 1 - 0.1 (fraction of the interval elapsed)
+	// since the next interval at second 21 would be 1 sec (== 10% == 0.1) after the interval's beginning
+ for i := 0; i < int(0.8*float64(rate)); i++ {
+ drop := rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+ }
+
+ next := now + int64(window)
+ for i := 0; i < int(0.3*float64(rate)); i++ {
+ drop := rm.shouldDropAt(txns, nil, next)
+ require.False(t, drop)
+ }
+
+ drop := rm.shouldDropAt(txns, nil, next)
+ require.True(t, drop)
+}
+
+// TestAppRateLimiter_IntervalAdmitted checks that the cur counter counts only admitted requests
+func TestAppRateLimiter_IntervalAdmitted(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rate := uint64(10)
+ window := 10 * time.Second
+ perSecondRate := uint64(window) / rate / uint64(time.Second)
+ rm := makeAppRateLimiter(512, perSecondRate, window)
+
+ txns := getAppTxnGroup(1)
+ bk := txgroupToKeys(getAppTxnGroup(basics.AppIndex(1)), nil, rm.seed, rm.salt, numBuckets)
+ require.Equal(t, 1, len(bk.buckets))
+ require.Equal(t, 1, len(bk.keys))
+ b := bk.buckets[0]
+ k := bk.keys[0]
+ now := time.Date(2023, 9, 11, 10, 10, 11, 0, time.UTC).UnixNano() // 11 sec => 1 sec into the interval
+
+ // fill a current interval with more than rate requests
+ // ensure the counter does not exceed the rate
+ for i := 0; i < int(rate); i++ {
+ drop := rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+ }
+ drop := rm.shouldDropAt(txns, nil, now)
+ require.True(t, drop)
+
+ entry := rm.buckets[b].entries[k]
+ require.NotNil(t, entry)
+ require.Equal(t, int64(rate), entry.cur.Load())
+}
+
+// TestAppRateLimiter_IntervalSkip checks that the rate is reset when no requests arrive within some interval
+func TestAppRateLimiter_IntervalSkip(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rate := uint64(10)
+ window := 10 * time.Second
+ perSecondRate := uint64(window) / rate / uint64(time.Second)
+ rm := makeAppRateLimiter(512, perSecondRate, window)
+
+ txns := getAppTxnGroup(1)
+ now := time.Date(2023, 9, 11, 10, 10, 11, 0, time.UTC).UnixNano() // 11 sec => 1 sec into the interval
+
+ // fill 80% of the current interval
+ // switch to the next next interval
+ // ensure all capacity is available
+
+ for i := 0; i < int(0.8*float64(rate)); i++ {
+ drop := rm.shouldDropAt(txns, nil, now)
+ require.False(t, drop)
+ }
+
+ nextnext := now + int64(2*window)
+ for i := 0; i < int(rate); i++ {
+ drop := rm.shouldDropAt(txns, nil, nextnext)
+ require.False(t, drop)
+ }
+
+ drop := rm.shouldDropAt(txns, nil, nextnext)
+ require.True(t, drop)
+}
+
+func TestAppRateLimiter_IPAddr(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rate := uint64(10)
+ window := 10 * time.Second
+ perSecondRate := uint64(window) / rate / uint64(time.Second)
+ rm := makeAppRateLimiter(512, perSecondRate, window)
+
+ txns := getAppTxnGroup(1)
+ now := time.Now().UnixNano()
+
+ for i := 0; i < int(rate); i++ {
+ drop := rm.shouldDropAt(txns, []byte{1}, now)
+ require.False(t, drop)
+ drop = rm.shouldDropAt(txns, []byte{2}, now)
+ require.False(t, drop)
+ }
+
+ drop := rm.shouldDropAt(txns, []byte{1}, now)
+ require.True(t, drop)
+ drop = rm.shouldDropAt(txns, []byte{2}, now)
+ require.True(t, drop)
+}
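+
+// Note: rate keys combine the app ID with the caller's origin address, so the
+// two origins above draw from independent budgets and each is cut off at its
+// own rate.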
+
+// TestAppRateLimiter_MaxSize puts size+1 elements into a single bucket and ensures the total size is capped
+func TestAppRateLimiter_MaxSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ const bucketSize = 4
+ const size = bucketSize * numBuckets
+ const rate uint64 = 10
+ window := 10 * time.Second
+ rm := makeAppRateLimiter(size, rate, window)
+
+ for i := 1; i <= int(size)+1; i++ {
+ drop := rm.shouldDrop(getAppTxnGroup(basics.AppIndex(1)), []byte{byte(i)})
+ require.False(t, drop)
+ }
+ bucket := int(memhash64(uint64(1), rm.seed) % numBuckets)
+ require.Equal(t, bucketSize, len(rm.buckets[bucket].entries))
+ var totalSize int
+ for i := 0; i < len(rm.buckets); i++ {
+ totalSize += len(rm.buckets[i].entries)
+ if i != bucket {
+ require.Equal(t, 0, len(rm.buckets[i].entries))
+ }
+ }
+ require.LessOrEqual(t, totalSize, int(size))
+}
+
+// TestAppRateLimiter_EvictOrder ensures that the least recently used entry is evicted
+func TestAppRateLimiter_EvictOrder(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ const bucketSize = 4
+ const size = bucketSize * numBuckets
+ const rate uint64 = 10
+ window := 10 * time.Second
+ rm := makeAppRateLimiter(size, rate, window)
+
+ keys := make([]keyType, 0, int(bucketSize)+1)
+ bucket := int(memhash64(uint64(1), rm.seed) % numBuckets)
+ for i := 0; i < bucketSize; i++ {
+ bk := txgroupToKeys(getAppTxnGroup(basics.AppIndex(1)), []byte{byte(i)}, rm.seed, rm.salt, numBuckets)
+ require.Equal(t, 1, len(bk.buckets))
+ require.Equal(t, 1, len(bk.keys))
+ require.Equal(t, bucket, bk.buckets[0])
+ keys = append(keys, bk.keys[0])
+ drop := rm.shouldDrop(getAppTxnGroup(basics.AppIndex(1)), []byte{byte(i)})
+ require.False(t, drop)
+ }
+ require.Equal(t, bucketSize, len(rm.buckets[bucket].entries))
+
+ // add one more and expect the first (oldest) entry to be evicted
+ bk := txgroupToKeys(getAppTxnGroup(basics.AppIndex(1)), []byte{byte(bucketSize)}, rm.seed, rm.salt, numBuckets)
+ require.Equal(t, 1, len(bk.buckets))
+ require.Equal(t, 1, len(bk.keys))
+ require.Equal(t, bucket, bk.buckets[0])
+ drop := rm.shouldDrop(getAppTxnGroup(basics.AppIndex(1)), []byte{byte(bucketSize)})
+ require.False(t, drop)
+
+ require.Equal(t, bucketSize, len(rm.buckets[bucket].entries))
+ require.NotContains(t, rm.buckets[bucket].entries, keys[0])
+ for i := 1; i < len(keys); i++ {
+ require.Contains(t, rm.buckets[bucket].entries, keys[i])
+ }
+
+ var totalSize int
+ for i := 0; i < len(rm.buckets); i++ {
+ totalSize += len(rm.buckets[i].entries)
+ if i != bucket {
+ require.Equal(t, 0, len(rm.buckets[i].entries))
+ }
+ }
+ require.LessOrEqual(t, totalSize, int(size))
+}
+
+func BenchmarkBlake2(b *testing.B) {
+ var salt [16]byte
+ crypto.RandBytes(salt[:])
+ origin := make([]byte, 4)
+
+ var buf [8 + 16 + 16]byte // uint64 + 16 bytes of salt + up to 16 bytes of address
+
+ b.Run("blake2b-sum256", func(b *testing.B) {
+ total := 0
+ for i := 0; i < b.N; i++ {
+ binary.LittleEndian.PutUint64(buf[:8], rand.Uint64())
+ copy(buf[8:], salt[:])
+ copied := copy(buf[8+16:], origin)
+ h := blake2b.Sum256(buf[:8+16+copied])
+ total += len(h[:])
+ }
+ b.Logf("total1: %d", total) // to prevent optimizing out the loop
+ })
+
+ b.Run("blake2b-sum8", func(b *testing.B) {
+ total := 0
+ for i := 0; i < b.N; i++ {
+ d, err := blake2b.New(8, nil)
+ require.NoError(b, err)
+
+ binary.LittleEndian.PutUint64(buf[:8], rand.Uint64())
+ copy(buf[8:], salt[:])
+ copied := copy(buf[8+16:], origin)
+
+ _, err = d.Write(buf[:8+16+copied])
+ require.NoError(b, err)
+ h := d.Sum([]byte{})
+ total += len(h[:])
+ }
+ b.Logf("total2: %d", total)
+ })
+}
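+
+// To reproduce this comparison locally with standard Go tooling (assuming the
+// usual module layout), run:
+//
+//	go test -run=^$ -bench=BenchmarkBlake2 -benchmem ./data
+//
+// The Sum256 variant hashes in one shot, while the 8-byte variant allocates a
+// fresh hash.Hash per iteration; -benchmem makes that difference visible.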
+
+func BenchmarkAppRateLimiter(b *testing.B) {
+ cfg := config.GetDefaultLocal()
+
+ b.Run("multi bucket no evict", func(b *testing.B) {
+ rm := makeAppRateLimiter(
+ cfg.TxBacklogAppTxRateLimiterMaxSize,
+ uint64(cfg.TxBacklogAppTxPerSecondRate),
+ time.Duration(cfg.TxBacklogServiceRateWindowSeconds)*time.Second,
+ )
+ dropped := 0
+ for i := 0; i < b.N; i++ {
+ if rm.shouldDrop(getAppTxnGroup(basics.AppIndex(i%512)), []byte{byte(i), byte(i % 256)}) {
+ dropped++
+ }
+ }
+ b.ReportMetric(float64(dropped)/float64(b.N), "%_drop")
+ if rm.evictions > 0 {
+ b.Logf("# evictions %d, time %d us", rm.evictions, rm.evictionTime/uint64(time.Microsecond))
+ }
+ })
+
+ b.Run("single bucket no evict", func(b *testing.B) {
+ rm := makeAppRateLimiter(
+ cfg.TxBacklogAppTxRateLimiterMaxSize,
+ uint64(cfg.TxBacklogAppTxPerSecondRate),
+ time.Duration(cfg.TxBacklogServiceRateWindowSeconds)*time.Second,
+ )
+ dropped := 0
+ for i := 0; i < b.N; i++ {
+ if rm.shouldDrop(getAppTxnGroup(basics.AppIndex(1)), []byte{byte(i), byte(i % 256)}) {
+ dropped++
+ }
+ }
+ b.ReportMetric(float64(dropped)/float64(b.N), "%_drop")
+ if rm.evictions > 0 {
+ b.Logf("# evictions %d, time %d us", rm.evictions, rm.evictionTime/uint64(time.Microsecond))
+ }
+ })
+
+ b.Run("single bucket w evict", func(b *testing.B) {
+ rm := makeAppRateLimiter(
+ cfg.TxBacklogAppTxRateLimiterMaxSize,
+ uint64(cfg.TxBacklogAppTxPerSecondRate),
+ time.Duration(cfg.TxBacklogServiceRateWindowSeconds)*time.Second,
+ )
+ dropped := 0
+ for i := 0; i < b.N; i++ {
+ if rm.shouldDrop(getAppTxnGroup(basics.AppIndex(1)), []byte{byte(i), byte(i / 256), byte(i % 256)}) {
+ dropped++
+ }
+ }
+ b.ReportMetric(float64(dropped)/float64(b.N), "%_drop")
+ if rm.evictions > 0 {
+ b.Logf("# evictions %d, time %d us", rm.evictions, rm.evictionTime/uint64(time.Microsecond))
+ }
+ })
+}
+
+func TestAppRateLimiter_TxgroupToKeys(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ apptxn := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: 0,
+ ForeignApps: []basics.AppIndex{0},
+ },
+ }
+ txgroup := []transactions.SignedTxn{{Txn: apptxn}}
+
+ kb := txgroupToKeys(txgroup, nil, 123, [16]byte{}, 1)
+ require.Equal(t, 0, len(kb.keys))
+ require.Equal(t, len(kb.keys), len(kb.buckets))
+ putAppKeyBuf(kb)
+
+ txgroup[0].Txn.ApplicationID = 1
+ kb = txgroupToKeys(txgroup, nil, 123, [16]byte{}, 1)
+ require.Equal(t, 1, len(kb.keys))
+ require.Equal(t, len(kb.keys), len(kb.buckets))
+ putAppKeyBuf(kb)
+
+ txgroup[0].Txn.ForeignApps = append(txgroup[0].Txn.ForeignApps, 1)
+ kb = txgroupToKeys(txgroup, nil, 123, [16]byte{}, 1)
+ require.Equal(t, 1, len(kb.keys))
+ require.Equal(t, len(kb.keys), len(kb.buckets))
+ putAppKeyBuf(kb)
+
+ txgroup[0].Txn.ForeignApps = append(txgroup[0].Txn.ForeignApps, 2)
+ kb = txgroupToKeys(txgroup, nil, 123, [16]byte{}, 1)
+ require.Equal(t, 2, len(kb.keys))
+ require.Equal(t, len(kb.keys), len(kb.buckets))
+ putAppKeyBuf(kb)
+
+ apptxn.ApplicationID = 2
+ txgroup = append(txgroup, transactions.SignedTxn{Txn: apptxn})
+ kb = txgroupToKeys(txgroup, nil, 123, [16]byte{}, 1)
+ require.Equal(t, 2, len(kb.keys))
+ require.Equal(t, len(kb.keys), len(kb.buckets))
+ putAppKeyBuf(kb)
+}
+
+func BenchmarkAppRateLimiter_TxgroupToKeys(b *testing.B) {
+ rnd := rand.New(rand.NewSource(123))
+
+ txgroups := make([][]transactions.SignedTxn, 0, b.N)
+ for i := 0; i < b.N; i++ {
+ txgroup := make([]transactions.SignedTxn, 0, config.MaxTxGroupSize)
+ for j := 0; j < config.MaxTxGroupSize; j++ {
+ apptxn := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: basics.AppIndex(rnd.Uint64()),
+ ForeignApps: []basics.AppIndex{basics.AppIndex(rnd.Uint64()), basics.AppIndex(rnd.Uint64()), basics.AppIndex(rnd.Uint64()), basics.AppIndex(rnd.Uint64())},
+ },
+ }
+ txgroup = append(txgroup, transactions.SignedTxn{Txn: apptxn})
+ }
+ txgroups = append(txgroups, txgroup)
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ origin := make([]byte, 4)
+ _, err := rnd.Read(origin)
+ require.NoError(b, err)
+ require.NotEmpty(b, origin)
+
+ salt := [16]byte{}
+ _, err = rnd.Read(salt[:])
+ require.NoError(b, err)
+ require.NotEmpty(b, salt)
+
+ for i := 0; i < b.N; i++ {
+ kb := txgroupToKeys(txgroups[i], origin, 123, salt, numBuckets)
+ putAppKeyBuf(kb)
+ }
+}
diff --git a/data/bookkeeping/genesis_test.go b/data/bookkeeping/genesis_test.go
index 72e94947f..393e38922 100644
--- a/data/bookkeeping/genesis_test.go
+++ b/data/bookkeeping/genesis_test.go
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -155,3 +156,30 @@ func TestGenesis_Balances(t *testing.T) {
})
}
}
+
+func (genesis Genesis) hashOld() crypto.Digest {
+ return hashObjOld(genesis)
+}
+
+// hashObjOld computes a hash of a Hashable object and its type the "old way",
+// to show in benchmarks that it requires an extra allocation.
+func hashObjOld(h crypto.Hashable) crypto.Digest {
+ return crypto.Hash(crypto.HashRep(h))
+}
+
+func BenchmarkGenesisHash(b *testing.B) {
+ b.ReportAllocs()
+ g := Genesis{}
+ b.Run("new", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ g.Hash()
+ }
+ })
+ b.Run("old", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ g.hashOld()
+ }
+ })
+}
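+
+// Running both sub-benchmarks with -benchmem (standard Go tooling) shows the
+// extra allocation the comment above refers to, e.g.:
+//
+//	go test -run=^$ -bench=BenchmarkGenesisHash -benchmem ./data/bookkeeping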
diff --git a/data/bookkeeping/lightBlockHeader.go b/data/bookkeeping/lightBlockHeader.go
index ea283e039..90edea3ba 100644
--- a/data/bookkeeping/lightBlockHeader.go
+++ b/data/bookkeeping/lightBlockHeader.go
@@ -17,6 +17,7 @@
package bookkeeping
import (
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/committee"
@@ -38,8 +39,15 @@ type LightBlockHeader struct {
In addition, we make sure that the Seed (The unpredictable value) would be the first field that gets
hashed (give it the lowest codec value in the LightBlockHeader struct) to mitigate a collision attack
on the Merkle-Damgård construction.
+
+ The BlockHash serves a similar role, in that it also depends on the seed and introduces some
+ uncontrollable input. It is slightly weaker, in the sense that an adversary can influence
+ the BlockHash to some degree (e.g., by including specific transactions in the payset), but
+ it comes with the added benefit of allowing the entire blockchain to be authenticated
+ based on the BlockHash value.
*/
Seed committee.Seed `codec:"0"`
+ BlockHash BlockHash `codec:"1"`
Round basics.Round `codec:"r"`
GenesisHash crypto.Digest `codec:"gh"`
Sha256TxnCommitment crypto.GenericDigest `codec:"tc,allocbound=crypto.Sha256Size"`
@@ -47,12 +55,20 @@ type LightBlockHeader struct {
// ToLightBlockHeader returns a LightBlockHeader constructed from a given block header
func (bh *BlockHeader) ToLightBlockHeader() LightBlockHeader {
- return LightBlockHeader{
- Seed: bh.Seed,
+ res := LightBlockHeader{
GenesisHash: bh.GenesisHash,
Round: bh.Round,
Sha256TxnCommitment: bh.Sha256Commitment[:],
}
+
+ proto := config.Consensus[bh.CurrentProtocol]
+ if proto.StateProofBlockHashInLightHeader {
+ res.BlockHash = bh.Hash()
+ } else {
+ res.Seed = bh.Seed
+ }
+
+ return res
}
// ToBeHashed implements the crypto.Hashable interface
diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go
index 7eda4de55..cb3a63ad2 100644
--- a/data/bookkeeping/msgp_gen.go
+++ b/data/bookkeeping/msgp_gen.go
@@ -2663,23 +2663,27 @@ func GenesisAllocationMaxSize() (s int) {
func (z *LightBlockHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(4)
- var zb0001Mask uint8 /* 5 bits */
+ zb0001Len := uint32(5)
+ var zb0001Mask uint8 /* 6 bits */
if (*z).Seed.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x1
}
+ if (*z).BlockHash.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
if (*z).GenesisHash.MsgIsZero() {
zb0001Len--
- zb0001Mask |= 0x4
+ zb0001Mask |= 0x8
}
if (*z).Round.MsgIsZero() {
zb0001Len--
- zb0001Mask |= 0x8
+ zb0001Mask |= 0x10
}
if (*z).Sha256TxnCommitment.MsgIsZero() {
zb0001Len--
- zb0001Mask |= 0x10
+ zb0001Mask |= 0x20
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
@@ -2689,17 +2693,22 @@ func (z *LightBlockHeader) MarshalMsg(b []byte) (o []byte) {
o = append(o, 0xa1, 0x30)
o = (*z).Seed.MarshalMsg(o)
}
- if (zb0001Mask & 0x4) == 0 { // if not empty
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "1"
+ o = append(o, 0xa1, 0x31)
+ o = (*z).BlockHash.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).GenesisHash.MarshalMsg(o)
}
- if (zb0001Mask & 0x8) == 0 { // if not empty
+ if (zb0001Mask & 0x10) == 0 { // if not empty
// string "r"
o = append(o, 0xa1, 0x72)
o = (*z).Round.MarshalMsg(o)
}
- if (zb0001Mask & 0x10) == 0 { // if not empty
+ if (zb0001Mask & 0x20) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = (*z).Sha256TxnCommitment.MarshalMsg(o)
@@ -2741,6 +2750,14 @@ func (z *LightBlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalSt
}
if zb0001 > 0 {
zb0001--
+ bts, err = (*z).BlockHash.UnmarshalMsgWithState(bts, st)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "BlockHash")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
bts, err = (*z).Round.UnmarshalMsgWithState(bts, st)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
@@ -2792,6 +2809,12 @@ func (z *LightBlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalSt
err = msgp.WrapError(err, "Seed")
return
}
+ case "1":
+ bts, err = (*z).BlockHash.UnmarshalMsgWithState(bts, st)
+ if err != nil {
+ err = msgp.WrapError(err, "BlockHash")
+ return
+ }
case "r":
bts, err = (*z).Round.UnmarshalMsgWithState(bts, st)
if err != nil {
@@ -2833,18 +2856,18 @@ func (_ *LightBlockHeader) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *LightBlockHeader) Msgsize() (s int) {
- s = 1 + 2 + (*z).Seed.Msgsize() + 2 + (*z).Round.Msgsize() + 3 + (*z).GenesisHash.Msgsize() + 3 + (*z).Sha256TxnCommitment.Msgsize()
+ s = 1 + 2 + (*z).Seed.Msgsize() + 2 + (*z).BlockHash.Msgsize() + 2 + (*z).Round.Msgsize() + 3 + (*z).GenesisHash.Msgsize() + 3 + (*z).Sha256TxnCommitment.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *LightBlockHeader) MsgIsZero() bool {
- return ((*z).Seed.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).GenesisHash.MsgIsZero()) && ((*z).Sha256TxnCommitment.MsgIsZero())
+ return ((*z).Seed.MsgIsZero()) && ((*z).BlockHash.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).GenesisHash.MsgIsZero()) && ((*z).Sha256TxnCommitment.MsgIsZero())
}
// MaxSize returns a maximum valid message size for this message type
func LightBlockHeaderMaxSize() (s int) {
- s = 1 + 2 + committee.SeedMaxSize() + 2 + basics.RoundMaxSize() + 3 + crypto.DigestMaxSize() + 3 + crypto.GenericDigestMaxSize()
+ s = 1 + 2 + committee.SeedMaxSize() + 2 + BlockHashMaxSize() + 2 + basics.RoundMaxSize() + 3 + crypto.DigestMaxSize() + 3 + crypto.GenericDigestMaxSize()
return
}
diff --git a/data/committee/common_test.go b/data/committee/common_test.go
index 89e72fd79..26a110a9c 100644
--- a/data/committee/common_test.go
+++ b/data/committee/common_test.go
@@ -47,11 +47,19 @@ func signTx(s *crypto.SignatureSecrets, t transactions.Transaction) transactions
return t.Sign(s)
}
-func testingenv(t testing.TB, numAccounts, numTxs int) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) {
- return testingenvMoreKeys(t, numAccounts, numTxs, uint(5))
+// testingenv creates a random set of participating accounts and random transactions between them, and
+// the associated selection parameters for use in testing committee membership and credential validation.
+// seedGen is provided as an external source of randomness for the selection seed and transaction notes;
+// if the caller persists seedGen between calls to testingenv, each successive call will
+// exercise a new selection seed.
+func testingenv(t testing.TB, numAccounts, numTxs int, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) {
+ return testingenvMoreKeys(t, numAccounts, numTxs, uint(5), seedGen)
}
-func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward uint) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) {
+func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward uint, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) {
+ if seedGen == nil {
+ seedGen = rand.New(rand.NewSource(1)) // same source as setting GODEBUG=randautoseed=0, same as pre-Go 1.20 default seed
+ }
P := numAccounts // n accounts
TXs := numTxs // n txns
maxMoneyAtStart := 100000 // max money start
@@ -89,7 +97,7 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward
}
var seed Seed
- rand.Read(seed[:])
+ seedGen.Read(seed[:])
tx := make([]transactions.SignedTxn, TXs)
for i := 0; i < TXs; i++ {
@@ -115,7 +123,7 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward
Amount: amt,
},
}
- rand.Read(t.Note)
+ seedGen.Read(t.Note) // to match output from previous versions, which shared global RNG for seed & note
tx[i] = t.Sign(secrets[send])
}
diff --git a/data/committee/credential_test.go b/data/committee/credential_test.go
index 0e3c3ea40..22dfcff83 100644
--- a/data/committee/credential_test.go
+++ b/data/committee/credential_test.go
@@ -20,6 +20,7 @@ import (
"math/rand" // used for replicability of sortition benchmark
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/data/basics"
@@ -31,9 +32,10 @@ import (
func TestAccountSelected(t *testing.T) {
partitiontest.PartitionTest(t)
+ seedGen := rand.New(rand.NewSource(1))
N := 1
for i := 0; i < N; i++ {
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen)
period := Period(0)
leaders := uint64(0)
@@ -85,13 +87,18 @@ func TestAccountSelected(t *testing.T) {
if (committee < uint64(0.8*float64(step.CommitteeSize(proto)))) || (committee > uint64(1.2*float64(step.CommitteeSize(proto)))) {
t.Errorf("bad number of committee members %v expected %v", committee, step.CommitteeSize(proto))
}
+ if i == 0 {
+ // pin down deterministic outputs for first iteration
+ assert.EqualValues(t, 17, leaders)
+ assert.EqualValues(t, 2918, committee)
+ }
}
}
func TestRichAccountSelected(t *testing.T) {
partitiontest.PartitionTest(t)
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 10, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 10, 2000, nil)
period := Period(0)
ok, record, selectionSeed, _ := selParams(addresses[0])
@@ -139,16 +146,20 @@ func TestRichAccountSelected(t *testing.T) {
if (ccred.Weight < uint64(0.4*float64(step.CommitteeSize(proto)))) || (ccred.Weight > uint64(.6*float64(step.CommitteeSize(proto)))) {
t.Errorf("bad number of committee members %v expected %v", ccred.Weight, step.CommitteeSize(proto))
}
+ // pin down deterministic outputs, given initial seed values
+ assert.EqualValues(t, 6, lcred.Weight)
+ assert.EqualValues(t, 735, ccred.Weight)
}
func TestPoorAccountSelectedLeaders(t *testing.T) {
partitiontest.PartitionTest(t)
+ seedGen := rand.New(rand.NewSource(1))
N := 2
failsLeaders := 0
leaders := make([]uint64, N)
for i := 0; i < N; i++ {
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen)
period := Period(0)
for j := range addresses {
ok, record, selectionSeed, _ := selParams(addresses[j])
@@ -184,15 +195,19 @@ func TestPoorAccountSelectedLeaders(t *testing.T) {
if failsLeaders == 2 {
t.Errorf("bad number of leaders %v expected %v", leaders, proto.NumProposers)
}
+ // pin down deterministic outputs, given initial seed values
+ assert.EqualValues(t, 18, leaders[0])
+ assert.EqualValues(t, 20, leaders[1])
}
func TestPoorAccountSelectedCommittee(t *testing.T) {
partitiontest.PartitionTest(t)
+ seedGen := rand.New(rand.NewSource(1))
N := 1
committee := uint64(0)
for i := 0; i < N; i++ {
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen)
period := Period(0)
step := Cert
@@ -223,15 +238,19 @@ func TestPoorAccountSelectedCommittee(t *testing.T) {
if (committee < uint64(0.8*float64(step.CommitteeSize(proto)))) || (committee > uint64(1.2*float64(step.CommitteeSize(proto)))) {
t.Errorf("bad number of committee members %v expected %v", committee, step.CommitteeSize(proto))
}
+ if i == 0 { // pin down deterministic committee size, given initial seed value
+ assert.EqualValues(t, 1513, committee)
+ }
}
}
func TestNoMoneyAccountNotSelected(t *testing.T) {
partitiontest.PartitionTest(t)
+ seedGen := rand.New(rand.NewSource(1))
N := 1
for i := 0; i < N; i++ {
- selParams, _, round, addresses, _, _, _, _ := testingenv(t, 10, 2000)
+ selParams, _, round, addresses, _, _, _, _ := testingenv(t, 10, 2000, seedGen)
lookback := basics.Round(2*proto.SeedRefreshInterval + proto.SeedLookback + 1)
gen := rand.New(rand.NewSource(2))
_, _, zeroVRFSecret, _ := newAccount(t, gen, lookback, 5)
@@ -262,7 +281,7 @@ func TestNoMoneyAccountNotSelected(t *testing.T) {
func TestLeadersSelected(t *testing.T) {
partitiontest.PartitionTest(t)
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil)
period := Period(0)
step := Propose
@@ -294,7 +313,7 @@ func TestLeadersSelected(t *testing.T) {
func TestCommitteeSelected(t *testing.T) {
partitiontest.PartitionTest(t)
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil)
period := Period(0)
step := Soft
@@ -326,7 +345,7 @@ func TestCommitteeSelected(t *testing.T) {
func TestAccountNotSelected(t *testing.T) {
partitiontest.PartitionTest(t)
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil)
period := Period(0)
leaders := uint64(0)
for i := range addresses {
@@ -356,7 +375,7 @@ func TestAccountNotSelected(t *testing.T) {
// TODO update to remove VRF verification overhead
func BenchmarkSortition(b *testing.B) {
- selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(b, 100, 2000)
+ selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(b, 100, 2000, nil)
period := Period(0)
step := Soft
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index a03baea4f..47deae4d3 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -49,9 +49,6 @@ import (
// TransactionPool.AssembleBlock constructs a valid block for
// proposal given a deadline.
type TransactionPool struct {
- // feePerByte is stored at the beginning of this struct to ensure it has a 64 bit aligned address. This is needed as it's being used
- // with atomic operations which require 64 bit alignment on arm.
- feePerByte uint64
// const
logProcessBlockStats bool
@@ -65,6 +62,7 @@ type TransactionPool struct {
expiredTxCount map[basics.Round]int
pendingBlockEvaluator BlockEvaluator
numPendingWholeBlocks basics.Round
+ feePerByte atomic.Uint64
feeThresholdMultiplier uint64
statusCache *statusCache
@@ -295,7 +293,7 @@ func (pool *TransactionPool) checkPendingQueueSize(txnGroup []transactions.Signe
// FeePerByte returns the current minimum microalgos per byte a transaction
// needs to pay in order to get into the pool.
func (pool *TransactionPool) FeePerByte() uint64 {
- return atomic.LoadUint64(&pool.feePerByte)
+ return pool.feePerByte.Load()
}
// computeFeePerByte computes and returns the current minimum microalgos per byte a transaction
@@ -332,7 +330,7 @@ func (pool *TransactionPool) computeFeePerByte() uint64 {
}
// Update the counter for fast reads
- atomic.StoreUint64(&pool.feePerByte, feePerByte)
+ pool.feePerByte.Store(feePerByte)
return feePerByte
}
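The migration above, from a manually aligned uint64 field accessed via atomic.LoadUint64/StoreUint64 to the typed sync/atomic.Uint64, relies on a Go 1.19 guarantee: the typed atomics are always 64-bit aligned, even inside structs on 32-bit platforms, so the field no longer has to sit first in the struct. A minimal self-contained sketch (hypothetical struct, not the real pool):

package main

import (
	"fmt"
	"sync/atomic"
)

type pool struct {
	label      string        // placement no longer matters for atomic safety
	feePerByte atomic.Uint64 // Go 1.19+: always 64-bit aligned, even on arm32
}

func main() {
	var p pool
	p.feePerByte.Store(42)
	fmt.Println(p.feePerByte.Load())
}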
diff --git a/data/transactions/logic/Makefile b/data/transactions/logic/Makefile
index 4ed18ecbc..21d72a71c 100644
--- a/data/transactions/logic/Makefile
+++ b/data/transactions/logic/Makefile
@@ -19,7 +19,7 @@ fields_string.go: fields.go
go generate
README.md: TEAL_opcodes.md README_in.md
- python merge.py > README.md
+ python3 merge.py > README.md
@if [ -e $(SPECS)/dev/TEAL.md ]; then \
sed '/^$$/q' $(SPECS)/dev/TEAL.md | cat - README.md > teal.spec; \
mv teal.spec $(SPECS)/dev/TEAL.md; \
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 07b4a078b..3ba9037fc 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -52,17 +52,19 @@ assembly time to do type checking and to provide more informative error messages
| Name | Bound | AVM Type |
| ---- | ---- | -------- |
-| uint64 | x <= 18446744073709551615 | uint64 |
-| stateKey | len(x) <= 64 | []byte |
-| none | | none |
-| method | len(x) == 4 | []byte |
-| boxName | 1 <= len(x) <= 64 | []byte |
-| bool | x <= 1 | uint64 |
-| bigint | len(x) <= 64 | []byte |
-| any | | any |
-| address | len(x) == 32 | []byte |
-| []byte | len(x) <= 4096 | []byte |
| [32]byte | len(x) == 32 | []byte |
+| [64]byte | len(x) == 64 | []byte |
+| [80]byte | len(x) == 80 | []byte |
+| []byte | len(x) <= 4096 | []byte |
+| address | len(x) == 32 | []byte |
+| any | | any |
+| bigint | len(x) <= 64 | []byte |
+| bool | x <= 1 | uint64 |
+| boxName | 1 <= len(x) <= 64 | []byte |
+| method | len(x) == 4 | []byte |
+| none | | none |
+| stateKey | len(x) <= 64 | []byte |
+| uint64 | x <= 18446744073709551615 | uint64 |
@@ -339,7 +341,7 @@ An application transaction must indicate the action to be taken following the ex
Most operations work with only one type of argument, uint64 or bytes, and fail if the wrong type value is on the stack.
-Many instructions accept values to designate Accounts, Assets, or Applications. Beginning with v4, these values may be given as an _offset_ in the corresponding Txn fields (Txn.Accounts, Txn.ForeignAssets, Txn.ForeignApps) _or_ as the value itself (a byte-array address for Accounts, or a uint64 ID). The values, however, must still be present in the Txn fields. Before v4, most opcodes required the use of an offset, except for reading account local values of assets or applications, which accepted the IDs directly and did not require the ID to be present in they corresponding _Foreign_ array. (Note that beginning with v4, those IDs _are_ required to be present in their corresponding _Foreign_ array.) See individual opcodes for details. In the case of account offsets or application offsets, 0 is specially defined to Txn.Sender or the ID of the current application, respectively.
+Many instructions accept values to designate Accounts, Assets, or Applications. Beginning with v4, these values may be given as an _offset_ in the corresponding Txn fields (Txn.Accounts, Txn.ForeignAssets, Txn.ForeignApps) _or_ as the value itself (a byte-array address for Accounts, or a uint64 ID). The values, however, must still be present in the Txn fields. Before v4, most opcodes required the use of an offset, except for reading account local values of assets or applications, which accepted the IDs directly and did not require the ID to be present in the corresponding _Foreign_ array. (Note that beginning with v4, those IDs _are_ required to be present in their corresponding _Foreign_ array.) See individual opcodes for details. In the case of account offsets or application offsets, 0 is specially defined to Txn.Sender or the ID of the current application, respectively.
This summary is supplemented by more detail in the [opcodes document](TEAL_opcodes.md).
@@ -359,26 +361,10 @@ an opcode manipulates the stack in such a way that a value changes
position but is otherwise unchanged, the name of the output on the
return stack matches the name of the input value.
-### Arithmetic, Logic, and Cryptographic Operations
+### Arithmetic and Logic Operations
| Opcode | Description |
| - | -- |
-| `sha256` | SHA256 hash of value A, yields [32]byte |
-| `keccak256` | Keccak256 hash of value A, yields [32]byte |
-| `sha512_256` | SHA512_256 hash of value A, yields [32]byte |
-| `sha3_256` | SHA3_256 hash of value A, yields [32]byte |
-| `ed25519verify` | for (data A, signature B, pubkey C) verify the signature of ("ProgData" \|\| program_hash \|\| data) against the pubkey => {0 or 1} |
-| `ed25519verify_bare` | for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1} |
-| `ecdsa_verify v` | for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1} |
-| `ecdsa_pk_recover v` | for (data A, recovery id B, signature C, D) recover a public key |
-| `ecdsa_pk_decompress v` | decompress pubkey A into components X, Y |
-| `vrf_verify s` | Verify the proof B of message A against pubkey C. Returns vrf output and verification flag. |
-| `ec_add g` | for curve points A and B, return the curve point A + B |
-| `ec_scalar_mul g` | for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B. |
-| `ec_pairing_check g` | 1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0 |
-| `ec_multi_scalar_mul g` | for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn |
-| `ec_subgroup_check g` | 1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all. |
-| `ec_map_to g` | maps field element A to group G |
| `+` | A plus B. Fail on overflow. |
| `-` | A minus B. Fail if B > A. |
| `/` | A divided by B (truncated division). Fail if B == 0. |
@@ -397,7 +383,6 @@ return stack matches the name of the input value.
| `==` | A is equal to B => {0 or 1} |
| `!=` | A is not equal to B => {0 or 1} |
| `!` | A == 0 yields 1; else 0 |
-| `len` | yields length of byte value A |
| `itob` | converts uint64 A to big-endian byte array, always of length 8 |
| `btoi` | converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8. |
| `%` | A modulo B. Fail if B == 0. |
@@ -410,16 +395,17 @@ return stack matches the name of the input value.
| `divw` | A,B / C. Fail if C == 0 or if result overflows. |
| `divmodw` | W,X = (A,B / C,D); Y,Z = (A,B modulo C,D) |
| `expw` | A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. Fail if A == B == 0 or if the results exceeds 2^128-1 |
-| `getbit` | Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails |
-| `setbit` | Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails |
-| `getbyte` | Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails |
-| `setbyte` | Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails |
-| `concat` | join A and B |
### Byte Array Manipulation
| Opcode | Description |
| - | -- |
+| `getbit` | Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails |
+| `setbit` | Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails |
+| `getbyte` | Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails |
+| `setbyte` | Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails |
+| `concat` | join A and B |
+| `len` | yields length of byte value A |
| `substring s e` | A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails |
| `substring3` | A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails |
| `extract s l` | A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails |
@@ -472,6 +458,27 @@ these results may contain leading zero bytes.
| `b^` | A bitwise-xor B. A and B are zero-left extended to the greater of their lengths |
| `b~` | A with all bits inverted |
+### Cryptographic Operations
+
+| Opcode | Description |
+| - | -- |
+| `sha256` | SHA256 hash of value A, yields [32]byte |
+| `keccak256` | Keccak256 hash of value A, yields [32]byte |
+| `sha512_256` | SHA512_256 hash of value A, yields [32]byte |
+| `sha3_256` | SHA3_256 hash of value A, yields [32]byte |
+| `ed25519verify` | for (data A, signature B, pubkey C) verify the signature of ("ProgData" \|\| program_hash \|\| data) against the pubkey => {0 or 1} |
+| `ed25519verify_bare` | for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1} |
+| `ecdsa_verify v` | for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1} |
+| `ecdsa_pk_recover v` | for (data A, recovery id B, signature C, D) recover a public key |
+| `ecdsa_pk_decompress v` | decompress pubkey A into components X, Y |
+| `vrf_verify s` | Verify the proof B of message A against pubkey C. Returns vrf output and verification flag. |
+| `ec_add g` | for curve points A and B, return the curve point A + B |
+| `ec_scalar_mul g` | for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B. |
+| `ec_pairing_check g` | 1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0 |
+| `ec_multi_scalar_mul g` | for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn |
+| `ec_subgroup_check g` | 1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all. |
+| `ec_map_to g` | maps field element A to group G |
+
### Loading Values
Opcodes for getting data onto the stack.
@@ -768,7 +775,7 @@ are sure to be _available_.
The following opcodes allow for "inner transactions". Inner
transactions allow stateful applications to have many of the effects
-of a true top-level transaction, programatically. However, they are
+of a true top-level transaction, programmatically. However, they are
different in significant ways. The most important differences are
that they are not signed, duplicates are not rejected, and they do not
appear in the block in the usual way. Instead, their effects are
@@ -779,7 +786,7 @@ account that has been rekeyed to that hash.
In v5, inner transactions may perform `pay`, `axfer`, `acfg`, and
`afrz` effects. After executing an inner transaction with
-`itxn_submit`, the effects of the transaction are visible begining
+`itxn_submit`, the effects of the transaction are visible beginning
with the next instruction with, for example, `balance` and
`min_balance` checks. In v6, inner transactions may also perform
`keyreg` and `appl` effects. Inner `appl` calls fail if they attempt
@@ -799,7 +806,7 @@ setting is used when `itxn_submit` executes. For this purpose `Type`
and `TypeEnum` are considered to be the same field. When using
`itxn_field` to set an array field (`ApplicationArgs` `Accounts`,
`Assets`, or `Applications`) each use adds an element to the end of
-the the array, rather than setting the entire array at once.
+the array, rather than setting the entire array at once.
`itxn_field` fails immediately for unsupported fields, unsupported
transaction types, or improperly typed values for a particular
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index 04c67758c..e98d6c244 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -300,7 +300,7 @@ of (varuint, bytes) length prefixed byte strings.
Most operations work with only one type of argument, uint64 or bytes, and fail if the wrong type value is on the stack.
-Many instructions accept values to designate Accounts, Assets, or Applications. Beginning with v4, these values may be given as an _offset_ in the corresponding Txn fields (Txn.Accounts, Txn.ForeignAssets, Txn.ForeignApps) _or_ as the value itself (a byte-array address for Accounts, or a uint64 ID). The values, however, must still be present in the Txn fields. Before v4, most opcodes required the use of an offset, except for reading account local values of assets or applications, which accepted the IDs directly and did not require the ID to be present in they corresponding _Foreign_ array. (Note that beginning with v4, those IDs _are_ required to be present in their corresponding _Foreign_ array.) See individual opcodes for details. In the case of account offsets or application offsets, 0 is specially defined to Txn.Sender or the ID of the current application, respectively.
+Many instructions accept values to designate Accounts, Assets, or Applications. Beginning with v4, these values may be given as an _offset_ in the corresponding Txn fields (Txn.Accounts, Txn.ForeignAssets, Txn.ForeignApps) _or_ as the value itself (a byte-array address for Accounts, or a uint64 ID). The values, however, must still be present in the Txn fields. Before v4, most opcodes required the use of an offset, except for reading account local values of assets or applications, which accepted the IDs directly and did not require the ID to be present in the corresponding _Foreign_ array. (Note that beginning with v4, those IDs _are_ required to be present in their corresponding _Foreign_ array.) See individual opcodes for details. In the case of account offsets or application offsets, 0 is specially defined to Txn.Sender or the ID of the current application, respectively.
This summary is supplemented by more detail in the [opcodes document](TEAL_opcodes.md).
@@ -320,7 +320,7 @@ an opcode manipulates the stack in such a way that a value changes
position but is otherwise unchanged, the name of the output on the
return stack matches the name of the input value.
-### Arithmetic, Logic, and Cryptographic Operations
+### Arithmetic and Logic Operations
@@ Arithmetic.md @@
@@ -350,6 +350,10 @@ these results may contain leading zero bytes.
@@ Byte_Array_Logic.md @@
+### Cryptographic Operations
+
+@@ Cryptography.md @@
+
### Loading Values
Opcodes for getting data onto the stack.
@@ -418,7 +422,7 @@ are sure to be _available_.
The following opcodes allow for "inner transactions". Inner
transactions allow stateful applications to have many of the effects
-of a true top-level transaction, programatically. However, they are
+of a true top-level transaction, programmatically. However, they are
different in significant ways. The most important differences are
that they are not signed, duplicates are not rejected, and they do not
appear in the block in the usual way. Instead, their effects are
@@ -429,7 +433,7 @@ account that has been rekeyed to that hash.
In v5, inner transactions may perform `pay`, `axfer`, `acfg`, and
`afrz` effects. After executing an inner transaction with
-`itxn_submit`, the effects of the transaction are visible begining
+`itxn_submit`, the effects of the transaction are visible beginning
with the next instruction with, for example, `balance` and
`min_balance` checks. In v6, inner transactions may also perform
`keyreg` and `appl` effects. Inner `appl` calls fail if they attempt
@@ -449,7 +453,7 @@ setting is used when `itxn_submit` executes. For this purpose `Type`
and `TypeEnum` are considered to be the same field. When using
`itxn_field` to set an array field (`ApplicationArgs` `Accounts`,
`Assets`, or `Applications`) each use adds an element to the end of
-the the array, rather than setting the entire array at once.
+the array, rather than setting the entire array at once.
`itxn_field` fails immediately for unsupported fields, unsupported
transaction types, or improperly typed values for a particular
diff --git a/data/transactions/logic/TEAL_opcodes_v1.md b/data/transactions/logic/TEAL_opcodes_v1.md
index f9ef580cf..d98190643 100644
--- a/data/transactions/logic/TEAL_opcodes_v1.md
+++ b/data/transactions/logic/TEAL_opcodes_v1.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
- Mode: Signature
diff --git a/data/transactions/logic/TEAL_opcodes_v10.md b/data/transactions/logic/TEAL_opcodes_v10.md
index 31beb6c38..4ec00a52b 100644
--- a/data/transactions/logic/TEAL_opcodes_v10.md
+++ b/data/transactions/logic/TEAL_opcodes_v10.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
@@ -43,7 +43,7 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Syntax: `ecdsa_verify V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x05 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
+- Stack: ..., A: [32]byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: Secp256k1=1700; Secp256r1=2500
- Availability: v5
@@ -75,7 +75,7 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- Syntax: `ecdsa_pk_recover V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x07 {uint8}
-- Stack: ..., A: []byte, B: uint64, C: []byte, D: []byte &rarr; ..., X: []byte, Y: []byte
+- Stack: ..., A: [32]byte, B: uint64, C: [32]byte, D: [32]byte &rarr; ..., X: []byte, Y: []byte
- for (data A, recovery id B, signature C, D) recover a public key
- **Cost**: 2000
- Availability: v5
@@ -1136,7 +1136,7 @@ pushints args are not added to the intcblock during assembly processes
## ed25519verify_bare
- Bytecode: 0x84
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: 1900
- Availability: v7
@@ -1607,7 +1607,7 @@ For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `bo
- Syntax: `vrf_verify S` ∋ S: [vrf_verify](#field-group-vrf_verify)
- Bytecode: 0xd0 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., X: []byte, Y: bool
+- Stack: ..., A: []byte, B: [80]byte, C: [32]byte &rarr; ..., X: []byte, Y: bool
- Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.
- **Cost**: 5700
- Availability: v7
diff --git a/data/transactions/logic/TEAL_opcodes_v2.md b/data/transactions/logic/TEAL_opcodes_v2.md
index 52e65591b..180f899be 100644
--- a/data/transactions/logic/TEAL_opcodes_v2.md
+++ b/data/transactions/logic/TEAL_opcodes_v2.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
- Mode: Signature
diff --git a/data/transactions/logic/TEAL_opcodes_v3.md b/data/transactions/logic/TEAL_opcodes_v3.md
index c09a75df1..9915f6102 100644
--- a/data/transactions/logic/TEAL_opcodes_v3.md
+++ b/data/transactions/logic/TEAL_opcodes_v3.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
- Mode: Signature
diff --git a/data/transactions/logic/TEAL_opcodes_v4.md b/data/transactions/logic/TEAL_opcodes_v4.md
index 96e51dee0..3da0bcba0 100644
--- a/data/transactions/logic/TEAL_opcodes_v4.md
+++ b/data/transactions/logic/TEAL_opcodes_v4.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
- Mode: Signature
diff --git a/data/transactions/logic/TEAL_opcodes_v5.md b/data/transactions/logic/TEAL_opcodes_v5.md
index 012d504ce..584e8bc1f 100644
--- a/data/transactions/logic/TEAL_opcodes_v5.md
+++ b/data/transactions/logic/TEAL_opcodes_v5.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
@@ -43,7 +43,7 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Syntax: `ecdsa_verify V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x05 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
+- Stack: ..., A: [32]byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: Secp256k1=1700
- Availability: v5
@@ -74,7 +74,7 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- Syntax: `ecdsa_pk_recover V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x07 {uint8}
-- Stack: ..., A: []byte, B: uint64, C: []byte, D: []byte &rarr; ..., X: []byte, Y: []byte
+- Stack: ..., A: [32]byte, B: uint64, C: [32]byte, D: [32]byte &rarr; ..., X: []byte, Y: []byte
- for (data A, recovery id B, signature C, D) recover a public key
- **Cost**: 2000
- Availability: v5
diff --git a/data/transactions/logic/TEAL_opcodes_v6.md b/data/transactions/logic/TEAL_opcodes_v6.md
index d21931430..eab29a84c 100644
--- a/data/transactions/logic/TEAL_opcodes_v6.md
+++ b/data/transactions/logic/TEAL_opcodes_v6.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
@@ -43,7 +43,7 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Syntax: `ecdsa_verify V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x05 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
+- Stack: ..., A: [32]byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: Secp256k1=1700
- Availability: v5
@@ -74,7 +74,7 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- Syntax: `ecdsa_pk_recover V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x07 {uint8}
-- Stack: ..., A: []byte, B: uint64, C: []byte, D: []byte &rarr; ..., X: []byte, Y: []byte
+- Stack: ..., A: [32]byte, B: uint64, C: [32]byte, D: [32]byte &rarr; ..., X: []byte, Y: []byte
- for (data A, recovery id B, signature C, D) recover a public key
- **Cost**: 2000
- Availability: v5
diff --git a/data/transactions/logic/TEAL_opcodes_v7.md b/data/transactions/logic/TEAL_opcodes_v7.md
index 41b1468ae..5eb79e38f 100644
--- a/data/transactions/logic/TEAL_opcodes_v7.md
+++ b/data/transactions/logic/TEAL_opcodes_v7.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
@@ -43,7 +43,7 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Syntax: `ecdsa_verify V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x05 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
+- Stack: ..., A: [32]byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: Secp256k1=1700; Secp256r1=2500
- Availability: v5
@@ -75,7 +75,7 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- Syntax: `ecdsa_pk_recover V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x07 {uint8}
-- Stack: ..., A: []byte, B: uint64, C: []byte, D: []byte &rarr; ..., X: []byte, Y: []byte
+- Stack: ..., A: [32]byte, B: uint64, C: [32]byte, D: [32]byte &rarr; ..., X: []byte, Y: []byte
- for (data A, recovery id B, signature C, D) recover a public key
- **Cost**: 2000
- Availability: v5
@@ -1081,7 +1081,7 @@ pushint args are not added to the intcblock during assembly processes
## ed25519verify_bare
- Bytecode: 0x84
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: 1900
- Availability: v7
@@ -1446,7 +1446,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
- Syntax: `vrf_verify S` ∋ S: [vrf_verify](#field-group-vrf_verify)
- Bytecode: 0xd0 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., X: []byte, Y: bool
+- Stack: ..., A: []byte, B: [80]byte, C: [32]byte &rarr; ..., X: []byte, Y: bool
- Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.
- **Cost**: 5700
- Availability: v7
diff --git a/data/transactions/logic/TEAL_opcodes_v8.md b/data/transactions/logic/TEAL_opcodes_v8.md
index b2d1b851d..b8efb37fa 100644
--- a/data/transactions/logic/TEAL_opcodes_v8.md
+++ b/data/transactions/logic/TEAL_opcodes_v8.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
@@ -43,7 +43,7 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Syntax: `ecdsa_verify V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x05 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
+- Stack: ..., A: [32]byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: Secp256k1=1700; Secp256r1=2500
- Availability: v5
@@ -75,7 +75,7 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- Syntax: `ecdsa_pk_recover V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x07 {uint8}
-- Stack: ..., A: []byte, B: uint64, C: []byte, D: []byte &rarr; ..., X: []byte, Y: []byte
+- Stack: ..., A: [32]byte, B: uint64, C: [32]byte, D: [32]byte &rarr; ..., X: []byte, Y: []byte
- for (data A, recovery id B, signature C, D) recover a public key
- **Cost**: 2000
- Availability: v5
@@ -1134,7 +1134,7 @@ pushints args are not added to the intcblock during assembly processes
## ed25519verify_bare
- Bytecode: 0x84
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: 1900
- Availability: v7
@@ -1605,7 +1605,7 @@ For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `bo
- Syntax: `vrf_verify S` ∋ S: [vrf_verify](#field-group-vrf_verify)
- Bytecode: 0xd0 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., X: []byte, Y: bool
+- Stack: ..., A: []byte, B: [80]byte, C: [32]byte &rarr; ..., X: []byte, Y: bool
- Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.
- **Cost**: 5700
- Availability: v7
diff --git a/data/transactions/logic/TEAL_opcodes_v9.md b/data/transactions/logic/TEAL_opcodes_v9.md
index 231343564..54f053686 100644
--- a/data/transactions/logic/TEAL_opcodes_v9.md
+++ b/data/transactions/logic/TEAL_opcodes_v9.md
@@ -33,7 +33,7 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
- Bytecode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
@@ -43,7 +43,7 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
- Syntax: `ecdsa_verify V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x05 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
+- Stack: ..., A: [32]byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: Secp256k1=1700; Secp256r1=2500
- Availability: v5
@@ -75,7 +75,7 @@ The 33 byte public key in a compressed form to be decompressed into X and Y (top
- Syntax: `ecdsa_pk_recover V` ∋ V: [ECDSA](#field-group-ecdsa)
- Bytecode: 0x07 {uint8}
-- Stack: ..., A: []byte, B: uint64, C: []byte, D: []byte &rarr; ..., X: []byte, Y: []byte
+- Stack: ..., A: [32]byte, B: uint64, C: [32]byte, D: [32]byte &rarr; ..., X: []byte, Y: []byte
- for (data A, recovery id B, signature C, D) recover a public key
- **Cost**: 2000
- Availability: v5
@@ -1134,7 +1134,7 @@ pushints args are not added to the intcblock during assembly processes
## ed25519verify_bare
- Bytecode: 0x84
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
+- Stack: ..., A: []byte, B: [64]byte, C: [32]byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: 1900
- Availability: v7
@@ -1605,7 +1605,7 @@ For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `bo
- Syntax: `vrf_verify S` ∋ S: [vrf_verify](#field-group-vrf_verify)
- Bytecode: 0xd0 {uint8}
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., X: []byte, Y: bool
+- Stack: ..., A: []byte, B: [80]byte, C: [32]byte &rarr; ..., X: []byte, Y: bool
- Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.
- **Cost**: 5700
- Availability: v7
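
Both opcode files now document ed25519verify and ed25519verify_bare as taking a [64]byte signature and a [32]byte public key. A self-contained sketch of the same length discipline, using the standard library's crypto/ed25519 rather than go-algorand's crypto wrappers; the error strings mirror the ones in the new crypto.go below:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

// verifyBare mirrors the documented ed25519verify_bare contract: the
// signature must be exactly 64 bytes and the public key exactly 32
// bytes, otherwise the program fails rather than returning 0.
func verifyBare(data, sig, pk []byte) (bool, error) {
	if len(pk) != ed25519.PublicKeySize { // 32
		return false, fmt.Errorf("invalid public key")
	}
	if len(sig) != ed25519.SignatureSize { // 64
		return false, fmt.Errorf("invalid signature")
	}
	return ed25519.Verify(ed25519.PublicKey(pk), data, sig), nil
}

func main() {
	pk, sk, _ := ed25519.GenerateKey(rand.Reader)
	sig := ed25519.Sign(sk, []byte("msg"))
	ok, err := verifyBare([]byte("msg"), sig, pk)
	fmt.Println(ok, err) // true <nil>
	_, err = verifyBare([]byte("msg"), sig[1:], pk)
	fmt.Println(err) // invalid signature
}
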
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index aa32c51eb..e8011fd81 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -226,7 +226,7 @@ type SourceLocation struct {
type OpStream struct {
Version uint64
Trace *strings.Builder
- Warnings []error // informational warnings, shouldn't stop assembly
+ Warnings []sourceError // informational warnings, shouldn't stop assembly
Errors []sourceError // errors that should prevent final assembly
Program []byte // Final program bytes. Will stay nil if any errors
@@ -2694,7 +2694,7 @@ func (ops *OpStream) record(se *sourceError) {
}
func (ops *OpStream) warn(t token, format string, a ...interface{}) {
- warning := &sourceError{t.line, t.col, fmt.Errorf(format, a...)}
+ warning := sourceError{t.line, t.col, fmt.Errorf(format, a...)}
ops.Warnings = append(ops.Warnings, warning)
}
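
Promoting Warnings from []error to []sourceError lets warnings carry the same line and column positions that errors already have. A sketch of how such a record could render, assuming only the Line/Column/Err fields visible in this diff; a zero Column drops the ":col" suffix, matching the expected strings in the test change below:

package main

import (
	"errors"
	"fmt"
)

// sourceError is a sketch of the assembler's warning/error record,
// with the field names taken from this diff; a zero Column means
// "unknown" and is omitted from the prefix.
type sourceError struct {
	Line, Column int
	Err          error
}

func (se sourceError) Error() string {
	if se.Column != 0 {
		return fmt.Sprintf("%d:%d: %s", se.Line, se.Column, se.Err)
	}
	return fmt.Sprintf("%d: %s", se.Line, se.Err)
}

func main() {
	fmt.Println(sourceError{Line: 5, Err: errors.New("warning 1")})            // 5: warning 1
	fmt.Println(sourceError{Line: 8, Column: 1, Err: errors.New("warning 4")}) // 8:1: warning 4
}
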
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index d98349250..19c75d201 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -518,7 +518,7 @@ func TestAssemble(t *testing.T) {
// this month that we did last month.
bytecode, ok := compiled[v]
require.True(t, ok, "Need v%d bytecode", v)
- expectedBytes, _ := hex.DecodeString(bytecode)
+ expectedBytes, _ := hex.DecodeString(strings.ReplaceAll(bytecode, " ", ""))
require.NotEmpty(t, expectedBytes)
// the hex is for convenience if the program has been changed. the
// hex string can be copy pasted back in as a new expected result.
@@ -3604,10 +3604,13 @@ func TestReportMultipleErrors(t *testing.T) {
{Line: 1, Err: errors.New("error 1")},
{Err: errors.New("error 2")},
{Line: 3, Err: errors.New("error 3")},
+ {Line: 4, Column: 1, Err: errors.New("error 4")},
},
- Warnings: []error{
- errors.New("warning 1"),
- errors.New("warning 2"),
+ Warnings: []sourceError{
+ {Line: 5, Err: errors.New("warning 1")},
+ {Err: errors.New("warning 2")},
+ {Line: 7, Err: errors.New("warning 3")},
+ {Line: 8, Column: 1, Err: errors.New("warning 4")},
},
}
@@ -3617,8 +3620,11 @@ func TestReportMultipleErrors(t *testing.T) {
expected := `test.txt: 1: error 1
test.txt: 0: error 2
test.txt: 3: error 3
-test.txt: warning 1
-test.txt: warning 2
+test.txt: 4:1: error 4
+test.txt: 5: warning 1
+test.txt: 0: warning 2
+test.txt: 7: warning 3
+test.txt: 8:1: warning 4
`
require.Equal(t, expected, b.String())
@@ -3628,8 +3634,11 @@ test.txt: warning 2
expected = `1: error 1
0: error 2
3: error 3
-warning 1
-warning 2
+4:1: error 4
+5: warning 1
+0: warning 2
+7: warning 3
+8:1: warning 4
`
require.Equal(t, expected, b.String())
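
hex.DecodeString rejects any non-hex byte, including spaces, so stripping blanks first lets the expected bytecode constants be written with readable grouping. A quick illustration:

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	grouped := "01 20 01 01 22" // spaced for readability
	_, err := hex.DecodeString(grouped)
	fmt.Println(err) // encoding/hex: invalid byte: U+0020 ' '

	b, _ := hex.DecodeString(strings.ReplaceAll(grouped, " ", ""))
	fmt.Printf("% x\n", b) // 01 20 01 01 22
}
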
diff --git a/data/transactions/logic/crypto.go b/data/transactions/logic/crypto.go
new file mode 100644
index 000000000..43fe4d50d
--- /dev/null
+++ b/data/transactions/logic/crypto.go
@@ -0,0 +1,360 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/sha256"
+ "crypto/sha512"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/secp256k1"
+ "github.com/algorand/go-algorand/protocol"
+ "golang.org/x/crypto/sha3"
+)
+
+func opSHA256(cx *EvalContext) error {
+ last := len(cx.Stack) - 1
+ hash := sha256.Sum256(cx.Stack[last].Bytes)
+ cx.Stack[last].Bytes = hash[:]
+ return nil
+}
+
+// The NIST SHA3-256 is implemented for compatibility with ICON
+func opSHA3_256(cx *EvalContext) error {
+ last := len(cx.Stack) - 1
+ hash := sha3.Sum256(cx.Stack[last].Bytes)
+ cx.Stack[last].Bytes = hash[:]
+ return nil
+}
+
+// The Keccak256 variant of SHA-3 is implemented for compatibility with Ethereum
+func opKeccak256(cx *EvalContext) error {
+ last := len(cx.Stack) - 1
+ hasher := sha3.NewLegacyKeccak256()
+ hasher.Write(cx.Stack[last].Bytes)
+ hv := make([]byte, 0, hasher.Size())
+ hv = hasher.Sum(hv)
+ cx.Stack[last].Bytes = hv
+ return nil
+}
+
+// This is the hash commonly used in Algorand in crypto/util.go Hash()
+//
+// It is explicitly implemented here in terms of the specific hash for
+// stability and portability in case the rest of Algorand ever moves
+// to a different default hash. For stability of this language, at
+// that time a new opcode should be made with the new hash.
+func opSHA512_256(cx *EvalContext) error {
+ last := len(cx.Stack) - 1
+ hash := sha512.Sum512_256(cx.Stack[last].Bytes)
+ cx.Stack[last].Bytes = hash[:]
+ return nil
+}
+
+// Msg is data meant to be signed and then verified with the
+// ed25519verify opcode.
+type Msg struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ ProgramHash crypto.Digest `codec:"p"`
+ Data []byte `codec:"d"`
+}
+
+// ToBeHashed implements crypto.Hashable
+func (msg Msg) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.ProgramData, append(msg.ProgramHash[:], msg.Data...)
+}
+
+// programHash lets us lazily compute H(cx.program)
+func (cx *EvalContext) programHash() crypto.Digest {
+ if cx.programHashCached == (crypto.Digest{}) {
+ cx.programHashCached = crypto.HashObj(Program(cx.program))
+ }
+ return cx.programHashCached
+}
+
+func opEd25519Verify(cx *EvalContext) error {
+ last := len(cx.Stack) - 1 // index of PK
+ prev := last - 1 // index of signature
+ pprev := prev - 1 // index of data
+
+ var sv crypto.SignatureVerifier
+ if len(cx.Stack[last].Bytes) != len(sv) {
+ return errors.New("invalid public key")
+ }
+ copy(sv[:], cx.Stack[last].Bytes)
+
+ var sig crypto.Signature
+ if len(cx.Stack[prev].Bytes) != len(sig) {
+ return errors.New("invalid signature")
+ }
+ copy(sig[:], cx.Stack[prev].Bytes)
+
+ msg := Msg{ProgramHash: cx.programHash(), Data: cx.Stack[pprev].Bytes}
+ cx.Stack[pprev] = boolToSV(sv.Verify(msg, sig))
+ cx.Stack = cx.Stack[:prev]
+ return nil
+}
+
+func opEd25519VerifyBare(cx *EvalContext) error {
+ last := len(cx.Stack) - 1 // index of PK
+ prev := last - 1 // index of signature
+ pprev := prev - 1 // index of data
+
+ var sv crypto.SignatureVerifier
+ if len(cx.Stack[last].Bytes) != len(sv) {
+ return errors.New("invalid public key")
+ }
+ copy(sv[:], cx.Stack[last].Bytes)
+
+ var sig crypto.Signature
+ if len(cx.Stack[prev].Bytes) != len(sig) {
+ return errors.New("invalid signature")
+ }
+ copy(sig[:], cx.Stack[prev].Bytes)
+
+ cx.Stack[pprev] = boolToSV(sv.VerifyBytes(cx.Stack[pprev].Bytes, sig))
+ cx.Stack = cx.Stack[:prev]
+ return nil
+}
+
+func leadingZeros(size int, b *big.Int) ([]byte, error) {
+ byteLength := (b.BitLen() + 7) / 8
+ if size < byteLength {
+ return nil, fmt.Errorf("insufficient buffer size: %d < %d", size, byteLength)
+ }
+ buf := make([]byte, size)
+ b.FillBytes(buf)
+ return buf, nil
+}
+
+var ecdsaVerifyCosts = []int{
+ Secp256k1: 1700,
+ Secp256r1: 2500,
+}
+
+var secp256r1 = elliptic.P256()
+
+func opEcdsaVerify(cx *EvalContext) error {
+ ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
+ if !ok || fs.version > cx.version {
+ return fmt.Errorf("invalid curve %d", ecdsaCurve)
+ }
+
+ if fs.field != Secp256k1 && fs.field != Secp256r1 {
+ return fmt.Errorf("unsupported curve %d", fs.field)
+ }
+
+ last := len(cx.Stack) - 1 // index of PK y
+ prev := last - 1 // index of PK x
+ pprev := prev - 1 // index of signature s
+ fourth := pprev - 1 // index of signature r
+ fifth := fourth - 1 // index of data
+
+ pkY := cx.Stack[last].Bytes
+ pkX := cx.Stack[prev].Bytes
+ sigS := cx.Stack[pprev].Bytes
+ sigR := cx.Stack[fourth].Bytes
+ msg := cx.Stack[fifth].Bytes
+
+ if len(msg) != 32 {
+ return fmt.Errorf("the signed data must be 32 bytes long, not %d", len(msg))
+ }
+
+ x := new(big.Int).SetBytes(pkX)
+ y := new(big.Int).SetBytes(pkY)
+
+ var result bool
+ if fs.field == Secp256k1 {
+ signature := make([]byte, 0, len(sigR)+len(sigS))
+ signature = append(signature, sigR...)
+ signature = append(signature, sigS...)
+
+ pubkey := secp256k1.S256().Marshal(x, y)
+ result = secp256k1.VerifySignature(pubkey, msg, signature)
+ } else if fs.field == Secp256r1 {
+ if !cx.Proto.EnablePrecheckECDSACurve || secp256r1.IsOnCurve(x, y) {
+ pubkey := ecdsa.PublicKey{
+ Curve: secp256r1,
+ X: x,
+ Y: y,
+ }
+ r := new(big.Int).SetBytes(sigR)
+ s := new(big.Int).SetBytes(sigS)
+ result = ecdsa.Verify(&pubkey, msg, r, s)
+ }
+ }
+
+ cx.Stack[fifth] = boolToSV(result)
+ cx.Stack = cx.Stack[:fourth]
+ return nil
+}
+
+var ecdsaDecompressCosts = []int{
+ Secp256k1: 650,
+ Secp256r1: 2400,
+}
+
+func opEcdsaPkDecompress(cx *EvalContext) error {
+ ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
+ if !ok || fs.version > cx.version {
+ return fmt.Errorf("invalid curve %d", ecdsaCurve)
+ }
+
+ if fs.field != Secp256k1 && fs.field != Secp256r1 {
+ return fmt.Errorf("unsupported curve %d", fs.field)
+ }
+
+ last := len(cx.Stack) - 1 // compressed PK
+
+ pubkey := cx.Stack[last].Bytes
+ var x, y *big.Int
+ if fs.field == Secp256k1 {
+ x, y = secp256k1.DecompressPubkey(pubkey)
+ if x == nil {
+ return fmt.Errorf("invalid pubkey")
+ }
+ } else if fs.field == Secp256r1 {
+ x, y = elliptic.UnmarshalCompressed(elliptic.P256(), pubkey)
+ if x == nil {
+ return fmt.Errorf("invalid compressed pubkey")
+ }
+ }
+
+ var err error
+ cx.Stack[last].Uint = 0
+ cx.Stack[last].Bytes, err = leadingZeros(32, x)
+ if err != nil {
+ return fmt.Errorf("x component zeroing failed: %w", err)
+ }
+
+ var sv stackValue
+ sv.Bytes, err = leadingZeros(32, y)
+ if err != nil {
+ return fmt.Errorf("y component zeroing failed: %w", err)
+ }
+
+ cx.Stack = append(cx.Stack, sv)
+ return nil
+}
+
+func opEcdsaPkRecover(cx *EvalContext) error {
+ ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
+ fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
+ if !ok || fs.version > cx.version {
+ return fmt.Errorf("invalid curve %d", ecdsaCurve)
+ }
+
+ if fs.field != Secp256k1 {
+ return fmt.Errorf("unsupported curve %d", fs.field)
+ }
+
+ last := len(cx.Stack) - 1 // index of signature s
+ prev := last - 1 // index of signature r
+ pprev := prev - 1 // index of recovery id
+ fourth := pprev - 1 // index of data
+
+ sigS := cx.Stack[last].Bytes
+ sigR := cx.Stack[prev].Bytes
+ recid := cx.Stack[pprev].Uint
+ msg := cx.Stack[fourth].Bytes
+
+ if recid > 3 {
+ return fmt.Errorf("invalid recovery id: %d", recid)
+ }
+
+ signature := make([]byte, 0, len(sigR)+len(sigS)+1)
+ signature = append(signature, sigR...)
+ signature = append(signature, sigS...)
+ signature = append(signature, uint8(recid))
+
+ pk, err := secp256k1.RecoverPubkey(msg, signature)
+ if err != nil {
+ return fmt.Errorf("pubkey recover failed: %s", err.Error())
+ }
+ x, y := secp256k1.S256().Unmarshal(pk)
+ if x == nil {
+ return fmt.Errorf("pubkey unmarshal failed")
+ }
+
+ cx.Stack[fourth].Uint = 0
+ cx.Stack[fourth].Bytes, err = leadingZeros(32, x)
+ if err != nil {
+ return fmt.Errorf("x component zeroing failed: %s", err.Error())
+ }
+ cx.Stack[pprev].Uint = 0
+ cx.Stack[pprev].Bytes, err = leadingZeros(32, y)
+ if err != nil {
+ return fmt.Errorf("y component zeroing failed: %s", err.Error())
+ }
+ cx.Stack = cx.Stack[:prev]
+ return nil
+}
+
+type rawMessage []byte
+
+func (rm rawMessage) ToBeHashed() (protocol.HashID, []byte) {
+ return "", []byte(rm)
+}
+
+func opVrfVerify(cx *EvalContext) error {
+ last := len(cx.Stack) - 1 // PK
+ prev := last - 1 // proof
+ pprev := prev - 1 // data
+
+ data := rawMessage(cx.Stack[pprev].Bytes)
+ proofbytes := cx.Stack[prev].Bytes
+ var proof crypto.VrfProof
+ if len(proofbytes) != len(proof) {
+ return fmt.Errorf("vrf proof wrong size %d != %d", len(proofbytes), len(proof))
+ }
+ copy(proof[:], proofbytes[:])
+
+ pubkeybytes := cx.Stack[last].Bytes
+ var pubkey crypto.VrfPubkey
+ if len(pubkeybytes) != len(pubkey) {
+ return fmt.Errorf("vrf pubkey wrong size %d != %d", len(pubkeybytes), len(pubkey))
+ }
+ copy(pubkey[:], pubkeybytes[:])
+
+ var verified bool
+ var output []byte
+ std := VrfStandard(cx.program[cx.pc+1])
+ ss, ok := vrfStandardSpecByField(std)
+ if !ok || ss.version > cx.version {
+ return fmt.Errorf("invalid VRF standard %s", std)
+ }
+ switch std {
+ case VrfAlgorand:
+ var out crypto.VrfOutput
+ verified, out = pubkey.Verify(proof, data)
+ output = out[:]
+ default:
+ return fmt.Errorf("unsupported vrf_verify standard %s", std)
+ }
+
+ cx.Stack[pprev].Bytes = output[:]
+ cx.Stack[prev] = boolToSV(verified)
+ cx.Stack = cx.Stack[:last] // pop 1 because we take 3 args and return 2
+ return nil
+}
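
leadingZeros above is why ecdsa_pk_decompress and ecdsa_pk_recover always return exactly 32-byte coordinates: big.Int.Bytes() drops leading zero bytes, while FillBytes left-pads into a fixed-width buffer. A standalone sketch of the same helper:

package main

import (
	"fmt"
	"math/big"
)

// leadingZeros, as in the new crypto.go: fixed-width big-endian
// encoding, erroring out if the value does not fit in size bytes.
func leadingZeros(size int, b *big.Int) ([]byte, error) {
	if (b.BitLen()+7)/8 > size {
		return nil, fmt.Errorf("insufficient buffer size")
	}
	buf := make([]byte, size)
	b.FillBytes(buf)
	return buf, nil
}

func main() {
	x := big.NewInt(0xabcd) // only 2 significant bytes
	buf, _ := leadingZeros(32, x)
	fmt.Println(len(buf), buf[30:]) // 32 [171 205]
}
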
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/crypto_test.go
index 946b64956..7c0dc5f58 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/crypto_test.go
@@ -117,9 +117,9 @@ func TestVrfVerify(t *testing.T) {
testApp(t, notrack("byte 0x1122; byte 0x2233; int 3; vrf_verify VrfAlgorand"), ep, "arg 2 wanted")
ep = defaultSigParams()
- testLogic(t, "byte 0x1122; byte 0x2233; byte 0x3344; vrf_verify VrfAlgorand", LogicVersion, ep, "vrf proof wrong size")
+ testLogic(t, notrack("byte 0x1122; byte 0x2233; byte 0x3344; vrf_verify VrfAlgorand"), LogicVersion, ep, "vrf proof wrong size")
// 80 byte proof
- testLogic(t, "byte 0x1122; int 80; bzero; byte 0x3344; vrf_verify VrfAlgorand", LogicVersion, ep, "vrf pubkey wrong size")
+ testLogic(t, notrack("byte 0x1122; int 80; bzero; byte 0x3344; vrf_verify VrfAlgorand"), LogicVersion, ep, "vrf pubkey wrong size")
// 32 byte pubkey
testLogic(t, "byte 0x3344; int 80; bzero; int 32; bzero; vrf_verify VrfAlgorand", LogicVersion, ep, "stack len is 2")
@@ -194,33 +194,32 @@ func TestEd25519verify(t *testing.T) {
msg := "62fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
data, err := hex.DecodeString(msg)
require.NoError(t, err)
- pk := basics.Address(c.SignatureVerifier)
- pkStr := pk.String()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops := testProg(t, fmt.Sprintf(`arg 0
-arg 1
-addr %s
-ed25519verify`, pkStr), v)
+ ops := testProg(t, "arg 0; arg 1; arg 2; ed25519verify", v)
sig := c.Sign(Msg{
ProgramHash: crypto.HashObj(Program(ops.Program)),
Data: data[:],
})
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- txn.Lsig.Args = [][]byte{data[:], sig[:]}
+ txn.Lsig.Args = [][]byte{data[:], sig[:], c.SignatureVerifier[:]}
testLogicBytes(t, ops.Program, defaultSigParams(txn))
// short sig will fail
- txn.Lsig.Args[1] = sig[1:]
+ txn.Lsig.Args = [][]byte{data[:], sig[1:], c.SignatureVerifier[:]}
testLogicBytes(t, ops.Program, defaultSigParams(txn), "invalid signature")
+ // short pk will fail
+ txn.Lsig.Args = [][]byte{data[:], sig[:], c.SignatureVerifier[1:]}
+ testLogicBytes(t, ops.Program, defaultSigParams(txn), "invalid public key")
+
// flip a bit and it should not pass
- msg1 := "52fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
+ msg1 := "5" + msg[1:]
data1, err := hex.DecodeString(msg1)
require.NoError(t, err)
- txn.Lsig.Args = [][]byte{data1, sig[:]}
+ txn.Lsig.Args = [][]byte{data1, sig[:], c.SignatureVerifier[:]}
testLogicBytes(t, ops.Program, defaultSigParams(txn), "REJECT")
})
}
@@ -236,31 +235,30 @@ func TestEd25519VerifyBare(t *testing.T) {
msg := "62fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
data, err := hex.DecodeString(msg)
require.NoError(t, err)
- pk := basics.Address(c.SignatureVerifier)
- pkStr := pk.String()
for v := uint64(7); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- ops := testProg(t, fmt.Sprintf(`arg 0
-arg 1
-addr %s
-ed25519verify_bare`, pkStr), v)
+ ops := testProg(t, "arg 0; arg 1; arg 2; ed25519verify_bare", v)
require.NoError(t, err)
sig := c.SignBytes(data)
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
- txn.Lsig.Args = [][]byte{data[:], sig[:]}
+ txn.Lsig.Args = [][]byte{data[:], sig[:], c.SignatureVerifier[:]}
testLogicBytes(t, ops.Program, defaultSigParams(txn))
// short sig will fail
- txn.Lsig.Args[1] = sig[1:]
+ txn.Lsig.Args = [][]byte{data[:], sig[1:], c.SignatureVerifier[:]}
testLogicBytes(t, ops.Program, defaultSigParams(txn), "invalid signature")
+ // short pk will fail
+ txn.Lsig.Args = [][]byte{data[:], sig[:], c.SignatureVerifier[1:]}
+ testLogicBytes(t, ops.Program, defaultSigParams(txn), "invalid public key")
+
// flip a bit and it should not pass
- msg1 := "52fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd"
+ msg1 := "5" + msg[1:]
data1, err := hex.DecodeString(msg1)
require.NoError(t, err)
- txn.Lsig.Args = [][]byte{data1, sig[:]}
+ txn.Lsig.Args = [][]byte{data1, sig[:], c.SignatureVerifier[:]}
testLogicBytes(t, ops.Program, defaultSigParams(txn), "REJECT")
})
}
@@ -360,15 +358,7 @@ byte 0x%s
}
// ecdsa verify tests
- source = `
-byte "%s"
-sha512_256
-byte 0x%s
-byte 0x%s
-byte 0x%s
-byte 0x%s
-ecdsa_verify Secp256k1
-`
+ source = `byte "%s"; sha512_256; byte 0x%s; byte 0x%s; byte 0x%s; byte 0x%s; ecdsa_verify Secp256k1`
data := []byte("testdata")
msg := sha512.Sum512_256(data)
@@ -403,6 +393,9 @@ ecdsa_verify Secp256k1
})
}
+ // coverage for pk length check
+ testPanics(t, `int 31; bzero; byte 0x; byte 0x; byte 0x; byte 0x; ecdsa_verify Secp256k1`, 5, "must be 32")
+
// ecdsa recover tests
source = `
byte 0x%s
@@ -434,7 +427,10 @@ load 1
{v, testAccepts},
{v ^ 1, testRejects},
{3, func(t *testing.T, program string, introduced uint64) {
- testPanics(t, program, introduced)
+ testPanics(t, program, introduced, "recover failed")
+ }},
+ {4, func(t *testing.T, program string, introduced uint64) {
+ testPanics(t, program, introduced, "invalid recovery id")
}},
}
pkExpanded := secp256k1.S256().Marshal(key.PublicKey.X, key.PublicKey.Y)
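
The Secp256r1 branch of opEcdsaVerify is plain crypto/ecdsa over P-256, with the documented requirement that the signed data be a 32-byte digest. A self-contained round trip under those assumptions:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha512"
	"fmt"
	"math/big"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	msg := sha512.Sum512_256([]byte("testdata")) // 32-byte digest, as the opcode requires
	r, s, _ := ecdsa.Sign(rand.Reader, key, msg[:])

	// Recompose (r, s) from byte form, as the opcode does from stack args.
	r2 := new(big.Int).SetBytes(r.Bytes())
	s2 := new(big.Int).SetBytes(s.Bytes())
	fmt.Println(ecdsa.Verify(&key.PublicKey, msg[:], r2, s2)) // true
}
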
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index ad4f1674b..c060d8262 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -343,10 +343,11 @@ func OpDocExtra(opName string) string {
// here is the order args opcodes are presented, so place related
// opcodes consecutively, even if their opcode values are not.
var OpGroups = map[string][]string{
- "Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "ec_add", "ec_scalar_mul", "ec_pairing_check", "ec_multi_scalar_mul", "ec_subgroup_check", "ec_map_to", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"},
- "Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"},
+ "Arithmetic": {"+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw"},
+ "Byte Array Manipulation": {"getbit", "setbit", "getbyte", "setbyte", "concat", "len", "substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"},
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
+ "Cryptography": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "ec_add", "ec_scalar_mul", "ec_pairing_check", "ec_multi_scalar_mul", "ec_subgroup_check", "ec_map_to"},
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "pushints", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "pushbytess", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
"Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch", "match"},
"State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "log", "block"},
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index 300c0ffdb..9c9dad4ec 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -51,9 +51,9 @@ func TestOpGroupCoverage(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- opsSeen := make(map[string]bool, len(OpSpecs))
+ opsSeen := make(map[string]int, len(OpSpecs))
for _, op := range OpSpecs {
- opsSeen[op.Name] = false
+ opsSeen[op.Name] = 0
}
for _, names := range OpGroups {
for _, name := range names {
@@ -62,13 +62,16 @@ func TestOpGroupCoverage(t *testing.T) {
t.Errorf("op %#v in group list but not in OpSpecs\n", name)
continue
}
- opsSeen[name] = true
+ opsSeen[name]++
}
}
for name, seen := range opsSeen {
- if !seen {
+ if seen == 0 {
t.Errorf("op %#v not in any group of OpGroups\n", name)
}
+ if seen > 1 {
+ t.Errorf("op %#v in %d groups of OpGroups\n", name, seen)
+ }
}
}
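
Counting group memberships instead of flagging them catches an op listed twice, which matters now that the hashing and signature ops moved out of "Arithmetic" into the new "Cryptography" group. The pattern in isolation:

package main

import "fmt"

// Exactly-once membership check, as in TestOpGroupCoverage: count
// appearances instead of a boolean so duplicates are caught too.
func main() {
	groups := map[string][]string{
		"Cryptography":            {"sha256", "ed25519verify"},
		"Byte Array Manipulation": {"len", "concat", "len"}, // "len" duplicated on purpose
	}
	seen := map[string]int{"sha256": 0, "ed25519verify": 0, "len": 0, "concat": 0}
	for _, names := range groups {
		for _, name := range names {
			seen[name]++
		}
	}
	for name, n := range seen {
		if n == 0 {
			fmt.Printf("op %q not in any group\n", name)
		}
		if n > 1 {
			fmt.Printf("op %q in %d groups\n", name, n)
		}
	}
}
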
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index ad4acd69d..2db51b24a 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -18,10 +18,6 @@ package logic
import (
"bytes"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/sha256"
- "crypto/sha512"
"encoding/base64"
"encoding/binary"
"encoding/hex"
@@ -34,12 +30,10 @@ import (
"runtime"
"strings"
- "golang.org/x/crypto/sha3"
"golang.org/x/exp/slices"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/crypto/secp256k1"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
@@ -757,6 +751,10 @@ var (
StackAddress = NewStackType(avmBytes, static(32), "address")
// StackBytes32 represents a bytestring that should have exactly 32 bytes
StackBytes32 = NewStackType(avmBytes, static(32), "[32]byte")
+ // StackBytes64 represents a bytestring that should have exactly 64 bytes
+ StackBytes64 = NewStackType(avmBytes, static(64), "[64]byte")
+ // StackBytes80 represents a bytestring that should have exactly 80 bytes
+ StackBytes80 = NewStackType(avmBytes, static(80), "[80]byte")
// StackBigInt represents a bytestring that should be treated like an int
StackBigInt = NewStackType(avmBytes, bound(0, maxByteMathSize), "bigint")
// StackMethodSelector represents a bytestring that should be treated like a method selector
@@ -782,7 +780,9 @@ var (
'A': StackAddress,
'I': StackBigInt,
'T': StackBoolean,
- 'H': StackBytes32,
+ '3': StackBytes32,
+ '6': StackBytes64,
+ '8': StackBytes80,
'M': StackMethodSelector,
'K': StackStateKey,
'N': StackBoxName,
@@ -1628,45 +1628,6 @@ func opSelect(cx *EvalContext) error {
return nil
}
-func opSHA256(cx *EvalContext) error {
- last := len(cx.Stack) - 1
- hash := sha256.Sum256(cx.Stack[last].Bytes)
- cx.Stack[last].Bytes = hash[:]
- return nil
-}
-
-// The NIST SHA3-256 is implemented for compatibility with ICON
-func opSHA3_256(cx *EvalContext) error {
- last := len(cx.Stack) - 1
- hash := sha3.Sum256(cx.Stack[last].Bytes)
- cx.Stack[last].Bytes = hash[:]
- return nil
-}
-
-// The Keccak256 variant of SHA-3 is implemented for compatibility with Ethereum
-func opKeccak256(cx *EvalContext) error {
- last := len(cx.Stack) - 1
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(cx.Stack[last].Bytes)
- hv := make([]byte, 0, hasher.Size())
- hv = hasher.Sum(hv)
- cx.Stack[last].Bytes = hv
- return nil
-}
-
-// This is the hash commonly used in Algorand in crypto/util.go Hash()
-//
-// It is explicitly implemented here in terms of the specific hash for
-// stability and portability in case the rest of Algorand ever moves
-// to a different default hash. For stability of this language, at
-// that time a new opcode should be made with the new hash.
-func opSHA512_256(cx *EvalContext) error {
- last := len(cx.Stack) - 1
- hash := sha512.Sum512_256(cx.Stack[last].Bytes)
- cx.Stack[last].Bytes = hash[:]
- return nil
-}
-
func opPlus(cx *EvalContext) error {
last := len(cx.Stack) - 1
prev := last - 1
@@ -3679,247 +3640,6 @@ func opGlobal(cx *EvalContext) error {
return nil
}
-// Msg is data meant to be signed and then verified with the
-// ed25519verify opcode.
-type Msg struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- ProgramHash crypto.Digest `codec:"p"`
- Data []byte `codec:"d"`
-}
-
-// ToBeHashed implements crypto.Hashable
-func (msg Msg) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.ProgramData, append(msg.ProgramHash[:], msg.Data...)
-}
-
-// programHash lets us lazily compute H(cx.program)
-func (cx *EvalContext) programHash() crypto.Digest {
- if cx.programHashCached == (crypto.Digest{}) {
- cx.programHashCached = crypto.HashObj(Program(cx.program))
- }
- return cx.programHashCached
-}
-
-func opEd25519Verify(cx *EvalContext) error {
- last := len(cx.Stack) - 1 // index of PK
- prev := last - 1 // index of signature
- pprev := prev - 1 // index of data
-
- var sv crypto.SignatureVerifier
- if len(cx.Stack[last].Bytes) != len(sv) {
- return errors.New("invalid public key")
- }
- copy(sv[:], cx.Stack[last].Bytes)
-
- var sig crypto.Signature
- if len(cx.Stack[prev].Bytes) != len(sig) {
- return errors.New("invalid signature")
- }
- copy(sig[:], cx.Stack[prev].Bytes)
-
- msg := Msg{ProgramHash: cx.programHash(), Data: cx.Stack[pprev].Bytes}
- cx.Stack[pprev] = boolToSV(sv.Verify(msg, sig))
- cx.Stack = cx.Stack[:prev]
- return nil
-}
-
-func opEd25519VerifyBare(cx *EvalContext) error {
- last := len(cx.Stack) - 1 // index of PK
- prev := last - 1 // index of signature
- pprev := prev - 1 // index of data
-
- var sv crypto.SignatureVerifier
- if len(cx.Stack[last].Bytes) != len(sv) {
- return errors.New("invalid public key")
- }
- copy(sv[:], cx.Stack[last].Bytes)
-
- var sig crypto.Signature
- if len(cx.Stack[prev].Bytes) != len(sig) {
- return errors.New("invalid signature")
- }
- copy(sig[:], cx.Stack[prev].Bytes)
-
- cx.Stack[pprev] = boolToSV(sv.VerifyBytes(cx.Stack[pprev].Bytes, sig))
- cx.Stack = cx.Stack[:prev]
- return nil
-}
-
-func leadingZeros(size int, b *big.Int) ([]byte, error) {
- byteLength := (b.BitLen() + 7) / 8
- if size < byteLength {
- return nil, fmt.Errorf("insufficient buffer size: %d < %d", size, byteLength)
- }
- buf := make([]byte, size)
- b.FillBytes(buf)
- return buf, nil
-}
-
-var ecdsaVerifyCosts = []int{
- Secp256k1: 1700,
- Secp256r1: 2500,
-}
-
-var secp256r1 = elliptic.P256()
-
-func opEcdsaVerify(cx *EvalContext) error {
- ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
- if !ok || fs.version > cx.version {
- return fmt.Errorf("invalid curve %d", ecdsaCurve)
- }
-
- if fs.field != Secp256k1 && fs.field != Secp256r1 {
- return fmt.Errorf("unsupported curve %d", fs.field)
- }
-
- last := len(cx.Stack) - 1 // index of PK y
- prev := last - 1 // index of PK x
- pprev := prev - 1 // index of signature s
- fourth := pprev - 1 // index of signature r
- fifth := fourth - 1 // index of data
-
- pkY := cx.Stack[last].Bytes
- pkX := cx.Stack[prev].Bytes
- sigS := cx.Stack[pprev].Bytes
- sigR := cx.Stack[fourth].Bytes
- msg := cx.Stack[fifth].Bytes
-
- if len(msg) != 32 {
- return fmt.Errorf("the signed data must be 32 bytes long, not %d", len(msg))
- }
-
- x := new(big.Int).SetBytes(pkX)
- y := new(big.Int).SetBytes(pkY)
-
- var result bool
- if fs.field == Secp256k1 {
- signature := make([]byte, 0, len(sigR)+len(sigS))
- signature = append(signature, sigR...)
- signature = append(signature, sigS...)
-
- pubkey := secp256k1.S256().Marshal(x, y)
- result = secp256k1.VerifySignature(pubkey, msg, signature)
- } else if fs.field == Secp256r1 {
- if !cx.Proto.EnablePrecheckECDSACurve || secp256r1.IsOnCurve(x, y) {
- pubkey := ecdsa.PublicKey{
- Curve: secp256r1,
- X: x,
- Y: y,
- }
- r := new(big.Int).SetBytes(sigR)
- s := new(big.Int).SetBytes(sigS)
- result = ecdsa.Verify(&pubkey, msg, r, s)
- }
- }
-
- cx.Stack[fifth] = boolToSV(result)
- cx.Stack = cx.Stack[:fourth]
- return nil
-}
-
-var ecdsaDecompressCosts = []int{
- Secp256k1: 650,
- Secp256r1: 2400,
-}
-
-func opEcdsaPkDecompress(cx *EvalContext) error {
- ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
- if !ok || fs.version > cx.version {
- return fmt.Errorf("invalid curve %d", ecdsaCurve)
- }
-
- if fs.field != Secp256k1 && fs.field != Secp256r1 {
- return fmt.Errorf("unsupported curve %d", fs.field)
- }
-
- last := len(cx.Stack) - 1 // compressed PK
-
- pubkey := cx.Stack[last].Bytes
- var x, y *big.Int
- if fs.field == Secp256k1 {
- x, y = secp256k1.DecompressPubkey(pubkey)
- if x == nil {
- return fmt.Errorf("invalid pubkey")
- }
- } else if fs.field == Secp256r1 {
- x, y = elliptic.UnmarshalCompressed(elliptic.P256(), pubkey)
- if x == nil {
- return fmt.Errorf("invalid compressed pubkey")
- }
- }
-
- var err error
- cx.Stack[last].Uint = 0
- cx.Stack[last].Bytes, err = leadingZeros(32, x)
- if err != nil {
- return fmt.Errorf("x component zeroing failed: %w", err)
- }
-
- var sv stackValue
- sv.Bytes, err = leadingZeros(32, y)
- if err != nil {
- return fmt.Errorf("y component zeroing failed: %w", err)
- }
-
- cx.Stack = append(cx.Stack, sv)
- return nil
-}
-
-func opEcdsaPkRecover(cx *EvalContext) error {
- ecdsaCurve := EcdsaCurve(cx.program[cx.pc+1])
- fs, ok := ecdsaCurveSpecByField(ecdsaCurve)
- if !ok || fs.version > cx.version {
- return fmt.Errorf("invalid curve %d", ecdsaCurve)
- }
-
- if fs.field != Secp256k1 {
- return fmt.Errorf("unsupported curve %d", fs.field)
- }
-
- last := len(cx.Stack) - 1 // index of signature s
- prev := last - 1 // index of signature r
- pprev := prev - 1 // index of recovery id
- fourth := pprev - 1 // index of data
-
- sigS := cx.Stack[last].Bytes
- sigR := cx.Stack[prev].Bytes
- recid := cx.Stack[pprev].Uint
- msg := cx.Stack[fourth].Bytes
-
- if recid > 3 {
- return fmt.Errorf("invalid recovery id: %d", recid)
- }
-
- signature := make([]byte, 0, len(sigR)+len(sigS)+1)
- signature = append(signature, sigR...)
- signature = append(signature, sigS...)
- signature = append(signature, uint8(recid))
-
- pk, err := secp256k1.RecoverPubkey(msg, signature)
- if err != nil {
- return fmt.Errorf("pubkey recover failed: %s", err.Error())
- }
- x, y := secp256k1.S256().Unmarshal(pk)
- if x == nil {
- return fmt.Errorf("pubkey unmarshal failed")
- }
-
- cx.Stack[fourth].Uint = 0
- cx.Stack[fourth].Bytes, err = leadingZeros(32, x)
- if err != nil {
- return fmt.Errorf("x component zeroing failed: %s", err.Error())
- }
- cx.Stack[pprev].Uint = 0
- cx.Stack[pprev].Bytes, err = leadingZeros(32, y)
- if err != nil {
- return fmt.Errorf("y component zeroing failed: %s", err.Error())
- }
- cx.Stack = cx.Stack[:prev]
- return nil
-}
-
func opLoad(cx *EvalContext) error {
n := cx.program[cx.pc+1]
cx.Stack = append(cx.Stack, cx.Scratch[n])
@@ -5819,54 +5539,6 @@ func opItxnSubmit(cx *EvalContext) (err error) {
return nil
}
-type rawMessage []byte
-
-func (rm rawMessage) ToBeHashed() (protocol.HashID, []byte) {
- return "", []byte(rm)
-}
-
-func opVrfVerify(cx *EvalContext) error {
- last := len(cx.Stack) - 1 // PK
- prev := last - 1 // proof
- pprev := prev - 1 // data
-
- data := rawMessage(cx.Stack[pprev].Bytes)
- proofbytes := cx.Stack[prev].Bytes
- var proof crypto.VrfProof
- if len(proofbytes) != len(proof) {
- return fmt.Errorf("vrf proof wrong size %d != %d", len(proofbytes), len(proof))
- }
- copy(proof[:], proofbytes[:])
-
- pubkeybytes := cx.Stack[last].Bytes
- var pubkey crypto.VrfPubkey
- if len(pubkeybytes) != len(pubkey) {
- return fmt.Errorf("vrf pubkey wrong size %d != %d", len(pubkeybytes), len(pubkey))
- }
- copy(pubkey[:], pubkeybytes[:])
-
- var verified bool
- var output []byte
- std := VrfStandard(cx.program[cx.pc+1])
- ss, ok := vrfStandardSpecByField(std)
- if !ok || ss.version > cx.version {
- return fmt.Errorf("invalid VRF standard %s", std)
- }
- switch std {
- case VrfAlgorand:
- var out crypto.VrfOutput
- verified, out = pubkey.Verify(proof, data)
- output = out[:]
- default:
- return fmt.Errorf("unsupported vrf_verify standard %s", std)
- }
-
- cx.Stack[pprev].Bytes = output[:]
- cx.Stack[prev] = boolToSV(verified)
- cx.Stack = cx.Stack[:last] // pop 1 because we take 3 args and return 2
- return nil
-}
-
// availableRound checks to see if the requested round, `r`, is allowed to be
// accessed. If it is, it's returned as a basics.Round. It is named by analogy
// to the availableAsset and availableApp helpers.
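
StackBytes64 and StackBytes80 join StackBytes32 as statically bounded byte types, and the abbreviation map trades 'H' for the digit codes '3', '6', and '8'. A sketch of the bound check implied by NewStackType(avmBytes, static(N), "[N]byte"); the stackType shape here is an assumption for illustration, not go-algorand's actual struct:

package main

import "fmt"

// stackType sketches a statically bounded byte type: a [min, max]
// length bound with min == max for fixed-size values.
type stackType struct {
	Name  string
	Bound [2]uint64
}

func static(n uint64) [2]uint64 { return [2]uint64{n, n} }

func (st stackType) fits(b []byte) bool {
	n := uint64(len(b))
	return n >= st.Bound[0] && n <= st.Bound[1]
}

func main() {
	bytes64 := stackType{"[64]byte", static(64)}
	fmt.Println(bytes64.fits(make([]byte, 64))) // true
	fmt.Println(bytes64.fits(make([]byte, 63))) // false
}
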
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 5f2a5970b..436643e4c 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -299,7 +299,7 @@ log
// check that ed25519verify and arg is not allowed in stateful mode between v2-v4
disallowedV4 := []string{
- "byte 0x01\nbyte 0x01\nbyte 0x01\ned25519verify",
+ "byte 0x01; int 32; bzero; int 64; bzero; ed25519verify",
"arg 0",
"arg_0",
"arg_1",
@@ -3001,11 +3001,23 @@ func TestReturnTypes(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- // Ensure all opcodes return values they are supposed to according to the OpSpecs table
- typeToArg := map[avmType]string{
- avmUint64: "int 1\n",
- avmAny: "int 1\n",
- avmBytes: "byte 0x33343536\n", // Which is the string "3456"
+ // Generate a plausible (and consistent) value for a given StackType
+ typeToArg := func(t StackType) string {
+ switch t.AVMType {
+ case avmUint64:
+ if t.Bound[0] > 0 {
+ return fmt.Sprintf("int %d\n", t.Bound[0])
+ }
+ return "int 1\n"
+ case avmAny:
+ return "int 1\n"
+ case avmBytes:
+ if t.Bound[0] > 0 {
+ return fmt.Sprintf("byte 0x%s\n", strings.Repeat("33", int(t.Bound[0])))
+ }
+ return "byte 0x33343536\n" // Which is the string "3456"
+ }
+ panic(t)
}
// We try to form a snippet that will test every opcode, by sandwiching it
@@ -3016,11 +3028,6 @@ func TestReturnTypes(t *testing.T) {
// opcodes that need to set up their own stack inputs, a ": at the front of
// the string means "start with an empty stack".
specialCmd := map[string]string{
- "txn": "txn Sender",
- "txna": "txna ApplicationArgs 0",
- "gtxn": "gtxn 0 Sender",
- "gtxna": "gtxna 0 ApplicationArgs 0",
- "global": "global MinTxnFee",
"gaids": ": int 0; gaids",
"gloads": ": int 0; gloads 0", // Needs txn index = 0 to work
"gloadss": ": int 0; int 1; gloadss", // Needs txn index = 0 to work
@@ -3039,11 +3046,8 @@ func TestReturnTypes(t *testing.T) {
"extract_uint64": ": byte 0x010203040506070809; int 1; extract_uint64",
"replace2": ": byte 0x0102030405; byte 0x0809; replace2 2",
"replace3": ": byte 0x0102030405; int 2; byte 0x0809; replace3",
- "gtxns": "gtxns Sender",
"gtxnsa": ": int 0; gtxnsa ApplicationArgs 0",
"extract": "extract 0 2",
- "txnas": "txnas ApplicationArgs",
- "gtxnas": "gtxnas 0 ApplicationArgs",
"gtxnsas": ": int 0; int 0; gtxnsas ApplicationArgs",
"divw": ": int 1; int 2; int 3; divw",
@@ -3073,10 +3077,7 @@ func TestReturnTypes(t *testing.T) {
"gitxna": "itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; gitxna 0 Accounts 0",
"gitxnas": ": itxn_begin; int pay; itxn_field TypeEnum; itxn_submit; int 0; gitxnas 0 Accounts",
- "base64_decode": `: byte "YWJjMTIzIT8kKiYoKSctPUB+"; base64_decode StdEncoding`,
- "json_ref": `: byte "{\"k\": 7}"; byte "k"; json_ref JSONUint64`,
-
- "block": "block BlkSeed",
+ "json_ref": `: byte "{\"k\": 7}"; byte "k"; json_ref JSONUint64`,
"proto": "callsub p; p: proto 0 3",
"bury": ": int 1; int 2; int 3; bury 2; pop; pop;",
@@ -3097,14 +3098,8 @@ func TestReturnTypes(t *testing.T) {
"err": true,
"return": true,
- "ed25519verify": true,
- "ed25519verify_bare": true,
- "ecdsa_verify": true,
- "ecdsa_pk_recover": true,
"ecdsa_pk_decompress": true,
- "vrf_verify": true,
-
"frame_dig": true, // would need a "proto" subroutine
"frame_bury": true, // would need a "proto" subroutine
@@ -3142,32 +3137,42 @@ func TestReturnTypes(t *testing.T) {
}
} else {
for _, imm := range spec.OpDetails.Immediates {
- switch imm.kind {
- case immByte:
- cmd += " 0"
- case immInt8:
- cmd += " -2"
- case immInt:
- cmd += " 10"
- case immInts:
- cmd += " 11 12 13"
- case immBytes:
- cmd += " 0x123456"
- case immBytess:
- cmd += " 0x12 0x34 0x56"
- case immLabel:
- cmd += " done; done: ;"
- case immLabels:
- cmd += " done1 done2; done1: ; done2: ;"
- default:
- require.Fail(t, "bad immediate", "%s", imm)
+ if imm.Group != nil {
+ for _, name := range imm.Group.Names {
+ // missing names exist because of array vs normal opcodes
+ if name != "" {
+ cmd += " " + name
+ break
+ }
+ }
+ } else {
+ switch imm.kind {
+ case immByte:
+ cmd += " 0"
+ case immInt8:
+ cmd += " -2"
+ case immInt:
+ cmd += " 10"
+ case immInts:
+ cmd += " 11 12 13"
+ case immBytes:
+ cmd += " 0x123456"
+ case immBytess:
+ cmd += " 0x12 0x34 0x56"
+ case immLabel:
+ cmd += " done; done: ;"
+ case immLabels:
+ cmd += " done1 done2; done1: ; done2: ;"
+ default:
+ require.Fail(t, "bad immediate", "%s", imm)
+ }
}
}
}
var sb strings.Builder
if provideStackInput {
for _, t := range spec.Arg.Types {
- sb.WriteString(typeToArg[t.AVMType])
+ sb.WriteString(typeToArg(t))
}
}
sb.WriteString(cmd + "\n")
@@ -3178,7 +3183,7 @@ func TestReturnTypes(t *testing.T) {
tx0.Txn.ApplicationID = 300
tx0.Txn.ForeignApps = []basics.AppIndex{300}
tx0.Txn.ForeignAssets = []basics.AssetIndex{400}
- tx0.Txn.Boxes = []transactions.BoxRef{{Name: []byte("3456")}}
+ tx0.Txn.Boxes = []transactions.BoxRef{{Name: []byte("3")}} // The arg given for boxName type
tx0.Lsig.Args = [][]byte{
[]byte("aoeu"),
[]byte("aoeu"),
@@ -3218,12 +3223,13 @@ func TestReturnTypes(t *testing.T) {
ledger.NewAccount(appAddr(300), 1000000)
// these allow the box_* opcodes to work
- ledger.CreateBox(300, "3456", 10)
+ ledger.CreateBox(300, "3", 10)
// We are running gi=1, but we never ran gi=0. Set things up as
// if we did, so they can be accessed with gtxn, gload, gaid
aep.pastScratch[0] = &scratchSpace{}
aep.TxnGroup[0].ConfigAsset = 100
+ *aep.PooledApplicationBudget = 10_000 // so we can run verifies
var cx *EvalContext
if m == ModeApp {
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 230ad8c64..fd8b9ff80 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -395,6 +395,8 @@ func TestWrongProtoVersion(t *testing.T) {
}
}
+// TestBlankStackSufficient will fail if an opcode is added with more than the
+// current max number of stack arguments. Update `blankStack` to be longer.
func TestBlankStackSufficient(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/data/transactions/logic/jsonspec.md b/data/transactions/logic/jsonspec.md
index 817c01ece..0df95373f 100644
--- a/data/transactions/logic/jsonspec.md
+++ b/data/transactions/logic/jsonspec.md
@@ -50,7 +50,7 @@ Duplicate keys at the top level result in an error; however, duplicate keys nest
#### Special Values
- `null`, `true`, `false` are the only accepted special values.
-- other spcial values such as `NaN`,`+Inf`,`-Inf` are not accepted
+- other special values such as `NaN`,`+Inf`,`-Inf` are not accepted
#### Exponential Notation
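
For reference, Go's encoding/json enforces the same rule; json_ref uses its own parser, but the standard library likewise accepts null/true/false and rejects NaN and the infinities:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v interface{}
	fmt.Println(json.Unmarshal([]byte(`null`), &v)) // <nil>
	fmt.Println(json.Unmarshal([]byte(`NaN`), &v))  // invalid character 'N' looking for beginning of value
}
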
diff --git a/data/transactions/logic/langspec_v1.json b/data/transactions/logic/langspec_v1.json
index e88de1429..b659e662a 100644
--- a/data/transactions/logic/langspec_v1.json
+++ b/data/transactions/logic/langspec_v1.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,7 +200,7 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -435,7 +453,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
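
The NamedTypes arrays in every langspec file are now sorted by name and carry the new fixed-size entries with their digit abbreviations. A sketch of decoding one entry, with the struct shape inferred from the JSON above:

package main

import (
	"encoding/json"
	"fmt"
)

// namedType mirrors the NamedTypes entries in the langspec JSON files.
type namedType struct {
	Name         string
	Abbreviation string
	Bound        [2]uint64
	AVMType      string
}

func main() {
	src := `[{"Name": "[64]byte", "Abbreviation": "6", "Bound": [64, 64], "AVMType": "[]byte"}]`
	var types []namedType
	if err := json.Unmarshal([]byte(src), &types); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", types[0]) // {Name:[64]byte Abbreviation:6 Bound:[64 64] AVMType:[]byte}
}
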
diff --git a/data/transactions/logic/langspec_v10.json b/data/transactions/logic/langspec_v10.json
index de938ffb7..7ee54fb98 100644
--- a/data/transactions/logic/langspec_v10.json
+++ b/data/transactions/logic/langspec_v10.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,14 +200,14 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 5,
"Name": "ecdsa_verify",
"Args": [
- "[]byte",
+ "[32]byte",
"[]byte",
"[]byte",
"[]byte",
@@ -216,7 +234,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -247,17 +265,17 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 7,
"Name": "ecdsa_pk_recover",
"Args": [
- "[]byte",
+ "[32]byte",
"uint64",
- "[]byte",
- "[]byte"
+ "[32]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -281,7 +299,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -534,7 +552,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2188,7 +2206,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2255,7 +2273,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2275,7 +2293,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2293,7 +2311,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2312,7 +2330,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -3009,8 +3027,8 @@
"Name": "ed25519verify_bare",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -3020,7 +3038,7 @@
"Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -3319,7 +3337,7 @@
"Doc": "SHA3_256 hash of value A, yields [32]byte",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4566,8 +4584,8 @@
"Name": "vrf_verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[80]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -4590,7 +4608,7 @@
],
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4650,7 +4668,7 @@
],
"IntroducedVersion": 10,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4677,7 +4695,7 @@
],
"IntroducedVersion": 10,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4704,7 +4722,7 @@
],
"IntroducedVersion": 10,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4731,7 +4749,7 @@
],
"IntroducedVersion": 10,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4756,7 +4774,7 @@
],
"IntroducedVersion": 10,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4782,7 +4800,7 @@
],
"IntroducedVersion": 10,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
}
]
diff --git a/data/transactions/logic/langspec_v2.json b/data/transactions/logic/langspec_v2.json
index b0319cf99..b518b1a42 100644
--- a/data/transactions/logic/langspec_v2.json
+++ b/data/transactions/logic/langspec_v2.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,7 +200,7 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -435,7 +453,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1422,7 +1440,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
diff --git a/data/transactions/logic/langspec_v3.json b/data/transactions/logic/langspec_v3.json
index e3fe6e493..a8bc6df0d 100644
--- a/data/transactions/logic/langspec_v3.json
+++ b/data/transactions/logic/langspec_v3.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,7 +200,7 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -435,7 +453,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1723,7 +1741,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1790,7 +1808,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1810,7 +1828,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1828,7 +1846,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1847,7 +1865,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
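
Note: the NamedTypes reshuffle above repeats verbatim in each langspec_v*.json that follows: entries are now sorted by name, `[32]byte` trades its old `H` abbreviation for `3`, and fixed-width `[64]byte` (`6`) and `[80]byte` (`8`) entries are added so opcode signatures can pin exact lengths. A minimal sketch of how such one-character abbreviations expand into type names; `expand` and its table are a hypothetical helper for illustration, not code from the repo:

    package main

    import "fmt"

    // abbrevToName maps langspec one-character type abbreviations to type
    // names, following the NamedTypes tables above (subset, for illustration).
    var abbrevToName = map[rune]string{
    	'3': "[32]byte", '6': "[64]byte", '8': "[80]byte",
    	'b': "[]byte", 'i': "uint64", 'a': "any", 'x': "none",
    	'A': "address", 'I': "bigint", 'K': "stateKey",
    	'M': "method", 'N': "boxName", 'T': "bool",
    }

    // expand turns an abbreviation string such as "b63" into type names.
    func expand(s string) []string {
    	out := make([]string, 0, len(s))
    	for _, r := range s {
    		out = append(out, abbrevToName[r])
    	}
    	return out
    }

    func main() {
    	// ed25519verify's new argument list: data, signature, public key.
    	fmt.Println(expand("b63")) // [[]byte [64]byte [32]byte]
    }
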
diff --git a/data/transactions/logic/langspec_v4.json b/data/transactions/logic/langspec_v4.json
index 9ffb5f741..ffc428822 100644
--- a/data/transactions/logic/langspec_v4.json
+++ b/data/transactions/logic/langspec_v4.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,7 +200,7 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -435,7 +453,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1845,7 +1863,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1912,7 +1930,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1932,7 +1950,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1950,7 +1968,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -1969,7 +1987,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
diff --git a/data/transactions/logic/langspec_v5.json b/data/transactions/logic/langspec_v5.json
index 9c336c7d8..2b946629c 100644
--- a/data/transactions/logic/langspec_v5.json
+++ b/data/transactions/logic/langspec_v5.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,14 +200,14 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 5,
"Name": "ecdsa_verify",
"Args": [
- "[]byte",
+ "[32]byte",
"[]byte",
"[]byte",
"[]byte",
@@ -215,7 +233,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -245,17 +263,17 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 7,
"Name": "ecdsa_pk_recover",
"Args": [
- "[]byte",
+ "[32]byte",
"uint64",
- "[]byte",
- "[]byte"
+ "[32]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -278,7 +296,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -531,7 +549,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2061,7 +2079,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2128,7 +2146,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2148,7 +2166,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2166,7 +2184,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2185,7 +2203,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
diff --git a/data/transactions/logic/langspec_v6.json b/data/transactions/logic/langspec_v6.json
index 613d38919..20575dae5 100644
--- a/data/transactions/logic/langspec_v6.json
+++ b/data/transactions/logic/langspec_v6.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,14 +200,14 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 5,
"Name": "ecdsa_verify",
"Args": [
- "[]byte",
+ "[32]byte",
"[]byte",
"[]byte",
"[]byte",
@@ -215,7 +233,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -245,17 +263,17 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 7,
"Name": "ecdsa_pk_recover",
"Args": [
- "[]byte",
+ "[32]byte",
"uint64",
- "[]byte",
- "[]byte"
+ "[32]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -278,7 +296,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -531,7 +549,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2079,7 +2097,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2146,7 +2164,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2166,7 +2184,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2184,7 +2202,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2203,7 +2221,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
diff --git a/data/transactions/logic/langspec_v7.json b/data/transactions/logic/langspec_v7.json
index cb3664eb3..d8be33960 100644
--- a/data/transactions/logic/langspec_v7.json
+++ b/data/transactions/logic/langspec_v7.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,14 +200,14 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 5,
"Name": "ecdsa_verify",
"Args": [
- "[]byte",
+ "[32]byte",
"[]byte",
"[]byte",
"[]byte",
@@ -216,7 +234,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -247,17 +265,17 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 7,
"Name": "ecdsa_pk_recover",
"Args": [
- "[]byte",
+ "[32]byte",
"uint64",
- "[]byte",
- "[]byte"
+ "[32]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -281,7 +299,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -534,7 +552,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2124,7 +2142,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2191,7 +2209,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2211,7 +2229,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2229,7 +2247,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2248,7 +2266,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2889,8 +2907,8 @@
"Name": "ed25519verify_bare",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -2900,7 +2918,7 @@
"Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -3093,7 +3111,7 @@
"Doc": "SHA3_256 hash of value A, yields [32]byte",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4216,8 +4234,8 @@
"Name": "vrf_verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[80]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -4240,7 +4258,7 @@
],
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
diff --git a/data/transactions/logic/langspec_v8.json b/data/transactions/logic/langspec_v8.json
index 1cce84389..3b496ddcb 100644
--- a/data/transactions/logic/langspec_v8.json
+++ b/data/transactions/logic/langspec_v8.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,14 +200,14 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 5,
"Name": "ecdsa_verify",
"Args": [
- "[]byte",
+ "[32]byte",
"[]byte",
"[]byte",
"[]byte",
@@ -216,7 +234,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -247,17 +265,17 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 7,
"Name": "ecdsa_pk_recover",
"Args": [
- "[]byte",
+ "[32]byte",
"uint64",
- "[]byte",
- "[]byte"
+ "[32]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -281,7 +299,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -534,7 +552,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2184,7 +2202,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2251,7 +2269,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2271,7 +2289,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2289,7 +2307,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2308,7 +2326,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -3005,8 +3023,8 @@
"Name": "ed25519verify_bare",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -3016,7 +3034,7 @@
"Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -3315,7 +3333,7 @@
"Doc": "SHA3_256 hash of value A, yields [32]byte",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4562,8 +4580,8 @@
"Name": "vrf_verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[80]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -4586,7 +4604,7 @@
],
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
diff --git a/data/transactions/logic/langspec_v9.json b/data/transactions/logic/langspec_v9.json
index 9c4a41c78..c52d36862 100644
--- a/data/transactions/logic/langspec_v9.json
+++ b/data/transactions/logic/langspec_v9.json
@@ -3,46 +3,64 @@
"LogicSigVersion": 9,
"NamedTypes": [
{
- "Name": "uint64",
- "Abbreviation": "i",
+ "Name": "[32]byte",
+ "Abbreviation": "3",
"Bound": [
- 0,
- 18446744073709551615
+ 32,
+ 32
],
- "AVMType": "uint64"
+ "AVMType": "[]byte"
},
{
- "Name": "stateKey",
- "Abbreviation": "K",
+ "Name": "[64]byte",
+ "Abbreviation": "6",
"Bound": [
- 0,
+ 64,
64
],
"AVMType": "[]byte"
},
{
- "Name": "none",
- "Abbreviation": "x",
+ "Name": "[80]byte",
+ "Abbreviation": "8",
+ "Bound": [
+ 80,
+ 80
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
"Bound": [
0,
- 0
+ 4096
],
- "AVMType": "none"
+ "AVMType": "[]byte"
},
{
- "Name": "method",
- "Abbreviation": "M",
+ "Name": "address",
+ "Abbreviation": "A",
"Bound": [
- 4,
- 4
+ 32,
+ 32
],
"AVMType": "[]byte"
},
{
- "Name": "boxName",
- "Abbreviation": "N",
+ "Name": "any",
+ "Abbreviation": "a",
"Bound": [
- 1,
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
64
],
"AVMType": "[]byte"
@@ -57,49 +75,49 @@
"AVMType": "uint64"
},
{
- "Name": "bigint",
- "Abbreviation": "I",
+ "Name": "boxName",
+ "Abbreviation": "N",
"Bound": [
- 0,
+ 1,
64
],
"AVMType": "[]byte"
},
{
- "Name": "any",
- "Abbreviation": "a",
+ "Name": "method",
+ "Abbreviation": "M",
"Bound": [
- 0,
- 0
+ 4,
+ 4
],
- "AVMType": "any"
+ "AVMType": "[]byte"
},
{
- "Name": "address",
- "Abbreviation": "A",
+ "Name": "none",
+ "Abbreviation": "x",
"Bound": [
- 32,
- 32
+ 0,
+ 0
],
- "AVMType": "[]byte"
+ "AVMType": "none"
},
{
- "Name": "[]byte",
- "Abbreviation": "b",
+ "Name": "stateKey",
+ "Abbreviation": "K",
"Bound": [
0,
- 4096
+ 64
],
"AVMType": "[]byte"
},
{
- "Name": "[32]byte",
- "Abbreviation": "H",
+ "Name": "uint64",
+ "Abbreviation": "i",
"Bound": [
- 32,
- 32
+ 0,
+ 18446744073709551615
],
- "AVMType": "[]byte"
+ "AVMType": "uint64"
}
],
"Ops": [
@@ -128,7 +146,7 @@
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -145,7 +163,7 @@
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -162,7 +180,7 @@
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -170,8 +188,8 @@
"Name": "ed25519verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -182,14 +200,14 @@
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 5,
"Name": "ecdsa_verify",
"Args": [
- "[]byte",
+ "[32]byte",
"[]byte",
"[]byte",
"[]byte",
@@ -216,7 +234,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -247,17 +265,17 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
"Opcode": 7,
"Name": "ecdsa_pk_recover",
"Args": [
- "[]byte",
+ "[32]byte",
"uint64",
- "[]byte",
- "[]byte"
+ "[32]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -281,7 +299,7 @@
],
"IntroducedVersion": 5,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -534,7 +552,7 @@
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2184,7 +2202,7 @@
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
"IntroducedVersion": 2,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2251,7 +2269,7 @@
"DocExtra": "see explanation of bit ordering in setbit",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2271,7 +2289,7 @@
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2289,7 +2307,7 @@
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -2308,7 +2326,7 @@
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
"Groups": [
- "Arithmetic"
+ "Byte Array Manipulation"
]
},
{
@@ -3005,8 +3023,8 @@
"Name": "ed25519verify_bare",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[64]byte",
+ "[32]byte"
],
"Returns": [
"bool"
@@ -3016,7 +3034,7 @@
"Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -3315,7 +3333,7 @@
"Doc": "SHA3_256 hash of value A, yields [32]byte",
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
@@ -4562,8 +4580,8 @@
"Name": "vrf_verify",
"Args": [
"[]byte",
- "[]byte",
- "[]byte"
+ "[80]byte",
+ "[32]byte"
],
"Returns": [
"[]byte",
@@ -4586,7 +4604,7 @@
],
"IntroducedVersion": 7,
"Groups": [
- "Arithmetic"
+ "Cryptography"
]
},
{
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index e005d57cf..dc2d44bc0 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -485,17 +485,17 @@ func (spec *OpSpec) deadens() bool {
// assembly-time, with ops.returns()
var OpSpecs = []OpSpec{
{0x00, "err", opErr, proto(":x"), 1, detDefault()},
- {0x01, "sha256", opSHA256, proto("b:H"), 1, costly(7)},
- {0x02, "keccak256", opKeccak256, proto("b:H"), 1, costly(26)},
- {0x03, "sha512_256", opSHA512_256, proto("b:H"), 1, costly(9)},
+ {0x01, "sha256", opSHA256, proto("b:3"), 1, costly(7)},
+ {0x02, "keccak256", opKeccak256, proto("b:3"), 1, costly(26)},
+ {0x03, "sha512_256", opSHA512_256, proto("b:3"), 1, costly(9)},
// Cost of these opcodes increases in AVM version 2 based on measured
// performance. Should be able to run max hashes during stateful TEAL
// and achieve reasonable TPS. Same opcode for different versions
// is OK.
- {0x01, "sha256", opSHA256, proto("b:H"), 2, costly(35)},
- {0x02, "keccak256", opKeccak256, proto("b:H"), 2, costly(130)},
- {0x03, "sha512_256", opSHA512_256, proto("b:H"), 2, costly(45)},
+ {0x01, "sha256", opSHA256, proto("b:3"), 2, costly(35)},
+ {0x02, "keccak256", opKeccak256, proto("b:3"), 2, costly(130)},
+ {0x03, "sha512_256", opSHA512_256, proto("b:3"), 2, costly(45)},
/*
Tabling these changes until we offer unlimited global storage as there
@@ -507,12 +507,12 @@ var OpSpecs = []OpSpec{
{0x03, "sha512_256", opSHA512_256, proto("b:b"), 7, unlimitedStorage, costByLength(17, 5, 8)},
*/
- {0x04, "ed25519verify", opEd25519Verify, proto("bbb:T"), 1, costly(1900).only(ModeSig)},
- {0x04, "ed25519verify", opEd25519Verify, proto("bbb:T"), 5, costly(1900)},
+ {0x04, "ed25519verify", opEd25519Verify, proto("b63:T"), 1, costly(1900).only(ModeSig)},
+ {0x04, "ed25519verify", opEd25519Verify, proto("b63:T"), 5, costly(1900)},
- {0x05, "ecdsa_verify", opEcdsaVerify, proto("bbbbb:T"), 5, costByField("v", &EcdsaCurves, ecdsaVerifyCosts)},
+ {0x05, "ecdsa_verify", opEcdsaVerify, proto("3bbbb:T"), 5, costByField("v", &EcdsaCurves, ecdsaVerifyCosts)},
{0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, proto("b:bb"), 5, costByField("v", &EcdsaCurves, ecdsaDecompressCosts)},
- {0x07, "ecdsa_pk_recover", opEcdsaPkRecover, proto("bibb:bb"), 5, field("v", &EcdsaCurves).costs(2000)},
+ {0x07, "ecdsa_pk_recover", opEcdsaPkRecover, proto("3i33:bb"), 5, field("v", &EcdsaCurves).costs(2000)},
{0x08, "+", opPlus, proto("ii:i"), 1, detDefault()},
{0x09, "-", opMinus, proto("ii:i"), 1, detDefault()},
@@ -645,7 +645,7 @@ var OpSpecs = []OpSpec{
{0x82, "pushbytess", opPushBytess, proto(":", "", "[N items]").stackExplain(opPushBytessStackChange), 8, constants(asmPushBytess, checkByteImmArgs, "bytes ...", immBytess).typed(typePushBytess).trust()},
{0x83, "pushints", opPushInts, proto(":", "", "[N items]").stackExplain(opPushIntsStackChange), 8, constants(asmPushInts, checkIntImmArgs, "uint ...", immInts).typed(typePushInts).trust()},
- {0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:T"), 7, costly(1900)},
+ {0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("b63:T"), 7, costly(1900)},
// "Function oriented"
{0x88, "callsub", opCallSub, proto(":"), 4, detBranch()},
@@ -719,7 +719,7 @@ var OpSpecs = []OpSpec{
{0xc6, "gitxnas", opGitxnas, proto("i:a"), 6, immediates("t", "f").field("f", &TxnArrayFields).only(ModeApp)},
// randomness support
- {0xd0, "vrf_verify", opVrfVerify, proto("bbb:bT"), randomnessVersion, field("s", &VrfStandards).costs(5700)},
+ {0xd0, "vrf_verify", opVrfVerify, proto("b83:bT"), randomnessVersion, field("s", &VrfStandards).costs(5700)},
{0xd1, "block", opBlock, proto("i:a"), randomnessVersion, field("f", &BlockFields)},
{0xe0, "ec_add", opEcAdd, proto("bb:b"), pairingVersion,
diff --git a/data/transactions/logic/program.go b/data/transactions/logic/program.go
index 4568ebe74..85195b904 100644
--- a/data/transactions/logic/program.go
+++ b/data/transactions/logic/program.go
@@ -33,5 +33,5 @@ func (lsl Program) ToBeHashed() (protocol.HashID, []byte) {
// This Digest can be used as an Address for a logic controlled account.
func HashProgram(program []byte) crypto.Digest {
pb := Program(program)
- return crypto.HashObj(&pb)
+ return crypto.HashObj(pb)
}
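
Note: dropping the & in HashProgram is safe because Program declares ToBeHashed on the value receiver (visible in the context line above), so both Program and *Program satisfy crypto's hashable interface; the pointer added an indirection for no benefit. A self-contained illustration of that Go method-set rule, using stand-in types rather than the real crypto package:

    package main

    import "fmt"

    // Hashable mirrors the shape of crypto.Hashable (stand-in for illustration).
    type Hashable interface {
    	ToBeHashed() (string, []byte)
    }

    type Program []byte

    // Value receiver: both Program and *Program satisfy Hashable, because a
    // pointer's method set includes the value-receiver methods of its element.
    func (p Program) ToBeHashed() (string, []byte) { return "Program", p }

    func hashObj(h Hashable) {
    	id, data := h.ToBeHashed()
    	fmt.Println(id, len(data))
    }

    func main() {
    	pb := Program([]byte{0x09}) // a one-byte stand-in program
    	hashObj(pb)                 // value works
    	hashObj(&pb)                // pointer also works; the & added nothing
    }
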
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index f63c91778..56cf7b5dc 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -76,7 +76,7 @@
},
{
"name": "keyword.operator.teal",
- "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|concat|divmodw|divw|ec_add|ec_map_to|ec_multi_scalar_mul|ec_pairing_check|ec_scalar_mul|ec_subgroup_check|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|vrf_verify|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|replace2|replace3|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
+ "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|divmodw|divw|exp|expw|itob|mulw|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|concat|extract|extract3|extract_uint16|extract_uint32|extract_uint64|getbit|getbyte|json_ref|len|replace2|replace3|setbit|setbyte|substring|substring3|ec_add|ec_map_to|ec_multi_scalar_mul|ec_pairing_check|ec_scalar_mul|ec_subgroup_check|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|keccak256|sha256|sha3_256|sha512_256|vrf_verify|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b"
}
]
},
diff --git a/data/txHandler.go b/data/txHandler.go
index a5f44a1d0..4689a497b 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -56,6 +56,7 @@ var transactionMessageTxGroupExcessive = metrics.MakeCounter(metrics.Transaction
var transactionMessageTxGroupFull = metrics.MakeCounter(metrics.TransactionMessageTxGroupFull)
var transactionMessagesDupRawMsg = metrics.MakeCounter(metrics.TransactionMessagesDupRawMsg)
var transactionMessagesDupCanonical = metrics.MakeCounter(metrics.TransactionMessagesDupCanonical)
+var transactionMessagesAppLimiterDrop = metrics.MakeCounter(metrics.TransactionMessagesAppLimiterDrop)
var transactionMessagesBacklogSizeGauge = metrics.MakeGauge(metrics.TransactionMessagesBacklogSize)
var transactionGroupTxSyncHandled = metrics.MakeCounter(metrics.TransactionGroupTxSyncHandled)
@@ -111,23 +112,25 @@ type txBacklogMsg struct {
// TxHandler handles transaction messages
type TxHandler struct {
- txPool *pools.TransactionPool
- ledger *Ledger
- genesisID string
- genesisHash crypto.Digest
- txVerificationPool execpool.BacklogPool
- backlogQueue chan *txBacklogMsg
- postVerificationQueue chan *verify.VerificationResult
- backlogWg sync.WaitGroup
- net network.GossipNode
- msgCache *txSaltedCache
- txCanonicalCache *digestCache
- ctx context.Context
- ctxCancel context.CancelFunc
- streamVerifier *execpool.StreamToBatch
- streamVerifierChan chan execpool.InputJob
- streamVerifierDropped chan *verify.UnverifiedTxnSigJob
- erl *util.ElasticRateLimiter
+ txPool *pools.TransactionPool
+ ledger *Ledger
+ genesisID string
+ genesisHash crypto.Digest
+ txVerificationPool execpool.BacklogPool
+ backlogQueue chan *txBacklogMsg
+ backlogCongestionThreshold float64
+ postVerificationQueue chan *verify.VerificationResult
+ backlogWg sync.WaitGroup
+ net network.GossipNode
+ msgCache *txSaltedCache
+ txCanonicalCache *digestCache
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ streamVerifier *execpool.StreamToBatch
+ streamVerifierChan chan execpool.InputJob
+ streamVerifierDropped chan *verify.UnverifiedTxnSigJob
+ erl *util.ElasticRateLimiter
+ appLimiter *appRateLimiter
}
// TxHandlerOpts is TxHandler configuration options
@@ -178,14 +181,29 @@ func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) {
handler.txCanonicalCache = makeDigestCache(int(opts.Config.TxIncomingFilterMaxSize))
}
- if opts.Config.EnableTxBacklogRateLimiting {
- rateLimiter := util.NewElasticRateLimiter(
- txBacklogSize,
- opts.Config.TxBacklogReservedCapacityPerPeer,
- time.Duration(opts.Config.TxBacklogServiceRateWindowSeconds)*time.Second,
- txBacklogDroppedCongestionManagement,
- )
- handler.erl = rateLimiter
+ if opts.Config.EnableTxBacklogRateLimiting || opts.Config.EnableTxBacklogAppRateLimiting {
+ if opts.Config.TxBacklogRateLimitingCongestionPct > 100 || opts.Config.TxBacklogRateLimitingCongestionPct < 0 {
+ return nil, fmt.Errorf("invalid value for TxBacklogRateLimitingCongestionPct: %d", opts.Config.TxBacklogRateLimitingCongestionPct)
+ }
+ if opts.Config.EnableTxBacklogAppRateLimiting && opts.Config.TxBacklogAppTxRateLimiterMaxSize == 0 {
+ return nil, fmt.Errorf("invalid value for TxBacklogAppTxRateLimiterMaxSize: %d. App rate limiter enabled with zero size", opts.Config.TxBacklogAppTxRateLimiterMaxSize)
+ }
+ handler.backlogCongestionThreshold = float64(opts.Config.TxBacklogRateLimitingCongestionPct) / 100
+ if opts.Config.EnableTxBacklogRateLimiting {
+ handler.erl = util.NewElasticRateLimiter(
+ txBacklogSize,
+ opts.Config.TxBacklogReservedCapacityPerPeer,
+ time.Duration(opts.Config.TxBacklogServiceRateWindowSeconds)*time.Second,
+ txBacklogDroppedCongestionManagement,
+ )
+ }
+ if opts.Config.EnableTxBacklogAppRateLimiting {
+ handler.appLimiter = makeAppRateLimiter(
+ opts.Config.TxBacklogAppTxRateLimiterMaxSize,
+ uint64(opts.Config.TxBacklogAppTxPerSecondRate),
+ time.Duration(opts.Config.TxBacklogServiceRateWindowSeconds)*time.Second,
+ )
+ }
}
// prepare the transaction stream verifier
@@ -578,7 +596,9 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
var err error
var capguard *util.ErlCapacityGuard
+ var congested bool
if handler.erl != nil {
+ congested = float64(cap(handler.backlogQueue))*handler.backlogCongestionThreshold < float64(len(handler.backlogQueue))
// consume a capacity unit
// if the elastic rate limiter cannot vend a capacity, the error it returns
// is sufficient to indicate that we should enable Congestion Control, because
@@ -591,7 +611,7 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
return network.OutgoingMessage{Action: network.Ignore}
}
-		// if the backlog Queue has 50% of its buffer back, turn congestion control off
-		if float64(cap(handler.backlogQueue))*0.5 > float64(len(handler.backlogQueue)) {
+		// if the backlog queue has drained below the congestion threshold, turn congestion control off
+		if !congested {
handler.erl.DisableCongestionControl()
}
}
@@ -640,6 +660,12 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
}
}
+ // rate limit per application in a group. Limiting any app in a group drops the entire message.
+ if handler.appLimiter != nil && congested && handler.appLimiter.shouldDrop(unverifiedTxGroup, rawmsg.Sender.(network.IPAddressable).RoutingAddr()) {
+ transactionMessagesAppLimiterDrop.Inc(nil)
+ return network.OutgoingMessage{Action: network.Ignore}
+ }
+
select {
case handler.backlogQueue <- &txBacklogMsg{
rawmsg: &rawmsg,
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 486bd7c0f..894fef9d4 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -62,6 +62,9 @@ type mockSender struct{}
func (m mockSender) OnClose(func()) {}
+func (m mockSender) IPAddr() []byte { return nil }
+func (m mockSender) RoutingAddr() []byte { return nil }
+
// txHandlerConfig is a subset of tx handler related options from config.Local
type txHandlerConfig struct {
enableFilteringRawMsg bool
@@ -2503,3 +2506,178 @@ func TestTxHandlerRestartWithBacklogAndTxPool(t *testing.T) { //nolint:parallelt
require.False(t, inBad, "invalid transaction accepted")
}
}
+
+// check ERL and AppRateLimiter enablement with separate config values,
+// and that the app limiter only kicks in once the backlog is congested.
+func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ledgerName := fmt.Sprintf("%s-mem", t.Name())
+ const inMem = true
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Panic)
+
+ cfg := config.GetDefaultLocal()
+ cfg.TxBacklogAppTxRateLimiterMaxSize = 100
+ cfg.TxBacklogServiceRateWindowSeconds = 1
+ cfg.TxBacklogAppTxPerSecondRate = 3
+ cfg.TxBacklogReservedCapacityPerPeer = 2
+ cfg.TxBacklogSize = 1
+ cfg.IncomingConnectionsLimit = 1
+ ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, bookkeeping.GenesisBalances{}, genesisID, genesisHash, nil, cfg)
+ require.NoError(t, err)
+ defer ledger.Close()
+
+ l := ledger
+
+ func() {
+ cfg.EnableTxBacklogRateLimiting = false
+ cfg.EnableTxBacklogAppRateLimiting = false
+ handler, err := makeTestTxHandler(l, cfg)
+ require.NoError(t, err)
+ defer handler.txVerificationPool.Shutdown()
+ defer close(handler.streamVerifierDropped)
+
+ require.Nil(t, handler.erl)
+ require.Nil(t, handler.appLimiter)
+ }()
+
+ func() {
+ cfg.EnableTxBacklogRateLimiting = true
+ cfg.EnableTxBacklogAppRateLimiting = false
+ handler, err := makeTestTxHandler(l, cfg)
+ require.NoError(t, err)
+ defer handler.txVerificationPool.Shutdown()
+ defer close(handler.streamVerifierDropped)
+
+ require.NotNil(t, handler.erl)
+ require.Nil(t, handler.appLimiter)
+ }()
+
+ cfg.EnableTxBacklogRateLimiting = true
+ cfg.EnableTxBacklogAppRateLimiting = true
+ handler, err := makeTestTxHandler(l, cfg)
+ require.NoError(t, err)
+ defer handler.txVerificationPool.Shutdown()
+ defer close(handler.streamVerifierDropped)
+ require.NotNil(t, handler.erl)
+ require.NotNil(t, handler.appLimiter)
+
+ var addr basics.Address
+ crypto.RandBytes(addr[:])
+
+ tx := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: addr,
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: 0,
+ LastValid: basics.Round(proto.MaxTxnLife),
+ Note: make([]byte, 2),
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: 1,
+ },
+ }
+ signedTx := tx.Sign(keypair()) // some random key
+ blob := protocol.Encode(&signedTx)
+ sender := mockSender{}
+
+ // submit and ensure it is accepted
+ congested := float64(cap(handler.backlogQueue))*0.5 < float64(len(handler.backlogQueue))
+ require.False(t, congested)
+
+ action := handler.processIncomingTxn(network.IncomingMessage{Data: blob, Sender: sender})
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
+ require.Equal(t, 1, len(handler.backlogQueue))
+
+ // repeat the same txn, we are still not congested
+ congested = float64(cap(handler.backlogQueue))*0.5 < float64(len(handler.backlogQueue))
+ require.False(t, congested)
+
+ signedTx = tx.Sign(keypair())
+ blob = protocol.Encode(&signedTx)
+ action = handler.processIncomingTxn(network.IncomingMessage{Data: blob, Sender: sender})
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
+ require.Equal(t, 2, len(handler.backlogQueue))
+ require.Equal(t, 0, handler.appLimiter.len()) // no rate limiting yet
+
+ congested = float64(cap(handler.backlogQueue))*0.5 < float64(len(handler.backlogQueue))
+ require.True(t, congested)
+
+ // submit it again and the app rate limiter should kick in
+ signedTx = tx.Sign(keypair())
+ blob = protocol.Encode(&signedTx)
+ action = handler.processIncomingTxn(network.IncomingMessage{Data: blob, Sender: sender})
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
+ require.Equal(t, 3, len(handler.backlogQueue))
+
+ require.Equal(t, 1, handler.appLimiter.len())
+}
+
+func TestTxHandlerAppRateLimiter(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ const numUsers = 10
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Panic)
+
+ // prepare the accounts
+ addresses, secrets, genesis := makeTestGenesisAccounts(t, numUsers)
+ genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ ledgerName := fmt.Sprintf("%s-mem", t.Name())
+ const inMem = true
+
+ cfg := config.GetDefaultLocal()
+ cfg.EnableTxBacklogRateLimiting = true
+ cfg.TxBacklogAppTxRateLimiterMaxSize = 100
+ cfg.TxBacklogServiceRateWindowSeconds = 1
+ cfg.TxBacklogAppTxPerSecondRate = 3
+ ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg)
+ require.NoError(t, err)
+ defer ledger.Close()
+
+ l := ledger
+ handler, err := makeTestTxHandler(l, cfg)
+ require.NoError(t, err)
+ defer handler.txVerificationPool.Shutdown()
+ defer close(handler.streamVerifierDropped)
+
+ tx := transactions.Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: transactions.Header{
+ Sender: addresses[0],
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
+ FirstValid: 0,
+ LastValid: basics.Round(proto.MaxTxnLife),
+ Note: make([]byte, 2),
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: 1,
+ },
+ }
+ signedTx := tx.Sign(secrets[1])
+ blob := protocol.Encode(&signedTx)
+
+ action := handler.processIncomingTxn(network.IncomingMessage{Data: blob, Sender: mockSender{}})
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
+ require.Equal(t, 1, len(handler.backlogQueue))
+
+ // trigger the rate limiter and ensure the txn is ignored
+ tx2 := tx
+ for i := 0; i < cfg.TxBacklogAppTxPerSecondRate*cfg.TxBacklogServiceRateWindowSeconds; i++ {
+ tx2.ForeignApps = append(tx2.ForeignApps, 1)
+ }
+	signedTx2 := tx2.Sign(secrets[1]) // sign tx2, which carries the extra ForeignApps
+ blob2 := protocol.Encode(&signedTx2)
+
+ action = handler.processIncomingTxn(network.IncomingMessage{Data: blob2, Sender: mockSender{}})
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
+ require.Equal(t, 1, len(handler.backlogQueue))
+
+ // backlogQueue has the first txn, but the second one is dropped
+ msg := <-handler.backlogQueue
+	require.Equal(t, blob, msg.rawmsg.Data)
+}
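
Note: shouldDrop takes the unverified transaction group plus the sender's routing address and admits a limited number of app calls per service window (TxBacklogAppTxPerSecondRate over TxBacklogServiceRateWindowSeconds); the real implementation is the new data/appRateLimiter.go. A deliberately simplified sliding-window model of that behavior, a sketch rather than the repo's code:

    package main

    import (
    	"fmt"
    	"time"
    )

    // limiter is a simplified per-key sliding-window counter, modeling (not
    // reproducing) the admission rule the new appRateLimiter applies.
    type limiter struct {
    	rate   int           // admitted transactions per window
    	window time.Duration // service window
    	seen   map[string][]time.Time
    }

    // shouldDrop reports whether another transaction for key should be dropped,
    // after expiring timestamps that fell out of the window.
    func (l *limiter) shouldDrop(key string, now time.Time) bool {
    	keep := l.seen[key][:0]
    	for _, t := range l.seen[key] {
    		if now.Sub(t) < l.window {
    			keep = append(keep, t)
    		}
    	}
    	if len(keep) >= l.rate {
    		l.seen[key] = keep
    		return true
    	}
    	l.seen[key] = append(keep, now)
    	return false
    }

    func main() {
    	l := &limiter{rate: 3, window: time.Second, seen: map[string][]time.Time{}}
    	now := time.Now()
    	for i := 0; i < 5; i++ {
    		// the key stands in for an (application, origin address) pair
    		fmt.Println(l.shouldDrop("app1|peer1", now)) // false, false, false, true, true
    	}
    }
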
diff --git a/docs/follower_node.md b/docs/follower_node.md
index 8df230641..742d4efe4 100644
--- a/docs/follower_node.md
+++ b/docs/follower_node.md
@@ -34,7 +34,7 @@ Behavior is controlled with the `config.json` file:
On startup, a follower node will be paused (synchronized) with its ledger's
current round. For a new deployment configured as a follower node, the
-initial sync round is 0. When a sync round is set, the node advance
+initial sync round is 0. When a sync round is set, the node advances
`MaxAcctLookback-1` rounds. The node is synchronized for the availability
of `Ledger State Delta` data. This means the minimum sync round is provided
and the node advances to cache future rounds.
@@ -56,7 +56,7 @@ The follower node was stripped of all functionality not directly related to
assisting with data-gathering capabilities. Since it is designed to run
alongside another application, it was made as lightweight as possible.
Other restrictions relate to the fact that this node is designed to be
-paused. So there are no guarantees that it's internal state matches the
+paused. So there are no guarantees that its internal state matches the
current round of consensus.
In particular, the follower node cannot participate in consensus or send
diff --git a/go.mod b/go.mod
index b19e61cb6..8844a8058 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,7 @@ require (
github.com/algorand/websocket v1.4.6
github.com/aws/aws-sdk-go v1.34.0
github.com/cockroachdb/pebble v0.0.0-20230807162746-af8c5f279001
- github.com/consensys/gnark-crypto v0.12.0
+ github.com/consensys/gnark-crypto v0.12.1
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c
github.com/dchest/siphash v1.2.1
github.com/fatih/color v1.13.0
@@ -42,6 +42,7 @@ require (
github.com/stretchr/testify v1.8.4
golang.org/x/crypto v0.14.0
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
+ golang.org/x/sync v0.3.0
golang.org/x/sys v0.13.0
golang.org/x/text v0.13.0
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
@@ -155,7 +156,6 @@ require (
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.17.0 // indirect
- golang.org/x/sync v0.3.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
golang.org/x/tools v0.11.0 // indirect
diff --git a/go.sum b/go.sum
index 91453673d..1d4e5abf6 100644
--- a/go.sum
+++ b/go.sum
@@ -125,8 +125,8 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6/go.mod h1:
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.12.0 h1:1OnSpOykNkUIBIBJKdhwy2p0JlW5o+Az02ICzZmvvdg=
-github.com/consensys/gnark-crypto v0.12.0/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
+github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
+github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
diff --git a/installer/config.json.example b/installer/config.json.example
index fccf558c4..aa1cb7171 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 31,
+ "Version": 33,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 15,
@@ -49,6 +49,7 @@
"EnableExperimentalAPI": false,
"EnableFollowMode": false,
"EnableGossipBlockService": true,
+ "EnableGossipService": true,
"EnableIncomingMessageFilter": false,
"EnableLedgerService": false,
"EnableMetricReporting": false,
@@ -60,6 +61,7 @@
"EnableRequestLogger": false,
"EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
+ "EnableTxBacklogAppRateLimiting": true,
"EnableTxBacklogRateLimiting": true,
"EnableTxnEvalTracer": false,
"EnableUsageLog": false,
@@ -120,6 +122,9 @@
"TrackerDBDir": "",
"TransactionSyncDataExchangeRate": 0,
"TransactionSyncSignificantMessageThreshold": 0,
+ "TxBacklogAppTxPerSecondRate": 100,
+ "TxBacklogAppTxRateLimiterMaxSize": 1048576,
+ "TxBacklogRateLimitingCongestionPct": 50,
"TxBacklogReservedCapacityPerPeer": 20,
"TxBacklogServiceRateWindowSeconds": 10,
"TxBacklogSize": 26000,
diff --git a/ledger/bulletin.go b/ledger/bulletin.go
index 8114fefa1..5dc2b99aa 100644
--- a/ledger/bulletin.go
+++ b/ledger/bulletin.go
@@ -18,7 +18,6 @@ package ledger
import (
"context"
- "sync/atomic"
"github.com/algorand/go-deadlock"
@@ -28,29 +27,17 @@ import (
"github.com/algorand/go-algorand/ledger/store/trackerdb"
)
-// notifier is a struct that encapsulates a single-shot channel; it will only be signaled once.
+// notifier is a struct that encapsulates a single-shot channel; it should only be signaled once.
type notifier struct {
- signal chan struct{}
- notified uint32
-}
-
-// makeNotifier constructs a notifier that has not been signaled.
-func makeNotifier() notifier {
- return notifier{signal: make(chan struct{}), notified: 0}
-}
-
-// notify signals the channel if it hasn't already done so
-func (notifier *notifier) notify() {
- if atomic.CompareAndSwapUint32(&notifier.notified, 0, 1) {
- close(notifier.signal)
- }
+ signal chan struct{}
+ count int
}
// bulletin provides an easy way to wait on a round to be written to the ledger.
// To use it, call <-Wait(round).
type bulletin struct {
mu deadlock.Mutex
- pendingNotificationRequests map[basics.Round]notifier
+ pendingNotificationRequests map[basics.Round]*notifier
latestRound basics.Round
}
@@ -62,7 +49,7 @@ type bulletinMem struct {
func makeBulletin() *bulletin {
b := new(bulletin)
- b.pendingNotificationRequests = make(map[basics.Round]notifier)
+ b.pendingNotificationRequests = make(map[basics.Round]*notifier)
return b
}
@@ -80,14 +67,32 @@ func (b *bulletin) Wait(round basics.Round) chan struct{} {
signal, exists := b.pendingNotificationRequests[round]
if !exists {
- signal = makeNotifier()
+ signal = &notifier{signal: make(chan struct{})}
b.pendingNotificationRequests[round] = signal
}
+ // Increment count of waiters, to support canceling.
+ signal.count++
+
return signal.signal
}
+// CancelWait removes a wait for a particular round. If no one else is waiting, the
+// notifier channel for that round is removed.
+func (b *bulletin) CancelWait(round basics.Round) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ signal, exists := b.pendingNotificationRequests[round]
+ if exists {
+ signal.count--
+ if signal.count <= 0 {
+ delete(b.pendingNotificationRequests, round)
+ }
+ }
+}
+
func (b *bulletin) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
- b.pendingNotificationRequests = make(map[basics.Round]notifier)
+ b.pendingNotificationRequests = make(map[basics.Round]*notifier)
b.latestRound = l.Latest()
return nil
}
@@ -105,7 +110,8 @@ func (b *bulletin) notifyRound(rnd basics.Round) {
}
delete(b.pendingNotificationRequests, pending)
- signal.notify()
+ // signal the channel by closing it; this is under lock and will only happen once
+ close(signal.signal)
}
b.latestRound = rnd
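
The rewrite above replaces the CAS-guarded `notify()` with a waiter count protected by the bulletin mutex: the channel is closed exactly once under lock, and abandoned waits can now be reclaimed via `CancelWait`. A self-contained sketch of the same pattern, with simplified types and plain `int` rounds:

```go
package main

import (
	"fmt"
	"sync"
)

type notifier struct {
	signal chan struct{}
	count  int // active waiters; when it drops to 0 the notifier is discarded
}

type bulletin struct {
	mu      sync.Mutex
	pending map[int]*notifier
}

func (b *bulletin) wait(round int) chan struct{} {
	b.mu.Lock()
	defer b.mu.Unlock()
	n, ok := b.pending[round]
	if !ok {
		n = &notifier{signal: make(chan struct{})}
		b.pending[round] = n
	}
	n.count++
	return n.signal
}

func (b *bulletin) cancelWait(round int) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if n, ok := b.pending[round]; ok {
		n.count--
		if n.count <= 0 {
			delete(b.pending, round) // last waiter gone: no channel leak
		}
	}
}

func (b *bulletin) notify(round int) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if n, ok := b.pending[round]; ok {
		delete(b.pending, round)
		close(n.signal) // under lock, and the notifier is unreachable afterwards
	}
}

func main() {
	b := &bulletin{pending: map[int]*notifier{}}
	ch := b.wait(5)
	b.notify(5)
	<-ch
	fmt.Println("round 5 notified")
}
```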
diff --git a/ledger/bulletin_test.go b/ledger/bulletin_test.go
index 5a6f6bb83..88d378447 100644
--- a/ledger/bulletin_test.go
+++ b/ledger/bulletin_test.go
@@ -20,7 +20,9 @@ import (
"testing"
"time"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
)
const epsilon = 5 * time.Millisecond
@@ -100,3 +102,109 @@ func TestBulletin(t *testing.T) {
t.Errorf("<-Wait(10) finished late")
}
}
+
+func TestCancelWait(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ bul := makeBulletin()
+
+ // Calling Wait before CancelWait
+ waitCh := bul.Wait(5)
+ bul.CancelWait(5)
+ bul.committedUpTo(5)
+ select {
+ case <-waitCh:
+ t.Errorf("<-Wait(5) should have been cancelled")
+ case <-time.After(epsilon):
+ // Correct
+ }
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(5))
+
+ // Calling CancelWait before Wait
+ bul.CancelWait(6)
+ select {
+ case <-bul.Wait(6):
+ t.Errorf("<-Wait(6) should have been cancelled")
+ case <-time.After(epsilon):
+ // Correct
+ }
+ require.Contains(t, bul.pendingNotificationRequests, basics.Round(6))
+ require.Equal(t, bul.pendingNotificationRequests[basics.Round(6)].count, 1)
+ bul.CancelWait(6)
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(6))
+
+ // Two Waits, one cancelled
+ waitCh1 := bul.Wait(7)
+ waitCh2 := bul.Wait(7)
+ require.Equal(t, waitCh1, waitCh2)
+ bul.CancelWait(7)
+ select {
+ case <-waitCh1:
+ t.Errorf("<-Wait(7) should not be notified yet")
+ case <-time.After(epsilon):
+ // Correct
+ }
+ // Still one waiter
+ require.Contains(t, bul.pendingNotificationRequests, basics.Round(7))
+ require.Equal(t, bul.pendingNotificationRequests[basics.Round(7)].count, 1)
+
+ bul.committedUpTo(7)
+ select {
+ case <-waitCh1:
+ // Correct
+ case <-time.After(epsilon):
+ t.Errorf("<-Wait(7) should have been notified")
+ }
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(7))
+
+ // Wait followed by Cancel for a round that already completed
+ waitCh = bul.Wait(5)
+ bul.CancelWait(5)
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(5))
+ select {
+ case <-waitCh:
+ // Correct
+ case <-time.After(epsilon):
+ t.Errorf("<-Wait(5) should have been notified right away")
+ }
+
+ // Cancel Wait after Wait triggered
+ waitCh = bul.Wait(8)
+ require.Contains(t, bul.pendingNotificationRequests, basics.Round(8))
+ require.Equal(t, bul.pendingNotificationRequests[basics.Round(8)].count, 1)
+ bul.committedUpTo(8)
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(8))
+ select {
+ case <-waitCh:
+ // Correct
+ case <-time.After(epsilon):
+ t.Errorf("<-Wait(8) should have been notified")
+ }
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(8))
+ bul.CancelWait(8) // should do nothing
+
+ // Cancel Wait after Wait triggered, but before Wait returned
+ waitCh = bul.Wait(9)
+ require.Contains(t, bul.pendingNotificationRequests, basics.Round(9))
+ require.Equal(t, bul.pendingNotificationRequests[basics.Round(9)].count, 1)
+ bul.committedUpTo(9)
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(9))
+ bul.CancelWait(9) // should do nothing
+ select {
+ case <-waitCh:
+ // Correct
+ case <-time.After(epsilon):
+ t.Errorf("<-Wait(9) should have been notified")
+ }
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(9))
+
+ // Two waits, both cancelled
+ waitCh1 = bul.Wait(10)
+ waitCh2 = bul.Wait(10)
+ require.Equal(t, waitCh1, waitCh2)
+ bul.CancelWait(10)
+ require.Contains(t, bul.pendingNotificationRequests, basics.Round(10))
+ require.Equal(t, bul.pendingNotificationRequests[basics.Round(10)].count, 1)
+ bul.CancelWait(10)
+ require.NotContains(t, bul.pendingNotificationRequests, basics.Round(10))
+}
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index d3bf1f87e..32216fcb2 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -105,9 +105,6 @@ type catchpointTracker struct {
// enableGeneratingCatchpointFiles determines whether catchpoints files should be generated by the trackers.
enableGeneratingCatchpointFiles bool
- // Prepared SQL statements for fast accounts DB lookups.
- accountsq trackerdb.AccountsReader
-
// log copied from ledger
log logging.Logger
@@ -129,7 +126,7 @@ type catchpointTracker struct {
// catchpointDataWriting helps to synchronize the (first stage) catchpoint data file
// writing. When this atomic variable is 0, no writing is going on.
// Any non-zero value indicates a catchpoint being written, or scheduled to be written.
- catchpointDataWriting int32
+ catchpointDataWriting atomic.Int32
// The Trie tracking the current account balances. Always matches the balances that were
// written to the database.
@@ -138,6 +135,9 @@ type catchpointTracker struct {
// roundDigest stores the digest of the block for every round starting with dbRound+1 and every round after it.
roundDigest []crypto.Digest
+ // consensusVersion stores the consensus versions for every round starting with dbRound+1 and every round after it.
+ consensusVersion []protocol.ConsensusVersion
+
// reenableCatchpointsRound is a round where the EnableCatchpointsWithSPContexts feature was enabled via the consensus.
// we avoid generating catchpoints before that round in order to ensure the network remains consistent in the catchpoint
// label being produced. This variable could be "wrong" in two cases -
@@ -151,9 +151,13 @@ type catchpointTracker struct {
// catchpoint files even before the protocol upgrade took place.
forceCatchpointFileWriting bool
- // catchpointsMu protects `roundDigest`, `reenableCatchpointsRound` and
+ // catchpointsMu protects roundDigest, reenableCatchpointsRound, cachedDBRound and
// `lastCatchpointLabel`.
catchpointsMu deadlock.RWMutex
+
+ // cachedDBRound is always exactly tracker DB round (and therefore, accountsRound()),
+ // cached to use in lookup functions
+ cachedDBRound basics.Round
}
// initialize initializes the catchpointTracker structure
@@ -205,7 +209,7 @@ func (ct *catchpointTracker) getSPVerificationData() (encodedData []byte, spVeri
return encodedData, spVerificationHash, nil
}
-func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basics.Round, updatingBalancesDuration time.Duration) error {
+func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basics.Round, blockProto protocol.ConsensusVersion, updatingBalancesDuration time.Duration) error {
ct.log.Infof("finishing catchpoint's first stage dbRound: %d", dbRound)
var totalKVs uint64
@@ -216,11 +220,15 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
var spVerificationEncodedData []byte
var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails
- // Generate the SP Verification hash and encoded data. The hash is used in the label when tracking catchpoints,
- // and the encoded data for that hash will be added to the catchpoint file if catchpoint generation is enabled.
- spVerificationEncodedData, spVerificationHash, err := ct.getSPVerificationData()
- if err != nil {
- return err
+ params := config.Consensus[blockProto]
+ if params.EnableCatchpointsWithSPContexts {
+ // Generate the SP Verification hash and encoded data. The hash is used in the label when tracking catchpoints,
+ // and the encoded data for that hash will be added to the catchpoint file if catchpoint generation is enabled.
+ var err error
+ spVerificationEncodedData, spVerificationHash, err = ct.getSPVerificationData()
+ if err != nil {
+ return err
+ }
}
if ct.enableGeneratingCatchpointFiles {
@@ -233,7 +241,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
catchpointGenerationStats.BalancesWriteTime = uint64(updatingBalancesDuration.Nanoseconds())
totalKVs, totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData(
ctx, dbRound, &catchpointGenerationStats, spVerificationEncodedData)
- atomic.StoreInt32(&ct.catchpointDataWriting, 0)
+ ct.catchpointDataWriting.Store(0)
if err != nil {
return err
}
@@ -257,7 +265,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
// Possibly finish generating first stage catchpoint db record and data file after
// a crash.
-func (ct *catchpointTracker) finishFirstStageAfterCrash(dbRound basics.Round) error {
+func (ct *catchpointTracker) finishFirstStageAfterCrash(dbRound basics.Round, blockProto protocol.ConsensusVersion) error {
v, err := ct.catchpointStore.ReadCatchpointStateUint64(
context.Background(), trackerdb.CatchpointStateWritingFirstStageInfo)
if err != nil {
@@ -274,10 +282,10 @@ func (ct *catchpointTracker) finishFirstStageAfterCrash(dbRound basics.Round) er
return err
}
- return ct.finishFirstStage(context.Background(), dbRound, 0)
+ return ct.finishFirstStage(context.Background(), dbRound, blockProto, 0)
}
-func (ct *catchpointTracker) finishCatchpointsAfterCrash(catchpointLookback uint64) error {
+func (ct *catchpointTracker) finishCatchpointsAfterCrash(blockProto protocol.ConsensusVersion, catchpointLookback uint64) error {
records, err := ct.catchpointStore.SelectUnfinishedCatchpoints(context.Background())
if err != nil {
return err
@@ -292,7 +300,7 @@ func (ct *catchpointTracker) finishCatchpointsAfterCrash(catchpointLookback uint
}
err = ct.finishCatchpoint(
- context.Background(), record.Round, record.BlockHash, catchpointLookback)
+ context.Background(), record.Round, record.BlockHash, blockProto, catchpointLookback)
if err != nil {
return err
}
@@ -301,8 +309,8 @@ func (ct *catchpointTracker) finishCatchpointsAfterCrash(catchpointLookback uint
return nil
}
-func (ct *catchpointTracker) recoverFromCrash(dbRound basics.Round) error {
- err := ct.finishFirstStageAfterCrash(dbRound)
+func (ct *catchpointTracker) recoverFromCrash(dbRound basics.Round, blockProto protocol.ConsensusVersion) error {
+ err := ct.finishFirstStageAfterCrash(dbRound, blockProto)
if err != nil {
return err
}
@@ -316,7 +324,7 @@ func (ct *catchpointTracker) recoverFromCrash(dbRound basics.Round) error {
}
if catchpointLookback != 0 {
- err = ct.finishCatchpointsAfterCrash(catchpointLookback)
+ err = ct.finishCatchpointsAfterCrash(blockProto, catchpointLookback)
if err != nil {
return err
}
@@ -346,11 +354,15 @@ func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Rou
return err
}
+ ct.catchpointsMu.Lock()
+ ct.cachedDBRound = dbRound
ct.roundDigest = nil
- ct.catchpointDataWriting = 0
+ ct.consensusVersion = nil
+ ct.catchpointDataWriting.Store(0)
// keep this channel closed if we're not generating catchpoints
ct.catchpointDataSlowWriting = make(chan struct{}, 1)
close(ct.catchpointDataSlowWriting)
+ ct.catchpointsMu.Unlock()
err = ct.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
return ct.initializeHashes(ctx, tx, dbRound)
@@ -359,18 +371,18 @@ func (ct *catchpointTracker) loadFromDisk(l ledgerForTracker, dbRound basics.Rou
return err
}
- ct.accountsq, err = ct.dbs.MakeAccountsOptimizedReader()
+ ct.lastCatchpointLabel, err = ct.catchpointStore.ReadCatchpointStateString(
+ context.Background(), trackerdb.CatchpointStateLastCatchpoint)
if err != nil {
return
}
- ct.lastCatchpointLabel, err = ct.catchpointStore.ReadCatchpointStateString(
- context.Background(), trackerdb.CatchpointStateLastCatchpoint)
+ hdr, err := l.BlockHdr(dbRound)
if err != nil {
return
}
- return ct.recoverFromCrash(dbRound)
+ return ct.recoverFromCrash(dbRound, hdr.CurrentProtocol)
}
// newBlock informs the tracker of a new block from round
@@ -380,6 +392,7 @@ func (ct *catchpointTracker) newBlock(blk bookkeeping.Block, delta ledgercore.St
defer ct.catchpointsMu.Unlock()
ct.roundDigest = append(ct.roundDigest, blk.Digest())
+ ct.consensusVersion = append(ct.consensusVersion, blk.CurrentProtocol)
if (config.Consensus[blk.CurrentProtocol].EnableCatchpointsWithSPContexts || ct.forceCatchpointFileWriting) && ct.reenableCatchpointsRound == 0 {
catchpointLookback := config.Consensus[blk.CurrentProtocol].CatchpointLookback
@@ -396,7 +409,10 @@ func (ct *catchpointTracker) newBlock(blk bookkeeping.Block, delta ledgercore.St
// number that can be removed from the blocks database as well as the lookback that this
// tracker maintains.
func (ct *catchpointTracker) committedUpTo(rnd basics.Round) (retRound, lookback basics.Round) {
- return rnd, basics.Round(0)
+ ct.catchpointsMu.RLock()
+ defer ct.catchpointsMu.RUnlock()
+ retRound = ct.cachedDBRound
+ return retRound, basics.Round(0)
}
// Calculate whether we have intermediate first stage catchpoint rounds and the
@@ -500,11 +516,13 @@ func (ct *catchpointTracker) prepareCommit(dcc *deferredCommitContext) error {
if ct.enableGeneratingCatchpointFiles && dcc.catchpointFirstStage {
// store non-zero ( all ones ) into the catchpointWriting atomic variable to indicate that a catchpoint is being written
- atomic.StoreInt32(&ct.catchpointDataWriting, int32(-1))
+ ct.catchpointDataWriting.Store(int32(-1))
}
dcc.committedRoundDigests = make([]crypto.Digest, dcc.offset)
copy(dcc.committedRoundDigests, ct.roundDigest[:dcc.offset])
+ dcc.committedProtocolVersion = make([]protocol.ConsensusVersion, dcc.offset)
+ copy(dcc.committedProtocolVersion, ct.consensusVersion[:dcc.offset])
return nil
}
@@ -516,7 +534,7 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx trackerdb.Trans
defer func() {
if err != nil && dcc.catchpointFirstStage && ct.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(&ct.catchpointDataWriting, 0)
+ ct.catchpointDataWriting.Store(0)
}
}()
@@ -601,6 +619,8 @@ func (ct *catchpointTracker) postCommit(ctx context.Context, dcc *deferredCommit
ct.catchpointsMu.Lock()
ct.roundDigest = ct.roundDigest[dcc.offset:]
+ ct.consensusVersion = ct.consensusVersion[dcc.offset:]
+ ct.cachedDBRound = dcc.newBase()
ct.catchpointsMu.Unlock()
dcc.updatingBalancesDuration = time.Since(dcc.flushTime)
@@ -736,9 +756,18 @@ func repackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestC
// Create a catchpoint (a label and possibly a file with db record) and remove
// the unfinished catchpoint record.
-func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound basics.Round, round basics.Round, dataInfo trackerdb.CatchpointFirstStageInfo, blockHash crypto.Digest) error {
+func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound basics.Round, round basics.Round, dataInfo trackerdb.CatchpointFirstStageInfo, blockHash crypto.Digest, blockProto protocol.ConsensusVersion) error {
startTime := time.Now()
- labelMaker := ledgercore.MakeCatchpointLabelMakerCurrent(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash)
+ var labelMaker ledgercore.CatchpointLabelMaker
+ var version uint64
+ params := config.Consensus[blockProto]
+ if params.EnableCatchpointsWithSPContexts {
+ labelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash)
+ version = CatchpointFileVersionV7
+ } else {
+ labelMaker = ledgercore.MakeCatchpointLabelMakerV6(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals)
+ version = CatchpointFileVersionV6
+ }
label := ledgercore.MakeLabel(labelMaker)
ct.log.Infof(
@@ -774,7 +803,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound
// Make a catchpoint file.
header := CatchpointFileHeader{
- Version: CatchpointFileVersionV7,
+ Version: version,
BalancesRound: accountsRound,
BlocksRound: round,
Totals: dataInfo.Totals,
@@ -834,7 +863,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound
// Try create a catchpoint (a label and possibly a file with db record) and remove
// the unfinished catchpoint record.
-func (ct *catchpointTracker) finishCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest, catchpointLookback uint64) error {
+func (ct *catchpointTracker) finishCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest, blockProto protocol.ConsensusVersion, catchpointLookback uint64) error {
accountsRound := round - basics.Round(catchpointLookback)
ct.log.Infof("finishing catchpoint round: %d accountsRound: %d", round, accountsRound)
@@ -847,7 +876,7 @@ func (ct *catchpointTracker) finishCatchpoint(ctx context.Context, round basics.
if !exists {
return ct.catchpointStore.DeleteUnfinishedCatchpoint(ctx, round)
}
- return ct.createCatchpoint(ctx, accountsRound, round, dataInfo, blockHash)
+ return ct.createCatchpoint(ctx, accountsRound, round, dataInfo, blockHash, blockProto)
}
// Calculate catchpoint round numbers in [min, max]. `catchpointInterval` must be
@@ -908,7 +937,9 @@ func (ct *catchpointTracker) pruneFirstStageRecordsData(ctx context.Context, max
func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
if dcc.catchpointFirstStage {
- err := ct.finishFirstStage(ctx, dcc.newBase(), dcc.updatingBalancesDuration)
+ round := dcc.newBase()
+ blockProto := dcc.committedProtocolVersion[round-dcc.oldBase-1]
+ err := ct.finishFirstStage(ctx, round, blockProto, dcc.updatingBalancesDuration)
if err != nil {
ct.log.Warnf(
"error finishing catchpoint's first stage dcc.newBase: %d err: %v",
@@ -918,8 +949,10 @@ func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferr
// Generate catchpoints for rounds in (dcc.oldBase, dcc.newBase].
for _, round := range ct.calculateCatchpointRounds(&dcc.deferredCommitRange) {
+ blockHash := dcc.committedRoundDigests[round-dcc.oldBase-1]
+ blockProto := dcc.committedProtocolVersion[round-dcc.oldBase-1]
err := ct.finishCatchpoint(
- ctx, round, dcc.committedRoundDigests[round-dcc.oldBase-1], dcc.catchpointLookback)
+ ctx, round, blockHash, blockProto, dcc.catchpointLookback)
if err != nil {
ct.log.Warnf("error creating catchpoint round: %d err: %v", round, err)
}
@@ -963,7 +996,7 @@ func (ct *catchpointTracker) cancelWrite(dcc *deferredCommitContext) {
// determine if this was a catchpoint round
if dcc.catchpointFirstStage {
// it was a catchpoint round, so update the catchpointWriting to indicate that we're done.
- atomic.StoreInt32(&ct.catchpointDataWriting, 0)
+ ct.catchpointDataWriting.Store(0)
}
}
}
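
Throughout this file the package-level `atomic.StoreInt32`/`atomic.LoadInt32` calls are migrated to Go 1.19's typed `atomic.Int32`, whose zero value is ready to use, which is automatically aligned on 32-bit platforms, and which makes unsynchronized access a compile error rather than a silent race. A minimal sketch of the pattern, using an illustrative `writerFlag` wrapper:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type writerFlag struct {
	// Before: `writing int32` manipulated via atomic.StoreInt32/LoadInt32.
	// After: a typed atomic; plain reads or writes no longer compile.
	writing atomic.Int32
}

func (w *writerFlag) begin() { w.writing.Store(-1) } // all ones: write scheduled or in progress
func (w *writerFlag) done()  { w.writing.Store(0) }

func (w *writerFlag) isWriting() bool {
	return w.writing.Load() != 0
}

func main() {
	var w writerFlag
	w.begin()
	fmt.Println(w.isWriting()) // true
	w.done()
	fmt.Println(w.isWriting()) // false
}
```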
@@ -1117,7 +1150,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
// isWritingCatchpointDataFile returns true iff a (first stage) catchpoint data file
// is being generated.
func (ct *catchpointTracker) isWritingCatchpointDataFile() bool {
- return atomic.LoadInt32(&ct.catchpointDataWriting) != 0
+ return ct.catchpointDataWriting.Load() != 0
}
// Generates a (first stage) catchpoint data file.
@@ -1157,9 +1190,13 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
return
}
- err = catchpointWriter.FileWriteSPVerificationContext(encodedSPData)
- if err != nil {
- return
+ // do not write encodedSPData if it was not provided;
+ // this indicates that the older catchpoint file format is being generated.
+ if encodedSPData != nil {
+ err = catchpointWriter.FileWriteSPVerificationContext(encodedSPData)
+ if err != nil {
+ return
+ }
}
for more {
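
Taken together, the `createCatchpoint` and `generateCatchpointData` changes above make the label format, the file version, and the presence of the SP verification chunk all follow the consensus parameters of the catchpoint round, so nodes on either side of the upgrade produce matching labels. A sketch of just the selection logic, with placeholder constants standing in for the ledger package's `CatchpointFileVersionV6`/`V7`:

```go
package main

import "fmt"

// Placeholder values: the real CatchpointFileVersionV6/V7 constants live in
// the ledger package.
const (
	catchpointFileVersionV6 uint64 = 6
	catchpointFileVersionV7 uint64 = 7
)

type consensusParams struct {
	EnableCatchpointsWithSPContexts bool
}

func pickLabelAndVersion(params consensusParams) (label string, version uint64) {
	if params.EnableCatchpointsWithSPContexts {
		// current label format includes the state proof verification hash
		return "current", catchpointFileVersionV7
	}
	// pre-upgrade nodes must see a label format they can reproduce
	return "v6", catchpointFileVersionV6
}

func main() {
	for _, enabled := range []bool{false, true} {
		label, version := pickLabelAndVersion(consensusParams{EnableCatchpointsWithSPContexts: enabled})
		fmt.Printf("SP contexts enabled=%v -> label maker=%s, file version=%d\n", enabled, label, version)
	}
}
```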
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 4e14a1ab1..a3a450933 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -53,11 +53,11 @@ func TestCatchpointIsWritingCatchpointFile(t *testing.T) {
ct := &catchpointTracker{}
- ct.catchpointDataWriting = -1
+ ct.catchpointDataWriting.Store(-1)
ans := ct.isWritingCatchpointDataFile()
require.True(t, ans)
- ct.catchpointDataWriting = 0
+ ct.catchpointDataWriting.Store(0)
ans = ct.isWritingCatchpointDataFile()
require.False(t, ans)
}
@@ -360,7 +360,10 @@ func createCatchpoint(t *testing.T, ct *catchpointTracker, accountsRound basics.
require.Equal(t, calculateStateProofVerificationHash(t, ml), stateProofVerificationHash)
- err = ct.createCatchpoint(context.Background(), accountsRound, round, trackerdb.CatchpointFirstStageInfo{BiggestChunkLen: biggestChunkLen}, crypto.Digest{})
+ err = ct.createCatchpoint(
+ context.Background(), accountsRound, round,
+ trackerdb.CatchpointFirstStageInfo{BiggestChunkLen: biggestChunkLen},
+ crypto.Digest{}, protocol.ConsensusCurrentVersion)
require.NoError(t, err)
}
@@ -760,9 +763,11 @@ func TestCatchpointReproducibleLabels(t *testing.T) {
// test to see that after loadFromDisk, all the tracker content is lost ( as expected )
require.NotZero(t, len(ct.roundDigest))
+ require.NotZero(t, len(ct.consensusVersion))
require.NoError(t, ct.loadFromDisk(ml, ml.Latest()))
require.Zero(t, len(ct.roundDigest))
- require.Zero(t, ct.catchpointDataWriting)
+ require.Zero(t, len(ct.consensusVersion))
+ require.Zero(t, ct.catchpointDataWriting.Load())
select {
case _, closed := <-ct.catchpointDataSlowWriting:
require.False(t, closed)
@@ -771,48 +776,75 @@ func TestCatchpointReproducibleLabels(t *testing.T) {
}
}
+// TestCatchpointBackwardCompatibleLabels checks labels before and after EnableCatchpointsWithSPContexts was introduced.
+func TestCatchpointBackwardCompatibleLabels(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ temporaryDirectory := t.TempDir()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ ct := &catchpointTracker{enableGeneratingCatchpointFiles: false}
+ conf := config.GetDefaultLocal()
+
+ conf.Archival = true
+ paths := DirsAndPrefix{
+ ResolvedGenesisDirs: config.ResolvedGenesisDirs{
+ CatchpointGenesisDir: ".",
+ HotGenesisDir: ".",
+ },
+ }
+ ct.initialize(conf, paths)
+
+ defer ct.close()
+ ct.dbDirectory = temporaryDirectory
+ ct.tmpDir = temporaryDirectory
+
+ _, err := trackerDBInitialize(ml, true, ct.dbDirectory)
+ require.NoError(t, err)
+
+ err = ct.loadFromDisk(ml, ml.Latest())
+ require.NoError(t, err)
+
+ // create a catchpoint with the latest version of the code
+ round := basics.Round(2000)
+
+ protos := []protocol.ConsensusVersion{protocol.ConsensusCurrentVersion, protocol.ConsensusV37, protocol.ConsensusV36}
+ labels := make([]string, len(protos))
+ for i, proto := range protos {
+ err = ct.createCatchpoint(
+ context.Background(), round-1, round,
+ trackerdb.CatchpointFirstStageInfo{},
+ crypto.Digest{}, proto)
+ require.NoError(t, err)
+ require.NotEmpty(t, ct.lastCatchpointLabel)
+ labels[i] = ct.lastCatchpointLabel
+ }
+ require.NotEqual(t, labels[0], labels[1])
+ require.Equal(t, labels[1], labels[2])
+}
+
// blockingTracker is a testing tracker used to test "what if" a tracker would get blocked.
type blockingTracker struct {
+ emptyTracker
postCommitUnlockedEntryLock chan struct{}
postCommitUnlockedReleaseLock chan struct{}
postCommitEntryLock chan struct{}
postCommitReleaseLock chan struct{}
- committedUpToRound int64
+ committedUpToRound atomic.Int64
alwaysLock atomic.Bool
shouldLockPostCommit atomic.Bool
shouldLockPostCommitUnlocked atomic.Bool
}
-// loadFromDisk is not implemented in the blockingTracker.
-func (bt *blockingTracker) loadFromDisk(ledgerForTracker, basics.Round) error {
- return nil
-}
-
-// newBlock is not implemented in the blockingTracker.
-func (bt *blockingTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
-}
-
// committedUpTo in the blockingTracker just stores the committed round.
func (bt *blockingTracker) committedUpTo(committedRnd basics.Round) (minRound, lookback basics.Round) {
- atomic.StoreInt64(&bt.committedUpToRound, int64(committedRnd))
+ bt.committedUpToRound.Store(int64(committedRnd))
return committedRnd, basics.Round(0)
}
-// produceCommittingTask is not used by the blockingTracker
-func (bt *blockingTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
- return dcr
-}
-
-// prepareCommit, is not used by the blockingTracker
-func (bt *blockingTracker) prepareCommit(*deferredCommitContext) error {
- return nil
-}
-
-// commitRound is not used by the blockingTracker
-func (bt *blockingTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error {
- return nil
-}
-
// postCommit implements entry/exit blockers, designed for testing.
func (bt *blockingTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
if bt.alwaysLock.Load() || dcc.catchpointFirstStage || bt.shouldLockPostCommit.Load() {
@@ -829,18 +861,6 @@ func (bt *blockingTracker) postCommitUnlocked(ctx context.Context, dcc *deferred
}
}
-// control functions are not used by the blockingTracker
-func (bt *blockingTracker) handleUnorderedCommit(dcc *deferredCommitContext) {
-}
-func (bt *blockingTracker) handlePrepareCommitError(dcc *deferredCommitContext) {
-}
-func (bt *blockingTracker) handleCommitError(dcc *deferredCommitContext) {
-}
-
-// close is not used by the blockingTracker
-func (bt *blockingTracker) close() {
-}
-
func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -906,7 +926,7 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
require.NoError(t, err)
// wait for the committedUpToRound to be called with the correct round number.
for {
- committedUpToRound := atomic.LoadInt64(&writeStallingTracker.committedUpToRound)
+ committedUpToRound := writeStallingTracker.committedUpToRound.Load()
if basics.Round(committedUpToRound) == ledger.Latest() {
break
}
@@ -948,7 +968,7 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
require.NoError(t, err)
// wait for the committedUpToRound to be called with the correct round number.
for {
- committedUpToRound := atomic.LoadInt64(&writeStallingTracker.committedUpToRound)
+ committedUpToRound := writeStallingTracker.committedUpToRound.Load()
if basics.Round(committedUpToRound) == ledger.Latest() {
break
}
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index b16d3a8fb..3661c6005 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -26,6 +26,7 @@ import (
"sync"
"time"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merkletrie"
@@ -78,10 +79,10 @@ type CatchpointCatchupAccessor interface {
StoreBalancesRound(ctx context.Context, blk *bookkeeping.Block) (err error)
// StoreFirstBlock stores a single block to the blocks database.
- StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block) (err error)
+ StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error)
// StoreBlock stores a single block to the blocks database.
- StoreBlock(ctx context.Context, blk *bookkeeping.Block) (err error)
+ StoreBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error)
// FinishBlocks concludes the catchup of the blocks database.
FinishBlocks(ctx context.Context, applyChanges bool) (err error)
@@ -237,7 +238,7 @@ const (
// CatchupAccessorClientLedger represents ledger interface needed for catchpoint accessor clients
type CatchupAccessorClientLedger interface {
- Block(rnd basics.Round) (blk bookkeeping.Block, err error)
+ BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error)
GenesisHash() crypto.Digest
BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error)
Latest() (rnd basics.Round)
@@ -1055,12 +1056,12 @@ func (c *catchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context,
}
// StoreFirstBlock stores a single block to the blocks database.
-func (c *catchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (c *catchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error) {
blockDbs := c.ledger.blockDB()
start := time.Now()
ledgerStorefirstblockCount.Inc(nil)
err = blockDbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- return blockdb.BlockStartCatchupStaging(tx, *blk)
+ return blockdb.BlockStartCatchupStaging(tx, *blk, *cert)
})
ledgerStorefirstblockMicros.AddMicrosecondsSince(start, nil)
if err != nil {
@@ -1070,12 +1071,12 @@ func (c *catchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk
}
// StoreBlock stores a single block to the blocks database.
-func (c *catchpointCatchupAccessorImpl) StoreBlock(ctx context.Context, blk *bookkeeping.Block) (err error) {
+func (c *catchpointCatchupAccessorImpl) StoreBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error) {
blockDbs := c.ledger.blockDB()
start := time.Now()
ledgerCatchpointStoreblockCount.Inc(nil)
err = blockDbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- return blockdb.BlockPutStaging(tx, *blk)
+ return blockdb.BlockPutStaging(tx, *blk, *cert)
})
ledgerCatchpointStoreblockMicros.AddMicrosecondsSince(start, nil)
if err != nil {
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index a97377bd3..63b0fa242 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -28,6 +28,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
@@ -162,7 +163,7 @@ func initializeTestCatchupAccessor(t *testing.T, l *Ledger, accountsCount uint64
require.NoError(t, err)
// We do this to initialize the catchpointblocks table. Needed to be able to use CompleteCatchup.
- err = catchpointAccessor.StoreFirstBlock(ctx, &bookkeeping.Block{})
+ err = catchpointAccessor.StoreFirstBlock(ctx, &bookkeeping.Block{}, &agreement.Certificate{})
require.NoError(t, err)
// We do this to initialize the accounttotals table. Needed to be able to use CompleteCatchup.
@@ -441,6 +442,7 @@ func TestVerifyCatchpoint(t *testing.T) {
// actual testing...
var blk bookkeeping.Block
+ var cert agreement.Certificate
err = catchpointAccessor.VerifyCatchpoint(ctx, &blk)
require.Error(t, err)
@@ -455,14 +457,14 @@ func TestVerifyCatchpoint(t *testing.T) {
err = catchpointAccessor.StoreBalancesRound(ctx, &blk)
require.NoError(t, err)
// StoreFirstBlock is a dumb wrapper on some db logic
- err = catchpointAccessor.StoreFirstBlock(ctx, &blk)
+ err = catchpointAccessor.StoreFirstBlock(ctx, &blk, &cert)
require.NoError(t, err)
_, err = catchpointAccessor.EnsureFirstBlock(ctx)
require.NoError(t, err)
blk.BlockHeader.Round++
- err = catchpointAccessor.StoreBlock(ctx, &blk)
+ err = catchpointAccessor.StoreBlock(ctx, &blk, &cert)
require.NoError(t, err)
// TODO: write a case with working no-err
diff --git a/ledger/eval/eval.go b/ledger/eval/eval.go
index 5fc72ff54..58f7fc8a4 100644
--- a/ledger/eval/eval.go
+++ b/ledger/eval/eval.go
@@ -1333,7 +1333,7 @@ func (eval *BlockEvaluator) endOfBlock() error {
highWeight = expectedVotersWeight
lowWeight = actualVotersWeight
}
- const stakeDiffusionFactor = 5
+ const stakeDiffusionFactor = 1
allowedDelta, overflowed := basics.Muldiv(expectedVotersWeight.Raw, stakeDiffusionFactor, 100)
if overflowed {
return fmt.Errorf("StateProofOnlineTotalWeight overflow: %v != %v", actualVotersWeight, expectedVotersWeight)
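
Lowering `stakeDiffusionFactor` from 5 to 1 tightens the allowed divergence between expected and actual state-proof voter weight from 5% to 1% of the expected weight. A worked example of the computation; `muldiv` below is a simplified stand-in for `basics.Muldiv`, adequate only for these magnitudes:

```go
package main

import "fmt"

// muldiv computes a*b/c with a naive overflow check; basics.Muldiv does the
// full 128-bit intermediate math.
func muldiv(a, b, c uint64) (res uint64, overflow bool) {
	if a != 0 && a*b/a != b {
		return 0, true
	}
	return a * b / c, false
}

func main() {
	expectedVotersWeight := uint64(3_000_000_000) // expected online stake, in microalgos
	for _, factor := range []uint64{5, 1} {
		allowedDelta, overflowed := muldiv(expectedVotersWeight, factor, 100)
		fmt.Printf("factor=%d -> allowedDelta=%d (overflow=%v)\n", factor, allowedDelta, overflowed)
	}
	// factor=5 -> allowedDelta=150000000 (5% of expected weight)
	// factor=1 -> allowedDelta=30000000  (1% of expected weight)
}
```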
diff --git a/ledger/eval/prefetcher/prefetcher.go b/ledger/eval/prefetcher/prefetcher.go
index 40702b008..2187fa1b5 100644
--- a/ledger/eval/prefetcher/prefetcher.go
+++ b/ledger/eval/prefetcher/prefetcher.go
@@ -102,13 +102,10 @@ func PrefetchAccounts(ctx context.Context, l Ledger, rnd basics.Round, txnGroups
type groupTask struct {
// incompleteCount is the number of resources+balances still pending and need to be loaded
// this variable is used as an atomic to synchronize the readiness of the group tasks.
- // in order to ensure support on 32-bit platforms, this variable need to be 64-bit aligned.
- incompleteCount int64
+ incompleteCount atomic.Int64
// the group task index - aligns with the index of the transaction group in the
- // provided groups slice. The usage of int64 here is to made sure the size of the
- // structure is 64-bit aligned. If this not the case, then it would fail the atomic
- // operations on the incompleteCount on 32-bit systems.
- groupTaskIndex int64
+ // provided groups slice.
+ groupTaskIndex atomic.Int64
// balances contains the loaded balances each transaction group have
balances []LoadedAccountDataEntry
// balancesCount is the number of balances that need to be loaded per transaction group
@@ -385,21 +382,22 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
const dependencyFreeGroup = -int64(^uint64(0)/2) - 1
for grpIdx := range groupsReady {
gr := groupsReady[grpIdx]
- gr.groupTaskIndex = int64(grpIdx)
- gr.incompleteCount = int64(gr.balancesCount + gr.resourcesCount)
+ gr.groupTaskIndex.Store(int64(grpIdx))
+ gr.incompleteCount.Store(int64(gr.balancesCount + gr.resourcesCount))
gr.balances = allBalances[usedBalances : usedBalances+gr.balancesCount]
if gr.resourcesCount > 0 {
gr.resources = allResources[usedResources : usedResources+gr.resourcesCount]
usedResources += gr.resourcesCount
}
usedBalances += gr.balancesCount
- if gr.incompleteCount == 0 {
- gr.incompleteCount = dependencyFreeGroup
+ if gr.incompleteCount.Load() == 0 {
+ gr.incompleteCount.Store(dependencyFreeGroup)
}
}
- taskIdx := int64(-1)
- defer atomic.StoreInt64(&taskIdx, tasksCount)
+ var taskIdx atomic.Int64
+ taskIdx.Store(-1)
+ defer taskIdx.Store(tasksCount)
// create a few goroutines to load the account data asynchronously.
for i := 0; i < asyncAccountLoadingThreadCount; i++ {
go p.asyncPrefetchRoutine(&tasksQueue, &taskIdx, groupDoneCh)
@@ -409,7 +407,7 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
completed := make(map[int64]bool)
for i := int64(0); i < int64(len(p.txnGroups)); {
wait:
- incompleteCount := atomic.LoadInt64(&groupsReady[i].incompleteCount)
+ incompleteCount := groupsReady[i].incompleteCount.Load()
if incompleteCount > 0 || (incompleteCount != dependencyFreeGroup && !completed[i]) {
select {
case done := <-groupDoneCh:
@@ -462,27 +460,27 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) {
func (gt *groupTask) markCompletionAcct(idx int, br LoadedAccountDataEntry, groupDoneCh chan groupTaskDone) {
gt.balances[idx] = br
- if atomic.AddInt64(&gt.incompleteCount, -1) == 0 {
- groupDoneCh <- groupTaskDone{groupIdx: gt.groupTaskIndex}
+ if gt.incompleteCount.Add(-1) == 0 {
+ groupDoneCh <- groupTaskDone{groupIdx: gt.groupTaskIndex.Load()}
}
}
func (gt *groupTask) markCompletionResource(idx int, res LoadedResourcesEntry, groupDoneCh chan groupTaskDone) {
gt.resources[idx] = res
- if atomic.AddInt64(&gt.incompleteCount, -1) == 0 {
- groupDoneCh <- groupTaskDone{groupIdx: gt.groupTaskIndex}
+ if gt.incompleteCount.Add(-1) == 0 {
+ groupDoneCh <- groupTaskDone{groupIdx: gt.groupTaskIndex.Load()}
}
}
func (gt *groupTask) markCompletionAcctError(err error, task *preloaderTask, groupDoneCh chan groupTaskDone) {
for {
- curVal := atomic.LoadInt64(&gt.incompleteCount)
+ curVal := gt.incompleteCount.Load()
if curVal <= 0 {
return
}
- if atomic.CompareAndSwapInt64(&gt.incompleteCount, curVal, 0) {
+ if gt.incompleteCount.CompareAndSwap(curVal, 0) {
groupDoneCh <- groupTaskDone{
- groupIdx: gt.groupTaskIndex,
+ groupIdx: gt.groupTaskIndex.Load(),
err: err,
task: task,
}
@@ -491,11 +489,11 @@ func (gt *groupTask) markCompletionAcctError(err error, task *preloaderTask, gro
}
}
-func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, taskIdx *int64, groupDoneCh chan groupTaskDone) {
+func (p *accountPrefetcher) asyncPrefetchRoutine(queue *preloaderTaskQueue, taskIdx *atomic.Int64, groupDoneCh chan groupTaskDone) {
var task *preloaderTask
var err error
for {
- nextTaskIdx := atomic.AddInt64(taskIdx, 1)
+ nextTaskIdx := taskIdx.Add(1)
queue, task = queue.getTaskAtIndex(int(nextTaskIdx))
if task == nil {
// no more tasks.
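
The `taskIdx` counter becomes an `atomic.Int64` shared by the loader goroutines: each worker claims the next task with `Add(1)`, and the deferred `Store(tasksCount)` pushes the counter past the end so all workers drain on exit. The pattern in isolation, with illustrative task names:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	tasks := []string{"acct A", "acct B", "asset 7", "app 42"}
	var taskIdx atomic.Int64
	taskIdx.Store(-1)

	var wg sync.WaitGroup
	for w := 0; w < 3; w++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			for {
				next := taskIdx.Add(1) // claim the next unclaimed index
				if next >= int64(len(tasks)) {
					return // counter ran past the end: nothing left to claim
				}
				fmt.Printf("worker %d loads %s\n", worker, tasks[next])
			}
		}(w)
	}
	wg.Wait()
}
```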
diff --git a/ledger/ledger.go b/ledger/ledger.go
index dc3baaf76..458ce3d53 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -769,6 +769,16 @@ func (l *Ledger) Wait(r basics.Round) chan struct{} {
return l.bulletinDisk.Wait(r)
}
+// WaitWithCancel returns a channel that closes once a given round is
+// stored durably in the ledger. The returned function can be used to
+// cancel the wait, which cleans up resources if no other Wait call is
+// active for the same round.
+func (l *Ledger) WaitWithCancel(r basics.Round) (chan struct{}, func()) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ return l.bulletinDisk.Wait(r), func() { l.bulletinDisk.CancelWait(r) }
+}
+
// WaitMem returns a channel that closes once a given round is
// available in memory in the ledger, but might not be stored
// durably on disk yet.
@@ -898,6 +908,12 @@ func (l *Ledger) LatestTrackerCommitted() basics.Round {
return l.trackers.getDbRound()
}
+// IsBehindCommittingDeltas reports whether the ledger has accumulated more in-memory deltas than expected.
+// It is intended to slow down the catchup service when deltas grow beyond a set limit.
+func (l *Ledger) IsBehindCommittingDeltas() bool {
+ return l.trackers.isBehindCommittingDeltas(l.Latest())
+}
+
// DebuggerLedger defines the minimal set of method required for creating a debug balances.
type DebuggerLedger = eval.LedgerForCowBase
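
A hypothetical caller sketch (not part of this change) showing how `WaitWithCancel` is meant to be consumed; the catchup service is the intended user, and the context-based wrapper below is only an assumption for the example:

```go
package example

import (
	"context"

	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger"
)

// waitDurable blocks until round r is stored durably, or the caller gives up.
func waitDurable(ctx context.Context, l *ledger.Ledger, r basics.Round) error {
	ch, cancel := l.WaitWithCancel(r)
	defer cancel() // no-op once notified; otherwise frees the bulletin waiter
	select {
	case <-ch:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
```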
diff --git a/ledger/store/blockdb/blockdb.go b/ledger/store/blockdb/blockdb.go
index 64b6f02fc..bbd11d6c1 100644
--- a/ledger/store/blockdb/blockdb.go
+++ b/ledger/store/blockdb/blockdb.go
@@ -242,7 +242,7 @@ func BlockForgetBefore(tx *sql.Tx, rnd basics.Round) error {
}
// BlockStartCatchupStaging initializes catchup for catchpoint
-func BlockStartCatchupStaging(tx *sql.Tx, blk bookkeeping.Block) error {
+func BlockStartCatchupStaging(tx *sql.Tx, blk bookkeeping.Block, cert agreement.Certificate) error {
// delete the old catchpointblocks table, if there is such.
for _, stmt := range blockResetExprs {
stmt = strings.Replace(stmt, "blocks", "catchpointblocks", 1)
@@ -262,11 +262,12 @@ func BlockStartCatchupStaging(tx *sql.Tx, blk bookkeeping.Block) error {
}
// insert the top entry to the blocks table.
- _, err := tx.Exec("INSERT INTO catchpointblocks (rnd, proto, hdrdata, blkdata) VALUES (?, ?, ?, ?)",
+ _, err := tx.Exec("INSERT INTO catchpointblocks (rnd, proto, hdrdata, blkdata, certdata) VALUES (?, ?, ?, ?, ?)",
blk.Round(),
blk.CurrentProtocol,
protocol.Encode(&blk.BlockHeader),
protocol.Encode(&blk),
+ protocol.Encode(&cert),
)
if err != nil {
return err
@@ -305,13 +306,14 @@ func BlockAbortCatchup(tx *sql.Tx) error {
}
// BlockPutStaging store a block into catchpoint staging table
-func BlockPutStaging(tx *sql.Tx, blk bookkeeping.Block) (err error) {
+func BlockPutStaging(tx *sql.Tx, blk bookkeeping.Block, cert agreement.Certificate) (err error) {
// insert the new entry
- _, err = tx.Exec("INSERT INTO catchpointblocks (rnd, proto, hdrdata, blkdata) VALUES (?, ?, ?, ?)",
+ _, err = tx.Exec("INSERT INTO catchpointblocks (rnd, proto, hdrdata, blkdata, certdata) VALUES (?, ?, ?, ?, ?)",
blk.Round(),
blk.CurrentProtocol,
protocol.Encode(&blk.BlockHeader),
protocol.Encode(&blk),
+ protocol.Encode(&cert),
)
if err != nil {
return err
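
Both staging INSERTs now persist the agreement certificate alongside the block. For illustration, a hypothetical companion reader (not in this diff) that decodes the new `certdata` column back into an `agreement.Certificate`; the function name and query are assumptions, though the column layout matches the INSERT statements above:

```go
package example

import (
	"database/sql"

	"github.com/algorand/go-algorand/agreement"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/protocol"
)

// certStaging fetches and decodes the staged certificate for a round.
func certStaging(tx *sql.Tx, rnd basics.Round) (cert agreement.Certificate, err error) {
	var buf []byte
	err = tx.QueryRow("SELECT certdata FROM catchpointblocks WHERE rnd=?", rnd).Scan(&buf)
	if err != nil {
		return
	}
	err = protocol.Decode(buf, &cert)
	return
}
```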
diff --git a/ledger/tracker.go b/ledger/tracker.go
index c5f3e8478..37fa6adf5 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -22,6 +22,7 @@ import (
"fmt"
"reflect"
"sync"
+ "sync/atomic"
"time"
"github.com/algorand/go-algorand/config"
@@ -175,6 +176,8 @@ type trackerRegistry struct {
// accountsWriting provides synchronization around the background writing of account balances.
accountsWriting sync.WaitGroup
+ // accountsCommitting is set while the tracker registry is writing accounts into the DB.
+ accountsCommitting atomic.Bool
// dbRound is always exactly accountsRound(),
// cached to avoid SQL queries.
@@ -196,8 +199,16 @@ type trackerRegistry struct {
lastFlushTime time.Time
cfg config.Local
+
+ // maxAccountDeltas is the maximum number of in-memory deltas stored by the trackers.
+ // When exceeded, the trackerRegistry will attempt to flush, and its Available() method will return false.
+ // Too many in-memory deltas could cause the node to run out of memory.
+ maxAccountDeltas uint64
}
+// defaultMaxAccountDeltas is a default value for maxAccountDeltas.
+const defaultMaxAccountDeltas = 256
+
// deferredCommitRange is used during the calls to produceCommittingTask, and used as a data structure
// to synchronize the various trackers and create uniformity around which rounds need to be persisted
// next.
@@ -258,6 +269,9 @@ type deferredCommitContext struct {
// Block hashes for the committed rounds range.
committedRoundDigests []crypto.Digest
+ // Consensus versions for the committed rounds range.
+ committedProtocolVersion []protocol.ConsensusVersion
+
// on catchpoint rounds, the transaction tail would fill up this field with the hash of the recent 1001 rounds
// of the txtail data. The catchpointTracker would be able to use that for calculating the catchpoint label.
txTailHash crypto.Digest
@@ -285,7 +299,7 @@ func (dcc deferredCommitContext) newBase() basics.Round {
return dcc.oldBase + basics.Round(dcc.offset)
}
-var errMissingAccountUpdateTracker = errors.New("initializeTrackerCaches : called without a valid accounts update tracker")
+var errMissingAccountUpdateTracker = errors.New("trackers replay : called without a valid accounts update tracker")
func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTracker, cfg config.Local) (err error) {
tr.mu.Lock()
@@ -293,18 +307,10 @@ func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTrack
tr.dbs = l.trackerDB()
tr.log = l.trackerLog()
- err = tr.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) {
- ar, err := tx.MakeAccountsReader()
- if err != nil {
- return err
- }
-
- tr.dbRound, err = ar.AccountsRound()
- return err
- })
-
- if err != nil {
- return err
+ tr.maxAccountDeltas = defaultMaxAccountDeltas
+ if cfg.MaxAcctLookback > tr.maxAccountDeltas {
+ tr.maxAccountDeltas = cfg.MaxAcctLookback + 1
+ tr.log.Infof("maxAccountDeltas was overridden to %d because of MaxAcctLookback=%d: this combination might use lots of RAM. To preserve some blocks in blockdb consider using MaxBlockHistoryLookback config option instead of MaxAcctLookback", tr.maxAccountDeltas, cfg.MaxAcctLookback)
}
tr.ctx, tr.ctxCancel = context.WithCancel(context.Background())
@@ -333,24 +339,38 @@ func (tr *trackerRegistry) initialize(l ledgerForTracker, trackers []ledgerTrack
}
func (tr *trackerRegistry) loadFromDisk(l ledgerForTracker) error {
+ var dbRound basics.Round
+ err := tr.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) {
+ ar, err0 := tx.MakeAccountsReader()
+ if err0 != nil {
+ return err0
+ }
+
+ dbRound, err0 = ar.AccountsRound()
+ return err0
+ })
+ if err != nil {
+ return err
+ }
+
tr.mu.RLock()
- dbRound := tr.dbRound
+ tr.dbRound = dbRound
tr.mu.RUnlock()
for _, lt := range tr.trackers {
- err := lt.loadFromDisk(l, dbRound)
- if err != nil {
+ err0 := lt.loadFromDisk(l, dbRound)
+ if err0 != nil {
// find the tracker name.
trackerName := reflect.TypeOf(lt).String()
- return fmt.Errorf("tracker %s failed to loadFromDisk : %w", trackerName, err)
+ return fmt.Errorf("tracker %s failed to loadFromDisk : %w", trackerName, err0)
}
}
- err := tr.replay(l)
- if err != nil {
- err = fmt.Errorf("initializeTrackerCaches failed : %w", err)
+ if err0 := tr.replay(l); err0 != nil {
+ return fmt.Errorf("trackers replay failed : %w", err0)
}
- return err
+
+ return nil
}
func (tr *trackerRegistry) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
@@ -456,6 +476,20 @@ func (tr *trackerRegistry) waitAccountsWriting() {
tr.accountsWriting.Wait()
}
+func (tr *trackerRegistry) isBehindCommittingDeltas(latest basics.Round) bool {
+ tr.mu.RLock()
+ dbRound := tr.dbRound
+ tr.mu.RUnlock()
+
+ numDeltas := uint64(latest.SubSaturate(dbRound))
+ if numDeltas < tr.maxAccountDeltas {
+ return false
+ }
+
+ // there is a large number of deltas; check whether commitSyncer is currently writing accounts
+ return tr.accountsCommitting.Load()
+}
+
func (tr *trackerRegistry) close() {
if tr.ctxCancel != nil {
tr.ctxCancel()
@@ -562,6 +596,11 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
start := time.Now()
ledgerCommitroundCount.Inc(nil)
err = tr.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
+ tr.accountsCommitting.Store(true)
+ defer func() {
+ tr.accountsCommitting.Store(false)
+ }()
+
aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
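
With `accountsCommitting` flipped around the commit transaction, `isBehindCommittingDeltas` reports true only when the delta count exceeds `maxAccountDeltas` while a flush is actually in flight. A hypothetical consumer sketch of the exported `Ledger.IsBehindCommittingDeltas`; the real consumer is the catchup service, and the polling loop below is an assumption for the example:

```go
package example

import (
	"time"

	"github.com/algorand/go-algorand/ledger"
)

// throttleFetch pauses block fetching while the trackers are both far behind
// and actively flushing, so in-memory deltas stay bounded.
func throttleFetch(l *ledger.Ledger) {
	for l.IsBehindCommittingDeltas() {
		time.Sleep(100 * time.Millisecond)
	}
}
```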
diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go
index 730c315e8..d16fb925c 100644
--- a/ledger/tracker_test.go
+++ b/ledger/tracker_test.go
@@ -150,80 +150,73 @@ func TestTrackerScheduleCommit(t *testing.T) {
a.Equal(expectedOffset, dc.offset)
}
-type ioErrorTracker struct {
+type emptyTracker struct {
}
-// loadFromDisk is not implemented in the blockingTracker.
-func (io *ioErrorTracker) loadFromDisk(ledgerForTracker, basics.Round) error {
+// loadFromDisk is not implemented in the emptyTracker.
+func (t *emptyTracker) loadFromDisk(ledgerForTracker, basics.Round) error {
return nil
}
-// newBlock is not implemented in the blockingTracker.
-func (io *ioErrorTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
+// newBlock is not implemented in the emptyTracker.
+func (t *emptyTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
}
-// committedUpTo in the blockingTracker just stores the committed round.
-func (io *ioErrorTracker) committedUpTo(committedRnd basics.Round) (minRound, lookback basics.Round) {
+// committedUpTo in the emptyTracker is a no-op that returns round zero.
+func (t *emptyTracker) committedUpTo(committedRnd basics.Round) (minRound, lookback basics.Round) {
return 0, basics.Round(0)
}
-func (io *ioErrorTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+func (t *emptyTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
return dcr
}
-// prepareCommit, is not used by the blockingTracker
-func (io *ioErrorTracker) prepareCommit(*deferredCommitContext) error {
+// prepareCommit is not used by the emptyTracker
+func (t *emptyTracker) prepareCommit(*deferredCommitContext) error {
return nil
}
-// commitRound is not used by the blockingTracker
-func (io *ioErrorTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error {
- return sqlite3.Error{Code: sqlite3.ErrIoErr}
+// commitRound is not used by the emptyTracker
+func (t *emptyTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error {
+ return nil
}
-func (io *ioErrorTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+func (t *emptyTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
}
// postCommitUnlocked implements entry/exit blockers, designed for testing.
-func (io *ioErrorTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
+func (t *emptyTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
-// control functions are not used by the blockingTracker
-func (io *ioErrorTracker) handleUnorderedCommit(dcc *deferredCommitContext) {
+// control functions are not used by the emptyTracker
+func (t *emptyTracker) handleUnorderedCommit(dcc *deferredCommitContext) {
}
-func (io *ioErrorTracker) handlePrepareCommitError(dcc *deferredCommitContext) {
+func (t *emptyTracker) handlePrepareCommitError(dcc *deferredCommitContext) {
}
-func (io *ioErrorTracker) handleCommitError(dcc *deferredCommitContext) {
+func (t *emptyTracker) handleCommitError(dcc *deferredCommitContext) {
}
-// close is not used by the blockingTracker
-func (io *ioErrorTracker) close() {
+// close is not used by the emptyTracker
+func (t *emptyTracker) close() {
}
-func (io *ioErrorTracker) reset() {
+type ioErrorTracker struct {
+ emptyTracker
+}
+
+// commitRound in the ioErrorTracker always fails with a disk I/O error
+func (io *ioErrorTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error {
+ return sqlite3.Error{Code: sqlite3.ErrIoErr}
}
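
The test refactor extracts `emptyTracker` as a no-op base that the other test trackers embed, overriding only the hooks they actually exercise instead of restating every `ledgerTracker` method. The embedding pattern in miniature, with a toy interface standing in for `ledgerTracker`:

```go
package main

import (
	"errors"
	"fmt"
)

type tracker interface {
	commitRound() error
	close()
}

// emptyTracker supplies no-op defaults for the whole interface.
type emptyTracker struct{}

func (emptyTracker) commitRound() error { return nil }
func (emptyTracker) close()             {}

// ioErrorTracker overrides commitRound only; close() is inherited.
type ioErrorTracker struct {
	emptyTracker
}

func (ioErrorTracker) commitRound() error { return errors.New("disk I/O error") }

func main() {
	var t tracker = ioErrorTracker{}
	fmt.Println(t.commitRound()) // disk I/O error
	t.close()                    // inherited no-op
}
```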
type producePrepareBlockingTracker struct {
+ emptyTracker
produceReleaseLock chan struct{}
prepareCommitEntryLock chan struct{}
prepareCommitReleaseLock chan struct{}
cancelTasks bool
}
-// loadFromDisk is not implemented in the blockingTracker.
-func (bt *producePrepareBlockingTracker) loadFromDisk(ledgerForTracker, basics.Round) error {
- return nil
-}
-
-// newBlock is not implemented in the blockingTracker.
-func (bt *producePrepareBlockingTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
-}
-
-// committedUpTo in the blockingTracker just stores the committed round.
-func (bt *producePrepareBlockingTracker) committedUpTo(committedRnd basics.Round) (minRound, lookback basics.Round) {
- return 0, basics.Round(0)
-}
-
func (bt *producePrepareBlockingTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
if bt.cancelTasks {
return nil
@@ -240,30 +233,6 @@ func (bt *producePrepareBlockingTracker) prepareCommit(*deferredCommitContext) e
return nil
}
-// commitRound is not used by the blockingTracker
-func (bt *producePrepareBlockingTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error {
- return nil
-}
-
-func (bt *producePrepareBlockingTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
-}
-
-// postCommitUnlocked implements entry/exit blockers, designed for testing.
-func (bt *producePrepareBlockingTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
-}
-
-// control functions are not used by the blockingTracker
-func (bt *producePrepareBlockingTracker) handleUnorderedCommit(dcc *deferredCommitContext) {
-}
-func (bt *producePrepareBlockingTracker) handlePrepareCommitError(dcc *deferredCommitContext) {
-}
-func (bt *producePrepareBlockingTracker) handleCommitError(dcc *deferredCommitContext) {
-}
-
-// close is not used by the blockingTracker
-func (bt *producePrepareBlockingTracker) close() {
-}
-
func (bt *producePrepareBlockingTracker) reset() {
bt.prepareCommitEntryLock = make(chan struct{})
bt.prepareCommitReleaseLock = make(chan struct{})
@@ -271,7 +240,18 @@ func (bt *producePrepareBlockingTracker) reset() {
bt.cancelTasks = false
}
-// TestTrackerDbRoundDataRace checks for dbRound data race
+type commitRoundStallingTracker struct {
+ emptyTracker
+ commitRoundLock chan struct{}
+}
+
+// commitRound in the commitRoundStallingTracker stalls until commitRoundLock is closed
+func (st *commitRoundStallingTracker) commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error {
+ <-st.commitRoundLock
+ return nil
+}
+
+// TestTrackers_DbRoundDataRace checks for dbRound data race
// when commit scheduling relies on dbRound from the tracker registry but tracker's deltas
// are used in calculations
// 1. Add say 128 + MaxAcctLookback (MaxLookback) blocks and commit
@@ -280,7 +260,7 @@ func (bt *producePrepareBlockingTracker) reset() {
// 4. Set a block in produceCommittingTask, add a new block and resume the commit
// 5. Resume produceCommittingTask
// 6. The data race and panic happen in the block queue syncer thread
-func TestTrackerDbRoundDataRace(t *testing.T) {
+func TestTrackers_DbRoundDataRace(t *testing.T) {
partitiontest.PartitionTest(t)
t.Skip("For manual run when touching ledger locking")
@@ -367,7 +347,7 @@ func TestTrackerDbRoundDataRace(t *testing.T) {
close(stallingTracker.produceReleaseLock)
}
-func TestCommitRoundIOError(t *testing.T) {
+func TestTrackers_CommitRoundIOError(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -414,7 +394,119 @@ func TestCommitRoundIOError(t *testing.T) {
a.True(flag.Load())
}
-func TestAccountUpdatesLedgerEvaluatorNoBlockHdr(t *testing.T) {
+// TestTrackers_BusyCommitting ensures trackerRegistry.accountsCommitting is set while commitRound is in progress
+func TestTrackers_BusyCommitting(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 1)
+ const inMem = true
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Warn)
+ cfg := config.GetDefaultLocal()
+ ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
+ a.NoError(err)
+ defer ledger.Close()
+
+ // quit the commitSyncer goroutine
+ ledger.trackers.ctxCancel()
+ ledger.trackers.ctxCancel = nil
+ <-ledger.trackers.commitSyncerClosed
+ ledger.trackers.commitSyncerClosed = nil
+
+ tracker := &commitRoundStallingTracker{
+ commitRoundLock: make(chan struct{}),
+ }
+ ledger.trackerMu.Lock()
+ ledger.trackers.mu.Lock()
+ ledger.trackers.trackers = append([]ledgerTracker{tracker}, ledger.trackers.trackers...)
+ ledger.trackers.lastFlushTime = time.Time{}
+ ledger.trackers.mu.Unlock()
+ ledger.trackerMu.Unlock()
+
+ // add some blocks
+ blk := genesisInitState.Block
+ for i := basics.Round(0); i < basics.Round(cfg.MaxAcctLookback)+1; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp++
+ ledger.trackers.newBlock(blk, ledgercore.StateDelta{})
+ }
+
+ // manually trigger a commit
+ ledger.trackers.committedUpTo(blk.BlockHeader.Round)
+ dcc := <-ledger.trackers.deferredCommits
+ go func() {
+ err = ledger.trackers.commitRound(dcc)
+ a.NoError(err)
+ }()
+
+ // commitRoundStallingTracker blocks commitRound in the goroutine above; wait a few seconds to ensure the trackerRegistry has set accountsCommitting
+ a.Eventually(func() bool {
+ return ledger.trackers.accountsCommitting.Load()
+ }, 3*time.Second, 50*time.Millisecond)
+ close(tracker.commitRoundLock)
+ ledger.trackers.waitAccountsWriting()
+ a.False(ledger.trackers.accountsCommitting.Load())
+}
+
+func TestTrackers_InitializeMaxAccountDeltas(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ accts := setupAccts(20)
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+ tr := trackerRegistry{}
+
+ cfg := config.GetDefaultLocal()
+ err := tr.initialize(ml, []ledgerTracker{}, cfg)
+ a.NoError(err)
+ // quit the commitSyncer goroutine
+ tr.ctxCancel()
+ tr.ctxCancel = nil
+ <-tr.commitSyncerClosed
+ tr.commitSyncerClosed = nil
+ a.Equal(uint64(defaultMaxAccountDeltas), tr.maxAccountDeltas)
+
+ cfg.MaxAcctLookback = defaultMaxAccountDeltas + 100
+ err = tr.initialize(ml, []ledgerTracker{}, cfg)
+ a.NoError(err)
+ // quit the commitSyncer goroutine
+ tr.ctxCancel()
+ tr.ctxCancel = nil
+ <-tr.commitSyncerClosed
+ tr.commitSyncerClosed = nil
+ a.Equal(cfg.MaxAcctLookback+1, tr.maxAccountDeltas)
+}
+
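The two assertions above pin down a simple sizing rule for the in-memory deltas bound. A hedged reconstruction (the constant's value is an assumption; the real initialize does more):

package tracker

const defaultMaxAccountDeltas = 256 // assumed value, for illustration only

// maxAccountDeltasFor reflects what the test asserts: at least the
// default, growing to MaxAcctLookback+1 when the configured lookback
// exceeds it.
func maxAccountDeltasFor(maxAcctLookback uint64) uint64 {
	if maxAcctLookback+1 > defaultMaxAccountDeltas {
		return maxAcctLookback + 1
	}
	return defaultMaxAccountDeltas
}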
+func TestTrackers_IsBehindCommittingDeltas(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ tr := trackerRegistry{
+ accts: &accountUpdates{},
+ maxAccountDeltas: defaultMaxAccountDeltas,
+ }
+
+ latest := basics.Round(0)
+ a.False(tr.isBehindCommittingDeltas(latest))
+
+ // no deltas but busy committing => not behind
+ tr.accountsCommitting.Store(true)
+ a.False(tr.isBehindCommittingDeltas(latest))
+ tr.accountsCommitting.Store(false)
+
+ // lots of deltas but not committing => not behind
+ latest = basics.Round(defaultMaxAccountDeltas + 10)
+ tr.dbRound = 0
+ a.False(tr.isBehindCommittingDeltas(latest))
+
+ // lots of deltas and committing => behind
+ tr.accountsCommitting.Store(true)
+ a.True(tr.isBehindCommittingDeltas(latest))
+}
+
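A hedged reconstruction of the predicate those three cases encode (names inferred from the test; the real method reads these values off the registry):

package tracker

// isBehind: behind only when a commit is in flight AND the span of
// uncommitted in-memory deltas exceeds maxAccountDeltas.
func isBehind(latest, dbRound, maxAccountDeltas uint64, committing bool) bool {
	return committing && latest-dbRound > maxAccountDeltas
}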
+func TestTrackers_AccountUpdatesLedgerEvaluatorNoBlockHdr(t *testing.T) {
partitiontest.PartitionTest(t)
aul := &accountUpdatesLedgerEvaluator{
diff --git a/libgoal/participation.go b/libgoal/participation.go
index f57629a36..302af5df9 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -18,15 +18,10 @@ package libgoal
import (
"fmt"
- "math"
- "os"
- "path/filepath"
-
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-algorand/libgoal/participation"
)
// chooseParticipation chooses which participation keys to use for going online
@@ -59,83 +54,14 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round)
return
}
-func participationKeysPath(dataDir string, address basics.Address, firstValid, lastValid basics.Round) (string, error) {
- // Build /<dataDir>/<genesisID>/<address>.<first_round>.<last_round>.partkey
- first := uint64(firstValid)
- last := uint64(lastValid)
- fileName := config.PartKeyFilename(address.String(), first, last)
- return filepath.Join(dataDir, fileName), nil
-}
-
// GenParticipationKeys creates a .partkey database for a given address, fills
// it with keys, and installs it in the right place
func (c *Client) GenParticipationKeys(address string, firstValid, lastValid, keyDilution uint64) (part account.Participation, filePath string, err error) {
- return c.GenParticipationKeysTo(address, firstValid, lastValid, keyDilution, "")
-}
-
-// GenParticipationKeysTo creates a .partkey database for a given address, fills
-// it with keys, and saves it in the specified output directory. If the output
-// directory is empty, the key will be installed.
-func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string) (part account.Participation, filePath string, err error) {
-
- install := outDir == ""
-
- // Parse the address
- parsedAddr, err := basics.UnmarshalChecksumAddress(address)
- if err != nil {
- return
- }
-
- firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid)
-
- // If we are installing, generate in the temp dir
- if install {
- outDir = os.TempDir()
- }
- // Connect to the database
- partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstRound, lastRound)
- if err != nil {
- return
- }
- _, err = os.Stat(partKeyPath)
- if err == nil {
- err = fmt.Errorf("ParticipationKeys exist for the range %d to %d", firstRound, lastRound)
- return
- } else if !os.IsNotExist(err) {
- err = fmt.Errorf("participation key file '%s' cannot be accessed : %w", partKeyPath, err)
- return
- }
-
- // If the key is being installed, remove it afterwards.
- if install {
- // Explicitly ignore any errors
- defer func(name string) {
- _ = os.Remove(name)
- }(partKeyPath)
- }
-
- partdb, err := db.MakeErasableAccessor(partKeyPath)
- if err != nil {
- return
- }
-
- if keyDilution == 0 {
- keyDilution = 1 + uint64(math.Sqrt(float64(lastRound-firstRound)))
- }
-
- // Fill the database with new participation keys
- newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution)
- part = newPart.Participation
- partdb.Close()
-
- if err != nil {
- return
- }
-
- if install {
- _, err = c.AddParticipationKey(partKeyPath)
+ installFunc := func(keyPath string) error {
+ _, err := c.AddParticipationKey(keyPath)
+ return err
}
- return part, partKeyPath, err
+ return participation.GenParticipationKeysTo(address, firstValid, lastValid, keyDilution, "", installFunc)
}
// ListParticipationKeys returns the available participation keys,
diff --git a/libgoal/participation/participation.go b/libgoal/participation/participation.go
new file mode 100644
index 000000000..2d1a30e8c
--- /dev/null
+++ b/libgoal/participation/participation.go
@@ -0,0 +1,104 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package participation
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+func participationKeysPath(dataDir string, address basics.Address, firstValid, lastValid basics.Round) (string, error) {
+ // Build /<dataDir>/<genesisID>/<address>.<first_round>.<last_round>.partkey
+ first := uint64(firstValid)
+ last := uint64(lastValid)
+ fileName := config.PartKeyFilename(address.String(), first, last)
+ return filepath.Join(dataDir, fileName), nil
+}
+
+// GenParticipationKeysTo creates a .partkey database for a given address, fills
+// it with keys, and saves it in the specified output directory. If the output
+// directory is empty, the key will be installed.
+func GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string, installFunc func(keyPath string) error) (part account.Participation, filePath string, err error) {
+
+ install := outDir == ""
+ if install && installFunc == nil {
+ return account.Participation{}, "", fmt.Errorf("must provide an install function when installing keys")
+ }
+
+ // Parse the address
+ parsedAddr, err := basics.UnmarshalChecksumAddress(address)
+ if err != nil {
+ return
+ }
+
+ firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid)
+
+ // If we are installing, generate in the temp dir
+ if install {
+ outDir = os.TempDir()
+ }
+ // Connect to the database
+ partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstRound, lastRound)
+ if err != nil {
+ return
+ }
+ _, err = os.Stat(partKeyPath)
+ if err == nil {
+ err = fmt.Errorf("ParticipationKeys exist for the range %d to %d", firstRound, lastRound)
+ return
+ } else if !os.IsNotExist(err) {
+ err = fmt.Errorf("participation key file '%s' cannot be accessed : %w", partKeyPath, err)
+ return
+ }
+
+ // If the key is being installed, remove it afterwards.
+ if install {
+ // Explicitly ignore any errors
+ defer func(name string) {
+ _ = os.Remove(name)
+ }(partKeyPath)
+ }
+
+ partdb, err := db.MakeErasableAccessor(partKeyPath)
+ if err != nil {
+ return
+ }
+
+ if keyDilution == 0 {
+ keyDilution = account.DefaultKeyDilution(firstRound, lastRound)
+ }
+
+ // Fill the database with new participation keys
+ newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution)
+ part = newPart.Participation
+ partdb.Close()
+
+ if err != nil {
+ return
+ }
+
+ if install {
+ err = installFunc(partKeyPath)
+ }
+ return part, partKeyPath, err
+}
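Since the helper now lives outside libgoal, callers without a Client can invoke it directly; a nil installFunc is valid whenever an explicit outDir is given. An illustrative sketch (mirrors the tests below):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/libgoal/participation"
)

func main() {
	var addr basics.Address
	addr[1] = 1 // any address works for a demo database
	outDir, err := os.MkdirTemp("", "partkeys")
	if err != nil {
		log.Fatal(err)
	}
	// keyDilution == 0 selects account.DefaultKeyDilution(first, last)
	part, path, err := participation.GenParticipationKeysTo(addr.String(), 1000, 2000, 0, outDir, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path, part.KeyDilution)
}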
diff --git a/libgoal/participation/participation_test.go b/libgoal/participation/participation_test.go
new file mode 100644
index 000000000..69565a780
--- /dev/null
+++ b/libgoal/participation/participation_test.go
@@ -0,0 +1,113 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package participation
+
+import (
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/test/partitiontest"
+
+ "github.com/stretchr/testify/require"
+
+ "testing"
+)
+
+func TestGenParticipationKeysTo_Install(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testcases := []struct {
+ name string
+ outDir string
+ installed bool
+ }{
+ {
+ name: "install",
+ installed: true,
+ },
+ {
+ name: "do not install",
+ outDir: t.TempDir(),
+ installed: false,
+ },
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ var err error
+ var called bool
+ installFunc := func(keyPath string) error {
+ called = true
+ return nil
+ }
+ var addr basics.Address
+ addr[1] = 1
+
+ _, _, err = GenParticipationKeysTo(addr.String(), 1000, 2000, 0, tc.outDir, installFunc)
+ require.NoError(t, err)
+ require.Equal(t, tc.installed, called, "The install function should only be called when outDir is not set.")
+ })
+ }
+}
+
+func TestGenParticipationKeysTo_DefaultKeyDilution(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var addr basics.Address
+ addr[1] = 1
+ first := uint64(1000)
+ last := uint64(2000)
+
+ testcases := []struct {
+ name string
+ dilution uint64
+ expected uint64
+ }{
+ {
+ name: "default",
+ dilution: 0,
+ expected: account.DefaultKeyDilution(basics.Round(first), basics.Round(last)),
+ }, {
+ name: "override",
+ dilution: 5,
+ expected: 5,
+ },
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ part, _, err := GenParticipationKeysTo(addr.String(), first, last, tc.dilution, t.TempDir(), nil)
+ require.NoError(t, err)
+ require.Equal(t, tc.expected, part.KeyDilution)
+ })
+ }
+}
+
+func TestBadInput(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ _, _, err := GenParticipationKeysTo("", 0, 0, 0, "", nil)
+ require.ErrorContains(t, err, "must provide an install function when installing keys")
+}
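account.DefaultKeyDilution replaces the formula the removed libgoal code computed inline; assuming it preserves that behavior, the equivalent is:

package participation

import "math"

// defaultKeyDilution mirrors the removed inline computation:
// 1 + sqrt(number of rounds the keys cover).
func defaultKeyDilution(firstValid, lastValid uint64) uint64 {
	return 1 + uint64(math.Sqrt(float64(lastValid-firstValid)))
}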
diff --git a/network/netidentity.go b/network/netidentity.go
index 6414c5e89..940ea0a63 100644
--- a/network/netidentity.go
+++ b/network/netidentity.go
@@ -20,7 +20,6 @@ import (
"encoding/base64"
"fmt"
"net/http"
- "sync/atomic"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/protocol"
@@ -329,7 +328,7 @@ func identityVerificationHandler(message IncomingMessage) OutgoingMessage {
peer := message.Sender.(*wsPeer)
// avoid doing work (crypto and potentially taking a lock) if the peer is already verified
- if atomic.LoadUint32(&peer.identityVerified) == 1 {
+ if peer.identityVerified.Load() == 1 {
return OutgoingMessage{}
}
localAddr, _ := peer.net.Address()
@@ -350,7 +349,7 @@ func identityVerificationHandler(message IncomingMessage) OutgoingMessage {
peer.log.With("remote", peer.OriginAddress()).With("local", localAddr).Warn("peer identity verification is incorrectly signed, disconnecting")
return OutgoingMessage{Action: Disconnect, reason: disconnectBadIdentityData}
}
- atomic.StoreUint32(&peer.identityVerified, 1)
+ peer.identityVerified.Store(1)
// if the identity could not be claimed by this peer, it means the identity is in use
wn.peersLock.Lock()
ok := wn.identityTracker.setIdentity(peer)
diff --git a/network/netidentity_test.go b/network/netidentity_test.go
index 13731aaae..9222da460 100644
--- a/network/netidentity_test.go
+++ b/network/netidentity_test.go
@@ -358,7 +358,8 @@ func TestIdentityTrackerSetIdentity(t *testing.T) {
// Tests that if a peer is already verified, the handler just returns OutgoingMessage{}
func TestIdentityTrackerHandlerGuard(t *testing.T) {
partitiontest.PartitionTest(t)
- p := wsPeer{identityVerified: uint32(1)}
+ p := wsPeer{}
+ p.identityVerified.Store(1)
msg := IncomingMessage{
Sender: &p,
Net: &WebsocketNetwork{},
diff --git a/network/netprio.go b/network/netprio.go
index 378bea4c0..5cb122c11 100644
--- a/network/netprio.go
+++ b/network/netprio.go
@@ -18,7 +18,6 @@ package network
import (
"container/heap"
- "sync/atomic"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
@@ -126,7 +125,7 @@ func (pt *prioTracker) setPriority(peer *wsPeer, addr basics.Address, weight uin
peer.prioAddress = addr
peer.prioWeight = weight
heap.Fix(peersHeap{wn}, peer.peerIndex)
- atomic.AddInt32(&wn.peersChangeCounter, 1)
+ wn.peersChangeCounter.Add(1)
}
func (pt *prioTracker) removePeer(peer *wsPeer) {
diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go
index f36b0d328..71188b256 100644
--- a/network/p2pNetwork.go
+++ b/network/p2pNetwork.go
@@ -60,12 +60,12 @@ type P2PNetwork struct {
broadcaster msgBroadcaster
wsPeers map[peer.ID]*wsPeer
wsPeersLock deadlock.RWMutex
- wsPeersChangeCounter int32
+ wsPeersChangeCounter atomic.Int32
wsPeersConnectivityCheckTicker *time.Ticker
}
type p2pPeerStats struct {
- txReceived uint64
+ txReceived atomic.Uint64
}
// NewP2PNetwork returns an instance of GossipNode that uses the p2p.Service
@@ -366,7 +366,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, peer peer.ID, stream n
n.wsPeersLock.Lock()
n.wsPeers[peer] = wsp
n.wsPeersLock.Unlock()
- atomic.AddInt32(&n.wsPeersChangeCounter, 1)
+ n.wsPeersChangeCounter.Add(1)
}
// peerRemoteClose called from wsPeer to report that it has closed
@@ -375,7 +375,7 @@ func (n *P2PNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) {
n.wsPeersLock.Lock()
delete(n.wsPeers, remotePeerID)
n.wsPeersLock.Unlock()
- atomic.AddInt32(&n.wsPeersChangeCounter, 1)
+ n.wsPeersChangeCounter.Add(1)
}
func (n *P2PNetwork) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) {
@@ -403,7 +403,7 @@ func (n *P2PNetwork) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) {
}
func (n *P2PNetwork) getPeersChangeCounter() int32 {
- return atomic.LoadInt32(&n.wsPeersChangeCounter)
+ return n.wsPeersChangeCounter.Load()
}
func (n *P2PNetwork) checkSlowWritingPeers() {}
@@ -453,10 +453,10 @@ func (n *P2PNetwork) txTopicValidator(ctx context.Context, peerID peer.ID, msg *
n.peerStatsMu.Lock()
peerStats, ok := n.peerStats[peerID]
if !ok {
- n.peerStats[peerID] = &p2pPeerStats{txReceived: 1}
- } else {
- peerStats.txReceived++
+ peerStats = &p2pPeerStats{}
+ n.peerStats[peerID] = peerStats
}
+ peerStats.txReceived.Add(1)
n.peerStatsMu.Unlock()
outmsg := n.handler.Handle(inmsg)
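The txTopicValidator change also fixes an asymmetry: the old code stored txReceived: 1 on first sight but incremented a plain field afterwards. The new shape fetches or creates the entry under the mutex and bumps the typed atomic on both paths. Reduced sketch:

package network

import (
	"sync"
	"sync/atomic"
)

type peerStats struct{ txReceived atomic.Uint64 }

type statsTable struct {
	mu sync.Mutex
	m  map[string]*peerStats
}

// bump fetch-or-creates the entry, then increments exactly once,
// whether the peer was already known or not.
func (t *statsTable) bump(id string) {
	t.mu.Lock()
	s, ok := t.m[id]
	if !ok {
		s = &peerStats{}
		t.m[id] = s
	}
	s.txReceived.Add(1)
	t.mu.Unlock()
}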
diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go
index 585d9aaa1..3b6d12759 100644
--- a/network/p2pNetwork_test.go
+++ b/network/p2pNetwork_test.go
@@ -104,7 +104,7 @@ func TestP2PSubmitTX(t *testing.T) {
if !ok {
return false
}
- return atomic.LoadUint64(&netCpeerStatsA.txReceived) == 10
+ return netCpeerStatsA.txReceived.Load() == 10
},
1*time.Second,
50*time.Millisecond,
@@ -153,12 +153,12 @@ func TestP2PSubmitWS(t *testing.T) {
// now we should be connected in a line: B <-> A <-> C where both B and C are connected to A but not each other
testTag := protocol.AgreementVoteTag
- var handlerCount uint32
+ var handlerCount atomic.Uint32
// Since we aren't using the transaction handler in this test, we need to register a pass-through handler
passThroughHandler := []TaggedMessageHandler{
{Tag: testTag, MessageHandler: HandlerFunc(func(msg IncomingMessage) OutgoingMessage {
- atomic.AddUint32(&handlerCount, 1)
+ handlerCount.Add(1)
return OutgoingMessage{Action: Broadcast}
})},
}
@@ -176,7 +176,7 @@ func TestP2PSubmitWS(t *testing.T) {
require.Eventually(
t,
func() bool {
- return atomic.LoadUint32(&handlerCount) == 20
+ return handlerCount.Load() == 20
},
1*time.Second,
50*time.Millisecond,
diff --git a/network/p2pPeer.go b/network/p2pPeer.go
index 7d788180e..343459d24 100644
--- a/network/p2pPeer.go
+++ b/network/p2pPeer.go
@@ -23,10 +23,12 @@ import (
"net"
"time"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/websocket"
"github.com/libp2p/go-libp2p/core/network"
yamux "github.com/libp2p/go-yamux/v4"
+ mnet "github.com/multiformats/go-multiaddr/net"
)
type wsPeerConnP2PImpl struct {
@@ -82,3 +84,11 @@ func (c *wsPeerConnP2PImpl) CloseWithoutFlush() error {
}
func (c *wsPeerConnP2PImpl) UnderlyingConn() net.Conn { return nil }
+
+func (c *wsPeerConnP2PImpl) RemoteAddr() net.Addr {
+ netaddr, err := mnet.ToNetAddr(c.stream.Conn().RemoteMultiaddr())
+ if err != nil {
+ logging.Base().Errorf("Error converting multiaddr to netaddr: %v", err)
+ }
+ return netaddr
+}
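mnet.ToNetAddr is the stock go-multiaddr helper for this conversion. A standalone illustration:

package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
	mnet "github.com/multiformats/go-multiaddr/net"
)

func main() {
	maddr, err := ma.NewMultiaddr("/ip4/192.0.2.1/tcp/4160")
	if err != nil {
		panic(err)
	}
	addr, err := mnet.ToNetAddr(maddr)
	fmt.Printf("%T %v %v\n", addr, addr, err) // *net.TCPAddr 192.0.2.1:4160 <nil>
}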
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 2cce632be..7f8b3046c 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -184,7 +184,7 @@ type WebsocketNetwork struct {
peersLock deadlock.RWMutex
peers []*wsPeer
- peersChangeCounter int32 // peersChangeCounter is an atomic variable that increases on each change to the peers. It helps avoiding taking the peersLock when checking if the peers list was modified.
+ peersChangeCounter atomic.Int32 // peersChangeCounter is an atomic variable that increases on each change to the peers. It helps avoid taking the peersLock when checking if the peers list was modified.
broadcaster msgBroadcaster
handler msgHandler
@@ -195,7 +195,7 @@ type WebsocketNetwork struct {
NetworkID protocol.NetworkID
RandomID string
- ready int32
+ ready atomic.Int32
readyChan chan struct{}
meshUpdateRequests chan meshRequest
@@ -244,7 +244,7 @@ type WebsocketNetwork struct {
lastNetworkAdvance time.Time
// number of throttled outgoing connections "slots" needed to be populated.
- throttledOutgoingConnections int32
+ throttledOutgoingConnections atomic.Int32
// transport and dialer are customized to limit the number of
// connection in compliance with connectionsRateLimitingCount.
@@ -262,7 +262,7 @@ type WebsocketNetwork struct {
// further changes.
messagesOfInterestEnc []byte
messagesOfInterestEncoded bool
- messagesOfInterestGeneration uint32
+ messagesOfInterestGeneration atomic.Uint32
// messagesOfInterestMu protects messagesOfInterest and ensures
// that messagesOfInterestEnc does not change once it is set during
@@ -279,7 +279,7 @@ type WebsocketNetwork struct {
nodeInfo NodeInfo
// atomic {0:unknown, 1:yes, 2:no}
- wantTXGossip uint32
+ wantTXGossip atomic.Uint32
// supportedProtocolVersions defines versions supported by this network.
// Should be used instead of a global network.SupportedProtocolVersions for network/peers configuration
@@ -591,7 +591,9 @@ func (wn *WebsocketNetwork) setup() {
wn.upgrader.EnableCompression = false
wn.lastPeerConnectionsSent = time.Now()
wn.router = mux.NewRouter()
- wn.router.Handle(GossipNetworkPath, wn)
+ if wn.config.EnableGossipService {
+ wn.router.Handle(GossipNetworkPath, wn)
+ }
wn.requestsTracker = makeRequestsTracker(wn.router, wn.log, wn.config)
if wn.config.EnableRequestLogger {
wn.requestsLogger = makeRequestLogger(wn.requestsTracker, wn.log)
@@ -606,7 +608,7 @@ func (wn *WebsocketNetwork) setup() {
wn.ctx, wn.ctxCancel = context.WithCancel(context.Background())
wn.relayMessages = wn.config.IsGossipServer() || wn.config.ForceRelayMessages
if wn.relayMessages || wn.config.ForceFetchTransactions {
- wn.wantTXGossip = wantTXGossipYes
+ wn.wantTXGossip.Store(wantTXGossipYes)
}
// roughly estimate the number of messages that could be seen at any given moment.
// For the late/redo/down committee, which happen in parallel, we need to allocate
@@ -667,7 +669,7 @@ func (wn *WebsocketNetwork) setup() {
wn.protocolVersion = ProtocolVersion
wn.messagesOfInterestRefresh = make(chan struct{}, 2)
- wn.messagesOfInterestGeneration = 1 // something nonzero so that any new wsPeer needs updating
+ wn.messagesOfInterestGeneration.Store(1) // something nonzero so that any new wsPeer needs updating
if wn.relayMessages {
wn.registerMessageInterest(protocol.StateProofSigTag)
}
@@ -694,13 +696,13 @@ func (wn *WebsocketNetwork) Start() {
// wrap the limited connection listener with a requests tracker listener
wn.listener = wn.requestsTracker.Listener(listener)
wn.log.Debugf("listening on %s", wn.listener.Addr().String())
- wn.throttledOutgoingConnections = int32(wn.config.GossipFanout / 2)
+ wn.throttledOutgoingConnections.Store(int32(wn.config.GossipFanout / 2))
} else {
// on non-relay, all the outgoing connections are throttled.
- wn.throttledOutgoingConnections = int32(wn.config.GossipFanout)
+ wn.throttledOutgoingConnections.Store(int32(wn.config.GossipFanout))
}
if wn.config.DisableOutgoingConnectionThrottling {
- wn.throttledOutgoingConnections = 0
+ wn.throttledOutgoingConnections.Store(0)
}
if wn.config.TLSCertFile != "" && wn.config.TLSKeyFile != "" {
wn.scheme = "https"
@@ -1009,6 +1011,11 @@ func (wn *WebsocketNetwork) GetHTTPRequestConnection(request *http.Request) (con
// ServeHTTP handles the gossip network functions over websockets
func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *http.Request) {
+ if !wn.config.EnableGossipService {
+ response.WriteHeader(http.StatusNotFound)
+ return
+ }
+
trackedRequest := wn.requestsTracker.GetTrackedRequest(request)
if wn.checkIncomingConnectionLimits(response, request, trackedRequest.remoteHost, trackedRequest.otherTelemetryGUID, trackedRequest.otherInstanceName) != http.StatusOK {
@@ -1084,7 +1091,7 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
version: matchingVersion,
identity: peerID,
identityChallenge: peerIDChallenge,
- identityVerified: 0,
+ identityVerified: atomic.Uint32{},
features: decodePeerFeatures(matchingVersion, request.Header.Get(PeerFeaturesHeader)),
}
peer.TelemetryGUID = trackedRequest.otherTelemetryGUID
@@ -1106,8 +1113,8 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
}
func (wn *WebsocketNetwork) maybeSendMessagesOfInterest(peer *wsPeer, messagesOfInterestEnc []byte) {
- messagesOfInterestGeneration := atomic.LoadUint32(&wn.messagesOfInterestGeneration)
- peerMessagesOfInterestGeneration := atomic.LoadUint32(&peer.messagesOfInterestGeneration)
+ messagesOfInterestGeneration := wn.messagesOfInterestGeneration.Load()
+ peerMessagesOfInterestGeneration := peer.messagesOfInterestGeneration.Load()
if peerMessagesOfInterestGeneration != messagesOfInterestGeneration {
if messagesOfInterestEnc == nil {
wn.messagesOfInterestMu.Lock()
@@ -1361,7 +1368,7 @@ func (wn *WebsocketNetwork) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) {
}
func (wn *WebsocketNetwork) getPeersChangeCounter() int32 {
- return atomic.LoadInt32(&wn.peersChangeCounter)
+ return wn.peersChangeCounter.Load()
}
// preparePeerData prepares batches of data for sending.
@@ -1779,12 +1786,12 @@ func (wn *WebsocketNetwork) getPeerConnectionTelemetryDetails(now time.Time, pee
ConnectionDuration: uint(now.Sub(peer.createTime).Seconds()),
TelemetryGUID: peer.TelemetryGUID,
InstanceName: peer.InstanceName,
- DuplicateFilterCount: atomic.LoadUint64(&peer.duplicateFilterCount),
- TXCount: atomic.LoadUint64(&peer.txMessageCount),
- MICount: atomic.LoadUint64(&peer.miMessageCount),
- AVCount: atomic.LoadUint64(&peer.avMessageCount),
- PPCount: atomic.LoadUint64(&peer.ppMessageCount),
- UNKCount: atomic.LoadUint64(&peer.unkMessageCount),
+ DuplicateFilterCount: peer.duplicateFilterCount.Load(),
+ TXCount: peer.txMessageCount.Load(),
+ MICount: peer.miMessageCount.Load(),
+ AVCount: peer.avMessageCount.Load(),
+ PPCount: peer.ppMessageCount.Load(),
+ UNKCount: peer.unkMessageCount.Load(),
}
if tcpInfo, err := peer.GetUnderlyingConnTCPInfo(); err == nil && tcpInfo != nil {
connDetail.TCP = *tcpInfo
@@ -1822,7 +1829,7 @@ func (wn *WebsocketNetwork) prioWeightRefresh() {
return
}
- if curPeersChangeCounter := atomic.LoadInt32(&wn.peersChangeCounter); curPeersChangeCounter != lastPeersChangeCounter {
+ if curPeersChangeCounter := wn.peersChangeCounter.Load(); curPeersChangeCounter != lastPeersChangeCounter {
peers, lastPeersChangeCounter = wn.peerSnapshot(peers)
}
@@ -2141,10 +2148,10 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
}
throttledConnection := false
- if atomic.AddInt32(&wn.throttledOutgoingConnections, int32(-1)) >= 0 {
+ if wn.throttledOutgoingConnections.Add(int32(-1)) >= 0 {
throttledConnection = true
} else {
- atomic.AddInt32(&wn.throttledOutgoingConnections, int32(1))
+ wn.throttledOutgoingConnections.Add(int32(1))
}
peer := &wsPeer{
@@ -2164,7 +2171,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
// if there is a final verification message to send, it means this peer has a verified identity,
// attempt to set the peer and identityTracker
if len(idVerificationMessage) > 0 {
- atomic.StoreUint32(&peer.identityVerified, uint32(1))
+ peer.identityVerified.Store(uint32(1))
wn.peersLock.Lock()
ok := wn.identityTracker.setIdentity(peer)
wn.peersLock.Unlock()
@@ -2311,10 +2318,10 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
telemetryspec.DisconnectPeerEventDetails{
PeerEventDetails: eventDetails,
Reason: string(reason),
- TXCount: atomic.LoadUint64(&peer.txMessageCount),
- MICount: atomic.LoadUint64(&peer.miMessageCount),
- AVCount: atomic.LoadUint64(&peer.avMessageCount),
- PPCount: atomic.LoadUint64(&peer.ppMessageCount),
+ TXCount: peer.txMessageCount.Load(),
+ MICount: peer.miMessageCount.Load(),
+ AVCount: peer.avMessageCount.Load(),
+ PPCount: peer.ppMessageCount.Load(),
})
peers.Set(uint64(wn.NumPeers()))
@@ -2328,9 +2335,9 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
wn.prioTracker.removePeer(peer)
wn.identityTracker.removeIdentity(peer)
if peer.throttledOutgoingConnection {
- atomic.AddInt32(&wn.throttledOutgoingConnections, int32(1))
+ wn.throttledOutgoingConnections.Add(int32(1))
}
- atomic.AddInt32(&wn.peersChangeCounter, 1)
+ wn.peersChangeCounter.Add(1)
}
wn.countPeersSetGauges()
}
@@ -2339,7 +2346,7 @@ func (wn *WebsocketNetwork) addPeer(peer *wsPeer) {
wn.peersLock.Lock()
defer wn.peersLock.Unlock()
// guard against peers which are closed or closing
- if atomic.LoadInt32(&peer.didSignalClose) == 1 {
+ if peer.didSignalClose.Load() == 1 {
networkPeerAlreadyClosed.Inc(nil)
wn.log.Debugf("peer closing %s", peer.conn.RemoteAddrString())
return
@@ -2354,15 +2361,15 @@ func (wn *WebsocketNetwork) addPeer(peer *wsPeer) {
}
heap.Push(peersHeap{wn}, peer)
wn.prioTracker.setPriority(peer, peer.prioAddress, peer.prioWeight)
- atomic.AddInt32(&wn.peersChangeCounter, 1)
+ wn.peersChangeCounter.Add(1)
wn.countPeersSetGauges()
if len(wn.peers) >= wn.config.GossipFanout {
// we have a quorum of connected peers, if we weren't ready before, we are now
- if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) {
+ if wn.ready.CompareAndSwap(0, 1) {
wn.log.Debug("ready")
close(wn.readyChan)
}
- } else if atomic.LoadInt32(&wn.ready) == 0 {
+ } else if wn.ready.Load() == 0 {
// but if we're not ready in a minute, call whatever peers we've got as good enough
wn.wg.Add(1)
go wn.eventualReady()
@@ -2375,7 +2382,7 @@ func (wn *WebsocketNetwork) eventualReady() {
select {
case <-wn.ctx.Done():
case <-minute.C:
- if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) {
+ if wn.ready.CompareAndSwap(0, 1) {
wn.log.Debug("ready")
close(wn.readyChan)
}
@@ -2452,7 +2459,7 @@ func (wn *WebsocketNetwork) updateMessagesOfInterestEnc() {
// must run inside wn.messagesOfInterestMu.Lock
wn.messagesOfInterestEnc = MarshallMessageOfInterestMap(wn.messagesOfInterest)
wn.messagesOfInterestEncoded = true
- atomic.AddUint32(&wn.messagesOfInterestGeneration, 1)
+ wn.messagesOfInterestGeneration.Add(1)
var peers []*wsPeer
peers, _ = wn.peerSnapshot(peers)
wn.log.Infof("updateMessagesOfInterestEnc maybe sending messagesOfInterest %v", wn.messagesOfInterest)
@@ -2466,14 +2473,14 @@ func (wn *WebsocketNetwork) postMessagesOfInterestThread() {
<-wn.messagesOfInterestRefresh
// if we're not a relay, and not participating, we don't need txn pool
wantTXGossip := wn.nodeInfo.IsParticipating()
- if wantTXGossip && (wn.wantTXGossip != wantTXGossipYes) {
+ if wantTXGossip && (wn.wantTXGossip.Load() != wantTXGossipYes) {
wn.log.Infof("postMessagesOfInterestThread: enabling TX gossip")
wn.registerMessageInterest(protocol.TxnTag)
- atomic.StoreUint32(&wn.wantTXGossip, wantTXGossipYes)
- } else if !wantTXGossip && (wn.wantTXGossip != wantTXGossipNo) {
+ wn.wantTXGossip.Store(wantTXGossipYes)
+ } else if !wantTXGossip && (wn.wantTXGossip.Load() != wantTXGossipNo) {
wn.log.Infof("postMessagesOfInterestThread: disabling TX gossip")
wn.DeregisterMessageInterest(protocol.TxnTag)
- atomic.StoreUint32(&wn.wantTXGossip, wantTXGossipNo)
+ wn.wantTXGossip.Store(wantTXGossipNo)
}
}
}
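The two EnableGossipService hunks cooperate: setup() never registers the gossip route when the flag is off, and ServeHTTP answers 404 for anything that still reaches it. A toy reconstruction of the observable effect (literal path instead of the real route template):

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

func main() {
	const enableGossipService = false // stand-in for config.EnableGossipService
	mux := http.NewServeMux()
	if enableGossipService {
		mux.HandleFunc("/v1/test/gossip", func(http.ResponseWriter, *http.Request) {})
	}
	srv := httptest.NewServer(mux)
	defer srv.Close()
	resp, err := http.Get(srv.URL + "/v1/test/gossip")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 404: the route was never registered
}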
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 099b04e89..05e484843 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -173,7 +173,7 @@ type messageCounterHandler struct {
// For deterministically simulating slow handlers, block until test code says to go.
release sync.Cond
- shouldWait int32
+ shouldWait atomic.Int32
waitcount int
}
@@ -186,7 +186,7 @@ func (mch *messageCounterHandler) Handle(message IncomingMessage) OutgoingMessag
dnanos := now - sent
mch.t.Logf("msg trans time %dns", dnanos)
}
- if atomic.LoadInt32(&mch.shouldWait) > 0 {
+ if mch.shouldWait.Load() > 0 {
mch.waitcount++
mch.release.Wait()
mch.waitcount--
@@ -628,6 +628,10 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
noAddressConfig := defaultConfig
noAddressConfig.NetAddress = ""
+ // enable services even though NetAddress is not set (to assert they don't override NetAddress)
+ noAddressConfig.EnableGossipService = true
+ noAddressConfig.EnableBlockService = true
+ noAddressConfig.EnableLedgerService = true
netB := makeTestWebsocketNodeWithConfig(t, noAddressConfig)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -636,6 +640,12 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
defer netStop(t, netB, "B")
+
+ // assert addrB is not listening
+ addrB, postListenB := netB.Address()
+ require.False(t, postListenB)
+ require.Empty(t, addrB)
+
counter := newMessageCounter(t, 2)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -656,6 +666,29 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
}
}
+func TestWebsocketNetworkNoGossipService(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ config := defaultConfig
+ config.EnableGossipService = false
+ netA := makeTestWebsocketNodeWithConfig(t, config)
+ netA.Start()
+ defer netStop(t, netA, "A")
+
+ // assert that the network was started and is listening
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+
+ // make HTTP request to gossip service and assert 404
+ var resp *http.Response
+ require.Eventually(t, func() bool {
+ var err error
+ resp, err = http.Get(fmt.Sprintf("%s/v1/%s/gossip", addrA, genesisID))
+ return err == nil
+ }, 2*time.Second, 100*time.Millisecond)
+ require.Equal(t, http.StatusNotFound, resp.StatusCode)
+}
+
func lineNetwork(t *testing.T, numNodes int) (nodes []*WebsocketNetwork, counters []messageCounterHandler) {
nodes = make([]*WebsocketNetwork, numNodes)
counters = make([]messageCounterHandler, numNodes)
@@ -779,7 +812,8 @@ func TestSlowHandlers(t *testing.T) {
slowTag := protocol.Tag("sl")
fastTag := protocol.Tag("fa")
- slowCounter := messageCounterHandler{shouldWait: 1}
+ slowCounter := messageCounterHandler{}
+ slowCounter.shouldWait.Store(1)
slowCounter.release.L = &slowCounter.lock
fastCounter := messageCounterHandler{target: incomingThreads}
fastCounter.done = make(chan struct{})
@@ -856,7 +890,8 @@ func TestFloodingPeer(t *testing.T) {
t.Skip("flaky test")
slowTag := protocol.Tag("sl")
fastTag := protocol.Tag("fa")
- slowCounter := messageCounterHandler{shouldWait: 1}
+ slowCounter := messageCounterHandler{}
+ slowCounter.shouldWait.Store(1)
slowCounter.release.L = &slowCounter.lock
fastCounter := messageCounterHandler{}
slowHandler := TaggedMessageHandler{Tag: slowTag, MessageHandler: &slowCounter}
@@ -903,7 +938,7 @@ func TestFloodingPeer(t *testing.T) {
defer cancel()
defer func() {
t.Log("release slow handlers")
- atomic.StoreInt32(&slowCounter.shouldWait, 0)
+ slowCounter.shouldWait.Store(0)
slowCounter.Broadcast()
}()
@@ -929,7 +964,7 @@ func TestFloodingPeer(t *testing.T) {
}
func peerIsClosed(peer *wsPeer) bool {
- return atomic.LoadInt32(&peer.didInnerClose) != 0
+ return peer.didInnerClose.Load() != 0
}
func avgSendBufferHighPrioLength(wn *WebsocketNetwork) float64 {
@@ -2560,7 +2595,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
}
// modify the peer on netA and
beforeLoopTime := time.Now()
- atomic.StoreInt64(&peer.intermittentOutgoingMessageEnqueueTime, beforeLoopTime.Add(-maxMessageQueueDuration).Add(time.Second).UnixNano())
+ peer.intermittentOutgoingMessageEnqueueTime.Store(beforeLoopTime.Add(-maxMessageQueueDuration).Add(time.Second).UnixNano())
// wait up to 10 seconds for the monitor to figure out it needs to disconnect.
expire = beforeLoopTime.Add(2 * slowWritingPeerMonitorInterval)
for {
@@ -2875,7 +2910,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
msgCounters := make(map[protocol.Tag]int)
expectedCounts := make(map[protocol.Tag]int)
expectedCounts[ft2] = 5
- var failed uint32
+ var failed atomic.Uint32
messageArriveWg := sync.WaitGroup{}
msgHandler := func(msg IncomingMessage) (out OutgoingMessage) {
t.Logf("A->B %s", msg.Tag)
@@ -2883,7 +2918,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
defer incomingMsgSync.Unlock()
expected := expectedCounts[msg.Tag]
if expected < 1 {
- atomic.StoreUint32(&failed, 1)
+ failed.Store(1)
t.Logf("UNEXPECTED A->B %s", msg.Tag)
return
}
@@ -2931,7 +2966,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
messageArriveWg.Add(5) // we're expecting exactly 5 messages.
// send 5 messages of few types.
for i := 0; i < 5; i++ {
- if atomic.LoadUint32(&failed) != 0 {
+ if failed.Load() != 0 {
t.Errorf("failed")
break
}
@@ -2940,7 +2975,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
netA.Broadcast(context.Background(), ft2, []byte{0, 1, 2, 3, 4}, true, nil)
netA.Broadcast(context.Background(), ft4, []byte{0, 1, 2, 3, 4}, true, nil) // NOT in MOI
}
- if atomic.LoadUint32(&failed) != 0 {
+ if failed.Load() != 0 {
t.Errorf("failed")
}
// wait until all the expected messages arrive.
@@ -2949,7 +2984,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
defer incomingMsgSync.Unlock()
require.Equal(t, 1, len(msgCounters))
for tag, count := range msgCounters {
- if atomic.LoadUint32(&failed) != 0 {
+ if failed.Load() != 0 {
t.Errorf("failed")
break
}
@@ -3155,7 +3190,7 @@ func TestWebsocketNetworkTXMessageOfInterestNPN(t *testing.T) {
netB.Start()
defer netStop(t, netB, "B")
require.False(t, netB.relayMessages)
- require.Equal(t, uint32(wantTXGossipUnk), atomic.LoadUint32(&netB.wantTXGossip))
+ require.Equal(t, uint32(wantTXGossipUnk), netB.wantTXGossip.Load())
incomingMsgSync := deadlock.Mutex{}
msgCounters := make(map[protocol.Tag]int)
@@ -3197,12 +3232,12 @@ func TestWebsocketNetworkTXMessageOfInterestNPN(t *testing.T) {
netB.OnNetworkAdvance()
waitForMOIRefreshQuiet(netB)
for i := 0; i < 100; i++ {
- if atomic.LoadUint32(&netB.wantTXGossip) == uint32(wantTXGossipNo) {
+ if netB.wantTXGossip.Load() == uint32(wantTXGossipNo) {
break
}
time.Sleep(10 * time.Millisecond)
}
- require.Equal(t, uint32(wantTXGossipNo), atomic.LoadUint32(&netB.wantTXGossip))
+ require.Equal(t, uint32(wantTXGossipNo), netB.wantTXGossip.Load())
// send another message which we can track, so that we'll know that the first message was delivered.
netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
messageFilterArriveWg.Wait()
@@ -3260,7 +3295,7 @@ func TestWebsocketNetworkTXMessageOfInterestPN(t *testing.T) {
netB.Start()
defer netStop(t, netB, "B")
require.False(t, netB.relayMessages)
- require.Equal(t, uint32(wantTXGossipUnk), atomic.LoadUint32(&netB.wantTXGossip))
+ require.Equal(t, uint32(wantTXGossipUnk), netB.wantTXGossip.Load())
incomingMsgSync := deadlock.Mutex{}
msgCounters := make(map[protocol.Tag]int)
@@ -3302,12 +3337,12 @@ func TestWebsocketNetworkTXMessageOfInterestPN(t *testing.T) {
netB.OnNetworkAdvance()
waitForMOIRefreshQuiet(netB)
for i := 0; i < 100; i++ {
- if atomic.LoadUint32(&netB.wantTXGossip) == uint32(wantTXGossipYes) {
+ if netB.wantTXGossip.Load() == uint32(wantTXGossipYes) {
break
}
time.Sleep(10 * time.Millisecond)
}
- require.Equal(t, uint32(wantTXGossipYes), atomic.LoadUint32(&netB.wantTXGossip))
+ require.Equal(t, uint32(wantTXGossipYes), netB.wantTXGossip.Load())
// send another message which we can track, so that we'll know that the first message was delivered.
netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
messageFilterArriveWg.Wait()
@@ -3390,9 +3425,9 @@ func testWebsocketDisconnection(t *testing.T, disconnectFunc func(wn *WebsocketN
return
}
- var msgCounterNetB uint32
+ var msgCounterNetB atomic.Uint32
msgHandlerB := func(msg IncomingMessage) (out OutgoingMessage) {
- if atomic.AddUint32(&msgCounterNetB, 1) == 5 {
+ if msgCounterNetB.Add(1) == 5 {
// disconnect
disconnectFunc(netB, &out)
} else {
@@ -3925,7 +3960,7 @@ func TestTryConnectEarlyWrite(t *testing.T) {
p := netA.peers[0]
var messageCount uint64
for x := 0; x < 1000; x++ {
- messageCount = atomic.LoadUint64(&p.miMessageCount)
+ messageCount = p.miMessageCount.Load()
if messageCount == 1 {
break
}
@@ -3934,8 +3969,8 @@ func TestTryConnectEarlyWrite(t *testing.T) {
// Confirm that we successfully received a message of interest
assert.Len(t, netA.peers, 1)
- fmt.Printf("MI Message Count: %v\n", netA.peers[0].miMessageCount)
- assert.Equal(t, uint64(1), netA.peers[0].miMessageCount)
+ fmt.Printf("MI Message Count: %v\n", netA.peers[0].miMessageCount.Load())
+ assert.Equal(t, uint64(1), netA.peers[0].miMessageCount.Load())
}
// Test functionality that allows a node to discard a block response that it did not request or that arrived too late.
@@ -4030,7 +4065,7 @@ func TestDiscardUnrequestedBlockResponse(t *testing.T) {
500*time.Millisecond,
20*time.Millisecond,
)
- require.Equal(t, atomic.LoadInt64(&netC.peers[0].outstandingTopicRequests), int64(1))
+ require.Equal(t, netC.peers[0].outstandingTopicRequests.Load(), int64(1))
// Create a buffer to monitor log output from netC
logBuffer := bytes.NewBuffer(nil)
@@ -4040,7 +4075,7 @@ func TestDiscardUnrequestedBlockResponse(t *testing.T) {
netA.peers[0].sendBufferBulk <- sendMessages{msgs: msg}
require.Eventually(
t,
- func() bool { return atomic.LoadInt64(&netC.peers[0].outstandingTopicRequests) == int64(0) },
+ func() bool { return netC.peers[0].outstandingTopicRequests.Load() == int64(0) },
500*time.Millisecond,
20*time.Millisecond,
)
@@ -4422,7 +4457,7 @@ func TestSendMessageCallbacks(t *testing.T) {
netA, netB, _, closeFunc := setupWebsocketNetworkAB(t, 2)
defer closeFunc()
- var counter uint64
+ var counter atomic.Uint64
require.NotZero(t, netA.NumPeers())
// peerB is netA's representation of netB and vice versa
@@ -4436,10 +4471,10 @@ func TestSendMessageCallbacks(t *testing.T) {
// and goes through the actual response code path to generate and send TS responses to netB
for i := 0; i < 100; i++ {
randInt := crypto.RandUint64()%(128) + 1
- atomic.AddUint64(&counter, randInt)
+ counter.Add(randInt)
topic := MakeTopic("val", []byte("blah"))
callback := func() {
- atomic.AddUint64(&counter, ^uint64(randInt-1))
+ counter.Add(^uint64(randInt - 1))
}
msg := IncomingMessage{Sender: peerB, Tag: protocol.UniEnsBlockReqTag}
peerB.Respond(context.Background(), msg, OutgoingMessage{OnRelease: callback, Topics: Topics{topic}})
@@ -4448,14 +4483,14 @@ func TestSendMessageCallbacks(t *testing.T) {
// of outstanding TS requests below 0. This will be true because we never made any UE block requests, we only
// simulated them by manually creating an IncomingMessage with the UE tag in the loop above
require.Eventually(t,
- func() bool { return atomic.LoadInt64(&peerA.outstandingTopicRequests) < 0 },
+ func() bool { return peerA.outstandingTopicRequests.Load() < 0 },
500*time.Millisecond,
25*time.Millisecond,
)
// confirm that the test counter decrements down to zero correctly through callbacks
require.Eventually(t,
- func() bool { return atomic.LoadUint64(&counter) == uint64(0) },
+ func() bool { return counter.Load() == uint64(0) },
500*time.Millisecond,
25*time.Millisecond,
)
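The callback bookkeeping in TestSendMessageCallbacks subtracts by adding the two's complement, since atomic counters expose Add but no Sub. Tiny demonstration:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var c atomic.Uint64
	c.Add(5)
	n := uint64(3)
	c.Add(^(n - 1))       // adds -n modulo 2^64, i.e. subtracts n
	fmt.Println(c.Load()) // 2
}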
diff --git a/network/wsPeer.go b/network/wsPeer.go
index 56f2b6a4f..55dba8e56 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -120,6 +120,7 @@ var defaultSendMessageTags = map[protocol.Tag]bool{
// interface allows substituting debug implementation for *websocket.Conn
type wsPeerWebsocketConn interface {
+ RemoteAddr() net.Addr
RemoteAddrString() string
NextReader() (int, io.Reader, error)
WriteMessage(int, []byte) error
@@ -202,27 +203,24 @@ type wsPeer struct {
// lastPacketTime contains the UnixNano at the last time a successful communication was made with the peer.
// "successful communication" above refers to either reading from or writing to a connection without receiving any
// error.
- // we want this to be a 64-bit aligned for atomics support on 32bit platforms.
- lastPacketTime int64
+ lastPacketTime atomic.Int64
// outstandingTopicRequests is an atomic counter for the number of outstanding block requests we've made out to this peer
// if a peer sends more blocks than we've requested, we'll disconnect from it.
- outstandingTopicRequests int64
+ outstandingTopicRequests atomic.Int64
// intermittentOutgoingMessageEnqueueTime contains the UnixNano of the message's enqueue time that is currently being written to the
// peer, or zero if no message is being written.
- intermittentOutgoingMessageEnqueueTime int64
+ intermittentOutgoingMessageEnqueueTime atomic.Int64
// Nonce used to uniquely identify requests
- requestNonce uint64
+ requestNonce atomic.Uint64
// duplicateFilterCount counts how many times the remote peer has sent us a message hash
// to filter that it had already sent before.
- // this needs to be 64-bit aligned for use with atomic.AddUint64 on 32-bit platforms.
- duplicateFilterCount uint64
+ duplicateFilterCount atomic.Uint64
- // These message counters need to be 64-bit aligned as well.
- txMessageCount, miMessageCount, ppMessageCount, avMessageCount, unkMessageCount uint64
+ txMessageCount, miMessageCount, ppMessageCount, avMessageCount, unkMessageCount atomic.Uint64
wsPeerCore
@@ -239,8 +237,8 @@ type wsPeer struct {
wg sync.WaitGroup
- didSignalClose int32
- didInnerClose int32
+ didSignalClose atomic.Int32
+ didInnerClose atomic.Int32
TelemetryGUID string
InstanceName string
@@ -262,7 +260,7 @@ type wsPeer struct {
// the peer's identity key which it uses for identityChallenge exchanges
identity crypto.PublicKey
- identityVerified uint32
+ identityVerified atomic.Uint32
// the identityChallenge is recorded to the peer so it may verify its identity at a later time
identityChallenge identityChallengeValue
@@ -292,7 +290,7 @@ type wsPeer struct {
sendMessageTag map[protocol.Tag]bool
// messagesOfInterestGeneration is this node's messagesOfInterest version that we have seen to this peer.
- messagesOfInterestGeneration uint32
+ messagesOfInterestGeneration atomic.Uint32
// connMonitor used to measure the relative performance of the connection
// compared to the other outgoing connections. Incoming connections would have this
@@ -324,6 +322,12 @@ type HTTPPeer interface {
GetHTTPClient() *http.Client
}
+// IPAddressable is addressable with either an IPv4 or an IPv6 address
+type IPAddressable interface {
+ IPAddr() []byte
+ RoutingAddr() []byte
+}
+
// UnicastPeer is another possible interface for the opaque Peer.
// It is possible that we can only initiate a connection to a peer over websockets.
type UnicastPeer interface {
@@ -372,6 +376,45 @@ func (wp *wsPeer) Version() string {
return wp.version
}
+func (wp *wsPeer) IPAddr() []byte {
+ remote := wp.conn.RemoteAddr()
+ if remote == nil {
+ return nil
+ }
+ ip := remote.(*net.TCPAddr).IP
+ result := ip.To4()
+ if result == nil {
+ result = ip.To16()
+ }
+ return result
+}
+
+// RoutingAddr returns the meaningful routing part of the address:
+// ipv4 for ipv4 addresses
+// top 8 bytes of ipv6 for ipv6 addresses
+// low 4 bytes for ipv4 embedded into ipv6
+// see http://www.tcpipguide.com/free/t_IPv6IPv4AddressEmbedding.htm for details.
+func (wp *wsPeer) RoutingAddr() []byte {
+ isZeros := func(ip []byte) bool {
+ for i := 0; i < len(ip); i++ {
+ if ip[i] != 0 {
+ return false
+ }
+ }
+ return true
+ }
+
+ ip := wp.IPAddr()
+ if len(ip) != net.IPv6len {
+ return ip
+ }
+ // ipv6, check if it's ipv4 embedded
+ if isZeros(ip[0:10]) {
+ return ip[12:16]
+ }
+ return ip[0:8]
+}
+
// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent.
// (Implements UnicastPeer)
func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error {
@@ -457,7 +500,7 @@ func (wp *wsPeer) init(config config.Local, sendBufferLength int) {
wp.closing = make(chan struct{})
wp.sendBufferHighPrio = make(chan sendMessages, sendBufferLength)
wp.sendBufferBulk = make(chan sendMessages, sendBufferLength)
- atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano())
+ wp.lastPacketTime.Store(time.Now().UnixNano())
wp.responseChannels = make(map[uint64]chan *Response)
wp.sendMessageTag = defaultSendMessageTags
wp.clientDataStore = make(map[string]interface{})
@@ -487,7 +530,7 @@ func (wp *wsPeer) OriginAddress() string {
func (wp *wsPeer) reportReadErr(err error) {
// only report error if we haven't already closed the peer
- if atomic.LoadInt32(&wp.didInnerClose) == 0 {
+ if wp.didInnerClose.Load() == 0 {
_, _, line, _ := runtime.Caller(1)
wp.log.Warnf("peer[%s] line=%d read err: %s", wp.conn.RemoteAddrString(), line, err)
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "reader err"})
@@ -541,10 +584,10 @@ func (wp *wsPeer) readLoop() {
// Skip the message if it's a response to a request we didn't make or has timed out
if msg.Tag == protocol.TopicMsgRespTag && wp.lenResponseChannels() == 0 {
- atomic.AddInt64(&wp.outstandingTopicRequests, -1)
+ wp.outstandingTopicRequests.Add(-1)
// This peer has sent us more responses than we have requested. This is a protocol violation and we should disconnect.
- if atomic.LoadInt64(&wp.outstandingTopicRequests) < 0 {
+ if wp.outstandingTopicRequests.Load() < 0 {
wp.log.Errorf("wsPeer readloop: peer %s sent TS response without a request", wp.conn.RemoteAddrString())
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "unrequestedTS"})
cleanupCloseError = disconnectUnexpectedTopicResp
@@ -578,7 +621,7 @@ func (wp *wsPeer) readLoop() {
return
}
msg.Net = wp.net
- atomic.StoreInt64(&wp.lastPacketTime, msg.Received)
+ wp.lastPacketTime.Store(msg.Received)
networkReceivedBytesTotal.AddUint64(uint64(len(msg.Data)+2), nil)
networkMessageReceivedTotal.AddUint64(1, nil)
networkReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2))
@@ -594,7 +637,7 @@ func (wp *wsPeer) readLoop() {
switch msg.Tag {
case protocol.MsgOfInterestTag:
// try to decode the message-of-interest
- atomic.AddUint64(&wp.miMessageCount, 1)
+ wp.miMessageCount.Add(1)
if close, reason := wp.handleMessageOfInterest(msg); close {
cleanupCloseError = reason
if reason == disconnectBadData {
@@ -604,7 +647,7 @@ func (wp *wsPeer) readLoop() {
}
continue
case protocol.TopicMsgRespTag: // Handle Topic message
- atomic.AddInt64(&wp.outstandingTopicRequests, -1)
+ wp.outstandingTopicRequests.Add(-1)
topics, err := UnmarshallTopics(msg.Data)
if err != nil {
wp.log.Warnf("wsPeer readLoop: could not read the message from: %s %s", wp.conn.RemoteAddrString(), err)
@@ -634,17 +677,17 @@ func (wp *wsPeer) readLoop() {
wp.handleFilterMessage(msg)
continue
case protocol.TxnTag:
- atomic.AddUint64(&wp.txMessageCount, 1)
+ wp.txMessageCount.Add(1)
case protocol.AgreementVoteTag:
- atomic.AddUint64(&wp.avMessageCount, 1)
+ wp.avMessageCount.Add(1)
case protocol.ProposalPayloadTag:
- atomic.AddUint64(&wp.ppMessageCount, 1)
+ wp.ppMessageCount.Add(1)
// the remaining valid tags: no special handling here
case protocol.NetPrioResponseTag, protocol.PingTag, protocol.PingReplyTag,
protocol.StateProofSigTag, protocol.UniEnsBlockReqTag, protocol.VoteBundleTag, protocol.NetIDVerificationTag:
default: // unrecognized tag
unknownProtocolTagMessagesTotal.Inc(nil)
- atomic.AddUint64(&wp.unkMessageCount, 1)
+ wp.unkMessageCount.Add(1)
continue // drop message, skip adding it to queue
// TODO: should disconnect here?
}
@@ -740,7 +783,7 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) {
// large message concurrently from several peers, and then sent the filter message to us after
// each large message finished transferring.
duplicateNetworkFilterReceivedTotal.Inc(nil)
- atomic.AddUint64(&wp.duplicateFilterCount, 1)
+ wp.duplicateFilterCount.Add(1)
}
}
@@ -792,17 +835,17 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
return disconnectStaleWrite
}
- atomic.StoreInt64(&wp.intermittentOutgoingMessageEnqueueTime, msg.enqueued.UnixNano())
- defer atomic.StoreInt64(&wp.intermittentOutgoingMessageEnqueueTime, 0)
+ wp.intermittentOutgoingMessageEnqueueTime.Store(msg.enqueued.UnixNano())
+ defer wp.intermittentOutgoingMessageEnqueueTime.Store(0)
err := wp.conn.WriteMessage(websocket.BinaryMessage, msg.data)
if err != nil {
- if atomic.LoadInt32(&wp.didInnerClose) == 0 {
+ if wp.didInnerClose.Load() == 0 {
wp.log.Warn("peer write error ", err)
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "write err"})
}
return disconnectWriteError
}
- atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano())
+ wp.lastPacketTime.Store(time.Now().UnixNano())
networkSentBytesTotal.AddUint64(uint64(len(msg.data)), nil)
networkSentBytesByTag.Add(string(tag), uint64(len(msg.data)))
networkMessageSentTotal.AddUint64(1, nil)
@@ -936,7 +979,7 @@ func (wp *wsPeer) pingTimes() (lastPingSent time.Time, lastPingRoundTripTime tim
// called when the connection had an error or closed remotely
func (wp *wsPeer) internalClose(reason disconnectReason) {
- if atomic.CompareAndSwapInt32(&wp.didSignalClose, 0, 1) {
+ if wp.didSignalClose.CompareAndSwap(0, 1) {
wp.net.peerRemoteClose(wp, reason)
}
wp.Close(time.Now().Add(peerDisconnectionAckDuration))
@@ -944,8 +987,8 @@ func (wp *wsPeer) internalClose(reason disconnectReason) {
// called either here or from above enclosing node logic
func (wp *wsPeer) Close(deadline time.Time) {
- atomic.StoreInt32(&wp.didSignalClose, 1)
- if atomic.CompareAndSwapInt32(&wp.didInnerClose, 0, 1) {
+ wp.didSignalClose.Store(1)
+ if wp.didInnerClose.CompareAndSwap(0, 1) {
close(wp.closing)
err := wp.conn.CloseWithMessage(websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), deadline)
if err != nil {
@@ -984,11 +1027,11 @@ func (wp *wsPeer) CloseAndWait(deadline time.Time) {
}
func (wp *wsPeer) GetLastPacketTime() int64 {
- return atomic.LoadInt64(&wp.lastPacketTime)
+ return wp.lastPacketTime.Load()
}
func (wp *wsPeer) CheckSlowWritingPeer(now time.Time) bool {
- ongoingMessageTime := atomic.LoadInt64(&wp.intermittentOutgoingMessageEnqueueTime)
+ ongoingMessageTime := wp.intermittentOutgoingMessageEnqueueTime.Load()
if ongoingMessageTime == 0 {
return false
}
@@ -1000,7 +1043,7 @@ func (wp *wsPeer) CheckSlowWritingPeer(now time.Time) bool {
// The value is stored on wsPeer
func (wp *wsPeer) getRequestNonce() []byte {
buf := make([]byte, binary.MaxVarintLen64)
- binary.PutUvarint(buf, atomic.AddUint64(&wp.requestNonce, 1))
+ binary.PutUvarint(buf, wp.requestNonce.Add(1))
return buf
}
@@ -1016,7 +1059,7 @@ func MakeNonceTopic(nonce uint64) Topic {
func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Response, e error) {
// Add nonce, stored on the wsPeer as the topic
- nonceTopic := MakeNonceTopic(atomic.AddUint64(&wp.requestNonce, 1))
+ nonceTopic := MakeNonceTopic(wp.requestNonce.Add(1))
topics = append(topics, nonceTopic)
// serialize the topics
@@ -1038,7 +1081,7 @@ func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Re
ctx: context.Background()}
select {
case wp.sendBufferBulk <- sendMessages{msgs: msg}:
- atomic.AddInt64(&wp.outstandingTopicRequests, 1)
+ wp.outstandingTopicRequests.Add(1)
case <-wp.closing:
e = fmt.Errorf("peer closing %s", wp.conn.RemoteAddrString())
return
@@ -1102,7 +1145,7 @@ func (wp *wsPeer) sendMessagesOfInterest(messagesOfInterestGeneration uint32, me
if err != nil {
wp.log.Errorf("ws send msgOfInterest: %v", err)
} else {
- atomic.StoreUint32(&wp.messagesOfInterestGeneration, messagesOfInterestGeneration)
+ wp.messagesOfInterestGeneration.Store(messagesOfInterestGeneration)
}
}
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index 4853b95e3..59217047c 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -22,12 +22,14 @@ import (
"go/ast"
"go/parser"
"go/token"
+ "io"
+ "net"
"path/filepath"
"sort"
"strings"
+ "sync/atomic"
"testing"
"time"
- "unsafe"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -41,17 +43,17 @@ func TestCheckSlowWritingPeer(t *testing.T) {
now := time.Now()
peer := wsPeer{
- intermittentOutgoingMessageEnqueueTime: 0,
+ intermittentOutgoingMessageEnqueueTime: atomic.Int64{},
wsPeerCore: wsPeerCore{net: &WebsocketNetwork{
log: logging.TestingLog(t),
}},
}
require.Equal(t, peer.CheckSlowWritingPeer(now), false)
- peer.intermittentOutgoingMessageEnqueueTime = now.UnixNano()
+ peer.intermittentOutgoingMessageEnqueueTime.Store(now.UnixNano())
require.Equal(t, peer.CheckSlowWritingPeer(now), false)
- peer.intermittentOutgoingMessageEnqueueTime = now.Add(-maxMessageQueueDuration * 2).UnixNano()
+ peer.intermittentOutgoingMessageEnqueueTime.Store(now.Add(-maxMessageQueueDuration * 2).UnixNano())
require.Equal(t, peer.CheckSlowWritingPeer(now), true)
}
@@ -99,24 +101,6 @@ func TestDefaultMessageTagsLength(t *testing.T) {
}
}
-// TestAtomicVariablesAlignment ensures that the 64-bit atomic variables
-// offsets are 64-bit aligned. This is required due to go atomic library
-// limitation.
-func TestAtomicVariablesAlignment(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- p := wsPeer{}
- require.True(t, (unsafe.Offsetof(p.requestNonce)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.duplicateFilterCount)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.txMessageCount)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.miMessageCount)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.ppMessageCount)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.avMessageCount)%8) == 0)
- require.True(t, (unsafe.Offsetof(p.unkMessageCount)%8) == 0)
-}
-
func TestTagCounterFiltering(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -282,3 +266,48 @@ func getProtocolTags(t *testing.T) []string {
require.Len(t, declaredTags, len(protocol.TagList))
return declaredTags
}
+
+type tcpipMockConn struct{ addr net.TCPAddr }
+
+func (m *tcpipMockConn) RemoteAddr() net.Addr { return &m.addr }
+func (m *tcpipMockConn) RemoteAddrString() string { return "" }
+func (m *tcpipMockConn) NextReader() (int, io.Reader, error) { return 0, nil, nil }
+func (m *tcpipMockConn) WriteMessage(int, []byte) error { return nil }
+func (m *tcpipMockConn) CloseWithMessage([]byte, time.Time) error { return nil }
+func (m *tcpipMockConn) SetReadLimit(int64) {}
+func (m *tcpipMockConn) CloseWithoutFlush() error { return nil }
+func (m *tcpipMockConn) UnderlyingConn() net.Conn { return nil }
+
+func TestWsPeerIPAddr(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ conn := &tcpipMockConn{}
+ peer := wsPeer{
+ conn: conn,
+ }
+ // some raw IPv4 address
+ conn.addr.IP = []byte{127, 0, 0, 1}
+ require.Equal(t, []byte{127, 0, 0, 1}, peer.IPAddr())
+ require.Equal(t, []byte{127, 0, 0, 1}, peer.RoutingAddr())
+
+ // IPv4 constructed from net.IPv4
+ conn.addr.IP = net.IPv4(127, 0, 0, 2)
+ require.Equal(t, []byte{127, 0, 0, 2}, peer.IPAddr())
+ require.Equal(t, []byte{127, 0, 0, 2}, peer.RoutingAddr())
+
+ // some IPv6 address
+ conn.addr.IP = net.IPv6linklocalallrouters
+ require.Equal(t, []byte(net.IPv6linklocalallrouters), peer.IPAddr())
+ require.Equal(t, []byte(net.IPv6linklocalallrouters[0:8]), peer.RoutingAddr())
+
+ // embedded IPv4 into IPv6
+ conn.addr.IP = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 3}
+ require.Equal(t, 16, len(conn.addr.IP))
+ require.Equal(t, []byte{127, 0, 0, 3}, peer.IPAddr())
+ require.Equal(t, []byte{127, 0, 0, 3}, peer.RoutingAddr())
+ conn.addr.IP = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 4}
+ require.Equal(t, 16, len(conn.addr.IP))
+ require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 4}, peer.IPAddr())
+ require.Equal(t, []byte{127, 0, 0, 4}, peer.RoutingAddr())
+}
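
TestWsPeerIPAddr leans on how the standard library represents IPv4 inside 16-byte IPs: net.IPv4 produces the IPv4-mapped form ::ffff:a.b.c.d, which To4 collapses back to 4 bytes, while ::a.b.c.d without the 0xffff marker bytes stays a plain 16-byte IPv6 address. A small sketch of just that distinction (the RoutingAddr prefix logic itself is not shown here):

```
package main

import (
	"fmt"
	"net"
)

func main() {
	// net.IPv4 returns the 16-byte IPv4-mapped form ::ffff:127.0.0.2;
	// To4 collapses it back to 4 bytes, matching the test's expectation.
	mapped := net.IPv4(127, 0, 0, 2)
	fmt.Println(len(mapped), mapped.To4()) // 16 127.0.0.2

	// ::127.0.0.4 lacks the 0xffff marker at bytes 10-11, so To4 returns
	// nil and IPAddr reports the full 16-byte address, as asserted above.
	notMapped := net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 4}
	fmt.Println(notMapped.To4()) // <nil>
}
```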
diff --git a/node/follower_node.go b/node/follower_node.go
index b7fb81065..c61c37957 100644
--- a/node/follower_node.go
+++ b/node/follower_node.go
@@ -83,7 +83,7 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo
node.genesisHash = genesis.Hash()
node.devMode = genesis.DevMode
var err error
- node.genesisDirs, err = cfg.EnsureAndResolveGenesisDirs(rootDir, genesis.ID())
+ node.genesisDirs, err = cfg.EnsureAndResolveGenesisDirs(rootDir, genesis.ID(), log)
if err != nil {
return nil, err
}
@@ -226,17 +226,17 @@ func (node *AlgorandFollowerNode) Ledger() *data.Ledger {
// BroadcastSignedTxGroup errors in follower mode
func (node *AlgorandFollowerNode) BroadcastSignedTxGroup(_ []transactions.SignedTxn) (err error) {
- return fmt.Errorf("cannot broadcast txns in sync mode")
+ return fmt.Errorf("cannot broadcast txns in follower mode")
}
// AsyncBroadcastSignedTxGroup errors in follower mode
func (node *AlgorandFollowerNode) AsyncBroadcastSignedTxGroup(_ []transactions.SignedTxn) (err error) {
- return fmt.Errorf("cannot broadcast txns in sync mode")
+ return fmt.Errorf("cannot broadcast txns in follower mode")
}
// BroadcastInternalSignedTxGroup errors in follower mode
func (node *AlgorandFollowerNode) BroadcastInternalSignedTxGroup(_ []transactions.SignedTxn) (err error) {
- return fmt.Errorf("cannot broadcast internal signed txn group in sync mode")
+ return fmt.Errorf("cannot broadcast internal signed txn group in follower mode")
}
// Simulate speculatively runs a transaction group against the current
diff --git a/node/node.go b/node/node.go
index 4c18ad1d5..e1f79907c 100644
--- a/node/node.go
+++ b/node/node.go
@@ -183,7 +183,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.devMode = genesis.DevMode
node.config = cfg
var err error
- node.genesisDirs, err = cfg.EnsureAndResolveGenesisDirs(rootDir, genesis.ID())
+ node.genesisDirs, err = cfg.EnsureAndResolveGenesisDirs(rootDir, genesis.ID(), log)
if err != nil {
return nil, err
}
diff --git a/node/node_test.go b/node/node_test.go
index c905fa78d..55cbae936 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -598,7 +598,7 @@ func TestConfiguredDataDirs(t *testing.T) {
require.FileExists(t, filepath.Join(testDirHot, genesis.ID(), "ledger.tracker.sqlite"))
// confirm the stateproof db in the genesis dir of hot data dir
- require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "stateproof.sqlite"))
+ require.FileExists(t, filepath.Join(testDirHot, genesis.ID(), "stateproof.sqlite"))
// confirm cold data dir exists and contains a genesis dir
require.DirExists(t, filepath.Join(testDirCold, genesis.ID()))
@@ -609,8 +609,8 @@ func TestConfiguredDataDirs(t *testing.T) {
// confirm the partregistry is in the genesis dir of cold data dir
require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "partregistry.sqlite"))
- // confirm the partregistry is in the genesis dir of cold data dir
- require.FileExists(t, filepath.Join(testDirCold, genesis.ID(), "crash.sqlite"))
+ // confirm the agreement crash DB is in the genesis dir of hot data dir
+ require.FileExists(t, filepath.Join(testDirHot, genesis.ID(), "crash.sqlite"))
}
// TestConfiguredResourcePaths tests to see that when TrackerDbFilePath, BlockDbFilePath, StateproofDir, and CrashFilePath are set, underlying resources are created in the correct locations
diff --git a/rpcs/blockService.go b/rpcs/blockService.go
index 0f48f873c..2d4a4b822 100644
--- a/rpcs/blockService.go
+++ b/rpcs/blockService.go
@@ -54,6 +54,9 @@ const blockResponseRetryAfter = "3"
const blockServerMaxBodyLength = 512 // we don't really pass meaningful content here, so 512 bytes should be a safe limit
const blockServerCatchupRequestBufferSize = 10
+// BlockResponseLatestRoundHeader is returned in the response header when the requested block is not available
+const BlockResponseLatestRoundHeader = "X-Latest-Round"
+
// BlockServiceBlockPath is the path to register BlockService as a handler for when using gorilla/mux
// e.g. .Handle(BlockServiceBlockPath, &ls)
const BlockServiceBlockPath = "/v{version:[0-9.]+}/{genesisID}/block/{round:[0-9a-z]+}"
@@ -65,6 +68,7 @@ const (
BlockDataKey = "blockData" // Block-data topic-key in the response
CertDataKey = "certData" // Cert-data topic-key in the response
BlockAndCertValue = "blockAndCert" // block+cert request data (as the value of requestDataTypeKey)
+ LatestRoundKey = "latest"
)
var errBlockServiceClosed = errors.New("block service is shutting down")
@@ -104,7 +108,7 @@ type BlockService struct {
closeWaitGroup sync.WaitGroup
mu deadlock.Mutex
memoryUsed uint64
- wsMemoryUsed uint64
+ wsMemoryUsed atomic.Uint64
memoryCap uint64
}
@@ -239,12 +243,13 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
}
encodedBlockCert, err := bs.rawBlockBytes(basics.Round(round))
if err != nil {
- switch err.(type) {
+ switch lerr := err.(type) {
case ledgercore.ErrNoEntry:
// entry could not be found.
ok := bs.redirectRequest(round, response, request)
if !ok {
response.Header().Set("Cache-Control", blockResponseMissingBlockCacheControl)
+ response.Header().Set(BlockResponseLatestRoundHeader, fmt.Sprintf("%d", lerr.Latest))
response.WriteHeader(http.StatusNotFound)
}
return
@@ -320,9 +325,9 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
outMsg := network.OutgoingMessage{Topics: respTopics}
if n > 0 {
outMsg.OnRelease = func() {
- atomic.AddUint64(&bs.wsMemoryUsed, ^uint64(n-1))
+ bs.wsMemoryUsed.Add(^uint64(n - 1))
}
- atomic.AddUint64(&bs.wsMemoryUsed, (n))
+ bs.wsMemoryUsed.Add(n)
}
err := target.Respond(ctx, reqMsg, outMsg)
if err != nil {
@@ -332,7 +337,7 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
// If we are over-capacity, we will not process the request
// respond to sender with error message
- memUsed := atomic.LoadUint64(&bs.wsMemoryUsed)
+ memUsed := bs.wsMemoryUsed.Load()
if memUsed > bs.memoryCap {
err := errMemoryAtCapacity{capacity: bs.memoryCap, used: memUsed}
bs.log.Infof("BlockService handleCatchupReq: %s", err.Error())
@@ -456,8 +461,12 @@ func (bs *BlockService) rawBlockBytes(round basics.Round) ([]byte, error) {
func topicBlockBytes(log logging.Logger, dataLedger LedgerForBlockService, round basics.Round, requestType string) (network.Topics, uint64) {
blk, cert, err := dataLedger.EncodedBlockCert(round)
if err != nil {
- switch err.(type) {
+ switch lerr := err.(type) {
case ledgercore.ErrNoEntry:
+ return network.Topics{
+ network.MakeTopic(network.ErrorKey, []byte(blockNotAvailableErrMsg)),
+ network.MakeTopic(LatestRoundKey, binary.BigEndian.AppendUint64([]byte{}, uint64(lerr.Latest))),
+ }, 0
default:
log.Infof("BlockService topicBlockBytes: %s", err)
}
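
The wsMemoryUsed bookkeeping above uses the standard trick for atomic subtraction on an unsigned counter: atomic.Uint64 has no Sub method, so releasing n bytes adds the two's complement of n, written ^uint64(n-1). A minimal sketch of the reserve/release pair:

```
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var memUsed atomic.Uint64
	n := uint64(1024)

	memUsed.Add(n)              // reserve n bytes for an in-flight response
	memUsed.Add(^uint64(n - 1)) // release: ^(n-1) == -n in two's complement
	fmt.Println(memUsed.Load()) // 0
}
```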
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index a0ba919c9..832b59c55 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -452,7 +452,7 @@ func TestWsBlockLimiting(t *testing.T) {
roundBin),
}
reqMsg.Data = topics.MarshallTopics()
- require.Zero(t, bs1.wsMemoryUsed)
+ require.Zero(t, bs1.wsMemoryUsed.Load())
bs1.handleCatchupReq(context.Background(), reqMsg)
// We should have received the message into the mock peer and the block service should have memoryUsed > 0
data, found := peer.responseTopics.GetValue(BlockDataKey)
@@ -460,7 +460,7 @@ func TestWsBlockLimiting(t *testing.T) {
blk, _, err := ledger.EncodedBlockCert(basics.Round(2))
require.NoError(t, err)
require.Equal(t, data, blk)
- require.Positive(t, bs1.wsMemoryUsed)
+ require.Positive(t, bs1.wsMemoryUsed.Load())
// Before making a new request save the callback since the new failed message will overwrite it in the mock peer
callback := peer.outMsg.OnRelease
@@ -474,7 +474,7 @@ func TestWsBlockLimiting(t *testing.T) {
// Now call the callback to free up memUsed
require.Nil(t, peer.outMsg.OnRelease)
callback()
- require.Zero(t, bs1.wsMemoryUsed)
+ require.Zero(t, bs1.wsMemoryUsed.Load())
}
// TestRedirectExceptions tests exception cases:
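
With these changes a client that hits "block not available" can learn how far the server has advanced: over HTTP the latest round arrives as a decimal X-Latest-Round header on the 404, and over websockets as a big-endian uint64 under the "latest" topic key. A minimal decoding sketch mirroring the two encodings used above; the round value here is illustrative:

```
package main

import (
	"encoding/binary"
	"fmt"
	"strconv"
)

func main() {
	// HTTP path: the 404 carries X-Latest-Round written with Sprintf("%d"),
	// so the client parses a decimal string.
	latest, err := strconv.ParseUint("1337", 10, 64)
	if err != nil {
		panic(err)
	}

	// Websocket path: the "latest" topic value mirrors the server-side
	// binary.BigEndian.AppendUint64 encoding.
	topic := binary.BigEndian.AppendUint64(nil, 1337)
	fmt.Println(latest, binary.BigEndian.Uint64(topic)) // 1337 1337
}
```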
diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go
index 8abf87e3b..a3ff63e90 100644
--- a/rpcs/ledgerService.go
+++ b/rpcs/ledgerService.go
@@ -63,7 +63,7 @@ type LedgerForService interface {
// LedgerService represents the Ledger RPC API
type LedgerService struct {
// running is non-zero once the service is running, and zero when it's not running.
- running int32
+ running atomic.Int32
ledger LedgerForService
genesisID string
net network.GossipNode
@@ -89,14 +89,14 @@ func MakeLedgerService(config config.Local, ledger LedgerForService, net network
// Start listening to catchup requests
func (ls *LedgerService) Start() {
if ls.enableService {
- atomic.StoreInt32(&ls.running, 1)
+ ls.running.Store(1)
}
}
// Stop servicing catchup requests
func (ls *LedgerService) Stop() {
if ls.enableService {
- atomic.StoreInt32(&ls.running, 0)
+ ls.running.Store(0)
ls.stopping.Wait()
}
}
@@ -107,7 +107,7 @@ func (ls *LedgerService) Stop() {
func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.Request) {
ls.stopping.Add(1)
defer ls.stopping.Done()
- if atomic.AddInt32(&ls.running, 0) == 0 {
+ if ls.running.Add(0) == 0 {
response.WriteHeader(http.StatusNotFound)
return
}
diff --git a/rpcs/ledgerService_test.go b/rpcs/ledgerService_test.go
index 6b01cf0e1..1285795d4 100644
--- a/rpcs/ledgerService_test.go
+++ b/rpcs/ledgerService_test.go
@@ -82,7 +82,7 @@ func TestLedgerService(t *testing.T) {
ledgerService := MakeLedgerService(cfg, &l, &fnet, genesisID)
fnet.AssertNotCalled(t, "RegisterHTTPHandler", LedgerServiceLedgerPath, ledgerService)
ledgerService.Start()
- require.Equal(t, int32(0), ledgerService.running)
+ require.Equal(t, int32(0), ledgerService.running.Load())
// Test GET 404
rr := httptest.NewRecorder()
@@ -97,7 +97,7 @@ func TestLedgerService(t *testing.T) {
ledgerService = MakeLedgerService(cfg, &l, &fnet, genesisID)
fnet.AssertCalled(t, "RegisterHTTPHandler", LedgerServiceLedgerPath, ledgerService)
ledgerService.Start()
- require.Equal(t, int32(1), ledgerService.running)
+ require.Equal(t, int32(1), ledgerService.running.Load())
// Test GET 400 Bad Version String
rr = httptest.NewRecorder()
@@ -170,5 +170,5 @@ func TestLedgerService(t *testing.T) {
// Test LedgerService Stopped
ledgerService.Stop()
- require.Equal(t, int32(0), ledgerService.running)
+ require.Equal(t, int32(0), ledgerService.running.Load())
}
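
In the LedgerService hunks the running flag keeps its mechanical translation: the old atomic.AddInt32(&ls.running, 0) read becomes running.Add(0), which is simply a load. With the typed atomic the same read can be written as Load(), as the updated tests do. A tiny sketch of the equivalence:

```
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var running atomic.Int32
	running.Store(1)                            // Start()
	fmt.Println(running.Add(0), running.Load()) // 1 1: Add(0) is just a load
	running.Store(0)                            // Stop()
}
```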
diff --git a/rpcs/txService_test.go b/rpcs/txService_test.go
index dd999d6e6..8ef49e45a 100644
--- a/rpcs/txService_test.go
+++ b/rpcs/txService_test.go
@@ -24,7 +24,6 @@ import (
"os"
"strings"
"sync"
- "sync/atomic"
"testing"
"time"
@@ -153,7 +152,7 @@ func TestTxSync(t *testing.T) {
// Since syncer is not Started, set the context here
syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
require.NoError(t, syncer.sync())
- require.Equal(t, int32(3), atomic.LoadInt32(&handler.messageCounter))
+ require.Equal(t, int32(3), handler.messageCounter.Load())
}
func BenchmarkTxSync(b *testing.B) {
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go
index b05e050ee..43e85f452 100644
--- a/rpcs/txSyncer_test.go
+++ b/rpcs/txSyncer_test.go
@@ -103,12 +103,12 @@ func (mock mockPendingTxAggregate) PendingTxGroups() [][]transactions.SignedTxn
}
type mockHandler struct {
- messageCounter int32
+ messageCounter atomic.Int32
err error
}
func (handler *mockHandler) Handle(txgroup []transactions.SignedTxn) error {
- atomic.AddInt32(&handler.messageCounter, 1)
+ handler.messageCounter.Add(1)
return handler.err
}
@@ -201,7 +201,7 @@ func TestSyncFromClient(t *testing.T) {
syncer.log = logging.TestingLog(t)
require.NoError(t, syncer.syncFromClient(&client))
- require.Equal(t, int32(1), atomic.LoadInt32(&handler.messageCounter))
+ require.Equal(t, int32(1), handler.messageCounter.Load())
}
func TestSyncFromUnsupportedClient(t *testing.T) {
@@ -218,7 +218,7 @@ func TestSyncFromUnsupportedClient(t *testing.T) {
syncer.log = logging.TestingLog(t)
require.Error(t, syncer.syncFromClient(&client))
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
}
func TestSyncFromClientAndQuit(t *testing.T) {
@@ -235,7 +235,7 @@ func TestSyncFromClientAndQuit(t *testing.T) {
syncer.log = logging.TestingLog(t)
syncer.cancel()
require.Error(t, syncer.syncFromClient(&client))
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
}
func TestSyncFromClientAndError(t *testing.T) {
@@ -251,7 +251,7 @@ func TestSyncFromClientAndError(t *testing.T) {
syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.Error(t, syncer.syncFromClient(&client))
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
}
func TestSyncFromClientAndTimeout(t *testing.T) {
@@ -268,7 +268,7 @@ func TestSyncFromClientAndTimeout(t *testing.T) {
syncer.ctx, syncer.cancel = context.WithCancel(context.Background())
syncer.log = logging.TestingLog(t)
require.Error(t, syncer.syncFromClient(&client))
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
}
func TestSync(t *testing.T) {
@@ -292,7 +292,7 @@ func TestSync(t *testing.T) {
syncer.log = logging.TestingLog(t)
require.NoError(t, syncer.sync())
- require.Equal(t, int32(1), atomic.LoadInt32(&handler.messageCounter))
+ require.Equal(t, int32(1), handler.messageCounter.Load())
}
func TestNoClientsSync(t *testing.T) {
@@ -307,7 +307,7 @@ func TestNoClientsSync(t *testing.T) {
syncer.log = logging.TestingLog(t)
require.NoError(t, syncer.sync())
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
}
func TestStartAndStop(t *testing.T) {
@@ -335,22 +335,22 @@ func TestStartAndStop(t *testing.T) {
canStart := make(chan struct{})
syncer.Start(canStart)
time.Sleep(2 * time.Second)
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
// signal that syncing can start
close(canStart)
for x := 0; x < 20; x++ {
time.Sleep(100 * time.Millisecond)
- if atomic.LoadInt32(&handler.messageCounter) != 0 {
+ if handler.messageCounter.Load() != 0 {
break
}
}
- require.Equal(t, int32(1), atomic.LoadInt32(&handler.messageCounter))
+ require.Equal(t, int32(1), handler.messageCounter.Load())
// stop syncing and ensure it doesn't happen
syncer.Stop()
time.Sleep(2 * time.Second)
- require.Equal(t, int32(1), atomic.LoadInt32(&handler.messageCounter))
+ require.Equal(t, int32(1), handler.messageCounter.Load())
}
func TestStartAndQuit(t *testing.T) {
@@ -370,12 +370,12 @@ func TestStartAndQuit(t *testing.T) {
canStart := make(chan struct{})
syncer.Start(canStart)
time.Sleep(2 * time.Second)
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
syncer.cancel()
time.Sleep(50 * time.Millisecond)
// signal that syncing can start, but ensure that it doesn't start (since we quit)
close(canStart)
time.Sleep(2 * time.Second)
- require.Zero(t, atomic.LoadInt32(&handler.messageCounter))
+ require.Zero(t, handler.messageCounter.Load())
}
diff --git a/scripts/windows/instructions.md b/scripts/windows/instructions.md
index b24370bcf..6b388d1d1 100644
--- a/scripts/windows/instructions.md
+++ b/scripts/windows/instructions.md
@@ -8,7 +8,7 @@
pacman -Syu --disable-download-timeout
```
- NOTE: It is very likely MSYS2 will ask to close the window and repeat the command for furter updates. Check `MSYS2` web page for additional support.
+ NOTE: It is very likely MSYS2 will ask to close the window and repeat the command for further updates. Check `MSYS2` web page for additional support.
4. Install GIT on MSYS2 by executing the following command:
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index 0471d84be..3c34469f3 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -209,10 +209,10 @@ func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
}
ppa := &pingPongAccount{
- balance: amt,
- sk: secret,
- pk: accountAddress,
+ sk: secret,
+ pk: accountAddress,
}
+ ppa.balance.Store(amt)
pps.integrateAccountInfo(addr, ppa, ai)
@@ -246,7 +246,7 @@ func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
}
func (pps *WorkerState) integrateAccountInfo(addr string, ppa *pingPongAccount, ai model.Account) {
- ppa.balance = ai.Amount
+ ppa.balance.Store(ai.Amount)
// assets this account has created
if ai.CreatedAssets != nil {
for _, ap := range *ai.CreatedAssets {
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index a35fd451a..95aeac039 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -57,7 +57,7 @@ type CreatablesInfo struct {
// pingPongAccount represents the account state for each account in the pingpong application
// This includes the current balance and public/private keys tied to the account
type pingPongAccount struct {
- balance uint64
+ balance atomic.Uint64
balanceRound uint64
deadlock.Mutex
@@ -69,22 +69,22 @@ type pingPongAccount struct {
}
func (ppa *pingPongAccount) getBalance() uint64 {
- return atomic.LoadUint64(&ppa.balance)
+ return ppa.balance.Load()
}
func (ppa *pingPongAccount) setBalance(balance uint64) {
- atomic.StoreUint64(&ppa.balance, balance)
+ ppa.balance.Store(balance)
}
func (ppa *pingPongAccount) addBalance(offset int64) {
if offset >= 0 {
- atomic.AddUint64(&ppa.balance, uint64(offset))
+ ppa.balance.Add(uint64(offset))
return
}
for {
- v := atomic.LoadUint64(&ppa.balance)
+ v := ppa.balance.Load()
nv := v - uint64(-offset)
- done := atomic.CompareAndSwapUint64(&ppa.balance, v, nv)
+ done := ppa.balance.CompareAndSwap(v, nv)
if done {
return
}
@@ -118,7 +118,7 @@ func (ppa *pingPongAccount) String() string {
ppa.Lock()
defer ppa.Unlock()
var ow strings.Builder
- fmt.Fprintf(&ow, "%s %d", ppa.pk.String(), ppa.balance)
+ fmt.Fprintf(&ow, "%s %d", ppa.pk.String(), ppa.balance.Load())
if len(ppa.holdings) > 0 {
fmt.Fprintf(&ow, "[")
first := true
@@ -1036,11 +1036,11 @@ type paymentUpdate struct {
}
func (au *paymentUpdate) apply(pps *WorkerState) {
- pps.accounts[au.from].balance -= (au.fee + au.amt)
+ pps.accounts[au.from].balance.Add(-(au.fee + au.amt))
// update account balance
to := pps.accounts[au.to]
if to != nil {
- to.balance += au.amt
+ to.balance.Add(au.amt)
}
}
@@ -1164,7 +1164,7 @@ type assetUpdate struct {
}
func (au *assetUpdate) apply(pps *WorkerState) {
- pps.accounts[au.from].balance -= au.fee
+ pps.accounts[au.from].balance.Add(-au.fee)
pps.accounts[au.from].holdings[au.aidx] -= au.amt
to := pps.accounts[au.to]
if to.holdings == nil {
@@ -1240,7 +1240,7 @@ type appUpdate struct {
}
func (au *appUpdate) apply(pps *WorkerState) {
- pps.accounts[au.from].balance -= au.fee
+ pps.accounts[au.from].balance.Add(-au.fee)
}
func (pps *WorkerState) constructNFTGenTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
@@ -1323,7 +1323,7 @@ type nftgenUpdate struct {
}
func (au *nftgenUpdate) apply(pps *WorkerState) {
- pps.accounts[au.from].balance -= au.fee
+ pps.accounts[au.from].balance.Add(-au.fee)
}
func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig) (stxn transactions.SignedTxn, err error) {
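
pingPongAccount.balance becomes an atomic.Uint64, and the code subtracts from it in two ways: the txnUpdate appliers rely on unsigned wraparound, where Add(-fee) with a uint64 fee adds 2^64-fee and thus subtracts fee modulo 2^64, while addBalance uses a compare-and-swap retry loop. A minimal sketch of both, with illustrative values:

```
package main

import (
	"fmt"
	"sync/atomic"
)

// subBalance mirrors the CAS loop in addBalance: retry until the
// compare-and-swap wins against any concurrent update.
func subBalance(balance *atomic.Uint64, amt uint64) {
	for {
		v := balance.Load()
		if balance.CompareAndSwap(v, v-amt) {
			return
		}
	}
}

func main() {
	var balance atomic.Uint64
	balance.Store(1_000_000)

	fee := uint64(1000)
	balance.Add(-fee) // unsigned negation wraps: adds 2^64-fee, i.e. subtracts fee
	subBalance(&balance, 500)
	fmt.Println(balance.Load()) // 998500
}
```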
diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go
index 212d4cac9..f6a009dc1 100644
--- a/test/e2e-go/features/catchup/basicCatchup_test.go
+++ b/test/e2e-go/features/catchup/basicCatchup_test.go
@@ -73,24 +73,6 @@ func TestBasicCatchup(t *testing.T) {
// Now, catch up
err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound)
a.NoError(err)
-
- cloneNC := fixture.GetNodeControllerForDataDir(cloneDataDir)
- cloneRestClient := fixture.GetAlgodClientForController(cloneNC)
-
- // an immediate call for ready will error, for sync time != 0
- a.Error(cloneRestClient.ReadyCheck())
-
- for {
- status, err := cloneRestClient.Status()
- a.NoError(err)
-
- if status.LastRound < 10 {
- time.Sleep(250 * time.Millisecond)
- continue
- }
- a.NoError(cloneRestClient.ReadyCheck())
- break
- }
}
// TestCatchupOverGossip tests catchup across network versions
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index f9f62b3d0..eb8345130 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -363,6 +363,10 @@ func TestBasicCatchpointCatchup(t *testing.T) {
err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1))
a.NoError(err)
+
+ // ensure the raw block can be downloaded (including cert)
+ _, err = usingNodeRestClient.RawBlock(uint64(targetCatchpointRound))
+ a.NoError(err)
}
func TestCatchpointLabelGeneration(t *testing.T) {
diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
index e9a4e5735..6752af841 100644
--- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go
+++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go
@@ -21,6 +21,7 @@ package participation
// deterministic.
import (
+ "errors"
"fmt"
"path/filepath"
"testing"
@@ -32,6 +33,7 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/libgoal/participation"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -39,7 +41,10 @@ import (
// installParticipationKey generates a new key for a given account and installs it with the client.
func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp model.PostParticipationResponse, part account.Participation, err error) {
// Install overlapping participation keys...
- part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, t.TempDir())
+ installFunc := func(keyPath string) error {
+ return errors.New("the install directory is provided, so keys should not be installed")
+ }
+ part, filePath, err := participation.GenParticipationKeysTo(addr, firstValid, lastValid, 100, t.TempDir(), installFunc)
require.NoError(t, err)
require.NotNil(t, filePath)
require.Equal(t, addr, part.Parent.String())
diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go
index 4d4bc9f51..4631ecd6e 100644
--- a/test/e2e-go/features/stateproofs/stateproofs_test.go
+++ b/test/e2e-go/features/stateproofs/stateproofs_test.go
@@ -18,6 +18,7 @@ package stateproofs
import (
"bytes"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -41,6 +42,7 @@ import (
"github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/libgoal/participation"
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
@@ -321,6 +323,8 @@ func TestStateProofMessageCommitmentVerification(t *testing.T) {
consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
consensusParams := getDefaultStateProofConsensusParams()
configurableConsensus[consensusVersion] = consensusParams
+ oldConsensus := config.SetConfigurableConsensusProtocols(configurableConsensus)
+ defer config.SetConfigurableConsensusProtocols(oldConsensus)
var fixture fixtures.RestClientFixture
fixture.SetConsensus(configurableConsensus)
@@ -677,7 +681,10 @@ func installParticipationKey(t *testing.T, client libgoal.Client, addr string, f
defer os.RemoveAll(dir)
// Install overlapping participation keys...
- part, filePath, err := client.GenParticipationKeysTo(addr, firstValid, lastValid, 100, dir)
+ installFunc := func(keyPath string) error {
+ return errors.New("the install directory is provided, so keys should not be installed")
+ }
+ part, filePath, err := participation.GenParticipationKeysTo(addr, firstValid, lastValid, 100, dir, installFunc)
require.NoError(t, err)
require.NotNil(t, filePath)
require.Equal(t, addr, part.Parent.String())
diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go
index a8a9ceb01..7a9e6b346 100644
--- a/test/e2e-go/features/transactions/onlineStatusChange_test.go
+++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go
@@ -17,6 +17,7 @@
package transactions
import (
+ "errors"
"fmt"
"path/filepath"
"testing"
@@ -24,6 +25,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/libgoal/participation"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -171,7 +173,10 @@ func TestCloseOnError(t *testing.T) {
_, curRound := fixture.GetBalanceAndRound(initiallyOnline)
var partkeyFile string
- _, partkeyFile, err = client.GenParticipationKeysTo(initiallyOffline, 0, curRound+1000, 0, t.TempDir())
+ installFunc := func(keyPath string) error {
+ return errors.New("the install directory is provided, so keys should not be installed")
+ }
+ _, partkeyFile, err = participation.GenParticipationKeysTo(initiallyOffline, 0, curRound+1000, 0, t.TempDir(), installFunc)
a.NoError(err)
// make a participation key for initiallyOffline
diff --git a/test/framework/fixtures/fixture.go b/test/framework/fixtures/fixture.go
index 83bb7d712..4ccba1acc 100644
--- a/test/framework/fixtures/fixture.go
+++ b/test/framework/fixtures/fixture.go
@@ -161,11 +161,13 @@ func (st *synchTest) Helper() {
st.t.Helper()
}
func (st *synchTest) Log(args ...interface{}) {
+ st.t.Helper()
st.Lock()
defer st.Unlock()
st.t.Log(args...)
}
func (st *synchTest) Logf(format string, args ...interface{}) {
+ st.t.Helper()
st.Lock()
defer st.Unlock()
st.t.Logf(format, args...)
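
Adding st.t.Helper() to these wrappers makes the testing package attribute log and failure lines to the wrapper's caller rather than to fixture.go. A minimal sketch of the general pattern; logf and the test are hypothetical and would live in a _test.go file:

```
package example

import "testing"

// logf is a wrapper in the spirit of synchTest.Logf; calling t.Helper()
// first makes the testing package report the caller's file:line for this
// log line (and for any failure raised here), not the wrapper's.
func logf(t *testing.T, format string, args ...interface{}) {
	t.Helper()
	t.Logf(format, args...)
}

func TestHelperAttribution(t *testing.T) {
	logf(t, "reported with this test's file:line, not the wrapper's")
}
```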
diff --git a/test/heapwatch/metrics_viz.py b/test/heapwatch/metrics_viz.py
index a2616a2eb..584fc0ae5 100644
--- a/test/heapwatch/metrics_viz.py
+++ b/test/heapwatch/metrics_viz.py
@@ -54,7 +54,7 @@ def gather_metrics_files_by_nick(metrics_files: Iterable[str]) -> Dict[str, Dict
TYPE_GAUGE = 0
TYPE_COUNTER = 1
-def parse_metrics(fin: Iterable[str], nick: str, metrics_names: set=None) -> Tuple[Dict[str, float], Dict[str, int]]:
+def parse_metrics(fin: Iterable[str], nick: str, metrics_names: set=None, diff: bool=None) -> Tuple[Dict[str, float], Dict[str, int]]:
"""Parse metrics file and return dicts of values and types"""
out = {}
types = {}
@@ -93,17 +93,28 @@ def parse_metrics(fin: Iterable[str], nick: str, metrics_names: set=None) -> Tup
except:
print(f'An exception occurred in parse_metrics: {sys.exc_info()}')
pass
+ if diff and metrics_names and len(metrics_names) == 2 and len(out) == 2:
+ m = list(out.keys())
+ name = f'{m[0]}_-_{m[1]}'
+ new_out = {name: out[m[0]] - out[m[1]]}
+ new_types = {name: TYPE_GAUGE}
+ out = new_out
+ types = new_types
+
return out, types
def main():
os.environ['TZ'] = 'UTC'
time.tzset()
+ default_output_file = 'metrics_viz.png'
ap = argparse.ArgumentParser()
ap.add_argument('metrics_names', nargs='+', default=None, help='metric name(s) to track')
ap.add_argument('-d', '--dir', type=str, default=None, help='dir path to find /*.metrics in')
ap.add_argument('-l', '--list-nodes', default=False, action='store_true', help='list available node names with metrics')
+ ap.add_argument('-s', '--save', action='store_true', default=None, help=f'save plot to \'{default_output_file}\' file instead of showing it')
+ ap.add_argument('--diff', action='store_true', default=None, help='diff two gauge metrics instead of plotting their values. Requires two metrics names to be set')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -141,15 +152,16 @@ def main():
])
)
metrics_names = set(args.metrics_names)
+ nrows = 1 if args.diff and len(args.metrics_names) == 2 else len(metrics_names)
fig = make_subplots(
- rows=len(metrics_names), cols=1,
+ rows=nrows, cols=1,
vertical_spacing=0.03, shared_xaxes=True)
fig['layout']['margin'] = {
'l': 30, 'r': 10, 'b': 10, 't': 10
}
- fig['layout']['height'] = 1500
+ fig['layout']['height'] = 500 * nrows
# fig.update_layout(template="plotly_dark")
data = {
@@ -161,7 +173,7 @@ def main():
for dt, metrics_file in items.items():
data['time'].append(dt)
with open(metrics_file, 'rt') as f:
- metrics, types = parse_metrics(f, nick, metrics_names)
+ metrics, types = parse_metrics(f, nick, metrics_names, args.diff)
for metric_name, metric_value in metrics.items():
raw_value = metric_value
if metric_name not in data:
@@ -186,7 +198,10 @@ def main():
line=dict(width=1),
), i+1, 1)
- fig.show()
+ if args.save:
+ fig.write_image(os.path.join(args.dir, default_output_file))
+ else:
+ fig.show()
# app.run_server(debug=True)
return 0
diff --git a/test/heapwatch/plot_crr_csv.py b/test/heapwatch/plot_crr_csv.py
index 00a0aa599..9e1a47134 100755
--- a/test/heapwatch/plot_crr_csv.py
+++ b/test/heapwatch/plot_crr_csv.py
@@ -63,6 +63,8 @@ def main():
fvals = {}
minv = None
maxv = None
+ minr = None
+ maxr = None
with open(fname) as fin:
reader = csv.DictReader(fin)
for rec in reader:
@@ -84,13 +86,16 @@ def main():
minv = smin(minv, v)
maxv = smax(maxv, v)
+ minr = smin(minr, xround)
+ maxr = smax(maxr, xround)
if not fvals:
print(f"{fname} empty")
continue
nodes = sorted(fvals.keys())
- print("{} found series {}".format(fname, nodes))
+ print("{} found series {} ({} - {})".format(fname, nodes, minr, maxr))
fig, ax = plt.subplots()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
+ ax.set_xlim([minr, maxr])
ax.yaxis.set_major_formatter(FuncFormatter(format_mem))
ax.set_ylabel('bytes')
ax.set_xlabel('round')
diff --git a/test/heapwatch/requirements.txt b/test/heapwatch/requirements.txt
index e261301c6..b46aead08 100644
--- a/test/heapwatch/requirements.txt
+++ b/test/heapwatch/requirements.txt
@@ -4,3 +4,4 @@ Jinja2==3.1.2
matplotlib==3.7.2
plotly==5.16.0
py-algorand-sdk==2.3.0
+kaleido==0.2.1
diff --git a/test/testdata/configs/config-v32.json b/test/testdata/configs/config-v32.json
new file mode 100644
index 000000000..ce0238033
--- /dev/null
+++ b/test/testdata/configs/config-v32.json
@@ -0,0 +1,139 @@
+{
+ "Version": 32,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 15,
+ "AgreementIncomingProposalsQueueLength": 50,
+ "AgreementIncomingVotesQueueLength": 20000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockDBDir": "",
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BlockServiceMemCap": 500000000,
+ "BroadcastConnectionsLimit": -1,
+ "CadaverDirectory": "",
+ "CadaverSizeTarget": 0,
+ "CatchpointDir": "",
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ColdDataDir": "",
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "CrashDBDir": "",
+ "DNSBootstrapID": "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableAPIAuth": false,
+ "DisableLedgerLRUCache": false,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": false,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableExperimentalAPI": false,
+ "EnableFollowMode": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnableP2P": false,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableTxBacklogAppRateLimiting": true,
+ "EnableTxBacklogRateLimiting": true,
+ "EnableTxnEvalTracer": false,
+ "EnableUsageLog": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "HeartbeatUpdateInterval": 600,
+ "HotDataDir": "",
+ "IncomingConnectionsLimit": 2400,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveDir": "",
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogFileDir": "",
+ "LogSizeLimit": 1073741824,
+ "MaxAPIBoxPerApplication": 100000,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxAcctLookback": 4,
+ "MaxBlockHistoryLookback": 0,
+ "MaxCatchpointDownloadDuration": 43200000000000,
+ "MaxConnectionsPerIP": 15,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "P2PPersistPeerID": false,
+ "P2PPrivateKeyLocation": "",
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "StateproofDir": "",
+ "StorageEngine": "sqlite",
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TrackerDBDir": "",
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxBacklogAppTxPerSecondRate": 100,
+ "TxBacklogAppTxRateLimiterMaxSize": 1048576,
+ "TxBacklogRateLimitingCongestionPct": 50,
+ "TxBacklogReservedCapacityPerPeer": 20,
+ "TxBacklogServiceRateWindowSeconds": 10,
+ "TxBacklogSize": 26000,
+ "TxIncomingFilterMaxSize": 500000,
+ "TxIncomingFilteringFlags": 1,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}
diff --git a/test/testdata/configs/config-v33.json b/test/testdata/configs/config-v33.json
new file mode 100644
index 000000000..aa1cb7171
--- /dev/null
+++ b/test/testdata/configs/config-v33.json
@@ -0,0 +1,140 @@
+{
+ "Version": 33,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 15,
+ "AgreementIncomingProposalsQueueLength": 50,
+ "AgreementIncomingVotesQueueLength": 20000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockDBDir": "",
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BlockServiceMemCap": 500000000,
+ "BroadcastConnectionsLimit": -1,
+ "CadaverDirectory": "",
+ "CadaverSizeTarget": 0,
+ "CatchpointDir": "",
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ColdDataDir": "",
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "CrashDBDir": "",
+ "DNSBootstrapID": "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableAPIAuth": false,
+ "DisableLedgerLRUCache": false,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": false,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableExperimentalAPI": false,
+ "EnableFollowMode": false,
+ "EnableGossipBlockService": true,
+ "EnableGossipService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnableP2P": false,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableTxBacklogAppRateLimiting": true,
+ "EnableTxBacklogRateLimiting": true,
+ "EnableTxnEvalTracer": false,
+ "EnableUsageLog": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "HeartbeatUpdateInterval": 600,
+ "HotDataDir": "",
+ "IncomingConnectionsLimit": 2400,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveDir": "",
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogFileDir": "",
+ "LogSizeLimit": 1073741824,
+ "MaxAPIBoxPerApplication": 100000,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxAcctLookback": 4,
+ "MaxBlockHistoryLookback": 0,
+ "MaxCatchpointDownloadDuration": 43200000000000,
+ "MaxConnectionsPerIP": 15,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "P2PPersistPeerID": false,
+ "P2PPrivateKeyLocation": "",
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "StateproofDir": "",
+ "StorageEngine": "sqlite",
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TrackerDBDir": "",
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxBacklogAppTxPerSecondRate": 100,
+ "TxBacklogAppTxRateLimiterMaxSize": 1048576,
+ "TxBacklogRateLimitingCongestionPct": 50,
+ "TxBacklogReservedCapacityPerPeer": 20,
+ "TxBacklogServiceRateWindowSeconds": 10,
+ "TxBacklogSize": 26000,
+ "TxIncomingFilterMaxSize": 500000,
+ "TxIncomingFilteringFlags": 1,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}
diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod
index 79a485c24..965cbd9a0 100644
--- a/tools/block-generator/go.mod
+++ b/tools/block-generator/go.mod
@@ -35,7 +35,7 @@ require (
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 // indirect
github.com/consensys/bavard v0.1.13 // indirect
- github.com/consensys/gnark-crypto v0.12.0 // indirect
+ github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum
index d4620db58..dbba8cc72 100644
--- a/tools/block-generator/go.sum
+++ b/tools/block-generator/go.sum
@@ -119,8 +119,8 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6/go.mod h1:
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.12.0 h1:1OnSpOykNkUIBIBJKdhwy2p0JlW5o+Az02ICzZmvvdg=
-github.com/consensys/gnark-crypto v0.12.0/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
+github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
+github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
diff --git a/util/codecs/json.go b/util/codecs/json.go
index e283ef062..8c2cebf08 100644
--- a/util/codecs/json.go
+++ b/util/codecs/json.go
@@ -18,6 +18,7 @@ package codecs
import (
"bufio"
+ "bytes"
"encoding/json"
"fmt"
"io"
@@ -48,6 +49,16 @@ func LoadObjectFromFile(filename string, object interface{}) (err error) {
return
}
+func writeBytes(writer io.Writer, object interface{}, prettyFormat bool) error {
+ var enc *json.Encoder
+ if prettyFormat {
+ enc = NewFormattedJSONEncoder(writer)
+ } else {
+ enc = json.NewEncoder(writer)
+ }
+ return enc.Encode(object)
+}
+
// SaveObjectToFile implements the common pattern for saving an object to a file as json
func SaveObjectToFile(filename string, object interface{}, prettyFormat bool) error {
f, err := os.Create(filename)
@@ -55,22 +66,13 @@ func SaveObjectToFile(filename string, object interface{}, prettyFormat bool) er
return err
}
defer f.Close()
- var enc *json.Encoder
- if prettyFormat {
- enc = NewFormattedJSONEncoder(f)
- } else {
- enc = json.NewEncoder(f)
- }
- err = enc.Encode(object)
- return err
+ return writeBytes(f, object, prettyFormat)
}
-// SaveNonDefaultValuesToFile saves an object to a file as json, but only fields that are not
+// WriteNonDefaultValues writes object to a writer as json, but only fields that are not
// currently set to be the default value.
// Optionally, you can specify an array of field names to always include.
-func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface{}, ignore []string, prettyFormat bool) error {
- // Serialize object to temporary file.
- // Read file into string array
+func WriteNonDefaultValues(writer io.Writer, object, defaultObject interface{}, ignore []string) error {
// Iterate one line at a time, parse Name
// If ignore contains Name, don't delete
// Use reflection to compare object[Name].value == defaultObject[Name].value
@@ -78,25 +80,13 @@ func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface
// When done, ensure last value line doesn't include comma
// Write string array to the writer.
- file, err := os.CreateTemp("", "encsndv")
- if err != nil {
- return err
- }
- name := file.Name()
- file.Close()
-
- defer os.Remove(name)
- // Save object to file pretty-formatted so we can read one value-per-line
- err = SaveObjectToFile(name, object, true)
+ var buf bytes.Buffer
+ err := writeBytes(&buf, object, true)
if err != nil {
return err
}
+ content := buf.Bytes()
- // Read lines from encoded file into string array
- content, err := os.ReadFile(name)
- if err != nil {
- return err
- }
valueLines := strings.Split(string(content), "\n")
// Create maps of the name->value pairs for the object and the defaults
@@ -155,19 +145,30 @@ func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface
}
}
+ combined := strings.Join(newFile, "\n")
+ combined = strings.TrimRight(combined, "\r\n ")
+ _, err = writer.Write([]byte(combined))
+ return err
+}
+
+// SaveNonDefaultValuesToFile saves an object to a file as json, but only fields that are not
+// currently set to be the default value.
+// Optionally, you can specify an array of field names to always include.
+func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface{}, ignore []string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
writer := bufio.NewWriter(outFile)
- combined := strings.Join(newFile, "\n")
- combined = strings.TrimRight(combined, "\r\n ")
- _, err = writer.WriteString(combined)
- if err == nil {
- writer.Flush()
+
+ err = WriteNonDefaultValues(writer, object, defaultObject, ignore)
+ if err != nil {
+ return err
}
- return err
+
+ writer.Flush()
+ return nil
}
func extractValueName(line string) (name string) {
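
The refactor separates serialization from file handling: WriteNonDefaultValues now targets any io.Writer, and SaveNonDefaultValuesToFile is reduced to a wrapper that opens the file (note it also drops the prettyFormat parameter from its signature). A usage sketch against an in-memory buffer, with a hypothetical cfg type:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/algorand/go-algorand/util/codecs"
)

// cfg is a hypothetical config type; only fields that differ from the
// defaults should appear in the output.
type cfg struct {
	Version      uint32
	GossipFanout int
}

func main() {
	def := cfg{Version: 1, GossipFanout: 50}
	cur := cfg{Version: 1, GossipFanout: 25}

	var buf bytes.Buffer
	if err := codecs.WriteNonDefaultValues(&buf, cur, def, nil); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // only GossipFanout is emitted
}
```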
diff --git a/util/codecs/json_test.go b/util/codecs/json_test.go
index 1f5653197..6bd4d53cd 100644
--- a/util/codecs/json_test.go
+++ b/util/codecs/json_test.go
@@ -17,9 +17,15 @@
package codecs
import (
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
+ "bytes"
+ "os"
+ "path"
"testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
)
type testValue struct {
@@ -30,6 +36,7 @@ type testValue struct {
func TestIsDefaultValue(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
a := require.New(t)
@@ -52,3 +59,113 @@ func TestIsDefaultValue(t *testing.T) {
a.False(isDefaultValue("Int", objectValues, defaultValues))
a.True(isDefaultValue("Missing", objectValues, defaultValues))
}
+
+func TestSaveObjectToFile(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type TestType struct {
+ A uint64
+ B string
+ }
+
+ obj := TestType{1024, "test"}
+
+ // prettyFormat = false
+ {
+ filename := path.Join(t.TempDir(), "test.json")
+ SaveObjectToFile(filename, obj, false)
+ data, err := os.ReadFile(filename)
+ require.NoError(t, err)
+ expected := `{"A":1024,"B":"test"}
+`
+ require.Equal(t, expected, string(data))
+ }
+
+ // prettyFormat = true
+ {
+ filename := path.Join(t.TempDir(), "test.json")
+ SaveObjectToFile(filename, obj, true)
+ data, err := os.ReadFile(filename)
+ require.NoError(t, err)
+ expected := `{
+ "A": 1024,
+ "B": "test"
+}
+`
+ require.Equal(t, expected, string(data))
+ }
+
+}
+
+func TestWriteNonDefaultValue(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type TestType struct {
+ Version uint32
+ Archival bool
+ GossipFanout int
+ NetAddress string
+ ReconnectTime time.Duration
+ }
+
+ defaultObject := TestType{
+ Version: 1,
+ Archival: true,
+ GossipFanout: 50,
+ NetAddress: "Denver",
+ ReconnectTime: 60 * time.Second,
+ }
+
+ testcases := []struct {
+ name string
+ in TestType
+ out string
+ ignore []string
+ }{
+ {
+ name: "all defaults",
+ in: defaultObject,
+ out: `{
+}`,
+ }, {
+ name: "some defaults",
+ in: TestType{
+ Version: 1,
+ Archival: false,
+ GossipFanout: 25,
+ NetAddress: "Denver",
+ ReconnectTime: 60 * time.Nanosecond,
+ },
+ out: `{
+ "Archival": false,
+ "GossipFanout": 25,
+ "ReconnectTime": 60
+}`,
+ }, {
+ name: "ignore",
+ in: defaultObject,
+ ignore: []string{"Version", "Archival", "GossipFanout", "NetAddress", "ReconnectTime"},
+ out: `{
+ "Version": 1,
+ "Archival": true,
+ "GossipFanout": 50,
+ "NetAddress": "Denver",
+ "ReconnectTime": 60000000000
+}`,
+ },
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ a := require.New(t)
+ var writer bytes.Buffer
+ err := WriteNonDefaultValues(&writer, tc.in, defaultObject, tc.ignore)
+ a.NoError(err)
+ a.Equal(tc.out, writer.String())
+ })
+ }
+}
diff --git a/util/condvar/timedwait.go b/util/condvar/timedwait.go
index e14f2b33b..7b275bfb3 100644
--- a/util/condvar/timedwait.go
+++ b/util/condvar/timedwait.go
@@ -32,12 +32,12 @@ import (
// This function does not indicate whether a timeout occurred or not;
// the caller should check time.Now() as needed.
func TimedWait(c *sync.Cond, timeout time.Duration) {
- var done int32
+ var done atomic.Bool
go func() {
util.NanoSleep(timeout)
- for atomic.LoadInt32(&done) == 0 {
+ for !done.Load() {
c.Broadcast()
// It is unlikely but possible that the parent
@@ -49,5 +49,5 @@ func TimedWait(c *sync.Cond, timeout time.Duration) {
}()
c.Wait()
- atomic.StoreInt32(&done, 1)
+ done.Store(true)
}
diff --git a/util/metrics/counter.go b/util/metrics/counter.go
index 2efb52be5..db0c6e686 100644
--- a/util/metrics/counter.go
+++ b/util/metrics/counter.go
@@ -20,7 +20,6 @@ import (
"math"
"strconv"
"strings"
- "sync/atomic"
"time"
)
@@ -111,7 +110,7 @@ func (counter *Counter) AddMicrosecondsSince(t time.Time, labels map[string]stri
// GetUint64Value returns the value of the counter.
func (counter *Counter) GetUint64Value() (x uint64) {
- return atomic.LoadUint64(&counter.intValue)
+ return counter.intValue.Load()
}
// GetUint64ValueForLabels returns the value of the counter for the given labels or 0 if it's not found.
@@ -128,7 +127,7 @@ func (counter *Counter) GetUint64ValueForLabels(labels map[string]string) uint64
}
func (counter *Counter) fastAddUint64(x uint64) {
- if atomic.AddUint64(&counter.intValue, x) == x {
+ if counter.intValue.Add(x) == x {
// What we just added is the whole value, this
// is the first Add. Create a dummy
// counterValue for the no-labels value.
@@ -202,7 +201,7 @@ func (counter *Counter) WriteMetric(buf *strings.Builder, parentLabels string) {
buf.WriteString("} ")
value := l.counter
if len(l.labels) == 0 {
- value += atomic.LoadUint64(&counter.intValue)
+ value += counter.intValue.Load()
}
buf.WriteString(strconv.FormatUint(value, 10))
buf.WriteString("\n")
@@ -221,7 +220,7 @@ func (counter *Counter) AddMetric(values map[string]float64) {
for _, l := range counter.values {
sum := l.counter
if len(l.labels) == 0 {
- sum += atomic.LoadUint64(&counter.intValue)
+ sum += counter.intValue.Load()
}
var suffix string
if len(l.formattedLabels) > 0 {
diff --git a/util/metrics/counterCommon.go b/util/metrics/counterCommon.go
index 2a810ace6..dc187b3b4 100644
--- a/util/metrics/counterCommon.go
+++ b/util/metrics/counterCommon.go
@@ -17,14 +17,15 @@
package metrics
import (
+ "sync/atomic"
+
"github.com/algorand/go-deadlock"
)
// Counter represent a single counter variable.
type Counter struct {
// Collects value for special fast-path with no labels through Inc(nil) AddUint64(x, nil)
- // We want to make it on a 64-bit aligned address for ARM compiliers as it's being used by AddUint64
- intValue uint64
+ intValue atomic.Uint64
deadlock.Mutex
name string
diff --git a/util/metrics/gauge.go b/util/metrics/gauge.go
index ce203d47c..593be0e9d 100644
--- a/util/metrics/gauge.go
+++ b/util/metrics/gauge.go
@@ -24,7 +24,7 @@ import (
// Gauge represent a single gauge variable.
type Gauge struct {
- value uint64
+ value atomic.Uint64
name string
description string
}
@@ -59,12 +59,12 @@ func (gauge *Gauge) Deregister(reg *Registry) {
// Add increases gauge by x
func (gauge *Gauge) Add(x uint64) {
- atomic.AddUint64(&gauge.value, x)
+ gauge.value.Add(x)
}
// Set sets gauge to x
func (gauge *Gauge) Set(x uint64) {
- atomic.StoreUint64(&gauge.value, x)
+ gauge.value.Store(x)
}
// WriteMetric writes the metric into the output stream
@@ -82,14 +82,14 @@ func (gauge *Gauge) WriteMetric(buf *strings.Builder, parentLabels string) {
buf.WriteString(parentLabels)
}
buf.WriteString("} ")
- value := atomic.LoadUint64(&gauge.value)
+ value := gauge.value.Load()
buf.WriteString(strconv.FormatUint(value, 10))
buf.WriteString("\n")
}
// AddMetric adds the metric into the map
func (gauge *Gauge) AddMetric(values map[string]float64) {
- value := atomic.LoadUint64(&gauge.value)
+ value := gauge.value.Load()
values[sanitizeTelemetryName(gauge.name)] = float64(value)
}
diff --git a/util/metrics/metrics.go b/util/metrics/metrics.go
index cb376eb0a..cebece25f 100644
--- a/util/metrics/metrics.go
+++ b/util/metrics/metrics.go
@@ -123,6 +123,8 @@ var (
TransactionMessagesDupRawMsg = MetricName{Name: "algod_transaction_messages_dropped_dup_raw", Description: "Number of dupe raw transaction messages dropped"}
// TransactionMessagesDupCanonical "Number of transaction messages dropped after canonical re-encoding"
TransactionMessagesDupCanonical = MetricName{Name: "algod_transaction_messages_dropped_dup_canonical", Description: "Number of transaction messages dropped after canonical re-encoding"}
+ // TransactionMessagesAppLimiterDrop "Number of transaction messages dropped after app limits check"
+ TransactionMessagesAppLimiterDrop = MetricName{Name: "algod_transaction_messages_dropped_app_limiter", Description: "Number of transaction messages dropped after app limits check"}
// TransactionMessagesBacklogSize "Number of transaction messages in the TX handler backlog queue"
TransactionMessagesBacklogSize = MetricName{Name: "algod_transaction_messages_backlog_size", Description: "Number of transaction messages in the TX handler backlog queue"}