author    John Lee <64482439+algojohnlee@users.noreply.github.com>  2023-11-29 12:53:41 -0500
committer GitHub <noreply@github.com>  2023-11-29 12:53:41 -0500
commit    14c0d8d0811e8a83a0f55f6692e233873b80199b (patch)
tree      f61dacb4e16815d7b0da5411e78c6d14bb71dc1a
parent    c724cd311dd7e86d73d65da4e2faaa6a8c100216 (diff)
parent    89254b4d55bb294826f143b2e81ac657a8ae5d2c (diff)
Merge pull request #5849 from Algo-devops-service/relbeta3.20.1 (v3.20.1-beta)
-rw-r--r--  buildnumber.dat                                         2
-rw-r--r--  catchup/catchpointService.go                           11
-rw-r--r--  catchup/catchpointService_test.go                      70
-rw-r--r--  cmd/goal/application.go                                 8
-rw-r--r--  cmd/goal/interact.go                                    4
-rw-r--r--  cmd/goal/tealsign.go                                    2
-rw-r--r--  config/consensus.go                                    24
-rw-r--r--  config/localTemplate.go                                 6
-rw-r--r--  config/local_defaults.go                                3
-rw-r--r--  crypto/batchverifier.c                                 20
-rw-r--r--  crypto/batchverifier.go                                81
-rw-r--r--  crypto/curve25519.go                                   25
-rw-r--r--  crypto/hashes.go                                        2
-rw-r--r--  crypto/hashes_test.go                                   1
-rw-r--r--  crypto/merklearray/layer.go                             6
-rw-r--r--  crypto/merklearray/merkle_test.go                       5
-rw-r--r--  crypto/merklearray/partial.go                           2
-rw-r--r--  crypto/onetimesig.go                                   15
-rw-r--r--  crypto/util.go                                         12
-rw-r--r--  crypto/util_test.go                                    31
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_test.go        6
-rw-r--r--  daemon/algod/api/server/v2/test/helpers.go              3
-rw-r--r--  data/account/participation.go                           4
-rw-r--r--  data/account/participation_test.go                     10
-rw-r--r--  data/bookkeeping/genesis_test.go                       28
-rw-r--r--  data/bookkeeping/lightBlockHeader.go                   20
-rw-r--r--  data/bookkeeping/msgp_gen.go                           45
-rw-r--r--  data/transactions/logic/program.go                      2
-rw-r--r--  installer/config.json.example                           3
-rw-r--r--  ledger/catchupaccessor.go                               2
-rw-r--r--  network/wsNetwork.go                                    9
-rw-r--r--  network/wsNetwork_test.go                              33
-rw-r--r--  test/e2e-go/features/stateproofs/stateproofs_test.go    2
-rw-r--r--  test/testdata/configs/config-v33.json                 140
34 files changed, 536 insertions(+), 101 deletions(-)
diff --git a/buildnumber.dat b/buildnumber.dat
index 573541ac9..d00491fd7 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-0
+1
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index a5175aff4..a0a22c5e3 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -373,8 +373,9 @@ func (cs *CatchpointCatchupService) processStageLatestBlockDownload() (err error
var blk *bookkeeping.Block
var cert *agreement.Certificate
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
- if ledgerBlock, err := cs.ledger.Block(blockRound); err == nil {
+ if ledgerBlock, ledgerCert, err0 := cs.ledger.BlockCert(blockRound); err0 == nil {
blk = &ledgerBlock
+ cert = &ledgerCert
}
var protoParams config.ConsensusParams
var ok bool
@@ -551,15 +552,17 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
}
blk = nil
+ cert = nil
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
- if ledgerBlock, err := cs.ledger.Block(topBlock.Round() - basics.Round(blocksFetched)); err == nil {
+ if ledgerBlock, ledgerCert, err0 := cs.ledger.BlockCert(topBlock.Round() - basics.Round(blocksFetched)); err0 == nil {
blk = &ledgerBlock
+ cert = &ledgerCert
} else {
- switch err.(type) {
+ switch err0.(type) {
case ledgercore.ErrNoEntry:
// this is expected, ignore this one.
default:
- cs.log.Warnf("processStageBlocksDownload encountered the following error when attempting to retrieve the block for round %d : %v", topBlock.Round()-basics.Round(blocksFetched), err)
+ cs.log.Warnf("processStageBlocksDownload encountered the following error when attempting to retrieve the block for round %d : %v", topBlock.Round()-basics.Round(blocksFetched), err0)
}
}
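For context, switching the ledger call from Block to BlockCert lets the catchup service keep the certificate alongside a locally available block, so the accessor is never handed a nil cert on this path. A minimal sketch of the intended call pattern, assuming hypothetical values lg (a ledger implementing the updated interface), accessor (the catchpoint accessor), ctx, and rnd:

	var blk *bookkeeping.Block
	var cert *agreement.Certificate
	// Prefer a block (and its certificate) that the ledger already has over downloading it.
	if ledgerBlock, ledgerCert, err0 := lg.BlockCert(rnd); err0 == nil {
		blk = &ledgerBlock
		cert = &ledgerCert
	}
	if blk != nil && cert != nil {
		// StoreBlock receives a non-nil certificate whenever the block came from the ledger.
		if err := accessor.StoreBlock(ctx, blk, cert); err != nil {
			return err
		}
	}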
diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go
index 48cea110d..34f1adf0f 100644
--- a/catchup/catchpointService_test.go
+++ b/catchup/catchpointService_test.go
@@ -22,12 +22,14 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -35,7 +37,7 @@ import (
type catchpointCatchupLedger struct {
}
-func (l *catchpointCatchupLedger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
+func (l *catchpointCatchupLedger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
blk = bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{
@@ -43,13 +45,14 @@ func (l *catchpointCatchupLedger) Block(rnd basics.Round) (blk bookkeeping.Block
},
},
}
+ cert = agreement.Certificate{}
commitments, err := blk.PaysetCommit()
if err != nil {
- return blk, err
+ return blk, cert, err
}
blk.TxnCommitments = commitments
- return blk, nil
+ return blk, cert, nil
}
func (l *catchpointCatchupLedger) GenesisHash() (d crypto.Digest) {
@@ -95,3 +98,64 @@ func TestCatchpointServicePeerRank(t *testing.T) {
err := cs.processStageLatestBlockDownload()
require.NoError(t, err)
}
+
+type catchpointAccessorMock struct {
+ mocks.MockCatchpointCatchupAccessor
+ t *testing.T
+ topBlk bookkeeping.Block
+}
+
+func (m *catchpointAccessorMock) EnsureFirstBlock(ctx context.Context) (blk bookkeeping.Block, err error) {
+ return m.topBlk, nil
+}
+
+func (m *catchpointAccessorMock) StoreBlock(ctx context.Context, blk *bookkeeping.Block, cert *agreement.Certificate) (err error) {
+ require.NotNil(m.t, blk)
+ require.NotNil(m.t, cert)
+ return nil
+}
+
+type catchpointCatchupLedger2 struct {
+ catchpointCatchupLedger
+ blk bookkeeping.Block
+}
+
+func (l *catchpointCatchupLedger2) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
+ return l.blk, agreement.Certificate{}, nil
+}
+
+// TestProcessStageBlocksDownloadNilCert ensures StoreBlock does not receive a nil certificate when the ledger already has the block.
+// It uses two mocks, catchpointAccessorMock and catchpointCatchupLedger2, plus pre-crafted blocks to drive a single iteration of processStageBlocksDownload.
+func TestProcessStageBlocksDownloadNilCert(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var err error
+ blk1 := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: 1,
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ },
+ }
+ blk1.TxnCommitments, err = blk1.PaysetCommit()
+ require.NoError(t, err)
+
+ blk2 := blk1
+ blk2.BlockHeader.Round = 2
+ blk2.BlockHeader.Branch = blk1.Hash()
+ blk2.TxnCommitments, err = blk2.PaysetCommit()
+ require.NoError(t, err)
+
+ ctx, cf := context.WithCancel(context.Background())
+ cs := CatchpointCatchupService{
+ ctx: ctx,
+ cancelCtxFunc: cf,
+ ledgerAccessor: &catchpointAccessorMock{topBlk: blk2, t: t},
+ ledger: &catchpointCatchupLedger2{blk: blk1},
+ log: logging.TestingLog(t),
+ }
+
+ err = cs.processStageBlocksDownload()
+ require.NoError(t, err)
+}
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 8d442bf5c..2aebe89de 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -488,7 +488,7 @@ var createAppCmd = &cobra.Command{
reportErrorf(errorBroadcastingTX, err2)
}
- reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
@@ -563,7 +563,7 @@ var updateAppCmd = &cobra.Command{
reportErrorf(errorBroadcastingTX, err2)
}
- reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
@@ -1455,9 +1455,9 @@ var methodAppCmd = &cobra.Command{
// Report tx details to user
if methodCreatesApp {
- reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
} else if onCompletionEnum == transactions.UpdateApplicationOC {
- reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
}
reportInfof("Issued %d transaction(s):", len(signedTxnGroup))
diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go
index 825d74388..ca79daf0d 100644
--- a/cmd/goal/interact.go
+++ b/cmd/goal/interact.go
@@ -625,9 +625,9 @@ var appExecuteCmd = &cobra.Command{
}
if appIdx == 0 {
- reportInfof("Attempting to create app (global ints %d, global blobs %d, local ints %d, local blobs %d, approval size %d, hash %v; clear size %d, hash %v)", globalSchema.NumUint, globalSchema.NumByteSlice, localSchema.NumUint, localSchema.NumByteSlice, len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to create app (global ints %d, global blobs %d, local ints %d, local blobs %d, approval size %d, hash %v; clear size %d, hash %v)", globalSchema.NumUint, globalSchema.NumByteSlice, localSchema.NumUint, localSchema.NumByteSlice, len(approvalProg), logic.HashProgram(approvalProg), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
} else if onCompletion == transactions.UpdateApplicationOC {
- reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
+ reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), logic.HashProgram(clearProg))
}
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
diff --git a/cmd/goal/tealsign.go b/cmd/goal/tealsign.go
index 9d9f144da..45fbd3a98 100644
--- a/cmd/goal/tealsign.go
+++ b/cmd/goal/tealsign.go
@@ -139,7 +139,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
reportErrorf(tealsignEmptyLogic)
}
- progHash = crypto.HashObj(logic.Program(stxn.Lsig.Logic))
+ progHash = logic.HashProgram(stxn.Lsig.Logic)
} else {
// Otherwise, the contract address is the logic hash
parsedAddr, err := basics.UnmarshalChecksumAddress(contractAddr)
diff --git a/config/consensus.go b/config/consensus.go
index 25b5dc785..a2f28b97d 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -419,6 +419,11 @@ type ConsensusParams struct {
// their account balances.
StateProofExcludeTotalWeightWithRewards bool
+ // StateProofBlockHashInLightHeader specifies that the LightBlockHeader
+ // committed to by state proofs should contain the BlockHash of each
+ // block, instead of the seed.
+ StateProofBlockHashInLightHeader bool
+
// EnableAssetCloseAmount adds an extra field to the ApplyData. The field contains the amount of the remaining
// asset that were sent to the close-to address.
EnableAssetCloseAmount bool
@@ -759,15 +764,22 @@ func LoadConfigurableConsensusProtocols(dataDirectory string) error {
return err
}
if newConsensus != nil {
- Consensus = newConsensus
- // Set allocation limits
- for _, p := range Consensus {
- checkSetAllocBounds(p)
- }
+ SetConfigurableConsensusProtocols(newConsensus)
}
return nil
}
+// SetConfigurableConsensusProtocols sets the configurable protocols.
+func SetConfigurableConsensusProtocols(newConsensus ConsensusProtocols) ConsensusProtocols {
+ oldConsensus := Consensus
+ Consensus = newConsensus
+ // Set allocation limits
+ for _, p := range Consensus {
+ checkSetAllocBounds(p)
+ }
+ return oldConsensus
+}
+
// PreloadConfigurableConsensusProtocols loads the configurable protocols from the data directory
// and merge it with a copy of the Consensus map. Then, it returns it to the caller.
func PreloadConfigurableConsensusProtocols(dataDirectory string) (ConsensusProtocols, error) {
@@ -1377,6 +1389,8 @@ func initConsensusProtocols() {
vFuture.LogicSigVersion = 10 // When moving this to a release, put a new higher LogicSigVersion here
vFuture.EnableLogicSigCostPooling = true
+ vFuture.StateProofBlockHashInLightHeader = true
+
// Setting DynamicFilterTimeout in vFuture will impact e2e test performance
// by reducing round time. Hence, it is commented out for now.
// vFuture.DynamicFilterTimeout = true
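Since SetConfigurableConsensusProtocols returns the table it replaced, callers that need temporary protocol overrides can restore the previous table when done (the e2e state proof test below uses exactly this). A minimal sketch of the save/restore pattern, assuming a prepared ConsensusProtocols value named custom:

	// Install custom consensus protocols, then restore the prior table afterwards.
	old := config.SetConfigurableConsensusProtocols(custom)
	defer config.SetConfigurableConsensusProtocols(old)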
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 07a9bf5eb..61c2381fa 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -42,7 +42,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32" version[33]:"33"`
	// Archival nodes retain a full copy of the block history. Non-Archival nodes will delete old blocks and only retain what's needed to properly validate blockchain messages (the precise number of recent blocks depends on the consensus parameters. Currently the last 1321 blocks are required). This means that non-Archival nodes require significantly less storage than Archival nodes. Relays (nodes with a valid NetAddress) are always Archival, regardless of this setting. This may change in the future. If setting this to true for the first time, the existing ledger may need to be deleted to get the historical values stored as the setting only affects current blocks forward. To do this, shutdown the node and delete all .sqlite files within the data/testnet-version directory, except the crash.sqlite file. Restart the node and wait for the node to sync.
Archival bool `version[0]:"false"`
@@ -375,6 +375,10 @@ type Local struct {
// 0 means don't store any, -1 mean unlimited and positive number suggest the maximum number of most recent catchpoint files to store.
CatchpointFileHistoryLength int `version[7]:"365"`
+ // EnableGossipService enables the gossip network HTTP websockets endpoint. The functionality of this depends on NetAddress, which must also be provided.
+ // This functionality is required for serving gossip traffic.
+ EnableGossipService bool `version[33]:"true"`
+
// EnableLedgerService enables the ledger serving service. The functionality of this depends on NetAddress, which must also be provided.
// This functionality is required for the catchpoint catchup.
EnableLedgerService bool `version[7]:"false"`
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 06a26f2c1..3df773a76 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 32,
+ Version: 33,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 15,
@@ -70,6 +70,7 @@ var defaultLocal = Local{
EnableExperimentalAPI: false,
EnableFollowMode: false,
EnableGossipBlockService: true,
+ EnableGossipService: true,
EnableIncomingMessageFilter: false,
EnableLedgerService: false,
EnableMetricReporting: false,
diff --git a/crypto/batchverifier.c b/crypto/batchverifier.c
new file mode 100644
index 000000000..118542aa7
--- /dev/null
+++ b/crypto/batchverifier.c
@@ -0,0 +1,20 @@
+#include "sodium.h"
+int ed25519_batch_wrapper(const unsigned char **messages2D,
+ const unsigned char **publicKeys2D,
+ const unsigned char **signatures2D,
+ const unsigned char *messages1D,
+ const unsigned long long *mlen,
+ const unsigned char *publicKeys1D,
+ const unsigned char *signatures1D,
+ size_t num,
+ int *valid) {
+ // fill 2-D arrays for messages, pks, sigs from provided 1-D arrays
+ unsigned long long mpos = 0;
+ for (size_t i = 0; i < num; i++) {
+ messages2D[i] = &messages1D[mpos];
+ mpos += mlen[i];
+ publicKeys2D[i] = &publicKeys1D[i*crypto_sign_ed25519_PUBLICKEYBYTES];
+ signatures2D[i] = &signatures1D[i*crypto_sign_ed25519_BYTES];
+ }
+ return crypto_sign_ed25519_open_batch(messages2D, mlen, publicKeys2D, signatures2D, num, valid);
+}
diff --git a/crypto/batchverifier.go b/crypto/batchverifier.go
index 9c14771ba..af7a677ac 100644
--- a/crypto/batchverifier.go
+++ b/crypto/batchverifier.go
@@ -30,15 +30,22 @@ package crypto
// #cgo windows,amd64 CFLAGS: -I${SRCDIR}/libs/windows/amd64/include
// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/libs/windows/amd64/lib/libsodium.a
// #include <stdint.h>
-// #include "sodium.h"
// enum {
// sizeofPtr = sizeof(void*),
// sizeofULongLong = sizeof(unsigned long long),
// };
+// int ed25519_batch_wrapper(const unsigned char **messages2D,
+// const unsigned char **publicKeys2D,
+// const unsigned char **signatures2D,
+// const unsigned char *messages1D,
+// const unsigned long long *mlen,
+// const unsigned char *publicKeys1D,
+// const unsigned char *signatures1D,
+// size_t num,
+// int *valid_p);
import "C"
import (
"errors"
- "runtime"
"unsafe"
)
@@ -120,14 +127,21 @@ func (b *BatchVerifier) Verify() error {
// if some signatures are invalid, true will be set in failed at the corresponding indexes, and
// ErrBatchVerificationFailed for err
func (b *BatchVerifier) VerifyWithFeedback() (failed []bool, err error) {
- if b.GetNumberOfEnqueuedSignatures() == 0 {
+ if len(b.messages) == 0 {
return nil, nil
}
- var messages = make([][]byte, b.GetNumberOfEnqueuedSignatures())
+
+ const estimatedMessageSize = 64
+ msgLengths := make([]uint64, 0, len(b.messages))
+ var messages = make([]byte, 0, len(b.messages)*estimatedMessageSize)
+
+ lenWas := 0
for i := range b.messages {
- messages[i] = HashRep(b.messages[i])
+ messages = HashRepToBuff(b.messages[i], messages)
+ msgLengths = append(msgLengths, uint64(len(messages)-lenWas))
+ lenWas = len(messages)
}
- allValid, failed := batchVerificationImpl(messages, b.publicKeys, b.signatures)
+ allValid, failed := batchVerificationImpl(messages, msgLengths, b.publicKeys, b.signatures)
if allValid {
return failed, nil
}
@@ -137,50 +151,27 @@ func (b *BatchVerifier) VerifyWithFeedback() (failed []bool, err error) {
// batchVerificationImpl invokes the ed25519 batch verification algorithm.
// it returns true if all the signatures were authentically signed by the owners
// otherwise, returns false, and sets the indexes of the failed sigs in failed
-func batchVerificationImpl(messages [][]byte, publicKeys []SignatureVerifier, signatures []Signature) (allSigsValid bool, failed []bool) {
-
- numberOfSignatures := len(messages)
-
- messagesAllocation := C.malloc(C.size_t(C.sizeofPtr * numberOfSignatures))
- messagesLenAllocation := C.malloc(C.size_t(C.sizeofULongLong * numberOfSignatures))
- publicKeysAllocation := C.malloc(C.size_t(C.sizeofPtr * numberOfSignatures))
- signaturesAllocation := C.malloc(C.size_t(C.sizeofPtr * numberOfSignatures))
- valid := C.malloc(C.size_t(C.sizeof_int * numberOfSignatures))
-
- defer func() {
- // release staging memory
- C.free(messagesAllocation)
- C.free(messagesLenAllocation)
- C.free(publicKeysAllocation)
- C.free(signaturesAllocation)
- C.free(valid)
- }()
-
- // load all the data pointers into the array pointers.
- for i := 0; i < numberOfSignatures; i++ {
- *(*uintptr)(unsafe.Pointer(uintptr(messagesAllocation) + uintptr(i*C.sizeofPtr))) = uintptr(unsafe.Pointer(&messages[i][0]))
- *(*C.ulonglong)(unsafe.Pointer(uintptr(messagesLenAllocation) + uintptr(i*C.sizeofULongLong))) = C.ulonglong(len(messages[i]))
- *(*uintptr)(unsafe.Pointer(uintptr(publicKeysAllocation) + uintptr(i*C.sizeofPtr))) = uintptr(unsafe.Pointer(&publicKeys[i][0]))
- *(*uintptr)(unsafe.Pointer(uintptr(signaturesAllocation) + uintptr(i*C.sizeofPtr))) = uintptr(unsafe.Pointer(&signatures[i][0]))
- }
+func batchVerificationImpl(messages []byte, msgLengths []uint64, publicKeys []SignatureVerifier, signatures []Signature) (allSigsValid bool, failed []bool) {
+
+ numberOfSignatures := len(msgLengths)
+ valid := make([]C.int, numberOfSignatures)
+ messages2D := make([]*C.uchar, numberOfSignatures)
+ publicKeys2D := make([]*C.uchar, numberOfSignatures)
+ signatures2D := make([]*C.uchar, numberOfSignatures)
// call the batch verifier
- allValid := C.crypto_sign_ed25519_open_batch(
- (**C.uchar)(unsafe.Pointer(messagesAllocation)),
- (*C.ulonglong)(unsafe.Pointer(messagesLenAllocation)),
- (**C.uchar)(unsafe.Pointer(publicKeysAllocation)),
- (**C.uchar)(unsafe.Pointer(signaturesAllocation)),
- C.size_t(len(messages)),
- (*C.int)(unsafe.Pointer(valid)))
-
- runtime.KeepAlive(messages)
- runtime.KeepAlive(publicKeys)
- runtime.KeepAlive(signatures)
+ allValid := C.ed25519_batch_wrapper(
+ &messages2D[0], &publicKeys2D[0], &signatures2D[0],
+ (*C.uchar)(&messages[0]),
+ (*C.ulonglong)(&msgLengths[0]),
+ (*C.uchar)(&publicKeys[0][0]),
+ (*C.uchar)(&signatures[0][0]),
+ C.size_t(numberOfSignatures),
+ (*C.int)(&valid[0]))
failed = make([]bool, numberOfSignatures)
for i := 0; i < numberOfSignatures; i++ {
- cint := *(*C.int)(unsafe.Pointer(uintptr(valid) + uintptr(i*C.sizeof_int)))
- failed[i] = (cint == 0)
+ failed[i] = (valid[i] == 0)
}
return allValid == 0, failed
}
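The wrapper above expects every message packed back-to-back into one byte slice plus a parallel slice of per-message lengths, which is what VerifyWithFeedback now builds. A minimal sketch of that packing step, assuming hashables, publicKeys, and signatures slices of equal length:

	// Flat layout consumed by ed25519_batch_wrapper:
	// messages = hashRep(m0) || hashRep(m1) || ...  and  msgLengths[i] = len(hashRep(mi)).
	msgLengths := make([]uint64, 0, len(hashables))
	messages := make([]byte, 0, len(hashables)*64) // 64 bytes per message is only an estimate
	prev := 0
	for _, h := range hashables {
		messages = HashRepToBuff(h, messages)
		msgLengths = append(msgLengths, uint64(len(messages)-prev))
		prev = len(messages)
	}
	allValid, failed := batchVerificationImpl(messages, msgLengths, publicKeys, signatures)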
diff --git a/crypto/curve25519.go b/crypto/curve25519.go
index 58950a3de..a8637399d 100644
--- a/crypto/curve25519.go
+++ b/crypto/curve25519.go
@@ -35,6 +35,7 @@ import "C"
import (
"fmt"
+ "unsafe"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/util/metrics"
@@ -64,6 +65,30 @@ func init() {
_ = [C.crypto_sign_ed25519_PUBLICKEYBYTES]byte(ed25519PublicKey{})
_ = [C.crypto_sign_ed25519_SECRETKEYBYTES]byte(ed25519PrivateKey{})
_ = [C.crypto_sign_ed25519_SEEDBYTES]byte(ed25519Seed{})
+
+ // Check that this platform makes slices []Signature and []SignatureVerifier that use a backing
+ // array of contiguously allocated 64- and 32-byte segments, respectively, with no padding.
+	// These slices' backing arrays are passed to C.ed25519_batch_wrapper. In practice, this check
+ // should always succeed, but to be careful we can double-check, since the Go specification does
+ // not explicitly define platform-specific alignment sizes and slice allocation behavior.
+ length := 1024
+ sigs := make([]Signature, length) // same as [][64]byte
+ pks := make([]SignatureVerifier, length) // same as [][32]byte
+
+ for i := 1; i < length; i++ {
+ if uintptr(unsafe.Pointer(&sigs[i]))-uintptr(unsafe.Pointer(&sigs[0])) != uintptr(i)*C.crypto_sign_ed25519_BYTES {
+ panic("Unexpected alignment for a slice of signatures")
+ }
+ if uintptr(unsafe.Pointer(&pks[i]))-uintptr(unsafe.Pointer(&pks[0])) != uintptr(i)*C.crypto_sign_ed25519_PUBLICKEYBYTES {
+ panic("Unexpected alignment for a slice of public keys")
+ }
+ }
+ if uintptr(unsafe.Pointer(&sigs[length-1]))-uintptr(unsafe.Pointer(&sigs[0])) != uintptr(length-1)*C.crypto_sign_ed25519_BYTES {
+ panic("Unexpected total size for a backing array of signatures")
+ }
+ if uintptr(unsafe.Pointer(&pks[length-1]))-uintptr(unsafe.Pointer(&pks[0])) != uintptr(length-1)*C.crypto_sign_ed25519_PUBLICKEYBYTES {
+ panic("Unexpected total size for a backing array of public keys")
+ }
}
// A Seed holds the entropy needed to generate cryptographic keys.
diff --git a/crypto/hashes.go b/crypto/hashes.go
index 8933717e4..04db757f3 100644
--- a/crypto/hashes.go
+++ b/crypto/hashes.go
@@ -118,7 +118,7 @@ func (z *HashFactory) Validate() error {
}
// GenericHashObj Makes it easier to sum using hash interface and Hashable interface
-func GenericHashObj(hsh hash.Hash, h Hashable) []byte {
+func GenericHashObj[H Hashable](hsh hash.Hash, h H) []byte {
rep := HashRep(h)
return hashBytes(hsh, rep)
}
diff --git a/crypto/hashes_test.go b/crypto/hashes_test.go
index dd4b8c3bd..9f8b57fe3 100644
--- a/crypto/hashes_test.go
+++ b/crypto/hashes_test.go
@@ -53,7 +53,6 @@ func TestHashSum(t *testing.T) {
dgst := HashObj(TestingHashable{})
a.Equal(GenericHashObj(h, TestingHashable{}), dgst[:])
-
}
func TestEmptyHash(t *testing.T) {
diff --git a/crypto/merklearray/layer.go b/crypto/merklearray/layer.go
index 5018ae074..88eed6ffe 100644
--- a/crypto/merklearray/layer.go
+++ b/crypto/merklearray/layer.go
@@ -37,14 +37,14 @@ type pair struct {
hashDigestSize int
}
-func (p *pair) ToBeHashed() (protocol.HashID, []byte) {
+func (p pair) ToBeHashed() (protocol.HashID, []byte) {
// hashing of internal node will always be fixed length.
// If one of the children is missing we use [0...0].
// The size of the slice is based on the relevant hash function output size
buf := make([]byte, 2*p.hashDigestSize)
copy(buf[:], p.l[:])
copy(buf[len(p.l):], p.r[:])
- return protocol.MerkleArrayNode, buf[:]
+ return protocol.MerkleArrayNode, buf
}
func upWorker(ws *workerState, in Layer, out Layer, h hash.Hash) {
@@ -69,7 +69,7 @@ func upWorker(ws *workerState, in Layer, out Layer, h hash.Hash) {
p.r = in[i+1]
}
- out[i/2] = crypto.GenericHashObj(h, &p)
+ out[i/2] = crypto.GenericHashObj(h, p)
}
batchSize += 2
diff --git a/crypto/merklearray/merkle_test.go b/crypto/merklearray/merkle_test.go
index 9a1a5c0fd..0d392dcef 100644
--- a/crypto/merklearray/merkle_test.go
+++ b/crypto/merklearray/merkle_test.go
@@ -1172,12 +1172,13 @@ func merkleCommitBench(b *testing.B, hashType crypto.HashType) {
msg := make(TestBuf, sz)
crypto.RandBytes(msg[:])
- for cnt := 10; cnt <= 10000000; cnt *= 10 {
+ for cnt := 10; cnt <= 100000; cnt *= 10 {
var a TestRepeatingArray
a.item = msg
a.count = uint64(cnt)
b.Run(fmt.Sprintf("Item%d/Count%d", sz, cnt), func(b *testing.B) {
+ b.ReportAllocs()
for i := 0; i < b.N; i++ {
tree, err := Build(a, crypto.HashFactory{HashType: hashType})
require.NoError(b, err)
@@ -1205,6 +1206,7 @@ func benchmarkMerkleProve1M(b *testing.B, hashType crypto.HashType) {
require.NoError(b, err)
b.ResetTimer()
+ b.ReportAllocs()
for i := uint64(0); i < uint64(b.N); i++ {
_, err := tree.Prove([]uint64{i % a.count})
@@ -1238,6 +1240,7 @@ func benchmarkMerkleVerify1M(b *testing.B, hashType crypto.HashType) {
}
b.ResetTimer()
+ b.ReportAllocs()
for i := uint64(0); i < uint64(b.N); i++ {
err := Verify(root, map[uint64]crypto.Hashable{i % a.count: msg}, proofs[i])
diff --git a/crypto/merklearray/partial.go b/crypto/merklearray/partial.go
index 4baf777f3..b1aa07c52 100644
--- a/crypto/merklearray/partial.go
+++ b/crypto/merklearray/partial.go
@@ -118,7 +118,7 @@ func (pl partialLayer) up(s *siblings, l uint64, doHash bool, hsh hash.Hash) (pa
p.l = siblingHash
p.r = posHash
}
- nextLayerHash = crypto.GenericHashObj(hsh, &p)
+ nextLayerHash = crypto.GenericHashObj(hsh, p)
}
res = append(res, layerItem{
diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go
index 1de485496..a2db211a7 100644
--- a/crypto/onetimesig.go
+++ b/crypto/onetimesig.go
@@ -319,8 +319,21 @@ func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, message
Batch: id.Batch,
}
+ // serialize encoded batchID, offsetID, message into a continuous memory buffer with the layout
+ // hashRep(batchID)... hashRep(offsetID)... hashRep(message)...
+ const estimatedSize = 256
+ messageBuffer := make([]byte, 0, estimatedSize)
+
+ messageBuffer = HashRepToBuff(batchID, messageBuffer)
+ batchIDLen := uint64(len(messageBuffer))
+ messageBuffer = HashRepToBuff(offsetID, messageBuffer)
+ offsetIDLen := uint64(len(messageBuffer)) - batchIDLen
+ messageBuffer = HashRepToBuff(message, messageBuffer)
+ messageLen := uint64(len(messageBuffer)) - offsetIDLen - batchIDLen
+ msgLengths := []uint64{batchIDLen, offsetIDLen, messageLen}
allValid, _ := batchVerificationImpl(
- [][]byte{HashRep(batchID), HashRep(offsetID), HashRep(message)},
+ messageBuffer,
+ msgLengths,
[]PublicKey{PublicKey(v), PublicKey(batchID.SubKeyPK), PublicKey(offsetID.SubKeyPK)},
[]Signature{Signature(sig.PK2Sig), Signature(sig.PK1Sig), Signature(sig.Sig)},
)
diff --git a/crypto/util.go b/crypto/util.go
index 60bb12aef..078d52c0c 100644
--- a/crypto/util.go
+++ b/crypto/util.go
@@ -35,11 +35,19 @@ type Hashable interface {
}
// HashRep appends the correct hashid before the message to be hashed.
-func HashRep(h Hashable) []byte {
+func HashRep[H Hashable](h H) []byte {
hashid, data := h.ToBeHashed()
return append([]byte(hashid), data...)
}
+// HashRepToBuff appends the correct hashid before the message to be hashed into the provided buffer
+func HashRepToBuff(h Hashable, buffer []byte) []byte {
+ hashid, data := h.ToBeHashed()
+ buffer = append(buffer, hashid...)
+ buffer = append(buffer, data...)
+ return buffer
+}
+
// DigestSize is the number of bytes in the preferred hash Digest used here.
const DigestSize = sha512.Size256
@@ -86,7 +94,7 @@ func Hash(data []byte) Digest {
}
// HashObj computes a hash of a Hashable object and its type
-func HashObj(h Hashable) Digest {
+func HashObj[H Hashable](h H) Digest {
return Hash(HashRep(h))
}
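HashRepToBuff exists so several hash representations can be packed into one reusable buffer without a fresh allocation per item (see the batch verifier and one-time signature changes above). A minimal usage sketch, assuming two Hashable values a and b:

	buf := make([]byte, 0, 256)
	buf = HashRepToBuff(a, buf) // buf now holds hashRep(a)
	aLen := len(buf)
	buf = HashRepToBuff(b, buf) // hashRep(b) is appended after it
	// buf[:aLen] equals HashRep(a), and buf[aLen:] equals HashRep(b)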
diff --git a/crypto/util_test.go b/crypto/util_test.go
index 667da0bcd..2e0828bcc 100644
--- a/crypto/util_test.go
+++ b/crypto/util_test.go
@@ -17,8 +17,10 @@
package crypto
import (
+ "fmt"
"testing"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -46,3 +48,32 @@ func TestDigest_IsZero(t *testing.T) {
require.NotZero(t, d2)
}
+
+type testToBeHashed struct {
+ i int
+}
+
+func (tbh *testToBeHashed) ToBeHashed() (protocol.HashID, []byte) {
+ data := make([]byte, tbh.i)
+ for x := 0; x < tbh.i; x++ {
+ data[x] = byte(tbh.i)
+ }
+ return protocol.HashID(fmt.Sprintf("ID%d", tbh.i)), data
+}
+
+func TestHashRepToBuff(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ values := []int{32, 64, 512, 1024}
+ buffer := make([]byte, 0, 128)
+ for _, val := range values {
+ tbh := &testToBeHashed{i: val}
+ buffer = HashRepToBuff(tbh, buffer)
+ }
+ pos := 0
+ for _, val := range values {
+ tbh := &testToBeHashed{i: val}
+ data := HashRep(tbh)
+ require.Equal(t, data, buffer[pos:pos+len(data)])
+ pos = pos + len(data)
+ }
+}
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index d8e086ff3..aa5a7492b 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -22,7 +22,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "golang.org/x/sync/semaphore"
"io"
"math"
"net"
@@ -33,6 +32,8 @@ import (
"testing"
"time"
+ "golang.org/x/sync/semaphore"
+
"github.com/algorand/go-algorand/daemon/algod/api/server"
"github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -272,8 +273,7 @@ func addBlockHelper(t *testing.T) (v2.Handlers, echo.Context, *httptest.Response
// make an app call txn with eval delta
lsig := transactions.LogicSig{Logic: retOneProgram} // int 1
- program := logic.Program(lsig.Logic)
- lhash := crypto.HashObj(&program)
+ lhash := logic.HashProgram(lsig.Logic)
var sender basics.Address
copy(sender[:], lhash[:])
stx := transactions.SignedTxn{
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index 2f887d25a..e2b52fc7c 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -342,8 +342,7 @@ func testingenvWithBalances(t testing.TB, minMoneyAtStart, maxMoneyAtStart, numA
genesis[poolAddr] = basics_testing.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 100000 * uint64(proto.RewardsRateRefreshInterval)})
- program := logic.Program(retOneProgram)
- lhash := crypto.HashObj(&program)
+ lhash := logic.HashProgram(retOneProgram)
var addr basics.Address
copy(addr[:], lhash[:])
ad := basics_testing.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 100000 * uint64(proto.RewardsRateRefreshInterval)})
diff --git a/data/account/participation.go b/data/account/participation.go
index 22130df6c..9493f21f0 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -80,8 +80,8 @@ func (id *ParticipationKeyIdentity) ToBeHashed() (protocol.HashID, []byte) {
}
// ID creates a ParticipationID hash from the identity file.
-func (id ParticipationKeyIdentity) ID() ParticipationID {
- return ParticipationID(crypto.HashObj(&id))
+func (id *ParticipationKeyIdentity) ID() ParticipationID {
+ return ParticipationID(crypto.HashObj(id))
}
// ID computes a ParticipationID.
diff --git a/data/account/participation_test.go b/data/account/participation_test.go
index 4a933d72b..ccf300122 100644
--- a/data/account/participation_test.go
+++ b/data/account/participation_test.go
@@ -606,3 +606,13 @@ func BenchmarkParticipationSign(b *testing.B) {
_ = part.Voting.Sign(ephID, msg)
}
}
+
+func BenchmarkID(b *testing.B) {
+ pki := ParticipationKeyIdentity{}
+ b.Run("existing", func(b *testing.B) {
+ b.ReportAllocs() // demonstrate this is a single alloc
+ for i := 0; i < b.N; i++ {
+ pki.ID()
+ }
+ })
+}
diff --git a/data/bookkeeping/genesis_test.go b/data/bookkeeping/genesis_test.go
index 72e94947f..393e38922 100644
--- a/data/bookkeeping/genesis_test.go
+++ b/data/bookkeeping/genesis_test.go
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -155,3 +156,30 @@ func TestGenesis_Balances(t *testing.T) {
})
}
}
+
+func (genesis Genesis) hashOld() crypto.Digest {
+ return hashObjOld(genesis)
+}
+
+// hashObjOld computes a hash of a Hashable object and its type, doing so the
+// "old way" to show it requires an extra allocation in benchmarks.
+func hashObjOld(h crypto.Hashable) crypto.Digest {
+ return crypto.Hash(crypto.HashRep(h))
+}
+
+func BenchmarkGenesisHash(b *testing.B) {
+ b.ReportAllocs()
+ g := Genesis{}
+ b.Run("new", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ g.Hash()
+ }
+ })
+ b.Run("old", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ g.hashOld()
+ }
+ })
+}
diff --git a/data/bookkeeping/lightBlockHeader.go b/data/bookkeeping/lightBlockHeader.go
index ea283e039..90edea3ba 100644
--- a/data/bookkeeping/lightBlockHeader.go
+++ b/data/bookkeeping/lightBlockHeader.go
@@ -17,6 +17,7 @@
package bookkeeping
import (
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/committee"
@@ -38,8 +39,15 @@ type LightBlockHeader struct {
In addition, we make sure that the Seed (The unpredictable value) would be the first field that gets
hashed (give it the lowest codec value in the LightBlockHeader struct) to mitigate a collision attack
on the merkle damgard construction.
+
+ The BlockHash serves a similar role, in that it also depends on the seed and introduces some
+ uncontrollable input. It is slightly weaker, in the sense that an adversary can influence
+ the BlockHash to some degree (e.g., by including specific transactions in the payset), but
+	  it comes with the added benefit of allowing the entire blockchain to be authenticated based on
+	  the BlockHash value.
*/
Seed committee.Seed `codec:"0"`
+ BlockHash BlockHash `codec:"1"`
Round basics.Round `codec:"r"`
GenesisHash crypto.Digest `codec:"gh"`
Sha256TxnCommitment crypto.GenericDigest `codec:"tc,allocbound=crypto.Sha256Size"`
@@ -47,12 +55,20 @@ type LightBlockHeader struct {
// ToLightBlockHeader creates returns a LightBlockHeader from a given block header
func (bh *BlockHeader) ToLightBlockHeader() LightBlockHeader {
- return LightBlockHeader{
- Seed: bh.Seed,
+ res := LightBlockHeader{
GenesisHash: bh.GenesisHash,
Round: bh.Round,
Sha256TxnCommitment: bh.Sha256Commitment[:],
}
+
+ proto := config.Consensus[bh.CurrentProtocol]
+ if proto.StateProofBlockHashInLightHeader {
+ res.BlockHash = bh.Hash()
+ } else {
+ res.Seed = bh.Seed
+ }
+
+ return res
}
// ToBeHashed implements the crypto.Hashable interface
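With StateProofBlockHashInLightHeader enabled, the light header commits to the full BlockHash rather than the Seed, so a verifier that trusts a block header can check it directly against a state-proof-committed light header. A minimal sketch, assuming hdr is a trusted bookkeeping.BlockHeader and light is a LightBlockHeader recovered from a verified state proof:

	proto := config.Consensus[hdr.CurrentProtocol]
	if proto.StateProofBlockHashInLightHeader {
		if light.BlockHash != hdr.Hash() {
			// the committed light header does not describe this block
		}
	} else if light.Seed != hdr.Seed {
		// older protocols commit to the Seed instead of the BlockHash
	}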
diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go
index 7eda4de55..cb3a63ad2 100644
--- a/data/bookkeeping/msgp_gen.go
+++ b/data/bookkeeping/msgp_gen.go
@@ -2663,23 +2663,27 @@ func GenesisAllocationMaxSize() (s int) {
func (z *LightBlockHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(4)
- var zb0001Mask uint8 /* 5 bits */
+ zb0001Len := uint32(5)
+ var zb0001Mask uint8 /* 6 bits */
if (*z).Seed.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x1
}
+ if (*z).BlockHash.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
if (*z).GenesisHash.MsgIsZero() {
zb0001Len--
- zb0001Mask |= 0x4
+ zb0001Mask |= 0x8
}
if (*z).Round.MsgIsZero() {
zb0001Len--
- zb0001Mask |= 0x8
+ zb0001Mask |= 0x10
}
if (*z).Sha256TxnCommitment.MsgIsZero() {
zb0001Len--
- zb0001Mask |= 0x10
+ zb0001Mask |= 0x20
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
@@ -2689,17 +2693,22 @@ func (z *LightBlockHeader) MarshalMsg(b []byte) (o []byte) {
o = append(o, 0xa1, 0x30)
o = (*z).Seed.MarshalMsg(o)
}
- if (zb0001Mask & 0x4) == 0 { // if not empty
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "1"
+ o = append(o, 0xa1, 0x31)
+ o = (*z).BlockHash.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
// string "gh"
o = append(o, 0xa2, 0x67, 0x68)
o = (*z).GenesisHash.MarshalMsg(o)
}
- if (zb0001Mask & 0x8) == 0 { // if not empty
+ if (zb0001Mask & 0x10) == 0 { // if not empty
// string "r"
o = append(o, 0xa1, 0x72)
o = (*z).Round.MarshalMsg(o)
}
- if (zb0001Mask & 0x10) == 0 { // if not empty
+ if (zb0001Mask & 0x20) == 0 { // if not empty
// string "tc"
o = append(o, 0xa2, 0x74, 0x63)
o = (*z).Sha256TxnCommitment.MarshalMsg(o)
@@ -2741,6 +2750,14 @@ func (z *LightBlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalSt
}
if zb0001 > 0 {
zb0001--
+ bts, err = (*z).BlockHash.UnmarshalMsgWithState(bts, st)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "BlockHash")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
bts, err = (*z).Round.UnmarshalMsgWithState(bts, st)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Round")
@@ -2792,6 +2809,12 @@ func (z *LightBlockHeader) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalSt
err = msgp.WrapError(err, "Seed")
return
}
+ case "1":
+ bts, err = (*z).BlockHash.UnmarshalMsgWithState(bts, st)
+ if err != nil {
+ err = msgp.WrapError(err, "BlockHash")
+ return
+ }
case "r":
bts, err = (*z).Round.UnmarshalMsgWithState(bts, st)
if err != nil {
@@ -2833,18 +2856,18 @@ func (_ *LightBlockHeader) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *LightBlockHeader) Msgsize() (s int) {
- s = 1 + 2 + (*z).Seed.Msgsize() + 2 + (*z).Round.Msgsize() + 3 + (*z).GenesisHash.Msgsize() + 3 + (*z).Sha256TxnCommitment.Msgsize()
+ s = 1 + 2 + (*z).Seed.Msgsize() + 2 + (*z).BlockHash.Msgsize() + 2 + (*z).Round.Msgsize() + 3 + (*z).GenesisHash.Msgsize() + 3 + (*z).Sha256TxnCommitment.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *LightBlockHeader) MsgIsZero() bool {
- return ((*z).Seed.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).GenesisHash.MsgIsZero()) && ((*z).Sha256TxnCommitment.MsgIsZero())
+ return ((*z).Seed.MsgIsZero()) && ((*z).BlockHash.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).GenesisHash.MsgIsZero()) && ((*z).Sha256TxnCommitment.MsgIsZero())
}
// MaxSize returns a maximum valid message size for this message type
func LightBlockHeaderMaxSize() (s int) {
- s = 1 + 2 + committee.SeedMaxSize() + 2 + basics.RoundMaxSize() + 3 + crypto.DigestMaxSize() + 3 + crypto.GenericDigestMaxSize()
+ s = 1 + 2 + committee.SeedMaxSize() + 2 + BlockHashMaxSize() + 2 + basics.RoundMaxSize() + 3 + crypto.DigestMaxSize() + 3 + crypto.GenericDigestMaxSize()
return
}
diff --git a/data/transactions/logic/program.go b/data/transactions/logic/program.go
index 4568ebe74..85195b904 100644
--- a/data/transactions/logic/program.go
+++ b/data/transactions/logic/program.go
@@ -33,5 +33,5 @@ func (lsl Program) ToBeHashed() (protocol.HashID, []byte) {
// This Digest can be used as an Address for a logic controlled account.
func HashProgram(program []byte) crypto.Digest {
pb := Program(program)
- return crypto.HashObj(&pb)
+ return crypto.HashObj(pb)
}
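The goal call sites above were switched to logic.HashProgram, which is just this Program-wrapper hash packaged as a helper. A minimal sketch of the equivalence being relied on, with hypothetical program bytes:

	prog := []byte{0x06, 0x81, 0x01} // hypothetical compiled program bytes
	h1 := logic.HashProgram(prog)
	h2 := crypto.HashObj(logic.Program(prog))
	// h1 == h2: both hash ("Program" || prog), so the CLI output is unchanged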
diff --git a/installer/config.json.example b/installer/config.json.example
index ce0238033..aa1cb7171 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 32,
+ "Version": 33,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 15,
@@ -49,6 +49,7 @@
"EnableExperimentalAPI": false,
"EnableFollowMode": false,
"EnableGossipBlockService": true,
+ "EnableGossipService": true,
"EnableIncomingMessageFilter": false,
"EnableLedgerService": false,
"EnableMetricReporting": false,
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 64ada07a0..3661c6005 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -238,7 +238,7 @@ const (
// CatchupAccessorClientLedger represents ledger interface needed for catchpoint accessor clients
type CatchupAccessorClientLedger interface {
- Block(rnd basics.Round) (blk bookkeeping.Block, err error)
+ BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error)
GenesisHash() crypto.Digest
BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error)
Latest() (rnd basics.Round)
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 05e7ba44c..7f8b3046c 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -591,7 +591,9 @@ func (wn *WebsocketNetwork) setup() {
wn.upgrader.EnableCompression = false
wn.lastPeerConnectionsSent = time.Now()
wn.router = mux.NewRouter()
- wn.router.Handle(GossipNetworkPath, wn)
+ if wn.config.EnableGossipService {
+ wn.router.Handle(GossipNetworkPath, wn)
+ }
wn.requestsTracker = makeRequestsTracker(wn.router, wn.log, wn.config)
if wn.config.EnableRequestLogger {
wn.requestsLogger = makeRequestLogger(wn.requestsTracker, wn.log)
@@ -1009,6 +1011,11 @@ func (wn *WebsocketNetwork) GetHTTPRequestConnection(request *http.Request) (con
// ServerHTTP handles the gossip network functions over websockets
func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *http.Request) {
+ if !wn.config.EnableGossipService {
+ response.WriteHeader(http.StatusNotFound)
+ return
+ }
+
trackedRequest := wn.requestsTracker.GetTrackedRequest(request)
if wn.checkIncomingConnectionLimits(response, request, trackedRequest.remoteHost, trackedRequest.otherTelemetryGUID, trackedRequest.otherInstanceName) != http.StatusOK {
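When EnableGossipService is false, the gossip route is never registered and ServeHTTP answers 404, which the test added below exercises. A minimal sketch of disabling the endpoint on a node, assuming the usual config.Local defaults:

	cfg := config.GetDefaultLocal()
	cfg.EnableGossipService = false // requests to the gossip websocket path now get 404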
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 445ede3dc..05e484843 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -628,6 +628,10 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
noAddressConfig := defaultConfig
noAddressConfig.NetAddress = ""
+ // enable services even though NetAddress is not set (to assert they don't override NetAddress)
+ noAddressConfig.EnableGossipService = true
+ noAddressConfig.EnableBlockService = true
+ noAddressConfig.EnableLedgerService = true
netB := makeTestWebsocketNodeWithConfig(t, noAddressConfig)
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
@@ -636,6 +640,12 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
netB.Start()
defer netStop(t, netB, "B")
+
+ // assert addrB is not listening
+ addrB, postListenB := netB.Address()
+ require.False(t, postListenB)
+ require.Empty(t, addrB)
+
counter := newMessageCounter(t, 2)
counterDone := counter.done
netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}})
@@ -656,6 +666,29 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
}
}
+func TestWebsocketNetworkNoGossipService(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ config := defaultConfig
+ config.EnableGossipService = false
+ netA := makeTestWebsocketNodeWithConfig(t, config)
+ netA.Start()
+ defer netStop(t, netA, "A")
+
+ // assert that the network was started and is listening
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+
+ // make HTTP request to gossip service and assert 404
+ var resp *http.Response
+ require.Eventually(t, func() bool {
+ var err error
+ resp, err = http.Get(fmt.Sprintf("%s/v1/%s/gossip", addrA, genesisID))
+ return err == nil
+ }, 2*time.Second, 100*time.Millisecond)
+ require.Equal(t, http.StatusNotFound, resp.StatusCode)
+}
+
func lineNetwork(t *testing.T, numNodes int) (nodes []*WebsocketNetwork, counters []messageCounterHandler) {
nodes = make([]*WebsocketNetwork, numNodes)
counters = make([]messageCounterHandler, numNodes)
diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go
index b58669a6a..4631ecd6e 100644
--- a/test/e2e-go/features/stateproofs/stateproofs_test.go
+++ b/test/e2e-go/features/stateproofs/stateproofs_test.go
@@ -323,6 +323,8 @@ func TestStateProofMessageCommitmentVerification(t *testing.T) {
consensusVersion := protocol.ConsensusVersion("test-fast-stateproofs")
consensusParams := getDefaultStateProofConsensusParams()
configurableConsensus[consensusVersion] = consensusParams
+ oldConsensus := config.SetConfigurableConsensusProtocols(configurableConsensus)
+ defer config.SetConfigurableConsensusProtocols(oldConsensus)
var fixture fixtures.RestClientFixture
fixture.SetConsensus(configurableConsensus)
diff --git a/test/testdata/configs/config-v33.json b/test/testdata/configs/config-v33.json
new file mode 100644
index 000000000..aa1cb7171
--- /dev/null
+++ b/test/testdata/configs/config-v33.json
@@ -0,0 +1,140 @@
+{
+ "Version": 33,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 15,
+ "AgreementIncomingProposalsQueueLength": 50,
+ "AgreementIncomingVotesQueueLength": 20000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockDBDir": "",
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BlockServiceMemCap": 500000000,
+ "BroadcastConnectionsLimit": -1,
+ "CadaverDirectory": "",
+ "CadaverSizeTarget": 0,
+ "CatchpointDir": "",
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ColdDataDir": "",
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "CrashDBDir": "",
+ "DNSBootstrapID": "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableAPIAuth": false,
+ "DisableLedgerLRUCache": false,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": false,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableExperimentalAPI": false,
+ "EnableFollowMode": false,
+ "EnableGossipBlockService": true,
+ "EnableGossipService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnableP2P": false,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableTxBacklogAppRateLimiting": true,
+ "EnableTxBacklogRateLimiting": true,
+ "EnableTxnEvalTracer": false,
+ "EnableUsageLog": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "HeartbeatUpdateInterval": 600,
+ "HotDataDir": "",
+ "IncomingConnectionsLimit": 2400,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveDir": "",
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogFileDir": "",
+ "LogSizeLimit": 1073741824,
+ "MaxAPIBoxPerApplication": 100000,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxAcctLookback": 4,
+ "MaxBlockHistoryLookback": 0,
+ "MaxCatchpointDownloadDuration": 43200000000000,
+ "MaxConnectionsPerIP": 15,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "P2PPersistPeerID": false,
+ "P2PPrivateKeyLocation": "",
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "StateproofDir": "",
+ "StorageEngine": "sqlite",
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TrackerDBDir": "",
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxBacklogAppTxPerSecondRate": 100,
+ "TxBacklogAppTxRateLimiterMaxSize": 1048576,
+ "TxBacklogRateLimitingCongestionPct": 50,
+ "TxBacklogReservedCapacityPerPeer": 20,
+ "TxBacklogServiceRateWindowSeconds": 10,
+ "TxBacklogSize": 26000,
+ "TxIncomingFilterMaxSize": 500000,
+ "TxIncomingFilteringFlags": 1,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}